Diffstat (limited to 'drivers/crypto/caam/caamalg.c')

 drivers/crypto/caam/caamalg.c | 4805 ++++++++++++++++++++++++++++++---------------
 1 file changed, 3271 insertions(+), 1534 deletions(-)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index bf416a8391a7..32a6e6e15ee2 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1,7 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * caam - Freescale FSL CAAM support for crypto API
  *
  * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2016-2019, 2023, 2025 NXP
  *
  * Based on talitos crypto API driver.
  *
@@ -53,6 +55,22 @@
 #include "error.h"
 #include "sg_sw_sec4.h"
 #include "key_gen.h"
+#include "caamalg_desc.h"
+#include <linux/unaligned.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/engine.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/xts.h>
+#include <keys/trusted-type.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/key-type.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <soc/fsl/caam-blob.h>
 
 /*
  * crypto alg
@@ -60,667 +78,945 @@
 #define CAAM_CRA_PRIORITY		3000
 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */
 #define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
+					 CTR_RFC3686_NONCE_SIZE + \
 					 SHA512_DIGEST_SIZE * 2)
-/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
-#define CAAM_MAX_IV_LENGTH		16
 
-/* length of descriptors text */
-#define DESC_JOB_IO_LEN			(CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
+#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
+#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
+					 CAAM_CMD_SZ * 4)
+#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
+					 CAAM_CMD_SZ * 5)
 
-#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
-#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
-#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
-#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+#define CHACHAPOLY_DESC_JOB_IO_LEN	(AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)
 
-#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
-					 20 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
-					 15 * CAAM_CMD_SZ)
-
-#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
-					 CAAM_MAX_KEY_SIZE)
+#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN)
 #define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
 
-#ifdef DEBUG
-/* for print_hex_dumps with line references */
-#define xstr(s) str(s)
-#define str(s) #s
-#define debug(format, arg...) printk(format, arg)
-#else
-#define debug(format, arg...)
-#endif
-
-/* Set DK bit in class 1 operation if shared */
-static inline void append_dec_op1(u32 *desc, u32 type)
-{
-	u32 *jump_cmd, *uncond_jump_cmd;
-
-	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
-	append_operation(desc, type | OP_ALG_AS_INITFINAL |
-			 OP_ALG_DECRYPT);
-	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
-	set_jump_tgt_here(desc, jump_cmd);
-	append_operation(desc, type | OP_ALG_AS_INITFINAL |
-			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
-	set_jump_tgt_here(desc, uncond_jump_cmd);
-}
-
-/*
- * Wait for completion of class 1 key loading before allowing
- * error propagation
- */
-static inline void append_dec_shr_done(u32 *desc)
-{
-	u32 *jump_cmd;
-
-	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
-	set_jump_tgt_here(desc, jump_cmd);
-	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-}
-
-/*
- * For aead functions, read payload and write payload,
- * both of which are specified in req->src and req->dst
- */
-static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
-{
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
-			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
-}
-
-/*
- * For aead encrypt and decrypt, read iv for both classes
- */
-static inline void aead_append_ld_iv(u32 *desc, int ivsize)
-{
-	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
-		   LDST_CLASS_1_CCB | ivsize);
-	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
-}
+struct caam_alg_entry {
+	int class1_alg_type;
+	int class2_alg_type;
+	bool rfc3686;
+	bool geniv;
+	bool nodkp;
+};
 
-/*
- * For ablkcipher encrypt and decrypt, read from req->src and
- * write to req->dst
- */
-static inline void ablkcipher_append_src_dst(u32 *desc)
-{
-	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
-			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
-}
+struct caam_aead_alg {
+	struct aead_engine_alg aead;
+	struct caam_alg_entry caam;
+	bool registered;
+};
 
-/*
- * If all data, including src (with assoc and iv) or dst (with iv only) are
- * contiguous
- */
-#define GIV_SRC_CONTIG		1
-#define GIV_DST_CONTIG		(1 << 1)
+struct caam_skcipher_alg {
+	struct skcipher_engine_alg skcipher;
+	struct caam_alg_entry caam;
+	bool registered;
+};
 
 /*
  * per-session context
  */
 struct caam_ctx {
-	struct device *jrdev;
 	u32 sh_desc_enc[DESC_MAX_USED_LEN];
 	u32 sh_desc_dec[DESC_MAX_USED_LEN];
-	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
+	u8 key[CAAM_MAX_KEY_SIZE];
 	dma_addr_t sh_desc_enc_dma;
 	dma_addr_t sh_desc_dec_dma;
-	dma_addr_t sh_desc_givenc_dma;
-	u32 class1_alg_type;
-	u32 class2_alg_type;
-	u32 alg_op;
-	u8 key[CAAM_MAX_KEY_SIZE];
 	dma_addr_t key_dma;
-	unsigned int enckeylen;
-	unsigned int split_key_len;
-	unsigned int split_key_pad_len;
+	u8 protected_key[CAAM_MAX_KEY_SIZE];
+	dma_addr_t protected_key_dma;
+	enum dma_data_direction dir;
+	struct device *jrdev;
+	struct alginfo adata;
+	struct alginfo cdata;
 	unsigned int authsize;
+	bool xts_key_fallback;
+	bool is_blob;
+	struct crypto_skcipher *fallback;
 };
 
-static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
-			    int keys_fit_inline)
-{
-	if (keys_fit_inline) {
-		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
-				  ctx->split_key_len, CLASS_2 |
-				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
-		append_key_as_imm(desc, (void *)ctx->key +
-				  ctx->split_key_pad_len, ctx->enckeylen,
-				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-	} else {
-		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
-			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
-		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
-			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-	}
-}
+struct caam_skcipher_req_ctx {
+	struct skcipher_edesc *edesc;
+	struct skcipher_request fallback_req;
+};
+
+struct caam_aead_req_ctx {
+	struct aead_edesc *edesc;
+};
 
-static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
-				  int keys_fit_inline)
+static int aead_null_set_sh_desc(struct crypto_aead *aead)
 {
-	u32 *key_jump_cmd;
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
+	struct device *jrdev = ctx->jrdev;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+	u32 *desc;
+	int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
+			ctx->adata.keylen_pad;
 
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
+		ctx->adata.key_inline = true;
+		ctx->adata.key_virt = ctx->key;
+	} else {
+		ctx->adata.key_inline = false;
+		ctx->adata.key_dma = ctx->key_dma;
+	}
 
-	/* Skip if already shared */
-	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-				   JUMP_COND_SHRD);
+	/* aead_encrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
+				    ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
 
-	append_key_aead(desc, ctx, keys_fit_inline);
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
+		ctx->adata.key_inline = true;
+		ctx->adata.key_virt = ctx->key;
+	} else {
+		ctx->adata.key_inline = false;
+		ctx->adata.key_dma = ctx->key_dma;
+	}
 
-	set_jump_tgt_here(desc, key_jump_cmd);
+	/* aead_decrypt shared descriptor */
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
+				    ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
 
-	/* Propagate errors from shared to job descriptor */
-	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+	return 0;
 }
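[Note: aead_null_set_sh_desc() above shows the pattern used throughout this patch: the job descriptor and the shared descriptor must together fit the 64-word descriptor buffer, so a key is inlined only when the remaining budget allows, and referenced by DMA address otherwise. A minimal sketch of that arithmetic, with the buffer size assumed to be 64 words of CAAM_CMD_SZ = 4 bytes as in the driver headers; the helper name is hypothetical.]

	#include <linux/types.h>

	#define DEMO_CAAM_CMD_SZ	4			/* one descriptor word */
	#define DEMO_DESC_BYTES_MAX	(DEMO_CAAM_CMD_SZ * 64)	/* 64-word h/w buffer */

	/*
	 * Illustrative helper: true when a padded key of keylen_pad bytes can
	 * be placed immediately in the shared descriptor, given the descriptor
	 * text length and the job-descriptor I/O overhead.
	 */
	static bool demo_key_fits_inline(int desc_text_len, int job_io_len,
					 int keylen_pad)
	{
		int rem_bytes = DEMO_DESC_BYTES_MAX - job_io_len - keylen_pad;

		return rem_bytes >= desc_text_len;
	}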
 
 static int aead_set_sh_desc(struct crypto_aead *aead)
 {
-	struct aead_tfm *tfm = &aead->base.crt_aead;
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+						 struct caam_aead_alg,
+						 aead.base);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct device *jrdev = ctx->jrdev;
-	bool keys_fit_inline = false;
-	u32 *key_jump_cmd, *jump_cmd;
-	u32 geniv, moveiv;
-	u32 *desc;
-
-	if (!ctx->enckeylen || !ctx->authsize)
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+	u32 ctx1_iv_off = 0;
+	u32 *desc, *nonce = NULL;
+	u32 inl_mask;
+	unsigned int data_len[2];
+	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+			       OP_ALG_AAI_CTR_MOD128);
+	const bool is_rfc3686 = alg->caam.rfc3686;
+
+	if (!ctx->authsize)
 		return 0;
 
+	/* NULL encryption / decryption */
+	if (!ctx->cdata.keylen)
+		return aead_null_set_sh_desc(aead);
+
+	/*
+	 * AES-CTR needs to load IV in CONTEXT1 reg
+	 * at an offset of 128bits (16bytes)
+	 * CONTEXT1[255:128] = IV
+	 */
+	if (ctr_mode)
+		ctx1_iv_off = 16;
+
+	/*
+	 * RFC3686 specific:
+	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 */
+	if (is_rfc3686) {
+		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
+				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
+	}
+
+	/*
+	 * In case |user key| > |derived key|, using DKP<imm,imm>
+	 * would result in invalid opcodes (last bytes of user key) in
+	 * the resulting descriptor. Use DKP<ptr,imm> instead => both
+	 * virtual and dma key addresses are needed.
+	 */
+	ctx->adata.key_virt = ctx->key;
+	ctx->adata.key_dma = ctx->key_dma;
+
+	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+	data_len[0] = ctx->adata.keylen_pad;
+	data_len[1] = ctx->cdata.keylen;
+
+	if (alg->caam.geniv)
+		goto skip_enc;
+
 	/*
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
-	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
-	    ctx->split_key_pad_len + ctx->enckeylen <=
-	    CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
+	if (desc_inline_query(DESC_AEAD_ENC_LEN +
+			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+			      ARRAY_SIZE(data_len)) < 0)
+		return -EINVAL;
+
+	ctx->adata.key_inline = !!(inl_mask & 1);
+	ctx->cdata.key_inline = !!(inl_mask & 2);
 
 	/* aead_encrypt shared descriptor */
 	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
+			       ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
+			       false, ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+skip_enc:
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (desc_inline_query(DESC_AEAD_DEC_LEN +
+			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+			      ARRAY_SIZE(data_len)) < 0)
+		return -EINVAL;
 
-	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+	ctx->adata.key_inline = !!(inl_mask & 1);
+	ctx->cdata.key_inline = !!(inl_mask & 2);
 
-	/* Class 2 operation */
-	append_operation(desc, ctx->class2_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+	/* aead_decrypt shared descriptor */
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
+			       ctx->authsize, alg->caam.geniv, is_rfc3686,
+			       nonce, ctx1_iv_off, false, ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
 
-	/* cryptlen = seqoutlen - authsize */
-	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+	if (!alg->caam.geniv)
+		goto skip_givenc;
 
-	/* assoclen + cryptlen = seqinlen - ivsize */
-	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
+			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+			      ARRAY_SIZE(data_len)) < 0)
+		return -EINVAL;
 
-	/* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */
-	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
+	ctx->adata.key_inline = !!(inl_mask & 1);
+	ctx->cdata.key_inline = !!(inl_mask & 2);
 
-	/* read assoc before reading payload */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
-			     KEY_VLF);
-	aead_append_ld_iv(desc, tfm->ivsize);
+	/* aead_givencrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
+				  ctx->authsize, is_rfc3686, nonce,
+				  ctx1_iv_off, false, ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
 
-	/* Class 1 operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+skip_givenc:
+	return 0;
+}
 
-	/* Read and write cryptlen bytes */
-	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+static int aead_setauthsize(struct crypto_aead *authenc,
+			    unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
 
-	/* Write ICV */
-	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
-			 LDST_SRCDST_BYTE_CONTEXT);
+	ctx->authsize = authsize;
+	aead_set_sh_desc(authenc);
 
-	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
-					      desc_bytes(desc),
-					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
+	return 0;
+}
+
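[Note: the setauthsize/setkey hooks in this file are driven by the generic kernel crypto API; roughly like the sketch below (error handling trimmed, and "gcm(aes)" may of course resolve to an implementation other than this driver).]

	#include <crypto/aead.h>
	#include <linux/err.h>

	static struct crypto_aead *demo_gcm_alloc(const u8 *key, unsigned int keylen)
	{
		struct crypto_aead *tfm;
		int err;

		tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(tfm))
			return tfm;

		/* ends up in gcm_setauthsize() -> gcm_set_sh_desc() here */
		err = crypto_aead_setauthsize(tfm, 16);
		if (!err)
			/* ends up in gcm_setkey(), which DMA-syncs ctx->key */
			err = crypto_aead_setkey(tfm, key, keylen);
		if (err) {
			crypto_free_aead(tfm);
			return ERR_PTR(err);
		}
		return tfm;
	}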
+static int gcm_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
+	struct device *jrdev = ctx->jrdev;
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	u32 *desc;
+	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+			ctx->cdata.keylen;
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	/*
+	 * AES GCM encrypt shared descriptor
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_GCM_ENC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
+
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
 
 	/*
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
-	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
-	    ctx->split_key_pad_len + ctx->enckeylen <=
-	    CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
+	if (rem_bytes >= DESC_GCM_DEC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
 
 	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
 
-	/* aead_decrypt shared descriptor */
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
+	return 0;
+}
+
+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
+	int err;
+
+	err = crypto_gcm_check_authsize(authsize);
+	if (err)
+		return err;
+
+	ctx->authsize = authsize;
+	gcm_set_sh_desc(authenc);
+
+	return 0;
+}
+
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
+	struct device *jrdev = ctx->jrdev;
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	u32 *desc;
+	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+			ctx->cdata.keylen;
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	/*
+	 * RFC4106 encrypt shared descriptor
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
 
-	/* Skip if already shared */
-	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-				   JUMP_COND_SHRD);
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+				  false);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
 
-	append_key_aead(desc, ctx, keys_fit_inline);
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
 
-	/* Only propagate error immediately if shared */
-	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
-	set_jump_tgt_here(desc, key_jump_cmd);
-	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-	set_jump_tgt_here(desc, jump_cmd);
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+				  false);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
 
-	/* Class 2 operation */
-	append_operation(desc, ctx->class2_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+	return 0;
+}
 
-	/* assoclen + cryptlen = seqinlen - ivsize */
-	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
-				ctx->authsize + tfm->ivsize);
-	/* assoclen = (assoclen + cryptlen) - cryptlen */
-	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+static int rfc4106_setauthsize(struct crypto_aead *authenc,
+			       unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
+	int err;
 
-	/* read assoc before reading payload */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
-			     KEY_VLF);
+	err = crypto_rfc4106_check_authsize(authsize);
+	if (err)
+		return err;
 
-	aead_append_ld_iv(desc, tfm->ivsize);
+	ctx->authsize = authsize;
+	rfc4106_set_sh_desc(authenc);
 
-	append_dec_op1(desc, ctx->class1_alg_type);
+	return 0;
+}
 
-	/* Read and write cryptlen bytes */
-	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
-	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
-	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
+	struct device *jrdev = ctx->jrdev;
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	u32 *desc;
+	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+			ctx->cdata.keylen;
 
-	/* Load ICV */
-	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
-			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
-	append_dec_shr_done(desc);
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
 
-	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
-					      desc_bytes(desc),
-					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
+	/*
+	 * RFC4543 encrypt shared descriptor
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
+
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+				  false);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
 
 	/*
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
-	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
-	    ctx->split_key_pad_len + ctx->enckeylen <=
-	    CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
-
-	/* aead_givencrypt shared descriptor */
-	desc = ctx->sh_desc_givenc;
-
-	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
-
-	/* Generate IV */
-	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
-		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
-		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
-	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
-			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
-	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
-	append_move(desc, MOVE_SRC_INFIFO |
-		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
-	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
-	/* Copy IV to class 1 context */
-	append_move(desc, MOVE_SRC_CLASS1CTX |
-		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
-
-	/* Return to encryption */
-	append_operation(desc, ctx->class2_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
-	/* ivsize + cryptlen = seqoutlen - authsize */
-	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
-	/* assoclen = seqinlen - (ivsize + cryptlen) */
-	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
-
-	/* read assoc before reading payload */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
-			     KEY_VLF);
-
-	/* Copy iv from class 1 ctx to class 2 fifo*/
-	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
-		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
-	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
-			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
-	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
-			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
-
-	/* Class 1 operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
-	/* Will write ivsize + cryptlen */
-	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
-	/* Not need to reload iv */
-	append_seq_fifo_load(desc, tfm->ivsize,
-			     FIFOLD_CLASS_SKIP);
-
-	/* Will read cryptlen */
-	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
-
-	/* Write ICV */
-	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
-			 LDST_SRCDST_BYTE_CONTEXT);
-
-	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
-						 desc_bytes(desc),
-						 DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
+	if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
+
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+				  false);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
 
 	return 0;
 }
 
-static int aead_setauthsize(struct crypto_aead *authenc,
-			    unsigned int authsize)
+static int rfc4543_setauthsize(struct crypto_aead *authenc,
+			       unsigned int authsize)
 {
-	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
+
+	if (authsize != 16)
+		return -EINVAL;
 
 	ctx->authsize = authsize;
-	aead_set_sh_desc(authenc);
+	rfc4543_set_sh_desc(authenc);
 
 	return 0;
 }
 
-static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
-			      u32 authkeylen)
+static int chachapoly_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
+	struct device *jrdev = ctx->jrdev;
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	u32 *desc;
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
+			       ctx->authsize, true, false);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
+			       ctx->authsize, false, false);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	return 0;
+}
+
+static int chachapoly_setauthsize(struct crypto_aead *aead,
+				  unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
+
+	if (authsize != POLY1305_DIGEST_SIZE)
+		return -EINVAL;
+
+	ctx->authsize = authsize;
+	return chachapoly_set_sh_desc(aead);
+}
+
+static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
+			     unsigned int keylen)
 {
-	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
-			       ctx->split_key_pad_len, key_in, authkeylen,
-			       ctx->alg_op);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
+
+	if (keylen != CHACHA_KEY_SIZE + saltlen)
+		return -EINVAL;
+
+	memcpy(ctx->key, key, keylen);
+	ctx->cdata.key_virt = ctx->key;
+	ctx->cdata.keylen = keylen - saltlen;
+
+	return chachapoly_set_sh_desc(aead);
 }
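[Note: chachapoly_setkey() above accepts two key layouts, selected by the template's IV size: plain rfc7539(chacha20,poly1305) uses a 12-byte IV and a bare 32-byte key, while rfc7539esp uses an 8-byte IV plus a 4-byte salt appended to the key. A sketch of the length check, with the constants assumed to match the crypto headers:]

	#include <linux/errno.h>

	#define DEMO_CHACHA_KEY_SIZE		32
	#define DEMO_CHACHAPOLY_IV_SIZE		12

	/* rfc7539: ivsize 12 -> keylen 32; rfc7539esp: ivsize 8 -> keylen 36 */
	static int demo_chachapoly_check_key(unsigned int ivsize,
					     unsigned int keylen)
	{
		unsigned int saltlen = DEMO_CHACHAPOLY_IV_SIZE - ivsize;

		return keylen == DEMO_CHACHA_KEY_SIZE + saltlen ? 0 : -EINVAL;
	}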
 
 static int aead_setkey(struct crypto_aead *aead,
 			       const u8 *key, unsigned int keylen)
 {
-	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
-	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct device *jrdev = ctx->jrdev;
-	struct rtattr *rta = (void *)key;
-	struct crypto_authenc_key_param *param;
-	unsigned int authkeylen;
-	unsigned int enckeylen;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+	struct crypto_authenc_keys keys;
 	int ret = 0;
 
-	param = RTA_DATA(rta);
-	enckeylen = be32_to_cpu(param->enckeylen);
-
-	key += RTA_ALIGN(rta->rta_len);
-	keylen -= RTA_ALIGN(rta->rta_len);
-
-	if (keylen < enckeylen)
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
 		goto badkey;
 
-	authkeylen = keylen - enckeylen;
+	dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
+		keys.authkeylen + keys.enckeylen, keys.enckeylen,
+		keys.authkeylen);
+	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 
-	if (keylen > CAAM_MAX_KEY_SIZE)
-		goto badkey;
+	/*
+	 * If DKP is supported, use it in the shared descriptor to generate
+	 * the split key.
+	 */
+	if (ctrlpriv->era >= 6) {
+		ctx->adata.keylen = keys.authkeylen;
+		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+						      OP_ALG_ALGSEL_MASK);
+
+		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+			goto badkey;
+
+		memcpy(ctx->key, keys.authkey, keys.authkeylen);
+		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+		       keys.enckeylen);
+		dma_sync_single_for_device(jrdev, ctx->key_dma,
+					   ctx->adata.keylen_pad +
+					   keys.enckeylen, ctx->dir);
+		goto skip_split_key;
+	}
 
-	/* Pick class 2 key length from algorithm submask */
-	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
-				      OP_ALG_ALGSEL_SHIFT] * 2;
-	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
-
-#ifdef DEBUG
-	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
-	       keylen, enckeylen, authkeylen);
-	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
-	       ctx->split_key_len, ctx->split_key_pad_len);
-	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
-
-	ret = gen_split_aead_key(ctx, key, authkeylen);
+	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
+			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
+			    keys.enckeylen);
 	if (ret) {
 		goto badkey;
 	}
 
 	/* postpend encryption key to auth split key */
-	memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
+	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
+				   keys.enckeylen, ctx->dir);
+
+	print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+			     ctx->adata.keylen_pad + keys.enckeylen, 1);
+
+skip_split_key:
+	ctx->cdata.keylen = keys.enckeylen;
+	memzero_explicit(&keys, sizeof(keys));
+	return aead_set_sh_desc(aead);
+badkey:
+	memzero_explicit(&keys, sizeof(keys));
+	return -EINVAL;
+}
 
-	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
-				       enckeylen, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->key_dma)) {
-		dev_err(jrdev, "unable to map key i/o memory\n");
-		return -ENOMEM;
-	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
-		       ctx->split_key_pad_len + enckeylen, 1);
-#endif
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+			    unsigned int keylen)
+{
+	struct crypto_authenc_keys keys;
+	int err;
 
-	ctx->enckeylen = enckeylen;
+	err = crypto_authenc_extractkeys(&keys, key, keylen);
+	if (unlikely(err))
+		return err;
 
-	ret = aead_set_sh_desc(aead);
-	if (ret) {
-		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
-				 enckeylen, DMA_TO_DEVICE);
-	}
+	err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
+	      aead_setkey(aead, key, keylen);
 
-	return ret;
-badkey:
-	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
-	return -EINVAL;
+	memzero_explicit(&keys, sizeof(keys));
+	return err;
 }
 
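[Note: the rewritten aead_setkey() above no longer open-codes the rtattr parsing; crypto_authenc_extractkeys() splits the authenc key blob (enckeylen parameter, then auth key, then cipher key — see include/crypto/authenc.h). A usage sketch:]

	#include <crypto/authenc.h>
	#include <linux/printk.h>
	#include <linux/string.h>

	static int demo_split_authenc_key(const u8 *key, unsigned int keylen)
	{
		struct crypto_authenc_keys keys;
		int err;

		err = crypto_authenc_extractkeys(&keys, key, keylen);
		if (err)
			return err;

		/*
		 * keys.authkey/authkeylen -> class 2 (MDHA split key),
		 * keys.enckey/enckeylen   -> class 1 (cipher key).
		 */
		pr_debug("authkeylen=%u enckeylen=%u\n",
			 keys.authkeylen, keys.enckeylen);

		memzero_explicit(&keys, sizeof(keys));
		return 0;
	}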
-static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-			     const u8 *key, unsigned int keylen)
+static int gcm_setkey(struct crypto_aead *aead,
+		      const u8 *key, unsigned int keylen)
 {
-	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct device *jrdev = ctx->jrdev;
-	int ret = 0;
-	u32 *key_jump_cmd, *jump_cmd;
-	u32 *desc;
+	int err;
+
+	err = aes_check_keylen(keylen);
+	if (err)
+		return err;
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 
 	memcpy(ctx->key, key, keylen);
-	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
-				      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->key_dma)) {
-		dev_err(jrdev, "unable to map key i/o memory\n");
-		return -ENOMEM;
-	}
-	ctx->enckeylen = keylen;
+	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
+	ctx->cdata.keylen = keylen;
+
+	return gcm_set_sh_desc(aead);
+}
+
+static int rfc4106_setkey(struct crypto_aead *aead,
+			  const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
+	struct device *jrdev = ctx->jrdev;
+	int err;
+
+	err = aes_check_keylen(keylen - 4);
+	if (err)
+		return err;
+
+	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+	memcpy(ctx->key, key, keylen);
+
+	/*
+	 * The last four bytes of the key material are used as the salt value
+	 * in the nonce. Update the AES key length.
+	 */
+	ctx->cdata.keylen = keylen - 4;
+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+				   ctx->dir);
+	return rfc4106_set_sh_desc(aead);
+}
+
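[Note: for both rfc4106 and rfc4543 the key material carries a trailing 4-byte salt that becomes the fixed part of the nonce; only the leading bytes are the AES key. An illustrative split, not driver code:]

	#include <linux/string.h>
	#include <linux/types.h>

	struct demo_gcm_esp_key {
		u8 aes_key[32];		/* up to AES-256 */
		u8 salt[4];		/* first four nonce bytes on the wire */
	};

	static void demo_split_gcm_esp_key(const u8 *key, unsigned int keylen,
					   struct demo_gcm_esp_key *out)
	{
		unsigned int aes_keylen = keylen - 4;	/* what aes_check_keylen() sees */

		memcpy(out->aes_key, key, aes_keylen);
		memcpy(out->salt, key + aes_keylen, 4);
	}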
+static int rfc4543_setkey(struct crypto_aead *aead,
+			  const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
+	struct device *jrdev = ctx->jrdev;
+	int err;
+
+	err = aes_check_keylen(keylen - 4);
+	if (err)
+		return err;
+
+	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+	memcpy(ctx->key, key, keylen);
+
+	/*
+	 * The last four bytes of the key material are used as the salt value
+	 * in the nonce. Update the AES key length.
+	 */
+	ctx->cdata.keylen = keylen - 4;
+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+				   ctx->dir);
+	return rfc4543_set_sh_desc(aead);
+}
+
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+			   unsigned int keylen, const u32 ctx1_iv_off)
+{
+	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
+	struct caam_skcipher_alg *alg =
+		container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
+			     skcipher.base);
+	struct device *jrdev = ctx->jrdev;
+	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+	u32 *desc;
+	const bool is_rfc3686 = alg->caam.rfc3686;
 
-	/* ablkcipher_encrypt shared descriptor */
+	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+	/* Here keylen is actual key length */
+	ctx->cdata.keylen = keylen;
+	ctx->cdata.key_virt = key;
+	ctx->cdata.key_inline = true;
+	/* Here protected key len is plain key length */
+	ctx->cdata.plain_keylen = keylen;
+	ctx->cdata.key_cmd_opt = 0;
+
+
+	/* skcipher_encrypt shared descriptor */
 	desc = ctx->sh_desc_enc;
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
-	/* Skip if already shared */
-	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-				   JUMP_COND_SHRD);
+	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+				   ctx1_iv_off);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	/* skcipher_decrypt shared descriptor */
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+				   ctx1_iv_off);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	return 0;
+}
 
-	/* Load class1 key only */
-	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-			  ctx->enckeylen, CLASS_1 |
-			  KEY_DEST_CLASS_REG);
+static int paes_skcipher_setkey(struct crypto_skcipher *skcipher,
+				const u8 *key,
+				unsigned int keylen)
+{
+	struct caam_pkey_info *pkey_info = (struct caam_pkey_info *)key;
+	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
+	struct device *jrdev = ctx->jrdev;
+	int err;
 
-	set_jump_tgt_here(desc, key_jump_cmd);
+	ctx->cdata.key_inline = false;
 
-	/* Propagate errors from shared to job descriptor */
-	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+	keylen = keylen - CAAM_PKEY_HEADER;
 
-	/* Load iv */
-	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
-		   LDST_CLASS_1_CCB | tfm->ivsize);
+	/* Retrieve the length of key */
+	ctx->cdata.plain_keylen = pkey_info->plain_key_sz;
 
-	/* Load operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+	/* Retrieve the length of blob*/
+	ctx->cdata.keylen = keylen;
 
-	/* Perform operation */
-	ablkcipher_append_src_dst(desc);
+	/* Retrieve the address of the blob */
+	ctx->cdata.key_virt = pkey_info->key_buf;
 
-	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
-					      desc_bytes(desc),
-					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
+	/* Validate key length for AES algorithms */
+	err = aes_check_keylen(ctx->cdata.plain_keylen);
+	if (err) {
+		dev_err(jrdev, "bad key length\n");
+		return err;
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
-	/* ablkcipher_decrypt shared descriptor */
-	desc = ctx->sh_desc_dec;
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
-	/* Skip if already shared */
-	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-				   JUMP_COND_SHRD);
+	/* set command option */
+	ctx->cdata.key_cmd_opt |= KEY_ENC;
 
-	/* Load class1 key only */
-	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-			  ctx->enckeylen, CLASS_1 |
-			  KEY_DEST_CLASS_REG);
+	/* check if the Protected-Key is CCM key */
+	if (pkey_info->key_enc_algo == CAAM_ENC_ALGO_CCM)
+		ctx->cdata.key_cmd_opt |= KEY_EKT;
 
-	/* For aead, only propagate error immediately if shared */
-	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
-	set_jump_tgt_here(desc, key_jump_cmd);
-	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-	set_jump_tgt_here(desc, jump_cmd);
+	memcpy(ctx->key, ctx->cdata.key_virt, keylen);
+	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+	ctx->cdata.key_dma = ctx->key_dma;
 
-	/* load IV */
-	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
-		   LDST_CLASS_1_CCB | tfm->ivsize);
+	if (pkey_info->key_enc_algo == CAAM_ENC_ALGO_CCM)
+		ctx->protected_key_dma = dma_map_single(jrdev, ctx->protected_key,
+							ctx->cdata.plain_keylen +
+							CAAM_CCM_OVERHEAD,
+							DMA_FROM_DEVICE);
+	else
+		ctx->protected_key_dma = dma_map_single(jrdev, ctx->protected_key,
+							ctx->cdata.plain_keylen,
+							DMA_FROM_DEVICE);
 
-	/* Choose operation */
-	append_dec_op1(desc, ctx->class1_alg_type);
+	ctx->cdata.protected_key_dma = ctx->protected_key_dma;
+	ctx->is_blob = true;
 
-	/* Perform operation */
-	ablkcipher_append_src_dst(desc);
+	return 0;
+}
 
-	/* Wait for key to load before allowing propagating error */
-	append_dec_shr_done(desc);
+static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
+			       const u8 *key, unsigned int keylen)
+{
+	int err;
 
-	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
-					      desc_bytes(desc),
-					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
+	err = aes_check_keylen(keylen);
+	if (err)
+		return err;
+
+	return skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
+				   const u8 *key, unsigned int keylen)
+{
+	u32 ctx1_iv_off;
+	int err;
+
+	/*
+	 * RFC3686 specific:
+	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 *	| *key = {KEY, NONCE}
+	 */
+	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+	keylen -= CTR_RFC3686_NONCE_SIZE;
+
+	err = aes_check_keylen(keylen);
+	if (err)
+		return err;
+
+	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+}
+
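[Note: rfc3686(ctr(aes)) keys likewise carry a trailing 4-byte nonce; the driver strips it before the AES key-length check, and the nonce later lands in CONTEXT1[255:128] next to the IV and the block counter. An illustrative split:]

	#include <linux/types.h>

	#define DEMO_CTR_RFC3686_NONCE_SIZE	4

	struct demo_rfc3686_key {
		const u8 *aes_key;	/* keylen - 4 bytes */
		const u8 *nonce;	/* last 4 bytes of the user key */
	};

	static void demo_split_rfc3686_key(const u8 *key, unsigned int keylen,
					   struct demo_rfc3686_key *out)
	{
		out->aes_key = key;
		out->nonce = key + keylen - DEMO_CTR_RFC3686_NONCE_SIZE;
	}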
+static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
+			       const u8 *key, unsigned int keylen)
+{
+	u32 ctx1_iv_off;
+	int err;
+
+	/*
+	 * AES-CTR needs to load IV in CONTEXT1 reg
+	 * at an offset of 128bits (16bytes)
+	 * CONTEXT1[255:128] = IV
+	 */
+	ctx1_iv_off = 16;
+
+	err = aes_check_keylen(keylen);
+	if (err)
+		return err;
+
+	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+}
+
+static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
+			       const u8 *key, unsigned int keylen)
+{
+	return verify_skcipher_des_key(skcipher, key) ?:
+	       skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
+				const u8 *key, unsigned int keylen)
+{
+	return verify_skcipher_des3_key(skcipher, key) ?:
+	       skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+			       unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
+	struct device *jrdev = ctx->jrdev;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+	u32 *desc;
+	int err;
+
+	err = xts_verify_key(skcipher, key, keylen);
+	if (err) {
+		dev_dbg(jrdev, "key size mismatch\n");
+		return err;
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
+	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
+		ctx->xts_key_fallback = true;
 
-	return ret;
+	if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) {
+		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+		if (err)
+			return err;
+	}
+
+	ctx->cdata.keylen = keylen;
+	ctx->cdata.key_virt = key;
+	ctx->cdata.key_inline = true;
+
+	/* xts_skcipher_encrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	/* xts_skcipher_decrypt shared descriptor */
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	return 0;
 }
 
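[Note: xts_skcipher_setkey() above keys a software fallback in parallel whenever the hardware cannot cover the request: pre-era-9 CAAM, or XTS keys that are not 2x128/2x256 bits. The decision reduces to roughly this sketch, with the era threshold taken from the code above:]

	#include <linux/types.h>

	#define DEMO_AES_KEYSIZE_128	16
	#define DEMO_AES_KEYSIZE_256	32

	static bool demo_xts_needs_fallback(int era, unsigned int keylen)
	{
		bool odd_keylen = keylen != 2 * DEMO_AES_KEYSIZE_128 &&
				  keylen != 2 * DEMO_AES_KEYSIZE_256;

		return era <= 8 || odd_keylen;
	}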
 /*
  * aead_edesc - s/w-extended aead descriptor
- * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
- * @assoc_chained: if source is chained
- * @src_nents: number of segments in input scatterlist
- * @src_chained: if source is chained
- * @dst_nents: number of segments in output scatterlist
- * @dst_chained: if destination is chained
- * @iv_dma: dma address of iv for checking continuity and link table
- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
+ * @src_nents: number of segments in input s/w scatterlist
+ * @dst_nents: number of segments in output s/w scatterlist
+ * @mapped_src_nents: number of segments in input h/w link table
+ * @mapped_dst_nents: number of segments in output h/w link table
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @bklog: stored to determine if the request needs backlog
  * @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
  * @hw_desc: the h/w job descriptor followed by any referenced link tables
  */
 struct aead_edesc {
-	int assoc_nents;
-	bool assoc_chained;
 	int src_nents;
-	bool src_chained;
 	int dst_nents;
-	bool dst_chained;
-	dma_addr_t iv_dma;
+	int mapped_src_nents;
+	int mapped_dst_nents;
 	int sec4_sg_bytes;
+	bool bklog;
 	dma_addr_t sec4_sg_dma;
 	struct sec4_sg_entry *sec4_sg;
-	u32 hw_desc[0];
+	u32 hw_desc[];
 };
 
 /*
- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
- * @src_nents: number of segments in input scatterlist
- * @src_chained: if source is chained
- * @dst_nents: number of segments in output scatterlist
- * @dst_chained: if destination is chained
+ * skcipher_edesc - s/w-extended skcipher descriptor
+ * @src_nents: number of segments in input s/w scatterlist
+ * @dst_nents: number of segments in output s/w scatterlist
+ * @mapped_src_nents: number of segments in input h/w link table
+ * @mapped_dst_nents: number of segments in output h/w link table
  * @iv_dma: dma address of iv for checking continuity and link table
- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @bklog: stored to determine if the request needs backlog
  * @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
  * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ *	     and IV
  */
-struct ablkcipher_edesc {
+struct skcipher_edesc {
 	int src_nents;
-	bool src_chained;
 	int dst_nents;
-	bool dst_chained;
+	int mapped_src_nents;
+	int mapped_dst_nents;
 	dma_addr_t iv_dma;
 	int sec4_sg_bytes;
+	bool bklog;
 	dma_addr_t sec4_sg_dma;
 	struct sec4_sg_entry *sec4_sg;
-	u32 hw_desc[0];
+	u32 hw_desc[];
 };
 
 static void caam_unmap(struct device *dev, struct scatterlist *src,
 		       struct scatterlist *dst, int src_nents,
-		       bool src_chained, int dst_nents, bool dst_chained,
+		       int dst_nents,
 		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
 		       int sec4_sg_bytes)
 {
 	if (dst != src) {
-		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
-				     src_chained);
-		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
-				     dst_chained);
+		if (src_nents)
+			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+		if (dst_nents)
+			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
 	} else {
-		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
-				     DMA_BIDIRECTIONAL, src_chained);
+		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
 	}
 
 	if (iv_dma)
-		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+		dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL);
 	if (sec4_sg_bytes)
 		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
 				 DMA_TO_DEVICE);
@@ -730,267 +1026,158 @@ static void aead_unmap(struct device *dev,
 		       struct aead_edesc *edesc,
 		       struct aead_request *req)
 {
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	int ivsize = crypto_aead_ivsize(aead);
-
-	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
-			     DMA_TO_DEVICE, edesc->assoc_chained);
-
 	caam_unmap(dev, req->src, req->dst,
-		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
-		   edesc->dst_chained, edesc->iv_dma, ivsize,
+		   edesc->src_nents, edesc->dst_nents, 0, 0,
 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
-static void ablkcipher_unmap(struct device *dev,
-			     struct ablkcipher_edesc *edesc,
-			     struct ablkcipher_request *req)
+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
+			   struct skcipher_request *req)
 {
-	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	int ivsize = crypto_skcipher_ivsize(skcipher);
 
 	caam_unmap(dev, req->src, req->dst,
-		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
-		   edesc->dst_chained, edesc->iv_dma, ivsize,
+		   edesc->src_nents, edesc->dst_nents,
+		   edesc->iv_dma, ivsize,
 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
-static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
-				   void *context)
+static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
+			    void *context)
 {
 	struct aead_request *req = context;
+	struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
 	struct aead_edesc *edesc;
-#ifdef DEBUG
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
-	int ivsize = crypto_aead_ivsize(aead);
+	int ecode = 0;
+	bool has_bklog;
 
-	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 
-	edesc = (struct aead_edesc *)((char *)desc -
-		 offsetof(struct aead_edesc, hw_desc));
+	edesc = rctx->edesc;
+	has_bklog = edesc->bklog;
 
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		ecode = caam_jr_strstatus(jrdev, err);
 
 	aead_unmap(jrdev, edesc, req);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
-		       req->assoclen , 1);
-	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
-		       edesc->src_nents ? 100 : ivsize, 1);
-	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       edesc->src_nents ? 100 : req->cryptlen +
-				ctx->authsize + 4, 1);
-#endif
-
 	kfree(edesc);
 
-	aead_request_complete(req, err);
-}
-
-static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
-				   void *context)
-{
-	struct aead_request *req = context;
-	struct aead_edesc *edesc;
-#ifdef DEBUG
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
-	int ivsize = crypto_aead_ivsize(aead);
-
-	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
-
-	edesc = (struct aead_edesc *)((char *)desc -
-		 offsetof(struct aead_edesc, hw_desc));
-
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
-		       ivsize, 1);
-	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
-		       req->cryptlen, 1);
-#endif
-
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
-
-	aead_unmap(jrdev, edesc, req);
-
 	/*
-	 * verify hw auth check passed else return -EBADMSG
+	 * If no backlog flag, the completion of the request is done
+	 * by CAAM, not crypto engine.
 	 */
-	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
-		err = -EBADMSG;
-
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4,
-		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
-		       sizeof(struct iphdr) + req->assoclen +
-		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
-		       ctx->authsize + 36, 1);
-	if (!err && edesc->sec4_sg_bytes) {
-		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
-		print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
-			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
-			       sg->length + ctx->authsize + 16, 1);
-	}
-#endif
-
-	kfree(edesc);
-
-	aead_request_complete(req, err);
+	if (!has_bklog)
+		aead_request_complete(req, ecode);
+	else
+		crypto_finalize_aead_request(jrp->engine, req, ecode);
 }
 
-static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
-				    void *context)
+static inline u8 *skcipher_edesc_iv(struct skcipher_edesc *edesc)
 {
-	struct ablkcipher_request *req = context;
-	struct ablkcipher_edesc *edesc;
-#ifdef DEBUG
-	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-
-	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
-
-	edesc = (struct ablkcipher_edesc *)((char *)desc -
-		 offsetof(struct ablkcipher_edesc, hw_desc));
-
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
-
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
-		       edesc->src_nents > 1 ? 100 : ivsize, 1);
-	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
-#endif
-
-	ablkcipher_unmap(jrdev, edesc, req);
-	kfree(edesc);
-
-	ablkcipher_request_complete(req, err);
+	return PTR_ALIGN((u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
+			 dma_get_cache_alignment());
 }
 
-static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
-				    void *context)
+static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
+				void *context)
 {
-	struct ablkcipher_request *req = context;
-	struct ablkcipher_edesc *edesc;
-#ifdef DEBUG
-	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	struct skcipher_request *req = context;
+	struct skcipher_edesc *edesc;
+	struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
+	int ivsize = crypto_skcipher_ivsize(skcipher);
+	int ecode = 0;
+	bool has_bklog;
 
-	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 
-	edesc = (struct ablkcipher_edesc *)((char *)desc -
-		 offsetof(struct ablkcipher_edesc, hw_desc));
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
+	edesc = rctx->edesc;
+	has_bklog = edesc->bklog;
+	if (err)
+		ecode = caam_jr_strstatus(jrdev, err);
 
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+	skcipher_unmap(jrdev, edesc, req);
+
+	/*
+	 * The crypto API expects us to set the IV (req->iv) to the last
+	 * ciphertext block (CBC mode) or last counter (CTR mode).
+	 * This is used e.g. by the CTS mode.
+	 */
+	if (ivsize && !ecode) {
+		memcpy(req->iv, skcipher_edesc_iv(edesc), ivsize);
+
+		print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
+				     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+				     ivsize, 1);
 	}
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
-		       ivsize, 1);
-	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
-#endif
+	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
+		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
 
-	ablkcipher_unmap(jrdev, edesc, req);
 	kfree(edesc);
 
-	ablkcipher_request_complete(req, err);
+	/*
+	 * If no backlog flag, the completion of the request is done
+	 * by CAAM, not crypto engine.
+	 */
+	if (!has_bklog)
+		skcipher_request_complete(req, ecode);
+	else
+		crypto_finalize_skcipher_request(jrp->engine, req, ecode);
 }
 
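[Note: the IV copy-back in skcipher_crypt_done() above is what makes chained calls work: after each request, req->iv holds the last ciphertext block (CBC) or last counter (CTR), so the next request continues the stream. A sketch assuming a synchronous tfm; async completion handling is omitted:]

	#include <crypto/skcipher.h>

	static int demo_cbc_two_chunks(struct skcipher_request *req,
				       struct scatterlist *sg1,
				       struct scatterlist *sg2,
				       unsigned int len, u8 *iv)
	{
		int err;

		skcipher_request_set_crypt(req, sg1, sg1, len, iv);
		err = crypto_skcipher_encrypt(req);
		if (err)
			return err;

		/* iv now holds chunk 1's last ciphertext block ... */
		skcipher_request_set_crypt(req, sg2, sg2, len, iv);
		return crypto_skcipher_encrypt(req);	/* ... chaining chunk 2 */
	}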
: 1) + 1); - out_options = LDST_SGF; - } - } else { - if (!edesc->dst_nents) { + append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen, + in_options); + + dst_dma = src_dma; + out_options = in_options; + + if (unlikely(req->src != req->dst)) { + if (!edesc->mapped_dst_nents) { + dst_dma = 0; + out_options = 0; + } else if (edesc->mapped_dst_nents == 1) { dst_dma = sg_dma_address(req->dst); + out_options = 0; } else { dst_dma = edesc->sec4_sg_dma + sec4_sg_index * @@ -998,1254 +1185,2804 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, out_options = LDST_SGF; } } + if (encrypt) - append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options); + append_seq_out_ptr(desc, dst_dma, + req->assoclen + req->cryptlen + authsize, + out_options); else - append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize, + append_seq_out_ptr(desc, dst_dma, + req->assoclen + req->cryptlen - authsize, out_options); } -/* - * Fill in aead givencrypt job descriptor - */ -static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, - struct aead_edesc *edesc, - struct aead_request *req, - int contig) +static void init_gcm_job(struct aead_request *req, + struct aead_edesc *edesc, + bool all_contig, bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); - int ivsize = crypto_aead_ivsize(aead); - int authsize = ctx->authsize; + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); u32 *desc = edesc->hw_desc; - u32 out_options = 0, in_options; - dma_addr_t dst_dma, src_dma; - int len, sec4_sg_index = 0; + bool generic_gcm = (ivsize == GCM_AES_IV_SIZE); + unsigned int last; + + init_aead_job(req, edesc, all_contig, encrypt); + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); + + /* BUG This should not be specific to generic GCM. */ + last = 0; + if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen)) + last = FIFOLD_TYPE_LAST1; + + /* Read GCM IV */ + append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | GCM_AES_IV_SIZE | last); + /* Append Salt */ + if (!generic_gcm) + append_data(desc, ctx->key + ctx->cdata.keylen, 4); + /* Append IV */ + append_data(desc, req->iv, ivsize); + /* End of blank commands */ +} -#ifdef DEBUG - debug("assoclen %d cryptlen %d authsize %d\n", - req->assoclen, req->cryptlen, authsize); - print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), - req->assoclen , 1); - print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); - print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), - edesc->src_nents > 1 ? 
100 : req->cryptlen, 1); - print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, - desc_bytes(sh_desc), 1); -#endif +static void init_chachapoly_job(struct aead_request *req, + struct aead_edesc *edesc, bool all_contig, + bool encrypt) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + unsigned int ivsize = crypto_aead_ivsize(aead); + unsigned int assoclen = req->assoclen; + u32 *desc = edesc->hw_desc; + u32 ctx_iv_off = 4; - len = desc_len(sh_desc); - init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); + init_aead_job(req, edesc, all_contig, encrypt); - if (contig & GIV_SRC_CONTIG) { - src_dma = sg_dma_address(req->assoc); - in_options = 0; - } else { - src_dma = edesc->sec4_sg_dma; - sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents; - in_options = LDST_SGF; - } - append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + - req->cryptlen - authsize, in_options); + if (ivsize != CHACHAPOLY_IV_SIZE) { + /* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */ + ctx_iv_off += 4; - if (contig & GIV_DST_CONTIG) { - dst_dma = edesc->iv_dma; - } else { - if (likely(req->src == req->dst)) { - dst_dma = src_dma + sizeof(struct sec4_sg_entry) * - edesc->assoc_nents; - out_options = LDST_SGF; - } else { - dst_dma = edesc->sec4_sg_dma + - sec4_sg_index * - sizeof(struct sec4_sg_entry); - out_options = LDST_SGF; - } + /* + * The associated data comes already with the IV but we need + * to skip it when we authenticate or encrypt... + */ + assoclen -= ivsize; } - append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options); + append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen); + + /* + * For IPsec load the IV further in the same register. + * For RFC7539 simply load the 12 bytes nonce in a single operation + */ + append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ctx_iv_off << LDST_OFFSET_SHIFT); +} + +static void init_authenc_job(struct aead_request *req, + struct aead_edesc *edesc, + bool all_contig, bool encrypt) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), + struct caam_aead_alg, + aead.base); + unsigned int ivsize = crypto_aead_ivsize(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == + OP_ALG_AAI_CTR_MOD128); + const bool is_rfc3686 = alg->caam.rfc3686; + u32 *desc = edesc->hw_desc; + u32 ivoffset = 0; + + /* + * AES-CTR needs to load IV in CONTEXT1 reg + * at an offset of 128bits (16bytes) + * CONTEXT1[255:128] = IV + */ + if (ctr_mode) + ivoffset = 16; + + /* + * RFC3686 specific: + * CONTEXT1[255:128] = {NONCE, IV, COUNTER} + */ + if (is_rfc3686) + ivoffset = 16 + CTR_RFC3686_NONCE_SIZE; + + init_aead_job(req, edesc, all_contig, encrypt); + + /* + * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports + * having DPOVRD as destination. 
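+	 *
+	 * A minimal sketch of the two cases (illustrative only; simplified
+	 * mnemonics, not generated descriptor code):
+	 *	Era < 3:   MATH ADD  ZERO + IMM(assoclen) -> REG3
+	 *	Era >= 3:  MATH ADD  ZERO + IMM(assoclen) -> DPOVRD
+	 * i.e. on Era >= 3 the shared descriptor can consume assoclen
+	 * straight from DPOVRD and REG3 does not need to be touched.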
+	 */
+	if (ctrlpriv->era < 3)
+		append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
+	else
+		append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
+
+	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
+		append_load_as_imm(desc, req->iv, ivsize,
+				   LDST_CLASS_1_CCB |
+				   LDST_SRCDST_BYTE_CONTEXT |
+				   (ivoffset << LDST_OFFSET_SHIFT));
 }
 
 /*
- * Fill in ablkcipher job descriptor
+ * Fill in skcipher job descriptor
  */
-static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
-				struct ablkcipher_edesc *edesc,
-				struct ablkcipher_request *req,
-				bool iv_contig)
+static void init_skcipher_job(struct skcipher_request *req,
+			      struct skcipher_edesc *edesc,
+			      const bool encrypt)
 {
-	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-	u32 *desc = edesc->hw_desc;
-	u32 out_options = 0, in_options;
-	dma_addr_t dst_dma, src_dma;
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
+	struct device *jrdev = ctx->jrdev;
+	int ivsize = crypto_skcipher_ivsize(skcipher);
+	u32 *desc = !ctx->is_blob ? edesc->hw_desc :
+		    (u32 *)((u8 *)edesc->hw_desc + CAAM_DESC_BYTES_MAX);
+	dma_addr_t desc_dma;
+	u32 *sh_desc;
+	u32 in_options = 0, out_options = 0;
+	dma_addr_t src_dma, dst_dma, ptr;
 	int len, sec4_sg_index = 0;
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
-		       ivsize, 1);
-	print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       edesc->src_nents ? 100 : req->nbytes, 1);
-#endif
+	print_hex_dump_debug("presciv@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+	dev_dbg(jrdev, "asked=%d, cryptlen=%d\n",
+		(int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);
 
-	len = desc_len(sh_desc);
-	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+	caam_dump_sg("src    @" __stringify(__LINE__)": ",
+		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+		     edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
 
-	if (iv_contig) {
-		src_dma = edesc->iv_dma;
-		in_options = 0;
-	} else {
+	if (ivsize || edesc->mapped_src_nents > 1) {
 		src_dma = edesc->sec4_sg_dma;
-		sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
+		sec4_sg_index = edesc->mapped_src_nents + !!ivsize;
 		in_options = LDST_SGF;
+	} else {
+		src_dma = sg_dma_address(req->src);
 	}
-	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
 
 	if (likely(req->src == req->dst)) {
-		if (!edesc->src_nents && iv_contig) {
-			dst_dma = sg_dma_address(req->src);
-		} else {
-			dst_dma = edesc->sec4_sg_dma +
-				  sizeof(struct sec4_sg_entry);
-			out_options = LDST_SGF;
-		}
+		dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
+		out_options = in_options;
+	} else if (!ivsize && edesc->mapped_dst_nents == 1) {
+		dst_dma = sg_dma_address(req->dst);
 	} else {
-		if (!edesc->dst_nents) {
-			dst_dma = sg_dma_address(req->dst);
-		} else {
-			dst_dma = edesc->sec4_sg_dma +
-				  sec4_sg_index * sizeof(struct sec4_sg_entry);
-			out_options = LDST_SGF;
-		}
+		dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
+			  sizeof(struct sec4_sg_entry);
+		out_options = LDST_SGF;
+	}
+
+	if (ctx->is_blob) {
+		cnstr_desc_skcipher_enc_dec(desc, &ctx->cdata,
+					    src_dma, dst_dma, req->cryptlen + ivsize,
+					    in_options, out_options,
+					    ivsize, encrypt);
+
+		desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE);
+
+		cnstr_desc_protected_blob_decap(edesc->hw_desc, &ctx->cdata, desc_dma);
+	} else {
+		sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
+		ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
+
+		len = desc_len(sh_desc);
+		init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+		append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);
+
+		append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
 	}
-	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
 }
 
 /*
  * allocate and map the aead extended descriptor
  */
 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
-					   int desc_bytes, bool *all_contig_ptr)
+					   int desc_bytes, bool *all_contig_ptr,
+					   bool encrypt)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-	int assoc_nents, src_nents, dst_nents = 0;
+	struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	int src_len, dst_len = 0;
 	struct aead_edesc *edesc;
-	dma_addr_t iv_dma = 0;
-	int sgc;
-	bool all_contig = true;
-	bool assoc_chained = false, src_chained = false, dst_chained = false;
-	int ivsize = crypto_aead_ivsize(aead);
-	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
-
-	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
-	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+	int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
+	unsigned int authsize = ctx->authsize;
+
+	if (unlikely(req->dst != req->src)) {
+		src_len = req->assoclen + req->cryptlen;
+		dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+		src_nents = sg_nents_for_len(req->src, src_len);
+		if (unlikely(src_nents < 0)) {
+			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
+				src_len);
+			return ERR_PTR(src_nents);
+		}
 
-	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+		dst_nents = sg_nents_for_len(req->dst, dst_len);
+		if (unlikely(dst_nents < 0)) {
+			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
+				dst_len);
+			return ERR_PTR(dst_nents);
+		}
+	} else {
+		src_len = req->assoclen + req->cryptlen +
+			  (encrypt ? authsize : 0);
+
+		src_nents = sg_nents_for_len(req->src, src_len);
+		if (unlikely(src_nents < 0)) {
+			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
+				src_len);
+			return ERR_PTR(src_nents);
+		}
+	}
 
-	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
-				 DMA_TO_DEVICE, assoc_chained);
 	if (likely(req->src == req->dst)) {
-		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-					 DMA_BIDIRECTIONAL, src_chained);
+		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+					      DMA_BIDIRECTIONAL);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(jrdev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
 	} else {
-		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-					 DMA_TO_DEVICE, src_chained);
-		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
-					 DMA_FROM_DEVICE, dst_chained);
-	}
+		/* Cover also the case of null (zero length) input data */
+		if (src_nents) {
+			mapped_src_nents = dma_map_sg(jrdev, req->src,
+						      src_nents, DMA_TO_DEVICE);
+			if (unlikely(!mapped_src_nents)) {
+				dev_err(jrdev, "unable to map source\n");
+				return ERR_PTR(-ENOMEM);
+			}
+		} else {
+			mapped_src_nents = 0;
+		}
 
-	/* Check if data are contiguous */
-	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
-	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
-	    iv_dma || src_nents || iv_dma + ivsize !=
-	    sg_dma_address(req->src)) {
-		all_contig = false;
-		assoc_nents = assoc_nents ? : 1;
-		src_nents = src_nents ? : 1;
-		sec4_sg_len = assoc_nents + 1 + src_nents;
+		/* Cover also the case of null (zero length) output data */
+		if (dst_nents) {
+			mapped_dst_nents = dma_map_sg(jrdev, req->dst,
+						      dst_nents,
+						      DMA_FROM_DEVICE);
+			if (unlikely(!mapped_dst_nents)) {
+				dev_err(jrdev, "unable to map destination\n");
+				dma_unmap_sg(jrdev, req->src, src_nents,
+					     DMA_TO_DEVICE);
+				return ERR_PTR(-ENOMEM);
+			}
+		} else {
+			mapped_dst_nents = 0;
+		}
 	}
-	sec4_sg_len += dst_nents;
+
+	/*
+	 * HW reads 4 S/G entries at a time; make sure the reads don't go
+	 * beyond the end of the table by allocating more S/G entries.
+	 */
+	sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
+	if (mapped_dst_nents > 1)
+		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
+	else
+		sec4_sg_len = pad_sg_nents(sec4_sg_len);
 
 	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
-			sec4_sg_bytes, GFP_DMA | flags);
+	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, flags);
 	if (!edesc) {
-		dev_err(jrdev, "could not allocate extended descriptor\n");
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	edesc->assoc_nents = assoc_nents;
-	edesc->assoc_chained = assoc_chained;
 	edesc->src_nents = src_nents;
-	edesc->src_chained = src_chained;
 	edesc->dst_nents = dst_nents;
-	edesc->dst_chained = dst_chained;
-	edesc->iv_dma = iv_dma;
-	edesc->sec4_sg_bytes = sec4_sg_bytes;
+	edesc->mapped_src_nents = mapped_src_nents;
+	edesc->mapped_dst_nents = mapped_dst_nents;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
 			 desc_bytes;
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
-	*all_contig_ptr = all_contig;
+
+	rctx->edesc = edesc;
+
+	*all_contig_ptr = !(mapped_src_nents > 1);
 
 	sec4_sg_index = 0;
-	if (!all_contig) {
-		sg_to_sec4_sg(req->assoc,
-			      (assoc_nents ? : 1),
-			      edesc->sec4_sg +
-			      sec4_sg_index, 0);
-		sec4_sg_index += assoc_nents ? : 1;
-		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
-				   iv_dma, ivsize, 0);
-		sec4_sg_index += 1;
-		sg_to_sec4_sg_last(req->src,
-				   (src_nents ? : 1),
-				   edesc->sec4_sg +
-				   sec4_sg_index, 0);
-		sec4_sg_index += src_nents ? : 1;
+	if (mapped_src_nents > 1) {
+		sg_to_sec4_sg_last(req->src, src_len,
+				   edesc->sec4_sg + sec4_sg_index, 0);
+		sec4_sg_index += mapped_src_nents;
 	}
-	if (dst_nents) {
-		sg_to_sec4_sg_last(req->dst, dst_nents,
+	if (mapped_dst_nents > 1) {
+		sg_to_sec4_sg_last(req->dst, dst_len,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 	}
 
+	if (!sec4_sg_bytes)
+		return edesc;
+
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		aead_unmap(jrdev, edesc, req);
+		kfree(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->sec4_sg_bytes = sec4_sg_bytes;
+
 	return edesc;
 }
 
-static int aead_encrypt(struct aead_request *req)
+static int aead_enqueue_req(struct device *jrdev, struct aead_request *req)
 {
-	struct aead_edesc *edesc;
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
-	struct device *jrdev = ctx->jrdev;
-	bool all_contig;
-	u32 *desc;
-	int ret = 0;
-
-	req->cryptlen += ctx->authsize;
-
-	/* allocate extended descriptor */
-	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-				 CAAM_CMD_SZ, &all_contig);
-	if (IS_ERR(edesc))
-		return PTR_ERR(edesc);
+	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
+	struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
+	struct aead_edesc *edesc = rctx->edesc;
+	u32 *desc = edesc->hw_desc;
+	int ret;
 
-	/* Create and submit job descriptor */
-	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
-		      all_contig, true);
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
-		       desc_bytes(edesc->hw_desc), 1);
-#endif
+	/*
+	 * Only backlog requests are sent to the crypto-engine, since the
+	 * others can be handled by CAAM, if free, especially since the JR has
+	 * up to 1024 entries (more than the 10 entries from the
+	 * crypto-engine).
+	 */
+	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+		ret = crypto_transfer_aead_request_to_engine(jrpriv->engine,
+							     req);
+	else
+		ret = caam_jr_enqueue(jrdev, desc, aead_crypt_done, req);
 
-	desc = edesc->hw_desc;
-	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
-	if (!ret) {
-		ret = -EINPROGRESS;
-	} else {
+	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
 		aead_unmap(jrdev, edesc, req);
-		kfree(edesc);
+		kfree(rctx->edesc);
 	}
 
 	return ret;
 }
 
-static int aead_decrypt(struct aead_request *req)
+static inline int chachapoly_crypt(struct aead_request *req, bool encrypt)
 {
 	struct aead_edesc *edesc;
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct device *jrdev = ctx->jrdev;
 	bool all_contig;
 	u32 *desc;
-	int ret = 0;
 
-	/* allocate extended descriptor */
-	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-				 CAAM_CMD_SZ, &all_contig);
+	edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
+				 encrypt);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       req->cryptlen, 1);
-#endif
+	desc = edesc->hw_desc;
 
-	/* Create and submit job descriptor*/
-	init_aead_job(ctx->sh_desc_dec,
-		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
-		       desc_bytes(edesc->hw_desc), 1);
-#endif
+	init_chachapoly_job(req, edesc, all_contig, encrypt);
+	print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 
-	desc = edesc->hw_desc;
-	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
-	if (!ret) {
-		ret = -EINPROGRESS;
-	} else {
-		aead_unmap(jrdev, edesc, req);
-		kfree(edesc);
-	}
+	return aead_enqueue_req(jrdev, req);
+}
 
-	return ret;
+static int chachapoly_encrypt(struct aead_request *req)
+{
+	return chachapoly_crypt(req, true);
 }
 
-/*
- * allocate and map the aead extended descriptor for aead givencrypt
- */
-static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
-					       *greq, int desc_bytes,
-					       u32 *contig_ptr)
+static int chachapoly_decrypt(struct aead_request *req)
+{
+	return chachapoly_crypt(req, false);
+}
+
+static inline int aead_crypt(struct aead_request *req, bool encrypt)
 {
-	struct aead_request *req = &greq->areq;
+	struct aead_edesc *edesc;
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-	int assoc_nents, src_nents, dst_nents = 0;
-	struct aead_edesc *edesc;
-	dma_addr_t iv_dma = 0;
-	int sgc;
-	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
-	int ivsize = crypto_aead_ivsize(aead);
-	bool assoc_chained = false, src_chained = false, dst_chained = false;
-	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+	bool all_contig;
 
-	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
-	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+	/* allocate extended descriptor */
+	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
+				 &all_contig, encrypt);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
 
-	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+	/* Create and submit job descriptor */
+	init_authenc_job(req, edesc, all_contig, encrypt);
 
-	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
-				 DMA_TO_DEVICE, assoc_chained);
-	if (likely(req->src == req->dst)) {
-		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-					 DMA_BIDIRECTIONAL, src_chained);
-	} else {
-		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-					 DMA_TO_DEVICE, src_chained);
-		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
-					 DMA_FROM_DEVICE, dst_chained);
-	}
+	print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+			     desc_bytes(edesc->hw_desc), 1);
 
-	/* Check if data are contiguous */
-	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
-	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
-	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
-		contig &= ~GIV_SRC_CONTIG;
-	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
-		contig &= ~GIV_DST_CONTIG;
-	if (unlikely(req->src != req->dst)) {
-		dst_nents = dst_nents ? : 1;
-		sec4_sg_len += 1;
-	}
-	if (!(contig & GIV_SRC_CONTIG)) {
-		assoc_nents = assoc_nents ? : 1;
-		src_nents = src_nents ? : 1;
-		sec4_sg_len += assoc_nents + 1 + src_nents;
-		if (likely(req->src == req->dst))
-			contig &= ~GIV_DST_CONTIG;
-	}
-	sec4_sg_len += dst_nents;
+	return aead_enqueue_req(jrdev, req);
+}
 
-	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+static int aead_encrypt(struct aead_request *req)
+{
+	return aead_crypt(req, true);
+}
 
-	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
-			sec4_sg_bytes, GFP_DMA | flags);
-	if (!edesc) {
-		dev_err(jrdev, "could not allocate extended descriptor\n");
-		return ERR_PTR(-ENOMEM);
-	}
+static int aead_decrypt(struct aead_request *req)
+{
+	return aead_crypt(req, false);
+}
 
-	edesc->assoc_nents = assoc_nents;
-	edesc->assoc_chained = assoc_chained;
-	edesc->src_nents = src_nents;
-	edesc->src_chained = src_chained;
-	edesc->dst_nents = dst_nents;
-	edesc->dst_chained = dst_chained;
-	edesc->iv_dma = iv_dma;
-	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
-			 desc_bytes;
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
-	*contig_ptr = contig;
+static int aead_do_one_req(struct crypto_engine *engine, void *areq)
+{
+	struct aead_request *req = aead_request_cast(areq);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(crypto_aead_reqtfm(req));
+	struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
+	u32 *desc = rctx->edesc->hw_desc;
+	int ret;
 
-	sec4_sg_index = 0;
-	if (!(contig & GIV_SRC_CONTIG)) {
-		sg_to_sec4_sg(req->assoc, assoc_nents,
-			      edesc->sec4_sg +
-			      sec4_sg_index, 0);
-		sec4_sg_index += assoc_nents;
-		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
-				   iv_dma, ivsize, 0);
-		sec4_sg_index += 1;
-		sg_to_sec4_sg_last(req->src, src_nents,
-				   edesc->sec4_sg +
-				   sec4_sg_index, 0);
-		sec4_sg_index += src_nents;
-	}
-	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
-		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
-				   iv_dma, ivsize, 0);
-		sec4_sg_index += 1;
-		sg_to_sec4_sg_last(req->dst, dst_nents,
-				   edesc->sec4_sg + sec4_sg_index, 0);
+	rctx->edesc->bklog = true;
+
+	ret = caam_jr_enqueue(ctx->jrdev, desc, aead_crypt_done, req);
+
+	if (ret == -ENOSPC && engine->retry_support)
+		return ret;
+
+	if (ret != -EINPROGRESS) {
+		aead_unmap(ctx->jrdev, rctx->edesc, req);
+		kfree(rctx->edesc);
+	} else {
+		ret = 0;
 	}
 
-	return edesc;
+	return ret;
 }
 
-static int aead_givencrypt(struct aead_givcrypt_request *areq)
+static inline int gcm_crypt(struct aead_request *req, bool encrypt)
 {
-	struct aead_request *req = &areq->areq;
 	struct aead_edesc *edesc;
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct device *jrdev = ctx->jrdev;
-	u32 contig;
-	u32 *desc;
-	int ret = 0;
-
-	req->cryptlen += ctx->authsize;
+	bool all_contig;
 
 	/* allocate extended descriptor */
-	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
-				     CAAM_CMD_SZ, &contig);
-
+	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig,
+				 encrypt);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       req->cryptlen, 1);
-#endif
+	/* Create and submit job descriptor */
+	init_gcm_job(req, edesc, all_contig, encrypt);
 
-	/* Create and submit job descriptor*/
-	init_aead_giv_job(ctx->sh_desc_givenc,
-			  ctx->sh_desc_givenc_dma, edesc, req, contig);
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
-		       desc_bytes(edesc->hw_desc), 1);
-#endif
+	print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+			     desc_bytes(edesc->hw_desc), 1);
 
-	desc = edesc->hw_desc;
-	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
-	if (!ret) {
-		ret = -EINPROGRESS;
-	} else {
-		aead_unmap(jrdev, edesc, req);
-		kfree(edesc);
-	}
+	return aead_enqueue_req(jrdev, req);
+}
 
-	return ret;
+static int gcm_encrypt(struct aead_request *req)
+{
+	return gcm_crypt(req, true);
+}
+
+static int gcm_decrypt(struct aead_request *req)
+{
+	return gcm_crypt(req, false);
+}
+
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+	return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_encrypt(req);
+}
+
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+	return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req);
 }
 
 /*
- * allocate and map the ablkcipher extended descriptor for ablkcipher
+ * allocate and map the skcipher extended descriptor for skcipher
  */
-static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
-						       *req, int desc_bytes,
-						       bool *iv_contig_out)
+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
+						   int desc_bytes)
 {
-	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
+	struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		       GFP_KERNEL : GFP_ATOMIC;
-	int src_nents, dst_nents = 0, sec4_sg_bytes;
-	struct ablkcipher_edesc *edesc;
+	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	struct skcipher_edesc *edesc;
 	dma_addr_t iv_dma = 0;
-	bool iv_contig = false;
-	int sgc;
-	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-	bool src_chained = false, dst_chained = false;
-	int sec4_sg_index;
-
-	src_nents = sg_count(req->src, req->nbytes, &src_chained);
+	u8 *iv;
+	int ivsize = crypto_skcipher_ivsize(skcipher);
+	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
+	unsigned int aligned_size;
+
+	src_nents = sg_nents_for_len(req->src, req->cryptlen);
+	if (unlikely(src_nents < 0)) {
+		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
+			req->cryptlen);
+		return ERR_PTR(src_nents);
+	}
 
-	if (req->dst != req->src)
-		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
+	if (req->dst != req->src) {
+		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
+		if (unlikely(dst_nents < 0)) {
+			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
+				req->cryptlen);
+			return ERR_PTR(dst_nents);
+		}
+	}
 
 	if (likely(req->src == req->dst)) {
-		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-					 DMA_BIDIRECTIONAL, src_chained);
+		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+					      DMA_BIDIRECTIONAL);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(jrdev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
 	} else {
-		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-					 DMA_TO_DEVICE, src_chained);
-		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
-					 DMA_FROM_DEVICE, dst_chained);
+		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+					      DMA_TO_DEVICE);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(jrdev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
+					      DMA_FROM_DEVICE);
+		if (unlikely(!mapped_dst_nents)) {
+			dev_err(jrdev, "unable to map destination\n");
+			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+			return ERR_PTR(-ENOMEM);
+		}
 	}
 
+	if (!ivsize && mapped_src_nents == 1)
+		sec4_sg_ents = 0; // no need for an input hw s/g table
+	else
+		sec4_sg_ents = mapped_src_nents + !!ivsize;
+	dst_sg_idx = sec4_sg_ents;
+
 	/*
-	 * Check if iv can be contiguous with source and destination.
-	 * If so, include it. If not, create scatterlist.
+	 * Input, output HW S/G tables: [IV, src][dst, IV]
+	 * IV entries point to the same buffer
+	 * If src == dst, S/G entries are reused (S/G tables overlap)
+	 *
+	 * HW reads 4 S/G entries at a time; make sure the reads don't go
+	 * beyond the end of the table by allocating more S/G entries. Logic:
+	 * if (output S/G)
+	 *	pad output S/G, if needed
+	 * else if (input S/G) ...
+	 *	pad input S/G, if needed
 	 */
-	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
-	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
-		iv_contig = true;
-	else
-		src_nents = src_nents ? : 1;
-	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
-			sizeof(struct sec4_sg_entry);
+	if (ivsize || mapped_dst_nents > 1) {
+		if (req->src == req->dst)
+			sec4_sg_ents = !!ivsize + pad_sg_nents(sec4_sg_ents);
+		else
+			sec4_sg_ents += pad_sg_nents(mapped_dst_nents +
+						     !!ivsize);
+	} else {
+		sec4_sg_ents = pad_sg_nents(sec4_sg_ents);
+	}
 
-	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
-			sec4_sg_bytes, GFP_DMA | flags);
+	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
+
+	/*
+	 * allocate space for base edesc and hw desc commands, link tables, IV
+	 */
+	aligned_size = sizeof(*edesc) + desc_bytes + sec4_sg_bytes;
+	aligned_size = ALIGN(aligned_size, dma_get_cache_alignment());
+	aligned_size += ~(ARCH_KMALLOC_MINALIGN - 1) &
+			(dma_get_cache_alignment() - 1);
+	aligned_size += ALIGN(ivsize, dma_get_cache_alignment());
+	edesc = kzalloc(aligned_size, flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
 
 	edesc->src_nents = src_nents;
-	edesc->src_chained = src_chained;
 	edesc->dst_nents = dst_nents;
-	edesc->dst_chained = dst_chained;
+	edesc->mapped_src_nents = mapped_src_nents;
+	edesc->mapped_dst_nents = mapped_dst_nents;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
-			 desc_bytes;
+	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+						  desc_bytes);
+	rctx->edesc = edesc;
+
+	/* Make sure IV is located in a DMAable area */
+	if (ivsize) {
+		iv = skcipher_edesc_iv(edesc);
+		memcpy(iv, req->iv, ivsize);
+
+		iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(jrdev, iv_dma)) {
+			dev_err(jrdev, "unable to map IV\n");
+			caam_unmap(jrdev, req->src, req->dst, src_nents,
+				   dst_nents, 0, 0, 0, 0);
+			kfree(edesc);
+			return ERR_PTR(-ENOMEM);
+		}
 
-	sec4_sg_index = 0;
-	if (!iv_contig) {
 		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
-		sg_to_sec4_sg_last(req->src, src_nents,
-				   edesc->sec4_sg + 1, 0);
-		sec4_sg_index += 1 + src_nents;
 	}
-
-	if (dst_nents) {
-		sg_to_sec4_sg_last(req->dst, dst_nents,
-				   edesc->sec4_sg + sec4_sg_index, 0);
+	if (dst_sg_idx)
+		sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg +
+			      !!ivsize, 0);
+
+	if (req->src != req->dst && (ivsize || mapped_dst_nents > 1))
+		sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg +
+			      dst_sg_idx, 0);
+
+	if (ivsize)
+		dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx +
+				   mapped_dst_nents, iv_dma, ivsize, 0);
+
+	if (ivsize || mapped_dst_nents > 1)
+		sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
+				    mapped_dst_nents - 1 + !!ivsize);
+
+	if (sec4_sg_bytes) {
+		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+						    sec4_sg_bytes,
+						    DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+			dev_err(jrdev, "unable to map S/G table\n");
+			caam_unmap(jrdev, req->src, req->dst, src_nents,
+				   dst_nents, iv_dma, ivsize, 0, 0);
+			kfree(edesc);
+			return ERR_PTR(-ENOMEM);
+		}
 	}
 
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
 	edesc->iv_dma = iv_dma;
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
-		       sec4_sg_bytes, 1);
-#endif
+	print_hex_dump_debug("skcipher sec4_sg@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+			     sec4_sg_bytes, 1);
 
-	*iv_contig_out = iv_contig;
 	return edesc;
 }
 
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
+static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
 {
-	struct ablkcipher_edesc *edesc;
-	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-	struct device *jrdev = ctx->jrdev;
-	bool iv_contig;
-	u32 *desc;
-	int ret = 0;
+	struct skcipher_request *req = skcipher_request_cast(areq);
+	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(crypto_skcipher_reqtfm(req));
+	struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+	u32 *desc = rctx->edesc->hw_desc;
+	int ret;
 
-	/* allocate extended descriptor */
-	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
-				       CAAM_CMD_SZ, &iv_contig);
-	if (IS_ERR(edesc))
-		return PTR_ERR(edesc);
+	rctx->edesc->bklog = true;
 
-	/* Create and submit job descriptor*/
-	init_ablkcipher_job(ctx->sh_desc_enc,
-			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
-		       desc_bytes(edesc->hw_desc), 1);
-#endif
-	desc = edesc->hw_desc;
-	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+	ret = caam_jr_enqueue(ctx->jrdev, desc, skcipher_crypt_done, req);
+
+	if (ret == -ENOSPC && engine->retry_support)
+		return ret;
 
-	if (!ret) {
-		ret = -EINPROGRESS;
+	if (ret != -EINPROGRESS) {
+		skcipher_unmap(ctx->jrdev, rctx->edesc, req);
+		kfree(rctx->edesc);
 	} else {
-		ablkcipher_unmap(jrdev, edesc, req);
-		kfree(edesc);
+		ret = 0;
 	}
 
 	return ret;
 }
 
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
+static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
+}
+
+static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 {
-	struct ablkcipher_edesc *edesc;
-	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct skcipher_edesc *edesc;
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
 	struct device *jrdev = ctx->jrdev;
-	bool iv_contig;
+	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
 	u32 *desc;
 	int ret = 0;
+	int len;
+
+	/*
+	 * XTS is expected to return an error even for input length = 0
+	 * Note that the case input length < block size will be caught during
+	 * HW offloading and return an error.
+	 */
+	if (!req->cryptlen && !ctx->fallback)
+		return 0;
+
+	if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
+			      ctx->xts_key_fallback)) {
+		struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+
+		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+		skcipher_request_set_callback(&rctx->fallback_req,
+					      req->base.flags,
+					      req->base.complete,
+					      req->base.data);
+		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+					   req->dst, req->cryptlen, req->iv);
+
+		return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
+				 crypto_skcipher_decrypt(&rctx->fallback_req);
+	}
+
+	len = DESC_JOB_IO_LEN * CAAM_CMD_SZ;
+	if (ctx->is_blob)
+		len += CAAM_DESC_BYTES_MAX;
 
 	/* allocate extended descriptor */
-	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
-				       CAAM_CMD_SZ, &iv_contig);
+	edesc = skcipher_edesc_alloc(req, len);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
 	/* Create and submit job descriptor*/
-	init_ablkcipher_job(ctx->sh_desc_dec,
-			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
+	init_skcipher_job(req, edesc, encrypt);
+
+	print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+			     desc_bytes(edesc->hw_desc), 1);
+
 	desc = edesc->hw_desc;
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
-		       desc_bytes(edesc->hw_desc), 1);
-#endif
-
-	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
-	if (!ret) {
-		ret = -EINPROGRESS;
-	} else {
-		ablkcipher_unmap(jrdev, edesc, req);
+	/*
+	 * Only backlog requests are sent to the crypto-engine, since the
+	 * others can be handled by CAAM, if free, especially since the JR has
+	 * up to 1024 entries (more than the 10 entries from the
+	 * crypto-engine).
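+	 *
+	 * Dispatch sketch (an illustrative summary of the code below, not an
+	 * extra code path):
+	 *	CRYPTO_TFM_REQ_MAY_BACKLOG set   -> crypto-engine transfer,
+	 *					    request can be backlogged
+	 *	CRYPTO_TFM_REQ_MAY_BACKLOG clear -> caam_jr_enqueue() directly,
+	 *					    may fail with -EBUSY when
+	 *					    the ring is full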
+	 */
+	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+		ret = crypto_transfer_skcipher_request_to_engine(jrpriv->engine,
+								 req);
+	else
+		ret = caam_jr_enqueue(jrdev, desc, skcipher_crypt_done, req);
+
+	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
+		skcipher_unmap(jrdev, edesc, req);
 		kfree(edesc);
 	}
 
 	return ret;
 }
 
-#define template_aead		template_u.aead
-#define template_ablkcipher	template_u.ablkcipher
-struct caam_alg_template {
-	char name[CRYPTO_MAX_ALG_NAME];
-	char driver_name[CRYPTO_MAX_ALG_NAME];
-	unsigned int blocksize;
-	u32 type;
-	union {
-		struct ablkcipher_alg ablkcipher;
-		struct aead_alg aead;
-		struct blkcipher_alg blkcipher;
-		struct cipher_alg cipher;
-		struct compress_alg compress;
-		struct rng_alg rng;
-	} template_u;
-	u32 class1_alg_type;
-	u32 class2_alg_type;
-	u32 alg_op;
+static int skcipher_encrypt(struct skcipher_request *req)
+{
+	return skcipher_crypt(req, true);
+}
+
+static int skcipher_decrypt(struct skcipher_request *req)
+{
+	return skcipher_crypt(req, false);
+}
+
+static struct caam_skcipher_alg driver_algs[] = {
+	{
+		.skcipher.base = {
+			.base = {
+				.cra_name = "cbc(paes)",
+				.cra_driver_name = "cbc-paes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = paes_skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE + CAAM_BLOB_OVERHEAD +
+				       CAAM_PKEY_HEADER,
+			.max_keysize = AES_MAX_KEY_SIZE + CAAM_BLOB_OVERHEAD +
+				       CAAM_PKEY_HEADER,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+		.skcipher.op = {
+			.do_one_request = skcipher_do_one_req,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+	},
+	{
+		.skcipher.base = {
+			.base = {
+				.cra_name = "cbc(aes)",
+				.cra_driver_name = "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aes_skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+		.skcipher.op = {
+			.do_one_request = skcipher_do_one_req,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+	},
+	{
+		.skcipher.base = {
+			.base = {
+				.cra_name = "cbc(des3_ede)",
+				.cra_driver_name = "cbc-3des-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = des3_skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+		},
+		.skcipher.op = {
+			.do_one_request = skcipher_do_one_req,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+	},
+	{
+		.skcipher.base = {
+			.base = {
+				.cra_name = "cbc(des)",
+				.cra_driver_name = "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = des_skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.ivsize = DES_BLOCK_SIZE,
+		},
+		.skcipher.op = {
+			.do_one_request = skcipher_do_one_req,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+	},
+	{
+		.skcipher.base = {
+			.base = {
+				.cra_name = "ctr(aes)",
+				.cra_driver_name = "ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = ctr_skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			.chunksize = AES_BLOCK_SIZE,
+		},
+		.skcipher.op = {
+			.do_one_request = skcipher_do_one_req,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
+					OP_ALG_AAI_CTR_MOD128,
+	},
+	{
+		.skcipher.base = {
+			.base = {
+				.cra_name = "rfc3686(ctr(aes))",
+				.cra_driver_name = "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = rfc3686_skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.chunksize = AES_BLOCK_SIZE,
+		},
+		.skcipher.op = {
+			.do_one_request = skcipher_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.rfc3686 = true,
+		},
+	},
+	{
+		.skcipher.base = {
+			.base = {
+				.cra_name = "xts(aes)",
+				.cra_driver_name = "xts-aes-caam",
+				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = xts_skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = 2 * AES_MIN_KEY_SIZE,
+			.max_keysize = 2 * AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+		.skcipher.op = {
+			.do_one_request = skcipher_do_one_req,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+	},
+	{
+		.skcipher.base = {
+			.base = {
+				.cra_name = "ecb(des)",
+				.cra_driver_name = "ecb-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = des_skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+		},
+		.skcipher.op = {
+			.do_one_request = skcipher_do_one_req,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB,
+	},
+	{
+		.skcipher.base = {
+			.base = {
+				.cra_name = "ecb(aes)",
+				.cra_driver_name = "ecb-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aes_skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+		},
+		.skcipher.op = {
+			.do_one_request = skcipher_do_one_req,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
+	},
+	{
+		.skcipher.base = {
+			.base = {
+				.cra_name = "ecb(des3_ede)",
+				.cra_driver_name = "ecb-des3-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = des3_skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+		},
+		.skcipher.op = {
+			.do_one_request = skcipher_do_one_req,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
+	},
 };
 
-static struct caam_alg_template driver_algs[] = {
+static struct caam_aead_alg driver_aeads[] = {
+	{
+		.aead.base = {
+			.base = {
+				.cra_name = "rfc4106(gcm(aes))",
+				.cra_driver_name = "rfc4106-gcm-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = rfc4106_setkey,
+			.setauthsize = rfc4106_setauthsize,
+			.encrypt = ipsec_gcm_encrypt,
+			.decrypt = ipsec_gcm_decrypt,
+			.ivsize = GCM_RFC4106_IV_SIZE,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+			.nodkp = true,
+		},
+	},
+	{
+		.aead.base = {
+			.base = {
+				.cra_name = "rfc4543(gcm(aes))",
+				.cra_driver_name = "rfc4543-gcm-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = rfc4543_setkey,
+			.setauthsize = rfc4543_setauthsize,
+			.encrypt = ipsec_gcm_encrypt,
+			.decrypt = ipsec_gcm_decrypt,
+			.ivsize = GCM_RFC4543_IV_SIZE,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+			.nodkp = true,
+		},
+	},
+	/* Galois Counter Mode */
+	{
+		.aead.base = {
+			.base = {
+				.cra_name = "gcm(aes)",
+				.cra_driver_name = "gcm-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = gcm_setkey,
+			.setauthsize = gcm_setauthsize,
+			.encrypt = gcm_encrypt,
+			.decrypt = gcm_decrypt,
+			.ivsize = GCM_AES_IV_SIZE,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+			.nodkp = true,
+		},
+	},
 	/* single-pass ipsec_esp descriptor */
 	{
-		.name = "authenc(hmac(md5),cbc(aes))",
-		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead.base = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead.base = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead.base = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead.base = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead.base = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead.base = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead.base = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
-			.givencrypt = aead_givencrypt,
-			.geniv = "<built-in>",
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead.base = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(md5),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-hmac-md5-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha1),cbc(aes))",
-		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead.base = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
-			.givencrypt = aead_givencrypt,
-			.geniv = "<built-in>",
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead.base = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha1),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha1-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha224),cbc(aes))",
-		.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead.base = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
-			.givencrypt = aead_givencrypt,
-			.geniv = "<built-in>",
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead.base = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha224),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha224-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha256),cbc(aes))",
-		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead.base = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
-			.givencrypt = aead_givencrypt,
-			.geniv = "<built-in>",
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead.base = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha256),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha256-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha384),cbc(aes))",
-		.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead.base = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
-			.givencrypt = aead_givencrypt,
-			.geniv = "<built-in>",
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead.base = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha384),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha384-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
 	},
-	{
-		.name = "authenc(hmac(sha512),cbc(aes))",
-		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+	{
+		.aead.base = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
-			.givencrypt = aead_givencrypt,
-			.geniv = "<built-in>",
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA512_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
 	},
 	{
-		.name = "authenc(hmac(md5),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead.base = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha512),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha512-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
-			.givencrypt = aead_givencrypt,
-			.geniv = "<built-in>",
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead.base = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = des3_aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		}
+	},
+	{
+		.aead.base = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(md5),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-hmac-md5-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+			.setkey = des3_aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
 	},
 	{
-		.name = "authenc(hmac(sha1),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
+		.aead.base = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = des3_aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead.base = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha1),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha1-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
-			.givencrypt = aead_givencrypt,
-			.geniv = "<built-in>",
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead.base = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+			.setkey = des3_aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha224),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
+		.aead.base = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha224),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha224-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
-			.givencrypt = aead_givencrypt,
-			.geniv = "<built-in>",
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead.base = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+			.setkey = des3_aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha256),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
+		.aead.base = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha256),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha256-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
-			.givencrypt = aead_givencrypt,
-			.geniv = "<built-in>",
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead.base = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+			.setkey = des3_aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha384),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
+		.aead.base = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha384),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha384-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
-			.givencrypt = aead_givencrypt,
-			.geniv = "<built-in>",
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.aead.op = {
+			.do_one_request = aead_do_one_req,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES |
OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead.base = { + .base = { + .cra_name = "authenc(hmac(sha512)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha512-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA384 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + }, }, { - .name = "authenc(hmac(sha512),cbc(des3_ede))", - .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam", - .blocksize = DES3_EDE_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { - .setkey = aead_setkey, + .aead.base = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha512)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha512-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, - .geniv = "<built-in>", .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead.base = { + .base = { + .cra_name = "authenc(hmac(md5),cbc(des))", + .cra_driver_name = "authenc-hmac-md5-" + "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA512 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + }, }, { - .name = "authenc(hmac(md5),cbc(des))", - .driver_name = "authenc-hmac-md5-cbc-des-caam", - .blocksize = DES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead.base = { + .base = { + .cra_name = "echainiv(authenc(hmac(md5)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-hmac-md5-" + "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, - .geniv = "<built-in>", .ivsize = DES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead.base = { + .base = { + .cra_name = "authenc(hmac(sha1),cbc(des))", + .cra_driver_name = "authenc-hmac-sha1-" + "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, }, - 
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + }, }, { - .name = "authenc(hmac(sha1),cbc(des))", - .driver_name = "authenc-hmac-sha1-cbc-des-caam", - .blocksize = DES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead.base = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha1)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha1-cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, - .geniv = "<built-in>", .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead.base = { + .base = { + .cra_name = "authenc(hmac(sha224),cbc(des))", + .cra_driver_name = "authenc-hmac-sha224-" + "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + }, }, { - .name = "authenc(hmac(sha224),cbc(des))", - .driver_name = "authenc-hmac-sha224-cbc-des-caam", - .blocksize = DES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead.base = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha224)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha224-cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, - .geniv = "<built-in>", .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead.base = { + .base = { + .cra_name = "authenc(hmac(sha256),cbc(des))", + .cra_driver_name = "authenc-hmac-sha256-" + "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA224 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = 
aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + }, }, { - .name = "authenc(hmac(sha256),cbc(des))", - .driver_name = "authenc-hmac-sha256-cbc-des-caam", - .blocksize = DES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead.base = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha256)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha256-cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, - .geniv = "<built-in>", .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead.base = { + .base = { + .cra_name = "authenc(hmac(sha384),cbc(des))", + .cra_driver_name = "authenc-hmac-sha384-" + "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA256 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + }, }, { - .name = "authenc(hmac(sha384),cbc(des))", - .driver_name = "authenc-hmac-sha384-cbc-des-caam", - .blocksize = DES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead.base = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha384)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha384-cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, - .geniv = "<built-in>", .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead.base = { + .base = { + .cra_name = "authenc(hmac(sha512),cbc(des))", + .cra_driver_name = "authenc-hmac-sha512-" + "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA384 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + }, }, { - .name = "authenc(hmac(sha512),cbc(des))", - .driver_name = "authenc-hmac-sha512-cbc-des-caam", - .blocksize = DES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead.base = { + 
.base = { + .cra_name = "echainiv(authenc(hmac(sha512)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha512-cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, - .geniv = "<built-in>", .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead.base = { + .base = { + .cra_name = "authenc(hmac(md5)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-md5-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, }, - .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA512 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, - }, - /* ablkcipher descriptor */ - { - .name = "cbc(aes)", - .driver_name = "cbc-aes-caam", - .blocksize = AES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_ABLKCIPHER, - .template_ablkcipher = { - .setkey = ablkcipher_setkey, - .encrypt = ablkcipher_encrypt, - .decrypt = ablkcipher_decrypt, - .geniv = "eseqiv", - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + }, + }, + { + .aead.base = { + .base = { + .cra_name = "seqiv(authenc(" + "hmac(md5),rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-md5-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + .geniv = true, + }, }, { - .name = "cbc(des3_ede)", - .driver_name = "cbc-3des-caam", - .blocksize = DES3_EDE_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_ABLKCIPHER, - .template_ablkcipher = { - .setkey = ablkcipher_setkey, - .encrypt = ablkcipher_encrypt, - .decrypt = ablkcipher_decrypt, - .geniv = "eseqiv", - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES3_EDE_BLOCK_SIZE, + .aead.base = { + .base = { + .cra_name = "authenc(hmac(sha1)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-sha1-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + }, + }, + { + .aead.base = { + .base = { + .cra_name = 
"seqiv(authenc(" + "hmac(sha1),rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-sha1-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, }, - .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + .geniv = true, + }, }, { - .name = "cbc(des)", - .driver_name = "cbc-des-caam", - .blocksize = DES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_ABLKCIPHER, - .template_ablkcipher = { - .setkey = ablkcipher_setkey, - .encrypt = ablkcipher_encrypt, - .decrypt = ablkcipher_decrypt, - .geniv = "eseqiv", - .min_keysize = DES_KEY_SIZE, - .max_keysize = DES_KEY_SIZE, - .ivsize = DES_BLOCK_SIZE, + .aead.base = { + .base = { + .cra_name = "authenc(hmac(sha224)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-sha224-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, }, - .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, - } -}; - -struct caam_crypto_alg { - struct list_head entry; - struct device *ctrldev; - int class1_alg_type; - int class2_alg_type; - int alg_op; - struct crypto_alg crypto_alg; + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + }, + }, + { + .aead.base = { + .base = { + .cra_name = "seqiv(authenc(" + "hmac(sha224),rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-sha224-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + .geniv = true, + }, + }, + { + .aead.base = { + .base = { + .cra_name = "authenc(hmac(sha256)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-sha256-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + }, + }, + { + .aead.base = { + .base = { + .cra_name = "seqiv(authenc(hmac(sha256)," + "rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-sha256-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = 
aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + .geniv = true, + }, + }, + { + .aead.base = { + .base = { + .cra_name = "authenc(hmac(sha384)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-sha384-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + }, + }, + { + .aead.base = { + .base = { + .cra_name = "seqiv(authenc(hmac(sha384)," + "rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-sha384-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + .geniv = true, + }, + }, + { + .aead.base = { + .base = { + .cra_name = "authenc(hmac(sha512)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-sha512-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + }, + }, + { + .aead.base = { + .base = { + .cra_name = "seqiv(authenc(hmac(sha512)," + "rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-sha512-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + .geniv = true, + }, + }, + { + .aead.base = { + .base = { + .cra_name = "rfc7539(chacha20,poly1305)", + .cra_driver_name = "rfc7539-chacha20-poly1305-" + "caam", + .cra_blocksize = 1, + }, + .setkey = chachapoly_setkey, + .setauthsize = chachapoly_setauthsize, + .encrypt = chachapoly_encrypt, + .decrypt = chachapoly_decrypt, + .ivsize = CHACHAPOLY_IV_SIZE, + .maxauthsize = POLY1305_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | + OP_ALG_AAI_AEAD, + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 | + OP_ALG_AAI_AEAD, + .nodkp = true, + }, + }, + { + .aead.base = { + .base = { + .cra_name = "rfc7539esp(chacha20,poly1305)", + .cra_driver_name = "rfc7539esp-chacha20-" + "poly1305-caam", + .cra_blocksize = 1, + }, + .setkey = chachapoly_setkey, + 
.setauthsize = chachapoly_setauthsize, + .encrypt = chachapoly_encrypt, + .decrypt = chachapoly_decrypt, + .ivsize = 8, + .maxauthsize = POLY1305_DIGEST_SIZE, + }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | + OP_ALG_AAI_AEAD, + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 | + OP_ALG_AAI_AEAD, + .nodkp = true, + }, + }, }; -static int caam_cra_init(struct crypto_tfm *tfm) +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, + bool uses_dkp) { - struct crypto_alg *alg = tfm->__crt_alg; - struct caam_crypto_alg *caam_alg = - container_of(alg, struct caam_crypto_alg, crypto_alg); - struct caam_ctx *ctx = crypto_tfm_ctx(tfm); - struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev); - int tgt_jr = atomic_inc_return(&priv->tfm_count); + dma_addr_t dma_addr; + struct caam_drv_private *priv; + const size_t sh_desc_enc_offset = offsetof(struct caam_ctx, + sh_desc_enc); - /* - * distribute tfms across job rings to ensure in-order - * crypto request processing per tfm - */ - ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs]; + ctx->jrdev = caam_jr_alloc(); + if (IS_ERR(ctx->jrdev)) { + pr_err("Job Ring Device allocation for transform failed\n"); + return PTR_ERR(ctx->jrdev); + } + + priv = dev_get_drvdata(ctx->jrdev->parent); + if (priv->era >= 6 && uses_dkp) + ctx->dir = DMA_BIDIRECTIONAL; + else + ctx->dir = DMA_TO_DEVICE; + + dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc, + offsetof(struct caam_ctx, + sh_desc_enc_dma) - + sh_desc_enc_offset, + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); + if (dma_mapping_error(ctx->jrdev, dma_addr)) { + dev_err(ctx->jrdev, "unable to map key, shared descriptors\n"); + caam_jr_free(ctx->jrdev); + return -ENOMEM; + } + + ctx->sh_desc_enc_dma = dma_addr; + ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx, + sh_desc_dec) - + sh_desc_enc_offset; + ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key) - + sh_desc_enc_offset; /* copy descriptor header template value */ - ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; - ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type; - ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op; + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; return 0; } -static void caam_cra_exit(struct crypto_tfm *tfm) +static int caam_cra_init(struct crypto_skcipher *tfm) { - struct caam_ctx *ctx = crypto_tfm_ctx(tfm); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct caam_skcipher_alg *caam_alg = + container_of(alg, typeof(*caam_alg), skcipher.base); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); + u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; + int ret = 0; - if (ctx->sh_desc_enc_dma && - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma)) - dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma, - desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE); - if (ctx->sh_desc_dec_dma && - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma)) - dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma, - desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE); - if (ctx->sh_desc_givenc_dma && - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma)) - dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma, - desc_bytes(ctx->sh_desc_givenc), - DMA_TO_DEVICE); + if (alg_aai == OP_ALG_AAI_XTS) { + const char *tfm_name = crypto_tfm_alg_name(&tfm->base); + struct crypto_skcipher *fallback; + + 
fallback = crypto_alloc_skcipher(tfm_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + pr_err("Failed to allocate %s fallback: %ld\n", + tfm_name, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + + ctx->fallback = fallback; + crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) + + crypto_skcipher_reqsize(fallback)); + } else { + crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx)); + } + + ret = caam_init_common(ctx, &caam_alg->caam, false); + if (ret && ctx->fallback) + crypto_free_skcipher(ctx->fallback); + + return ret; } -static void __exit caam_algapi_exit(void) +static int caam_aead_init(struct crypto_aead *tfm) { + struct aead_alg *alg = crypto_aead_alg(tfm); + struct caam_aead_alg *caam_alg = + container_of(alg, struct caam_aead_alg, aead.base); + struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm); - struct device_node *dev_node; - struct platform_device *pdev; - struct device *ctrldev; - struct caam_drv_private *priv; - struct caam_crypto_alg *t_alg, *n; + crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx)); - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); - if (!dev_node) { - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); - if (!dev_node) - return; - } + return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp); +} - pdev = of_find_device_by_node(dev_node); - if (!pdev) - return; +static void caam_exit_common(struct caam_ctx *ctx) +{ + dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma, + offsetof(struct caam_ctx, sh_desc_enc_dma) - + offsetof(struct caam_ctx, sh_desc_enc), + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); + caam_jr_free(ctx->jrdev); +} - ctrldev = &pdev->dev; - of_node_put(dev_node); - priv = dev_get_drvdata(ctrldev); +static void caam_cra_exit(struct crypto_skcipher *tfm) +{ + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); - if (!priv->alg_list.next) - return; + if (ctx->fallback) + crypto_free_skcipher(ctx->fallback); + caam_exit_common(ctx); +} - list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { - crypto_unregister_alg(&t_alg->crypto_alg); - list_del(&t_alg->entry); - kfree(t_alg); - } +static void caam_aead_exit(struct crypto_aead *tfm) +{ + caam_exit_common(crypto_aead_ctx_dma(tfm)); } -static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, - struct caam_alg_template - *template) +void caam_algapi_exit(void) { - struct caam_crypto_alg *t_alg; - struct crypto_alg *alg; + int i; - t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL); - if (!t_alg) { - dev_err(ctrldev, "failed to allocate t_alg\n"); - return ERR_PTR(-ENOMEM); + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { + struct caam_aead_alg *t_alg = driver_aeads + i; + + if (t_alg->registered) + crypto_engine_unregister_aead(&t_alg->aead); } - alg = &t_alg->crypto_alg; - - snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); - snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", - template->driver_name); - alg->cra_module = THIS_MODULE; - alg->cra_init = caam_cra_init; - alg->cra_exit = caam_cra_exit; - alg->cra_priority = CAAM_CRA_PRIORITY; - alg->cra_blocksize = template->blocksize; - alg->cra_alignmask = 0; - alg->cra_ctxsize = sizeof(struct caam_ctx); - alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY | - template->type; - switch (template->type) { - case CRYPTO_ALG_TYPE_ABLKCIPHER: - alg->cra_type = &crypto_ablkcipher_type; - alg->cra_ablkcipher = template->template_ablkcipher; - break; - case CRYPTO_ALG_TYPE_AEAD: - 
alg->cra_type = &crypto_aead_type; - alg->cra_aead = template->template_aead; - break; + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { + struct caam_skcipher_alg *t_alg = driver_algs + i; + + if (t_alg->registered) + crypto_engine_unregister_skcipher(&t_alg->skcipher); } +} + +static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) +{ + struct skcipher_alg *alg = &t_alg->skcipher.base; - t_alg->class1_alg_type = template->class1_alg_type; - t_alg->class2_alg_type = template->class2_alg_type; - t_alg->alg_op = template->alg_op; - t_alg->ctrldev = ctrldev; + alg->base.cra_module = THIS_MODULE; + alg->base.cra_priority = CAAM_CRA_PRIORITY; + alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); + alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY); - return t_alg; + alg->init = caam_cra_init; + alg->exit = caam_cra_exit; } -static int __init caam_algapi_init(void) +static void caam_aead_alg_init(struct caam_aead_alg *t_alg) { - struct device_node *dev_node; - struct platform_device *pdev; - struct device *ctrldev; - struct caam_drv_private *priv; + struct aead_alg *alg = &t_alg->aead.base; + + alg->base.cra_module = THIS_MODULE; + alg->base.cra_priority = CAAM_CRA_PRIORITY; + alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; + + alg->init = caam_aead_init; + alg->exit = caam_aead_exit; +} + +int caam_algapi_init(struct device *ctrldev) +{ + struct caam_drv_private *priv = dev_get_drvdata(ctrldev); int i = 0, err = 0; + u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst; + unsigned int md_limit = SHA512_DIGEST_SIZE; + bool registered = false, gcm_support; - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); - if (!dev_node) { - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); - if (!dev_node) - return -ENODEV; - } + /* + * Register crypto algorithms the device supports. + * First, detect presence and attributes of DES, AES, and MD blocks. 
+ */ + if (priv->era < 10) { + struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon; + u32 cha_vid, cha_inst, aes_rn; + + cha_vid = rd_reg32(&perfmon->cha_id_ls); + aes_vid = cha_vid & CHA_ID_LS_AES_MASK; + md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + + cha_inst = rd_reg32(&perfmon->cha_num_ls); + des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> + CHA_ID_LS_DES_SHIFT; + aes_inst = cha_inst & CHA_ID_LS_AES_MASK; + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + ccha_inst = 0; + ptha_inst = 0; + + aes_rn = rd_reg32(&perfmon->cha_rev_ls) & CHA_ID_LS_AES_MASK; + gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8); + } else { + struct version_regs __iomem *vreg = &priv->jr[0]->vreg; + u32 aesa, mdha; - pdev = of_find_device_by_node(dev_node); - if (!pdev) - return -ENODEV; + aesa = rd_reg32(&vreg->aesa); + mdha = rd_reg32(&vreg->mdha); - ctrldev = &pdev->dev; - priv = dev_get_drvdata(ctrldev); - of_node_put(dev_node); + aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; + md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; - INIT_LIST_HEAD(&priv->alg_list); + des_inst = rd_reg32(&vreg->desa) & CHA_VER_NUM_MASK; + aes_inst = aesa & CHA_VER_NUM_MASK; + md_inst = mdha & CHA_VER_NUM_MASK; + ccha_inst = rd_reg32(&vreg->ccha) & CHA_VER_NUM_MASK; + ptha_inst = rd_reg32(&vreg->ptha) & CHA_VER_NUM_MASK; + + gcm_support = aesa & CHA_VER_MISC_AES_GCM; + } - atomic_set(&priv->tfm_count, -1); + /* If MD is present, limit digest size based on LP256 */ + if (md_inst && md_vid == CHA_VER_VID_MD_LP256) + md_limit = SHA256_DIGEST_SIZE; - /* register crypto algorithms the device supports */ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { - /* TODO: check if h/w supports alg */ - struct caam_crypto_alg *t_alg; - - t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]); - if (IS_ERR(t_alg)) { - err = PTR_ERR(t_alg); - dev_warn(ctrldev, "%s alg allocation failed\n", - driver_algs[i].driver_name); + struct caam_skcipher_alg *t_alg = driver_algs + i; + u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK; + + /* Skip DES algorithms if not supported by device */ + if (!des_inst && + ((alg_sel == OP_ALG_ALGSEL_3DES) || + (alg_sel == OP_ALG_ALGSEL_DES))) + continue; + + /* Skip AES algorithms if not supported by device */ + if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) + continue; + + /* + * Check support for AES modes not available + * on LP devices. 
+ */
+ if (aes_vid == CHA_VER_VID_AES_LP &&
+ (t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_XTS)
+ continue;
+
+ caam_skcipher_alg_init(t_alg);
+
+ err = crypto_engine_register_skcipher(&t_alg->skcipher);
+ if (err) {
+ pr_warn("%s alg registration failed\n",
+ t_alg->skcipher.base.base.cra_driver_name);
 continue;
 }
- err = crypto_register_alg(&t_alg->crypto_alg);
+ t_alg->registered = true;
+ registered = true;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+ struct caam_aead_alg *t_alg = driver_aeads + i;
+ u32 c1_alg_sel = t_alg->caam.class1_alg_type &
+ OP_ALG_ALGSEL_MASK;
+ u32 c2_alg_sel = t_alg->caam.class2_alg_type &
+ OP_ALG_ALGSEL_MASK;
+ u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+
+ /* Skip DES algorithms if not supported by device */
+ if (!des_inst &&
+ ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
+ (c1_alg_sel == OP_ALG_ALGSEL_DES)))
+ continue;
+
+ /* Skip AES algorithms if not supported by device */
+ if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
+ continue;
+
+ /* Skip CHACHA20 algorithms if not supported by device */
+ if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst)
+ continue;
+
+ /* Skip POLY1305 algorithms if not supported by device */
+ if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
+ continue;
+
+ /* Skip GCM algorithms if not supported by device */
+ if (c1_alg_sel == OP_ALG_ALGSEL_AES &&
+ alg_aai == OP_ALG_AAI_GCM && !gcm_support)
+ continue;
+
+ /*
+ * Skip algorithms requiring message digests
+ * if MD or MD size is not supported by device.
+ */
+ if (is_mdha(c2_alg_sel) &&
+ (!md_inst || t_alg->aead.base.maxauthsize > md_limit))
+ continue;
+
+ caam_aead_alg_init(t_alg);
+
+ err = crypto_engine_register_aead(&t_alg->aead);
 if (err) {
- dev_warn(ctrldev, "%s alg registration failed\n",
- t_alg->crypto_alg.cra_driver_name);
- kfree(t_alg);
- } else
- list_add_tail(&t_alg->entry, &priv->alg_list);
+ pr_warn("%s alg registration failed\n",
+ t_alg->aead.base.base.cra_driver_name);
+ continue;
+ }
+
+ t_alg->registered = true;
+ registered = true;
 }
- if (!list_empty(&priv->alg_list))
- dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
- (char *)of_get_property(dev_node, "compatible", NULL));
+
+ if (registered)
+ pr_info("caam algorithms registered in /proc/crypto\n");
 return err;
 }
-
-module_init(caam_algapi_init);
-module_exit(caam_algapi_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("FSL CAAM support for crypto API");
-MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
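
A note on the XTS fallback wiring in caam_cra_init() above: the request size is grown by crypto_skcipher_reqsize(fallback) precisely so that a software fallback request can be carved out of the driver's own request context. The request-time half of that pattern is outside this hunk; the sketch below (not part of the patch) shows the conventional forwarding shape, assuming the driver's own headers are in scope, a hypothetical helper name caam_xts_fallback_crypt(), and, for illustration only, a request context whose fallback request sits at offset zero.

/*
 * Sketch only: forward an skcipher request to the software fallback
 * allocated in caam_cra_init(). Assumes caamalg.c's context, including
 * <crypto/internal/skcipher.h> and struct caam_ctx.
 */
static int caam_xts_fallback_crypt(struct skcipher_request *req, bool encrypt)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
        /* Assumes the fallback request starts the req ctx (sketch only) */
        struct skcipher_request *subreq = skcipher_request_ctx(req);

        /* Re-target the caller's request at the software fallback tfm, */
        skcipher_request_set_tfm(subreq, ctx->fallback);
        /* keeping the caller's completion callback and flags, */
        skcipher_request_set_callback(subreq, req->base.flags,
                                      req->base.complete, req->base.data);
        /* and its scatterlists, length and IV unchanged. */
        skcipher_request_set_crypt(subreq, req->src, req->dst,
                                   req->cryptlen, req->iv);

        return encrypt ? crypto_skcipher_encrypt(subreq) :
                         crypto_skcipher_decrypt(subreq);
}

This is also why the fallback is allocated with CRYPTO_ALG_NEED_FALLBACK and released again in caam_cra_exit(): requests the hardware cannot serve can still complete through the software implementation behind the same tfm.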

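More broadly, everything this patch registers is reached through the standard kernel crypto API by cra_name. A minimal, purely illustrative in-kernel consumer of the rfc7539(chacha20,poly1305) transform added above could look like the following; chachapoly_demo() is a made-up name, and the zeroed key/IV and buffer sizes are demo values only.

#include <crypto/aead.h>
#include <crypto/poly1305.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int chachapoly_demo(void)
{
        DECLARE_CRYPTO_WAIT(wait);
        struct crypto_aead *tfm;
        struct aead_request *req = NULL;
        struct scatterlist sg;
        u8 key[32] = { 0 }, iv[12] = { 0 };     /* demo key, 12-byte nonce */
        unsigned int assoclen = 16, ptlen = 64;
        u8 *buf = NULL;
        int err;

        /*
         * Resolved by cra_name; with CAAM present and winning on priority,
         * this binds to "rfc7539-chacha20-poly1305-caam".
         */
        tfm = crypto_alloc_aead("rfc7539(chacha20,poly1305)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_aead_setkey(tfm, key, sizeof(key));
        if (!err)
                err = crypto_aead_setauthsize(tfm, POLY1305_DIGEST_SIZE);
        if (err)
                goto out;

        /* One in-place buffer: AAD, then plaintext, then room for the tag */
        buf = kzalloc(assoclen + ptlen + POLY1305_DIGEST_SIZE, GFP_KERNEL);
        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!buf || !req) {
                err = -ENOMEM;
                goto out;
        }

        sg_init_one(&sg, buf, assoclen + ptlen + POLY1305_DIGEST_SIZE);
        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                       CRYPTO_TFM_REQ_MAY_SLEEP,
                                  crypto_req_done, &wait);
        aead_request_set_ad(req, assoclen);
        aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

        /* The CAAM backend completes asynchronously; wait for the callback */
        err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

out:
        aead_request_free(req);
        kfree(buf);
        crypto_free_aead(tfm);
        return err;
}

Decryption is symmetric: pass ptlen + POLY1305_DIGEST_SIZE as the cryptlen and call crypto_aead_decrypt(), which returns -EBADMSG on an authentication-tag mismatch.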