Diffstat (limited to 'drivers/crypto/caam')
-rw-r--r--  drivers/crypto/caam/Kconfig | 22
-rw-r--r--  drivers/crypto/caam/Makefile | 6
-rw-r--r--  drivers/crypto/caam/blob_gen.c | 246
-rw-r--r--  drivers/crypto/caam/caamalg.c | 644
-rw-r--r--  drivers/crypto/caam/caamalg_desc.c | 87
-rw-r--r--  drivers/crypto/caam/caamalg_desc.h | 13
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 100
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c | 292
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.h | 12
-rw-r--r--  drivers/crypto/caam/caamhash.c | 169
-rw-r--r--  drivers/crypto/caam/caamhash_desc.c | 2
-rw-r--r--  drivers/crypto/caam/caampkc.c | 143
-rw-r--r--  drivers/crypto/caam/caampkc.h | 3
-rw-r--r--  drivers/crypto/caam/caamprng.c | 241
-rw-r--r--  drivers/crypto/caam/caamrng.c | 68
-rw-r--r--  drivers/crypto/caam/ctrl.c | 521
-rw-r--r--  drivers/crypto/caam/debugfs.c | 14
-rw-r--r--  drivers/crypto/caam/debugfs.h | 9
-rw-r--r--  drivers/crypto/caam/desc.h | 9
-rw-r--r--  drivers/crypto/caam/desc_constr.h | 11
-rw-r--r--  drivers/crypto/caam/dpseci-debugfs.c | 2
-rw-r--r--  drivers/crypto/caam/intern.h | 48
-rw-r--r--  drivers/crypto/caam/jr.c | 281
-rw-r--r--  drivers/crypto/caam/key_gen.c | 2
-rw-r--r--  drivers/crypto/caam/pdb.h | 2
-rw-r--r--  drivers/crypto/caam/qi.c | 97
-rw-r--r--  drivers/crypto/caam/qi.h | 12
-rw-r--r--  drivers/crypto/caam/regs.h | 33
28 files changed, 2365 insertions, 724 deletions
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 84ea7cba5ee5..05210a0edb8a 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -10,7 +10,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
config CRYPTO_DEV_FSL_CAAM
tristate "Freescale CAAM-Multicore platform driver backend"
- depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
+ depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
select SOC_BUS
select CRYPTO_DEV_FSL_CAAM_COMMON
imply FSL_MC_BUS
@@ -151,6 +151,26 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
Selecting this will register the SEC4 hardware rng to
the hw_random API for supplying the kernel entropy pool.
+config CRYPTO_DEV_FSL_CAAM_PRNG_API
+ bool "Register Pseudo random number generation implementation with Crypto API"
+ default y
+ select CRYPTO_RNG
+ help
+ Selecting this will register the SEC hardware prng to
+ the Crypto API.
+
+config CRYPTO_DEV_FSL_CAAM_BLOB_GEN
+ bool
+
+config CRYPTO_DEV_FSL_CAAM_RNG_TEST
+ bool "Test caam rng"
+ select CRYPTO_DEV_FSL_CAAM_RNG_API
+ help
+ Selecting this will enable a self-test to run for the
+ caam RNG.
+ This test is several minutes long and executes
+ just before the RNG is registered with the hw_random API.
+
endif # CRYPTO_DEV_FSL_CAAM_JR
endif # CRYPTO_DEV_FSL_CAAM
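
For reference, a config fragment that exercises the new options might look like the following (illustrative only; CRYPTO_DEV_FSL_CAAM_BLOB_GEN carries no prompt above, so it is only set when another option selects it):

  CONFIG_CRYPTO_DEV_FSL_CAAM=y
  CONFIG_CRYPTO_DEV_FSL_CAAM_JR=y
  CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API=y
  CONFIG_CRYPTO_DEV_FSL_CAAM_PRNG_API=y
  CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST=y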
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 3570286eb9ce..d2eaf5205b1c 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -20,13 +20,11 @@ caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PRNG_API) += caamprng.o
caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_BLOB_GEN) += blob_gen.o
caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
-ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
- ccflags-y += -DCONFIG_CAAM_QI
-endif
-
caam-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c
new file mode 100644
index 000000000000..c18dbac56493
--- /dev/null
+++ b/drivers/crypto/caam/blob_gen.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
+ * Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
+ * Copyright 2024-2025 NXP
+ */
+
+#define pr_fmt(fmt) "caam blob_gen: " fmt
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <keys/trusted-type.h>
+#include <soc/fsl/caam-blob.h>
+
+#include "compat.h"
+#include "desc_constr.h"
+#include "desc.h"
+#include "error.h"
+#include "intern.h"
+#include "jr.h"
+#include "regs.h"
+
+#define CAAM_BLOB_DESC_BYTES_MAX \
+ /* Command to initialize & stating length of descriptor */ \
+ (CAAM_CMD_SZ + \
+ /* Command to append the key-modifier + key-modifier data */ \
+ CAAM_CMD_SZ + CAAM_BLOB_KEYMOD_LENGTH + \
+ /* Command to include input key + pointer to the input key */ \
+ CAAM_CMD_SZ + CAAM_PTR_SZ_MAX + \
+ /* Command to include output key + pointer to the output key */ \
+ CAAM_CMD_SZ + CAAM_PTR_SZ_MAX + \
+ /* Command describing the operation to perform */ \
+ CAAM_CMD_SZ)
+
+struct caam_blob_priv {
+ struct device jrdev;
+};
+
+struct caam_blob_job_result {
+ int err;
+ struct completion completion;
+};
+
+static void caam_blob_job_done(struct device *dev, u32 *desc, u32 err, void *context)
+{
+ struct caam_blob_job_result *res = context;
+ int ecode = 0;
+
+ dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+
+ if (err)
+ ecode = caam_jr_strstatus(dev, err);
+
+ res->err = ecode;
+
+ /*
+ * Upon completion, desc points to a buffer containing a CAAM job
+ * descriptor which encapsulates data into an externally-storable
+ * blob.
+ */
+ complete(&res->completion);
+}
+
+static u32 check_caam_state(struct device *jrdev)
+{
+ const struct caam_drv_private *ctrlpriv;
+
+ ctrlpriv = dev_get_drvdata(jrdev->parent);
+ return FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status));
+}
+
+int caam_process_blob(struct caam_blob_priv *priv,
+ struct caam_blob_info *info, bool encap)
+{
+ struct caam_blob_job_result testres;
+ struct device *jrdev = &priv->jrdev;
+ dma_addr_t dma_in, dma_out;
+ int op = OP_PCLID_BLOB;
+ int hwbk_caam_ovhd = 0;
+ size_t output_len;
+ u32 *desc;
+ u32 moo;
+ int ret;
+ int len;
+
+ if (info->key_mod_len > CAAM_BLOB_KEYMOD_LENGTH)
+ return -EINVAL;
+
+ if (encap) {
+ op |= OP_TYPE_ENCAP_PROTOCOL;
+ output_len = info->input_len + CAAM_BLOB_OVERHEAD;
+ } else {
+ op |= OP_TYPE_DECAP_PROTOCOL;
+ output_len = info->input_len - CAAM_BLOB_OVERHEAD;
+ info->output_len = output_len;
+ }
+
+ if (encap && info->pkey_info.is_pkey) {
+ op |= OP_PCL_BLOB_BLACK;
+ if (info->pkey_info.key_enc_algo == CAAM_ENC_ALGO_CCM) {
+ op |= OP_PCL_BLOB_EKT;
+ hwbk_caam_ovhd = CAAM_CCM_OVERHEAD;
+ }
+ if ((info->input_len + hwbk_caam_ovhd) > MAX_KEY_SIZE)
+ return -EINVAL;
+
+ len = info->input_len + hwbk_caam_ovhd;
+ } else {
+ len = info->input_len;
+ }
+
+ desc = kzalloc(CAAM_BLOB_DESC_BYTES_MAX, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ dma_in = dma_map_single(jrdev, info->input, len,
+ encap ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, dma_in)) {
+ dev_err(jrdev, "unable to map input DMA buffer\n");
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ dma_out = dma_map_single(jrdev, info->output, output_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, dma_out)) {
+ dev_err(jrdev, "unable to map output DMA buffer\n");
+ ret = -ENOMEM;
+ goto out_unmap_in;
+ }
+
+ moo = check_caam_state(jrdev);
+ if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED)
+ dev_warn(jrdev,
+ "using insecure test key, enable HAB to use unique device key!\n");
+
+ /*
+ * A data blob is encrypted using a blob key (BK); a random number.
+ * The BK is used as an AES-CCM key. The initial block (B0) and the
+ * initial counter (Ctr0) are generated automatically and stored in
+ * Class 1 Context DWords 0+1+2+3. The random BK is stored in the
+ * Class 1 Key Register. Operation Mode is set to AES-CCM.
+ */
+ init_job_desc(desc, 0);
+
+ if (encap && info->pkey_info.is_pkey) {
+ /*!1. key command used to load class 1 key register
+ * from input plain key.
+ */
+ append_key(desc, dma_in, info->input_len,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ /*!2. Fifostore to store protected key from class 1 key register. */
+ if (info->pkey_info.key_enc_algo == CAAM_ENC_ALGO_CCM) {
+ append_fifo_store(desc, dma_in, info->input_len,
+ LDST_CLASS_1_CCB |
+ FIFOST_TYPE_KEY_CCM_JKEK);
+ } else {
+ append_fifo_store(desc, dma_in, info->input_len,
+ LDST_CLASS_1_CCB |
+ FIFOST_TYPE_KEY_KEK);
+ }
+ /*
+ * JUMP_OFFSET specifies the offset of the JUMP target from
+ * the JUMP command's address in the descriptor buffer.
+ */
+ append_jump(desc, JUMP_COND_NOP | BIT(0) << JUMP_OFFSET_SHIFT);
+ }
+
+ /*!3. Load class 2 key with key modifier. */
+ append_key_as_imm(desc, info->key_mod, info->key_mod_len,
+ info->key_mod_len, CLASS_2 | KEY_DEST_CLASS_REG);
+
+ /*!4. SEQ IN PTR Command. */
+ append_seq_in_ptr(desc, dma_in, info->input_len, 0);
+
+ /*!5. SEQ OUT PTR Command. */
+ append_seq_out_ptr(desc, dma_out, output_len, 0);
+
+ /*!6. Blob encapsulation/decapsulation PROTOCOL Command. */
+ append_operation(desc, op);
+
+ print_hex_dump_debug("data@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 1, info->input,
+ len, false);
+ print_hex_dump_debug("jobdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 1, desc,
+ desc_bytes(desc), false);
+
+ testres.err = 0;
+ init_completion(&testres.completion);
+
+ ret = caam_jr_enqueue(jrdev, desc, caam_blob_job_done, &testres);
+ if (ret == -EINPROGRESS) {
+ wait_for_completion(&testres.completion);
+ ret = testres.err;
+ print_hex_dump_debug("output@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 1, info->output,
+ output_len, false);
+ }
+
+ if (ret == 0)
+ info->output_len = output_len;
+
+ dma_unmap_single(jrdev, dma_out, output_len, DMA_FROM_DEVICE);
+out_unmap_in:
+ dma_unmap_single(jrdev, dma_in, len,
+ encap ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+out_free:
+ kfree(desc);
+ return ret;
+}
+EXPORT_SYMBOL(caam_process_blob);
+
+struct caam_blob_priv *caam_blob_gen_init(void)
+{
+ struct caam_drv_private *ctrlpriv;
+ struct device *jrdev;
+
+ /*
+ * caam_blob_gen_init() may expectedly fail with -ENODEV, e.g. when
+ * CAAM driver didn't probe or when SoC lacks BLOB support. An
+ * error would be harsh in this case, so we stick to info level.
+ */
+
+ jrdev = caam_jr_alloc();
+ if (IS_ERR(jrdev)) {
+ pr_info("job ring requested, but none currently available\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ ctrlpriv = dev_get_drvdata(jrdev->parent);
+ if (!ctrlpriv->blob_present) {
+ dev_info(jrdev, "no hardware blob generation support\n");
+ caam_jr_free(jrdev);
+ return ERR_PTR(-ENODEV);
+ }
+
+ return container_of(jrdev, struct caam_blob_priv, jrdev);
+}
+EXPORT_SYMBOL(caam_blob_gen_init);
+
+void caam_blob_gen_exit(struct caam_blob_priv *priv)
+{
+ caam_jr_free(&priv->jrdev);
+}
+EXPORT_SYMBOL(caam_blob_gen_exit);
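
The file above exports caam_blob_gen_init(), caam_process_blob() and caam_blob_gen_exit() for in-kernel users. A minimal encapsulation sketch, assuming only the caam_blob_info fields used above (input, input_len, output, output_len, key_mod, key_mod_len from <soc/fsl/caam-blob.h>) and a hypothetical example_encap() caller:

  /* Illustrative only; example_encap() is not part of this patch. */
  #include <linux/err.h>
  #include <linux/slab.h>
  #include <linux/string.h>
  #include <linux/types.h>
  #include <soc/fsl/caam-blob.h>

  static int example_encap(const u8 *payload, size_t len, u8 **blob_out)
  {
          static const char key_mod[] = "example-key-mod"; /* hypothetical modifier */
          struct caam_blob_priv *priv;
          struct caam_blob_info info = {};
          int ret = -ENOMEM;

          priv = caam_blob_gen_init();
          if (IS_ERR(priv))
                  return PTR_ERR(priv);

          /*
           * caam_process_blob() calls dma_map_single() on both buffers, so
           * they must come from the linear map (kmalloc), not stack/vmalloc.
           */
          info.input = kmemdup(payload, len, GFP_KERNEL);
          info.output = kzalloc(len + CAAM_BLOB_OVERHEAD, GFP_KERNEL);
          if (!info.input || !info.output)
                  goto out;

          info.input_len = len;
          info.key_mod = key_mod;
          info.key_mod_len = sizeof(key_mod); /* must not exceed CAAM_BLOB_KEYMOD_LENGTH */

          ret = caam_process_blob(priv, &info, true); /* true = encapsulate */
          if (!ret)
                  *blob_out = info.output; /* info.output_len now holds the blob size */

  out:
          if (ret)
                  kfree(info.output);
          kfree(info.input);
          caam_blob_gen_exit(priv);
          return ret;
  }

On success, info.output_len is set by caam_process_blob() to input_len + CAAM_BLOB_OVERHEAD; decapsulation follows the same pattern with encap = false and the sizes reversed.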
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 8697ae53b063..32a6e6e15ee2 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2016-2019 NXP
+ * Copyright 2016-2019, 2023, 2025 NXP
*
* Based on talitos crypto API driver.
*
@@ -56,9 +56,21 @@
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamalg_desc.h"
-#include <crypto/engine.h>
+#include <linux/unaligned.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/engine.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
-#include <asm/unaligned.h>
+#include <keys/trusted-type.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/key-type.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <soc/fsl/caam-blob.h>
/*
* crypto alg
@@ -89,13 +101,13 @@ struct caam_alg_entry {
};
struct caam_aead_alg {
- struct aead_alg aead;
+ struct aead_engine_alg aead;
struct caam_alg_entry caam;
bool registered;
};
struct caam_skcipher_alg {
- struct skcipher_alg skcipher;
+ struct skcipher_engine_alg skcipher;
struct caam_alg_entry caam;
bool registered;
};
@@ -104,19 +116,21 @@ struct caam_skcipher_alg {
* per-session context
*/
struct caam_ctx {
- struct crypto_engine_ctx enginectx;
u32 sh_desc_enc[DESC_MAX_USED_LEN];
u32 sh_desc_dec[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t sh_desc_enc_dma;
dma_addr_t sh_desc_dec_dma;
dma_addr_t key_dma;
+ u8 protected_key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t protected_key_dma;
enum dma_data_direction dir;
struct device *jrdev;
struct alginfo adata;
struct alginfo cdata;
unsigned int authsize;
bool xts_key_fallback;
+ bool is_blob;
struct crypto_skcipher *fallback;
};
@@ -131,7 +145,7 @@ struct caam_aead_req_ctx {
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
@@ -182,9 +196,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
static int aead_set_sh_desc(struct crypto_aead *aead)
{
struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
- struct caam_aead_alg, aead);
+ struct caam_aead_alg,
+ aead.base);
unsigned int ivsize = crypto_aead_ivsize(aead);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 ctx1_iv_off = 0;
@@ -312,7 +327,7 @@ skip_givenc:
static int aead_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
ctx->authsize = authsize;
aead_set_sh_desc(authenc);
@@ -322,7 +337,7 @@ static int aead_setauthsize(struct crypto_aead *authenc,
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc;
@@ -372,7 +387,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
int err;
err = crypto_gcm_check_authsize(authsize);
@@ -387,7 +402,7 @@ static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc;
@@ -440,7 +455,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
static int rfc4106_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
int err;
err = crypto_rfc4106_check_authsize(authsize);
@@ -455,7 +470,7 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc,
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc;
@@ -508,7 +523,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
static int rfc4543_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
if (authsize != 16)
return -EINVAL;
@@ -521,7 +536,7 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc;
@@ -547,7 +562,7 @@ static int chachapoly_set_sh_desc(struct crypto_aead *aead)
static int chachapoly_setauthsize(struct crypto_aead *aead,
unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
if (authsize != POLY1305_DIGEST_SIZE)
return -EINVAL;
@@ -559,14 +574,15 @@ static int chachapoly_setauthsize(struct crypto_aead *aead,
static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
if (keylen != CHACHA_KEY_SIZE + saltlen)
return -EINVAL;
- ctx->cdata.key_virt = key;
+ memcpy(ctx->key, key, keylen);
+ ctx->cdata.key_virt = ctx->key;
ctx->cdata.keylen = keylen - saltlen;
return chachapoly_set_sh_desc(aead);
@@ -575,7 +591,7 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
static int aead_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys;
@@ -656,7 +672,7 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
static int gcm_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
int err;
@@ -677,7 +693,7 @@ static int gcm_setkey(struct crypto_aead *aead,
static int rfc4106_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
int err;
@@ -703,7 +719,7 @@ static int rfc4106_setkey(struct crypto_aead *aead,
static int rfc4543_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
int err;
@@ -729,10 +745,10 @@ static int rfc4543_setkey(struct crypto_aead *aead,
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen, const u32 ctx1_iv_off)
{
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct caam_skcipher_alg *alg =
container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
- skcipher);
+ skcipher.base);
struct device *jrdev = ctx->jrdev;
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc;
@@ -741,9 +757,14 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+ /* Here keylen is actual key length */
ctx->cdata.keylen = keylen;
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
+ /* Here protected key len is plain key length */
+ ctx->cdata.plain_keylen = keylen;
+ ctx->cdata.key_cmd_opt = 0;
+
/* skcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
@@ -762,6 +783,62 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
return 0;
}
+static int paes_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_pkey_info *pkey_info = (struct caam_pkey_info *)key;
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
+ struct device *jrdev = ctx->jrdev;
+ int err;
+
+ ctx->cdata.key_inline = false;
+
+ keylen = keylen - CAAM_PKEY_HEADER;
+
+ /* Retrieve the length of key */
+ ctx->cdata.plain_keylen = pkey_info->plain_key_sz;
+
+ /* Retrieve the length of blob*/
+ ctx->cdata.keylen = keylen;
+
+ /* Retrieve the address of the blob */
+ ctx->cdata.key_virt = pkey_info->key_buf;
+
+ /* Validate key length for AES algorithms */
+ err = aes_check_keylen(ctx->cdata.plain_keylen);
+ if (err) {
+ dev_err(jrdev, "bad key length\n");
+ return err;
+ }
+
+ /* set command option */
+ ctx->cdata.key_cmd_opt |= KEY_ENC;
+
+ /* check if the Protected-Key is CCM key */
+ if (pkey_info->key_enc_algo == CAAM_ENC_ALGO_CCM)
+ ctx->cdata.key_cmd_opt |= KEY_EKT;
+
+ memcpy(ctx->key, ctx->cdata.key_virt, keylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ ctx->cdata.key_dma = ctx->key_dma;
+
+ if (pkey_info->key_enc_algo == CAAM_ENC_ALGO_CCM)
+ ctx->protected_key_dma = dma_map_single(jrdev, ctx->protected_key,
+ ctx->cdata.plain_keylen +
+ CAAM_CCM_OVERHEAD,
+ DMA_FROM_DEVICE);
+ else
+ ctx->protected_key_dma = dma_map_single(jrdev, ctx->protected_key,
+ ctx->cdata.plain_keylen,
+ DMA_FROM_DEVICE);
+
+ ctx->cdata.protected_key_dma = ctx->protected_key_dma;
+ ctx->is_blob = true;
+
+ return 0;
+}
+
static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
const u8 *key, unsigned int keylen)
{
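
The paes setkey above does not receive a plaintext AES key: the buffer is expected to start with a struct caam_pkey_info header (CAAM_PKEY_HEADER bytes, defined in NXP headers not shown in this diff) followed by the key blob. A hedged sketch of how a caller might assemble such a buffer, using only the fields dereferenced above (is_pkey, plain_key_sz, key_enc_algo, key_buf) and treating the exact header layout as an assumption:

  /* Illustrative only: struct caam_pkey_info and CAAM_PKEY_HEADER come
   * from NXP headers that are not part of this hunk. */
  #include <linux/errno.h>
  #include <linux/string.h>
  #include <linux/types.h>

  static int example_build_paes_key(u8 *dst, size_t dst_len,
                                    const u8 *blob, size_t blob_len,
                                    u32 plain_key_sz)
  {
          struct caam_pkey_info *pinfo = (struct caam_pkey_info *)dst;

          if (dst_len < CAAM_PKEY_HEADER + blob_len)
                  return -EINVAL;

          pinfo->is_pkey = true;                   /* mark buffer as a protected key */
          pinfo->plain_key_sz = plain_key_sz;      /* size of the unwrapped AES key */
          pinfo->key_enc_algo = CAAM_ENC_ALGO_CCM; /* blob wrapped with AES-CCM (EKT) */
          memcpy(pinfo->key_buf, blob, blob_len);  /* key blob follows the header */

          return 0;
  }

The assembled buffer (CAAM_PKEY_HEADER + blob_len bytes) would then be handed to crypto_skcipher_setkey() on a "cbc(paes)" transform, which routes to paes_skcipher_setkey() above.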
@@ -832,7 +909,7 @@ static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
@@ -998,6 +1075,13 @@ static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
crypto_finalize_aead_request(jrp->engine, req, ecode);
}
+static inline u8 *skcipher_edesc_iv(struct skcipher_edesc *edesc)
+{
+
+ return PTR_ALIGN((u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
+ dma_get_cache_alignment());
+}
+
static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
@@ -1025,8 +1109,7 @@ static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
* This is used e.g. by the CTS mode.
*/
if (ivsize && !ecode) {
- memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
- ivsize);
+ memcpy(req->iv, skcipher_edesc_iv(edesc), ivsize);
print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
@@ -1057,7 +1140,7 @@ static void init_aead_job(struct aead_request *req,
bool all_contig, bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
int authsize = ctx->authsize;
u32 *desc = edesc->hw_desc;
u32 out_options, in_options;
@@ -1118,7 +1201,7 @@ static void init_gcm_job(struct aead_request *req,
bool all_contig, bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc = edesc->hw_desc;
bool generic_gcm = (ivsize == GCM_AES_IV_SIZE);
@@ -1183,9 +1266,10 @@ static void init_authenc_job(struct aead_request *req,
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
- struct caam_aead_alg, aead);
+ struct caam_aead_alg,
+ aead.base);
unsigned int ivsize = crypto_aead_ivsize(aead);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
@@ -1234,10 +1318,12 @@ static void init_skcipher_job(struct skcipher_request *req,
const bool encrypt)
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *jrdev = ctx->jrdev;
int ivsize = crypto_skcipher_ivsize(skcipher);
- u32 *desc = edesc->hw_desc;
+ u32 *desc = !ctx->is_blob ? edesc->hw_desc :
+ (u32 *)((u8 *)edesc->hw_desc + CAAM_DESC_BYTES_MAX);
+ dma_addr_t desc_dma;
u32 *sh_desc;
u32 in_options = 0, out_options = 0;
dma_addr_t src_dma, dst_dma, ptr;
@@ -1252,11 +1338,6 @@ static void init_skcipher_job(struct skcipher_request *req,
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
- sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
- ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
-
- len = desc_len(sh_desc);
- init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
if (ivsize || edesc->mapped_src_nents > 1) {
src_dma = edesc->sec4_sg_dma;
@@ -1266,8 +1347,6 @@ static void init_skcipher_job(struct skcipher_request *req,
src_dma = sg_dma_address(req->src);
}
- append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);
-
if (likely(req->src == req->dst)) {
dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
out_options = in_options;
@@ -1279,7 +1358,25 @@ static void init_skcipher_job(struct skcipher_request *req,
out_options = LDST_SGF;
}
- append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
+ if (ctx->is_blob) {
+ cnstr_desc_skcipher_enc_dec(desc, &ctx->cdata,
+ src_dma, dst_dma, req->cryptlen + ivsize,
+ in_options, out_options,
+ ivsize, encrypt);
+
+ desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE);
+
+ cnstr_desc_protected_blob_decap(edesc->hw_desc, &ctx->cdata, desc_dma);
+ } else {
+ sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
+ ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
+
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+ append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);
+
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
+ }
}
/*
@@ -1290,7 +1387,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
@@ -1379,8 +1476,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
- GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, flags);
if (!edesc) {
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
0, 0, 0);
@@ -1457,7 +1553,7 @@ static inline int chachapoly_crypt(struct aead_request *req, bool encrypt)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
bool all_contig;
u32 *desc;
@@ -1491,7 +1587,7 @@ static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
bool all_contig;
@@ -1524,7 +1620,7 @@ static int aead_decrypt(struct aead_request *req)
static int aead_do_one_req(struct crypto_engine *engine, void *areq)
{
struct aead_request *req = aead_request_cast(areq);
- struct caam_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(crypto_aead_reqtfm(req));
struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
u32 *desc = rctx->edesc->hw_desc;
int ret;
@@ -1533,6 +1629,9 @@ static int aead_do_one_req(struct crypto_engine *engine, void *areq)
ret = caam_jr_enqueue(ctx->jrdev, desc, aead_crypt_done, req);
+ if (ret == -ENOSPC && engine->retry_support)
+ return ret;
+
if (ret != -EINPROGRESS) {
aead_unmap(ctx->jrdev, rctx->edesc, req);
kfree(rctx->edesc);
@@ -1547,7 +1646,7 @@ static inline int gcm_crypt(struct aead_request *req, bool encrypt)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
bool all_contig;
@@ -1594,7 +1693,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
int desc_bytes)
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
@@ -1605,6 +1704,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
u8 *iv;
int ivsize = crypto_skcipher_ivsize(skcipher);
int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
+ unsigned int aligned_size;
src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (unlikely(src_nents < 0)) {
@@ -1678,8 +1778,12 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
/*
* allocate space for base edesc and hw desc commands, link tables, IV
*/
- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
- GFP_DMA | flags);
+ aligned_size = sizeof(*edesc) + desc_bytes + sec4_sg_bytes;
+ aligned_size = ALIGN(aligned_size, dma_get_cache_alignment());
+ aligned_size += ~(ARCH_KMALLOC_MINALIGN - 1) &
+ (dma_get_cache_alignment() - 1);
+ aligned_size += ALIGN(ivsize, dma_get_cache_alignment());
+ edesc = kzalloc(aligned_size, flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
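
The allocation above replaces the old GFP_DMA allocation with explicit padding so that the IV, which skcipher_edesc_iv() locates with PTR_ALIGN() right after the sec4 S/G table, always lands on a DMA-cache-line boundary inside the kzalloc'd buffer. A worked example with hypothetical numbers (assuming ARCH_KMALLOC_MINALIGN = 8, dma_get_cache_alignment() = 64, ivsize = 16, and a 200-byte edesc + descriptor + S/G region):

  aligned_size  = 200
  aligned_size  = ALIGN(200, 64)        = 256
  aligned_size += ~(8 - 1) & (64 - 1)   = 256 + 56 = 312
  aligned_size += ALIGN(16, 64)         = 312 + 64 = 376

The 56 bytes of slack cover the worst case in which kmalloc() returns memory that is only ARCH_KMALLOC_MINALIGN-aligned relative to a cache line, so PTR_ALIGN() can still advance to the next 64-byte boundary without running past the end of the buffer.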
@@ -1698,7 +1802,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
/* Make sure IV is located in a DMAable area */
if (ivsize) {
- iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes;
+ iv = skcipher_edesc_iv(edesc);
memcpy(iv, req->iv, ivsize);
iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);
@@ -1753,7 +1857,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
{
struct skcipher_request *req = skcipher_request_cast(areq);
- struct caam_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(crypto_skcipher_reqtfm(req));
struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
u32 *desc = rctx->edesc->hw_desc;
int ret;
@@ -1762,6 +1866,9 @@ static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
ret = caam_jr_enqueue(ctx->jrdev, desc, skcipher_crypt_done, req);
+ if (ret == -ENOSPC && engine->retry_support)
+ return ret;
+
if (ret != -EINPROGRESS) {
skcipher_unmap(ctx->jrdev, rctx->edesc, req);
kfree(rctx->edesc);
@@ -1784,12 +1891,13 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *jrdev = ctx->jrdev;
struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
int ret = 0;
+ int len;
/*
* XTS is expected to return an error even for input length = 0
@@ -1815,8 +1923,12 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
crypto_skcipher_decrypt(&rctx->fallback_req);
}
+ len = DESC_JOB_IO_LEN * CAAM_CMD_SZ;
+ if (ctx->is_blob)
+ len += CAAM_DESC_BYTES_MAX;
+
/* allocate extended descriptor */
- edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+ edesc = skcipher_edesc_alloc(req, len);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1859,7 +1971,28 @@ static int skcipher_decrypt(struct skcipher_request *req)
static struct caam_skcipher_alg driver_algs[] = {
{
- .skcipher = {
+ .skcipher.base = {
+ .base = {
+ .cra_name = "cbc(paes)",
+ .cra_driver_name = "cbc-paes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = paes_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE + CAAM_BLOB_OVERHEAD +
+ CAAM_PKEY_HEADER,
+ .max_keysize = AES_MAX_KEY_SIZE + CAAM_BLOB_OVERHEAD +
+ CAAM_PKEY_HEADER,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ {
+ .skcipher.base = {
.base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-caam",
@@ -1872,10 +2005,13 @@ static struct caam_skcipher_alg driver_algs[] = {
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-3des-caam",
@@ -1888,10 +2024,13 @@ static struct caam_skcipher_alg driver_algs[] = {
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "cbc(des)",
.cra_driver_name = "cbc-des-caam",
@@ -1904,10 +2043,13 @@ static struct caam_skcipher_alg driver_algs[] = {
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-caam",
@@ -1921,11 +2063,14 @@ static struct caam_skcipher_alg driver_algs[] = {
.ivsize = AES_BLOCK_SIZE,
.chunksize = AES_BLOCK_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "rfc3686(ctr(aes))",
.cra_driver_name = "rfc3686-ctr-aes-caam",
@@ -1941,6 +2086,9 @@ static struct caam_skcipher_alg driver_algs[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.chunksize = AES_BLOCK_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -1948,7 +2096,7 @@ static struct caam_skcipher_alg driver_algs[] = {
},
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-caam",
@@ -1962,10 +2110,13 @@ static struct caam_skcipher_alg driver_algs[] = {
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "ecb(des)",
.cra_driver_name = "ecb-des-caam",
@@ -1977,10 +2128,13 @@ static struct caam_skcipher_alg driver_algs[] = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB,
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-caam",
@@ -1992,10 +2146,13 @@ static struct caam_skcipher_alg driver_algs[] = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3-caam",
@@ -2007,13 +2164,16 @@ static struct caam_skcipher_alg driver_algs[] = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
},
};
static struct caam_aead_alg driver_aeads[] = {
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "rfc4106(gcm(aes))",
.cra_driver_name = "rfc4106-gcm-aes-caam",
@@ -2026,13 +2186,16 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
.nodkp = true,
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "rfc4543(gcm(aes))",
.cra_driver_name = "rfc4543-gcm-aes-caam",
@@ -2045,6 +2208,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = GCM_RFC4543_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
.nodkp = true,
@@ -2052,7 +2218,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
/* Galois Counter Mode */
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "gcm-aes-caam",
@@ -2065,6 +2231,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
.nodkp = true,
@@ -2072,7 +2241,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
/* single-pass ipsec_esp descriptor */
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(md5),"
"ecb(cipher_null))",
@@ -2087,13 +2256,16 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = NULL_IV_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha1),"
"ecb(cipher_null))",
@@ -2108,13 +2280,16 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha224),"
"ecb(cipher_null))",
@@ -2129,13 +2304,16 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha256),"
"ecb(cipher_null))",
@@ -2150,13 +2328,16 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha384),"
"ecb(cipher_null))",
@@ -2171,13 +2352,16 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha512),"
"ecb(cipher_null))",
@@ -2192,13 +2376,16 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(aes))",
.cra_driver_name = "authenc-hmac-md5-"
@@ -2212,6 +2399,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2219,7 +2409,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(md5),"
"cbc(aes)))",
@@ -2234,6 +2424,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2242,7 +2435,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha1-"
@@ -2256,6 +2449,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2263,7 +2459,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha1),"
"cbc(aes)))",
@@ -2278,6 +2474,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2286,7 +2485,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha224),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha224-"
@@ -2300,6 +2499,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2307,7 +2509,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha224),"
"cbc(aes)))",
@@ -2322,6 +2524,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2330,7 +2535,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha256-"
@@ -2344,6 +2549,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2351,7 +2559,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha256),"
"cbc(aes)))",
@@ -2366,6 +2574,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2374,7 +2585,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha384),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha384-"
@@ -2388,6 +2599,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2395,7 +2609,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha384),"
"cbc(aes)))",
@@ -2410,6 +2624,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2418,7 +2635,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha512),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha512-"
@@ -2432,6 +2649,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -2439,7 +2659,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha512),"
"cbc(aes)))",
@@ -2454,6 +2674,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -2462,7 +2685,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-md5-"
@@ -2476,6 +2699,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2483,7 +2709,7 @@ static struct caam_aead_alg driver_aeads[] = {
}
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(md5),"
"cbc(des3_ede)))",
@@ -2498,6 +2724,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2506,7 +2735,7 @@ static struct caam_aead_alg driver_aeads[] = {
}
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha1),"
"cbc(des3_ede))",
@@ -2521,6 +2750,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2528,7 +2760,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha1),"
"cbc(des3_ede)))",
@@ -2544,6 +2776,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2552,7 +2787,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha224),"
"cbc(des3_ede))",
@@ -2567,6 +2802,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2574,7 +2812,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha224),"
"cbc(des3_ede)))",
@@ -2590,6 +2828,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2598,7 +2839,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha256),"
"cbc(des3_ede))",
@@ -2613,6 +2854,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2620,7 +2864,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha256),"
"cbc(des3_ede)))",
@@ -2636,6 +2880,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2644,7 +2891,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha384),"
"cbc(des3_ede))",
@@ -2659,6 +2906,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2666,7 +2916,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha384),"
"cbc(des3_ede)))",
@@ -2682,6 +2932,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2690,7 +2943,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha512),"
"cbc(des3_ede))",
@@ -2705,6 +2958,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -2712,7 +2968,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha512),"
"cbc(des3_ede)))",
@@ -2728,6 +2984,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -2736,7 +2995,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(des))",
.cra_driver_name = "authenc-hmac-md5-"
@@ -2750,6 +3009,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2757,7 +3019,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(md5),"
"cbc(des)))",
@@ -2772,6 +3034,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2780,7 +3045,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(des))",
.cra_driver_name = "authenc-hmac-sha1-"
@@ -2794,6 +3059,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2801,7 +3069,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha1),"
"cbc(des)))",
@@ -2816,6 +3084,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2824,7 +3095,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha224),cbc(des))",
.cra_driver_name = "authenc-hmac-sha224-"
@@ -2838,6 +3109,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2845,7 +3119,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha224),"
"cbc(des)))",
@@ -2860,6 +3134,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2868,7 +3145,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(des))",
.cra_driver_name = "authenc-hmac-sha256-"
@@ -2882,6 +3159,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2889,7 +3169,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha256),"
"cbc(des)))",
@@ -2904,6 +3184,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2912,7 +3195,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha384),cbc(des))",
.cra_driver_name = "authenc-hmac-sha384-"
@@ -2926,6 +3209,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2933,7 +3219,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha384),"
"cbc(des)))",
@@ -2948,6 +3234,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2956,7 +3245,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha512),cbc(des))",
.cra_driver_name = "authenc-hmac-sha512-"
@@ -2970,6 +3259,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -2977,7 +3269,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha512),"
"cbc(des)))",
@@ -2992,6 +3284,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -3000,7 +3295,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(md5),"
"rfc3686(ctr(aes)))",
@@ -3015,6 +3310,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3024,7 +3322,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "seqiv(authenc("
"hmac(md5),rfc3686(ctr(aes))))",
@@ -3039,6 +3337,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3049,7 +3350,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha1),"
"rfc3686(ctr(aes)))",
@@ -3064,6 +3365,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3073,7 +3377,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "seqiv(authenc("
"hmac(sha1),rfc3686(ctr(aes))))",
@@ -3088,6 +3392,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3098,7 +3405,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha224),"
"rfc3686(ctr(aes)))",
@@ -3113,6 +3420,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3122,7 +3432,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "seqiv(authenc("
"hmac(sha224),rfc3686(ctr(aes))))",
@@ -3137,6 +3447,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3147,7 +3460,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha256),"
"rfc3686(ctr(aes)))",
@@ -3162,6 +3475,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3171,7 +3487,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "seqiv(authenc(hmac(sha256),"
"rfc3686(ctr(aes))))",
@@ -3186,6 +3502,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3196,7 +3515,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha384),"
"rfc3686(ctr(aes)))",
@@ -3211,6 +3530,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3220,7 +3542,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "seqiv(authenc(hmac(sha384),"
"rfc3686(ctr(aes))))",
@@ -3235,6 +3557,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3245,7 +3570,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha512),"
"rfc3686(ctr(aes)))",
@@ -3260,6 +3585,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3269,7 +3597,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "seqiv(authenc(hmac(sha512),"
"rfc3686(ctr(aes))))",
@@ -3284,6 +3612,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3294,7 +3625,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "rfc7539(chacha20,poly1305)",
.cra_driver_name = "rfc7539-chacha20-poly1305-"
@@ -3308,6 +3639,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CHACHAPOLY_IV_SIZE,
.maxauthsize = POLY1305_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
OP_ALG_AAI_AEAD,
@@ -3317,7 +3651,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "rfc7539esp(chacha20,poly1305)",
.cra_driver_name = "rfc7539esp-chacha20-"
@@ -3331,6 +3665,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = 8,
.maxauthsize = POLY1305_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
OP_ALG_AAI_AEAD,
@@ -3390,13 +3727,11 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
{
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct caam_skcipher_alg *caam_alg =
- container_of(alg, typeof(*caam_alg), skcipher);
- struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+ container_of(alg, typeof(*caam_alg), skcipher.base);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
int ret = 0;
- ctx->enginectx.op.do_one_request = skcipher_do_one_req;
-
if (alg_aai == OP_ALG_AAI_XTS) {
const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
struct crypto_skcipher *fallback;
@@ -3427,13 +3762,11 @@ static int caam_aead_init(struct crypto_aead *tfm)
{
struct aead_alg *alg = crypto_aead_alg(tfm);
struct caam_aead_alg *caam_alg =
- container_of(alg, struct caam_aead_alg, aead);
- struct caam_ctx *ctx = crypto_aead_ctx(tfm);
+ container_of(alg, struct caam_aead_alg, aead.base);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm);
crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx));
- ctx->enginectx.op.do_one_request = aead_do_one_req;
-
return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}
@@ -3448,7 +3781,7 @@ static void caam_exit_common(struct caam_ctx *ctx)
static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
if (ctx->fallback)
crypto_free_skcipher(ctx->fallback);
@@ -3457,7 +3790,7 @@ static void caam_cra_exit(struct crypto_skcipher *tfm)
static void caam_aead_exit(struct crypto_aead *tfm)
{
- caam_exit_common(crypto_aead_ctx(tfm));
+ caam_exit_common(crypto_aead_ctx_dma(tfm));
}
void caam_algapi_exit(void)
@@ -3468,24 +3801,24 @@ void caam_algapi_exit(void)
struct caam_aead_alg *t_alg = driver_aeads + i;
if (t_alg->registered)
- crypto_unregister_aead(&t_alg->aead);
+ crypto_engine_unregister_aead(&t_alg->aead);
}
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
struct caam_skcipher_alg *t_alg = driver_algs + i;
if (t_alg->registered)
- crypto_unregister_skcipher(&t_alg->skcipher);
+ crypto_engine_unregister_skcipher(&t_alg->skcipher);
}
}
static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
- struct skcipher_alg *alg = &t_alg->skcipher;
+ struct skcipher_alg *alg = &t_alg->skcipher.base;
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
- alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY);
@@ -3495,11 +3828,11 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
- struct aead_alg *alg = &t_alg->aead;
+ struct aead_alg *alg = &t_alg->aead.base;
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
- alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY;
@@ -3520,13 +3853,14 @@ int caam_algapi_init(struct device *ctrldev)
* First, detect presence and attributes of DES, AES, and MD blocks.
*/
if (priv->era < 10) {
+ struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;
u32 cha_vid, cha_inst, aes_rn;
- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+ cha_vid = rd_reg32(&perfmon->cha_id_ls);
aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+ cha_inst = rd_reg32(&perfmon->cha_num_ls);
des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
CHA_ID_LS_DES_SHIFT;
aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
@@ -3534,23 +3868,23 @@ int caam_algapi_init(struct device *ctrldev)
ccha_inst = 0;
ptha_inst = 0;
- aes_rn = rd_reg32(&priv->ctrl->perfmon.cha_rev_ls) &
- CHA_ID_LS_AES_MASK;
+ aes_rn = rd_reg32(&perfmon->cha_rev_ls) & CHA_ID_LS_AES_MASK;
gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8);
} else {
+ struct version_regs __iomem *vreg = &priv->jr[0]->vreg;
u32 aesa, mdha;
- aesa = rd_reg32(&priv->ctrl->vreg.aesa);
- mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+ aesa = rd_reg32(&vreg->aesa);
+ mdha = rd_reg32(&vreg->mdha);
aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
- des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
+ des_inst = rd_reg32(&vreg->desa) & CHA_VER_NUM_MASK;
aes_inst = aesa & CHA_VER_NUM_MASK;
md_inst = mdha & CHA_VER_NUM_MASK;
- ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
- ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
+ ccha_inst = rd_reg32(&vreg->ccha) & CHA_VER_NUM_MASK;
+ ptha_inst = rd_reg32(&vreg->ptha) & CHA_VER_NUM_MASK;
gcm_support = aesa & CHA_VER_MISC_AES_GCM;
}
@@ -3584,10 +3918,10 @@ int caam_algapi_init(struct device *ctrldev)
caam_skcipher_alg_init(t_alg);
- err = crypto_register_skcipher(&t_alg->skcipher);
+ err = crypto_engine_register_skcipher(&t_alg->skcipher);
if (err) {
pr_warn("%s alg registration failed\n",
- t_alg->skcipher.base.cra_driver_name);
+ t_alg->skcipher.base.base.cra_driver_name);
continue;
}
@@ -3631,15 +3965,15 @@ int caam_algapi_init(struct device *ctrldev)
* if MD or MD size is not supported by device.
*/
if (is_mdha(c2_alg_sel) &&
- (!md_inst || t_alg->aead.maxauthsize > md_limit))
+ (!md_inst || t_alg->aead.base.maxauthsize > md_limit))
continue;
caam_aead_alg_init(t_alg);
- err = crypto_register_aead(&t_alg->aead);
+ err = crypto_engine_register_aead(&t_alg->aead);
if (err) {
pr_warn("%s alg registration failed\n",
- t_alg->aead.base.cra_driver_name);
+ t_alg->aead.base.base.cra_driver_name);
continue;
}
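
The caamalg.c hunks above fold the request handler into the algorithm template itself (".aead.op.do_one_request" / ".skcipher.op") and register through the crypto_engine_register_aead()/crypto_engine_register_skcipher() helpers, while every context accessor moves to the *_ctx_dma() variant and cra_ctxsize gains crypto_dma_padding(). A minimal sketch of that shape, assuming the standard crypto_engine API; the example_* names are placeholders, not CAAM symbols:

	#include <crypto/engine.h>
	#include <crypto/internal/aead.h>
	#include <crypto/algapi.h>
	#include <linux/types.h>

	struct example_ctx { u8 key[64]; };	/* placeholder context */

	static int example_do_one_req(struct crypto_engine *engine, void *areq)
	{
		struct aead_request *req = aead_request_cast(areq);

		/* drive one request on the hardware, then complete it */
		aead_request_complete(req, 0);
		return 0;
	}

	static struct aead_engine_alg example_alg = {
		.base.base.cra_name	= "authenc(hmac(sha256),cbc(aes))",
		.base.ivsize		= 16,
		.base.maxauthsize	= 32,
		/* handler lives in the template now, not in cra_init() */
		.op.do_one_request	= example_do_one_req,
	};

	static int example_register(void)
	{
		/* pad the context so crypto_aead_ctx_dma() is DMA-aligned */
		example_alg.base.base.cra_ctxsize = sizeof(struct example_ctx) +
						    crypto_dma_padding();
		return crypto_engine_register_aead(&example_alg);
	}

example_register() would be called from the driver init path, mirroring what caam_aead_alg_init() plus crypto_engine_register_aead() do in the patch.
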
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 7571e1ac913b..04c1105eb1f5 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -2,12 +2,13 @@
/*
* Shared descriptors for aead, skcipher algorithms
*
- * Copyright 2016-2019 NXP
+ * Copyright 2016-2019, 2025 NXP
*/
#include "compat.h"
#include "desc_constr.h"
#include "caamalg_desc.h"
+#include <soc/fsl/caam-blob.h>
/*
* For aead functions, read payload and write payload,
@@ -1364,6 +1365,84 @@ static inline void skcipher_append_src_dst(u32 *desc)
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
+void cnstr_desc_skcipher_enc_dec(u32 * const desc, struct alginfo *cdata,
+ dma_addr_t src, dma_addr_t dst, unsigned int data_sz,
+ unsigned int in_options, unsigned int out_options,
+ unsigned int ivsize, const bool encrypt)
+{
+ u32 options = cdata->algtype | OP_ALG_AS_INIT;
+
+ if (encrypt)
+ options |= OP_ALG_ENCRYPT;
+ else
+ options |= OP_ALG_DECRYPT;
+
+ init_job_desc(desc, 0);
+
+ append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL |
+ JUMP_COND_NOP | JUMP_TEST_ALL | 1);
+
+ append_key(desc, cdata->protected_key_dma, cdata->plain_keylen,
+ CLASS_1 | KEY_DEST_CLASS_REG | cdata->key_cmd_opt);
+
+ append_seq_in_ptr(desc, src, data_sz, in_options);
+
+ append_seq_out_ptr(desc, dst, data_sz, out_options);
+
+ /* Load IV, if there is one */
+ if (ivsize)
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB);
+
+ append_operation(desc, options);
+
+ skcipher_append_src_dst(desc);
+
+ /* Store IV */
+ if (ivsize)
+ append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB);
+
+ print_hex_dump_debug("skcipher_enc_dec job desc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+}
+EXPORT_SYMBOL(cnstr_desc_skcipher_enc_dec);
+
+void cnstr_desc_protected_blob_decap(u32 * const desc, struct alginfo *cdata,
+ dma_addr_t next_desc_addr)
+{
+ u32 protected_store;
+
+ init_job_desc(desc, 0);
+
+ /* Load key modifier */
+ append_load_as_imm(desc, KEYMOD, sizeof(KEYMOD) - 1,
+ LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY);
+
+ append_seq_in_ptr_intlen(desc, cdata->key_dma,
+ cdata->plain_keylen + CAAM_BLOB_OVERHEAD, 0);
+
+ append_seq_out_ptr_intlen(desc, cdata->protected_key_dma,
+ cdata->plain_keylen, 0);
+
+ protected_store = OP_PCLID_BLOB | OP_PCL_BLOB_BLACK;
+ if ((cdata->key_cmd_opt >> KEY_EKT_OFFSET) & 1)
+ protected_store |= OP_PCL_BLOB_EKT;
+
+ append_operation(desc, OP_TYPE_DECAP_PROTOCOL | protected_store);
+
+ if (next_desc_addr) {
+ append_jump(desc, JUMP_TYPE_NONLOCAL | JUMP_TEST_ALL);
+ append_ptr(desc, next_desc_addr);
+ }
+
+ print_hex_dump_debug("protected blob decap job desc@" __stringify(__LINE__) ":",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+}
+EXPORT_SYMBOL(cnstr_desc_protected_blob_decap);
+
/**
* cnstr_shdsc_skcipher_encap - skcipher encapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
@@ -1391,7 +1470,8 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
/* Load class1 key only */
append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
- cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ cdata->plain_keylen, CLASS_1 | KEY_DEST_CLASS_REG
+ | cdata->key_cmd_opt);
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
@@ -1466,7 +1546,8 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
/* Load class1 key only */
append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
- cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ cdata->plain_keylen, CLASS_1 | KEY_DEST_CLASS_REG
+ | cdata->key_cmd_opt);
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index f2893393ba5e..323490a4a756 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -2,7 +2,7 @@
/*
* Shared descriptors for aead, skcipher algorithms
*
- * Copyright 2016 NXP
+ * Copyright 2016, 2025 NXP
*/
#ifndef _CAAMALG_DESC_H_
@@ -48,6 +48,9 @@
#define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \
16 * CAAM_CMD_SZ)
+/* Key modifier for CAAM Protected blobs */
+#define KEYMOD "SECURE_KEY"
+
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
unsigned int icvsize, int era);
@@ -113,4 +116,12 @@ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata);
void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata);
+void cnstr_desc_protected_blob_decap(u32 * const desc, struct alginfo *cdata,
+ dma_addr_t next_desc);
+
+void cnstr_desc_skcipher_enc_dec(u32 * const desc, struct alginfo *cdata,
+ dma_addr_t src, dma_addr_t dst, unsigned int data_sz,
+ unsigned int in_options, unsigned int out_options,
+ unsigned int ivsize, const bool encrypt);
+
#endif /* _CAAMALG_DESC_H_ */
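
The two constructors added to caamalg_desc.{c,h} are meant to be chained: cnstr_desc_protected_blob_decap() unwraps a protected (black) key blob from cdata->key_dma into cdata->protected_key_dma and, when given a next-descriptor address, appends a non-local jump to it; cnstr_desc_skcipher_enc_dec() then builds a one-shot job descriptor that loads the unwrapped key. A hedged caller-side sketch, with descriptor allocation, DMA mapping and job-ring submission elided; the plain_keylen/key_cmd_opt/protected_key_dma members are the alginfo additions this series relies on:

	#include <linux/types.h>
	#include "desc_constr.h"	/* struct alginfo */
	#include "caamalg_desc.h"

	static void example_build_chain(struct alginfo *cdata,
					u32 *decap_desc, u32 *cipher_desc,
					dma_addr_t cipher_desc_dma,
					dma_addr_t src_dma, dma_addr_t dst_dma,
					unsigned int len, unsigned int ivsize)
	{
		/* cipher job: key is read from cdata->protected_key_dma */
		cnstr_desc_skcipher_enc_dec(cipher_desc, cdata, src_dma, dst_dma,
					    len, 0, 0, ivsize, true);

		/* decap job: unwrap the blob, then jump to cipher_desc */
		cnstr_desc_protected_blob_decap(decap_desc, cdata, cipher_desc_dma);

		/* only decap_desc is enqueued; the jump runs cipher_desc next */
	}

Passing 0 for in_options/out_options assumes contiguous source and destination buffers (no scatter/gather flags set on the SEQ pointers).
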
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 189a7438b29c..65f6adb6c673 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -19,7 +19,12 @@
#include "jr.h"
#include "caamalg_desc.h"
#include <crypto/xts.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
/*
* crypto alg
@@ -81,7 +86,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
{
struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
typeof(*alg), aead);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
u32 ctx1_iv_off = 0;
u32 *nonce = NULL;
@@ -184,7 +189,7 @@ skip_givenc:
static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
ctx->authsize = authsize;
aead_set_sh_desc(authenc);
@@ -195,7 +200,7 @@ static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys;
@@ -299,7 +304,7 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
ctx->cdata.keylen;
@@ -342,7 +347,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
int err;
err = crypto_gcm_check_authsize(authsize);
@@ -358,7 +363,7 @@ static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
static int gcm_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
int ret;
@@ -402,7 +407,7 @@ static int gcm_setkey(struct crypto_aead *aead,
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
ctx->cdata.keylen;
@@ -446,7 +451,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
static int rfc4106_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
int err;
err = crypto_rfc4106_check_authsize(authsize);
@@ -462,7 +467,7 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc,
static int rfc4106_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
int ret;
@@ -510,7 +515,7 @@ static int rfc4106_setkey(struct crypto_aead *aead,
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
ctx->cdata.keylen;
@@ -554,7 +559,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
static int rfc4543_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
if (authsize != 16)
return -EINVAL;
@@ -568,7 +573,7 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
static int rfc4543_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
int ret;
@@ -617,7 +622,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen, const u32 ctx1_iv_off)
{
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct caam_skcipher_alg *alg =
container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
skcipher);
@@ -731,7 +736,7 @@ static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
int ret = 0;
@@ -915,7 +920,7 @@ static void aead_done(struct caam_drv_req *drv_req, u32 status)
struct aead_edesc *edesc;
struct aead_request *aead_req = drv_req->app_ctx;
struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
- struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *caam_ctx = crypto_aead_ctx_dma(aead);
int ecode = 0;
qidev = caam_ctx->qidev;
@@ -937,7 +942,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
typeof(*alg), aead);
struct device *qidev = ctx->qidev;
@@ -956,10 +961,10 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
if (IS_ERR(drv_ctx))
- return (struct aead_edesc *)drv_ctx;
+ return ERR_CAST(drv_ctx);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = qi_cache_alloc(GFP_DMA | flags);
+ edesc = qi_cache_alloc(flags);
if (unlikely(!edesc)) {
dev_err(qidev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
@@ -1157,7 +1162,7 @@ static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
int ret;
if (unlikely(caam_congested))
@@ -1202,12 +1207,18 @@ static int ipsec_gcm_decrypt(struct aead_request *req)
false);
}
+static inline u8 *skcipher_edesc_iv(struct skcipher_edesc *edesc)
+{
+ return PTR_ALIGN((u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
+ dma_get_cache_alignment());
+}
+
static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
{
struct skcipher_edesc *edesc;
struct skcipher_request *req = drv_req->app_ctx;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *caam_ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *qidev = caam_ctx->qidev;
int ivsize = crypto_skcipher_ivsize(skcipher);
int ecode = 0;
@@ -1234,8 +1245,7 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
* This is used e.g. by the CTS mode.
*/
if (!ecode)
- memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
- ivsize);
+ memcpy(req->iv, skcipher_edesc_iv(edesc), ivsize);
qi_cache_free(edesc);
skcipher_request_complete(req, ecode);
@@ -1245,7 +1255,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
bool encrypt)
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *qidev = ctx->qidev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
@@ -1257,10 +1267,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
struct qm_sg_entry *sg_table, *fd_sgt;
struct caam_drv_ctx *drv_ctx;
+ unsigned int len;
drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
if (IS_ERR(drv_ctx))
- return (struct skcipher_edesc *)drv_ctx;
+ return ERR_CAST(drv_ctx);
src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (unlikely(src_nents < 0)) {
@@ -1317,8 +1328,12 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
- if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
- ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+
+ len = offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes;
+ len = ALIGN(len, dma_get_cache_alignment());
+ len += ivsize;
+
+ if (unlikely(len > CAAM_QI_MEMCACHE_SIZE)) {
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
@@ -1327,7 +1342,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
}
/* allocate space for base edesc, link tables and IV */
- edesc = qi_cache_alloc(GFP_DMA | flags);
+ edesc = qi_cache_alloc(flags);
if (unlikely(!edesc)) {
dev_err(qidev, "could not allocate extended descriptor\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
@@ -1335,9 +1350,16 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
return ERR_PTR(-ENOMEM);
}
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+ edesc->drv_req.app_ctx = req;
+ edesc->drv_req.cbk = skcipher_done;
+ edesc->drv_req.drv_ctx = drv_ctx;
+
/* Make sure IV is located in a DMAable area */
sg_table = &edesc->sgt[0];
- iv = (u8 *)(sg_table + qm_sg_ents);
+ iv = skcipher_edesc_iv(edesc);
memcpy(iv, req->iv, ivsize);
iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
@@ -1349,13 +1371,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
return ERR_PTR(-ENOMEM);
}
- edesc->src_nents = src_nents;
- edesc->dst_nents = dst_nents;
edesc->iv_dma = iv_dma;
- edesc->qm_sg_bytes = qm_sg_bytes;
- edesc->drv_req.app_ctx = req;
- edesc->drv_req.cbk = skcipher_done;
- edesc->drv_req.drv_ctx = drv_ctx;
dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
@@ -1405,7 +1421,7 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
int ret;
@@ -2491,7 +2507,7 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct caam_skcipher_alg *caam_alg =
container_of(alg, typeof(*caam_alg), skcipher);
- struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
int ret = 0;
@@ -2524,7 +2540,7 @@ static int caam_aead_init(struct crypto_aead *tfm)
struct aead_alg *alg = crypto_aead_alg(tfm);
struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
aead);
- struct caam_ctx *ctx = crypto_aead_ctx(tfm);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm);
return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}
@@ -2542,7 +2558,7 @@ static void caam_exit_common(struct caam_ctx *ctx)
static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
if (ctx->fallback)
crypto_free_skcipher(ctx->fallback);
@@ -2551,7 +2567,7 @@ static void caam_cra_exit(struct crypto_skcipher *tfm)
static void caam_aead_exit(struct crypto_aead *tfm)
{
- caam_exit_common(crypto_aead_ctx(tfm));
+ caam_exit_common(crypto_aead_ctx_dma(tfm));
}
void caam_qi_algapi_exit(void)
@@ -2579,7 +2595,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
- alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY);
@@ -2593,7 +2609,7 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
- alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY;
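
With GFP_DMA dropped from the qi_cache allocations above, the per-request IV can no longer count on the allocator for DMA safety, so skcipher_edesc_alloc() now places it on a cache-line boundary after the S/G table (see skcipher_edesc_iv()) and sizes the whole descriptor accordingly. A small sketch of that bound check, with illustrative parameter names standing in for the driver's offsetof()/qm_sg_bytes values:

	#include <linux/align.h>
	#include <linux/dma-mapping.h>
	#include <linux/types.h>

	/* mirrors the check in skcipher_edesc_alloc(): header plus S/G table,
	 * then the IV rounded up to a cache line so dma_map_single() on the
	 * IV cannot share a cache line with CPU-owned data */
	static bool example_edesc_fits(unsigned int edesc_hdr,
				       unsigned int qm_sg_bytes,
				       unsigned int ivsize,
				       unsigned int memcache_size)
	{
		unsigned int len = edesc_hdr + qm_sg_bytes;

		len = ALIGN(len, dma_get_cache_alignment());	/* IV offset */
		len += ivsize;

		return len <= memcache_size;	/* CAAM_QI_MEMCACHE_SIZE here */
	}
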
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 8b8ed77d8715..107ccb2ade42 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -16,11 +16,14 @@
#include "caamalg_desc.h"
#include "caamhash_desc.h"
#include "dpseci-debugfs.h"
+#include <linux/dma-mapping.h>
#include <linux/fsl/mc.h>
+#include <linux/kernel.h>
+#include <linux/string_choices.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include <crypto/xts.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#define CAAM_CRA_PRIORITY 2000
@@ -29,7 +32,7 @@
SHA512_DIGEST_SIZE * 2)
/*
- * This is a a cache of buffers, from which the users of CAAM QI driver
+ * This is a cache of buffers, from which the users of CAAM QI driver
* can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
* NOTE: A more elegant solution would be to have some headroom in the frames
* being processed. This can be added by the dpaa2-eth driver. This would
@@ -134,12 +137,12 @@ static struct caam_request *to_caam_req(struct crypto_async_request *areq)
{
switch (crypto_tfm_alg_type(areq->tfm)) {
case CRYPTO_ALG_TYPE_SKCIPHER:
- return skcipher_request_ctx(skcipher_request_cast(areq));
+ return skcipher_request_ctx_dma(skcipher_request_cast(areq));
case CRYPTO_ALG_TYPE_AEAD:
- return aead_request_ctx(container_of(areq, struct aead_request,
- base));
+ return aead_request_ctx_dma(
+ container_of(areq, struct aead_request, base));
case CRYPTO_ALG_TYPE_AHASH:
- return ahash_request_ctx(ahash_request_cast(areq));
+ return ahash_request_ctx_dma(ahash_request_cast(areq));
default:
return ERR_PTR(-EINVAL);
}
@@ -171,7 +174,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
{
struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
typeof(*alg), aead);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct device *dev = ctx->dev;
struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
@@ -276,7 +279,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
ctx->authsize = authsize;
aead_set_sh_desc(authenc);
@@ -287,7 +290,7 @@ static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *dev = ctx->dev;
struct crypto_authenc_keys keys;
@@ -350,10 +353,10 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_request *req_ctx = aead_request_ctx(req);
+ struct caam_request *req_ctx = aead_request_ctx_dma(req);
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
typeof(*alg), aead);
struct device *dev = ctx->dev;
@@ -370,7 +373,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
struct dpaa2_sg_entry *sg_table;
/* allocate space for base edesc, link tables and IV */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (unlikely(!edesc)) {
dev_err(dev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
@@ -587,7 +590,7 @@ skip_out_fle:
static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct device *dev = ctx->dev;
struct caam_flc *flc;
@@ -620,7 +623,7 @@ static int chachapoly_set_sh_desc(struct crypto_aead *aead)
static int chachapoly_setauthsize(struct crypto_aead *aead,
unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
if (authsize != POLY1305_DIGEST_SIZE)
return -EINVAL;
@@ -632,14 +635,15 @@ static int chachapoly_setauthsize(struct crypto_aead *aead,
static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
if (keylen != CHACHA_KEY_SIZE + saltlen)
return -EINVAL;
- ctx->cdata.key_virt = key;
+ memcpy(ctx->key, key, keylen);
+ ctx->cdata.key_virt = ctx->key;
ctx->cdata.keylen = keylen - saltlen;
return chachapoly_set_sh_desc(aead);
@@ -647,7 +651,7 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *dev = ctx->dev;
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_flc *flc;
@@ -704,7 +708,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
int err;
err = crypto_gcm_check_authsize(authsize);
@@ -720,7 +724,7 @@ static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
static int gcm_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *dev = ctx->dev;
int ret;
@@ -739,7 +743,7 @@ static int gcm_setkey(struct crypto_aead *aead,
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *dev = ctx->dev;
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_flc *flc;
@@ -799,7 +803,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
static int rfc4106_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
int err;
err = crypto_rfc4106_check_authsize(authsize);
@@ -815,7 +819,7 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc,
static int rfc4106_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *dev = ctx->dev;
int ret;
@@ -840,7 +844,7 @@ static int rfc4106_setkey(struct crypto_aead *aead,
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *dev = ctx->dev;
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_flc *flc;
@@ -900,7 +904,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
static int rfc4543_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
if (authsize != 16)
return -EINVAL;
@@ -914,7 +918,7 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
static int rfc4543_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *dev = ctx->dev;
int ret;
@@ -940,7 +944,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen, const u32 ctx1_iv_off)
{
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct caam_skcipher_alg *alg =
container_of(crypto_skcipher_alg(skcipher),
struct caam_skcipher_alg, skcipher);
@@ -1059,7 +1063,7 @@ static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *dev = ctx->dev;
struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
struct caam_flc *flc;
@@ -1109,10 +1113,10 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_request *req_ctx = skcipher_request_ctx(req);
+ struct caam_request *req_ctx = skcipher_request_ctx_dma(req);
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *dev = ctx->dev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
@@ -1189,7 +1193,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
}
/* allocate space for base edesc, link tables and IV */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (unlikely(!edesc)) {
dev_err(dev, "could not allocate extended descriptor\n");
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
@@ -1286,7 +1290,7 @@ static void aead_encrypt_done(void *cbk_ctx, u32 status)
struct caam_request *req_ctx = to_caam_req(areq);
struct aead_edesc *edesc = req_ctx->edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
int ecode = 0;
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
@@ -1307,7 +1311,7 @@ static void aead_decrypt_done(void *cbk_ctx, u32 status)
struct caam_request *req_ctx = to_caam_req(areq);
struct aead_edesc *edesc = req_ctx->edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
int ecode = 0;
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
@@ -1324,8 +1328,8 @@ static int aead_encrypt(struct aead_request *req)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct caam_request *caam_req = aead_request_ctx(req);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
+ struct caam_request *caam_req = aead_request_ctx_dma(req);
int ret;
/* allocate extended descriptor */
@@ -1352,8 +1356,8 @@ static int aead_decrypt(struct aead_request *req)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct caam_request *caam_req = aead_request_ctx(req);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
+ struct caam_request *caam_req = aead_request_ctx_dma(req);
int ret;
/* allocate extended descriptor */
@@ -1392,7 +1396,7 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
struct skcipher_request *req = skcipher_request_cast(areq);
struct caam_request *req_ctx = to_caam_req(areq);
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct skcipher_edesc *edesc = req_ctx->edesc;
int ecode = 0;
int ivsize = crypto_skcipher_ivsize(skcipher);
@@ -1430,7 +1434,7 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
struct skcipher_request *req = skcipher_request_cast(areq);
struct caam_request *req_ctx = to_caam_req(areq);
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct skcipher_edesc *edesc = req_ctx->edesc;
int ecode = 0;
int ivsize = crypto_skcipher_ivsize(skcipher);
@@ -1474,8 +1478,8 @@ static int skcipher_encrypt(struct skcipher_request *req)
{
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
- struct caam_request *caam_req = skcipher_request_ctx(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
+ struct caam_request *caam_req = skcipher_request_ctx_dma(req);
struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
int ret;
@@ -1524,8 +1528,8 @@ static int skcipher_decrypt(struct skcipher_request *req)
{
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
- struct caam_request *caam_req = skcipher_request_ctx(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
+ struct caam_request *caam_req = skcipher_request_ctx_dma(req);
struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
int ret;
@@ -1603,7 +1607,7 @@ static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct caam_skcipher_alg *caam_alg =
container_of(alg, typeof(*caam_alg), skcipher);
- struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
int ret = 0;
@@ -1621,10 +1625,12 @@ static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
}
ctx->fallback = fallback;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request) +
- crypto_skcipher_reqsize(fallback));
+ crypto_skcipher_set_reqsize_dma(
+ tfm, sizeof(struct caam_request) +
+ crypto_skcipher_reqsize(fallback));
} else {
- crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
+ crypto_skcipher_set_reqsize_dma(tfm,
+ sizeof(struct caam_request));
}
ret = caam_cra_init(ctx, &caam_alg->caam, false);
@@ -1640,8 +1646,8 @@ static int caam_cra_init_aead(struct crypto_aead *tfm)
struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
aead);
- crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
- return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
+ crypto_aead_set_reqsize_dma(tfm, sizeof(struct caam_request));
+ return caam_cra_init(crypto_aead_ctx_dma(tfm), &caam_alg->caam,
!caam_alg->caam.nodkp);
}
@@ -1654,7 +1660,7 @@ static void caam_exit_common(struct caam_ctx *ctx)
static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
if (ctx->fallback)
crypto_free_skcipher(ctx->fallback);
@@ -1663,7 +1669,7 @@ static void caam_cra_exit(struct crypto_skcipher *tfm)
static void caam_cra_exit_aead(struct crypto_aead *tfm)
{
- caam_exit_common(crypto_aead_ctx(tfm));
+ caam_exit_common(crypto_aead_ctx_dma(tfm));
}
static struct caam_skcipher_alg driver_algs[] = {
@@ -3008,7 +3014,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
- alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY);
@@ -3022,7 +3028,7 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
- alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY;
@@ -3132,7 +3138,7 @@ static inline int ctx_map_to_qm_sg(struct device *dev,
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
struct caam_flc *flc;
@@ -3218,14 +3224,14 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
int ret = -ENOMEM;
struct dpaa2_fl_entry *in_fle, *out_fle;
- req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
+ req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL);
if (!req_ctx)
return -ENOMEM;
in_fle = &req_ctx->fd_flt[1];
out_fle = &req_ctx->fd_flt[0];
- flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
+ flc = kzalloc(sizeof(*flc), GFP_KERNEL);
if (!flc)
goto err_flc;
@@ -3305,7 +3311,7 @@ err_flc:
static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
unsigned int keylen)
{
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
unsigned int digestsize = crypto_ahash_digestsize(ahash);
int ret;
@@ -3314,7 +3320,13 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
if (keylen > blocksize) {
- hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ unsigned int aligned_len =
+ ALIGN(keylen, dma_get_cache_alignment());
+
+ if (aligned_len < keylen)
+ return -EOVERFLOW;
+
+ hashed_key = kmemdup(key, aligned_len, GFP_KERNEL);
if (!hashed_key)
return -ENOMEM;
ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
@@ -3356,7 +3368,7 @@ bad_free_key:
static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
if (edesc->src_nents)
dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
@@ -3376,7 +3388,7 @@ static inline void ahash_unmap_ctx(struct device *dev,
struct ahash_edesc *edesc,
struct ahash_request *req, u32 flag)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
if (state->ctx_dma) {
dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
@@ -3390,9 +3402,9 @@ static void ahash_done(void *cbk_ctx, u32 status)
struct crypto_async_request *areq = cbk_ctx;
struct ahash_request *req = ahash_request_cast(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct ahash_edesc *edesc = state->caam_req.edesc;
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
int ecode = 0;
@@ -3409,7 +3421,7 @@ static void ahash_done(void *cbk_ctx, u32 status)
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
}
static void ahash_done_bi(void *cbk_ctx, u32 status)
@@ -3417,9 +3429,9 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
struct crypto_async_request *areq = cbk_ctx;
struct ahash_request *req = ahash_request_cast(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct ahash_edesc *edesc = state->caam_req.edesc;
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int ecode = 0;
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
@@ -3447,7 +3459,7 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
crypto_ahash_digestsize(ahash), 1);
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
}
static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
@@ -3455,9 +3467,9 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
struct crypto_async_request *areq = cbk_ctx;
struct ahash_request *req = ahash_request_cast(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct ahash_edesc *edesc = state->caam_req.edesc;
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
int ecode = 0;
@@ -3474,7 +3486,7 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
}
static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
@@ -3482,9 +3494,9 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
struct crypto_async_request *areq = cbk_ctx;
struct ahash_request *req = ahash_request_cast(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct ahash_edesc *edesc = state->caam_req.edesc;
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int ecode = 0;
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
@@ -3512,14 +3524,14 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
crypto_ahash_digestsize(ahash), 1);
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
}
static int ahash_update_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
@@ -3558,7 +3570,7 @@ static int ahash_update_ctx(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents,
DMA_TO_DEVICE);
@@ -3637,8 +3649,8 @@ unmap_ctx:
static int ahash_final_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
@@ -3652,7 +3664,7 @@ static int ahash_final_ctx(struct ahash_request *req)
int ret;
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc)
return -ENOMEM;
@@ -3708,8 +3720,8 @@ unmap_ctx:
static int ahash_finup_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
@@ -3741,7 +3753,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
@@ -3802,8 +3814,8 @@ unmap_ctx:
static int ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
@@ -3834,7 +3846,7 @@ static int ahash_digest(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
return ret;
@@ -3897,8 +3909,8 @@ unmap:
static int ahash_final_no_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
@@ -3911,7 +3923,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
int ret = -ENOMEM;
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc)
return ret;
@@ -3970,8 +3982,8 @@ unmap:
static int ahash_update_no_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
@@ -4010,7 +4022,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents,
DMA_TO_DEVICE);
@@ -4091,8 +4103,8 @@ unmap_ctx:
static int ahash_finup_no_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
@@ -4123,7 +4135,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
return ret;
@@ -4187,8 +4199,8 @@ unmap:
static int ahash_update_first(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
@@ -4228,7 +4240,7 @@ static int ahash_update_first(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents,
DMA_TO_DEVICE);
@@ -4320,7 +4332,7 @@ static int ahash_finup_first(struct ahash_request *req)
static int ahash_init(struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
state->update = ahash_update_first;
state->finup = ahash_finup_first;
@@ -4337,28 +4349,28 @@ static int ahash_init(struct ahash_request *req)
static int ahash_update(struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
return state->update(req);
}
static int ahash_finup(struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
return state->finup(req);
}
static int ahash_final(struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
return state->final(req);
}
static int ahash_export(struct ahash_request *req, void *out)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_export_state *export = out;
u8 *buf = state->buf;
int len = state->buflen;
@@ -4375,7 +4387,7 @@ static int ahash_export(struct ahash_request *req, void *out)
static int ahash_import(struct ahash_request *req, const void *in)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
const struct caam_export_state *export = in;
memset(state, 0, sizeof(*state));
@@ -4534,6 +4546,7 @@ struct caam_hash_alg {
struct list_head entry;
struct device *dev;
int alg_type;
+ bool is_hmac;
struct ahash_alg ahash_alg;
};
@@ -4547,7 +4560,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
container_of(halg, struct ahash_alg, halg);
struct caam_hash_alg *caam_hash =
container_of(alg, struct caam_hash_alg, ahash_alg);
- struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
HASH_MSG_LEN + SHA1_DIGEST_SIZE,
@@ -4560,7 +4573,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
ctx->dev = caam_hash->dev;
- if (alg->setkey) {
+ if (caam_hash->is_hmac) {
ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
ARRAY_SIZE(ctx->key),
DMA_TO_DEVICE,
@@ -4594,19 +4607,18 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
OP_ALG_ALGSEL_SUBMASK) >>
OP_ALG_ALGSEL_SHIFT];
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct caam_hash_state));
+ crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));
/*
* For keyed hash algorithms shared descriptors
* will be created later in setkey() callback
*/
- return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
+ return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
- struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
@@ -4636,17 +4648,19 @@ static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
template->hmac_name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->hmac_driver_name);
+ t_alg->is_hmac = true;
} else {
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
template->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
t_alg->ahash_alg.setkey = NULL;
+ t_alg->is_hmac = false;
}
alg->cra_module = THIS_MODULE;
alg->cra_init = caam_hash_cra_init;
alg->cra_exit = caam_hash_cra_exit;
- alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
+ alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
alg->cra_priority = CAAM_CRA_PRIORITY;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
@@ -4925,6 +4939,7 @@ static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
{
struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
struct device *dev = priv->dev;
+ unsigned int alignmask;
int err;
/*
@@ -4935,13 +4950,14 @@ static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
!(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
return 0;
- priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
- GFP_KERNEL | GFP_DMA);
+ alignmask = DPAA2_CSCN_ALIGN - 1;
+ alignmask |= dma_get_cache_alignment() - 1;
+ priv->cscn_mem = kzalloc(ALIGN(DPAA2_CSCN_SIZE, alignmask + 1),
+ GFP_KERNEL);
if (!priv->cscn_mem)
return -ENOMEM;
- priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
- priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
+ priv->cscn_dma = dma_map_single(dev, priv->cscn_mem,
DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, priv->cscn_dma)) {
dev_err(dev, "Error mapping CSCN memory area\n");
@@ -4975,14 +4991,30 @@ err_dma_map:
return err;
}
+static void free_dpaa2_pcpu_netdev(struct dpaa2_caam_priv *priv, const cpumask_t *cpus)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int i;
+
+ for_each_cpu(i, cpus) {
+ ppriv = per_cpu_ptr(priv->ppriv, i);
+ free_netdev(ppriv->net_dev);
+ }
+}
+
static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
{
struct device *dev = &ls_dev->dev;
struct dpaa2_caam_priv *priv;
struct dpaa2_caam_priv_per_cpu *ppriv;
+ cpumask_var_t clean_mask;
int err, cpu;
u8 i;
+ err = -ENOMEM;
+ if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL))
+ goto err_cpumask;
+
priv = dev_get_drvdata(dev);
priv->dev = dev;
@@ -5081,19 +5113,32 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
priv->rx_queue_attr[j].fqid,
priv->tx_queue_attr[j].fqid);
- ppriv->net_dev.dev = *dev;
- INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
- netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
- DPAA2_CAAM_NAPI_WEIGHT);
+ ppriv->net_dev = alloc_netdev_dummy(0);
+ if (!ppriv->net_dev) {
+ err = -ENOMEM;
+ goto err_alloc_netdev;
+ }
+ cpumask_set_cpu(cpu, clean_mask);
+ ppriv->net_dev->dev = *dev;
+
+ netif_napi_add_tx_weight(ppriv->net_dev, &ppriv->napi,
+ dpaa2_dpseci_poll,
+ DPAA2_CAAM_NAPI_WEIGHT);
}
- return 0;
+ err = 0;
+ goto free_cpumask;
+err_alloc_netdev:
+ free_dpaa2_pcpu_netdev(priv, clean_mask);
err_get_rx_queue:
dpaa2_dpseci_congestion_free(priv);
err_get_vers:
dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
err_open:
+free_cpumask:
+ free_cpumask_var(clean_mask);
+err_cpumask:
return err;
}
@@ -5131,12 +5176,13 @@ static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
return err;
}
- dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
+ dev_dbg(dev, "disable: %s\n", str_false_true(enabled));
for (i = 0; i < priv->num_pairs; i++) {
ppriv = per_cpu_ptr(priv->ppriv, i);
napi_disable(&ppriv->napi);
netif_napi_del(&ppriv->napi);
+ free_netdev(ppriv->net_dev);
}
return 0;
@@ -5172,7 +5218,7 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
priv->domain = iommu_get_domain_for_dev(dev);
qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
- 0, SLAB_CACHE_DMA, NULL);
+ 0, 0, NULL);
if (!qi_cache) {
dev_err(dev, "Can't allocate SEC cache\n");
return -ENOMEM;
@@ -5390,7 +5436,7 @@ err_dma_mask:
return err;
}
-static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
+static void __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
{
struct device *dev;
struct dpaa2_caam_priv *priv;
@@ -5431,8 +5477,6 @@ static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
free_percpu(priv->ppriv);
fsl_mc_portal_free(priv->mc_io);
kmem_cache_destroy(qi_cache);
-
- return 0;
}
int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
@@ -5449,7 +5493,7 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
DPAA2_CSCN_SIZE,
DMA_FROM_DEVICE);
- if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
+ if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
dev_dbg_ratelimited(dev, "Dropping request\n");
return -EBUSY;
}
@@ -5470,7 +5514,7 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
dpaa2_fd_set_flc(&fd, req->flc_dma);
- ppriv = this_cpu_ptr(priv->ppriv);
+ ppriv = raw_cpu_ptr(priv->ppriv);
for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
&fd);
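As an illustration of the dummy-netdev pattern the caamalg_qi2.c hunks above move to (this is not part of the patch; the example_* names and the bare poll callback are hypothetical), a minimal NAPI setup/teardown sketch looks like:

        #include <linux/netdevice.h>

        static int example_napi_setup(struct napi_struct *napi,
                                      struct net_device **dummy,
                                      int (*poll)(struct napi_struct *, int))
        {
                /* one dummy netdev per polling context, only to host the NAPI instance */
                *dummy = alloc_netdev_dummy(0);         /* no private area needed */
                if (!*dummy)
                        return -ENOMEM;

                netif_napi_add_tx_weight(*dummy, napi, poll, NAPI_POLL_WEIGHT);
                napi_enable(napi);
                return 0;
        }

        static void example_napi_teardown(struct napi_struct *napi,
                                          struct net_device *dummy)
        {
                /* mirrors the error and remove paths added in the hunks above */
                napi_disable(napi);
                netif_napi_del(napi);
                free_netdev(dummy);
        }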
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index d35253407ade..61d1219a202f 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -7,13 +7,14 @@
#ifndef _CAAMALG_QI2_H_
#define _CAAMALG_QI2_H_
+#include <crypto/internal/skcipher.h>
+#include <linux/compiler_attributes.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include <linux/threads.h>
#include <linux/netdevice.h>
#include "dpseci.h"
#include "desc_constr.h"
-#include <crypto/skcipher.h>
#define DPAA2_CAAM_STORE_SIZE 16
/* NAPI weight *must* be a multiple of the store size. */
@@ -36,8 +37,6 @@
* @tx_queue_attr: array of Tx queue attributes
* @cscn_mem: pointer to memory region containing the congestion SCN
* it's size is larger than to accommodate alignment
- * @cscn_mem_aligned: pointer to congestion SCN; it is computed as
- * PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
* @cscn_dma: dma address used by the QMAN to write CSCN messages
* @dev: device associated with the DPSECI object
* @mc_io: pointer to MC portal's I/O object
@@ -58,7 +57,6 @@ struct dpaa2_caam_priv {
/* congestion */
void *cscn_mem;
- void *cscn_mem_aligned;
dma_addr_t cscn_dma;
struct device *dev;
@@ -83,7 +81,7 @@ struct dpaa2_caam_priv {
*/
struct dpaa2_caam_priv_per_cpu {
struct napi_struct napi;
- struct net_device net_dev;
+ struct net_device *net_dev;
int req_fqid;
int rsp_fqid;
int prio;
@@ -158,7 +156,7 @@ struct ahash_edesc {
struct caam_flc {
u32 flc[16];
u32 sh_desc[MAX_SDLEN];
-} ____cacheline_aligned;
+} __aligned(CRYPTO_DMA_ALIGN);
enum optype {
ENCRYPT = 0,
@@ -180,7 +178,7 @@ enum optype {
* @edesc: extended descriptor; points to one of {skcipher,aead}_edesc
*/
struct caam_request {
- struct dpaa2_fl_entry fd_flt[2];
+ struct dpaa2_fl_entry fd_flt[2] __aligned(CRYPTO_DMA_ALIGN);
dma_addr_t fd_flt_dma;
struct caam_flc *flc;
dma_addr_t flc_dma;
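The __aligned(CRYPTO_DMA_ALIGN) annotations in this header follow the same idea as dropping SLAB_CACHE_DMA elsewhere in the series: data the accelerator reads in place should start on an ARCH_DMA_MINALIGN boundary rather than relying on a DMA-zone allocation. A toy illustration (struct and field names are made up, not from the driver):

        #include <linux/crypto.h>

        struct example_job {
                /* mapped with dma_map_single() and read by the device */
                u32 hw_words[16] __aligned(CRYPTO_DMA_ALIGN);
                /* CPU-side bookkeeping kept outside the mapped region */
                int status;
        };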
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index e8a6d8bc43b5..25c02e267258 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for ahash functions of crypto API
*
* Copyright 2011 Freescale Semiconductor, Inc.
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2023 NXP
*
* Based on caamalg.c crypto API driver.
*
@@ -65,7 +65,13 @@
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
-#include <crypto/engine.h>
+#include <crypto/internal/engine.h>
+#include <crypto/internal/hash.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#define CAAM_CRA_PRIORITY 3000
@@ -87,7 +93,6 @@ static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
- struct crypto_engine_ctx enginectx;
u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
@@ -199,7 +204,7 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev,
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
@@ -255,7 +260,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev;
u32 *desc;
@@ -307,7 +312,7 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev;
u32 *desc;
@@ -365,11 +370,9 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
dma_addr_t key_dma;
int ret;
- desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
- if (!desc) {
- dev_err(jrdev, "unable to allocate key input memory\n");
+ desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL);
+ if (!desc)
return -ENOMEM;
- }
init_job_desc(desc, 0);
@@ -421,7 +424,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
static int ahash_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
struct device *jrdev = ctx->jrdev;
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
int digestsize = crypto_ahash_digestsize(ahash);
@@ -432,7 +435,13 @@ static int ahash_setkey(struct crypto_ahash *ahash,
dev_dbg(jrdev, "keylen %d\n", keylen);
if (keylen > blocksize) {
- hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ unsigned int aligned_len =
+ ALIGN(keylen, dma_get_cache_alignment());
+
+ if (aligned_len < keylen)
+ return -EOVERFLOW;
+
+ hashed_key = kmemdup(key, keylen, GFP_KERNEL);
if (!hashed_key)
return -ENOMEM;
ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
@@ -484,7 +493,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
unsigned int keylen)
{
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
struct device *jrdev = ctx->jrdev;
if (keylen != AES_KEYSIZE_128)
@@ -504,7 +513,7 @@ static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
unsigned int keylen)
{
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int err;
err = aes_check_keylen(keylen);
@@ -543,7 +552,7 @@ static inline void ahash_unmap(struct device *dev,
struct ahash_edesc *edesc,
struct ahash_request *req, int dst_len)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
if (edesc->src_nents)
dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
@@ -563,7 +572,7 @@ static inline void ahash_unmap_ctx(struct device *dev,
struct ahash_edesc *edesc,
struct ahash_request *req, int dst_len, u32 flag)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
if (state->ctx_dma) {
dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
@@ -580,8 +589,8 @@ static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
int digestsize = crypto_ahash_digestsize(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int ecode = 0;
bool has_bklog;
@@ -606,7 +615,7 @@ static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
* by CAAM, not crypto engine.
*/
if (!has_bklog)
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
else
crypto_finalize_hash_request(jrp->engine, req, ecode);
}
@@ -630,8 +639,8 @@ static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
int digestsize = crypto_ahash_digestsize(ahash);
int ecode = 0;
bool has_bklog;
@@ -668,7 +677,7 @@ static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
* by CAAM, not crypto engine.
*/
if (!has_bklog)
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
else
crypto_finalize_hash_request(jrp->engine, req, ecode);
@@ -694,19 +703,15 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
int sg_num, u32 *sh_desc,
dma_addr_t sh_desc_dma)
{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
struct ahash_edesc *edesc;
- unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
- edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
- if (!edesc) {
- dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
+ sg_num = pad_sg_nents(sg_num);
+ edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
+ if (!edesc)
return NULL;
- }
state->edesc = edesc;
@@ -755,8 +760,8 @@ static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
{
struct ahash_request *req = ahash_request_cast(areq);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(crypto_ahash_reqtfm(req));
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct device *jrdev = ctx->jrdev;
u32 *desc = state->edesc->hw_desc;
int ret;
@@ -765,6 +770,9 @@ static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);
+ if (ret == -ENOSPC && engine->retry_support)
+ return ret;
+
if (ret != -EINPROGRESS) {
ahash_unmap(jrdev, state->edesc, req, 0);
kfree(state->edesc);
@@ -782,7 +790,7 @@ static int ahash_enqueue_req(struct device *jrdev,
int dst_len, enum dma_data_direction dir)
{
struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct ahash_edesc *edesc = state->edesc;
u32 *desc = edesc->hw_desc;
int ret;
@@ -812,8 +820,8 @@ static int ahash_enqueue_req(struct device *jrdev,
static int ahash_update_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct device *jrdev = ctx->jrdev;
u8 *buf = state->buf;
int *buflen = &state->buflen;
@@ -937,8 +945,8 @@ unmap_ctx:
static int ahash_final_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct device *jrdev = ctx->jrdev;
int buflen = state->buflen;
u32 *desc;
@@ -998,8 +1006,8 @@ static int ahash_final_ctx(struct ahash_request *req)
static int ahash_finup_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct device *jrdev = ctx->jrdev;
int buflen = state->buflen;
u32 *desc;
@@ -1072,8 +1080,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
static int ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int digestsize = crypto_ahash_digestsize(ahash);
@@ -1139,8 +1147,8 @@ static int ahash_digest(struct ahash_request *req)
static int ahash_final_no_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct device *jrdev = ctx->jrdev;
u8 *buf = state->buf;
int buflen = state->buflen;
@@ -1188,8 +1196,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
static int ahash_update_no_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct device *jrdev = ctx->jrdev;
u8 *buf = state->buf;
int *buflen = &state->buflen;
@@ -1309,8 +1317,8 @@ static int ahash_update_no_ctx(struct ahash_request *req)
static int ahash_finup_no_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct device *jrdev = ctx->jrdev;
int buflen = state->buflen;
u32 *desc;
@@ -1385,8 +1393,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
static int ahash_update_first(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct device *jrdev = ctx->jrdev;
u8 *buf = state->buf;
int *buflen = &state->buflen;
@@ -1495,7 +1503,7 @@ static int ahash_finup_first(struct ahash_request *req)
static int ahash_init(struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
state->update = ahash_update_first;
state->finup = ahash_finup_first;
@@ -1512,28 +1520,28 @@ static int ahash_init(struct ahash_request *req)
static int ahash_update(struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
return state->update(req);
}
static int ahash_finup(struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
return state->finup(req);
}
static int ahash_final(struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
return state->final(req);
}
static int ahash_export(struct ahash_request *req, void *out)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_export_state *export = out;
u8 *buf = state->buf;
int len = state->buflen;
@@ -1550,7 +1558,7 @@ static int ahash_export(struct ahash_request *req, void *out)
static int ahash_import(struct ahash_request *req, const void *in)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
const struct caam_export_state *export = in;
memset(state, 0, sizeof(*state));
@@ -1746,7 +1754,8 @@ static struct caam_hash_template driver_hash[] = {
struct caam_hash_alg {
struct list_head entry;
int alg_type;
- struct ahash_alg ahash_alg;
+ bool is_hmac;
+ struct ahash_engine_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
@@ -1758,8 +1767,8 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
struct ahash_alg *alg =
container_of(halg, struct ahash_alg, halg);
struct caam_hash_alg *caam_hash =
- container_of(alg, struct caam_hash_alg, ahash_alg);
- struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ container_of(alg, struct caam_hash_alg, ahash_alg.base);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
HASH_MSG_LEN + SHA1_DIGEST_SIZE,
@@ -1797,7 +1806,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
} else {
if (priv->era >= 6) {
ctx->dir = DMA_BIDIRECTIONAL;
- ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
+ ctx->key_dir = caam_hash->is_hmac ? DMA_TO_DEVICE : DMA_NONE;
} else {
ctx->dir = DMA_TO_DEVICE;
ctx->key_dir = DMA_NONE;
@@ -1849,21 +1858,18 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
sh_desc_digest) -
sh_desc_update_offset;
- ctx->enginectx.op.do_one_request = ahash_do_one_req;
-
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct caam_hash_state));
+ crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));
/*
* For keyed hash algorithms shared descriptors
* will be created later in setkey() callback
*/
- return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
+ return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
- struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
offsetof(struct caam_hash_ctx, key) -
@@ -1884,7 +1890,7 @@ void caam_algapi_hash_exit(void)
return;
list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
- crypto_unregister_ahash(&t_alg->ahash_alg);
+ crypto_engine_unregister_ahash(&t_alg->ahash_alg);
list_del(&t_alg->entry);
kfree(t_alg);
}
@@ -1899,13 +1905,11 @@ caam_hash_alloc(struct caam_hash_template *template,
struct crypto_alg *alg;
t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
- if (!t_alg) {
- pr_err("failed to allocate t_alg\n");
+ if (!t_alg)
return ERR_PTR(-ENOMEM);
- }
- t_alg->ahash_alg = template->template_ahash;
- halg = &t_alg->ahash_alg;
+ t_alg->ahash_alg.base = template->template_ahash;
+ halg = &t_alg->ahash_alg.base;
alg = &halg->halg.base;
if (keyed) {
@@ -1913,23 +1917,26 @@ caam_hash_alloc(struct caam_hash_template *template,
template->hmac_name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->hmac_driver_name);
+ t_alg->is_hmac = true;
} else {
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
template->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
- t_alg->ahash_alg.setkey = NULL;
+ halg->setkey = NULL;
+ t_alg->is_hmac = false;
}
alg->cra_module = THIS_MODULE;
alg->cra_init = caam_hash_cra_init;
alg->cra_exit = caam_hash_cra_exit;
- alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
+ alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
alg->cra_priority = CAAM_CRA_PRIORITY;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
t_alg->alg_type = template->alg_type;
+ t_alg->ahash_alg.op.do_one_request = ahash_do_one_req;
return t_alg;
}
@@ -1946,12 +1953,14 @@ int caam_algapi_hash_init(struct device *ctrldev)
* presence and attributes of MD block.
*/
if (priv->era < 10) {
- md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
+ struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;
+
+ md_vid = (rd_reg32(&perfmon->cha_id_ls) &
CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
- md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+ md_inst = (rd_reg32(&perfmon->cha_num_ls) &
CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
} else {
- u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+ u32 mdha = rd_reg32(&priv->jr[0]->vreg.mdha);
md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
md_inst = mdha & CHA_VER_NUM_MASK;
@@ -1989,10 +1998,10 @@ int caam_algapi_hash_init(struct device *ctrldev)
continue;
}
- err = crypto_register_ahash(&t_alg->ahash_alg);
+ err = crypto_engine_register_ahash(&t_alg->ahash_alg);
if (err) {
pr_warn("%s alg registration failed: %d\n",
- t_alg->ahash_alg.halg.base.cra_driver_name,
+ t_alg->ahash_alg.base.halg.base.cra_driver_name,
err);
kfree(t_alg);
} else
@@ -2009,10 +2018,10 @@ int caam_algapi_hash_init(struct device *ctrldev)
continue;
}
- err = crypto_register_ahash(&t_alg->ahash_alg);
+ err = crypto_engine_register_ahash(&t_alg->ahash_alg);
if (err) {
pr_warn("%s alg registration failed: %d\n",
- t_alg->ahash_alg.halg.base.cra_driver_name,
+ t_alg->ahash_alg.base.halg.base.cra_driver_name,
err);
kfree(t_alg);
} else
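For context on the crypto_engine conversion in caamhash.c above: the request handler moves from the per-tfm crypto_engine_ctx into the algorithm template itself. A minimal sketch, not the driver's actual code (the example_* names are hypothetical, and the handler here completes immediately only for illustration):

        #include <crypto/engine.h>
        #include <crypto/internal/engine.h>
        #include <crypto/internal/hash.h>

        static int example_do_one_req(struct crypto_engine *engine, void *areq)
        {
                struct ahash_request *req = ahash_request_cast(areq);
                int err = 0;    /* program and kick the hardware for req here */

                /* a real driver calls this from its completion/IRQ path instead */
                crypto_finalize_hash_request(engine, req, err);
                return 0;
        }

        static struct ahash_engine_alg example_alg = {
                .base.halg.base.cra_name        = "sha256",
                .base.halg.base.cra_driver_name = "sha256-example",
                .base.halg.digestsize           = 32,
                .op.do_one_request              = example_do_one_req,
        };

        /* registered/unregistered with crypto_engine_register_ahash() and
         * crypto_engine_unregister_ahash(), as in the hunks above
         */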
diff --git a/drivers/crypto/caam/caamhash_desc.c b/drivers/crypto/caam/caamhash_desc.c
index 78383d77da99..619564509936 100644
--- a/drivers/crypto/caam/caamhash_desc.c
+++ b/drivers/crypto/caam/caamhash_desc.c
@@ -22,7 +22,7 @@
* @ctx_len: size of Context Register
* @import_ctx: true if previous Context Register needs to be restored
* must be true for ahash update and final
- * must be false for for ahash first and digest
+ * must be false for ahash first and digest
* @era: SEC Era
*/
void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index e313233ec6de..cb001aa1de66 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for Public Key Cryptography
*
* Copyright 2016 Freescale Semiconductor, Inc.
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2023 NXP
*
* There is no Shared Descriptor for PKC so that the Job Descriptor must carry
* all the desired key parameters, input and output pointers.
@@ -16,6 +16,12 @@
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"
+#include <crypto/internal/engine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
@@ -36,7 +42,7 @@ static u8 *zero_buffer;
static bool init_done;
struct caam_akcipher_alg {
- struct akcipher_alg akcipher;
+ struct akcipher_engine_alg akcipher;
bool registered;
};
@@ -57,7 +63,7 @@ static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
@@ -69,7 +75,7 @@ static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
@@ -81,7 +87,7 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
size_t p_sz = key->p_sz;
@@ -98,7 +104,7 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
size_t p_sz = key->p_sz;
@@ -149,7 +155,7 @@ static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
struct akcipher_request *req = context;
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
struct rsa_edesc *edesc;
@@ -223,7 +229,9 @@ static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
if (len && *buff)
break;
- sg_miter_next(&miter);
+ if (!sg_miter_next(&miter))
+ break;
+
buff = miter.addr;
len = miter.length;
@@ -242,7 +250,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
size_t desclen)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct device *dev = ctx->dev;
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
struct caam_rsa_key *key = &ctx->key;
@@ -310,8 +318,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc, hw desc commands and link tables */
- edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
- GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, flags);
if (!edesc)
goto dst_fail;
@@ -371,7 +378,7 @@ static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
base);
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct device *jrdev = ctx->dev;
u32 *desc = req_ctx->edesc->hw_desc;
int ret;
@@ -380,6 +387,9 @@ static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);
+ if (ret == -ENOSPC && engine->retry_support)
+ return ret;
+
if (ret != -EINPROGRESS) {
rsa_pub_unmap(jrdev, req_ctx->edesc, req);
rsa_io_unmap(jrdev, req_ctx->edesc, req);
@@ -396,7 +406,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct device *dev = ctx->dev;
struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
@@ -441,7 +451,7 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
struct rsa_edesc *edesc)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct device *dev = ctx->dev;
struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
@@ -488,7 +498,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
struct rsa_edesc *edesc)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct device *dev = ctx->dev;
struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
@@ -565,7 +575,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
struct rsa_edesc *edesc)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct device *dev = ctx->dev;
struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
@@ -661,7 +671,7 @@ static int akcipher_enqueue_req(struct device *jrdev,
{
struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
struct rsa_edesc *edesc = req_ctx->edesc;
@@ -704,7 +714,7 @@ static int akcipher_enqueue_req(struct device *jrdev,
static int caam_rsa_enc(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct device *jrdev = ctx->dev;
struct rsa_edesc *edesc;
@@ -743,7 +753,7 @@ init_fail:
static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct device *jrdev = ctx->dev;
struct rsa_edesc *edesc;
int ret;
@@ -772,7 +782,7 @@ init_fail:
static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct device *jrdev = ctx->dev;
struct rsa_edesc *edesc;
int ret;
@@ -801,7 +811,7 @@ init_fail:
static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct device *jrdev = ctx->dev;
struct rsa_edesc *edesc;
int ret;
@@ -830,7 +840,7 @@ init_fail:
static int caam_rsa_dec(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
int ret;
@@ -895,7 +905,7 @@ static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
if (!nbytes)
return NULL;
- dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
+ dst = kzalloc(dstlen, GFP_KERNEL);
if (!dst)
return NULL;
@@ -907,7 +917,7 @@ static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
/**
* caam_read_raw_data - Read a raw byte stream as a positive integer.
* The function skips buffer's leading zeros, copies the remained data
- * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
+ * to a buffer allocated in the GFP_KERNEL zone and returns
* the address of the new buffer.
*
* @buf : The data to read
@@ -920,7 +930,7 @@ static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
if (!*nbytes)
return NULL;
- return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
+ return kmemdup(buf, *nbytes, GFP_KERNEL);
}
static int caam_rsa_check_key_length(unsigned int len)
@@ -933,7 +943,7 @@ static int caam_rsa_check_key_length(unsigned int len)
static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct rsa_key raw_key = {NULL};
struct caam_rsa_key *rsa_key = &ctx->key;
int ret;
@@ -946,13 +956,13 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
return ret;
/* Copy key in DMA zone */
- rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
+ rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL);
if (!rsa_key->e)
goto err;
/*
* Skip leading zeros and copy the positive integer to a buffer
- * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
+ * allocated in the GFP_KERNEL zone. The decryption descriptor
* expects a positive integer for the RSA modulus and uses its length as
* decryption output length.
*/
@@ -974,16 +984,17 @@ err:
return -ENOMEM;
}
-static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
+static int caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
struct rsa_key *raw_key)
{
struct caam_rsa_key *rsa_key = &ctx->key;
size_t p_sz = raw_key->p_sz;
size_t q_sz = raw_key->q_sz;
+ unsigned aligned_size;
rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
if (!rsa_key->p)
- return;
+ return -ENOMEM;
rsa_key->p_sz = p_sz;
rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
@@ -991,11 +1002,13 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
goto free_p;
rsa_key->q_sz = q_sz;
- rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
+ aligned_size = ALIGN(raw_key->p_sz, dma_get_cache_alignment());
+ rsa_key->tmp1 = kzalloc(aligned_size, GFP_KERNEL);
if (!rsa_key->tmp1)
goto free_q;
- rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
+ aligned_size = ALIGN(raw_key->q_sz, dma_get_cache_alignment());
+ rsa_key->tmp2 = kzalloc(aligned_size, GFP_KERNEL);
if (!rsa_key->tmp2)
goto free_tmp1;
@@ -1016,7 +1029,7 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
rsa_key->priv_form = FORM3;
- return;
+ return 0;
free_dq:
kfree_sensitive(rsa_key->dq);
@@ -1030,12 +1043,13 @@ free_q:
kfree_sensitive(rsa_key->q);
free_p:
kfree_sensitive(rsa_key->p);
+ return -ENOMEM;
}
static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct rsa_key raw_key = {NULL};
struct caam_rsa_key *rsa_key = &ctx->key;
int ret;
@@ -1048,17 +1062,17 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
return ret;
/* Copy key in DMA zone */
- rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
+ rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_KERNEL);
if (!rsa_key->d)
goto err;
- rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
+ rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL);
if (!rsa_key->e)
goto err;
/*
* Skip leading zeros and copy the positive integer to a buffer
- * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
+ * allocated in the GFP_KERNEL zone. The decryption descriptor
* expects a positive integer for the RSA modulus and uses its length as
* decryption output length.
*/
@@ -1075,7 +1089,9 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
rsa_key->e_sz = raw_key.e_sz;
rsa_key->n_sz = raw_key.n_sz;
- caam_rsa_set_priv_key_form(ctx, &raw_key);
+ ret = caam_rsa_set_priv_key_form(ctx, &raw_key);
+ if (ret)
+ goto err;
return 0;
@@ -1086,7 +1102,7 @@ err:
static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
return ctx->key.n_sz;
}
@@ -1094,7 +1110,9 @@ static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
/* Per session pkc's driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
+
+ akcipher_set_reqsize(tfm, sizeof(struct caam_rsa_req_ctx));
ctx->dev = caam_jr_alloc();
@@ -1112,15 +1130,13 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
return -ENOMEM;
}
- ctx->enginectx.op.do_one_request = akcipher_do_one_req;
-
return 0;
}
/* Per session pkc's driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
@@ -1130,7 +1146,7 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
}
static struct caam_akcipher_alg caam_rsa = {
- .akcipher = {
+ .akcipher.base = {
.encrypt = caam_rsa_enc,
.decrypt = caam_rsa_dec,
.set_pub_key = caam_rsa_set_pub_key,
@@ -1138,48 +1154,61 @@ static struct caam_akcipher_alg caam_rsa = {
.max_size = caam_rsa_max_size,
.init = caam_rsa_init_tfm,
.exit = caam_rsa_exit_tfm,
- .reqsize = sizeof(struct caam_rsa_req_ctx),
.base = {
.cra_name = "rsa",
.cra_driver_name = "rsa-caam",
.cra_priority = 3000,
.cra_module = THIS_MODULE,
- .cra_ctxsize = sizeof(struct caam_rsa_ctx),
+ .cra_ctxsize = sizeof(struct caam_rsa_ctx) +
+ CRYPTO_DMA_PADDING,
},
- }
+ },
+ .akcipher.op = {
+ .do_one_request = akcipher_do_one_req,
+ },
};
/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
- u32 pk_inst;
+ u32 pk_inst, pkha;
int err;
init_done = false;
/* Determine public key hardware accelerator presence. */
- if (priv->era < 10)
- pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+ if (priv->era < 10) {
+ pk_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
- else
- pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
+ } else {
+ pkha = rd_reg32(&priv->jr[0]->vreg.pkha);
+ pk_inst = pkha & CHA_VER_NUM_MASK;
+
+ /*
+ * Newer CAAMs support partially disabled functionality. If this is the
+ * case, the number is non-zero, but this bit is set to indicate that
+ * no encryption or decryption is supported. Only signing and verifying
+ * is supported.
+ */
+ if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
+ pk_inst = 0;
+ }
/* Do not register algorithms if PKHA is not present. */
if (!pk_inst)
return 0;
/* allocate zero buffer, used for padding input */
- zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
- GFP_KERNEL);
+ zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_KERNEL);
if (!zero_buffer)
return -ENOMEM;
- err = crypto_register_akcipher(&caam_rsa.akcipher);
+ err = crypto_engine_register_akcipher(&caam_rsa.akcipher);
if (err) {
kfree(zero_buffer);
dev_warn(ctrldev, "%s alg registration failed\n",
- caam_rsa.akcipher.base.cra_driver_name);
+ caam_rsa.akcipher.base.base.cra_driver_name);
} else {
init_done = true;
caam_rsa.registered = true;
@@ -1195,7 +1224,7 @@ void caam_pkc_exit(void)
return;
if (caam_rsa.registered)
- crypto_unregister_akcipher(&caam_rsa.akcipher);
+ crypto_engine_unregister_akcipher(&caam_rsa.akcipher);
kfree(zero_buffer);
}
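The caampkc.c buffer changes above all follow one pattern used throughout this series: drop GFP_DMA, round the allocation up to dma_get_cache_alignment(), then map it. A minimal sketch of that helper shape (example_dma_buf_alloc is a hypothetical name, not a driver function):

        #include <linux/dma-mapping.h>
        #include <linux/slab.h>

        static void *example_dma_buf_alloc(struct device *dev, size_t len,
                                           dma_addr_t *dma)
        {
                /* round up so no CPU-owned data shares the buffer's cache lines */
                void *buf = kzalloc(ALIGN(len, dma_get_cache_alignment()),
                                    GFP_KERNEL);

                if (!buf)
                        return NULL;

                *dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
                if (dma_mapping_error(dev, *dma)) {
                        kfree(buf);
                        return NULL;
                }
                return buf;
        }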
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
index cc889a525e2f..96d03704c9be 100644
--- a/drivers/crypto/caam/caampkc.h
+++ b/drivers/crypto/caam/caampkc.h
@@ -12,7 +12,6 @@
#define _PKC_DESC_H_
#include "compat.h"
#include "pdb.h"
-#include <crypto/engine.h>
/**
* caam_priv_key_form - CAAM RSA private key representation
@@ -88,13 +87,11 @@ struct caam_rsa_key {
/**
* caam_rsa_ctx - per session context.
- * @enginectx : crypto engine context
* @key : RSA key in DMA zone
* @dev : device structure
* @padding_dma : dma address of padding, for adding it to the input
*/
struct caam_rsa_ctx {
- struct crypto_engine_ctx enginectx;
struct caam_rsa_key key;
struct device *dev;
dma_addr_t padding_dma;
diff --git a/drivers/crypto/caam/caamprng.c b/drivers/crypto/caam/caamprng.c
new file mode 100644
index 000000000000..6e4c1191cb28
--- /dev/null
+++ b/drivers/crypto/caam/caamprng.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver to expose SEC4 PRNG via crypto RNG API
+ *
+ * Copyright 2022 NXP
+ *
+ */
+
+#include <linux/completion.h>
+#include <crypto/internal/rng.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include "compat.h"
+#include "regs.h"
+#include "intern.h"
+#include "desc_constr.h"
+#include "jr.h"
+#include "error.h"
+
+/*
+ * Length of used descriptors, see caam_init_desc()
+ */
+#define CAAM_PRNG_MAX_DESC_LEN (CAAM_CMD_SZ + \
+ CAAM_CMD_SZ + \
+ CAAM_CMD_SZ + CAAM_PTR_SZ_MAX)
+
+/* prng per-device context */
+struct caam_prng_ctx {
+ int err;
+ struct completion done;
+};
+
+struct caam_prng_alg {
+ struct rng_alg rng;
+ bool registered;
+};
+
+static void caam_prng_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct caam_prng_ctx *jctx = context;
+
+ jctx->err = err ? caam_jr_strstatus(jrdev, err) : 0;
+
+ complete(&jctx->done);
+}
+
+static u32 *caam_init_reseed_desc(u32 *desc)
+{
+ init_job_desc(desc, 0); /* + 1 cmd_sz */
+ /* Generate random bytes: + 1 cmd_sz */
+ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+ OP_ALG_AS_FINALIZE);
+
+ print_hex_dump_debug("prng reseed desc@: ", DUMP_PREFIX_ADDRESS,
+ 16, 4, desc, desc_bytes(desc), 1);
+
+ return desc;
+}
+
+static u32 *caam_init_prng_desc(u32 *desc, dma_addr_t dst_dma, u32 len)
+{
+ init_job_desc(desc, 0); /* + 1 cmd_sz */
+ /* Generate random bytes: + 1 cmd_sz */
+ append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
+ /* Store bytes: + 1 cmd_sz + caam_ptr_sz */
+ append_fifo_store(desc, dst_dma,
+ len, FIFOST_TYPE_RNGSTORE);
+
+ print_hex_dump_debug("prng job desc@: ", DUMP_PREFIX_ADDRESS,
+ 16, 4, desc, desc_bytes(desc), 1);
+
+ return desc;
+}
+
+static int caam_prng_generate(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int dlen)
+{
+ unsigned int aligned_dlen = ALIGN(dlen, dma_get_cache_alignment());
+ struct caam_prng_ctx ctx;
+ struct device *jrdev;
+ dma_addr_t dst_dma;
+ u32 *desc;
+ u8 *buf;
+ int ret;
+
+ if (aligned_dlen < dlen)
+ return -EOVERFLOW;
+
+ buf = kzalloc(aligned_dlen, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ jrdev = caam_jr_alloc();
+ ret = PTR_ERR_OR_ZERO(jrdev);
+ if (ret) {
+ pr_err("Job Ring Device allocation failed\n");
+ kfree(buf);
+ return ret;
+ }
+
+ desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+
+ dst_dma = dma_map_single(jrdev, buf, dlen, DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, dst_dma)) {
+ dev_err(jrdev, "Failed to map destination buffer memory\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ init_completion(&ctx.done);
+ ret = caam_jr_enqueue(jrdev,
+ caam_init_prng_desc(desc, dst_dma, dlen),
+ caam_prng_done, &ctx);
+
+ if (ret == -EINPROGRESS) {
+ wait_for_completion(&ctx.done);
+ ret = ctx.err;
+ }
+
+ dma_unmap_single(jrdev, dst_dma, dlen, DMA_FROM_DEVICE);
+
+ if (!ret)
+ memcpy(dst, buf, dlen);
+out:
+ kfree(desc);
+out1:
+ caam_jr_free(jrdev);
+ kfree(buf);
+ return ret;
+}
+
+static void caam_prng_exit(struct crypto_tfm *tfm) {}
+
+static int caam_prng_init(struct crypto_tfm *tfm)
+{
+ return 0;
+}
+
+static int caam_prng_seed(struct crypto_rng *tfm,
+ const u8 *seed, unsigned int slen)
+{
+ struct caam_prng_ctx ctx;
+ struct device *jrdev;
+ u32 *desc;
+ int ret;
+
+ if (slen) {
+ pr_err("Seed length should be zero\n");
+ return -EINVAL;
+ }
+
+ jrdev = caam_jr_alloc();
+ ret = PTR_ERR_OR_ZERO(jrdev);
+ if (ret) {
+ pr_err("Job Ring Device allocation failed\n");
+ return ret;
+ }
+
+ desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL);
+ if (!desc) {
+ caam_jr_free(jrdev);
+ return -ENOMEM;
+ }
+
+ init_completion(&ctx.done);
+ ret = caam_jr_enqueue(jrdev,
+ caam_init_reseed_desc(desc),
+ caam_prng_done, &ctx);
+
+ if (ret == -EINPROGRESS) {
+ wait_for_completion(&ctx.done);
+ ret = ctx.err;
+ }
+
+ kfree(desc);
+ caam_jr_free(jrdev);
+ return ret;
+}
+
+static struct caam_prng_alg caam_prng_alg = {
+ .rng = {
+ .generate = caam_prng_generate,
+ .seed = caam_prng_seed,
+ .seedsize = 0,
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "prng-caam",
+ .cra_priority = 500,
+ .cra_ctxsize = sizeof(struct caam_prng_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = caam_prng_init,
+ .cra_exit = caam_prng_exit,
+ },
+ }
+};
+
+void caam_prng_unregister(void *data)
+{
+ if (caam_prng_alg.registered)
+ crypto_unregister_rng(&caam_prng_alg.rng);
+}
+
+int caam_prng_register(struct device *ctrldev)
+{
+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
+ u32 rng_inst;
+ int ret = 0;
+
+ /* Check for available RNG blocks before registration */
+ if (priv->era < 10)
+ rng_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
+ CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
+ else
+ rng_inst = rd_reg32(&priv->jr[0]->vreg.rng) & CHA_VER_NUM_MASK;
+
+ if (!rng_inst) {
+ dev_dbg(ctrldev, "RNG block is not available... skipping registering algorithm\n");
+ return ret;
+ }
+
+ ret = crypto_register_rng(&caam_prng_alg.rng);
+ if (ret) {
+ dev_err(ctrldev,
+ "couldn't register rng crypto alg: %d\n",
+ ret);
+ return ret;
+ }
+
+ caam_prng_alg.registered = true;
+
+ dev_info(ctrldev,
+ "rng crypto API alg registered %s\n", caam_prng_alg.rng.base.cra_driver_name);
+
+ return 0;
+}
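Once prng-caam is registered as "stdrng", kernel consumers reach it through the generic crypto RNG API. A small usage sketch, not taken from the patch, assuming the caam backend wins priority selection for "stdrng":

        #include <crypto/rng.h>

        static int example_get_random(u8 *out, unsigned int len)
        {
                struct crypto_rng *rng;
                int err;

                rng = crypto_alloc_rng("stdrng", 0, 0);
                if (IS_ERR(rng))
                        return PTR_ERR(rng);

                /* seedsize is 0, so this maps to the driver's reseed descriptor */
                err = crypto_rng_reset(rng, NULL, 0);
                if (!err)
                        err = crypto_rng_get_bytes(rng, out, len);

                crypto_free_rng(rng);
                return err;
        }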
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 77d048dfe5d0..0eb43c862516 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for hw_random
*
* Copyright 2011 Freescale Semiconductor, Inc.
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2023 NXP
*
* Based on caamalg.c crypto API driver.
*
@@ -12,6 +12,8 @@
#include <linux/hw_random.h>
#include <linux/completion.h>
#include <linux/atomic.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
#include <linux/kfifo.h>
#include "compat.h"
@@ -170,23 +172,70 @@ static void caam_cleanup(struct hwrng *rng)
kfifo_free(&ctx->fifo);
}
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST
+static inline void test_len(struct hwrng *rng, size_t len, bool wait)
+{
+ u8 *buf;
+ int read_len;
+ struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
+ struct device *dev = ctx->ctrldev;
+
+ buf = kcalloc(CAAM_RNG_MAX_FIFO_STORE_SIZE, sizeof(u8), GFP_KERNEL);
+ if (!buf) {
+ return;
+ }
+ while (len > 0) {
+ read_len = rng->read(rng, buf, len, wait);
+
+ if (read_len < 0 || (read_len == 0 && wait)) {
+ dev_err(dev, "RNG Read FAILED received %d bytes\n",
+ read_len);
+ kfree(buf);
+ return;
+ }
+
+ print_hex_dump_debug("random bytes@: ",
+ DUMP_PREFIX_ADDRESS, 16, 4,
+ buf, read_len, 1);
+
+ len = len - read_len;
+ }
+
+ kfree(buf);
+}
+
+static inline void test_mode_once(struct hwrng *rng, bool wait)
+{
+ test_len(rng, 32, wait);
+ test_len(rng, 64, wait);
+ test_len(rng, 128, wait);
+}
+
+static void self_test(struct hwrng *rng)
+{
+ pr_info("Executing RNG SELF-TEST with wait\n");
+ test_mode_once(rng, true);
+}
+#endif
+
static int caam_init(struct hwrng *rng)
{
struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
int err;
ctx->desc_sync = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
- GFP_DMA | GFP_KERNEL);
+ GFP_KERNEL);
if (!ctx->desc_sync)
return -ENOMEM;
ctx->desc_async = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
- GFP_DMA | GFP_KERNEL);
+ GFP_KERNEL);
if (!ctx->desc_async)
return -ENOMEM;
- if (kfifo_alloc(&ctx->fifo, CAAM_RNG_MAX_FIFO_STORE_SIZE,
- GFP_DMA | GFP_KERNEL))
+ if (kfifo_alloc(&ctx->fifo, ALIGN(CAAM_RNG_MAX_FIFO_STORE_SIZE,
+ dma_get_cache_alignment()),
+ GFP_KERNEL))
return -ENOMEM;
INIT_WORK(&ctx->worker, caam_rng_worker);
@@ -224,10 +273,10 @@ int caam_rng_init(struct device *ctrldev)
/* Check for an instantiated RNG before registration */
if (priv->era < 10)
- rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+ rng_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
else
- rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
+ rng_inst = rd_reg32(&priv->jr[0]->vreg.rng) & CHA_VER_NUM_MASK;
if (!rng_inst)
return 0;
@@ -246,7 +295,6 @@ int caam_rng_init(struct device *ctrldev)
ctx->rng.cleanup = caam_cleanup;
ctx->rng.read = caam_read;
ctx->rng.priv = (unsigned long)ctx;
- ctx->rng.quality = 1024;
dev_info(ctrldev, "registering rng-caam\n");
@@ -256,6 +304,10 @@ int caam_rng_init(struct device *ctrldev)
return ret;
}
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST
+ self_test(&ctx->rng);
+#endif
+
devres_close_group(ctrldev, caam_rng_init);
return 0;
}
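As an aside on the GFP_DMA removal in caam_init() above: with GFP_DMA gone, DMA-visible buffers are instead sized up to a full cacheline so they never share a cacheline with unrelated data, which is what the ALIGN(..., dma_get_cache_alignment()) around the kfifo size does. A minimal sketch of the same pattern (illustrative only, helper name hypothetical):

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* Illustration only: allocate a buffer that is safe to hand to a
 * non-coherent DMA engine by rounding its size up to a cacheline multiple.
 */
static void *example_alloc_dma_safe(size_t len)
{
	return kzalloc(ALIGN(len, dma_get_cache_alignment()), GFP_KERNEL);
}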
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index ca0361b2dbb0..320be5d77737 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -3,12 +3,13 @@
* Controller-level driver, kernel property detection, initialization
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2023 NXP
*/
#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/sys_soc.h>
#include <linux/fsl/mc.h>
@@ -23,7 +24,7 @@
bool caam_dpaa2;
EXPORT_SYMBOL(caam_dpaa2);
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
#include "qi.h"
#endif
@@ -79,6 +80,17 @@ static void build_deinstantiation_desc(u32 *desc, int handle)
append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}
+#ifdef CONFIG_OF
+static const struct of_device_id imx8m_machine_match[] = {
+ { .compatible = "fsl,imx8mm", },
+ { .compatible = "fsl,imx8mn", },
+ { .compatible = "fsl,imx8mp", },
+ { .compatible = "fsl,imx8mq", },
+ { .compatible = "fsl,imx8ulp", },
+ { }
+};
+#endif
+
/*
* run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
* the software (no JR/QI used).
@@ -105,10 +117,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
* Apparently on i.MX8M{Q,M,N,P} it doesn't matter if virt_en == 1
* and the following steps should be performed regardless
*/
- of_machine_is_compatible("fsl,imx8mq") ||
- of_machine_is_compatible("fsl,imx8mm") ||
- of_machine_is_compatible("fsl,imx8mn") ||
- of_machine_is_compatible("fsl,imx8mp")) {
+ of_match_node(imx8m_machine_match, of_root)) {
clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);
while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
@@ -199,7 +208,7 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
u32 *desc, status;
int sh_idx, ret = 0;
- desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL | GFP_DMA);
+ desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
if (!desc)
return -ENOMEM;
@@ -276,7 +285,7 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
int ret = 0, sh_idx;
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
- desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL | GFP_DMA);
+ desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
if (!desc)
return -ENOMEM;
@@ -284,6 +293,10 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
const u32 rdsta_if = RDSTA_IF0 << sh_idx;
const u32 rdsta_pr = RDSTA_PR0 << sh_idx;
const u32 rdsta_mask = rdsta_if | rdsta_pr;
+
+ /* Clear the contents before using the descriptor */
+ memset(desc, 0x00, CAAM_CMD_SZ * 7);
+
/*
* If the corresponding bit is set, this state handle
* was initialized by somebody else, so it's left alone.
@@ -327,8 +340,6 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
}
dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
- /* Clear the contents before recreating the descriptor */
- memset(desc, 0x00, CAAM_CMD_SZ * 7);
}
kfree(desc);
@@ -342,16 +353,15 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
/*
* kick_trng - sets the various parameters for enabling the initialization
* of the RNG4 block in CAAM
- * @pdev - pointer to the platform device
+ * @dev - pointer to the controller device
* @ent_delay - Defines the length (in system clocks) of each entropy sample.
*/
-static void kick_trng(struct platform_device *pdev, int ent_delay)
+static void kick_trng(struct device *dev, int ent_delay)
{
- struct device *ctrldev = &pdev->dev;
- struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
struct caam_ctrl __iomem *ctrl;
struct rng4tst __iomem *r4tst;
- u32 val;
+ u32 val, rtsdctl;
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
r4tst = &ctrl->r4tst[0];
@@ -367,26 +377,38 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
* Performance-wise, it does not make sense to
* set the delay to a value that is lower
* than the last one that worked (i.e. the state handles
- * were instantiated properly. Thus, instead of wasting
- * time trying to set the values controlling the sample
- * frequency, the function simply returns.
+ * were instantiated properly).
+ */
+ rtsdctl = rd_reg32(&r4tst->rtsdctl);
+ val = (rtsdctl & RTSDCTL_ENT_DLY_MASK) >> RTSDCTL_ENT_DLY_SHIFT;
+ if (ent_delay > val) {
+ val = ent_delay;
+ /* min. freq. count, equal to 1/4 of the entropy sample length */
+ wr_reg32(&r4tst->rtfrqmin, val >> 2);
+ /* disable maximum frequency count */
+ wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
+ }
+
+ wr_reg32(&r4tst->rtsdctl, (val << RTSDCTL_ENT_DLY_SHIFT) |
+ RTSDCTL_SAMP_SIZE_VAL);
+
+ /*
+ * To avoid reprogramming the self-test parameters over and over again,
+ * use RTSDCTL[SAMP_SIZE] as an indicator.
*/
- val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
- >> RTSDCTL_ENT_DLY_SHIFT;
- if (ent_delay <= val)
- goto start_rng;
-
- val = rd_reg32(&r4tst->rtsdctl);
- val = (val & ~RTSDCTL_ENT_DLY_MASK) |
- (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
- wr_reg32(&r4tst->rtsdctl, val);
- /* min. freq. count, equal to 1/4 of the entropy sample length */
- wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
- /* disable maximum frequency count */
- wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
- /* read the control register */
- val = rd_reg32(&r4tst->rtmctl);
-start_rng:
+ if ((rtsdctl & RTSDCTL_SAMP_SIZE_MASK) != RTSDCTL_SAMP_SIZE_VAL) {
+ wr_reg32(&r4tst->rtscmisc, (2 << 16) | 32);
+ wr_reg32(&r4tst->rtpkrrng, 570);
+ wr_reg32(&r4tst->rtpkrmax, 1600);
+ wr_reg32(&r4tst->rtscml, (122 << 16) | 317);
+ wr_reg32(&r4tst->rtscrl[0], (80 << 16) | 107);
+ wr_reg32(&r4tst->rtscrl[1], (57 << 16) | 62);
+ wr_reg32(&r4tst->rtscrl[2], (39 << 16) | 39);
+ wr_reg32(&r4tst->rtscrl[3], (27 << 16) | 26);
+ wr_reg32(&r4tst->rtscrl[4], (19 << 16) | 18);
+ wr_reg32(&r4tst->rtscrl[5], (18 << 16) | 17);
+ }
+
/*
* select raw sampling in both entropy shifter
* and statistical checker; ; put RNG4 into run mode
@@ -395,7 +417,7 @@ start_rng:
RTMCTL_SAMP_MODE_RAW_ES_SC);
}
-static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
+static int caam_get_era_from_hw(struct caam_perfmon __iomem *perfmon)
{
static const struct {
u16 ip_id;
@@ -421,12 +443,12 @@ static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
u16 ip_id;
int i;
- ccbvid = rd_reg32(&ctrl->perfmon.ccb_id);
+ ccbvid = rd_reg32(&perfmon->ccb_id);
era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
if (era) /* This is '0' prior to CAAM ERA-6 */
return era;
- id_ms = rd_reg32(&ctrl->perfmon.caam_id_ms);
+ id_ms = rd_reg32(&perfmon->caam_id_ms);
ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT;
maj_rev = (id_ms & SECVID_MS_MAJ_REV_MASK) >> SECVID_MS_MAJ_REV_SHIFT;
@@ -444,9 +466,9 @@ static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
* In case this property is not passed an attempt to retrieve the CAAM
* era via register reads will be made.
*
- * @ctrl: controller region
+ * @perfmon: Performance Monitor Registers
*/
-static int caam_get_era(struct caam_ctrl __iomem *ctrl)
+static int caam_get_era(struct caam_perfmon __iomem *perfmon)
{
struct device_node *caam_node;
int ret;
@@ -459,7 +481,7 @@ static int caam_get_era(struct caam_ctrl __iomem *ctrl)
if (!ret)
return prop;
else
- return caam_get_era_from_hw(ctrl);
+ return caam_get_era_from_hw(perfmon);
}
/*
@@ -492,6 +514,7 @@ static const struct of_device_id caam_match[] = {
MODULE_DEVICE_TABLE(of, caam_match);
struct caam_imx_data {
+ bool page0_access;
const struct clk_bulk_data *clks;
int num_clks;
};
@@ -504,6 +527,7 @@ static const struct clk_bulk_data caam_imx6_clks[] = {
};
static const struct caam_imx_data caam_imx6_data = {
+ .page0_access = true,
.clks = caam_imx6_clks,
.num_clks = ARRAY_SIZE(caam_imx6_clks),
};
@@ -514,6 +538,7 @@ static const struct clk_bulk_data caam_imx7_clks[] = {
};
static const struct caam_imx_data caam_imx7_data = {
+ .page0_access = true,
.clks = caam_imx7_clks,
.num_clks = ARRAY_SIZE(caam_imx7_clks),
};
@@ -525,6 +550,7 @@ static const struct clk_bulk_data caam_imx6ul_clks[] = {
};
static const struct caam_imx_data caam_imx6ul_data = {
+ .page0_access = true,
.clks = caam_imx6ul_clks,
.num_clks = ARRAY_SIZE(caam_imx6ul_clks),
};
@@ -534,15 +560,20 @@ static const struct clk_bulk_data caam_vf610_clks[] = {
};
static const struct caam_imx_data caam_vf610_data = {
+ .page0_access = true,
.clks = caam_vf610_clks,
.num_clks = ARRAY_SIZE(caam_vf610_clks),
};
+static const struct caam_imx_data caam_imx8ulp_data;
+
static const struct soc_device_attribute caam_imx_soc_table[] = {
{ .soc_id = "i.MX6UL", .data = &caam_imx6ul_data },
{ .soc_id = "i.MX6*", .data = &caam_imx6_data },
{ .soc_id = "i.MX7*", .data = &caam_imx7_data },
{ .soc_id = "i.MX8M*", .data = &caam_imx7_data },
+ { .soc_id = "i.MX8ULP", .data = &caam_imx8ulp_data },
+ { .soc_id = "i.MX8Q*", .data = &caam_imx8ulp_data },
{ .soc_id = "VF*", .data = &caam_vf610_data },
{ .family = "Freescale i.MX" },
{ /* sentinel */ }
@@ -561,9 +592,9 @@ static int init_clocks(struct device *dev, const struct caam_imx_data *data)
int ret;
ctrlpriv->num_clks = data->num_clks;
- ctrlpriv->clks = devm_kmemdup(dev, data->clks,
- data->num_clks * sizeof(data->clks[0]),
- GFP_KERNEL);
+ ctrlpriv->clks = devm_kmemdup_array(dev, data->clks,
+ data->num_clks, sizeof(*data->clks),
+ GFP_KERNEL);
if (!ctrlpriv->clks)
return -ENOMEM;
@@ -609,22 +640,238 @@ static bool check_version(struct fsl_mc_version *mc_version, u32 major,
}
#endif
+static bool needs_entropy_delay_adjustment(void)
+{
+ if (of_machine_is_compatible("fsl,imx6sx"))
+ return true;
+ return false;
+}
+
+static int caam_ctrl_rng_init(struct device *dev)
+{
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+ struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
+ int ret, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
+ u8 rng_vid;
+
+ if (ctrlpriv->era < 10) {
+ struct caam_perfmon __iomem *perfmon;
+
+ perfmon = ctrlpriv->total_jobrs ?
+ (struct caam_perfmon __iomem *)&ctrlpriv->jr[0]->perfmon :
+ (struct caam_perfmon __iomem *)&ctrl->perfmon;
+
+ rng_vid = (rd_reg32(&perfmon->cha_id_ls) &
+ CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
+ } else {
+ struct version_regs __iomem *vreg;
+
+ vreg = ctrlpriv->total_jobrs ?
+ (struct version_regs __iomem *)&ctrlpriv->jr[0]->vreg :
+ (struct version_regs __iomem *)&ctrl->vreg;
+
+ rng_vid = (rd_reg32(&vreg->rng) & CHA_VER_VID_MASK) >>
+ CHA_VER_VID_SHIFT;
+ }
+
+ /*
+ * If SEC has RNG version >= 4 and RNG state handle has not been
+ * already instantiated, do RNG instantiation
+ * In case of SoCs with Management Complex, RNG is managed by MC f/w.
+ */
+ if (!(ctrlpriv->mc_en && ctrlpriv->pr_support) && rng_vid >= 4) {
+ ctrlpriv->rng4_sh_init =
+ rd_reg32(&ctrl->r4tst[0].rdsta);
+ /*
+ * If the secure keys (TDKEK, JDKEK, TDSK), were already
+ * generated, signal this to the function that is instantiating
+ * the state handles. An error would occur if RNG4 attempts
+ * to regenerate these keys before the next POR.
+ */
+ gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
+ ctrlpriv->rng4_sh_init &= RDSTA_MASK;
+ do {
+ int inst_handles =
+ rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_MASK;
+ /*
+ * If either SH were instantiated by somebody else
+ * (e.g. u-boot) then it is assumed that the entropy
+ * parameters are properly set and thus the function
+ * setting these (kick_trng(...)) is skipped.
+ * Also, if a handle was instantiated, do not change
+ * the TRNG parameters.
+ */
+ if (needs_entropy_delay_adjustment())
+ ent_delay = 12000;
+ if (!inst_handles) {
+ dev_info(dev,
+ "Entropy delay = %u\n",
+ ent_delay);
+ kick_trng(dev, ent_delay);
+ ent_delay = ent_delay * 2;
+ }
+ /*
+ * if instantiate_rng(...) fails, the loop will rerun
+ * and the kick_trng(...) function will modify the
+ * upper and lower limits of the entropy sampling
+ * interval, leading to a successful initialization of
+ * the RNG.
+ */
+ ret = instantiate_rng(dev, inst_handles,
+ gen_sk);
+ /*
+ * Entropy delay is determined via TRNG characterization.
+ * TRNG characterization is run across different voltages
+ * and temperatures.
+ * If worst case value for ent_dly is identified,
+ * the loop can be skipped for that platform.
+ */
+ if (needs_entropy_delay_adjustment())
+ break;
+ if (ret == -EAGAIN)
+ /*
+ * if here, the loop will rerun,
+ * so don't hog the CPU
+ */
+ cpu_relax();
+ } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
+ if (ret) {
+ dev_err(dev, "failed to instantiate RNG");
+ return ret;
+ }
+ /*
+ * Set handles initialized by this module as the complement of
+ * the already initialized ones
+ */
+ ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK;
+
+ /* Enable RDB bit so that RNG works faster */
+ clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
+ }
+
+ return 0;
+}
+
+/* Indicate if the internal state of the CAAM is lost during PM */
+static int caam_off_during_pm(void)
+{
+ bool not_off_during_pm = of_machine_is_compatible("fsl,imx6q") ||
+ of_machine_is_compatible("fsl,imx6qp") ||
+ of_machine_is_compatible("fsl,imx6dl");
+
+ return not_off_during_pm ? 0 : 1;
+}
+
+static void caam_state_save(struct device *dev)
+{
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+ struct caam_ctl_state *state = &ctrlpriv->state;
+ struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
+ u32 deco_inst, jr_inst;
+ int i;
+
+ state->mcr = rd_reg32(&ctrl->mcr);
+ state->scfgr = rd_reg32(&ctrl->scfgr);
+
+ deco_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
+ CHA_ID_MS_DECO_MASK) >> CHA_ID_MS_DECO_SHIFT;
+ for (i = 0; i < deco_inst; i++) {
+ state->deco_mid[i].liodn_ms =
+ rd_reg32(&ctrl->deco_mid[i].liodn_ms);
+ state->deco_mid[i].liodn_ls =
+ rd_reg32(&ctrl->deco_mid[i].liodn_ls);
+ }
+
+ jr_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
+ CHA_ID_MS_JR_MASK) >> CHA_ID_MS_JR_SHIFT;
+ for (i = 0; i < jr_inst; i++) {
+ state->jr_mid[i].liodn_ms =
+ rd_reg32(&ctrl->jr_mid[i].liodn_ms);
+ state->jr_mid[i].liodn_ls =
+ rd_reg32(&ctrl->jr_mid[i].liodn_ls);
+ }
+}
+
+static void caam_state_restore(const struct device *dev)
+{
+ const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+ const struct caam_ctl_state *state = &ctrlpriv->state;
+ struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
+ u32 deco_inst, jr_inst;
+ int i;
+
+ wr_reg32(&ctrl->mcr, state->mcr);
+ wr_reg32(&ctrl->scfgr, state->scfgr);
+
+ deco_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
+ CHA_ID_MS_DECO_MASK) >> CHA_ID_MS_DECO_SHIFT;
+ for (i = 0; i < deco_inst; i++) {
+ wr_reg32(&ctrl->deco_mid[i].liodn_ms,
+ state->deco_mid[i].liodn_ms);
+ wr_reg32(&ctrl->deco_mid[i].liodn_ls,
+ state->deco_mid[i].liodn_ls);
+ }
+
+ jr_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
+ CHA_ID_MS_JR_MASK) >> CHA_ID_MS_JR_SHIFT;
+ for (i = 0; i < jr_inst; i++) {
+ wr_reg32(&ctrl->jr_mid[i].liodn_ms,
+ state->jr_mid[i].liodn_ms);
+ wr_reg32(&ctrl->jr_mid[i].liodn_ls,
+ state->jr_mid[i].liodn_ls);
+ }
+
+ if (ctrlpriv->virt_en == 1)
+ clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START |
+ JRSTART_JR1_START | JRSTART_JR2_START |
+ JRSTART_JR3_START);
+}
+
+static int caam_ctrl_suspend(struct device *dev)
+{
+ const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+
+ if (ctrlpriv->caam_off_during_pm && !ctrlpriv->no_page0)
+ caam_state_save(dev);
+
+ return 0;
+}
+
+static int caam_ctrl_resume(struct device *dev)
+{
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (ctrlpriv->caam_off_during_pm && !ctrlpriv->no_page0) {
+ caam_state_restore(dev);
+
+ /* HW and rng will be reset so deinstantiation can be removed */
+ devm_remove_action(dev, devm_deinstantiate_rng, dev);
+ ret = caam_ctrl_rng_init(dev);
+ }
+
+ return ret;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(caam_ctrl_pm_ops, caam_ctrl_suspend, caam_ctrl_resume);
+
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
- int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
+ int ret, ring;
u64 caam_id;
const struct soc_device_attribute *imx_soc_match;
struct device *dev;
struct device_node *nprop, *np;
struct caam_ctrl __iomem *ctrl;
struct caam_drv_private *ctrlpriv;
+ struct caam_perfmon __iomem *perfmon;
struct dentry *dfs_root;
u32 scfgr, comp_params;
- u8 rng_vid;
int pg_size;
int BLOCK_OFFSET = 0;
- bool pr_support = false;
+ bool reg_access = true;
+ const struct caam_imx_data *imx_soc_data;
ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
if (!ctrlpriv)
@@ -635,20 +882,45 @@ static int caam_probe(struct platform_device *pdev)
nprop = pdev->dev.of_node;
imx_soc_match = soc_device_match(caam_imx_soc_table);
+ if (!imx_soc_match && of_match_node(imx8m_machine_match, of_root))
+ return -EPROBE_DEFER;
+
caam_imx = (bool)imx_soc_match;
+ ctrlpriv->caam_off_during_pm = caam_imx && caam_off_during_pm();
+
if (imx_soc_match) {
+ /*
+ * Until Layerscape and i.MX OP-TEE get in sync,
+ * only i.MX OP-TEE use cases disallow access to
+ * caam page 0 (controller) registers.
+ */
+ np = of_find_compatible_node(NULL, NULL, "linaro,optee-tz");
+ ctrlpriv->optee_en = !!np;
+ of_node_put(np);
+
+ reg_access = !ctrlpriv->optee_en;
+
if (!imx_soc_match->data) {
dev_err(dev, "No clock data provided for i.MX SoC");
return -EINVAL;
}
+ imx_soc_data = imx_soc_match->data;
+ reg_access = reg_access && imx_soc_data->page0_access;
+ ctrlpriv->no_page0 = !reg_access;
+ /*
+ * CAAM clocks cannot be controlled from the kernel.
+ */
+ if (!imx_soc_data->num_clks)
+ goto iomap_ctrl;
+
ret = init_clocks(dev, imx_soc_match->data);
if (ret)
return ret;
}
-
+iomap_ctrl:
/* Get configuration properties from device tree */
/* First, get register page */
ctrl = devm_of_iomap(dev, nprop, 0, NULL);
@@ -658,17 +930,45 @@ static int caam_probe(struct platform_device *pdev)
return ret;
}
- caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
+ ring = 0;
+ for_each_available_child_of_node(nprop, np)
+ if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+ of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
+ u32 reg;
+
+ if (of_property_read_u32_index(np, "reg", 0, &reg)) {
+ dev_err(dev, "%s read reg property error\n",
+ np->full_name);
+ continue;
+ }
+
+ ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+ ((__force uint8_t *)ctrl + reg);
+
+ ctrlpriv->total_jobrs++;
+ ring++;
+ }
+
+ /*
+ * Wherever possible, instead of accessing registers from the global page,
+ * use the alias registers in the first (cf. DT nodes order)
+ * job ring's page.
+ */
+ perfmon = ring ? (struct caam_perfmon __iomem *)&ctrlpriv->jr[0]->perfmon :
+ (struct caam_perfmon __iomem *)&ctrl->perfmon;
+
+ caam_little_end = !(bool)(rd_reg32(&perfmon->status) &
(CSTA_PLEND | CSTA_ALT_PLEND));
- comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
- if (comp_params & CTPR_MS_PS && rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
+ comp_params = rd_reg32(&perfmon->comp_parms_ms);
+ if (reg_access && comp_params & CTPR_MS_PS &&
+ rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
caam_ptr_sz = sizeof(u64);
else
caam_ptr_sz = sizeof(u32);
caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
/* If (DPAA 1.x) QI present, check whether dependencies are available */
if (ctrlpriv->qi_present && !caam_dpaa2) {
ret = qman_is_probed();
@@ -720,12 +1020,16 @@ static int caam_probe(struct platform_device *pdev)
mc_version = fsl_mc_get_version();
if (mc_version)
- pr_support = check_version(mc_version, 10, 20, 0);
+ ctrlpriv->pr_support = check_version(mc_version, 10, 20,
+ 0);
else
return -EPROBE_DEFER;
}
#endif
+ if (!reg_access)
+ goto set_dma_mask;
+
/*
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
* long pointers in master configuration register.
@@ -765,13 +1069,14 @@ static int caam_probe(struct platform_device *pdev)
JRSTART_JR1_START | JRSTART_JR2_START |
JRSTART_JR3_START);
+set_dma_mask:
ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev));
if (ret) {
dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
return ret;
}
- ctrlpriv->era = caam_get_era(ctrl);
+ ctrlpriv->era = caam_get_era(perfmon);
ctrlpriv->domain = iommu_get_domain_for_dev(dev);
dfs_root = debugfs_create_dir(dev_name(dev), NULL);
@@ -782,7 +1087,7 @@ static int caam_probe(struct platform_device *pdev)
return ret;
}
- caam_debugfs_init(ctrlpriv, dfs_root);
+ caam_debugfs_init(ctrlpriv, perfmon, dfs_root);
/* Check to see if (DPAA 1.x) QI present. If so, enable */
if (ctrlpriv->qi_present && !caam_dpaa2) {
@@ -794,108 +1099,49 @@ static int caam_probe(struct platform_device *pdev)
wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
/* If QMAN driver is present, init CAAM-QI backend */
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
ret = caam_qi_init(pdev);
if (ret)
dev_err(dev, "caam qi i/f init failed: %d\n", ret);
#endif
}
- ring = 0;
- for_each_available_child_of_node(nprop, np)
- if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
- of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
- ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
- ((__force uint8_t *)ctrl +
- (ring + JR_BLOCK_NUMBER) *
- BLOCK_OFFSET
- );
- ctrlpriv->total_jobrs++;
- ring++;
- }
-
/* If no QI and no rings specified, quit and go home */
if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
dev_err(dev, "no queues configured, terminating\n");
return -ENOMEM;
}
- if (ctrlpriv->era < 10)
- rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
- CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
- else
- rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
- CHA_VER_VID_SHIFT;
+ comp_params = rd_reg32(&perfmon->comp_parms_ls);
+ ctrlpriv->blob_present = !!(comp_params & CTPR_LS_BLOB);
/*
- * If SEC has RNG version >= 4 and RNG state handle has not been
- * already instantiated, do RNG instantiation
- * In case of SoCs with Management Complex, RNG is managed by MC f/w.
+ * Some SoCs like the LS1028A (non-E) indicate CTPR_LS_BLOB support,
+ * but fail when actually using it due to missing AES support, so
+ * check both here.
*/
- if (!(ctrlpriv->mc_en && pr_support) && rng_vid >= 4) {
- ctrlpriv->rng4_sh_init =
- rd_reg32(&ctrl->r4tst[0].rdsta);
- /*
- * If the secure keys (TDKEK, JDKEK, TDSK), were already
- * generated, signal this to the function that is instantiating
- * the state handles. An error would occur if RNG4 attempts
- * to regenerate these keys before the next POR.
- */
- gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
- ctrlpriv->rng4_sh_init &= RDSTA_MASK;
- do {
- int inst_handles =
- rd_reg32(&ctrl->r4tst[0].rdsta) &
- RDSTA_MASK;
- /*
- * If either SH were instantiated by somebody else
- * (e.g. u-boot) then it is assumed that the entropy
- * parameters are properly set and thus the function
- * setting these (kick_trng(...)) is skipped.
- * Also, if a handle was instantiated, do not change
- * the TRNG parameters.
- */
- if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
- dev_info(dev,
- "Entropy delay = %u\n",
- ent_delay);
- kick_trng(pdev, ent_delay);
- ent_delay += 400;
- }
- /*
- * if instantiate_rng(...) fails, the loop will rerun
- * and the kick_trng(...) function will modify the
- * upper and lower limits of the entropy sampling
- * interval, leading to a successful initialization of
- * the RNG.
- */
- ret = instantiate_rng(dev, inst_handles,
- gen_sk);
- if (ret == -EAGAIN)
- /*
- * if here, the loop will rerun,
- * so don't hog the CPU
- */
- cpu_relax();
- } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
- if (ret) {
- dev_err(dev, "failed to instantiate RNG");
- return ret;
- }
- /*
- * Set handles initialized by this module as the complement of
- * the already initialized ones
- */
- ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK;
+ if (ctrlpriv->era < 10) {
+ ctrlpriv->blob_present = ctrlpriv->blob_present &&
+ (rd_reg32(&perfmon->cha_num_ls) & CHA_ID_LS_AES_MASK);
+ } else {
+ struct version_regs __iomem *vreg;
- /* Enable RDB bit so that RNG works faster */
- clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
+ vreg = ctrlpriv->total_jobrs ?
+ (struct version_regs __iomem *)&ctrlpriv->jr[0]->vreg :
+ (struct version_regs __iomem *)&ctrl->vreg;
+
+ ctrlpriv->blob_present = ctrlpriv->blob_present &&
+ (rd_reg32(&vreg->aesa) & CHA_VER_MISC_AES_NUM_MASK);
}
- /* NOTE: RTIC detection ought to go here, around Si time */
+ if (reg_access) {
+ ret = caam_ctrl_rng_init(dev);
+ if (ret)
+ return ret;
+ }
- caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
- (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);
+ caam_id = (u64)rd_reg32(&perfmon->caam_id_ms) << 32 |
+ (u64)rd_reg32(&perfmon->caam_id_ls);
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
@@ -914,6 +1160,7 @@ static struct platform_driver caam_driver = {
.driver = {
.name = "caam",
.of_match_table = caam_match,
+ .pm = pm_ptr(&caam_ctrl_pm_ops),
},
.probe = caam_probe,
};
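To make the retry behaviour in caam_ctrl_rng_init() above concrete (using the RTSDCTL_ENT_DLY_* limits from regs.h): kick_trng() is called with the minimum entropy delay of 3200 system clocks on the first pass and, after an -EAGAIN, with 6400 on the retry; the doubled value then reaches RTSDCTL_ENT_DLY_MAX (12800) and the loop stops. When it raises the delay, kick_trng() also programs the minimum frequency count to one quarter of it (3200 >> 2 = 800, then 1600), whereas the old code crept up in +400 steps. On i.MX6SX the characterised worst-case delay of 12000 is used directly and the loop exits after a single attempt.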
diff --git a/drivers/crypto/caam/debugfs.c b/drivers/crypto/caam/debugfs.c
index 806bb20d2aa1..718352b7afb5 100644
--- a/drivers/crypto/caam/debugfs.c
+++ b/drivers/crypto/caam/debugfs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-/* Copyright 2019 NXP */
+/* Copyright 2019, 2023 NXP */
#include <linux/debugfs.h>
#include "compat.h"
@@ -22,7 +22,7 @@ static int caam_debugfs_u32_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
/*
* This is a counter for the number of times the congestion group (where all
* the request and response queueus are) reached congestion. Incremented
@@ -42,16 +42,15 @@ void caam_debugfs_qi_init(struct caam_drv_private *ctrlpriv)
}
#endif
-void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root)
+void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
+ struct caam_perfmon __force *perfmon,
+ struct dentry *root)
{
- struct caam_perfmon *perfmon;
-
/*
* FIXME: needs better naming distinction, as some amalgamation of
* "caam" and nprop->full_name. The OF name isn't distinctive,
* but does separate instances
*/
- perfmon = (struct caam_perfmon __force *)&ctrlpriv->ctrl->perfmon;
ctrlpriv->ctl = debugfs_create_dir("ctl", root);
@@ -78,6 +77,9 @@ void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root)
debugfs_create_file("fault_status", 0444, ctrlpriv->ctl,
&perfmon->status, &caam_fops_u32_ro);
+ if (ctrlpriv->optee_en)
+ return;
+
/* Internal covering keys (useful in non-secure mode only) */
ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
diff --git a/drivers/crypto/caam/debugfs.h b/drivers/crypto/caam/debugfs.h
index 661d768acdbf..ef238c71f92a 100644
--- a/drivers/crypto/caam/debugfs.h
+++ b/drivers/crypto/caam/debugfs.h
@@ -1,21 +1,24 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
-/* Copyright 2019 NXP */
+/* Copyright 2019, 2023 NXP */
#ifndef CAAM_DEBUGFS_H
#define CAAM_DEBUGFS_H
struct dentry;
struct caam_drv_private;
+struct caam_perfmon;
#ifdef CONFIG_DEBUG_FS
-void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root);
+void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
+ struct caam_perfmon __force *perfmon, struct dentry *root);
#else
static inline void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
+ struct caam_perfmon __force *perfmon,
struct dentry *root)
{}
#endif
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_CAAM_QI)
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
void caam_debugfs_qi_congested(void);
void caam_debugfs_qi_init(struct caam_drv_private *ctrlpriv);
#else
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index e13470901586..c28e94fcb8c7 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -4,7 +4,7 @@
* Definitions to support CAAM descriptor instruction generation
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2018 NXP
+ * Copyright 2018, 2025 NXP
*/
#ifndef DESC_H
@@ -162,6 +162,7 @@
* Enhanced Encryption of Key
*/
#define KEY_EKT 0x00100000
+#define KEY_EKT_OFFSET 20
/*
* Encrypted with Trusted Key
@@ -403,6 +404,7 @@
#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_CCM_JKEK (0x14 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
@@ -1001,6 +1003,11 @@
#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63
#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65
+/* Blob protocol protinfo bits */
+
+#define OP_PCL_BLOB_BLACK 0x0004
+#define OP_PCL_BLOB_EKT 0x0100
+
/* For DTLS - OP_PCLID_DTLS */
#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 62ce6421bb3f..2a29dd2c9c8a 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -3,7 +3,7 @@
* caam descriptor construction helper functions
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
- * Copyright 2019 NXP
+ * Copyright 2019, 2025 NXP
*/
#ifndef DESC_CONSTR_H
@@ -163,7 +163,8 @@ static inline void append_data(u32 * const desc, const void *data, int len)
{
u32 *offset = desc_end(desc);
- if (len) /* avoid sparse warning: memcpy with byte count of 0 */
+ /* Avoid gcc warning: memcpy with data == NULL */
+ if (!IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG) || data)
memcpy(offset, data, len);
(*desc) = cpu_to_caam32(caam32_to_cpu(*desc) +
@@ -497,17 +498,23 @@ do { \
* @keylen: length of the provided algorithm key, in bytes
* @keylen_pad: padded length of the provided algorithm key, in bytes
* @key_dma: dma (bus) address where algorithm key resides
+ * @protected_key_dma: dma (bus) address where protected key resides
* @key_virt: virtual address where algorithm key resides
* @key_inline: true - key can be inlined in the descriptor; false - key is
* referenced by the descriptor
+ * @plain_keylen: size of the key to be loaded by the CAAM
+ * @key_cmd_opt: optional parameters for KEY command
*/
struct alginfo {
u32 algtype;
unsigned int keylen;
unsigned int keylen_pad;
dma_addr_t key_dma;
+ dma_addr_t protected_key_dma;
const void *key_virt;
bool key_inline;
+ u32 plain_keylen;
+ u32 key_cmd_opt;
};
/**
diff --git a/drivers/crypto/caam/dpseci-debugfs.c b/drivers/crypto/caam/dpseci-debugfs.c
index 0eca8c2fd916..020a9d8a8a07 100644
--- a/drivers/crypto/caam/dpseci-debugfs.c
+++ b/drivers/crypto/caam/dpseci-debugfs.c
@@ -8,7 +8,7 @@
static int dpseci_dbg_fqs_show(struct seq_file *file, void *offset)
{
- struct dpaa2_caam_priv *priv = (struct dpaa2_caam_priv *)file->private;
+ struct dpaa2_caam_priv *priv = file->private;
u32 fqid, fcnt, bcnt;
int i, err;
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 7d45b21bd55a..a88da0d31b23 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -4,7 +4,7 @@
* Private/internal definitions between modules
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2019 NXP
+ * Copyright 2019, 2023 NXP
*/
#ifndef INTERN_H
@@ -47,6 +47,16 @@ struct caam_jrentry_info {
u32 desc_size; /* Stored size for postprocessing, header derived */
};
+struct caam_jr_state {
+ dma_addr_t inpbusaddr;
+ dma_addr_t outbusaddr;
+};
+
+struct caam_jr_dequeue_params {
+ struct device *dev;
+ int enable_itr;
+};
+
/* Private sub-storage for a single JobR */
struct caam_drv_private_jr {
struct list_head list_node; /* Job Ring device list */
@@ -54,6 +64,7 @@ struct caam_drv_private_jr {
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
struct tasklet_struct irqtask;
+ struct caam_jr_dequeue_params tasklet_params;
int irq; /* One per queue */
bool hwrng;
@@ -71,6 +82,15 @@ struct caam_drv_private_jr {
int tail; /* entinfo (s/w ring) tail index */
void *outring; /* Base of output ring, DMA-safe */
struct crypto_engine *engine;
+
+ struct caam_jr_state state; /* State of the JR during PM */
+};
+
+struct caam_ctl_state {
+ struct masterid deco_mid[16];
+ struct masterid jr_mid[4];
+ u32 mcr;
+ u32 scfgr;
};
/*
@@ -92,7 +112,11 @@ struct caam_drv_private {
*/
u8 total_jobrs; /* Total Job Rings in device */
u8 qi_present; /* Nonzero if QI present in device */
+ u8 blob_present; /* Nonzero if BLOB support present in device */
u8 mc_en; /* Nonzero if MC f/w is active */
+ u8 optee_en; /* Nonzero if OP-TEE f/w is active */
+ u8 no_page0; /* Nonzero if register page 0 is not controlled by Linux */
+ bool pr_support; /* RNG prediction resistance available */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
int era; /* CAAM Era (internal HW revision) */
@@ -113,6 +137,9 @@ struct caam_drv_private {
struct dentry *ctl; /* controller dir */
struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
#endif
+
+ int caam_off_during_pm; /* If the CAAM is reset after suspend */
+ struct caam_ctl_state state; /* State of the CTL during PM */
};
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API
@@ -185,7 +212,22 @@ static inline void caam_rng_exit(struct device *dev) {}
#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PRNG_API
+
+int caam_prng_register(struct device *dev);
+void caam_prng_unregister(void *data);
+
+#else
+
+static inline int caam_prng_register(struct device *dev)
+{
+ return 0;
+}
+
+static inline void caam_prng_unregister(void *data) {}
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PRNG_API */
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
int caam_qi_algapi_init(struct device *dev);
void caam_qi_algapi_exit(void);
@@ -201,7 +243,7 @@ static inline void caam_qi_algapi_exit(void)
{
}
-#endif /* CONFIG_CAAM_QI */
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI */
static inline u64 caam_get_dma_mask(struct device *dev)
{
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 7f2b1101f567..0ef00df9730e 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -4,11 +4,12 @@
* JobR backend functionality
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
- * Copyright 2019 NXP
+ * Copyright 2019, 2023 NXP
*/
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include "compat.h"
#include "ctrl.h"
@@ -39,6 +40,7 @@ static void register_algs(struct caam_drv_private_jr *jrpriv,
caam_algapi_hash_init(dev);
caam_pkc_init(dev);
jrpriv->hwrng = !caam_rng_init(dev);
+ caam_prng_register(dev);
caam_qi_algapi_init(dev);
algs_unlock:
@@ -53,7 +55,7 @@ static void unregister_algs(void)
goto algs_unlock;
caam_qi_algapi_exit();
-
+ caam_prng_unregister(NULL);
caam_pkc_exit();
caam_algapi_hash_exit();
caam_algapi_exit();
@@ -71,19 +73,27 @@ static void caam_jr_crypto_engine_exit(void *data)
crypto_engine_exit(jrpriv->engine);
}
-static int caam_reset_hw_jr(struct device *dev)
+/*
+ * Put the CAAM in quiesce, i.e. stop processing
+ *
+ * Must be called with itr disabled
+ */
+static int caam_jr_stop_processing(struct device *dev, u32 jrcr_bits)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
unsigned int timeout = 100000;
- /*
- * mask interrupts since we are going to poll
- * for reset completion status
- */
- clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
+ /* Check the current status */
+ if (rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_INPROGRESS)
+ goto wait_quiesce_completion;
- /* initiate flush (required prior to reset) */
- wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+ /* Reset the field */
+ clrsetbits_32(&jrp->rregs->jrintstatus, JRINT_ERR_HALT_MASK, 0);
+
+ /* initiate flush / park (required prior to reset) */
+ wr_reg32(&jrp->rregs->jrcommand, jrcr_bits);
+
+wait_quiesce_completion:
while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
JRINT_ERR_HALT_INPROGRESS) && --timeout)
cpu_relax();
@@ -94,8 +104,52 @@ static int caam_reset_hw_jr(struct device *dev)
return -EIO;
}
+ return 0;
+}
+
+/*
+ * Flush the job ring, so that running jobs are stopped and queued jobs are
+ * invalidated, and the CAAM will no longer fetch from the input ring.
+ *
+ * Must be called with itr disabled
+ */
+static int caam_jr_flush(struct device *dev)
+{
+ return caam_jr_stop_processing(dev, JRCR_RESET);
+}
+
+/* The resume can be used after a park or a flush if CAAM has not been reset */
+static int caam_jr_restart_processing(struct device *dev)
+{
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ u32 halt_status = rd_reg32(&jrp->rregs->jrintstatus) &
+ JRINT_ERR_HALT_MASK;
+
+ /* Check that the flush/park is completed */
+ if (halt_status != JRINT_ERR_HALT_COMPLETE)
+ return -1;
+
+ /* Resume processing of jobs */
+ clrsetbits_32(&jrp->rregs->jrintstatus, 0, JRINT_ERR_HALT_COMPLETE);
+
+ return 0;
+}
+
+static int caam_reset_hw_jr(struct device *dev)
+{
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ unsigned int timeout = 100000;
+ int err;
+ /*
+ * mask interrupts since we are going to poll
+ * for reset completion status
+ */
+ clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
+ err = caam_jr_flush(dev);
+ if (err)
+ return err;
+
/* initiate reset */
- timeout = 100000;
wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
cpu_relax();
@@ -126,7 +180,7 @@ static int caam_jr_shutdown(struct device *dev)
return ret;
}
-static int caam_jr_remove(struct platform_device *pdev)
+static void caam_jr_remove(struct platform_device *pdev)
{
int ret;
struct device *jrdev;
@@ -139,11 +193,14 @@ static int caam_jr_remove(struct platform_device *pdev)
caam_rng_exit(jrdev->parent);
/*
- * Return EBUSY if job ring already allocated.
+ * If a job ring is still allocated, there is trouble ahead. Once
+ * caam_jr_remove() has returned, jrpriv will be freed and the registers
+ * will get unmapped. So any user of such a job ring will probably
+ * crash.
*/
if (atomic_read(&jrpriv->tfm_count)) {
- dev_err(jrdev, "Device is busy\n");
- return -EBUSY;
+ dev_alert(jrdev, "Device is busy; consumers might start to crash\n");
+ return;
}
/* Unregister JR-based RNG & crypto algorithms */
@@ -158,8 +215,6 @@ static int caam_jr_remove(struct platform_device *pdev)
ret = caam_jr_shutdown(jrdev);
if (ret)
dev_err(jrdev, "Failed to shut down job ring\n");
-
- return ret;
}
/* Main per-ring interrupt handler */
@@ -174,7 +229,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
* tasklet if jobs done.
*/
irqstate = rd_reg32(&jrp->rregs->jrintstatus);
- if (!irqstate)
+ if (!(irqstate & JRINT_JR_INT))
return IRQ_NONE;
/*
@@ -204,7 +259,8 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
static void caam_jr_dequeue(unsigned long devarg)
{
int hw_idx, sw_idx, i, head, tail;
- struct device *dev = (struct device *)devarg;
+ struct caam_jr_dequeue_params *params = (void *)devarg;
+ struct device *dev = params->dev;
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
@@ -278,8 +334,9 @@ static void caam_jr_dequeue(unsigned long devarg)
outring_used--;
}
- /* reenable / unmask IRQs */
- clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
+ if (params->enable_itr)
+ /* reenable / unmask IRQs */
+ clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
}
/**
@@ -404,8 +461,16 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
* Guarantee that the descriptor's DMA address has been written to
* the next slot in the ring before the write index is updated, since
* other cores may update this index independently.
+ *
+ * Under heavy DDR load, smp_wmb() or dma_wmb() can fail to make the input
+ * ring update visible before the CAAM starts reading it. The CAAM will
+ * then process an old descriptor address again and put it in the
+ * output ring. This will make caam_jr_dequeue() fail, since that
+ * old descriptor is not in the software ring.
+ * To fix this, use wmb() which works on the full system instead of
+ * inner/outer shareable domains.
*/
- smp_wmb();
+ wmb();
jrp->head = (head + 1) & (JOBR_DEPTH - 1);
@@ -429,6 +494,29 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
}
EXPORT_SYMBOL(caam_jr_enqueue);
+static void caam_jr_init_hw(struct device *dev, dma_addr_t inpbusaddr,
+ dma_addr_t outbusaddr)
+{
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+
+ wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
+ wr_reg64(&jrp->rregs->outring_base, outbusaddr);
+ wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
+ wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
+
+ /* Select interrupt coalescing parameters */
+ clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
+ (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
+ (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
+}
+
+static void caam_jr_reset_index(struct caam_drv_private_jr *jrp)
+{
+ jrp->out_ring_read_index = 0;
+ jrp->head = 0;
+ jrp->tail = 0;
+}
+
/*
* Init JobR independent of platform property detection
*/
@@ -465,25 +553,16 @@ static int caam_jr_init(struct device *dev)
jrp->entinfo[i].desc_addr_dma = !0;
/* Setup rings */
- jrp->out_ring_read_index = 0;
- jrp->head = 0;
- jrp->tail = 0;
-
- wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
- wr_reg64(&jrp->rregs->outring_base, outbusaddr);
- wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
- wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
-
+ caam_jr_reset_index(jrp);
jrp->inpring_avail = JOBR_DEPTH;
+ caam_jr_init_hw(dev, inpbusaddr, outbusaddr);
spin_lock_init(&jrp->inplock);
- /* Select interrupt coalescing parameters */
- clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
- (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
- (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
-
- tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
+ jrp->tasklet_params.dev = dev;
+ jrp->tasklet_params.enable_itr = 1;
+ tasklet_init(&jrp->irqtask, caam_jr_dequeue,
+ (unsigned long)&jrp->tasklet_params);
/* Connect job ring interrupt handler. */
error = devm_request_irq(dev, jrp->irq, caam_jr_interrupt, IRQF_SHARED,
@@ -550,8 +629,7 @@ static int caam_jr_probe(struct platform_device *pdev)
}
/* Initialize crypto engine */
- jrpriv->engine = crypto_engine_alloc_init_and_set(jrdev, true, NULL,
- false,
+ jrpriv->engine = crypto_engine_alloc_init_and_set(jrdev, true, false,
CRYPTO_ENGINE_MAX_QLEN);
if (!jrpriv->engine) {
dev_err(jrdev, "Could not init crypto-engine\n");
@@ -594,11 +672,134 @@ static int caam_jr_probe(struct platform_device *pdev)
atomic_set(&jrpriv->tfm_count, 0);
+ device_init_wakeup(&pdev->dev, 1);
+ device_set_wakeup_enable(&pdev->dev, false);
+
register_algs(jrpriv, jrdev->parent);
return 0;
}
+static void caam_jr_get_hw_state(struct device *dev)
+{
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+
+ jrp->state.inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
+ jrp->state.outbusaddr = rd_reg64(&jrp->rregs->outring_base);
+}
+
+static int caam_jr_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct caam_drv_private_jr *jrpriv = platform_get_drvdata(pdev);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev->parent);
+ struct caam_jr_dequeue_params suspend_params = {
+ .dev = dev,
+ .enable_itr = 0,
+ };
+
+ /* Remove the node from Physical JobR list maintained by driver */
+ spin_lock(&driver_data.jr_alloc_lock);
+ list_del(&jrpriv->list_node);
+ spin_unlock(&driver_data.jr_alloc_lock);
+
+ if (jrpriv->hwrng)
+ caam_rng_exit(dev->parent);
+
+ if (ctrlpriv->caam_off_during_pm) {
+ int err;
+
+ tasklet_disable(&jrpriv->irqtask);
+
+ /* mask itr to call flush */
+ clrsetbits_32(&jrpriv->rregs->rconfig_lo, 0, JRCFG_IMSK);
+
+ /* Invalidate jobs in process */
+ err = caam_jr_flush(dev);
+ if (err) {
+ dev_err(dev, "Failed to flush\n");
+ return err;
+ }
+
+ /* Dequeue the flushed jobs */
+ caam_jr_dequeue((unsigned long)&suspend_params);
+
+ /* Save state */
+ caam_jr_get_hw_state(dev);
+ } else if (device_may_wakeup(&pdev->dev)) {
+ enable_irq_wake(jrpriv->irq);
+ }
+
+ return 0;
+}
+
+static int caam_jr_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct caam_drv_private_jr *jrpriv = platform_get_drvdata(pdev);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev->parent);
+
+ if (ctrlpriv->caam_off_during_pm) {
+ u64 inp_addr;
+ int err;
+
+ /*
+ * Check if the CAAM has been reset by checking the address of
+ * the input ring
+ */
+ inp_addr = rd_reg64(&jrpriv->rregs->inpring_base);
+ if (inp_addr != 0) {
+ /* JR still has some configuration */
+ if (inp_addr == jrpriv->state.inpbusaddr) {
+ /* JR has not been reset */
+ err = caam_jr_restart_processing(dev);
+ if (err) {
+ dev_err(dev,
+ "Restart processing failed\n");
+ return err;
+ }
+
+ tasklet_enable(&jrpriv->irqtask);
+
+ clrsetbits_32(&jrpriv->rregs->rconfig_lo,
+ JRCFG_IMSK, 0);
+
+ goto add_jr;
+ } else if (ctrlpriv->optee_en) {
+ /* JR has been used by OPTEE, reset it */
+ err = caam_reset_hw_jr(dev);
+ if (err) {
+ dev_err(dev, "Failed to reset JR\n");
+ return err;
+ }
+ } else {
+ /* No explanation, return error */
+ return -EIO;
+ }
+ }
+
+ caam_jr_reset_index(jrpriv);
+ caam_jr_init_hw(dev, jrpriv->state.inpbusaddr,
+ jrpriv->state.outbusaddr);
+
+ tasklet_enable(&jrpriv->irqtask);
+ } else if (device_may_wakeup(&pdev->dev)) {
+ disable_irq_wake(jrpriv->irq);
+ }
+
+add_jr:
+ spin_lock(&driver_data.jr_alloc_lock);
+ list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
+ spin_unlock(&driver_data.jr_alloc_lock);
+
+ if (jrpriv->hwrng)
+ jrpriv->hwrng = !caam_rng_init(dev->parent);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(caam_jr_pm_ops, caam_jr_suspend, caam_jr_resume);
+
static const struct of_device_id caam_jr_match[] = {
{
.compatible = "fsl,sec-v4.0-job-ring",
@@ -614,9 +815,11 @@ static struct platform_driver caam_jr_driver = {
.driver = {
.name = "caam_jr",
.of_match_table = caam_jr_match,
+ .pm = pm_ptr(&caam_jr_pm_ops),
},
.probe = caam_jr_probe,
.remove = caam_jr_remove,
+ .shutdown = caam_jr_remove,
};
static int __init jr_driver_init(void)
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index b0e8a4939b4f..88cc4fe2a585 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -64,7 +64,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
if (local_max > max_keylen)
return -EINVAL;
- desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+ desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
return ret;
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
index 8ccc22075043..4b1bcf53f7ac 100644
--- a/drivers/crypto/caam/pdb.h
+++ b/drivers/crypto/caam/pdb.h
@@ -144,7 +144,7 @@ struct ipsec_encap_pdb {
};
u32 spi;
u32 ip_hdr_len;
- u32 ip_hdr[0];
+ u32 ip_hdr[];
};
/**
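As an aside on the ip_hdr[0] -> ip_hdr[] conversion above: with a real C99 flexible array member, a variable-length encap PDB can be sized with struct_size() instead of open-coded arithmetic. A minimal sketch (illustrative only, not part of this patch, and assuming pdb.h is included):

#include <linux/overflow.h>
#include <linux/slab.h>

/* Illustration only: allocate a PDB with room for 'hdr_words' words of
 * optional IP header material after the fixed part.
 */
static struct ipsec_encap_pdb *example_alloc_encap_pdb(unsigned int hdr_words)
{
	struct ipsec_encap_pdb *pdb;

	pdb = kzalloc(struct_size(pdb, ip_hdr, hdr_words), GFP_KERNEL);
	return pdb;
}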
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 8163f5df8ebf..1e731ed8702b 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -8,7 +8,14 @@
*/
#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include <soc/fsl/qman.h>
#include "debugfs.h"
@@ -50,7 +57,7 @@ struct caam_napi {
*/
struct caam_qi_pcpu_priv {
struct caam_napi caam_napi;
- struct net_device net_dev;
+ struct net_device *net_dev;
struct qman_fq *rsp_fq;
} ____cacheline_aligned;
@@ -75,7 +82,7 @@ bool caam_congested __read_mostly;
EXPORT_SYMBOL(caam_congested);
/*
- * This is a a cache of buffers, from which the users of CAAM QI driver
+ * This is a cache of buffers, from which the users of CAAM QI driver
* can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
* doing malloc on the hotpath.
* NOTE: A more elegant solution would be to have some headroom in the frames
@@ -115,12 +122,12 @@ int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
qm_fd_addr_set64(&fd, addr);
do {
+ refcount_inc(&req->drv_ctx->refcnt);
ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
- if (likely(!ret)) {
- refcount_inc(&req->drv_ctx->refcnt);
+ if (likely(!ret))
return 0;
- }
+ refcount_dec(&req->drv_ctx->refcnt);
if (ret != -EBUSY)
break;
num_retries++;
@@ -137,7 +144,7 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
{
const struct qm_fd *fd;
struct caam_drv_req *drv_req;
- struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+ struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev->dev);
struct caam_drv_private *priv = dev_get_drvdata(qidev);
fd = &msg->ern.fd;
@@ -435,11 +442,8 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
if (!cpumask_test_cpu(*cpu, cpus)) {
int *pcpu = &get_cpu_var(last_cpu);
- *pcpu = cpumask_next(*pcpu, cpus);
- if (*pcpu >= nr_cpu_ids)
- *pcpu = cpumask_first(cpus);
+ *pcpu = cpumask_next_wrap(*pcpu, cpus);
*cpu = *pcpu;
-
put_cpu_var(last_cpu);
}
drv_ctx->cpu = *cpu;
@@ -523,6 +527,7 @@ static void caam_qi_shutdown(void *data)
if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
+ free_netdev(per_cpu(pcpu_qipriv.net_dev, i));
}
qman_delete_cgr_safe(&priv->cgr);
@@ -566,7 +571,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
struct caam_drv_req *drv_req;
const struct qm_fd *fd;
- struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+ struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev->dev);
struct caam_drv_private *priv = dev_get_drvdata(qidev);
u32 status;
@@ -614,7 +619,7 @@ static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
struct qman_fq *fq;
int ret;
- fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
+ fq = kzalloc(sizeof(*fq), GFP_KERNEL);
if (!fq)
return -ENOMEM;
@@ -711,29 +716,43 @@ static void free_rsp_fqs(void)
kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
}
+static void free_caam_qi_pcpu_netdev(const cpumask_t *cpus)
+{
+ struct caam_qi_pcpu_priv *priv;
+ int i;
+
+ for_each_cpu(i, cpus) {
+ priv = per_cpu_ptr(&pcpu_qipriv, i);
+ free_netdev(priv->net_dev);
+ }
+}
+
int caam_qi_init(struct platform_device *caam_pdev)
{
int err, i;
- struct device *ctrldev = &caam_pdev->dev, *qidev;
+ struct device *qidev = &caam_pdev->dev;
struct caam_drv_private *ctrlpriv;
const cpumask_t *cpus = qman_affine_cpus();
+ cpumask_var_t clean_mask;
+
+ err = -ENOMEM;
+ if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL))
+ goto fail_cpumask;
- ctrlpriv = dev_get_drvdata(ctrldev);
- qidev = ctrldev;
+ ctrlpriv = dev_get_drvdata(qidev);
/* Initialize the congestion detection */
err = init_cgr(qidev);
if (err) {
dev_err(qidev, "CGR initialization failed: %d\n", err);
- return err;
+ goto fail_cgr;
}
/* Initialise response FQs */
err = alloc_rsp_fqs(qidev);
if (err) {
dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
- free_rsp_fqs();
- return err;
+ goto fail_fqs;
}
/*
@@ -744,31 +763,51 @@ int caam_qi_init(struct platform_device *caam_pdev)
struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
struct caam_napi *caam_napi = &priv->caam_napi;
struct napi_struct *irqtask = &caam_napi->irqtask;
- struct net_device *net_dev = &priv->net_dev;
+ struct net_device *net_dev;
+ net_dev = alloc_netdev_dummy(0);
+ if (!net_dev) {
+ err = -ENOMEM;
+ goto fail;
+ }
+ cpumask_set_cpu(i, clean_mask);
+ priv->net_dev = net_dev;
net_dev->dev = *qidev;
- INIT_LIST_HEAD(&net_dev->napi_list);
- netif_napi_add(net_dev, irqtask, caam_qi_poll,
- CAAM_NAPI_WEIGHT);
+ netif_napi_add_tx_weight(net_dev, irqtask, caam_qi_poll,
+ CAAM_NAPI_WEIGHT);
napi_enable(irqtask);
}
- qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
- SLAB_CACHE_DMA, NULL);
+ qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE,
+ dma_get_cache_alignment(), 0, NULL);
if (!qi_cache) {
dev_err(qidev, "Can't allocate CAAM cache\n");
- free_rsp_fqs();
- return -ENOMEM;
+ err = -ENOMEM;
+ goto fail;
}
caam_debugfs_qi_init(ctrlpriv);
- err = devm_add_action_or_reset(qidev, caam_qi_shutdown, ctrlpriv);
+ err = devm_add_action_or_reset(qidev, caam_qi_shutdown, qidev);
if (err)
- return err;
+ goto fail2;
dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
- return 0;
+ goto free_cpumask;
+
+fail2:
+ kmem_cache_destroy(qi_cache);
+fail:
+ free_caam_qi_pcpu_netdev(clean_mask);
+fail_fqs:
+ free_rsp_fqs();
+ qman_delete_cgr_safe(&qipriv.cgr);
+ qman_release_cgrid(qipriv.cgr.cgrid);
+fail_cgr:
+free_cpumask:
+ free_cpumask_var(clean_mask);
+fail_cpumask:
+ return err;
}
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index 5894f16f8fe3..a96e3d213c06 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -9,6 +9,8 @@
#ifndef __QI_H__
#define __QI_H__
+#include <crypto/algapi.h>
+#include <linux/compiler_attributes.h>
#include <soc/fsl/qman.h>
#include "compat.h"
#include "desc.h"
@@ -58,8 +60,10 @@ enum optype {
* @qidev: device pointer for CAAM/QI backend
*/
struct caam_drv_ctx {
- u32 prehdr[2];
- u32 sh_desc[MAX_SDLEN];
+ struct {
+ u32 prehdr[2];
+ u32 sh_desc[MAX_SDLEN];
+ } __aligned(CRYPTO_DMA_ALIGN);
dma_addr_t context_a;
struct qman_fq *req_fq;
struct qman_fq *rsp_fq;
@@ -67,7 +71,7 @@ struct caam_drv_ctx {
int cpu;
enum optype op_type;
struct device *qidev;
-} ____cacheline_aligned;
+};
/**
* caam_drv_req - The request structure the driver application should fill while
@@ -88,7 +92,7 @@ struct caam_drv_req {
struct caam_drv_ctx *drv_ctx;
caam_qi_cbk cbk;
void *app_ctx;
-} ____cacheline_aligned;
+} __aligned(CRYPTO_DMA_ALIGN);
/**
* caam_drv_ctx_init - Initialise a CAAM/QI driver context
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index af61f3a2c0d4..873df9de9890 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -3,7 +3,7 @@
* CAAM hardware register-level view
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2018 NXP
+ * Copyright 2018, 2023 NXP
*/
#ifndef REGS_H
@@ -320,7 +320,11 @@ struct version_regs {
#define CHA_VER_VID_MASK (0xffull << CHA_VER_VID_SHIFT)
/* CHA Miscellaneous Information - AESA_MISC specific */
-#define CHA_VER_MISC_AES_GCM BIT(1 + CHA_VER_MISC_SHIFT)
+#define CHA_VER_MISC_AES_NUM_MASK GENMASK(7, 0)
+#define CHA_VER_MISC_AES_GCM BIT(1 + CHA_VER_MISC_SHIFT)
+
+/* CHA Miscellaneous Information - PKHA_MISC specific */
+#define CHA_VER_MISC_PKHA_NO_CRYPT BIT(7 + CHA_VER_MISC_SHIFT)
/*
* caam_perfmon - Performance Monitor/Secure Memory Status/
@@ -411,6 +415,7 @@ struct caam_perfmon {
#define CTPR_MS_PG_SZ_MASK 0x10
#define CTPR_MS_PG_SZ_SHIFT 4
u32 comp_parms_ms; /* CTPR - Compile Parameters Register */
+#define CTPR_LS_BLOB BIT(1)
u32 comp_parms_ls; /* CTPR - Compile Parameters Register */
u64 rsvd1[2];
@@ -421,6 +426,9 @@ struct caam_perfmon {
u32 rsvd2;
#define CSTA_PLEND BIT(10)
#define CSTA_ALT_PLEND BIT(18)
+#define CSTA_MOO GENMASK(9, 8)
+#define CSTA_MOO_SECURE 1
+#define CSTA_MOO_TRUSTED 2
u32 status; /* CSTA - CAAM Status */
u64 rsvd3;
@@ -451,12 +459,6 @@ struct masterid {
u32 liodn_ls; /* LIODN for non-sequence and seq access */
};
-/* Partition ID for DMA configuration */
-struct partid {
- u32 rsvd1;
- u32 pidr; /* partition ID, DECO */
-};
-
/* RNGB test mode (replicated twice in some configurations) */
/* Padded out to 0x100 */
struct rngtst {
@@ -515,6 +517,8 @@ struct rng4tst {
#define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
#define RTSDCTL_ENT_DLY_MIN 3200
#define RTSDCTL_ENT_DLY_MAX 12800
+#define RTSDCTL_SAMP_SIZE_MASK 0xffff
+#define RTSDCTL_SAMP_SIZE_VAL 512
u32 rtsdctl; /* seed control register */
union {
u32 rtsblim; /* PRGM=1: sparse bit limit register */
@@ -526,7 +530,15 @@ struct rng4tst {
u32 rtfrqmax; /* PRGM=1: freq. count max. limit register */
u32 rtfrqcnt; /* PRGM=0: freq. count register */
};
- u32 rsvd1[40];
+ union {
+ u32 rtscmc; /* statistical check run monobit count */
+ u32 rtscml; /* statistical check run monobit limit */
+ };
+ union {
+ u32 rtscrc[6]; /* statistical check run length count */
+ u32 rtscrl[6]; /* statistical check run length limit */
+ };
+ u32 rsvd1[33];
#define RDSTA_SKVT 0x80000000
#define RDSTA_SKVN 0x40000000
#define RDSTA_PR0 BIT(4)
@@ -572,8 +584,7 @@ struct caam_ctrl {
u32 deco_rsr; /* DECORSR - Deco Request Source */
u32 rsvd11;
u32 deco_rq; /* DECORR - DECO Request */
- struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */
- u32 rsvd5[22];
+ struct masterid deco_mid[16]; /* DECOxLIODNR - 1 per DECO */
/* DECO Availability/Reset Section 120-3ff */
u32 deco_avail; /* DAR - DECO availability */