author    Linus Torvalds <torvalds@linux-foundation.org>    2022-12-14 12:31:09 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2022-12-14 12:31:09 -0800
commit    64e7003c6b85626a533a67c1ba938b75a3db24e6 (patch)
tree      5e3e776d23a9520f51251b4838d4aa66d920dbff /drivers/crypto
parent    48ea09cddae0b794cde2070f106ef676703dbcd3 (diff)
parent    453de3eb08c4b7e31b3019a4b0cc3ebce51a6219 (diff)
Merge tag 'v6.2-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Optimise away self-test overhead when they are disabled
   - Support symmetric encryption via keyring keys in af_alg
   - Flip hwrng default_quality, the default is now maximum entropy

  Algorithms:
   - Add library version of aesgcm
   - CFI fixes for assembly code
   - Add arm/arm64 accelerated versions of sm3/sm4

  Drivers:
   - Remove assumption on arm64 that kmalloc is DMA-aligned
   - Fix selftest failures in rockchip
   - Add support for RK3328/RK3399 in rockchip
   - Add deflate support in qat
   - Merge ux500 into stm32
   - Add support for TEE for PCI ID 0x14CA in ccp
   - Add mt7986 support in mtk
   - Add MaxLinear platform support in inside-secure
   - Add NPCM8XX support in npcm"

* tag 'v6.2-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (184 commits)
  crypto: ux500/cryp - delete driver
  crypto: stm32/cryp - enable for use with Ux500
  crypto: stm32 - enable drivers to be used on Ux500
  dt-bindings: crypto: Let STM32 define Ux500 CRYP
  hwrng: geode - Fix PCI device refcount leak
  hwrng: amd - Fix PCI device refcount leak
  crypto: qce - Set DMA alignment explicitly
  crypto: octeontx2 - Set DMA alignment explicitly
  crypto: octeontx - Set DMA alignment explicitly
  crypto: keembay - Set DMA alignment explicitly
  crypto: safexcel - Set DMA alignment explicitly
  crypto: hisilicon/hpre - Set DMA alignment explicitly
  crypto: chelsio - Set DMA alignment explicitly
  crypto: ccree - Set DMA alignment explicitly
  crypto: ccp - Set DMA alignment explicitly
  crypto: cavium - Set DMA alignment explicitly
  crypto: img-hash - Fix variable dereferenced before check 'hdev->req'
  crypto: arm64/ghash-ce - use frame_push/pop macros consistently
  crypto: arm64/crct10dif - use frame_push/pop macros consistently
  crypto: arm64/aes-modes - use frame_push/pop macros consistently
  ...
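Note on the many "Set DMA alignment explicitly" commits: with the arm64 assumption that kmalloc() memory is DMA-aligned removed, a driver whose transform context is written by DMA must reserve alignment padding in its context size and fetch an aligned pointer through the new *_ctx_dma() helpers, as the caam/ccp/ccree hunks below do. A minimal sketch of the pattern, assuming a hypothetical my_ctx driver (crypto_aead_ctx_dma() and crypto_dma_padding() are the real helpers from this series; everything else here is illustrative):

    #include <crypto/internal/aead.h>   /* crypto_aead_ctx_dma() */
    #include <crypto/algapi.h>          /* crypto_dma_padding() */
    #include <linux/string.h>

    struct my_ctx {                     /* hypothetical driver context */
            u8 key[64];                 /* read by the DMA engine */
            unsigned int keylen;
    };

    static int my_setkey(struct crypto_aead *aead, const u8 *key,
                         unsigned int keylen)
    {
            /* DMA-aligned pointer, past any leading padding */
            struct my_ctx *ctx = crypto_aead_ctx_dma(aead);

            if (keylen > sizeof(ctx->key))
                    return -EINVAL;
            memcpy(ctx->key, key, keylen);
            ctx->keylen = keylen;
            return 0;
    }

    static void my_alg_init(struct aead_alg *alg)
    {
            /* reserve room for the alignment padding */
            alg->base.cra_ctxsize = sizeof(struct my_ctx) +
                                    crypto_dma_padding();
    }

The same shape recurs for skcipher and ahash transforms via crypto_skcipher_ctx_dma() and crypto_ahash_ctx_dma(), which is what the bulk of the caam changes below convert to.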
Diffstat (limited to 'drivers/crypto')
-rw-r--r--drivers/crypto/Kconfig19
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c1
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c2
-rw-r--r--drivers/crypto/amlogic/amlogic-gxl-core.c1
-rw-r--r--drivers/crypto/amlogic/amlogic-gxl.h2
-rw-r--r--drivers/crypto/atmel-ecc.c6
-rw-r--r--drivers/crypto/atmel-sha204a.c7
-rw-r--r--drivers/crypto/caam/blob_gen.c9
-rw-r--r--drivers/crypto/caam/caamalg.c72
-rw-r--r--drivers/crypto/caam/caamalg_qi.c52
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c173
-rw-r--r--drivers/crypto/caam/caamhash.c87
-rw-r--r--drivers/crypto/caam/caampkc.c50
-rw-r--r--drivers/crypto/caam/caamrng.c1
-rw-r--r--drivers/crypto/caam/regs.h3
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_algs.c10
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_aead.c12
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_mbx.c1
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c21
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-galois.c12
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-xts.c20
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes.c29
-rw-r--r--drivers/crypto/ccp/ccp-crypto-des3.c17
-rw-r--r--drivers/crypto/ccp/ccp-crypto-main.c15
-rw-r--r--drivers/crypto/ccp/ccp-crypto-rsa.c18
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c26
-rw-r--r--drivers/crypto/ccp/sp-pci.c11
-rw-r--r--drivers/crypto/ccree/cc_aead.c62
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c18
-rw-r--r--drivers/crypto/ccree/cc_debugfs.c2
-rw-r--r--drivers/crypto/ccree/cc_driver.c10
-rw-r--r--drivers/crypto/ccree/cc_hash.c86
-rw-r--r--drivers/crypto/chelsio/Kconfig2
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c49
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h6
-rw-r--r--drivers/crypto/hisilicon/Kconfig2
-rw-r--r--drivers/crypto/hisilicon/Makefile2
-rw-r--r--drivers/crypto/hisilicon/debugfs.c1147
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_crypto.c47
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_main.c17
-rw-r--r--drivers/crypto/hisilicon/qm.c1229
-rw-r--r--drivers/crypto/hisilicon/qm_common.h87
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c5
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c10
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c7
-rw-r--r--drivers/crypto/img-hash.c8
-rw-r--r--drivers/crypto/inside-secure/safexcel.c69
-rw-r--r--drivers/crypto/inside-secure/safexcel.h10
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c99
-rw-r--r--drivers/crypto/ixp4xx_crypto.c10
-rw-r--r--drivers/crypto/keembay/keembay-ocs-hcu-core.c26
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h2
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c3
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_algs.c69
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c79
-rw-r--r--drivers/crypto/n2_core.c6
-rw-r--r--drivers/crypto/nx/nx-842.h2
-rw-r--r--drivers/crypto/omap-sham.c2
-rw-r--r--drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c4
-rw-r--r--drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h2
-rw-r--r--drivers/crypto/qat/qat_4xxx/adf_drv.c146
-rw-r--r--drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c4
-rw-r--r--drivers/crypto/qat/qat_c3xxx/adf_drv.c2
-rw-r--r--drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c4
-rw-r--r--drivers/crypto/qat/qat_c3xxxvf/adf_drv.c2
-rw-r--r--drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c4
-rw-r--r--drivers/crypto/qat/qat_c62x/adf_drv.c2
-rw-r--r--drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c4
-rw-r--r--drivers/crypto/qat/qat_c62xvf/adf_drv.c2
-rw-r--r--drivers/crypto/qat/qat_common/Makefile8
-rw-r--r--drivers/crypto/qat/qat_common/adf_accel_devices.h14
-rw-r--r--drivers/crypto/qat/qat_common/adf_cfg_strings.h1
-rw-r--r--drivers/crypto/qat/qat_common/adf_common_drv.h9
-rw-r--r--drivers/crypto/qat/qat_common/adf_ctl_drv.c6
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen2_config.c206
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen2_config.h10
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen2_dc.c70
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen2_dc.h10
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen4_dc.c83
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen4_dc.h10
-rw-r--r--drivers/crypto/qat/qat_common/adf_init.c11
-rw-r--r--drivers/crypto/qat/qat_common/adf_sriov.c4
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_fw.h24
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_fw_comp.h404
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_hw.h66
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_hw_20_comp.h164
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_hw_20_comp_defs.h300
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c208
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs_send.h16
-rw-r--r--drivers/crypto/qat/qat_common/qat_asym_algs.c6
-rw-r--r--drivers/crypto/qat/qat_common/qat_bl.c383
-rw-r--r--drivers/crypto/qat/qat_common/qat_bl.h67
-rw-r--r--drivers/crypto/qat/qat_common/qat_comp_algs.c344
-rw-r--r--drivers/crypto/qat/qat_common/qat_comp_req.h123
-rw-r--r--drivers/crypto/qat/qat_common/qat_compression.c297
-rw-r--r--drivers/crypto/qat/qat_common/qat_compression.h37
-rw-r--r--drivers/crypto/qat/qat_common/qat_crypto.c120
-rw-r--r--drivers/crypto/qat/qat_common/qat_crypto.h55
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c4
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_drv.c2
-rw-r--r--drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c4
-rw-r--r--drivers/crypto/qat/qat_dh895xccvf/adf_drv.c2
-rw-r--r--drivers/crypto/qce/aead.c22
-rw-r--r--drivers/crypto/qce/common.c5
-rw-r--r--drivers/crypto/qce/sha.c18
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto.c505
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto.h107
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_ahash.c267
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_skcipher.c543
-rw-r--r--drivers/crypto/stm32/Kconfig4
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c417
-rw-r--r--drivers/crypto/talitos.c6
-rw-r--r--drivers/crypto/talitos.h4
-rw-r--r--drivers/crypto/ux500/Kconfig10
-rw-r--r--drivers/crypto/ux500/Makefile1
-rw-r--r--drivers/crypto/ux500/cryp/Makefile10
-rw-r--r--drivers/crypto/ux500/cryp/cryp.c394
-rw-r--r--drivers/crypto/ux500/cryp/cryp.h315
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c1600
-rw-r--r--drivers/crypto/ux500/cryp/cryp_irq.c45
-rw-r--r--drivers/crypto/ux500/cryp/cryp_irq.h31
-rw-r--r--drivers/crypto/ux500/cryp/cryp_irqp.h125
-rw-r--r--drivers/crypto/ux500/cryp/cryp_p.h122
-rw-r--r--drivers/crypto/virtio/virtio_crypto_akcipher_algs.c5
125 files changed, 5988 insertions, 5664 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 55e75fbb658e..dfb103f81a64 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -669,7 +669,12 @@ config CRYPTO_DEV_IMGTEC_HASH
config CRYPTO_DEV_ROCKCHIP
tristate "Rockchip's Cryptographic Engine driver"
depends on OF && ARCH_ROCKCHIP
+ depends on PM
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_DES
select CRYPTO_AES
+ select CRYPTO_ENGINE
select CRYPTO_LIB_DES
select CRYPTO_MD5
select CRYPTO_SHA1
@@ -681,6 +686,16 @@ config CRYPTO_DEV_ROCKCHIP
This driver interfaces with the hardware crypto accelerator.
Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode.
+config CRYPTO_DEV_ROCKCHIP_DEBUG
+ bool "Enable Rockchip crypto stats"
+ depends on CRYPTO_DEV_ROCKCHIP
+ depends on DEBUG_FS
+ help
+ Say y to enable Rockchip crypto debug stats.
+ This will create /sys/kernel/debug/rk3288_crypto/stats for displaying
+ the number of requests per algorithm and other internal stats.
+
+
config CRYPTO_DEV_ZYNQMP_AES
tristate "Support for Xilinx ZynqMP AES hw accelerator"
depends on ZYNQMP_FIRMWARE || COMPILE_TEST
@@ -785,8 +800,8 @@ config CRYPTO_DEV_CCREE
select CRYPTO_ECB
select CRYPTO_CTR
select CRYPTO_XTS
- select CRYPTO_SM4
- select CRYPTO_SM3
+ select CRYPTO_SM4_GENERIC
+ select CRYPTO_SM3_GENERIC
help
Say 'Y' to enable a driver for the REE interface of the Arm
TrustZone CryptoCell family of processors. Currently the
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 116de173a66c..fa8bf1be1a8c 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -41,7 +41,7 @@ obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SA2UL) += sa2ul.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
obj-$(CONFIG_CRYPTO_DEV_SL3516) += gemini/
-obj-$(CONFIG_ARCH_STM32) += stm32/
+obj-y += stm32/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
index c4b0a8b58842..e2b9b9104694 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
@@ -108,7 +108,6 @@ int sun8i_ce_hwrng_register(struct sun8i_ce_dev *ce)
}
ce->trng.name = "sun8i Crypto Engine TRNG";
ce->trng.read = sun8i_ce_trng_read;
- ce->trng.quality = 1000;
ret = hwrng_register(&ce->trng);
if (ret)
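The deleted .quality assignment here (and the matching one removed from atmel-sha204a.c further down) follows from the "Flip hwrng default_quality" item in this pull: the hwrng core now assumes maximum entropy unless a driver overrides it, so per-driver quality values can simply be dropped. A minimal registration relying on the new default might look like this (sketch; the my_* names are illustrative, not from any real driver):

    #include <linux/hw_random.h>
    #include <linux/string.h>

    static int my_trng_read(struct hwrng *rng, void *data, size_t max,
                            bool wait)
    {
            /* fill 'data' with up to 'max' bytes from the hardware */
            memset(data, 0, max);       /* placeholder, not real entropy */
            return max;
    }

    static struct hwrng my_trng = {
            .name = "my-trng",
            .read = my_trng_read,
            /* .quality left unset: the core default is now maximum */
    };

    static int my_register(struct device *dev)
    {
            return devm_hwrng_register(dev, &my_trng);
    }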
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 910d6751644c..902f6be057ec 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -124,7 +124,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
unsigned int ivsize = crypto_skcipher_ivsize(tfm);
struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];
int i = 0;
- u32 a;
+ dma_addr_t a;
int err;
rctx->ivlen = ivsize;
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index 6e7ae896717c..937187027ad5 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -237,7 +237,6 @@ static int meson_crypto_probe(struct platform_device *pdev)
return err;
}
- mc->irqs = devm_kcalloc(mc->dev, MAXFLOW, sizeof(int), GFP_KERNEL);
for (i = 0; i < MAXFLOW; i++) {
mc->irqs[i] = platform_get_irq(pdev, i);
if (mc->irqs[i] < 0)
diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h
index dc0f142324a3..8c0746a1d6d4 100644
--- a/drivers/crypto/amlogic/amlogic-gxl.h
+++ b/drivers/crypto/amlogic/amlogic-gxl.h
@@ -95,7 +95,7 @@ struct meson_dev {
struct device *dev;
struct meson_flow *chanlist;
atomic_t flow;
- int *irqs;
+ int irqs[MAXFLOW];
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
struct dentry *dbgfs_dir;
#endif
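Storing the per-flow IRQ numbers directly in struct meson_dev removes a devm_kcalloc() whose result, as the deleted line shows, was never checked for failure; since MAXFLOW is a compile-time constant, a fixed array is simpler and cannot fail. A trimmed sketch of the resulting shape (MAXFLOW value assumed for illustration):

    #include <linux/platform_device.h>

    #define MAXFLOW 2                   /* assumed; see amlogic-gxl.h */

    struct meson_dev_irqs {             /* trimmed view after the change */
            int irqs[MAXFLOW];          /* was: int *irqs + devm_kcalloc() */
    };

    static int my_get_irqs(struct platform_device *pdev,
                           struct meson_dev_irqs *mc)
    {
            int i;

            for (i = 0; i < MAXFLOW; i++) {
                    mc->irqs[i] = platform_get_irq(pdev, i);
                    if (mc->irqs[i] < 0)
                            return mc->irqs[i];
            }
            return 0;
    }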
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index 82bf15d49561..53100fb9b07b 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -311,9 +311,9 @@ static struct kpp_alg atmel_ecdh_nist_p256 = {
},
};
-static int atmel_ecc_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int atmel_ecc_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct atmel_i2c_client_priv *i2c_priv;
int ret;
@@ -390,7 +390,7 @@ static struct i2c_driver atmel_ecc_driver = {
.name = "atmel-ecc",
.of_match_table = of_match_ptr(atmel_ecc_dt_ids),
},
- .probe = atmel_ecc_probe,
+ .probe_new = atmel_ecc_probe,
.remove = atmel_ecc_remove,
.id_table = atmel_ecc_id,
};
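This conversion (repeated for atmel-sha204a.c just below) is part of the tree-wide move to the single-argument i2c probe: .probe_new() drops the i2c_device_id parameter, and a driver that still needs the matched id fetches it with i2c_client_get_device_id(). A minimal sketch of the new-style shape (the my_* names are illustrative):

    #include <linux/i2c.h>
    #include <linux/module.h>

    static const struct i2c_device_id my_ids[] = {
            { "my-chip", 0 },
            { }
    };
    MODULE_DEVICE_TABLE(i2c, my_ids);

    static int my_probe(struct i2c_client *client)
    {
            /* recover the matched id only if the driver needs it */
            const struct i2c_device_id *id = i2c_client_get_device_id(client);

            return id ? 0 : -ENODEV;
    }

    static struct i2c_driver my_driver = {
            .driver    = { .name = "my-chip" },
            .probe_new = my_probe,
            .id_table  = my_ids,
    };
    module_i2c_driver(my_driver);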
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index a84b657598c6..272a06f0b588 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -91,9 +91,9 @@ static int atmel_sha204a_rng_read(struct hwrng *rng, void *data, size_t max,
return max;
}
-static int atmel_sha204a_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int atmel_sha204a_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct atmel_i2c_client_priv *i2c_priv;
int ret;
@@ -107,7 +107,6 @@ static int atmel_sha204a_probe(struct i2c_client *client,
i2c_priv->hwrng.name = dev_name(&client->dev);
i2c_priv->hwrng.read = atmel_sha204a_rng_read;
- i2c_priv->hwrng.quality = 1024;
ret = devm_hwrng_register(&client->dev, &i2c_priv->hwrng);
if (ret)
@@ -143,7 +142,7 @@ static const struct i2c_device_id atmel_sha204a_id[] = {
MODULE_DEVICE_TABLE(i2c, atmel_sha204a_id);
static struct i2c_driver atmel_sha204a_driver = {
- .probe = atmel_sha204a_probe,
+ .probe_new = atmel_sha204a_probe,
.remove = atmel_sha204a_remove,
.id_table = atmel_sha204a_id,
diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c
index 6345c7269eb0..1f65df489847 100644
--- a/drivers/crypto/caam/blob_gen.c
+++ b/drivers/crypto/caam/blob_gen.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) "caam blob_gen: " fmt
+#include <linux/bitfield.h>
#include <linux/device.h>
#include <soc/fsl/caam-blob.h>
@@ -61,12 +62,14 @@ static void caam_blob_job_done(struct device *dev, u32 *desc, u32 err, void *con
int caam_process_blob(struct caam_blob_priv *priv,
struct caam_blob_info *info, bool encap)
{
+ const struct caam_drv_private *ctrlpriv;
struct caam_blob_job_result testres;
struct device *jrdev = &priv->jrdev;
dma_addr_t dma_in, dma_out;
int op = OP_PCLID_BLOB;
size_t output_len;
u32 *desc;
+ u32 moo;
int ret;
if (info->key_mod_len > CAAM_BLOB_KEYMOD_LENGTH)
@@ -100,6 +103,12 @@ int caam_process_blob(struct caam_blob_priv *priv,
goto out_unmap_in;
}
+ ctrlpriv = dev_get_drvdata(jrdev->parent);
+ moo = FIELD_GET(CSTA_MOO, ioread32(&ctrlpriv->ctrl->perfmon.status));
+ if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED)
+ dev_warn(jrdev,
+ "using insecure test key, enable HAB to use unique device key!\n");
+
/*
* A data blob is encrypted using a blob key (BK); a random number.
* The BK is used as an AES-CCM key. The initial block (B0) and the
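The new check added above reads the CAAM's perfmon status register and uses FIELD_GET() from <linux/bitfield.h> (hence the added include) to extract the Mode-Of-Operation bits, warning unless the part is in secure or trusted state; CSTA_MOO and its values come from the regs.h change in this series. The idiom in isolation, with an invented register layout for illustration:

    #include <linux/bitfield.h>
    #include <linux/io.h>
    #include <linux/types.h>

    #define MY_STA_MOO      GENMASK(9, 8)   /* invented field position */
    #define MY_MOO_SECURE   1
    #define MY_MOO_TRUSTED  2

    static bool my_dev_is_secure(void __iomem *status_reg)
    {
            u32 moo = FIELD_GET(MY_STA_MOO, ioread32(status_reg));

            return moo == MY_MOO_SECURE || moo == MY_MOO_TRUSTED;
    }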
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index d3d8bb0a6990..ecc15bc521db 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -131,7 +131,7 @@ struct caam_aead_req_ctx {
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
@@ -184,7 +184,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 ctx1_iv_off = 0;
@@ -312,7 +312,7 @@ skip_givenc:
static int aead_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
ctx->authsize = authsize;
aead_set_sh_desc(authenc);
@@ -322,7 +322,7 @@ static int aead_setauthsize(struct crypto_aead *authenc,
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc;
@@ -372,7 +372,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
int err;
err = crypto_gcm_check_authsize(authsize);
@@ -387,7 +387,7 @@ static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc;
@@ -440,7 +440,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
static int rfc4106_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
int err;
err = crypto_rfc4106_check_authsize(authsize);
@@ -455,7 +455,7 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc,
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc;
@@ -508,7 +508,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
static int rfc4543_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
if (authsize != 16)
return -EINVAL;
@@ -521,7 +521,7 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc;
@@ -547,7 +547,7 @@ static int chachapoly_set_sh_desc(struct crypto_aead *aead)
static int chachapoly_setauthsize(struct crypto_aead *aead,
unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
if (authsize != POLY1305_DIGEST_SIZE)
return -EINVAL;
@@ -559,7 +559,7 @@ static int chachapoly_setauthsize(struct crypto_aead *aead,
static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
@@ -575,7 +575,7 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
static int aead_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys;
@@ -656,7 +656,7 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
static int gcm_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
int err;
@@ -677,7 +677,7 @@ static int gcm_setkey(struct crypto_aead *aead,
static int rfc4106_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
int err;
@@ -703,7 +703,7 @@ static int rfc4106_setkey(struct crypto_aead *aead,
static int rfc4543_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
int err;
@@ -729,7 +729,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen, const u32 ctx1_iv_off)
{
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct caam_skcipher_alg *alg =
container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
skcipher);
@@ -832,7 +832,7 @@ static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
@@ -1057,7 +1057,7 @@ static void init_aead_job(struct aead_request *req,
bool all_contig, bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
int authsize = ctx->authsize;
u32 *desc = edesc->hw_desc;
u32 out_options, in_options;
@@ -1118,7 +1118,7 @@ static void init_gcm_job(struct aead_request *req,
bool all_contig, bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc = edesc->hw_desc;
bool generic_gcm = (ivsize == GCM_AES_IV_SIZE);
@@ -1185,7 +1185,7 @@ static void init_authenc_job(struct aead_request *req,
struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
@@ -1234,7 +1234,7 @@ static void init_skcipher_job(struct skcipher_request *req,
const bool encrypt)
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *jrdev = ctx->jrdev;
int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc = edesc->hw_desc;
@@ -1290,7 +1290,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
@@ -1457,7 +1457,7 @@ static inline int chachapoly_crypt(struct aead_request *req, bool encrypt)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
bool all_contig;
u32 *desc;
@@ -1491,7 +1491,7 @@ static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
bool all_contig;
@@ -1524,7 +1524,7 @@ static int aead_decrypt(struct aead_request *req)
static int aead_do_one_req(struct crypto_engine *engine, void *areq)
{
struct aead_request *req = aead_request_cast(areq);
- struct caam_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(crypto_aead_reqtfm(req));
struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
u32 *desc = rctx->edesc->hw_desc;
int ret;
@@ -1550,7 +1550,7 @@ static inline int gcm_crypt(struct aead_request *req, bool encrypt)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
bool all_contig;
@@ -1597,7 +1597,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
int desc_bytes)
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
@@ -1756,7 +1756,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
{
struct skcipher_request *req = skcipher_request_cast(areq);
- struct caam_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(crypto_skcipher_reqtfm(req));
struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
u32 *desc = rctx->edesc->hw_desc;
int ret;
@@ -1790,7 +1790,7 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *jrdev = ctx->jrdev;
struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
@@ -3397,7 +3397,7 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct caam_skcipher_alg *caam_alg =
container_of(alg, typeof(*caam_alg), skcipher);
- struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
int ret = 0;
@@ -3434,7 +3434,7 @@ static int caam_aead_init(struct crypto_aead *tfm)
struct aead_alg *alg = crypto_aead_alg(tfm);
struct caam_aead_alg *caam_alg =
container_of(alg, struct caam_aead_alg, aead);
- struct caam_ctx *ctx = crypto_aead_ctx(tfm);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm);
crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx));
@@ -3454,7 +3454,7 @@ static void caam_exit_common(struct caam_ctx *ctx)
static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
if (ctx->fallback)
crypto_free_skcipher(ctx->fallback);
@@ -3463,7 +3463,7 @@ static void caam_cra_exit(struct crypto_skcipher *tfm)
static void caam_aead_exit(struct crypto_aead *tfm)
{
- caam_exit_common(crypto_aead_ctx(tfm));
+ caam_exit_common(crypto_aead_ctx_dma(tfm));
}
void caam_algapi_exit(void)
@@ -3491,7 +3491,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
- alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY);
@@ -3505,7 +3505,7 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
- alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY;
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 189a7438b29c..c37b67be0492 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -81,7 +81,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
{
struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
typeof(*alg), aead);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
u32 ctx1_iv_off = 0;
u32 *nonce = NULL;
@@ -184,7 +184,7 @@ skip_givenc:
static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
ctx->authsize = authsize;
aead_set_sh_desc(authenc);
@@ -195,7 +195,7 @@ static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys;
@@ -299,7 +299,7 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
ctx->cdata.keylen;
@@ -342,7 +342,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
int err;
err = crypto_gcm_check_authsize(authsize);
@@ -358,7 +358,7 @@ static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
static int gcm_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
int ret;
@@ -402,7 +402,7 @@ static int gcm_setkey(struct crypto_aead *aead,
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
ctx->cdata.keylen;
@@ -446,7 +446,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
static int rfc4106_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
int err;
err = crypto_rfc4106_check_authsize(authsize);
@@ -462,7 +462,7 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc,
static int rfc4106_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
int ret;
@@ -510,7 +510,7 @@ static int rfc4106_setkey(struct crypto_aead *aead,
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
ctx->cdata.keylen;
@@ -554,7 +554,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
static int rfc4543_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
if (authsize != 16)
return -EINVAL;
@@ -568,7 +568,7 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
static int rfc4543_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
int ret;
@@ -617,7 +617,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen, const u32 ctx1_iv_off)
{
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct caam_skcipher_alg *alg =
container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
skcipher);
@@ -731,7 +731,7 @@ static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
int ret = 0;
@@ -915,7 +915,7 @@ static void aead_done(struct caam_drv_req *drv_req, u32 status)
struct aead_edesc *edesc;
struct aead_request *aead_req = drv_req->app_ctx;
struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
- struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *caam_ctx = crypto_aead_ctx_dma(aead);
int ecode = 0;
qidev = caam_ctx->qidev;
@@ -937,7 +937,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
typeof(*alg), aead);
struct device *qidev = ctx->qidev;
@@ -1157,7 +1157,7 @@ static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
int ret;
if (unlikely(caam_congested))
@@ -1207,7 +1207,7 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
struct skcipher_edesc *edesc;
struct skcipher_request *req = drv_req->app_ctx;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *caam_ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *qidev = caam_ctx->qidev;
int ivsize = crypto_skcipher_ivsize(skcipher);
int ecode = 0;
@@ -1245,7 +1245,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
bool encrypt)
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *qidev = ctx->qidev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
@@ -1405,7 +1405,7 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
int ret;
@@ -2491,7 +2491,7 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct caam_skcipher_alg *caam_alg =
container_of(alg, typeof(*caam_alg), skcipher);
- struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
int ret = 0;
@@ -2524,7 +2524,7 @@ static int caam_aead_init(struct crypto_aead *tfm)
struct aead_alg *alg = crypto_aead_alg(tfm);
struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
aead);
- struct caam_ctx *ctx = crypto_aead_ctx(tfm);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm);
return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}
@@ -2542,7 +2542,7 @@ static void caam_exit_common(struct caam_ctx *ctx)
static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
if (ctx->fallback)
crypto_free_skcipher(ctx->fallback);
@@ -2551,7 +2551,7 @@ static void caam_cra_exit(struct crypto_skcipher *tfm)
static void caam_aead_exit(struct crypto_aead *tfm)
{
- caam_exit_common(crypto_aead_ctx(tfm));
+ caam_exit_common(crypto_aead_ctx_dma(tfm));
}
void caam_qi_algapi_exit(void)
@@ -2579,7 +2579,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
- alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY);
@@ -2593,7 +2593,7 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
- alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY;
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 4482cb145d05..1b0dd742c53f 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -134,12 +134,12 @@ static struct caam_request *to_caam_req(struct crypto_async_request *areq)
{
switch (crypto_tfm_alg_type(areq->tfm)) {
case CRYPTO_ALG_TYPE_SKCIPHER:
- return skcipher_request_ctx(skcipher_request_cast(areq));
+ return skcipher_request_ctx_dma(skcipher_request_cast(areq));
case CRYPTO_ALG_TYPE_AEAD:
- return aead_request_ctx(container_of(areq, struct aead_request,
- base));
+ return aead_request_ctx_dma(
+ container_of(areq, struct aead_request, base));
case CRYPTO_ALG_TYPE_AHASH:
- return ahash_request_ctx(ahash_request_cast(areq));
+ return ahash_request_ctx_dma(ahash_request_cast(areq));
default:
return ERR_PTR(-EINVAL);
}
@@ -171,7 +171,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
{
struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
typeof(*alg), aead);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct device *dev = ctx->dev;
struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
@@ -276,7 +276,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
ctx->authsize = authsize;
aead_set_sh_desc(authenc);
@@ -287,7 +287,7 @@ static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *dev = ctx->dev;
struct crypto_authenc_keys keys;
@@ -350,10 +350,10 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_request *req_ctx = aead_request_ctx(req);
+ struct caam_request *req_ctx = aead_request_ctx_dma(req);
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
typeof(*alg), aead);
struct device *dev = ctx->dev;
@@ -587,7 +587,7 @@ skip_out_fle:
static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct device *dev = ctx->dev;
struct caam_flc *flc;
@@ -620,7 +620,7 @@ static int chachapoly_set_sh_desc(struct crypto_aead *aead)
static int chachapoly_setauthsize(struct crypto_aead *aead,
unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
if (authsize != POLY1305_DIGEST_SIZE)
return -EINVAL;
@@ -632,7 +632,7 @@ static int chachapoly_setauthsize(struct crypto_aead *aead,
static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
@@ -647,7 +647,7 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *dev = ctx->dev;
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_flc *flc;
@@ -704,7 +704,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
int err;
err = crypto_gcm_check_authsize(authsize);
@@ -720,7 +720,7 @@ static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
static int gcm_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *dev = ctx->dev;
int ret;
@@ -739,7 +739,7 @@ static int gcm_setkey(struct crypto_aead *aead,
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *dev = ctx->dev;
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_flc *flc;
@@ -799,7 +799,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
static int rfc4106_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
int err;
err = crypto_rfc4106_check_authsize(authsize);
@@ -815,7 +815,7 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc,
static int rfc4106_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *dev = ctx->dev;
int ret;
@@ -840,7 +840,7 @@ static int rfc4106_setkey(struct crypto_aead *aead,
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *dev = ctx->dev;
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_flc *flc;
@@ -900,7 +900,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
static int rfc4543_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
if (authsize != 16)
return -EINVAL;
@@ -914,7 +914,7 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
static int rfc4543_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *dev = ctx->dev;
int ret;
@@ -940,7 +940,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen, const u32 ctx1_iv_off)
{
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct caam_skcipher_alg *alg =
container_of(crypto_skcipher_alg(skcipher),
struct caam_skcipher_alg, skcipher);
@@ -1059,7 +1059,7 @@ static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *dev = ctx->dev;
struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
struct caam_flc *flc;
@@ -1109,10 +1109,10 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_request *req_ctx = skcipher_request_ctx(req);
+ struct caam_request *req_ctx = skcipher_request_ctx_dma(req);
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *dev = ctx->dev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
@@ -1286,7 +1286,7 @@ static void aead_encrypt_done(void *cbk_ctx, u32 status)
struct caam_request *req_ctx = to_caam_req(areq);
struct aead_edesc *edesc = req_ctx->edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
int ecode = 0;
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
@@ -1307,7 +1307,7 @@ static void aead_decrypt_done(void *cbk_ctx, u32 status)
struct caam_request *req_ctx = to_caam_req(areq);
struct aead_edesc *edesc = req_ctx->edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
int ecode = 0;
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
@@ -1324,8 +1324,8 @@ static int aead_encrypt(struct aead_request *req)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct caam_request *caam_req = aead_request_ctx(req);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
+ struct caam_request *caam_req = aead_request_ctx_dma(req);
int ret;
/* allocate extended descriptor */
@@ -1352,8 +1352,8 @@ static int aead_decrypt(struct aead_request *req)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct caam_request *caam_req = aead_request_ctx(req);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
+ struct caam_request *caam_req = aead_request_ctx_dma(req);
int ret;
/* allocate extended descriptor */
@@ -1392,7 +1392,7 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
struct skcipher_request *req = skcipher_request_cast(areq);
struct caam_request *req_ctx = to_caam_req(areq);
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct skcipher_edesc *edesc = req_ctx->edesc;
int ecode = 0;
int ivsize = crypto_skcipher_ivsize(skcipher);
@@ -1430,7 +1430,7 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
struct skcipher_request *req = skcipher_request_cast(areq);
struct caam_request *req_ctx = to_caam_req(areq);
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct skcipher_edesc *edesc = req_ctx->edesc;
int ecode = 0;
int ivsize = crypto_skcipher_ivsize(skcipher);
@@ -1474,8 +1474,8 @@ static int skcipher_encrypt(struct skcipher_request *req)
{
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
- struct caam_request *caam_req = skcipher_request_ctx(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
+ struct caam_request *caam_req = skcipher_request_ctx_dma(req);
struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
int ret;
@@ -1524,8 +1524,8 @@ static int skcipher_decrypt(struct skcipher_request *req)
{
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
- struct caam_request *caam_req = skcipher_request_ctx(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
+ struct caam_request *caam_req = skcipher_request_ctx_dma(req);
struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
int ret;
@@ -1603,7 +1603,7 @@ static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct caam_skcipher_alg *caam_alg =
container_of(alg, typeof(*caam_alg), skcipher);
- struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
int ret = 0;
@@ -1621,10 +1621,12 @@ static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
}
ctx->fallback = fallback;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request) +
- crypto_skcipher_reqsize(fallback));
+ crypto_skcipher_set_reqsize_dma(
+ tfm, sizeof(struct caam_request) +
+ crypto_skcipher_reqsize(fallback));
} else {
- crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
+ crypto_skcipher_set_reqsize_dma(tfm,
+ sizeof(struct caam_request));
}
ret = caam_cra_init(ctx, &caam_alg->caam, false);
@@ -1640,8 +1642,8 @@ static int caam_cra_init_aead(struct crypto_aead *tfm)
struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
aead);
- crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
- return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
+ crypto_aead_set_reqsize_dma(tfm, sizeof(struct caam_request));
+ return caam_cra_init(crypto_aead_ctx_dma(tfm), &caam_alg->caam,
!caam_alg->caam.nodkp);
}
@@ -1654,7 +1656,7 @@ static void caam_exit_common(struct caam_ctx *ctx)
static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
if (ctx->fallback)
crypto_free_skcipher(ctx->fallback);
@@ -1663,7 +1665,7 @@ static void caam_cra_exit(struct crypto_skcipher *tfm)
static void caam_cra_exit_aead(struct crypto_aead *tfm)
{
- caam_exit_common(crypto_aead_ctx(tfm));
+ caam_exit_common(crypto_aead_ctx_dma(tfm));
}
static struct caam_skcipher_alg driver_algs[] = {
@@ -3008,7 +3010,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
- alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY);
@@ -3022,7 +3024,7 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
- alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY;
@@ -3132,7 +3134,7 @@ static inline int ctx_map_to_qm_sg(struct device *dev,
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
struct caam_flc *flc;
@@ -3305,7 +3307,7 @@ err_flc:
static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
unsigned int keylen)
{
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
unsigned int digestsize = crypto_ahash_digestsize(ahash);
int ret;
@@ -3356,7 +3358,7 @@ bad_free_key:
static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
if (edesc->src_nents)
dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
@@ -3376,7 +3378,7 @@ static inline void ahash_unmap_ctx(struct device *dev,
struct ahash_edesc *edesc,
struct ahash_request *req, u32 flag)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
if (state->ctx_dma) {
dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
@@ -3390,9 +3392,9 @@ static void ahash_done(void *cbk_ctx, u32 status)
struct crypto_async_request *areq = cbk_ctx;
struct ahash_request *req = ahash_request_cast(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct ahash_edesc *edesc = state->caam_req.edesc;
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
int ecode = 0;
@@ -3417,9 +3419,9 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
struct crypto_async_request *areq = cbk_ctx;
struct ahash_request *req = ahash_request_cast(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct ahash_edesc *edesc = state->caam_req.edesc;
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int ecode = 0;
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
@@ -3455,9 +3457,9 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
struct crypto_async_request *areq = cbk_ctx;
struct ahash_request *req = ahash_request_cast(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct ahash_edesc *edesc = state->caam_req.edesc;
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
int ecode = 0;
@@ -3482,9 +3484,9 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
struct crypto_async_request *areq = cbk_ctx;
struct ahash_request *req = ahash_request_cast(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct ahash_edesc *edesc = state->caam_req.edesc;
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int ecode = 0;
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
@@ -3518,8 +3520,8 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
static int ahash_update_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
@@ -3637,8 +3639,8 @@ unmap_ctx:
static int ahash_final_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
@@ -3708,8 +3710,8 @@ unmap_ctx:
static int ahash_finup_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
@@ -3802,8 +3804,8 @@ unmap_ctx:
static int ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
@@ -3897,8 +3899,8 @@ unmap:
static int ahash_final_no_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
@@ -3970,8 +3972,8 @@ unmap:
static int ahash_update_no_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
@@ -4091,8 +4093,8 @@ unmap_ctx:
static int ahash_finup_no_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
@@ -4187,8 +4189,8 @@ unmap:
static int ahash_update_first(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
@@ -4320,7 +4322,7 @@ static int ahash_finup_first(struct ahash_request *req)
static int ahash_init(struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
state->update = ahash_update_first;
state->finup = ahash_finup_first;
@@ -4337,28 +4339,28 @@ static int ahash_init(struct ahash_request *req)
static int ahash_update(struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
return state->update(req);
}
static int ahash_finup(struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
return state->finup(req);
}
static int ahash_final(struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
return state->final(req);
}
static int ahash_export(struct ahash_request *req, void *out)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_export_state *export = out;
u8 *buf = state->buf;
int len = state->buflen;
@@ -4375,7 +4377,7 @@ static int ahash_export(struct ahash_request *req, void *out)
static int ahash_import(struct ahash_request *req, const void *in)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
const struct caam_export_state *export = in;
memset(state, 0, sizeof(*state));
@@ -4547,7 +4549,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
container_of(halg, struct ahash_alg, halg);
struct caam_hash_alg *caam_hash =
container_of(alg, struct caam_hash_alg, ahash_alg);
- struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
HASH_MSG_LEN + SHA1_DIGEST_SIZE,
@@ -4594,8 +4596,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
OP_ALG_ALGSEL_SUBMASK) >>
OP_ALG_ALGSEL_SHIFT];
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct caam_hash_state));
+ crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));
/*
* For keyed hash algorithms shared descriptors
@@ -4606,7 +4607,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
- struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
@@ -4646,7 +4647,7 @@ static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
alg->cra_module = THIS_MODULE;
alg->cra_init = caam_hash_cra_init;
alg->cra_exit = caam_hash_cra_exit;
- alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
+ alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
alg->cra_priority = CAAM_CRA_PRIORITY;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
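
The caam hash hunks above all apply one pattern: the context is registered with crypto_dma_padding() (or the CRYPTO_DMA_PADDING constant) added to cra_ctxsize, and every access then goes through the *_ctx_dma()/*_request_ctx_dma() helpers, which return a pointer aligned for DMA rather than the raw crypto_tfm_ctx() storage. A minimal sketch of the ahash side of that contract, assuming the 6.2 helpers from <crypto/internal/hash.h>; the demo_* names are illustrative, not from this patch:

#include <crypto/internal/hash.h>
#include <linux/string.h>

struct demo_hash_ctx {
	u8 key[64];			/* device fetches this via DMA */
};

struct demo_hash_state {
	u8 buf[64];			/* per-request data, also DMA'd */
};

static int demo_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	/* DMA-aligned view of the padded context */
	struct demo_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);

	/* the request context gets the same padding treatment */
	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct demo_hash_state));
	memset(ctx->key, 0, sizeof(ctx->key));
	return 0;
}

static int demo_hash_update(struct ahash_request *req)
{
	struct demo_hash_state *state = ahash_request_ctx_dma(req);

	state->buf[0] = 0;		/* descriptor handling elided */
	return 0;
}

/* at registration the padding is added exactly once:
 *	alg->cra_ctxsize = sizeof(struct demo_hash_ctx) +
 *			   crypto_dma_padding();
 */
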
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 36ef738e4a18..1050e965a438 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -199,7 +199,7 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev,
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
@@ -255,7 +255,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev;
u32 *desc;
@@ -307,7 +307,7 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev;
u32 *desc;
@@ -421,7 +421,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
static int ahash_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
struct device *jrdev = ctx->jrdev;
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
int digestsize = crypto_ahash_digestsize(ahash);
@@ -484,7 +484,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
unsigned int keylen)
{
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
struct device *jrdev = ctx->jrdev;
if (keylen != AES_KEYSIZE_128)
@@ -504,7 +504,7 @@ static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
unsigned int keylen)
{
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int err;
err = aes_check_keylen(keylen);
@@ -543,7 +543,7 @@ static inline void ahash_unmap(struct device *dev,
struct ahash_edesc *edesc,
struct ahash_request *req, int dst_len)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
if (edesc->src_nents)
dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
@@ -563,7 +563,7 @@ static inline void ahash_unmap_ctx(struct device *dev,
struct ahash_edesc *edesc,
struct ahash_request *req, int dst_len, u32 flag)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
if (state->ctx_dma) {
dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
@@ -580,8 +580,8 @@ static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
int digestsize = crypto_ahash_digestsize(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int ecode = 0;
bool has_bklog;
@@ -630,8 +630,8 @@ static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
int digestsize = crypto_ahash_digestsize(ahash);
int ecode = 0;
bool has_bklog;
@@ -695,8 +695,8 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
dma_addr_t sh_desc_dma)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
struct ahash_edesc *edesc;
@@ -755,8 +755,8 @@ static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
{
struct ahash_request *req = ahash_request_cast(areq);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(crypto_ahash_reqtfm(req));
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct device *jrdev = ctx->jrdev;
u32 *desc = state->edesc->hw_desc;
int ret;
@@ -785,7 +785,7 @@ static int ahash_enqueue_req(struct device *jrdev,
int dst_len, enum dma_data_direction dir)
{
struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct ahash_edesc *edesc = state->edesc;
u32 *desc = edesc->hw_desc;
int ret;
@@ -815,8 +815,8 @@ static int ahash_enqueue_req(struct device *jrdev,
static int ahash_update_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct device *jrdev = ctx->jrdev;
u8 *buf = state->buf;
int *buflen = &state->buflen;
@@ -940,8 +940,8 @@ unmap_ctx:
static int ahash_final_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct device *jrdev = ctx->jrdev;
int buflen = state->buflen;
u32 *desc;
@@ -1001,8 +1001,8 @@ static int ahash_final_ctx(struct ahash_request *req)
static int ahash_finup_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct device *jrdev = ctx->jrdev;
int buflen = state->buflen;
u32 *desc;
@@ -1075,8 +1075,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
static int ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int digestsize = crypto_ahash_digestsize(ahash);
@@ -1142,8 +1142,8 @@ static int ahash_digest(struct ahash_request *req)
static int ahash_final_no_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct device *jrdev = ctx->jrdev;
u8 *buf = state->buf;
int buflen = state->buflen;
@@ -1191,8 +1191,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
static int ahash_update_no_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct device *jrdev = ctx->jrdev;
u8 *buf = state->buf;
int *buflen = &state->buflen;
@@ -1312,8 +1312,8 @@ static int ahash_update_no_ctx(struct ahash_request *req)
static int ahash_finup_no_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct device *jrdev = ctx->jrdev;
int buflen = state->buflen;
u32 *desc;
@@ -1388,8 +1388,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
static int ahash_update_first(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct device *jrdev = ctx->jrdev;
u8 *buf = state->buf;
int *buflen = &state->buflen;
@@ -1498,7 +1498,7 @@ static int ahash_finup_first(struct ahash_request *req)
static int ahash_init(struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
state->update = ahash_update_first;
state->finup = ahash_finup_first;
@@ -1515,28 +1515,28 @@ static int ahash_init(struct ahash_request *req)
static int ahash_update(struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
return state->update(req);
}
static int ahash_finup(struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
return state->finup(req);
}
static int ahash_final(struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
return state->final(req);
}
static int ahash_export(struct ahash_request *req, void *out)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_export_state *export = out;
u8 *buf = state->buf;
int len = state->buflen;
@@ -1553,7 +1553,7 @@ static int ahash_export(struct ahash_request *req, void *out)
static int ahash_import(struct ahash_request *req, const void *in)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
const struct caam_export_state *export = in;
memset(state, 0, sizeof(*state));
@@ -1762,7 +1762,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
container_of(halg, struct ahash_alg, halg);
struct caam_hash_alg *caam_hash =
container_of(alg, struct caam_hash_alg, ahash_alg);
- struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
HASH_MSG_LEN + SHA1_DIGEST_SIZE,
@@ -1854,8 +1854,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
ctx->enginectx.op.do_one_request = ahash_do_one_req;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct caam_hash_state));
+ crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));
/*
* For keyed hash algorithms shared descriptors
@@ -1866,7 +1865,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
- struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
offsetof(struct caam_hash_ctx, key) -
@@ -1926,7 +1925,7 @@ caam_hash_alloc(struct caam_hash_template *template,
alg->cra_module = THIS_MODULE;
alg->cra_init = caam_hash_cra_init;
alg->cra_exit = caam_hash_cra_exit;
- alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
+ alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
alg->cra_priority = CAAM_CRA_PRIORITY;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 886727576710..aef031946f33 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -57,7 +57,7 @@ static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
@@ -69,7 +69,7 @@ static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
@@ -81,7 +81,7 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
size_t p_sz = key->p_sz;
@@ -98,7 +98,7 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
size_t p_sz = key->p_sz;
@@ -149,7 +149,7 @@ static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
struct akcipher_request *req = context;
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
struct rsa_edesc *edesc;
@@ -242,7 +242,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
size_t desclen)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct device *dev = ctx->dev;
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
struct caam_rsa_key *key = &ctx->key;
@@ -371,7 +371,7 @@ static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
base);
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct device *jrdev = ctx->dev;
u32 *desc = req_ctx->edesc->hw_desc;
int ret;
@@ -399,7 +399,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct device *dev = ctx->dev;
struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
@@ -444,7 +444,7 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
struct rsa_edesc *edesc)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct device *dev = ctx->dev;
struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
@@ -491,7 +491,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
struct rsa_edesc *edesc)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct device *dev = ctx->dev;
struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
@@ -568,7 +568,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
struct rsa_edesc *edesc)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct device *dev = ctx->dev;
struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
@@ -664,7 +664,7 @@ static int akcipher_enqueue_req(struct device *jrdev,
{
struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
struct rsa_edesc *edesc = req_ctx->edesc;
@@ -707,7 +707,7 @@ static int akcipher_enqueue_req(struct device *jrdev,
static int caam_rsa_enc(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
struct device *jrdev = ctx->dev;
struct rsa_edesc *edesc;
@@ -746,7 +746,7 @@ init_fail:
static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct device *jrdev = ctx->dev;
struct rsa_edesc *edesc;
int ret;
@@ -775,7 +775,7 @@ init_fail:
static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct device *jrdev = ctx->dev;
struct rsa_edesc *edesc;
int ret;
@@ -804,7 +804,7 @@ init_fail:
static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct device *jrdev = ctx->dev;
struct rsa_edesc *edesc;
int ret;
@@ -833,7 +833,7 @@ init_fail:
static int caam_rsa_dec(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
int ret;
@@ -936,7 +936,7 @@ static int caam_rsa_check_key_length(unsigned int len)
static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct rsa_key raw_key = {NULL};
struct caam_rsa_key *rsa_key = &ctx->key;
int ret;
@@ -1038,7 +1038,7 @@ free_p:
static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct rsa_key raw_key = {NULL};
struct caam_rsa_key *rsa_key = &ctx->key;
int ret;
@@ -1089,7 +1089,7 @@ err:
static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
return ctx->key.n_sz;
}
@@ -1097,7 +1097,9 @@ static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
/* Per session pkc's driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
+
+ akcipher_set_reqsize(tfm, sizeof(struct caam_rsa_req_ctx));
ctx->dev = caam_jr_alloc();
@@ -1123,7 +1125,7 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
/* Per session pkc's driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
- struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct caam_rsa_key *key = &ctx->key;
dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
@@ -1141,13 +1143,13 @@ static struct caam_akcipher_alg caam_rsa = {
.max_size = caam_rsa_max_size,
.init = caam_rsa_init_tfm,
.exit = caam_rsa_exit_tfm,
- .reqsize = sizeof(struct caam_rsa_req_ctx),
.base = {
.cra_name = "rsa",
.cra_driver_name = "rsa-caam",
.cra_priority = 3000,
.cra_module = THIS_MODULE,
- .cra_ctxsize = sizeof(struct caam_rsa_ctx),
+ .cra_ctxsize = sizeof(struct caam_rsa_ctx) +
+ CRYPTO_DMA_PADDING,
},
}
};
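
For the akcipher case the conversion has a second part: the static .reqsize field is dropped from the alg definition and set at init time instead. A hedged sketch of the resulting init path, using the 6.2 akcipher helpers; the demo_* names are invented for illustration:

#include <crypto/internal/akcipher.h>
#include <linux/device.h>

struct demo_rsa_ctx {
	struct device *dev;		/* job-ring / device handle */
};

struct demo_rsa_req_ctx {
	u32 hw_desc[16];		/* per-request descriptor */
};

static int demo_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct demo_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);

	/* replaces the old ".reqsize = sizeof(...)" field */
	akcipher_set_reqsize(tfm, sizeof(struct demo_rsa_req_ctx));
	ctx->dev = NULL;		/* a real driver acquires its device here */
	return 0;
}

/* the matching registration carries the padding:
 *	.base.cra_ctxsize = sizeof(struct demo_rsa_ctx) +
 *			    CRYPTO_DMA_PADDING,
 */
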
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 77d048dfe5d0..1f0e82050976 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -246,7 +246,6 @@ int caam_rng_init(struct device *ctrldev)
ctx->rng.cleanup = caam_cleanup;
ctx->rng.read = caam_read;
ctx->rng.priv = (unsigned long)ctx;
- ctx->rng.quality = 1024;
dev_info(ctrldev, "registering rng-caam\n");
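
The caamrng hunk only deletes the explicit quality value: the hwrng core now assumes full-quality output unless a driver says otherwise, so a source that used to claim 1024/1024 can leave the field unset. A minimal registration sketch under that assumption (names illustrative):

#include <linux/hw_random.h>

static int demo_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	/* fill buf from the device, return the number of bytes produced */
	return 0;
}

static struct hwrng demo_rng = {
	.name = "demo-rng",
	.read = demo_rng_read,
	/* no .quality member: the core default now applies */
};
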
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 66d6dad841bb..66928f8a0c4b 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -426,6 +426,9 @@ struct caam_perfmon {
u32 rsvd2;
#define CSTA_PLEND BIT(10)
#define CSTA_ALT_PLEND BIT(18)
+#define CSTA_MOO GENMASK(9, 8)
+#define CSTA_MOO_SECURE 1
+#define CSTA_MOO_TRUSTED 2
u32 status; /* CSTA - CAAM Status */
u64 rsvd3;
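
The new CSTA_MOO field is a two-bit GENMASK carving the mode-of-operation out of the CAAM status register; a caller would typically extract it with FIELD_GET. A small sketch, assuming <linux/bitfield.h> and the definitions added above:

#include <linux/bitfield.h>

/* returns true when the CAAM reports secure or trusted mode */
static bool demo_caam_secure_or_trusted(u32 csta)
{
	u32 moo = FIELD_GET(CSTA_MOO, csta);

	return moo == CSTA_MOO_SECURE || moo == CSTA_MOO_TRUSTED;
}
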
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index ce3b91c612f0..9eca0c302186 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -97,7 +97,7 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
+ struct cvm_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct fc_context *fctx = &rctx->fctx;
u32 enc_iv_len = crypto_skcipher_ivsize(tfm);
struct cpt_request_info *req_info = &rctx->cpt_req;
@@ -151,7 +151,7 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
u32 enc_iv_len)
{
- struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
+ struct cvm_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct cpt_request_info *req_info = &rctx->cpt_req;
u32 argcnt = 0;
@@ -173,7 +173,7 @@ static inline void store_cb_info(struct skcipher_request *req,
static inline void create_output_list(struct skcipher_request *req,
u32 enc_iv_len)
{
- struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
+ struct cvm_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct cpt_request_info *req_info = &rctx->cpt_req;
u32 argcnt = 0;
@@ -193,7 +193,7 @@ static inline void create_output_list(struct skcipher_request *req,
static inline int cvm_enc_dec(struct skcipher_request *req, u32 enc)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
+ struct cvm_req_ctx *rctx = skcipher_request_ctx_dma(req);
u32 enc_iv_len = crypto_skcipher_ivsize(tfm);
struct fc_context *fctx = &rctx->fctx;
struct cpt_request_info *req_info = &rctx->cpt_req;
@@ -335,7 +335,7 @@ static int cvm_ecb_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
static int cvm_enc_dec_init(struct crypto_skcipher *tfm)
{
- crypto_skcipher_set_reqsize(tfm, sizeof(struct cvm_req_ctx));
+ crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct cvm_req_ctx));
return 0;
}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c
index c93c4e41d267..0653484df23f 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_aead.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c
@@ -392,7 +392,7 @@ static int nitrox_rfc4106_setauthsize(struct crypto_aead *aead,
static int nitrox_rfc4106_set_aead_rctx_sglist(struct aead_request *areq)
{
- struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
+ struct nitrox_rfc4106_rctx *rctx = aead_request_ctx_dma(areq);
struct nitrox_aead_rctx *aead_rctx = &rctx->base;
unsigned int assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
struct scatterlist *sg;
@@ -424,7 +424,7 @@ static int nitrox_rfc4106_set_aead_rctx_sglist(struct aead_request *areq)
static void nitrox_rfc4106_callback(void *arg, int err)
{
struct aead_request *areq = arg;
- struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
+ struct nitrox_rfc4106_rctx *rctx = aead_request_ctx_dma(areq);
struct nitrox_kcrypt_request *nkreq = &rctx->base.nkreq;
free_src_sglist(nkreq);
@@ -441,7 +441,7 @@ static int nitrox_rfc4106_enc(struct aead_request *areq)
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
- struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
+ struct nitrox_rfc4106_rctx *rctx = aead_request_ctx_dma(areq);
struct nitrox_aead_rctx *aead_rctx = &rctx->base;
struct se_crypto_request *creq = &aead_rctx->nkreq.creq;
int ret;
@@ -472,7 +472,7 @@ static int nitrox_rfc4106_enc(struct aead_request *areq)
static int nitrox_rfc4106_dec(struct aead_request *areq)
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
- struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
+ struct nitrox_rfc4106_rctx *rctx = aead_request_ctx_dma(areq);
struct nitrox_aead_rctx *aead_rctx = &rctx->base;
struct se_crypto_request *creq = &aead_rctx->nkreq.creq;
@@ -510,8 +510,8 @@ static int nitrox_rfc4106_init(struct crypto_aead *aead)
if (ret)
return ret;
- crypto_aead_set_reqsize(aead, sizeof(struct aead_request) +
- sizeof(struct nitrox_rfc4106_rctx));
+ crypto_aead_set_reqsize_dma(aead, sizeof(struct aead_request) +
+ sizeof(struct nitrox_rfc4106_rctx));
return 0;
}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
index 9e7308e39b30..d4e06999af9b 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
@@ -195,6 +195,7 @@ int nitrox_mbox_init(struct nitrox_device *ndev)
ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0);
if (!ndev->iov.pf2vf_wq) {
kfree(ndev->iov.vfdev);
+ ndev->iov.vfdev = NULL;
return -ENOMEM;
}
/* enable pf2vf mailbox interrupts */
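
The one-line nitrox_mbx fix is the classic NULL-after-kfree pattern: the error path frees vfdev, but a shared teardown routine may free it again unless the pointer is cleared. The shape of the bug and the fix, reduced to a sketch (demo_* names and sizes are illustrative):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_iov {
	void *vfdev;
	struct workqueue_struct *pf2vf_wq;
};

static int demo_mbox_init(struct demo_iov *iov, int num_vfs)
{
	iov->vfdev = kcalloc(num_vfs, sizeof(u64), GFP_KERNEL);
	if (!iov->vfdev)
		return -ENOMEM;

	iov->pf2vf_wq = alloc_workqueue("demo_pf2vf", 0, 0);
	if (!iov->pf2vf_wq) {
		kfree(iov->vfdev);
		iov->vfdev = NULL;	/* later cleanup kfree()s it too */
		return -ENOMEM;
	}
	return 0;
}
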
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 11a305fa19e6..d8426bdf3190 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -25,7 +25,7 @@ static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
{
struct ahash_request *req = ahash_request_cast(async_req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx_dma(req);
unsigned int digest_size = crypto_ahash_digestsize(tfm);
if (ret)
@@ -56,8 +56,8 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
unsigned int final)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_ctx *ctx = crypto_ahash_ctx_dma(tfm);
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx_dma(req);
struct scatterlist *sg, *cmac_key_sg = NULL;
unsigned int block_size =
crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
@@ -182,7 +182,7 @@ e_free:
static int ccp_aes_cmac_init(struct ahash_request *req)
{
- struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx_dma(req);
memset(rctx, 0, sizeof(*rctx));
@@ -219,7 +219,7 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
{
- struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx_dma(req);
struct ccp_aes_cmac_exp_ctx state;
/* Don't let anything leak to 'out' */
@@ -238,7 +238,7 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
{
- struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx_dma(req);
struct ccp_aes_cmac_exp_ctx state;
/* 'in' may not be aligned so memcpy to local variable */
@@ -256,7 +256,7 @@ static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int key_len)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct ccp_ctx *ctx = crypto_ahash_ctx_dma(tfm);
struct ccp_crypto_ahash_alg *alg =
ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo;
@@ -334,13 +334,14 @@ static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_tfm_ctx_dma(tfm);
struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
ctx->complete = ccp_aes_cmac_complete;
ctx->u.aes.key_len = 0;
- crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx));
+ crypto_ahash_set_reqsize_dma(ahash,
+ sizeof(struct ccp_aes_cmac_req_ctx));
return 0;
}
@@ -382,7 +383,7 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
base->cra_blocksize = AES_BLOCK_SIZE;
- base->cra_ctxsize = sizeof(struct ccp_ctx);
+ base->cra_ctxsize = sizeof(struct ccp_ctx) + crypto_dma_padding();
base->cra_priority = CCP_CRA_PRIORITY;
base->cra_init = ccp_aes_cmac_cra_init;
base->cra_module = THIS_MODULE;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index 1c1c939f5c39..b1dbb8cea559 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -29,7 +29,7 @@ static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret)
static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int key_len)
{
- struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_aead_ctx_dma(tfm);
switch (key_len) {
case AES_KEYSIZE_128:
@@ -76,8 +76,8 @@ static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
- struct ccp_aes_req_ctx *rctx = aead_request_ctx(req);
+ struct ccp_ctx *ctx = crypto_aead_ctx_dma(tfm);
+ struct ccp_aes_req_ctx *rctx = aead_request_ctx_dma(req);
struct scatterlist *iv_sg = NULL;
unsigned int iv_len = 0;
int i;
@@ -148,12 +148,12 @@ static int ccp_aes_gcm_decrypt(struct aead_request *req)
static int ccp_aes_gcm_cra_init(struct crypto_aead *tfm)
{
- struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_aead_ctx_dma(tfm);
ctx->complete = ccp_aes_gcm_complete;
ctx->u.aes.key_len = 0;
- crypto_aead_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
+ crypto_aead_set_reqsize_dma(tfm, sizeof(struct ccp_aes_req_ctx));
return 0;
}
@@ -176,7 +176,7 @@ static struct aead_alg ccp_aes_gcm_defaults = {
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct ccp_ctx),
+ .cra_ctxsize = sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = CCP_CRA_PRIORITY,
.cra_exit = ccp_aes_gcm_cra_exit,
.cra_module = THIS_MODULE,
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 6849261ca47d..93f735d6b02b 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -62,7 +62,7 @@ static struct ccp_unit_size_map xts_unit_sizes[] = {
static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
{
struct skcipher_request *req = skcipher_request_cast(async_req);
- struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);
if (ret)
return ret;
@@ -75,7 +75,7 @@ static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
static int ccp_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
unsigned int ccpversion = ccp_version();
int ret;
@@ -105,8 +105,8 @@ static int ccp_aes_xts_crypt(struct skcipher_request *req,
unsigned int encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);
unsigned int ccpversion = ccp_version();
unsigned int fallback = 0;
unsigned int unit;
@@ -196,7 +196,7 @@ static int ccp_aes_xts_decrypt(struct skcipher_request *req)
static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
struct crypto_skcipher *fallback_tfm;
ctx->complete = ccp_aes_xts_complete;
@@ -210,15 +210,16 @@ static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm)
}
ctx->u.aes.tfm_skcipher = fallback_tfm;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx) +
- crypto_skcipher_reqsize(fallback_tfm));
+ crypto_skcipher_set_reqsize_dma(tfm,
+ sizeof(struct ccp_aes_req_ctx) +
+ crypto_skcipher_reqsize(fallback_tfm));
return 0;
}
static void ccp_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
crypto_free_skcipher(ctx->u.aes.tfm_skcipher);
}
@@ -246,7 +247,8 @@ static int ccp_register_aes_xts_alg(struct list_head *head,
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
alg->base.cra_blocksize = AES_BLOCK_SIZE;
- alg->base.cra_ctxsize = sizeof(struct ccp_ctx);
+ alg->base.cra_ctxsize = sizeof(struct ccp_ctx) +
+ crypto_dma_padding();
alg->base.cra_priority = CCP_CRA_PRIORITY;
alg->base.cra_module = THIS_MODULE;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index bed331953ff9..918e223f21b6 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -22,8 +22,9 @@
static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
{
struct skcipher_request *req = skcipher_request_cast(async_req);
- struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(
+ crypto_skcipher_reqtfm(req));
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);
if (ret)
return ret;
@@ -38,7 +39,7 @@ static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm);
- struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
switch (key_len) {
case AES_KEYSIZE_128:
@@ -65,8 +66,8 @@ static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
static int ccp_aes_crypt(struct skcipher_request *req, bool encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct scatterlist *iv_sg = NULL;
unsigned int iv_len = 0;
@@ -118,7 +119,7 @@ static int ccp_aes_decrypt(struct skcipher_request *req)
static int ccp_aes_init_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
ctx->complete = ccp_aes_complete;
ctx->u.aes.key_len = 0;
@@ -132,7 +133,7 @@ static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req,
int ret)
{
struct skcipher_request *req = skcipher_request_cast(async_req);
- struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);
/* Restore the original pointer */
req->iv = rctx->rfc3686_info;
@@ -143,7 +144,7 @@ static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req,
static int ccp_aes_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
if (key_len < CTR_RFC3686_NONCE_SIZE)
return -EINVAL;
@@ -157,8 +158,8 @@ static int ccp_aes_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
static int ccp_aes_rfc3686_crypt(struct skcipher_request *req, bool encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);
u8 *iv;
/* Initialize the CTR block */
@@ -190,12 +191,12 @@ static int ccp_aes_rfc3686_decrypt(struct skcipher_request *req)
static int ccp_aes_rfc3686_init_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
ctx->complete = ccp_aes_rfc3686_complete;
ctx->u.aes.key_len = 0;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
+ crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct ccp_aes_req_ctx));
return 0;
}
@@ -213,7 +214,7 @@ static const struct skcipher_alg ccp_aes_defaults = {
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct ccp_ctx),
+ .base.cra_ctxsize = sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING,
.base.cra_priority = CCP_CRA_PRIORITY,
.base.cra_module = THIS_MODULE,
};
@@ -231,7 +232,7 @@ static const struct skcipher_alg ccp_aes_rfc3686_defaults = {
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = CTR_RFC3686_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct ccp_ctx),
+ .base.cra_ctxsize = sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING,
.base.cra_priority = CCP_CRA_PRIORITY,
.base.cra_module = THIS_MODULE,
};
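
The skcipher conversions in this file (and in ccp-crypto-des3.c below) repeat the same three moves as the hash code: DMA-aligned tfm context, DMA-aligned request context, padded cra_ctxsize. Condensed into a sketch with the 6.2 skcipher helpers (demo_* names invented):

#include <crypto/internal/skcipher.h>
#include <linux/string.h>

struct demo_cipher_ctx {
	u8 key[32];
};

struct demo_cipher_rctx {
	u8 iv[16];
};

static int demo_cipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct demo_cipher_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	crypto_skcipher_set_reqsize_dma(tfm,
					sizeof(struct demo_cipher_rctx));
	memset(ctx->key, 0, sizeof(ctx->key));
	return 0;
}

static int demo_cipher_encrypt(struct skcipher_request *req)
{
	struct demo_cipher_rctx *rctx = skcipher_request_ctx_dma(req);

	memcpy(rctx->iv, req->iv, sizeof(rctx->iv));
	return 0;			/* command submission elided */
}
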
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index 278636ed251a..afae30adb703 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -21,8 +21,9 @@
static int ccp_des3_complete(struct crypto_async_request *async_req, int ret)
{
struct skcipher_request *req = skcipher_request_cast(async_req);
- struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct ccp_des3_req_ctx *rctx = skcipher_request_ctx(req);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(
+ crypto_skcipher_reqtfm(req));
+ struct ccp_des3_req_ctx *rctx = skcipher_request_ctx_dma(req);
if (ret)
return ret;
@@ -37,7 +38,7 @@ static int ccp_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm);
- struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
int err;
err = verify_skcipher_des3_key(tfm, key);
@@ -60,8 +61,8 @@ static int ccp_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
static int ccp_des3_crypt(struct skcipher_request *req, bool encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct ccp_des3_req_ctx *rctx = skcipher_request_ctx(req);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
+ struct ccp_des3_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct scatterlist *iv_sg = NULL;
unsigned int iv_len = 0;
@@ -114,12 +115,12 @@ static int ccp_des3_decrypt(struct skcipher_request *req)
static int ccp_des3_init_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
ctx->complete = ccp_des3_complete;
ctx->u.des3.key_len = 0;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_des3_req_ctx));
+ crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct ccp_des3_req_ctx));
return 0;
}
@@ -137,7 +138,7 @@ static const struct skcipher_alg ccp_des3_defaults = {
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct ccp_ctx),
+ .base.cra_ctxsize = sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING,
.base.cra_priority = CCP_CRA_PRIORITY,
.base.cra_module = THIS_MODULE,
};
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 5976530c00a8..73442a382f68 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -78,13 +78,6 @@ struct ccp_crypto_cmd {
int ret;
};
-struct ccp_crypto_cpu {
- struct work_struct work;
- struct completion completion;
- struct ccp_crypto_cmd *crypto_cmd;
- int err;
-};
-
static inline bool ccp_crypto_success(int err)
{
if (err && (err != -EINPROGRESS) && (err != -EBUSY))
@@ -146,7 +139,7 @@ static void ccp_crypto_complete(void *data, int err)
struct ccp_crypto_cmd *crypto_cmd = data;
struct ccp_crypto_cmd *held, *next, *backlog;
struct crypto_async_request *req = crypto_cmd->req;
- struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
+ struct ccp_ctx *ctx = crypto_tfm_ctx_dma(req->tfm);
int ret;
if (err == -EINPROGRESS) {
@@ -190,7 +183,7 @@ static void ccp_crypto_complete(void *data, int err)
break;
/* Error occurred, report it and get the next entry */
- ctx = crypto_tfm_ctx(held->req->tfm);
+ ctx = crypto_tfm_ctx_dma(held->req->tfm);
if (ctx->complete)
ret = ctx->complete(held->req, ret);
held->req->complete(held->req, ret);
@@ -400,7 +393,7 @@ static void ccp_unregister_algs(void)
}
}
-static int ccp_crypto_init(void)
+static int __init ccp_crypto_init(void)
{
int ret;
@@ -421,7 +414,7 @@ static int ccp_crypto_init(void)
return ret;
}
-static void ccp_crypto_exit(void)
+static void __exit ccp_crypto_exit(void)
{
ccp_unregister_algs();
}
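
Tagging the module constructor and destructor is a small footprint fix: __init lets the kernel discard the function's text once loading is done, and __exit drops it entirely when the driver is built in. The canonical shape:

#include <linux/module.h>

static int __init demo_init(void)
{
	return 0;		/* register algorithms here */
}

static void __exit demo_exit(void)
{
	/* unregister algorithms here */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
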
diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
index 1223ac70aea2..a14f85512cf4 100644
--- a/drivers/crypto/ccp/ccp-crypto-rsa.c
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -44,7 +44,7 @@ static inline int ccp_copy_and_save_keypart(u8 **kpbuf, unsigned int *kplen,
static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)
{
struct akcipher_request *req = akcipher_request_cast(async_req);
- struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
+ struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx_dma(req);
if (ret)
return ret;
@@ -56,7 +56,7 @@ static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)
static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
{
- struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
return ctx->u.rsa.n_len;
}
@@ -64,8 +64,8 @@ static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
+ struct ccp_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
+ struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx_dma(req);
int ret = 0;
memset(&rctx->cmd, 0, sizeof(rctx->cmd));
@@ -126,7 +126,7 @@ static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx)
static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen, bool private)
{
- struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
struct rsa_key raw_key;
int ret;
@@ -192,9 +192,9 @@ static int ccp_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
static int ccp_rsa_init_tfm(struct crypto_akcipher *tfm)
{
- struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
- akcipher_set_reqsize(tfm, sizeof(struct ccp_rsa_req_ctx));
+ akcipher_set_reqsize_dma(tfm, sizeof(struct ccp_rsa_req_ctx));
ctx->complete = ccp_rsa_complete;
return 0;
@@ -202,7 +202,7 @@ static int ccp_rsa_init_tfm(struct crypto_akcipher *tfm)
static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+ struct ccp_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
ccp_rsa_free_key_bufs(ctx);
}
@@ -220,7 +220,7 @@ static struct akcipher_alg ccp_rsa_defaults = {
.cra_driver_name = "rsa-ccp",
.cra_priority = CCP_CRA_PRIORITY,
.cra_module = THIS_MODULE,
- .cra_ctxsize = 2 * sizeof(struct ccp_ctx),
+ .cra_ctxsize = 2 * sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING,
},
};
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 74fa5360e722..fa3ae8e78f6f 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -28,7 +28,7 @@ static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
{
struct ahash_request *req = ahash_request_cast(async_req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx_dma(req);
unsigned int digest_size = crypto_ahash_digestsize(tfm);
if (ret)
@@ -59,8 +59,8 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
unsigned int final)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_ctx *ctx = crypto_ahash_ctx_dma(tfm);
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx_dma(req);
struct scatterlist *sg;
unsigned int block_size =
crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
@@ -182,8 +182,8 @@ e_free:
static int ccp_sha_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_ctx *ctx = crypto_ahash_ctx_dma(tfm);
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx_dma(req);
struct ccp_crypto_ahash_alg *alg =
ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
unsigned int block_size =
@@ -231,7 +231,7 @@ static int ccp_sha_digest(struct ahash_request *req)
static int ccp_sha_export(struct ahash_request *req, void *out)
{
- struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx_dma(req);
struct ccp_sha_exp_ctx state;
/* Don't let anything leak to 'out' */
@@ -252,7 +252,7 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
static int ccp_sha_import(struct ahash_request *req, const void *in)
{
- struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx_dma(req);
struct ccp_sha_exp_ctx state;
/* 'in' may not be aligned so memcpy to local variable */
@@ -272,7 +272,7 @@ static int ccp_sha_import(struct ahash_request *req, const void *in)
static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int key_len)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct ccp_ctx *ctx = crypto_ahash_ctx_dma(tfm);
struct crypto_shash *shash = ctx->u.sha.hmac_tfm;
unsigned int block_size = crypto_shash_blocksize(shash);
unsigned int digest_size = crypto_shash_digestsize(shash);
@@ -313,13 +313,13 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
static int ccp_sha_cra_init(struct crypto_tfm *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct ccp_ctx *ctx = crypto_ahash_ctx_dma(ahash);
ctx->complete = ccp_sha_complete;
ctx->u.sha.key_len = 0;
- crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sha_req_ctx));
+ crypto_ahash_set_reqsize_dma(ahash, sizeof(struct ccp_sha_req_ctx));
return 0;
}
@@ -330,7 +330,7 @@ static void ccp_sha_cra_exit(struct crypto_tfm *tfm)
static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_tfm_ctx_dma(tfm);
struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm);
struct crypto_shash *hmac_tfm;
@@ -348,7 +348,7 @@ static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm)
static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_tfm_ctx_dma(tfm);
if (ctx->u.sha.hmac_tfm)
crypto_free_shash(ctx->u.sha.hmac_tfm);
@@ -492,7 +492,7 @@ static int ccp_register_sha_alg(struct list_head *head,
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
base->cra_blocksize = def->block_size;
- base->cra_ctxsize = sizeof(struct ccp_ctx);
+ base->cra_ctxsize = sizeof(struct ccp_ctx) + crypto_dma_padding();
base->cra_priority = CCP_CRA_PRIORITY;
base->cra_init = ccp_sha_cra_init;
base->cra_exit = ccp_sha_cra_exit;
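
For ahash the same conversion touches both contexts: the per-transform context moves to crypto_ahash_ctx_dma(), the per-request context to ahash_request_ctx_dma(), and the request size is registered through the _dma variant so the padding is accounted for automatically. A sketch under the same assumptions (hypothetical my_hash_* names):

#include <crypto/internal/hash.h>

struct my_hash_ctx { u32 flags; };		/* hypothetical */
struct my_hash_req_ctx { u8 buf[64]; };		/* hypothetical */

static int my_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);

	/* The _dma variant adds the DMA padding to the request size,
	 * mirroring the crypto_ahash_set_reqsize_dma() call above. */
	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct my_hash_req_ctx));
	return 0;
}

static int my_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct my_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	struct my_hash_req_ctx *rctx = ahash_request_ctx_dma(req);

	rctx->buf[0] = ctx->flags & 0xff;	/* placeholder use */
	return 0;
}
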
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 792d6da7f0c0..084d052fddcc 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -381,6 +381,15 @@ static const struct psp_vdata pspv3 = {
.inten_reg = 0x10690,
.intsts_reg = 0x10694,
};
+
+static const struct psp_vdata pspv4 = {
+ .sev = &sevv2,
+ .tee = &teev1,
+ .feature_reg = 0x109fc,
+ .inten_reg = 0x10690,
+ .intsts_reg = 0x10694,
+};
+
#endif
static const struct sp_dev_vdata dev_vdata[] = {
@@ -426,7 +435,7 @@ static const struct sp_dev_vdata dev_vdata[] = {
{ /* 5 */
.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
- .psp_vdata = &pspv2,
+ .psp_vdata = &pspv4,
#endif
},
{ /* 6 */
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 35794c7271fb..109ffb375fc6 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -138,7 +138,7 @@ static int cc_aead_init(struct crypto_aead *tfm)
ctx->flow_mode = cc_alg->flow_mode;
ctx->auth_mode = cc_alg->auth_mode;
ctx->drvdata = cc_alg->drvdata;
- crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
+ crypto_aead_set_reqsize_dma(tfm, sizeof(struct aead_req_ctx));
/* Allocate key buffer, cache line aligned */
ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
@@ -208,7 +208,7 @@ init_failed:
static void cc_aead_complete(struct device *dev, void *cc_req, int err)
{
struct aead_request *areq = (struct aead_request *)cc_req;
- struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(areq);
struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -723,7 +723,7 @@ static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(areq);
enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
unsigned int idx = *seq_size;
struct device *dev = drvdata_to_dev(ctx->drvdata);
@@ -762,7 +762,7 @@ static void cc_proc_authen_desc(struct aead_request *areq,
struct cc_hw_desc desc[],
unsigned int *seq_size, int direct)
{
- struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(areq);
enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
unsigned int idx = *seq_size;
struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
@@ -827,7 +827,7 @@ static void cc_proc_cipher_desc(struct aead_request *areq,
unsigned int *seq_size)
{
unsigned int idx = *seq_size;
- struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(areq);
enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -873,7 +873,7 @@ static void cc_proc_digest_desc(struct aead_request *req,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
unsigned int idx = *seq_size;
unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
@@ -923,7 +923,7 @@ static void cc_set_cipher_desc(struct aead_request *req,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
unsigned int hw_iv_size = req_ctx->hw_iv_size;
unsigned int idx = *seq_size;
int direct = req_ctx->gen_ctx.op_type;
@@ -965,7 +965,7 @@ static void cc_set_cipher_desc(struct aead_request *req,
static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
unsigned int *seq_size, unsigned int data_flow_mode)
{
- struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
int direct = req_ctx->gen_ctx.op_type;
unsigned int idx = *seq_size;
@@ -1082,7 +1082,7 @@ static void cc_proc_header_desc(struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
unsigned int idx = *seq_size;
/* Hash associated data */
@@ -1158,7 +1158,7 @@ static void cc_proc_scheme_desc(struct aead_request *req,
static void cc_mlli_to_sram(struct aead_request *req,
struct cc_hw_desc desc[], unsigned int *seq_size)
{
- struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
@@ -1212,7 +1212,7 @@ static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
int direct = req_ctx->gen_ctx.op_type;
unsigned int data_flow_mode =
cc_get_data_flow(direct, ctx->flow_mode,
@@ -1265,7 +1265,7 @@ cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
int direct = req_ctx->gen_ctx.op_type;
unsigned int data_flow_mode =
cc_get_data_flow(direct, ctx->flow_mode,
@@ -1312,7 +1312,7 @@ static int validate_data_size(struct cc_aead_ctx *ctx,
enum drv_crypto_direction direct,
struct aead_request *req)
{
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
struct device *dev = drvdata_to_dev(ctx->drvdata);
unsigned int assoclen = areq_ctx->assoclen;
unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
@@ -1411,7 +1411,7 @@ static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
unsigned int idx = *seq_size;
unsigned int cipher_flow_mode;
dma_addr_t mac_result;
@@ -1533,7 +1533,7 @@ static int config_ccm_adata(struct aead_request *req)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
//unsigned int size_of_a = 0, rem_a_size = 0;
unsigned int lp = req->iv[0];
/* Note: The code assume that req->iv[0] already contains the value
@@ -1591,7 +1591,7 @@ static void cc_proc_rfc4309_ccm(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
/* L' */
memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
@@ -1615,7 +1615,7 @@ static void cc_set_ghash_desc(struct aead_request *req,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
unsigned int idx = *seq_size;
/* load key to AES*/
@@ -1693,7 +1693,7 @@ static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
unsigned int idx = *seq_size;
/* load key to AES*/
@@ -1730,7 +1730,7 @@ static void cc_proc_gcm_result(struct aead_request *req,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
dma_addr_t mac_result;
unsigned int idx = *seq_size;
@@ -1792,7 +1792,7 @@ static void cc_proc_gcm_result(struct aead_request *req,
static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
unsigned int *seq_size)
{
- struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
unsigned int cipher_flow_mode;
//in RFC4543 no data to encrypt. just copy data from src to dest.
@@ -1830,7 +1830,7 @@ static int config_gcm_context(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
struct device *dev = drvdata_to_dev(ctx->drvdata);
unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
@@ -1879,7 +1879,7 @@ static void cc_proc_rfc4_gcm(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
@@ -1896,7 +1896,7 @@ static int cc_proc_aead(struct aead_request *req,
struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct cc_crypto_req cc_req = {};
@@ -2019,7 +2019,7 @@ exit:
static int cc_aead_encrypt(struct aead_request *req)
{
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
int rc;
memset(areq_ctx, 0, sizeof(*areq_ctx));
@@ -2039,7 +2039,7 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
{
/* Very similar to cc_aead_encrypt() above. */
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
int rc;
rc = crypto_ipsec_check_assoclen(req->assoclen);
@@ -2063,7 +2063,7 @@ out:
static int cc_aead_decrypt(struct aead_request *req)
{
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
int rc;
memset(areq_ctx, 0, sizeof(*areq_ctx));
@@ -2081,7 +2081,7 @@ static int cc_aead_decrypt(struct aead_request *req)
static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
{
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
int rc;
rc = crypto_ipsec_check_assoclen(req->assoclen);
@@ -2193,7 +2193,7 @@ static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
{
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
int rc;
rc = crypto_ipsec_check_assoclen(req->assoclen);
@@ -2217,7 +2217,7 @@ out:
static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
{
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
int rc;
rc = crypto_ipsec_check_assoclen(req->assoclen);
@@ -2244,7 +2244,7 @@ out:
static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
{
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
int rc;
rc = crypto_ipsec_check_assoclen(req->assoclen);
@@ -2268,7 +2268,7 @@ out:
static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
{
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
int rc;
rc = crypto_ipsec_check_assoclen(req->assoclen);
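
The ccree AEAD hunks are the same mechanical conversion for aead_request_ctx(): the request size is set once with crypto_aead_set_reqsize_dma() in cc_aead_init(), after which every consumer must use the matching aead_request_ctx_dma() accessor or it would read from the wrong offset on platforms that need DMA padding. In sketch form (hypothetical names):

#include <crypto/internal/aead.h>
#include <linux/string.h>

struct my_aead_req_ctx { u8 mac[16]; };	/* hypothetical */

static int my_aead_init(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize_dma(tfm, sizeof(struct my_aead_req_ctx));
	return 0;
}

static int my_aead_encrypt(struct aead_request *req)
{
	/* Same storage as before the conversion, just offset past the
	 * DMA padding when the platform requires it. */
	struct my_aead_req_ctx *rctx = aead_request_ctx_dma(req);

	memset(rctx->mac, 0, sizeof(rctx->mac));
	return 0;
}
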
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 9efd88f871d1..bcca55bff910 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -52,7 +52,7 @@ static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
static void cc_copy_mac(struct device *dev, struct aead_request *req,
enum cc_sg_cpy_direct dir)
{
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
u32 skip = req->assoclen + req->cryptlen;
cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
@@ -456,7 +456,7 @@ cipher_exit:
void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
int src_direction = (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
@@ -546,7 +546,7 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
struct buffer_array *sg_data,
bool is_last, bool do_chain)
{
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
struct device *dev = drvdata_to_dev(drvdata);
gfp_t flags = cc_gfp_flags(&req->base);
@@ -586,7 +586,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
struct buffer_array *sg_data,
bool is_last, bool do_chain)
{
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
int rc = 0;
int mapped_nents = 0;
struct device *dev = drvdata_to_dev(drvdata);
@@ -652,7 +652,7 @@ chain_assoc_exit:
static void cc_prepare_aead_data_dlli(struct aead_request *req,
u32 *src_last_bytes, u32 *dst_last_bytes)
{
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
unsigned int authsize = areq_ctx->req_authsize;
struct scatterlist *sg;
@@ -678,7 +678,7 @@ static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
u32 *src_last_bytes, u32 *dst_last_bytes,
bool is_last_table)
{
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
unsigned int authsize = areq_ctx->req_authsize;
struct device *dev = drvdata_to_dev(drvdata);
@@ -790,7 +790,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
struct buffer_array *sg_data,
bool is_last_table, bool do_chain)
{
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
struct device *dev = drvdata_to_dev(drvdata);
enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
unsigned int authsize = areq_ctx->req_authsize;
@@ -895,7 +895,7 @@ chain_data_exit:
static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
struct aead_request *req)
{
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
u32 curr_mlli_size = 0;
if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
@@ -945,7 +945,7 @@ static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
{
- struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
struct device *dev = drvdata_to_dev(drvdata);
struct buffer_array sg_data;
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
index 7083767602fc..8f008f024f8f 100644
--- a/drivers/crypto/ccree/cc_debugfs.c
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -55,7 +55,7 @@ void __init cc_debugfs_global_init(void)
cc_debugfs_dir = debugfs_create_dir("ccree", NULL);
}
-void __exit cc_debugfs_global_fini(void)
+void cc_debugfs_global_fini(void)
{
debugfs_remove(cc_debugfs_dir);
}
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index cadead18b59e..d489c6f80892 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -651,9 +651,17 @@ static struct platform_driver ccree_driver = {
static int __init ccree_init(void)
{
+ int rc;
+
cc_debugfs_global_init();
- return platform_driver_register(&ccree_driver);
+ rc = platform_driver_register(&ccree_driver);
+ if (rc) {
+ cc_debugfs_global_fini();
+ return rc;
+ }
+
+ return 0;
}
module_init(ccree_init);
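
The ccree_init() fix pairs the debugfs setup with teardown on the registration error path. It is also why cc_debugfs_global_fini() loses its __exit annotation in the cc_debugfs.c hunk above: the function is now reachable from init code, and a reference from .init.text into .exit.text would break once the exit section is discarded (for example in built-in configurations). The general shape, with hypothetical my_* names:

#include <linux/module.h>
#include <linux/platform_device.h>

static void my_debugfs_init(void) { }	/* step 1: cannot fail */
/* Must NOT be __exit: it is called from the init error path too. */
static void my_debugfs_fini(void) { }

static struct platform_driver my_driver = {
	.driver = { .name = "my-drv" },
};

static int __init my_init(void)
{
	int rc;

	my_debugfs_init();

	rc = platform_driver_register(&my_driver);
	if (rc) {
		my_debugfs_fini();	/* unwind step 1 */
		return rc;
	}

	return 0;
}
module_init(my_init);
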
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index 683c9a430e11..f418162932fe 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -283,9 +283,9 @@ static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
static void cc_update_complete(struct device *dev, void *cc_req, int err)
{
struct ahash_request *req = (struct ahash_request *)cc_req;
- struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
dev_dbg(dev, "req=%pK\n", req);
@@ -301,9 +301,9 @@ static void cc_update_complete(struct device *dev, void *cc_req, int err)
static void cc_digest_complete(struct device *dev, void *cc_req, int err)
{
struct ahash_request *req = (struct ahash_request *)cc_req;
- struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
dev_dbg(dev, "req=%pK\n", req);
@@ -321,9 +321,9 @@ static void cc_digest_complete(struct device *dev, void *cc_req, int err)
static void cc_hash_complete(struct device *dev, void *cc_req, int err)
{
struct ahash_request *req = (struct ahash_request *)cc_req;
- struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
dev_dbg(dev, "req=%pK\n", req);
@@ -341,9 +341,9 @@ static void cc_hash_complete(struct device *dev, void *cc_req, int err)
static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
int idx)
{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
/* Get final MAC result */
@@ -364,9 +364,9 @@ static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
int idx)
{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
/* store the hash digest result in the context */
@@ -417,9 +417,9 @@ static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
static int cc_hash_digest(struct ahash_request *req)
{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
struct scatterlist *src = req->src;
unsigned int nbytes = req->nbytes;
@@ -555,9 +555,9 @@ static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
static int cc_hash_update(struct ahash_request *req)
{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
struct scatterlist *src = req->src;
unsigned int nbytes = req->nbytes;
@@ -631,9 +631,9 @@ static int cc_hash_update(struct ahash_request *req)
static int cc_do_finup(struct ahash_request *req, bool update)
{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
struct scatterlist *src = req->src;
unsigned int nbytes = req->nbytes;
@@ -711,9 +711,9 @@ static int cc_hash_final(struct ahash_request *req)
static int cc_hash_init(struct ahash_request *req)
{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
@@ -736,7 +736,7 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
u32 larval_addr;
struct device *dev;
- ctx = crypto_ahash_ctx(ahash);
+ ctx = crypto_ahash_ctx_dma(ahash);
dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "start keylen: %d", keylen);
@@ -922,7 +922,7 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
struct cc_crypto_req cc_req = {};
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
struct device *dev = drvdata_to_dev(ctx->drvdata);
int rc = 0;
unsigned int idx = 0;
@@ -1007,7 +1007,7 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
static int cc_cmac_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
@@ -1109,7 +1109,7 @@ fail:
static int cc_get_hash_len(struct crypto_tfm *tfm)
{
- struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
if (ctx->hash_mode == DRV_HASH_SM3)
return CC_SM3_HASH_LEN_SIZE;
@@ -1119,7 +1119,7 @@ static int cc_get_hash_len(struct crypto_tfm *tfm)
static int cc_cra_init(struct crypto_tfm *tfm)
{
- struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
struct hash_alg_common *hash_alg_common =
container_of(tfm->__crt_alg, struct hash_alg_common, base);
struct ahash_alg *ahash_alg =
@@ -1127,8 +1127,8 @@ static int cc_cra_init(struct crypto_tfm *tfm)
struct cc_hash_alg *cc_alg =
container_of(ahash_alg, struct cc_hash_alg, ahash_alg);
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_req_ctx));
+ crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
+ sizeof(struct ahash_req_ctx));
ctx->hash_mode = cc_alg->hash_mode;
ctx->hw_mode = cc_alg->hw_mode;
@@ -1140,7 +1140,7 @@ static int cc_cra_init(struct crypto_tfm *tfm)
static void cc_cra_exit(struct crypto_tfm *tfm)
{
- struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "cc_cra_exit");
@@ -1149,9 +1149,9 @@ static void cc_cra_exit(struct crypto_tfm *tfm)
static int cc_mac_update(struct ahash_request *req)
{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
struct cc_crypto_req cc_req = {};
@@ -1217,9 +1217,9 @@ static int cc_mac_update(struct ahash_request *req)
static int cc_mac_final(struct ahash_request *req)
{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct cc_crypto_req cc_req = {};
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
@@ -1338,9 +1338,9 @@ static int cc_mac_final(struct ahash_request *req)
static int cc_mac_finup(struct ahash_request *req)
{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct cc_crypto_req cc_req = {};
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
@@ -1419,9 +1419,9 @@ static int cc_mac_finup(struct ahash_request *req)
static int cc_mac_digest(struct ahash_request *req)
{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
u32 digestsize = crypto_ahash_digestsize(tfm);
struct cc_crypto_req cc_req = {};
@@ -1499,8 +1499,8 @@ static int cc_mac_digest(struct ahash_request *req)
static int cc_hash_export(struct ahash_request *req, void *out)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
u8 *curr_buff = cc_hash_buf(state);
u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
const u32 tmp = CC_EXPORT_MAGIC;
@@ -1525,9 +1525,9 @@ static int cc_hash_export(struct ahash_request *req, void *out)
static int cc_hash_import(struct ahash_request *req, const void *in)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
u32 tmp;
memcpy(&tmp, in, sizeof(u32));
@@ -1846,7 +1846,7 @@ static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
template->driver_name);
}
alg->cra_module = THIS_MODULE;
- alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
+ alg->cra_ctxsize = sizeof(struct cc_hash_ctx) + crypto_dma_padding();
alg->cra_priority = CC_CRA_PRIO;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
@@ -2073,9 +2073,9 @@ static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
unsigned int *seq_size)
{
unsigned int idx = *seq_size;
- struct ahash_req_ctx *state = ahash_request_ctx(areq);
+ struct ahash_req_ctx *state = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
/* Setup XCBC MAC K1 */
hw_desc_init(&desc[idx]);
@@ -2130,9 +2130,9 @@ static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
unsigned int *seq_size)
{
unsigned int idx = *seq_size;
- struct ahash_req_ctx *state = ahash_request_ctx(areq);
+ struct ahash_req_ctx *state = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
/* Setup CMAC Key */
hw_desc_init(&desc[idx]);
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index f886401af13e..5dd3f6a4781a 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -3,11 +3,11 @@ config CRYPTO_DEV_CHELSIO
tristate "Chelsio Crypto Co-processor Driver"
depends on CHELSIO_T4
select CRYPTO_LIB_AES
+ select CRYPTO_LIB_GF128MUL
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
select CRYPTO_AUTHENC
- select CRYPTO_GF128MUL
help
The Chelsio Crypto Co-processor driver for T6 adapters.
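
The Kconfig hunk reflects the move of the GF(2^128) multiplier into lib/crypto: CRYPTO_LIB_GF128MUL replaces CRYPTO_GF128MUL, while the header and entry points stay the same, so call sites are untouched. A minimal sketch of the API (not chelsio's actual call site):

#include <crypto/gf128mul.h>	/* provided via CRYPTO_LIB_GF128MUL */

static void my_ghash_mul(be128 *acc, const be128 *h)
{
	/* Multiply *acc by h in the "lle" bit ordering (the convention
	 * GHASH uses); the product replaces *acc in place. */
	gf128mul_lle(acc, h);
}
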
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 6933546f87b1..68d65773ef2b 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -98,17 +98,17 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
- return ctx->crypto_ctx->aeadctx;
+ return &ctx->crypto_ctx->aeadctx;
}
static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
- return ctx->crypto_ctx->ablkctx;
+ return &ctx->crypto_ctx->ablkctx;
}
static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
- return ctx->crypto_ctx->hmacctx;
+ return &ctx->crypto_ctx->hmacctx;
}
static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
@@ -210,7 +210,7 @@ static inline int chcr_handle_aead_resp(struct aead_request *req,
unsigned char *input,
int err)
{
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_dev *dev = a_ctx(tfm)->dev;
@@ -718,7 +718,7 @@ static inline int get_qidxs(struct crypto_async_request *req,
{
struct aead_request *aead_req =
container_of(req, struct aead_request, base);
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(aead_req);
*txqidx = reqctx->txqidx;
*rxqidx = reqctx->rxqidx;
break;
@@ -2362,7 +2362,7 @@ static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
inline void chcr_aead_common_exit(struct aead_request *req)
{
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
@@ -2373,7 +2373,7 @@ static int chcr_aead_common_init(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
unsigned int authsize = crypto_aead_authsize(tfm);
int error = -EINVAL;
@@ -2417,7 +2417,7 @@ static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
- struct aead_request *subreq = aead_request_ctx(req);
+ struct aead_request *subreq = aead_request_ctx_dma(req);
aead_request_set_tfm(subreq, aeadctx->sw_cipher);
aead_request_set_callback(subreq, req->base.flags,
@@ -2438,7 +2438,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
@@ -2576,7 +2576,7 @@ int chcr_aead_dma_map(struct device *dev,
unsigned short op_type)
{
int error;
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned int authsize = crypto_aead_authsize(tfm);
int src_len, dst_len;
@@ -2637,7 +2637,7 @@ void chcr_aead_dma_unmap(struct device *dev,
struct aead_request *req,
unsigned short op_type)
{
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned int authsize = crypto_aead_authsize(tfm);
int src_len, dst_len;
@@ -2678,7 +2678,7 @@ void chcr_add_aead_src_ent(struct aead_request *req,
struct ulptx_sgl *ulptx)
{
struct ulptx_walk ulp_walk;
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
if (reqctx->imm) {
u8 *buf = (u8 *)ulptx;
@@ -2704,7 +2704,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
unsigned short qid)
{
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct dsgl_walk dsgl_walk;
unsigned int authsize = crypto_aead_authsize(tfm);
@@ -2894,7 +2894,7 @@ static int generate_b0(struct aead_request *req, u8 *ivptr,
unsigned int l, lp, m;
int rc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
u8 *b0 = reqctx->scratch_pad;
m = crypto_aead_authsize(aead);
@@ -2932,7 +2932,7 @@ static int ccm_format_packet(struct aead_request *req,
unsigned short op_type,
unsigned int assoclen)
{
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
int rc = 0;
@@ -2963,7 +2963,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
struct chcr_context *ctx = a_ctx(tfm);
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
@@ -3036,7 +3036,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
@@ -3135,7 +3135,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
struct chcr_context *ctx = a_ctx(tfm);
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
@@ -3255,9 +3255,10 @@ static int chcr_aead_cra_init(struct crypto_aead *tfm)
CRYPTO_ALG_ASYNC);
if (IS_ERR(aeadctx->sw_cipher))
return PTR_ERR(aeadctx->sw_cipher);
- crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
- sizeof(struct aead_request) +
- crypto_aead_reqsize(aeadctx->sw_cipher)));
+ crypto_aead_set_reqsize_dma(
+ tfm, max(sizeof(struct chcr_aead_reqctx),
+ sizeof(struct aead_request) +
+ crypto_aead_reqsize(aeadctx->sw_cipher)));
return chcr_device_init(a_ctx(tfm));
}
@@ -3735,7 +3736,7 @@ static int chcr_aead_op(struct aead_request *req,
create_wr_t create_wr_fn)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
struct chcr_context *ctx = a_ctx(tfm);
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct sk_buff *skb;
@@ -3785,7 +3786,7 @@ static int chcr_aead_op(struct aead_request *req,
static int chcr_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
struct chcr_context *ctx = a_ctx(tfm);
unsigned int cpu;
@@ -3816,7 +3817,7 @@ static int chcr_aead_decrypt(struct aead_request *req)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_context *ctx = a_ctx(tfm);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
int size;
unsigned int cpu;
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index c7816c83e324..7f88ddb08631 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -248,9 +248,9 @@ struct hmac_ctx {
struct __crypto_ctx {
union {
- DECLARE_FLEX_ARRAY(struct hmac_ctx, hmacctx);
- DECLARE_FLEX_ARRAY(struct ablk_ctx, ablkctx);
- DECLARE_FLEX_ARRAY(struct chcr_aead_ctx, aeadctx);
+ struct hmac_ctx hmacctx;
+ struct ablk_ctx ablkctx;
+ struct chcr_aead_ctx aeadctx;
};
};
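
Two details in the chelsio conversion: first, the request area must hold either the driver's own context or a complete fallback sub-request, so the _dma sizing keeps the max(); second, replacing the DECLARE_FLEX_ARRAY() union members with plain members gives struct __crypto_ctx a well-defined sizeof for the padded context arithmetic, which is why AEAD_CTX()/ABLK_CTX()/HMAC_CTX() now take each member's address explicitly. A sketch of the sizing rule (hypothetical names; the fallback tfm is assumed to have been allocated earlier):

#include <crypto/internal/aead.h>
#include <linux/minmax.h>

struct my_reqctx { u8 iv[16]; };	/* hypothetical */

static void my_set_reqsize(struct crypto_aead *tfm,
			   struct crypto_aead *sw_fallback)
{
	/* Large enough for our own state or for a sub-request on the
	 * software fallback, whichever is bigger; the _dma variant
	 * adds the DMA padding on top. */
	crypto_aead_set_reqsize_dma(tfm,
				    max(sizeof(struct my_reqctx),
					sizeof(struct aead_request) +
					crypto_aead_reqsize(sw_fallback)));
}
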
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index 27e1fa912063..743ce4fc3158 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -26,7 +26,7 @@ config CRYPTO_DEV_HISI_SEC2
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
- select CRYPTO_SM4
+ select CRYPTO_SM4_GENERIC
depends on PCI && PCI_MSI
depends on UACCE || UACCE=n
depends on ARM64 || (COMPILE_TEST && 64BIT)
diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile
index 1e89269a2e4b..8595a5a5d228 100644
--- a/drivers/crypto/hisilicon/Makefile
+++ b/drivers/crypto/hisilicon/Makefile
@@ -3,6 +3,6 @@ obj-$(CONFIG_CRYPTO_DEV_HISI_HPRE) += hpre/
obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/
obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += sec2/
obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += hisi_qm.o
-hisi_qm-objs = qm.o sgl.o
+hisi_qm-objs = qm.o sgl.o debugfs.o
obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/
obj-$(CONFIG_CRYPTO_DEV_HISI_TRNG) += trng/
diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
new file mode 100644
index 000000000000..2cc1591949db
--- /dev/null
+++ b/drivers/crypto/hisilicon/debugfs.c
@@ -0,0 +1,1147 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 HiSilicon Limited. */
+#include <linux/hisi_acc_qm.h>
+#include "qm_common.h"
+
+#define QM_DFX_BASE 0x0100000
+#define QM_DFX_STATE1 0x0104000
+#define QM_DFX_STATE2 0x01040C8
+#define QM_DFX_COMMON 0x0000
+#define QM_DFX_BASE_LEN 0x5A
+#define QM_DFX_STATE1_LEN 0x2E
+#define QM_DFX_STATE2_LEN 0x11
+#define QM_DFX_COMMON_LEN 0xC3
+#define QM_DFX_REGS_LEN 4UL
+#define QM_DBG_TMP_BUF_LEN 22
+#define CURRENT_FUN_MASK GENMASK(5, 0)
+#define CURRENT_Q_MASK GENMASK(31, 16)
+#define QM_SQE_ADDR_MASK GENMASK(7, 0)
+
+#define QM_DFX_MB_CNT_VF 0x104010
+#define QM_DFX_DB_CNT_VF 0x104020
+#define QM_DFX_SQE_CNT_VF_SQN 0x104030
+#define QM_DFX_CQE_CNT_VF_CQN 0x104040
+#define QM_DFX_QN_SHIFT 16
+#define QM_DFX_CNT_CLR_CE 0x100118
+#define QM_DBG_WRITE_LEN 1024
+
+static const char * const qm_debug_file_name[] = {
+ [CURRENT_QM] = "current_qm",
+ [CURRENT_Q] = "current_q",
+ [CLEAR_ENABLE] = "clear_enable",
+};
+
+struct qm_dfx_item {
+ const char *name;
+ u32 offset;
+};
+
+struct qm_cmd_dump_item {
+ const char *cmd;
+ char *info_name;
+ int (*dump_fn)(struct hisi_qm *qm, char *cmd, char *info_name);
+};
+
+static struct qm_dfx_item qm_dfx_files[] = {
+ {"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
+ {"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
+ {"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
+ {"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
+ {"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
+};
+
+#define CNT_CYC_REGS_NUM 10
+static const struct debugfs_reg32 qm_dfx_regs[] = {
+	/* XXX_CNT are read-clear registers */
+ {"QM_ECC_1BIT_CNT ", 0x104000ull},
+ {"QM_ECC_MBIT_CNT ", 0x104008ull},
+ {"QM_DFX_MB_CNT ", 0x104018ull},
+ {"QM_DFX_DB_CNT ", 0x104028ull},
+ {"QM_DFX_SQE_CNT ", 0x104038ull},
+ {"QM_DFX_CQE_CNT ", 0x104048ull},
+ {"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050ull},
+ {"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058ull},
+ {"QM_DFX_ACC_FINISH_CNT ", 0x104060ull},
+ {"QM_DFX_CQE_ERR_CNT ", 0x1040b4ull},
+ {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
+ {"QM_ECC_1BIT_INF ", 0x104004ull},
+ {"QM_ECC_MBIT_INF ", 0x10400cull},
+ {"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0ull},
+ {"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4ull},
+ {"QM_DFX_AXI_RDY_VLD ", 0x1040a8ull},
+ {"QM_DFX_FF_ST0 ", 0x1040c8ull},
+ {"QM_DFX_FF_ST1 ", 0x1040ccull},
+ {"QM_DFX_FF_ST2 ", 0x1040d0ull},
+ {"QM_DFX_FF_ST3 ", 0x1040d4ull},
+ {"QM_DFX_FF_ST4 ", 0x1040d8ull},
+ {"QM_DFX_FF_ST5 ", 0x1040dcull},
+ {"QM_DFX_FF_ST6 ", 0x1040e0ull},
+ {"QM_IN_IDLE_ST ", 0x1040e4ull},
+};
+
+static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
+ {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
+};
+
+/* Define the QM's DFX register regions and their lengths. */
+static struct dfx_diff_registers qm_diff_regs[] = {
+ {
+ .reg_offset = QM_DFX_BASE,
+ .reg_len = QM_DFX_BASE_LEN,
+ }, {
+ .reg_offset = QM_DFX_STATE1,
+ .reg_len = QM_DFX_STATE1_LEN,
+ }, {
+ .reg_offset = QM_DFX_STATE2,
+ .reg_len = QM_DFX_STATE2_LEN,
+ }, {
+ .reg_offset = QM_DFX_COMMON,
+ .reg_len = QM_DFX_COMMON_LEN,
+ },
+};
+
+static struct hisi_qm *file_to_qm(struct debugfs_file *file)
+{
+ struct qm_debug *debug = file->debug;
+
+ return container_of(debug, struct hisi_qm, debug);
+}
+
+static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ char buf[QM_DBG_READ_LEN];
+ int len;
+
+ len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
+ "Please echo help to cmd to get help information");
+
+ return simple_read_from_buffer(buffer, count, pos, buf, len);
+}
+
+static void dump_show(struct hisi_qm *qm, void *info,
+ unsigned int info_size, char *info_name)
+{
+ struct device *dev = &qm->pdev->dev;
+ u8 *info_curr = info;
+ u32 i;
+#define BYTE_PER_DW 4
+
+ dev_info(dev, "%s DUMP\n", info_name);
+ for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) {
+ pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
+ *(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr));
+ }
+}
+
+static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
+{
+ struct device *dev = &qm->pdev->dev;
+ struct qm_sqc *sqc, *sqc_curr;
+ dma_addr_t sqc_dma;
+ u32 qp_id;
+ int ret;
+
+ if (!s)
+ return -EINVAL;
+
+ ret = kstrtou32(s, 0, &qp_id);
+ if (ret || qp_id >= qm->qp_num) {
+ dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
+ return -EINVAL;
+ }
+
+ sqc = hisi_qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
+ if (IS_ERR(sqc))
+ return PTR_ERR(sqc);
+
+ ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 1);
+ if (ret) {
+ down_read(&qm->qps_lock);
+ if (qm->sqc) {
+ sqc_curr = qm->sqc + qp_id;
+
+ dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC");
+ }
+ up_read(&qm->qps_lock);
+
+ goto free_ctx;
+ }
+
+ dump_show(qm, sqc, sizeof(*sqc), name);
+
+free_ctx:
+ hisi_qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
+ return 0;
+}
+
+static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
+{
+ struct device *dev = &qm->pdev->dev;
+ struct qm_cqc *cqc, *cqc_curr;
+ dma_addr_t cqc_dma;
+ u32 qp_id;
+ int ret;
+
+ if (!s)
+ return -EINVAL;
+
+ ret = kstrtou32(s, 0, &qp_id);
+ if (ret || qp_id >= qm->qp_num) {
+ dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
+ return -EINVAL;
+ }
+
+ cqc = hisi_qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
+ if (IS_ERR(cqc))
+ return PTR_ERR(cqc);
+
+ ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 1);
+ if (ret) {
+ down_read(&qm->qps_lock);
+ if (qm->cqc) {
+ cqc_curr = qm->cqc + qp_id;
+
+ dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC");
+ }
+ up_read(&qm->qps_lock);
+
+ goto free_ctx;
+ }
+
+ dump_show(qm, cqc, sizeof(*cqc), name);
+
+free_ctx:
+ hisi_qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
+ return 0;
+}
+
+static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name)
+{
+ struct device *dev = &qm->pdev->dev;
+ dma_addr_t xeqc_dma;
+ size_t size;
+ void *xeqc;
+ int ret;
+ u8 cmd;
+
+ if (strsep(&s, " ")) {
+ dev_err(dev, "Please do not input extra characters!\n");
+ return -EINVAL;
+ }
+
+ if (!strcmp(name, "EQC")) {
+ cmd = QM_MB_CMD_EQC;
+ size = sizeof(struct qm_eqc);
+ } else {
+ cmd = QM_MB_CMD_AEQC;
+ size = sizeof(struct qm_aeqc);
+ }
+
+ xeqc = hisi_qm_ctx_alloc(qm, size, &xeqc_dma);
+ if (IS_ERR(xeqc))
+ return PTR_ERR(xeqc);
+
+ ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1);
+ if (ret)
+ goto err_free_ctx;
+
+ dump_show(qm, xeqc, size, name);
+
+err_free_ctx:
+ hisi_qm_ctx_free(qm, size, xeqc, &xeqc_dma);
+ return ret;
+}
+
+static int q_dump_param_parse(struct hisi_qm *qm, char *s,
+ u32 *e_id, u32 *q_id, u16 q_depth)
+{
+ struct device *dev = &qm->pdev->dev;
+ unsigned int qp_num = qm->qp_num;
+ char *presult;
+ int ret;
+
+ presult = strsep(&s, " ");
+ if (!presult) {
+ dev_err(dev, "Please input qp number!\n");
+ return -EINVAL;
+ }
+
+ ret = kstrtou32(presult, 0, q_id);
+ if (ret || *q_id >= qp_num) {
+ dev_err(dev, "Please input qp num (0-%u)", qp_num - 1);
+ return -EINVAL;
+ }
+
+ presult = strsep(&s, " ");
+ if (!presult) {
+ dev_err(dev, "Please input sqe number!\n");
+ return -EINVAL;
+ }
+
+ ret = kstrtou32(presult, 0, e_id);
+ if (ret || *e_id >= q_depth) {
+ dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);
+ return -EINVAL;
+ }
+
+ if (strsep(&s, " ")) {
+ dev_err(dev, "Please do not input extra characters!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qm_sq_dump(struct hisi_qm *qm, char *s, char *name)
+{
+	u16 sq_depth = qm->qp_array->sq_depth;
+ void *sqe, *sqe_curr;
+ struct hisi_qp *qp;
+ u32 qp_id, sqe_id;
+ int ret;
+
+ ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth);
+ if (ret)
+ return ret;
+
+ sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL);
+ if (!sqe)
+ return -ENOMEM;
+
+ qp = &qm->qp_array[qp_id];
+ memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth);
+ sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
+ memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
+ qm->debug.sqe_mask_len);
+
+ dump_show(qm, sqe_curr, qm->sqe_size, name);
+
+ kfree(sqe);
+
+ return 0;
+}
+
+static int qm_cq_dump(struct hisi_qm *qm, char *s, char *name)
+{
+ struct qm_cqe *cqe_curr;
+ struct hisi_qp *qp;
+ u32 qp_id, cqe_id;
+ int ret;
+
+ ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth);
+ if (ret)
+ return ret;
+
+ qp = &qm->qp_array[qp_id];
+ cqe_curr = qp->cqe + cqe_id;
+ dump_show(qm, cqe_curr, sizeof(struct qm_cqe), name);
+
+ return 0;
+}
+
+static int qm_eq_aeq_dump(struct hisi_qm *qm, char *s, char *name)
+{
+ struct device *dev = &qm->pdev->dev;
+ u16 xeq_depth;
+ size_t size;
+ void *xeqe;
+ u32 xeqe_id;
+ int ret;
+
+ if (!s)
+ return -EINVAL;
+
+ ret = kstrtou32(s, 0, &xeqe_id);
+ if (ret)
+ return -EINVAL;
+
+ if (!strcmp(name, "EQE")) {
+ xeq_depth = qm->eq_depth;
+ size = sizeof(struct qm_eqe);
+ } else {
+ xeq_depth = qm->aeq_depth;
+ size = sizeof(struct qm_aeqe);
+ }
+
+ if (xeqe_id >= xeq_depth) {
+ dev_err(dev, "Please input eqe or aeqe num (0-%u)", xeq_depth - 1);
+ return -EINVAL;
+ }
+
+ down_read(&qm->qps_lock);
+
+ if (qm->eqe && !strcmp(name, "EQE")) {
+ xeqe = qm->eqe + xeqe_id;
+ } else if (qm->aeqe && !strcmp(name, "AEQE")) {
+ xeqe = qm->aeqe + xeqe_id;
+ } else {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ dump_show(qm, xeqe, size, name);
+
+err_unlock:
+ up_read(&qm->qps_lock);
+ return ret;
+}
+
+static int qm_dbg_help(struct hisi_qm *qm, char *s)
+{
+ struct device *dev = &qm->pdev->dev;
+
+ if (strsep(&s, " ")) {
+ dev_err(dev, "Please do not input extra characters!\n");
+ return -EINVAL;
+ }
+
+ dev_info(dev, "available commands:\n");
+ dev_info(dev, "sqc <num>\n");
+ dev_info(dev, "cqc <num>\n");
+ dev_info(dev, "eqc\n");
+ dev_info(dev, "aeqc\n");
+ dev_info(dev, "sq <num> <e>\n");
+ dev_info(dev, "cq <num> <e>\n");
+ dev_info(dev, "eq <e>\n");
+ dev_info(dev, "aeq <e>\n");
+
+ return 0;
+}
+
+static const struct qm_cmd_dump_item qm_cmd_dump_table[] = {
+ {
+ .cmd = "sqc",
+ .info_name = "SQC",
+ .dump_fn = qm_sqc_dump,
+ }, {
+ .cmd = "cqc",
+ .info_name = "CQC",
+ .dump_fn = qm_cqc_dump,
+ }, {
+ .cmd = "eqc",
+ .info_name = "EQC",
+ .dump_fn = qm_eqc_aeqc_dump,
+ }, {
+ .cmd = "aeqc",
+ .info_name = "AEQC",
+ .dump_fn = qm_eqc_aeqc_dump,
+ }, {
+ .cmd = "sq",
+ .info_name = "SQE",
+ .dump_fn = qm_sq_dump,
+ }, {
+ .cmd = "cq",
+ .info_name = "CQE",
+ .dump_fn = qm_cq_dump,
+ }, {
+ .cmd = "eq",
+ .info_name = "EQE",
+ .dump_fn = qm_eq_aeq_dump,
+ }, {
+ .cmd = "aeq",
+ .info_name = "AEQE",
+ .dump_fn = qm_eq_aeq_dump,
+ },
+};
+
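+/*
+ * The command written to the debugfs "cmd" file is tokenised below: the
+ * first word selects an entry in qm_cmd_dump_table and the remainder of
+ * the line is passed unparsed to that entry's dump_fn.
+ */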
+static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
+{
+ struct device *dev = &qm->pdev->dev;
+ char *presult, *s, *s_tmp;
+ int table_size, i, ret;
+
+ s = kstrdup(cmd_buf, GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ s_tmp = s;
+ presult = strsep(&s, " ");
+ if (!presult) {
+ ret = -EINVAL;
+ goto err_buffer_free;
+ }
+
+ if (!strcmp(presult, "help")) {
+ ret = qm_dbg_help(qm, s);
+ goto err_buffer_free;
+ }
+
+ table_size = ARRAY_SIZE(qm_cmd_dump_table);
+ for (i = 0; i < table_size; i++) {
+ if (!strcmp(presult, qm_cmd_dump_table[i].cmd)) {
+ ret = qm_cmd_dump_table[i].dump_fn(qm, s,
+ qm_cmd_dump_table[i].info_name);
+ break;
+ }
+ }
+
+ if (i == table_size) {
+ dev_info(dev, "Please echo help\n");
+ ret = -EINVAL;
+ }
+
+err_buffer_free:
+ kfree(s_tmp);
+
+ return ret;
+}
+
+static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct hisi_qm *qm = filp->private_data;
+ char *cmd_buf, *cmd_buf_tmp;
+ int ret;
+
+ if (*pos)
+ return 0;
+
+ ret = hisi_qm_get_dfx_access(qm);
+ if (ret)
+ return ret;
+
+	/* Check whether the instance is being reset. */
+ if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) {
+ ret = 0;
+ goto put_dfx_access;
+ }
+
+ if (count > QM_DBG_WRITE_LEN) {
+ ret = -ENOSPC;
+ goto put_dfx_access;
+ }
+
+ cmd_buf = memdup_user_nul(buffer, count);
+ if (IS_ERR(cmd_buf)) {
+ ret = PTR_ERR(cmd_buf);
+ goto put_dfx_access;
+ }
+
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ ret = qm_cmd_write_dump(qm, cmd_buf);
+ if (ret) {
+ kfree(cmd_buf);
+ goto put_dfx_access;
+ }
+
+ kfree(cmd_buf);
+
+ ret = count;
+
+put_dfx_access:
+ hisi_qm_put_dfx_access(qm);
+ return ret;
+}
+
+static const struct file_operations qm_cmd_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = qm_cmd_read,
+ .write = qm_cmd_write,
+};
+
+/**
+ * hisi_qm_regs_dump() - Dump register values.
+ * @s: debugfs file handle.
+ * @regset: accelerator registers information.
+ *
+ * Dump accelerator registers.
+ */
+void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset)
+{
+ struct pci_dev *pdev = to_pci_dev(regset->dev);
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ const struct debugfs_reg32 *regs = regset->regs;
+ int regs_len = regset->nregs;
+ int i, ret;
+ u32 val;
+
+ ret = hisi_qm_get_dfx_access(qm);
+ if (ret)
+ return;
+
+ for (i = 0; i < regs_len; i++) {
+ val = readl(regset->base + regs[i].offset);
+ seq_printf(s, "%s= 0x%08x\n", regs[i].name, val);
+ }
+
+ hisi_qm_put_dfx_access(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_regs_dump);
+
+static int qm_regs_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+ struct debugfs_regset32 regset;
+
+ if (qm->fun_type == QM_HW_PF) {
+ regset.regs = qm_dfx_regs;
+ regset.nregs = ARRAY_SIZE(qm_dfx_regs);
+ } else {
+ regset.regs = qm_vf_dfx_regs;
+ regset.nregs = ARRAY_SIZE(qm_vf_dfx_regs);
+ }
+
+ regset.base = qm->io_base;
+ regset.dev = &qm->pdev->dev;
+
+ hisi_qm_regs_dump(s, &regset);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(qm_regs);
+
+static u32 current_q_read(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
+}
+
+static int current_q_write(struct hisi_qm *qm, u32 val)
+{
+ u32 tmp;
+
+ if (val >= qm->debug.curr_qm_qp_num)
+ return -EINVAL;
+
+ tmp = val << QM_DFX_QN_SHIFT |
+ (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
+ writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
+
+ tmp = val << QM_DFX_QN_SHIFT |
+ (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
+ writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
+
+ return 0;
+}
+
+static u32 clear_enable_read(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
+}
+
+/* rd_clr_ctrl: 1 enables read-clear, 0 disables it */
+static int clear_enable_write(struct hisi_qm *qm, u32 rd_clr_ctrl)
+{
+ if (rd_clr_ctrl > 1)
+ return -EINVAL;
+
+ writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);
+
+ return 0;
+}
+
+static u32 current_qm_read(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + QM_DFX_MB_CNT_VF);
+}
+
+static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num)
+{
+ u32 remain_q_num, vfq_num;
+ u32 num_vfs = qm->vfs_num;
+
+ vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
+ if (vfq_num >= qm->max_qp_num)
+ return qm->max_qp_num;
+
+ remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs;
+ if (vfq_num + remain_q_num <= qm->max_qp_num)
+ return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num;
+
+ /*
+	 * If vfq_num + remain_q_num > max_qp_num, the last remain_q_num
+	 * VFs each get one extra queue.
+ */
+ return fun_num + remain_q_num > num_vfs ? vfq_num + 1 : vfq_num;
+}
+
+static int current_qm_write(struct hisi_qm *qm, u32 val)
+{
+ u32 tmp;
+
+ if (val > qm->vfs_num)
+ return -EINVAL;
+
+	/* Calculate curr_qm_qp_num from the PF or VF device ID and store it */
+ if (!val)
+ qm->debug.curr_qm_qp_num = qm->qp_num;
+ else
+ qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val);
+
+ writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
+
+ tmp = val |
+ (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
+ writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
+
+ tmp = val |
+ (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
+ writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
+
+ return 0;
+}
+
+static ssize_t qm_debug_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct debugfs_file *file = filp->private_data;
+ enum qm_debug_file index = file->index;
+ struct hisi_qm *qm = file_to_qm(file);
+ char tbuf[QM_DBG_TMP_BUF_LEN];
+ u32 val;
+ int ret;
+
+ ret = hisi_qm_get_dfx_access(qm);
+ if (ret)
+ return ret;
+
+ mutex_lock(&file->lock);
+ switch (index) {
+ case CURRENT_QM:
+ val = current_qm_read(qm);
+ break;
+ case CURRENT_Q:
+ val = current_q_read(qm);
+ break;
+ case CLEAR_ENABLE:
+ val = clear_enable_read(qm);
+ break;
+ default:
+ goto err_input;
+ }
+ mutex_unlock(&file->lock);
+
+ hisi_qm_put_dfx_access(qm);
+ ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val);
+ return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+
+err_input:
+ mutex_unlock(&file->lock);
+ hisi_qm_put_dfx_access(qm);
+ return -EINVAL;
+}
+
+static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct debugfs_file *file = filp->private_data;
+ enum qm_debug_file index = file->index;
+ struct hisi_qm *qm = file_to_qm(file);
+ unsigned long val;
+ char tbuf[QM_DBG_TMP_BUF_LEN];
+ int len, ret;
+
+ if (*pos != 0)
+ return 0;
+
+ if (count >= QM_DBG_TMP_BUF_LEN)
+ return -ENOSPC;
+
+ len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
+ count);
+ if (len < 0)
+ return len;
+
+ tbuf[len] = '\0';
+ if (kstrtoul(tbuf, 0, &val))
+ return -EFAULT;
+
+ ret = hisi_qm_get_dfx_access(qm);
+ if (ret)
+ return ret;
+
+ mutex_lock(&file->lock);
+ switch (index) {
+ case CURRENT_QM:
+ ret = current_qm_write(qm, val);
+ break;
+ case CURRENT_Q:
+ ret = current_q_write(qm, val);
+ break;
+ case CLEAR_ENABLE:
+ ret = clear_enable_write(qm, val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ mutex_unlock(&file->lock);
+
+ hisi_qm_put_dfx_access(qm);
+
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations qm_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = qm_debug_read,
+ .write = qm_debug_write,
+};
+
+static void dfx_regs_uninit(struct hisi_qm *qm,
+ struct dfx_diff_registers *dregs, int reg_len)
+{
+ int i;
+
+ /* Set each pointer to NULL to prevent a double free */
+ for (i = 0; i < reg_len; i++) {
+ kfree(dregs[i].regs);
+ dregs[i].regs = NULL;
+ }
+ kfree(dregs);
+}
+
+static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm,
+ const struct dfx_diff_registers *cregs, u32 reg_len)
+{
+ struct dfx_diff_registers *diff_regs;
+ u32 j, base_offset;
+ int i;
+
+ diff_regs = kcalloc(reg_len, sizeof(*diff_regs), GFP_KERNEL);
+ if (!diff_regs)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < reg_len; i++) {
+ if (!cregs[i].reg_len)
+ continue;
+
+ diff_regs[i].reg_offset = cregs[i].reg_offset;
+ diff_regs[i].reg_len = cregs[i].reg_len;
+ diff_regs[i].regs = kcalloc(QM_DFX_REGS_LEN, cregs[i].reg_len,
+ GFP_KERNEL);
+ if (!diff_regs[i].regs)
+ goto alloc_error;
+
+ for (j = 0; j < diff_regs[i].reg_len; j++) {
+ base_offset = diff_regs[i].reg_offset +
+ j * QM_DFX_REGS_LEN;
+ diff_regs[i].regs[j] = readl(qm->io_base + base_offset);
+ }
+ }
+
+ return diff_regs;
+
+alloc_error:
+ while (i > 0) {
+ i--;
+ kfree(diff_regs[i].regs);
+ }
+ kfree(diff_regs);
+ return ERR_PTR(-ENOMEM);
+}
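The snapshot loop walks each region in 4-byte steps, since QM_DFX_REGS_LEN is 4UL in the defines this patch removes from qm.c, so word j of a region lives at reg_offset + 4 * j. A toy illustration with an invented region:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int reg_offset = 0x104000, reg_len = 3, j;	/* invented region */

    	for (j = 0; j < reg_len; j++)
    		printf("word %u snapshotted from offset 0x%08x\n",
    		       j, reg_offset + j * 4);
    	return 0;
    }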
+
+static int qm_diff_regs_init(struct hisi_qm *qm,
+ struct dfx_diff_registers *dregs, u32 reg_len)
+{
+ qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
+ if (IS_ERR(qm->debug.qm_diff_regs))
+ return PTR_ERR(qm->debug.qm_diff_regs);
+
+ qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len);
+ if (IS_ERR(qm->debug.acc_diff_regs)) {
+ dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
+ return PTR_ERR(qm->debug.acc_diff_regs);
+ }
+
+ return 0;
+}
+
+static void qm_last_regs_uninit(struct hisi_qm *qm)
+{
+ struct qm_debug *debug = &qm->debug;
+
+ if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
+ return;
+
+ kfree(debug->qm_last_words);
+ debug->qm_last_words = NULL;
+}
+
+static int qm_last_regs_init(struct hisi_qm *qm)
+{
+ int dfx_regs_num = ARRAY_SIZE(qm_dfx_regs);
+ struct qm_debug *debug = &qm->debug;
+ int i;
+
+ if (qm->fun_type == QM_HW_VF)
+ return 0;
+
+ debug->qm_last_words = kcalloc(dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
+ if (!debug->qm_last_words)
+ return -ENOMEM;
+
+ for (i = 0; i < dfx_regs_num; i++) {
+ debug->qm_last_words[i] = readl_relaxed(qm->io_base +
+ qm_dfx_regs[i].offset);
+ }
+
+ return 0;
+}
+
+static void qm_diff_regs_uninit(struct hisi_qm *qm, u32 reg_len)
+{
+ dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len);
+ dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
+}
+
+/**
+ * hisi_qm_regs_debugfs_init() - Allocate memory for the register snapshots.
+ * @qm: device qm handle.
+ * @dregs: diff registers handle.
+ * @reg_len: diff registers region length.
+ */
+int hisi_qm_regs_debugfs_init(struct hisi_qm *qm,
+ struct dfx_diff_registers *dregs, u32 reg_len)
+{
+ int ret;
+
+ if (!qm || !dregs)
+ return -EINVAL;
+
+ if (qm->fun_type != QM_HW_PF)
+ return 0;
+
+ ret = qm_last_regs_init(qm);
+ if (ret) {
+ dev_info(&qm->pdev->dev, "failed to alloc memory for qm last words!\n");
+ return ret;
+ }
+
+ ret = qm_diff_regs_init(qm, dregs, reg_len);
+ if (ret) {
+ qm_last_regs_uninit(qm);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_regs_debugfs_init);
+
+/**
+ * hisi_qm_regs_debugfs_uninit() - Free the register snapshot memory.
+ * @qm: device qm handle.
+ * @reg_len: diff registers region length.
+ */
+void hisi_qm_regs_debugfs_uninit(struct hisi_qm *qm, u32 reg_len)
+{
+ if (!qm || qm->fun_type != QM_HW_PF)
+ return;
+
+ qm_diff_regs_uninit(qm, reg_len);
+ qm_last_regs_uninit(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_regs_debugfs_uninit);
+
+/**
+ * hisi_qm_acc_diff_regs_dump() - Dump the registers whose values changed.
+ * @qm: device qm handle.
+ * @s: Debugfs file handle.
+ * @dregs: diff registers handle.
+ * @regs_len: diff registers region length.
+ */
+void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
+ struct dfx_diff_registers *dregs, u32 regs_len)
+{
+ u32 j, val, base_offset;
+ int i, ret;
+
+ if (!qm || !s || !dregs)
+ return;
+
+ ret = hisi_qm_get_dfx_access(qm);
+ if (ret)
+ return;
+
+ down_read(&qm->qps_lock);
+ for (i = 0; i < regs_len; i++) {
+ if (!dregs[i].reg_len)
+ continue;
+
+ for (j = 0; j < dregs[i].reg_len; j++) {
+ base_offset = dregs[i].reg_offset + j * QM_DFX_REGS_LEN;
+ val = readl(qm->io_base + base_offset);
+ if (val != dregs[i].regs[j])
+ seq_printf(s, "0x%08x = 0x%08x ---> 0x%08x\n",
+ base_offset, dregs[i].regs[j], val);
+ }
+ }
+ up_read(&qm->qps_lock);
+
+ hisi_qm_put_dfx_access(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_acc_diff_regs_dump);
+
+void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm)
+{
+ struct qm_debug *debug = &qm->debug;
+ struct pci_dev *pdev = qm->pdev;
+ u32 val;
+ int i;
+
+ if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(qm_dfx_regs); i++) {
+ val = readl_relaxed(qm->io_base + qm_dfx_regs[i].offset);
+ if (debug->qm_last_words[i] != val)
+ pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n",
+ qm_dfx_regs[i].name, debug->qm_last_words[i], val);
+ }
+}
+
+static int qm_diff_regs_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+
+ hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.qm_diff_regs,
+ ARRAY_SIZE(qm_diff_regs));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(qm_diff_regs);
+
+static ssize_t qm_status_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct hisi_qm *qm = filp->private_data;
+ char buf[QM_DBG_READ_LEN];
+ int val, len;
+
+ val = atomic_read(&qm->status.flags);
+ len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
+
+ return simple_read_from_buffer(buffer, count, pos, buf, len);
+}
+
+static const struct file_operations qm_status_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = qm_status_read,
+};
+
+static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
+ enum qm_debug_file index)
+{
+ struct debugfs_file *file = qm->debug.files + index;
+
+ debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
+ &qm_debug_fops);
+
+ file->index = index;
+ mutex_init(&file->lock);
+ file->debug = &qm->debug;
+}
+
+static int qm_debugfs_atomic64_set(void *data, u64 val)
+{
+ if (val)
+ return -EINVAL;
+
+ atomic64_set((atomic64_t *)data, 0);
+
+ return 0;
+}
+
+static int qm_debugfs_atomic64_get(void *data, u64 *val)
+{
+ *val = atomic64_read((atomic64_t *)data);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
+ qm_debugfs_atomic64_set, "%llu\n");
+
+/**
+ * hisi_qm_debug_init() - Initialize qm related debugfs files.
+ * @qm: The qm for which we want to add debugfs files.
+ *
+ * Create qm related debugfs files.
+ */
+void hisi_qm_debug_init(struct hisi_qm *qm)
+{
+ struct dfx_diff_registers *qm_regs = qm->debug.qm_diff_regs;
+ struct qm_dfx *dfx = &qm->debug.dfx;
+ struct dentry *qm_d;
+ void *data;
+ int i;
+
+ qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
+ qm->debug.qm_d = qm_d;
+
+ /* these debug files are only created on the PF */
+ if (qm->fun_type == QM_HW_PF) {
+ qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
+ for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
+ qm_create_debugfs_file(qm, qm->debug.qm_d, i);
+ }
+
+ if (qm_regs)
+ debugfs_create_file("diff_regs", 0444, qm->debug.qm_d,
+ qm, &qm_diff_regs_fops);
+
+ debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
+
+ debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops);
+
+ debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
+ &qm_status_fops);
+ for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
+ data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
+ debugfs_create_file(qm_dfx_files[i].name,
+ 0644,
+ qm_d,
+ data,
+ &qm_atomic64_ops);
+ }
+
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+ hisi_qm_set_algqos_init(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
+
+/**
+ * hisi_qm_debug_regs_clear() - clear qm debug related registers.
+ * @qm: The qm for which we want to clear its debug registers.
+ */
+void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
+{
+ const struct debugfs_reg32 *regs;
+ int i;
+
+ /* clear current_qm */
+ writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
+
+ /* clear current_q */
+ writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
+ writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
+
+ /*
+ * these registers are read-clear when QM_DFX_CNT_CLR_CE is set;
+ * enable clearing, read them once to clear them, then disable
+ * clearing again below.
+ */
+ writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
+
+ regs = qm_dfx_regs;
+ for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
+ readl(qm->io_base + regs->offset);
+ regs++;
+ }
+
+ /* clear clear_enable */
+ writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index ef02dadd6217..8ede77310dc5 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -147,6 +147,16 @@ struct hpre_asym_request {
struct timespec64 req_time;
};
+static inline unsigned int hpre_align_sz(void)
+{
+ return ((crypto_dma_align() - 1) | (HPRE_ALIGN_SZ - 1)) + 1;
+}
+
+static inline unsigned int hpre_align_pd(void)
+{
+ return (hpre_align_sz() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
+}
+
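For power-of-two alignments a and b, ((a - 1) | (b - 1)) + 1 equals max(a, b), so hpre_align_sz() picks the stricter of the runtime DMA alignment and the driver's own HPRE_ALIGN_SZ, and hpre_align_pd() is the worst-case padding reserved past the request context so the PTR_ALIGN() calls below always have room. A standalone sketch; crypto_dma_align() = 128, HPRE_ALIGN_SZ = 64 and a ctx alignment of 8 are all assumed for illustration:

    #include <stdio.h>

    #define HPRE_ALIGN_SZ 64				/* assumed */

    static unsigned int crypto_dma_align(void)         { return 128; }	/* assumed */
    static unsigned int crypto_tfm_ctx_alignment(void) { return 8; }	/* assumed */

    static unsigned int hpre_align_sz(void)
    {
    	return ((crypto_dma_align() - 1) | (HPRE_ALIGN_SZ - 1)) + 1;
    }

    static unsigned int hpre_align_pd(void)
    {
    	return (hpre_align_sz() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
    }

    int main(void)
    {
    	printf("align = %u, pad = %u\n", hpre_align_sz(), hpre_align_pd());
    	return 0;	/* prints: align = 128, pad = 120 */
    }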
static int hpre_alloc_req_id(struct hpre_ctx *ctx)
{
unsigned long flags;
@@ -517,7 +527,7 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
}
tmp = akcipher_request_ctx(akreq);
- h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ h_req = PTR_ALIGN(tmp, hpre_align_sz());
h_req->cb = hpre_rsa_cb;
h_req->areq.rsa = akreq;
msg = &h_req->req;
@@ -531,7 +541,7 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
}
tmp = kpp_request_ctx(kreq);
- h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ h_req = PTR_ALIGN(tmp, hpre_align_sz());
h_req->cb = hpre_dh_cb;
h_req->areq.dh = kreq;
msg = &h_req->req;
@@ -582,7 +592,7 @@ static int hpre_dh_compute_value(struct kpp_request *req)
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
void *tmp = kpp_request_ctx(req);
- struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
struct hpre_sqe *msg = &hpre_req->req;
int ret;
@@ -740,6 +750,8 @@ static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
+
return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
}
@@ -783,7 +795,7 @@ static int hpre_rsa_enc(struct akcipher_request *req)
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
void *tmp = akcipher_request_ctx(req);
- struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
struct hpre_sqe *msg = &hpre_req->req;
int ret;
@@ -831,7 +843,7 @@ static int hpre_rsa_dec(struct akcipher_request *req)
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
void *tmp = akcipher_request_ctx(req);
- struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
struct hpre_sqe *msg = &hpre_req->req;
int ret;
@@ -1165,6 +1177,9 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
return PTR_ERR(ctx->rsa.soft_tfm);
}
+ akcipher_set_reqsize(tfm, sizeof(struct hpre_asym_request) +
+ hpre_align_pd());
+
ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
if (ret)
crypto_free_akcipher(ctx->rsa.soft_tfm);
@@ -1485,7 +1500,7 @@ static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
}
tmp = kpp_request_ctx(req);
- h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ h_req = PTR_ALIGN(tmp, hpre_align_sz());
h_req->cb = hpre_ecdh_cb;
h_req->areq.ecdh = req;
msg = &h_req->req;
@@ -1566,7 +1581,7 @@ static int hpre_ecdh_compute_value(struct kpp_request *req)
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
struct device *dev = ctx->dev;
void *tmp = kpp_request_ctx(req);
- struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
struct hpre_sqe *msg = &hpre_req->req;
int ret;
@@ -1617,6 +1632,8 @@ static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
ctx->curve_id = ECC_CURVE_NIST_P192;
+ kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
+
return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}
@@ -1626,6 +1643,8 @@ static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
ctx->curve_id = ECC_CURVE_NIST_P256;
+ kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
+
return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}
@@ -1635,6 +1654,8 @@ static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
ctx->curve_id = ECC_CURVE_NIST_P384;
+ kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
+
return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}
@@ -1791,7 +1812,7 @@ static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
}
tmp = kpp_request_ctx(req);
- h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ h_req = PTR_ALIGN(tmp, hpre_align_sz());
h_req->cb = hpre_curve25519_cb;
h_req->areq.curve25519 = req;
msg = &h_req->req;
@@ -1912,7 +1933,7 @@ static int hpre_curve25519_compute_value(struct kpp_request *req)
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
struct device *dev = ctx->dev;
void *tmp = kpp_request_ctx(req);
- struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
struct hpre_sqe *msg = &hpre_req->req;
int ret;
@@ -1961,6 +1982,8 @@ static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
+
return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}
@@ -1981,7 +2004,6 @@ static struct akcipher_alg rsa = {
.max_size = hpre_rsa_max_size,
.init = hpre_rsa_init_tfm,
.exit = hpre_rsa_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
.base = {
.cra_ctxsize = sizeof(struct hpre_ctx),
.cra_priority = HPRE_CRYPTO_ALG_PRI,
@@ -1998,7 +2020,6 @@ static struct kpp_alg dh = {
.max_size = hpre_dh_max_size,
.init = hpre_dh_init_tfm,
.exit = hpre_dh_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
.base = {
.cra_ctxsize = sizeof(struct hpre_ctx),
.cra_priority = HPRE_CRYPTO_ALG_PRI,
@@ -2016,7 +2037,6 @@ static struct kpp_alg ecdh_curves[] = {
.max_size = hpre_ecdh_max_size,
.init = hpre_ecdh_nist_p192_init_tfm,
.exit = hpre_ecdh_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
.base = {
.cra_ctxsize = sizeof(struct hpre_ctx),
.cra_priority = HPRE_CRYPTO_ALG_PRI,
@@ -2031,7 +2051,6 @@ static struct kpp_alg ecdh_curves[] = {
.max_size = hpre_ecdh_max_size,
.init = hpre_ecdh_nist_p256_init_tfm,
.exit = hpre_ecdh_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
.base = {
.cra_ctxsize = sizeof(struct hpre_ctx),
.cra_priority = HPRE_CRYPTO_ALG_PRI,
@@ -2046,7 +2065,6 @@ static struct kpp_alg ecdh_curves[] = {
.max_size = hpre_ecdh_max_size,
.init = hpre_ecdh_nist_p384_init_tfm,
.exit = hpre_ecdh_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
.base = {
.cra_ctxsize = sizeof(struct hpre_ctx),
.cra_priority = HPRE_CRYPTO_ALG_PRI,
@@ -2064,7 +2082,6 @@ static struct kpp_alg curve25519_alg = {
.max_size = hpre_curve25519_max_size,
.init = hpre_curve25519_init_tfm,
.exit = hpre_curve25519_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
.base = {
.cra_ctxsize = sizeof(struct hpre_ctx),
.cra_priority = HPRE_CRYPTO_ALG_PRI,
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 471e5ca720f5..923f9c279265 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -1101,8 +1101,7 @@ static int hpre_debugfs_init(struct hisi_qm *qm)
qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
- ret = hisi_qm_diff_regs_init(qm, hpre_diff_regs,
- ARRAY_SIZE(hpre_diff_regs));
+ ret = hisi_qm_regs_debugfs_init(qm, hpre_diff_regs, ARRAY_SIZE(hpre_diff_regs));
if (ret) {
dev_warn(dev, "Failed to init HPRE diff regs!\n");
goto debugfs_remove;
@@ -1121,7 +1120,7 @@ static int hpre_debugfs_init(struct hisi_qm *qm)
return 0;
failed_to_create:
- hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
debugfs_remove:
debugfs_remove_recursive(qm->debug.debug_root);
return ret;
@@ -1129,7 +1128,7 @@ debugfs_remove:
static void hpre_debugfs_exit(struct hisi_qm *qm)
{
- hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
debugfs_remove_recursive(qm->debug.debug_root);
}
@@ -1437,18 +1436,12 @@ err_with_qm_init:
static void hpre_remove(struct pci_dev *pdev)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
- int ret;
hisi_qm_pm_uninit(qm);
hisi_qm_wait_task_finish(qm, &hpre_devices);
hisi_qm_alg_unregister(qm, &hpre_devices);
- if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
- ret = hisi_qm_sriov_disable(pdev, true);
- if (ret) {
- pci_err(pdev, "Disable SRIOV fail!\n");
- return;
- }
- }
+ if (qm->fun_type == QM_HW_PF && qm->vfs_num)
+ hisi_qm_sriov_disable(pdev, true);
hpre_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 8b387de69d22..007ac7a69ce7 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -16,6 +16,7 @@
#include <linux/uaccess.h>
#include <uapi/misc/uacce/hisi_qm.h>
#include <linux/hisi_acc_qm.h>
+#include "qm_common.h"
/* eq/aeq irq enable */
#define QM_VF_AEQ_INT_SOURCE 0x0
@@ -119,8 +120,6 @@
#define QM_SQC_VFT_NUM_SHIFT_V2 45
#define QM_SQC_VFT_NUM_MASK_v2 GENMASK(9, 0)
-#define QM_DFX_CNT_CLR_CE 0x100118
-
#define QM_ABNORMAL_INT_SOURCE 0x100000
#define QM_ABNORMAL_INT_MASK 0x100004
#define QM_ABNORMAL_INT_MASK_VALUE 0x7fff
@@ -187,14 +186,6 @@
#define QM_VF_RESET_WAIT_TIMEOUT_US \
(QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)
-#define QM_DFX_MB_CNT_VF 0x104010
-#define QM_DFX_DB_CNT_VF 0x104020
-#define QM_DFX_SQE_CNT_VF_SQN 0x104030
-#define QM_DFX_CQE_CNT_VF_CQN 0x104040
-#define QM_DFX_QN_SHIFT 16
-#define CURRENT_FUN_MASK GENMASK(5, 0)
-#define CURRENT_Q_MASK GENMASK(31, 16)
-
#define POLL_PERIOD 10
#define POLL_TIMEOUT 1000
#define WAIT_PERIOD_US_MAX 200
@@ -211,19 +202,15 @@
#define QMC_ALIGN(sz) ALIGN(sz, 32)
#define QM_DBG_READ_LEN 256
-#define QM_DBG_WRITE_LEN 1024
-#define QM_DBG_TMP_BUF_LEN 22
#define QM_PCI_COMMAND_INVALID ~0
#define QM_RESET_STOP_TX_OFFSET 1
#define QM_RESET_STOP_RX_OFFSET 2
#define WAIT_PERIOD 20
#define REMOVE_WAIT_DELAY 10
-#define QM_SQE_ADDR_MASK GENMASK(7, 0)
#define QM_DRIVER_REMOVING 0
#define QM_RST_SCHED 1
-#define QM_RESETTING 2
#define QM_QOS_PARAM_NUM 2
#define QM_QOS_VAL_NUM 1
#define QM_QOS_BDF_PARAM_NUM 4
@@ -250,16 +237,6 @@
#define QM_QOS_MIN_CIR_B 100
#define QM_QOS_MAX_CIR_U 6
#define QM_QOS_MAX_CIR_S 11
-#define QM_QOS_VAL_MAX_LEN 32
-#define QM_DFX_BASE 0x0100000
-#define QM_DFX_STATE1 0x0104000
-#define QM_DFX_STATE2 0x01040C8
-#define QM_DFX_COMMON 0x0000
-#define QM_DFX_BASE_LEN 0x5A
-#define QM_DFX_STATE1_LEN 0x2E
-#define QM_DFX_STATE2_LEN 0x11
-#define QM_DFX_COMMON_LEN 0xC3
-#define QM_DFX_REGS_LEN 4UL
#define QM_AUTOSUSPEND_DELAY 3000
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
@@ -359,7 +336,7 @@ static const struct hisi_qm_cap_info qm_cap_info_vf[] = {
static const struct hisi_qm_cap_info qm_basic_info[] = {
{QM_TOTAL_QP_NUM_CAP, 0x100158, 0, GENMASK(10, 0), 0x1000, 0x400, 0x400},
{QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400},
- {QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(15, 0), 0x800, 0x4000800, 0x4000800},
+ {QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(31, 0), 0x800, 0x4000800, 0x4000800},
{QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400},
{QM_EQ_IRQ_TYPE_CAP, 0x310c, 0, GENMASK(31, 0), 0x10000, 0x10000, 0x10000},
{QM_AEQ_IRQ_TYPE_CAP, 0x3110, 0, GENMASK(31, 0), 0x0, 0x10001, 0x10001},
@@ -369,73 +346,6 @@ static const struct hisi_qm_cap_info qm_basic_info[] = {
{QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3},
};
-struct qm_cqe {
- __le32 rsvd0;
- __le16 cmd_id;
- __le16 rsvd1;
- __le16 sq_head;
- __le16 sq_num;
- __le16 rsvd2;
- __le16 w7;
-};
-
-struct qm_eqe {
- __le32 dw0;
-};
-
-struct qm_aeqe {
- __le32 dw0;
-};
-
-struct qm_sqc {
- __le16 head;
- __le16 tail;
- __le32 base_l;
- __le32 base_h;
- __le32 dw3;
- __le16 w8;
- __le16 rsvd0;
- __le16 pasid;
- __le16 w11;
- __le16 cq_num;
- __le16 w13;
- __le32 rsvd1;
-};
-
-struct qm_cqc {
- __le16 head;
- __le16 tail;
- __le32 base_l;
- __le32 base_h;
- __le32 dw3;
- __le16 w8;
- __le16 rsvd0;
- __le16 pasid;
- __le16 w11;
- __le32 dw6;
- __le32 rsvd1;
-};
-
-struct qm_eqc {
- __le16 head;
- __le16 tail;
- __le32 base_l;
- __le32 base_h;
- __le32 dw3;
- __le32 rsvd[2];
- __le32 dw6;
-};
-
-struct qm_aeqc {
- __le16 head;
- __le16 tail;
- __le32 base_l;
- __le32 base_h;
- __le32 dw3;
- __le32 rsvd[2];
- __le32 dw6;
-};
-
struct qm_mailbox {
__le16 w0;
__le16 queue_num;
@@ -468,25 +378,6 @@ struct hisi_qm_hw_ops {
int (*set_msi)(struct hisi_qm *qm, bool set);
};
-struct qm_dfx_item {
- const char *name;
- u32 offset;
-};
-
-static struct qm_dfx_item qm_dfx_files[] = {
- {"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
- {"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
- {"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
- {"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
- {"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
-};
-
-static const char * const qm_debug_file_name[] = {
- [CURRENT_QM] = "current_qm",
- [CURRENT_Q] = "current_q",
- [CLEAR_ENABLE] = "clear_enable",
-};
-
struct hisi_qm_hw_error {
u32 int_msk;
const char *msg;
@@ -511,23 +402,6 @@ static const struct hisi_qm_hw_error qm_hw_error[] = {
{ /* sentinel */ }
};
-/* define the QM's dfx regs region and region length */
-static struct dfx_diff_registers qm_diff_regs[] = {
- {
- .reg_offset = QM_DFX_BASE,
- .reg_len = QM_DFX_BASE_LEN,
- }, {
- .reg_offset = QM_DFX_STATE1,
- .reg_len = QM_DFX_STATE1_LEN,
- }, {
- .reg_offset = QM_DFX_STATE2,
- .reg_len = QM_DFX_STATE2_LEN,
- }, {
- .reg_offset = QM_DFX_COMMON,
- .reg_len = QM_DFX_COMMON_LEN,
- },
-};
-
static const char * const qm_db_timeout[] = {
"sq", "cq", "eq", "aeq",
};
@@ -536,10 +410,6 @@ static const char * const qm_fifo_overflow[] = {
"cq", "eq", "aeq",
};
-static const char * const qm_s[] = {
- "init", "start", "close", "stop",
-};
-
static const char * const qp_s[] = {
"none", "init", "start", "stop", "close",
};
@@ -909,8 +779,8 @@ static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
u32 depth;
depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver);
- *high_bits = depth & QM_XQ_DEPTH_MASK;
- *low_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
+ *low_bits = depth & QM_XQ_DEPTH_MASK;
+ *high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
}
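The swap above fixes which half of the packed depth value lands in which output, and the widened mask in qm_basic_info matters for the same reason: QM_XEQ_DEPTH_CAP carries two 16-bit depths in one 32-bit register, so the old GENMASK(15, 0) silently dropped the high half. Assuming QM_XQ_DEPTH_SHIFT is 16 and QM_XQ_DEPTH_MASK is GENMASK(15, 0) (both defined elsewhere in the driver), the default 0x4000800 decodes as:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int depth = 0x4000800;			/* default QM_XEQ_DEPTH_CAP */
    	unsigned int low   = depth & 0xffff;		/* 0x0800 -> low_bits */
    	unsigned int high  = (depth >> 16) & 0xffff;	/* 0x0400 -> high_bits */

    	printf("low = 0x%x, high = 0x%x\n", low, high);
    	return 0;
    }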
static u32 qm_get_irq_num(struct hisi_qm *qm)
@@ -1440,452 +1310,7 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
return 0;
}
-static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num)
-{
- u32 remain_q_num, vfq_num;
- u32 num_vfs = qm->vfs_num;
-
- vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
- if (vfq_num >= qm->max_qp_num)
- return qm->max_qp_num;
-
- remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs;
- if (vfq_num + remain_q_num <= qm->max_qp_num)
- return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num;
-
- /*
- * if vfq_num + remain_q_num > max_qp_num, the last VFs,
- * each with one more queue.
- */
- return fun_num + remain_q_num > num_vfs ? vfq_num + 1 : vfq_num;
-}
-
-static struct hisi_qm *file_to_qm(struct debugfs_file *file)
-{
- struct qm_debug *debug = file->debug;
-
- return container_of(debug, struct hisi_qm, debug);
-}
-
-static u32 current_q_read(struct hisi_qm *qm)
-{
- return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
-}
-
-static int current_q_write(struct hisi_qm *qm, u32 val)
-{
- u32 tmp;
-
- if (val >= qm->debug.curr_qm_qp_num)
- return -EINVAL;
-
- tmp = val << QM_DFX_QN_SHIFT |
- (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
- writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
-
- tmp = val << QM_DFX_QN_SHIFT |
- (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
- writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
-
- return 0;
-}
-
-static u32 clear_enable_read(struct hisi_qm *qm)
-{
- return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
-}
-
-/* rd_clr_ctrl 1 enable read clear, otherwise 0 disable it */
-static int clear_enable_write(struct hisi_qm *qm, u32 rd_clr_ctrl)
-{
- if (rd_clr_ctrl > 1)
- return -EINVAL;
-
- writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);
-
- return 0;
-}
-
-static u32 current_qm_read(struct hisi_qm *qm)
-{
- return readl(qm->io_base + QM_DFX_MB_CNT_VF);
-}
-
-static int current_qm_write(struct hisi_qm *qm, u32 val)
-{
- u32 tmp;
-
- if (val > qm->vfs_num)
- return -EINVAL;
-
- /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
- if (!val)
- qm->debug.curr_qm_qp_num = qm->qp_num;
- else
- qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val);
-
- writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
-
- tmp = val |
- (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
- writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
-
- tmp = val |
- (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
- writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
-
- return 0;
-}
-
-static ssize_t qm_debug_read(struct file *filp, char __user *buf,
- size_t count, loff_t *pos)
-{
- struct debugfs_file *file = filp->private_data;
- enum qm_debug_file index = file->index;
- struct hisi_qm *qm = file_to_qm(file);
- char tbuf[QM_DBG_TMP_BUF_LEN];
- u32 val;
- int ret;
-
- ret = hisi_qm_get_dfx_access(qm);
- if (ret)
- return ret;
-
- mutex_lock(&file->lock);
- switch (index) {
- case CURRENT_QM:
- val = current_qm_read(qm);
- break;
- case CURRENT_Q:
- val = current_q_read(qm);
- break;
- case CLEAR_ENABLE:
- val = clear_enable_read(qm);
- break;
- default:
- goto err_input;
- }
- mutex_unlock(&file->lock);
-
- hisi_qm_put_dfx_access(qm);
- ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val);
- return simple_read_from_buffer(buf, count, pos, tbuf, ret);
-
-err_input:
- mutex_unlock(&file->lock);
- hisi_qm_put_dfx_access(qm);
- return -EINVAL;
-}
-
-static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *pos)
-{
- struct debugfs_file *file = filp->private_data;
- enum qm_debug_file index = file->index;
- struct hisi_qm *qm = file_to_qm(file);
- unsigned long val;
- char tbuf[QM_DBG_TMP_BUF_LEN];
- int len, ret;
-
- if (*pos != 0)
- return 0;
-
- if (count >= QM_DBG_TMP_BUF_LEN)
- return -ENOSPC;
-
- len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
- count);
- if (len < 0)
- return len;
-
- tbuf[len] = '\0';
- if (kstrtoul(tbuf, 0, &val))
- return -EFAULT;
-
- ret = hisi_qm_get_dfx_access(qm);
- if (ret)
- return ret;
-
- mutex_lock(&file->lock);
- switch (index) {
- case CURRENT_QM:
- ret = current_qm_write(qm, val);
- break;
- case CURRENT_Q:
- ret = current_q_write(qm, val);
- break;
- case CLEAR_ENABLE:
- ret = clear_enable_write(qm, val);
- break;
- default:
- ret = -EINVAL;
- }
- mutex_unlock(&file->lock);
-
- hisi_qm_put_dfx_access(qm);
-
- if (ret)
- return ret;
-
- return count;
-}
-
-static const struct file_operations qm_debug_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = qm_debug_read,
- .write = qm_debug_write,
-};
-
-#define CNT_CYC_REGS_NUM 10
-static const struct debugfs_reg32 qm_dfx_regs[] = {
- /* XXX_CNT are reading clear register */
- {"QM_ECC_1BIT_CNT ", 0x104000ull},
- {"QM_ECC_MBIT_CNT ", 0x104008ull},
- {"QM_DFX_MB_CNT ", 0x104018ull},
- {"QM_DFX_DB_CNT ", 0x104028ull},
- {"QM_DFX_SQE_CNT ", 0x104038ull},
- {"QM_DFX_CQE_CNT ", 0x104048ull},
- {"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050ull},
- {"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058ull},
- {"QM_DFX_ACC_FINISH_CNT ", 0x104060ull},
- {"QM_DFX_CQE_ERR_CNT ", 0x1040b4ull},
- {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
- {"QM_ECC_1BIT_INF ", 0x104004ull},
- {"QM_ECC_MBIT_INF ", 0x10400cull},
- {"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0ull},
- {"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4ull},
- {"QM_DFX_AXI_RDY_VLD ", 0x1040a8ull},
- {"QM_DFX_FF_ST0 ", 0x1040c8ull},
- {"QM_DFX_FF_ST1 ", 0x1040ccull},
- {"QM_DFX_FF_ST2 ", 0x1040d0ull},
- {"QM_DFX_FF_ST3 ", 0x1040d4ull},
- {"QM_DFX_FF_ST4 ", 0x1040d8ull},
- {"QM_DFX_FF_ST5 ", 0x1040dcull},
- {"QM_DFX_FF_ST6 ", 0x1040e0ull},
- {"QM_IN_IDLE_ST ", 0x1040e4ull},
-};
-
-static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
- {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
-};
-
-/**
- * hisi_qm_regs_dump() - Dump registers's value.
- * @s: debugfs file handle.
- * @regset: accelerator registers information.
- *
- * Dump accelerator registers.
- */
-void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset)
-{
- struct pci_dev *pdev = to_pci_dev(regset->dev);
- struct hisi_qm *qm = pci_get_drvdata(pdev);
- const struct debugfs_reg32 *regs = regset->regs;
- int regs_len = regset->nregs;
- int i, ret;
- u32 val;
-
- ret = hisi_qm_get_dfx_access(qm);
- if (ret)
- return;
-
- for (i = 0; i < regs_len; i++) {
- val = readl(regset->base + regs[i].offset);
- seq_printf(s, "%s= 0x%08x\n", regs[i].name, val);
- }
-
- hisi_qm_put_dfx_access(qm);
-}
-EXPORT_SYMBOL_GPL(hisi_qm_regs_dump);
-
-static int qm_regs_show(struct seq_file *s, void *unused)
-{
- struct hisi_qm *qm = s->private;
- struct debugfs_regset32 regset;
-
- if (qm->fun_type == QM_HW_PF) {
- regset.regs = qm_dfx_regs;
- regset.nregs = ARRAY_SIZE(qm_dfx_regs);
- } else {
- regset.regs = qm_vf_dfx_regs;
- regset.nregs = ARRAY_SIZE(qm_vf_dfx_regs);
- }
-
- regset.base = qm->io_base;
- regset.dev = &qm->pdev->dev;
-
- hisi_qm_regs_dump(s, &regset);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(qm_regs);
-
-static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm,
- const struct dfx_diff_registers *cregs, int reg_len)
-{
- struct dfx_diff_registers *diff_regs;
- u32 j, base_offset;
- int i;
-
- diff_regs = kcalloc(reg_len, sizeof(*diff_regs), GFP_KERNEL);
- if (!diff_regs)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < reg_len; i++) {
- if (!cregs[i].reg_len)
- continue;
-
- diff_regs[i].reg_offset = cregs[i].reg_offset;
- diff_regs[i].reg_len = cregs[i].reg_len;
- diff_regs[i].regs = kcalloc(QM_DFX_REGS_LEN, cregs[i].reg_len,
- GFP_KERNEL);
- if (!diff_regs[i].regs)
- goto alloc_error;
-
- for (j = 0; j < diff_regs[i].reg_len; j++) {
- base_offset = diff_regs[i].reg_offset +
- j * QM_DFX_REGS_LEN;
- diff_regs[i].regs[j] = readl(qm->io_base + base_offset);
- }
- }
-
- return diff_regs;
-
-alloc_error:
- while (i > 0) {
- i--;
- kfree(diff_regs[i].regs);
- }
- kfree(diff_regs);
- return ERR_PTR(-ENOMEM);
-}
-
-static void dfx_regs_uninit(struct hisi_qm *qm,
- struct dfx_diff_registers *dregs, int reg_len)
-{
- int i;
-
- /* Setting the pointer is NULL to prevent double free */
- for (i = 0; i < reg_len; i++) {
- kfree(dregs[i].regs);
- dregs[i].regs = NULL;
- }
- kfree(dregs);
- dregs = NULL;
-}
-
-/**
- * hisi_qm_diff_regs_init() - Allocate memory for registers.
- * @qm: device qm handle.
- * @dregs: diff registers handle.
- * @reg_len: diff registers region length.
- */
-int hisi_qm_diff_regs_init(struct hisi_qm *qm,
- struct dfx_diff_registers *dregs, int reg_len)
-{
- if (!qm || !dregs || reg_len <= 0)
- return -EINVAL;
-
- if (qm->fun_type != QM_HW_PF)
- return 0;
-
- qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs,
- ARRAY_SIZE(qm_diff_regs));
- if (IS_ERR(qm->debug.qm_diff_regs))
- return PTR_ERR(qm->debug.qm_diff_regs);
-
- qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len);
- if (IS_ERR(qm->debug.acc_diff_regs)) {
- dfx_regs_uninit(qm, qm->debug.qm_diff_regs,
- ARRAY_SIZE(qm_diff_regs));
- return PTR_ERR(qm->debug.acc_diff_regs);
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(hisi_qm_diff_regs_init);
-
-/**
- * hisi_qm_diff_regs_uninit() - Free memory for registers.
- * @qm: device qm handle.
- * @reg_len: diff registers region length.
- */
-void hisi_qm_diff_regs_uninit(struct hisi_qm *qm, int reg_len)
-{
- if (!qm || reg_len <= 0 || qm->fun_type != QM_HW_PF)
- return;
-
- dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len);
- dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
-}
-EXPORT_SYMBOL_GPL(hisi_qm_diff_regs_uninit);
-
-/**
- * hisi_qm_acc_diff_regs_dump() - Dump registers's value.
- * @qm: device qm handle.
- * @s: Debugfs file handle.
- * @dregs: diff registers handle.
- * @regs_len: diff registers region length.
- */
-void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
- struct dfx_diff_registers *dregs, int regs_len)
-{
- u32 j, val, base_offset;
- int i, ret;
-
- if (!qm || !s || !dregs || regs_len <= 0)
- return;
-
- ret = hisi_qm_get_dfx_access(qm);
- if (ret)
- return;
-
- down_read(&qm->qps_lock);
- for (i = 0; i < regs_len; i++) {
- if (!dregs[i].reg_len)
- continue;
-
- for (j = 0; j < dregs[i].reg_len; j++) {
- base_offset = dregs[i].reg_offset + j * QM_DFX_REGS_LEN;
- val = readl(qm->io_base + base_offset);
- if (val != dregs[i].regs[j])
- seq_printf(s, "0x%08x = 0x%08x ---> 0x%08x\n",
- base_offset, dregs[i].regs[j], val);
- }
- }
- up_read(&qm->qps_lock);
-
- hisi_qm_put_dfx_access(qm);
-}
-EXPORT_SYMBOL_GPL(hisi_qm_acc_diff_regs_dump);
-
-static int qm_diff_regs_show(struct seq_file *s, void *unused)
-{
- struct hisi_qm *qm = s->private;
-
- hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.qm_diff_regs,
- ARRAY_SIZE(qm_diff_regs));
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(qm_diff_regs);
-
-static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *pos)
-{
- char buf[QM_DBG_READ_LEN];
- int len;
-
- len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
- "Please echo help to cmd to get help information");
-
- return simple_read_from_buffer(buffer, count, pos, buf, len);
-}
-
-static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
+void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
dma_addr_t *dma_addr)
{
struct device *dev = &qm->pdev->dev;
@@ -1905,7 +1330,7 @@ static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
return ctx_addr;
}
-static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
+void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
const void *ctx_addr, dma_addr_t *dma_addr)
{
struct device *dev = &qm->pdev->dev;
@@ -1914,21 +1339,6 @@ static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
kfree(ctx_addr);
}
-static void dump_show(struct hisi_qm *qm, void *info,
- unsigned int info_size, char *info_name)
-{
- struct device *dev = &qm->pdev->dev;
- u8 *info_curr = info;
- u32 i;
-#define BYTE_PER_DW 4
-
- dev_info(dev, "%s DUMP\n", info_name);
- for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) {
- pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
- *(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr));
- }
-}
-
static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
{
return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
@@ -1939,387 +1349,6 @@ static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
}
-static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
-{
- struct device *dev = &qm->pdev->dev;
- struct qm_sqc *sqc, *sqc_curr;
- dma_addr_t sqc_dma;
- u32 qp_id;
- int ret;
-
- if (!s)
- return -EINVAL;
-
- ret = kstrtou32(s, 0, &qp_id);
- if (ret || qp_id >= qm->qp_num) {
- dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
- return -EINVAL;
- }
-
- sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
- if (IS_ERR(sqc))
- return PTR_ERR(sqc);
-
- ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id);
- if (ret) {
- down_read(&qm->qps_lock);
- if (qm->sqc) {
- sqc_curr = qm->sqc + qp_id;
-
- dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC");
- }
- up_read(&qm->qps_lock);
-
- goto free_ctx;
- }
-
- dump_show(qm, sqc, sizeof(*sqc), "SQC");
-
-free_ctx:
- qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
- return 0;
-}
-
-static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
-{
- struct device *dev = &qm->pdev->dev;
- struct qm_cqc *cqc, *cqc_curr;
- dma_addr_t cqc_dma;
- u32 qp_id;
- int ret;
-
- if (!s)
- return -EINVAL;
-
- ret = kstrtou32(s, 0, &qp_id);
- if (ret || qp_id >= qm->qp_num) {
- dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
- return -EINVAL;
- }
-
- cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
- if (IS_ERR(cqc))
- return PTR_ERR(cqc);
-
- ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id);
- if (ret) {
- down_read(&qm->qps_lock);
- if (qm->cqc) {
- cqc_curr = qm->cqc + qp_id;
-
- dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC");
- }
- up_read(&qm->qps_lock);
-
- goto free_ctx;
- }
-
- dump_show(qm, cqc, sizeof(*cqc), "CQC");
-
-free_ctx:
- qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
- return 0;
-}
-
-static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
- int cmd, char *name)
-{
- struct device *dev = &qm->pdev->dev;
- dma_addr_t xeqc_dma;
- void *xeqc;
- int ret;
-
- if (strsep(&s, " ")) {
- dev_err(dev, "Please do not input extra characters!\n");
- return -EINVAL;
- }
-
- xeqc = qm_ctx_alloc(qm, size, &xeqc_dma);
- if (IS_ERR(xeqc))
- return PTR_ERR(xeqc);
-
- ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1);
- if (ret)
- goto err_free_ctx;
-
- dump_show(qm, xeqc, size, name);
-
-err_free_ctx:
- qm_ctx_free(qm, size, xeqc, &xeqc_dma);
- return ret;
-}
-
-static int q_dump_param_parse(struct hisi_qm *qm, char *s,
- u32 *e_id, u32 *q_id, u16 q_depth)
-{
- struct device *dev = &qm->pdev->dev;
- unsigned int qp_num = qm->qp_num;
- char *presult;
- int ret;
-
- presult = strsep(&s, " ");
- if (!presult) {
- dev_err(dev, "Please input qp number!\n");
- return -EINVAL;
- }
-
- ret = kstrtou32(presult, 0, q_id);
- if (ret || *q_id >= qp_num) {
- dev_err(dev, "Please input qp num (0-%u)", qp_num - 1);
- return -EINVAL;
- }
-
- presult = strsep(&s, " ");
- if (!presult) {
- dev_err(dev, "Please input sqe number!\n");
- return -EINVAL;
- }
-
- ret = kstrtou32(presult, 0, e_id);
- if (ret || *e_id >= q_depth) {
- dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);
- return -EINVAL;
- }
-
- if (strsep(&s, " ")) {
- dev_err(dev, "Please do not input extra characters!\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int qm_sq_dump(struct hisi_qm *qm, char *s)
-{
- u16 sq_depth = qm->qp_array->cq_depth;
- void *sqe, *sqe_curr;
- struct hisi_qp *qp;
- u32 qp_id, sqe_id;
- int ret;
-
- ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth);
- if (ret)
- return ret;
-
- sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL);
- if (!sqe)
- return -ENOMEM;
-
- qp = &qm->qp_array[qp_id];
- memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth);
- sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
- memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
- qm->debug.sqe_mask_len);
-
- dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
-
- kfree(sqe);
-
- return 0;
-}
-
-static int qm_cq_dump(struct hisi_qm *qm, char *s)
-{
- struct qm_cqe *cqe_curr;
- struct hisi_qp *qp;
- u32 qp_id, cqe_id;
- int ret;
-
- ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth);
- if (ret)
- return ret;
-
- qp = &qm->qp_array[qp_id];
- cqe_curr = qp->cqe + cqe_id;
- dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
-
- return 0;
-}
-
-static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
- size_t size, char *name)
-{
- struct device *dev = &qm->pdev->dev;
- void *xeqe;
- u32 xeqe_id;
- int ret;
-
- if (!s)
- return -EINVAL;
-
- ret = kstrtou32(s, 0, &xeqe_id);
- if (ret)
- return -EINVAL;
-
- if (!strcmp(name, "EQE") && xeqe_id >= qm->eq_depth) {
- dev_err(dev, "Please input eqe num (0-%u)", qm->eq_depth - 1);
- return -EINVAL;
- } else if (!strcmp(name, "AEQE") && xeqe_id >= qm->aeq_depth) {
- dev_err(dev, "Please input aeqe num (0-%u)", qm->eq_depth - 1);
- return -EINVAL;
- }
-
- down_read(&qm->qps_lock);
-
- if (qm->eqe && !strcmp(name, "EQE")) {
- xeqe = qm->eqe + xeqe_id;
- } else if (qm->aeqe && !strcmp(name, "AEQE")) {
- xeqe = qm->aeqe + xeqe_id;
- } else {
- ret = -EINVAL;
- goto err_unlock;
- }
-
- dump_show(qm, xeqe, size, name);
-
-err_unlock:
- up_read(&qm->qps_lock);
- return ret;
-}
-
-static int qm_dbg_help(struct hisi_qm *qm, char *s)
-{
- struct device *dev = &qm->pdev->dev;
-
- if (strsep(&s, " ")) {
- dev_err(dev, "Please do not input extra characters!\n");
- return -EINVAL;
- }
-
- dev_info(dev, "available commands:\n");
- dev_info(dev, "sqc <num>\n");
- dev_info(dev, "cqc <num>\n");
- dev_info(dev, "eqc\n");
- dev_info(dev, "aeqc\n");
- dev_info(dev, "sq <num> <e>\n");
- dev_info(dev, "cq <num> <e>\n");
- dev_info(dev, "eq <e>\n");
- dev_info(dev, "aeq <e>\n");
-
- return 0;
-}
-
-static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
-{
- struct device *dev = &qm->pdev->dev;
- char *presult, *s, *s_tmp;
- int ret;
-
- s = kstrdup(cmd_buf, GFP_KERNEL);
- if (!s)
- return -ENOMEM;
-
- s_tmp = s;
- presult = strsep(&s, " ");
- if (!presult) {
- ret = -EINVAL;
- goto err_buffer_free;
- }
-
- if (!strcmp(presult, "sqc"))
- ret = qm_sqc_dump(qm, s);
- else if (!strcmp(presult, "cqc"))
- ret = qm_cqc_dump(qm, s);
- else if (!strcmp(presult, "eqc"))
- ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc),
- QM_MB_CMD_EQC, "EQC");
- else if (!strcmp(presult, "aeqc"))
- ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc),
- QM_MB_CMD_AEQC, "AEQC");
- else if (!strcmp(presult, "sq"))
- ret = qm_sq_dump(qm, s);
- else if (!strcmp(presult, "cq"))
- ret = qm_cq_dump(qm, s);
- else if (!strcmp(presult, "eq"))
- ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE");
- else if (!strcmp(presult, "aeq"))
- ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE");
- else if (!strcmp(presult, "help"))
- ret = qm_dbg_help(qm, s);
- else
- ret = -EINVAL;
-
- if (ret)
- dev_info(dev, "Please echo help\n");
-
-err_buffer_free:
- kfree(s_tmp);
-
- return ret;
-}
-
-static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
- size_t count, loff_t *pos)
-{
- struct hisi_qm *qm = filp->private_data;
- char *cmd_buf, *cmd_buf_tmp;
- int ret;
-
- if (*pos)
- return 0;
-
- ret = hisi_qm_get_dfx_access(qm);
- if (ret)
- return ret;
-
- /* Judge if the instance is being reset. */
- if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) {
- ret = 0;
- goto put_dfx_access;
- }
-
- if (count > QM_DBG_WRITE_LEN) {
- ret = -ENOSPC;
- goto put_dfx_access;
- }
-
- cmd_buf = memdup_user_nul(buffer, count);
- if (IS_ERR(cmd_buf)) {
- ret = PTR_ERR(cmd_buf);
- goto put_dfx_access;
- }
-
- cmd_buf_tmp = strchr(cmd_buf, '\n');
- if (cmd_buf_tmp) {
- *cmd_buf_tmp = '\0';
- count = cmd_buf_tmp - cmd_buf + 1;
- }
-
- ret = qm_cmd_write_dump(qm, cmd_buf);
- if (ret) {
- kfree(cmd_buf);
- goto put_dfx_access;
- }
-
- kfree(cmd_buf);
-
- ret = count;
-
-put_dfx_access:
- hisi_qm_put_dfx_access(qm);
- return ret;
-}
-
-static const struct file_operations qm_cmd_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = qm_cmd_read,
- .write = qm_cmd_write,
-};
-
-static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
- enum qm_debug_file index)
-{
- struct debugfs_file *file = qm->debug.files + index;
-
- debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
- &qm_debug_fops);
-
- file->index = index;
- mutex_init(&file->lock);
- file->debug = &qm->debug;
-}
-
static void qm_hw_error_init_v1(struct hisi_qm *qm)
{
writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
@@ -3101,7 +2130,7 @@ static int qm_drain_qp(struct hisi_qp *qp)
return ret;
}
- addr = qm_ctx_alloc(qm, size, &dma_addr);
+ addr = hisi_qm_ctx_alloc(qm, size, &dma_addr);
if (IS_ERR(addr)) {
dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
return -ENOMEM;
@@ -3136,7 +2165,7 @@ static int qm_drain_qp(struct hisi_qp *qp)
usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
}
- qm_ctx_free(qm, size, addr, &dma_addr);
+ hisi_qm_ctx_free(qm, size, addr, &dma_addr);
return ret;
}
@@ -3721,17 +2750,6 @@ static void hisi_qm_set_state(struct hisi_qm *qm, u8 state)
writel(state, qm->io_base + QM_VF_STATE);
}
-static void qm_last_regs_uninit(struct hisi_qm *qm)
-{
- struct qm_debug *debug = &qm->debug;
-
- if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
- return;
-
- kfree(debug->qm_last_words);
- debug->qm_last_words = NULL;
-}
-
static void hisi_qm_unint_work(struct hisi_qm *qm)
{
destroy_workqueue(qm->wq);
@@ -3762,8 +2780,6 @@ static void hisi_qm_memory_uninit(struct hisi_qm *qm)
*/
void hisi_qm_uninit(struct hisi_qm *qm)
{
- qm_last_regs_uninit(qm);
-
qm_cmd_uninit(qm);
hisi_qm_unint_work(qm);
down_write(&qm->qps_lock);
@@ -4132,45 +3148,6 @@ err_unlock:
}
EXPORT_SYMBOL_GPL(hisi_qm_stop);
-static ssize_t qm_status_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *pos)
-{
- struct hisi_qm *qm = filp->private_data;
- char buf[QM_DBG_READ_LEN];
- int val, len;
-
- val = atomic_read(&qm->status.flags);
- len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
-
- return simple_read_from_buffer(buffer, count, pos, buf, len);
-}
-
-static const struct file_operations qm_status_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = qm_status_read,
-};
-
-static int qm_debugfs_atomic64_set(void *data, u64 val)
-{
- if (val)
- return -EINVAL;
-
- atomic64_set((atomic64_t *)data, 0);
-
- return 0;
-}
-
-static int qm_debugfs_atomic64_get(void *data, u64 *val)
-{
- *val = atomic64_read((atomic64_t *)data);
-
- return 0;
-}
-
-DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
- qm_debugfs_atomic64_set, "%llu\n");
-
static void qm_hw_error_init(struct hisi_qm *qm)
{
if (!qm->ops->hw_error_init) {
@@ -4277,16 +3254,14 @@ static int hisi_qm_sort_devices(int node, struct list_head *head,
struct hisi_qm *qm;
struct list_head *n;
struct device *dev;
- int dev_node = 0;
+ int dev_node;
list_for_each_entry(qm, &qm_list->list, list) {
dev = &qm->pdev->dev;
- if (IS_ENABLED(CONFIG_NUMA)) {
- dev_node = dev_to_node(dev);
- if (dev_node < 0)
- dev_node = 0;
- }
+ dev_node = dev_to_node(dev);
+ if (dev_node < 0)
+ dev_node = 0;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
@@ -4592,49 +3567,36 @@ err_put_dfx_access:
return ret;
}
-static ssize_t qm_qos_value_init(const char *buf, unsigned long *val)
-{
- int buflen = strlen(buf);
- int ret, i;
-
- for (i = 0; i < buflen; i++) {
- if (!isdigit(buf[i]))
- return -EINVAL;
- }
-
- ret = sscanf(buf, "%lu", val);
- if (ret != QM_QOS_VAL_NUM)
- return -EINVAL;
-
- return 0;
-}
-
static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
unsigned long *val,
unsigned int *fun_index)
{
+ struct bus_type *bus_type = qm->pdev->dev.bus;
char tbuf_bdf[QM_DBG_READ_LEN] = {0};
- char val_buf[QM_QOS_VAL_MAX_LEN] = {0};
- u32 tmp1, device, function;
- int ret, bus;
+ char val_buf[QM_DBG_READ_LEN] = {0};
+ struct pci_dev *pdev;
+ struct device *dev;
+ int ret;
ret = sscanf(buf, "%s %s", tbuf_bdf, val_buf);
if (ret != QM_QOS_PARAM_NUM)
return -EINVAL;
- ret = qm_qos_value_init(val_buf, val);
+ ret = kstrtoul(val_buf, 10, val);
if (ret || *val == 0 || *val > QM_QOS_MAX_VAL) {
pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n");
return -EINVAL;
}
- ret = sscanf(tbuf_bdf, "%u:%x:%u.%u", &tmp1, &bus, &device, &function);
- if (ret != QM_QOS_BDF_PARAM_NUM) {
- pci_err(qm->pdev, "input pci bdf value is error!\n");
- return -EINVAL;
+ dev = bus_find_device_by_name(bus_type, NULL, tbuf_bdf);
+ if (!dev) {
+ pci_err(qm->pdev, "input pci bdf number is invalid!\n");
+ return -ENODEV;
}
- *fun_index = PCI_DEVFN(device, function);
+ pdev = container_of(dev, struct pci_dev, dev);
+
+ *fun_index = pdev->devfn;
return 0;
}
@@ -4648,9 +3610,6 @@ static ssize_t qm_algqos_write(struct file *filp, const char __user *buf,
unsigned long val;
int len, ret;
- if (qm->fun_type == QM_HW_VF)
- return -EINVAL;
-
if (*pos != 0)
return 0;
@@ -4709,7 +3668,7 @@ static const struct file_operations qm_algqos_fops = {
*
* Create function qos debugfs files, VF ping PF to get function qos.
*/
-static void hisi_qm_set_algqos_init(struct hisi_qm *qm)
+void hisi_qm_set_algqos_init(struct hisi_qm *qm)
{
if (qm->fun_type == QM_HW_PF)
debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
@@ -4719,88 +3678,6 @@ static void hisi_qm_set_algqos_init(struct hisi_qm *qm)
qm, &qm_algqos_fops);
}
-/**
- * hisi_qm_debug_init() - Initialize qm related debugfs files.
- * @qm: The qm for which we want to add debugfs files.
- *
- * Create qm related debugfs files.
- */
-void hisi_qm_debug_init(struct hisi_qm *qm)
-{
- struct dfx_diff_registers *qm_regs = qm->debug.qm_diff_regs;
- struct qm_dfx *dfx = &qm->debug.dfx;
- struct dentry *qm_d;
- void *data;
- int i;
-
- qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
- qm->debug.qm_d = qm_d;
-
- /* only show this in PF */
- if (qm->fun_type == QM_HW_PF) {
- qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
- for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
- qm_create_debugfs_file(qm, qm->debug.qm_d, i);
- }
-
- if (qm_regs)
- debugfs_create_file("diff_regs", 0444, qm->debug.qm_d,
- qm, &qm_diff_regs_fops);
-
- debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
-
- debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops);
-
- debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
- &qm_status_fops);
- for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
- data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
- debugfs_create_file(qm_dfx_files[i].name,
- 0644,
- qm_d,
- data,
- &qm_atomic64_ops);
- }
-
- if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
- hisi_qm_set_algqos_init(qm);
-}
-EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
-
-/**
- * hisi_qm_debug_regs_clear() - clear qm debug related registers.
- * @qm: The qm for which we want to clear its debug registers.
- */
-void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
-{
- const struct debugfs_reg32 *regs;
- int i;
-
- /* clear current_qm */
- writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
-
- /* clear current_q */
- writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
- writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
-
- /*
- * these registers are reading and clearing, so clear them after
- * reading them.
- */
- writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
-
- regs = qm_dfx_regs;
- for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
- readl(qm->io_base + regs->offset);
- regs++;
- }
-
- /* clear clear_enable */
- writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
-}
-EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
-
static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func)
{
int i;
@@ -5439,24 +4316,6 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
return 0;
}
-static void qm_show_last_dfx_regs(struct hisi_qm *qm)
-{
- struct qm_debug *debug = &qm->debug;
- struct pci_dev *pdev = qm->pdev;
- u32 val;
- int i;
-
- if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
- return;
-
- for (i = 0; i < ARRAY_SIZE(qm_dfx_regs); i++) {
- val = readl_relaxed(qm->io_base + qm_dfx_regs[i].offset);
- if (debug->qm_last_words[i] != val)
- pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n",
- qm_dfx_regs[i].name, debug->qm_last_words[i], val);
- }
-}
-
static int qm_controller_reset(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -5472,7 +4331,7 @@ static int qm_controller_reset(struct hisi_qm *qm)
return ret;
}
- qm_show_last_dfx_regs(qm);
+ hisi_qm_show_last_dfx_regs(qm);
if (qm->err_ini->show_last_dfx_regs)
qm->err_ini->show_last_dfx_regs(qm);
@@ -5725,6 +4584,7 @@ static void qm_pf_reset_vf_done(struct hisi_qm *qm)
cmd = QM_VF_START_FAIL;
}
+ qm_cmd_init(qm);
ret = qm_ping_pf(qm, cmd);
if (ret)
dev_warn(&pdev->dev, "PF responds timeout in reset done!\n");
@@ -5786,7 +4646,6 @@ static void qm_pf_reset_vf_process(struct hisi_qm *qm,
goto err_get_status;
qm_pf_reset_vf_done(qm);
- qm_cmd_init(qm);
dev_info(dev, "device reset done.\n");
@@ -6359,26 +5218,6 @@ err_destroy_idr:
return ret;
}
-static void qm_last_regs_init(struct hisi_qm *qm)
-{
- int dfx_regs_num = ARRAY_SIZE(qm_dfx_regs);
- struct qm_debug *debug = &qm->debug;
- int i;
-
- if (qm->fun_type == QM_HW_VF)
- return;
-
- debug->qm_last_words = kcalloc(dfx_regs_num, sizeof(unsigned int),
- GFP_KERNEL);
- if (!debug->qm_last_words)
- return;
-
- for (i = 0; i < dfx_regs_num; i++) {
- debug->qm_last_words[i] = readl_relaxed(qm->io_base +
- qm_dfx_regs[i].offset);
- }
-}
-
/**
* hisi_qm_init() - Initialize configures about qm.
* @qm: The qm needing init.
@@ -6427,8 +5266,6 @@ int hisi_qm_init(struct hisi_qm *qm)
qm_cmd_init(qm);
atomic_set(&qm->status.flags, QM_INIT);
- qm_last_regs_init(qm);
-
return 0;
err_free_qm_memory:
@@ -6631,8 +5468,14 @@ int hisi_qm_resume(struct device *dev)
}
ret = hisi_qm_start(qm);
- if (ret)
- pci_err(pdev, "failed to start qm(%d)\n", ret);
+ if (ret) {
+ if (qm_check_dev_error(qm)) {
+ pci_info(pdev, "failed to start qm due to device error, device will be reset!\n");
+ return 0;
+ }
+
+ pci_err(pdev, "failed to start qm(%d)!\n", ret);
+ }
return ret;
}
diff --git a/drivers/crypto/hisilicon/qm_common.h b/drivers/crypto/hisilicon/qm_common.h
new file mode 100644
index 000000000000..1406a422d455
--- /dev/null
+++ b/drivers/crypto/hisilicon/qm_common.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2022 HiSilicon Limited. */
+#ifndef QM_COMMON_H
+#define QM_COMMON_H
+
+#define QM_DBG_READ_LEN 256
+#define QM_RESETTING 2
+
+struct qm_cqe {
+ __le32 rsvd0;
+ __le16 cmd_id;
+ __le16 rsvd1;
+ __le16 sq_head;
+ __le16 sq_num;
+ __le16 rsvd2;
+ __le16 w7;
+};
+
+struct qm_eqe {
+ __le32 dw0;
+};
+
+struct qm_aeqe {
+ __le32 dw0;
+};
+
+struct qm_sqc {
+ __le16 head;
+ __le16 tail;
+ __le32 base_l;
+ __le32 base_h;
+ __le32 dw3;
+ __le16 w8;
+ __le16 rsvd0;
+ __le16 pasid;
+ __le16 w11;
+ __le16 cq_num;
+ __le16 w13;
+ __le32 rsvd1;
+};
+
+struct qm_cqc {
+ __le16 head;
+ __le16 tail;
+ __le32 base_l;
+ __le32 base_h;
+ __le32 dw3;
+ __le16 w8;
+ __le16 rsvd0;
+ __le16 pasid;
+ __le16 w11;
+ __le32 dw6;
+ __le32 rsvd1;
+};
+
+struct qm_eqc {
+ __le16 head;
+ __le16 tail;
+ __le32 base_l;
+ __le32 base_h;
+ __le32 dw3;
+ __le32 rsvd[2];
+ __le32 dw6;
+};
+
+struct qm_aeqc {
+ __le16 head;
+ __le16 tail;
+ __le32 base_l;
+ __le32 base_h;
+ __le32 dw3;
+ __le32 rsvd[2];
+ __le32 dw6;
+};
+
+static const char * const qm_s[] = {
+ "init", "start", "close", "stop",
+};
+
+void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
+ dma_addr_t *dma_addr);
+void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
+ const void *ctx_addr, dma_addr_t *dma_addr);
+void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm);
+void hisi_qm_set_algqos_init(struct hisi_qm *qm);
+
+#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 84ae8ddd1a13..f5bfc9755a4a 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -283,7 +283,6 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
spin_lock_bh(&qp_ctx->req_lock);
ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
-
if (ctx->fake_req_limit <=
atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
list_add_tail(&req->backlog_head, &qp_ctx->backlog);
@@ -2009,7 +2008,7 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
return sec_aead_ctx_init(tfm, "sha512");
}
-static int sec_skcipher_cryptlen_ckeck(struct sec_ctx *ctx,
+static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
struct sec_req *sreq)
{
u32 cryptlen = sreq->c_req.sk_req->cryptlen;
@@ -2071,7 +2070,7 @@ static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
}
return 0;
} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
- return sec_skcipher_cryptlen_ckeck(ctx, sreq);
+ return sec_skcipher_cryptlen_check(ctx, sreq);
}
dev_err(dev, "skcipher algorithm error!\n");
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 3705412bac5f..93572c0d4faa 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -55,7 +55,7 @@
#define SEC_CONTROL_REG 0x301200
#define SEC_DYNAMIC_GATE_REG 0x30121c
#define SEC_CORE_AUTO_GATE 0x30212c
-#define SEC_DYNAMIC_GATE_EN 0x7bff
+#define SEC_DYNAMIC_GATE_EN 0x7fff
#define SEC_CORE_AUTO_GATE_EN GENMASK(3, 0)
#define SEC_CLK_GATE_ENABLE BIT(3)
#define SEC_CLK_GATE_DISABLE (~BIT(3))
@@ -427,7 +427,6 @@ static void sec_set_endian(struct hisi_qm *qm)
if (!IS_ENABLED(CONFIG_64BIT))
reg |= BIT(1);
-
if (!IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
reg |= BIT(0);
@@ -899,8 +898,7 @@ static int sec_debugfs_init(struct hisi_qm *qm)
qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
- ret = hisi_qm_diff_regs_init(qm, sec_diff_regs,
- ARRAY_SIZE(sec_diff_regs));
+ ret = hisi_qm_regs_debugfs_init(qm, sec_diff_regs, ARRAY_SIZE(sec_diff_regs));
if (ret) {
dev_warn(dev, "Failed to init SEC diff regs!\n");
goto debugfs_remove;
@@ -915,7 +913,7 @@ static int sec_debugfs_init(struct hisi_qm *qm)
return 0;
failed_to_create:
- hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
debugfs_remove:
debugfs_remove_recursive(sec_debugfs_root);
return ret;
@@ -923,7 +921,7 @@ debugfs_remove:
static void sec_debugfs_exit(struct hisi_qm *qm)
{
- hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
debugfs_remove_recursive(qm->debug.debug_root);
}
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index c863435e8c75..1549bec3aea5 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -849,8 +849,7 @@ static int hisi_zip_debugfs_init(struct hisi_qm *qm)
qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET;
qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN;
qm->debug.debug_root = dev_d;
- ret = hisi_qm_diff_regs_init(qm, hzip_diff_regs,
- ARRAY_SIZE(hzip_diff_regs));
+ ret = hisi_qm_regs_debugfs_init(qm, hzip_diff_regs, ARRAY_SIZE(hzip_diff_regs));
if (ret) {
dev_warn(dev, "Failed to init ZIP diff regs!\n");
goto debugfs_remove;
@@ -869,7 +868,7 @@ static int hisi_zip_debugfs_init(struct hisi_qm *qm)
return 0;
failed_to_create:
- hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
debugfs_remove:
debugfs_remove_recursive(hzip_debugfs_root);
return ret;
@@ -895,7 +894,7 @@ static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
static void hisi_zip_debugfs_exit(struct hisi_qm *qm)
{
- hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
debugfs_remove_recursive(qm->debug.debug_root);
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index d8e82d69745d..9629e98bd68b 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -358,12 +358,16 @@ static int img_hash_dma_init(struct img_hash_dev *hdev)
static void img_hash_dma_task(unsigned long d)
{
struct img_hash_dev *hdev = (struct img_hash_dev *)d;
- struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
+ struct img_hash_request_ctx *ctx;
u8 *addr;
size_t nbytes, bleft, wsend, len, tbc;
struct scatterlist tsg;
- if (!hdev->req || !ctx->sg)
+ if (!hdev->req)
+ return;
+
+ ctx = ahash_request_ctx(hdev->req);
+ if (!ctx->sg)
return;
addr = sg_virt(ctx->sg);
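
The img-hash hunk is the canonical dereference-before-check fix: the old code evaluated ahash_request_ctx(hdev->req) in the declaration, before the !hdev->req guard could run. Reduced to a self-contained sketch (every name here is illustrative, not the driver's):

struct my_ctx { void *sg; };
struct my_req { struct my_ctx c; };
struct my_dev { struct my_req *req; };

static struct my_ctx *get_ctx(struct my_req *r) { return &r->c; }

static void dma_task(struct my_dev *d)
{
	struct my_ctx *c;

	if (!d->req)			/* validate the pointer first ... */
		return;

	c = get_ctx(d->req);		/* ... then derive anything from it */
	if (!c->sg)
		return;

	/* safe to use c from here on */
}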
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index ad0d8c4a71ac..ae6110376e21 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -316,14 +316,20 @@ static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
const struct firmware *fw)
{
- const __be32 *data = (const __be32 *)fw->data;
+ u32 val;
int i;
/* Write the firmware */
- for (i = 0; i < fw->size / sizeof(u32); i++)
- writel(be32_to_cpu(data[i]),
+ for (i = 0; i < fw->size / sizeof(u32); i++) {
+ if (priv->data->fw_little_endian)
+ val = le32_to_cpu(((const __le32 *)fw->data)[i]);
+ else
+ val = be32_to_cpu(((const __be32 *)fw->data)[i]);
+
+ writel(val,
priv->base + EIP197_CLASSIFICATION_RAMS +
- i * sizeof(__be32));
+ i * sizeof(val));
+ }
/* Exclude final 2 NOPs from size */
return i - EIP197_FW_TERMINAL_NOPS;
@@ -410,11 +416,13 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
int i, j, ret = 0, pe;
int ipuesz, ifppsz, minifw = 0;
- if (priv->version == EIP197D_MRVL)
+ if (priv->data->version == EIP197D_MRVL)
dir = "eip197d";
- else if (priv->version == EIP197B_MRVL ||
- priv->version == EIP197_DEVBRD)
+ else if (priv->data->version == EIP197B_MRVL ||
+ priv->data->version == EIP197_DEVBRD)
dir = "eip197b";
+ else if (priv->data->version == EIP197C_MXL)
+ dir = "eip197c";
else
return -ENODEV;
@@ -423,7 +431,7 @@ retry_fw:
snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]);
ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev);
if (ret) {
- if (minifw || priv->version != EIP197B_MRVL)
+ if (minifw || priv->data->version != EIP197B_MRVL)
goto release_fw;
/* Fallback to the old firmware location for the
@@ -1597,7 +1605,7 @@ static int safexcel_probe_generic(void *pdev,
safexcel_configure(priv);
- if (IS_ENABLED(CONFIG_PCI) && priv->version == EIP197_DEVBRD) {
+ if (IS_ENABLED(CONFIG_PCI) && priv->data->version == EIP197_DEVBRD) {
/*
* Request MSI vectors for global + 1 per ring -
* or just 1 for older dev images
@@ -1731,7 +1739,7 @@ static int safexcel_probe(struct platform_device *pdev)
return -ENOMEM;
priv->dev = dev;
- priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
+ priv->data = (struct safexcel_priv_data *)of_device_get_match_data(dev);
platform_set_drvdata(pdev, priv);
@@ -1806,27 +1814,52 @@ static int safexcel_remove(struct platform_device *pdev)
return 0;
}
+static const struct safexcel_priv_data eip97ies_mrvl_data = {
+ .version = EIP97IES_MRVL,
+};
+
+static const struct safexcel_priv_data eip197b_mrvl_data = {
+ .version = EIP197B_MRVL,
+};
+
+static const struct safexcel_priv_data eip197d_mrvl_data = {
+ .version = EIP197D_MRVL,
+};
+
+static const struct safexcel_priv_data eip197_devbrd_data = {
+ .version = EIP197_DEVBRD,
+};
+
+static const struct safexcel_priv_data eip197c_mxl_data = {
+ .version = EIP197C_MXL,
+ .fw_little_endian = true,
+};
+
static const struct of_device_id safexcel_of_match_table[] = {
{
.compatible = "inside-secure,safexcel-eip97ies",
- .data = (void *)EIP97IES_MRVL,
+ .data = &eip97ies_mrvl_data,
},
{
.compatible = "inside-secure,safexcel-eip197b",
- .data = (void *)EIP197B_MRVL,
+ .data = &eip197b_mrvl_data,
},
{
.compatible = "inside-secure,safexcel-eip197d",
- .data = (void *)EIP197D_MRVL,
+ .data = &eip197d_mrvl_data,
+ },
+ {
+ .compatible = "inside-secure,safexcel-eip197c-mxl",
+ .data = &eip197c_mxl_data,
},
/* For backward compatibility and intended for generic use */
{
.compatible = "inside-secure,safexcel-eip97",
- .data = (void *)EIP97IES_MRVL,
+ .data = &eip97ies_mrvl_data,
},
{
.compatible = "inside-secure,safexcel-eip197",
- .data = (void *)EIP197B_MRVL,
+ .data = &eip197b_mrvl_data,
},
{},
};
@@ -1862,7 +1895,7 @@ static int safexcel_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
priv->dev = dev;
- priv->version = (enum safexcel_eip_version)ent->driver_data;
+ priv->data = (struct safexcel_priv_data *)ent->driver_data;
pci_set_drvdata(pdev, priv);
@@ -1881,7 +1914,7 @@ static int safexcel_pci_probe(struct pci_dev *pdev,
}
priv->base = pcim_iomap_table(pdev)[0];
- if (priv->version == EIP197_DEVBRD) {
+ if (priv->data->version == EIP197_DEVBRD) {
dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");
rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
@@ -1949,7 +1982,7 @@ static const struct pci_device_id safexcel_pci_ids[] = {
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
0x16ae, 0xc522),
- .driver_data = EIP197_DEVBRD,
+ .driver_data = (kernel_ulong_t)&eip197_devbrd_data,
},
{},
};
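
All the safexcel churn above is a single idea: the OF/PCI match data stops being an enum smuggled through a void * and becomes a pointer to a static per-device struct, so a new capability (here fw_little_endian for the MaxLinear EIP197C) costs one table entry instead of another scatter of version checks. The shape of the pattern, on a hypothetical device rather than this driver:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct chip_data {
	int version;
	bool fw_little_endian;
};

static const struct chip_data chip_b = {
	.version		= 2,
	.fw_little_endian	= true,	/* new flag: one line, one entry */
};

static const struct of_device_id my_match_table[] = {
	{ .compatible = "vendor,chip-b", .data = &chip_b },
	{ }
};

static int my_probe(struct platform_device *pdev)
{
	const struct chip_data *data = of_device_get_match_data(&pdev->dev);

	if (!data)
		return -ENODEV;
	/* data->fw_little_endian now selects the firmware byte order */
	return 0;
}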
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 797ff91512e0..6c2fc662f64f 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -730,7 +730,13 @@ enum safexcel_eip_version {
EIP97IES_MRVL,
EIP197B_MRVL,
EIP197D_MRVL,
- EIP197_DEVBRD
+ EIP197_DEVBRD,
+ EIP197C_MXL,
+};
+
+struct safexcel_priv_data {
+ enum safexcel_eip_version version;
+ bool fw_little_endian;
};
/* Priority we use for advertising our algorithms */
@@ -815,7 +821,7 @@ struct safexcel_crypto_priv {
struct clk *reg_clk;
struct safexcel_config config;
- enum safexcel_eip_version version;
+ struct safexcel_priv_data *data;
struct safexcel_register_offsets offsets;
struct safexcel_hwconfig hwconfig;
u32 flags;
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 103fc551d2af..ca46328472d4 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -231,7 +231,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
struct safexcel_result_desc *rdesc;
struct ahash_request *areq = ahash_request_cast(async);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
- struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *sreq = ahash_request_ctx_dma(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
u64 cache_len;
@@ -312,7 +312,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_crypto_priv *priv = ctx->base.priv;
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
@@ -569,7 +569,7 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
bool *should_complete, int *ret)
{
struct ahash_request *areq = ahash_request_cast(async);
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
int err;
BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && req->needs_inv);
@@ -608,7 +608,7 @@ static int safexcel_ahash_send(struct crypto_async_request *async,
int ring, int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
int ret;
if (req->needs_inv)
@@ -624,7 +624,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->base.priv;
EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE);
- struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
+ struct safexcel_ahash_req *rctx = ahash_request_ctx_dma(req);
struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
@@ -663,7 +663,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
*/
static int safexcel_ahash_cache(struct ahash_request *areq)
{
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
u64 cache_len;
/* cache_len: everything accepted by the driver but not sent yet,
@@ -689,7 +689,7 @@ static int safexcel_ahash_cache(struct ahash_request *areq)
static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret, ring;
@@ -741,7 +741,7 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
static int safexcel_ahash_update(struct ahash_request *areq)
{
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
int ret;
/* If the request is 0 length, do nothing */
@@ -766,7 +766,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
static int safexcel_ahash_final(struct ahash_request *areq)
{
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
req->finish = true;
@@ -870,7 +870,7 @@ static int safexcel_ahash_final(struct ahash_request *areq)
static int safexcel_ahash_finup(struct ahash_request *areq)
{
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
req->finish = true;
@@ -880,7 +880,7 @@ static int safexcel_ahash_finup(struct ahash_request *areq)
static int safexcel_ahash_export(struct ahash_request *areq, void *out)
{
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
struct safexcel_ahash_export_state *export = out;
export->len = req->len;
@@ -896,7 +896,7 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
{
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
const struct safexcel_ahash_export_state *export = in;
int ret;
@@ -927,15 +927,15 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
ctx->base.handle_result = safexcel_handle_result;
ctx->fb_do_setkey = false;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct safexcel_ahash_req));
+ crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
+ sizeof(struct safexcel_ahash_req));
return 0;
}
static int safexcel_sha1_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -1012,7 +1012,7 @@ struct safexcel_alg_template safexcel_alg_sha1 = {
static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -1124,7 +1124,7 @@ static int safexcel_hmac_init_iv(struct ahash_request *areq,
if (ret)
return ret;
- req = ahash_request_ctx(areq);
+ req = ahash_request_ctx_dma(areq);
req->hmac = true;
req->last_req = true;
@@ -1264,7 +1264,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
static int safexcel_sha256_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -1321,7 +1321,7 @@ struct safexcel_alg_template safexcel_alg_sha256 = {
static int safexcel_sha224_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -1385,7 +1385,7 @@ static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key,
static int safexcel_hmac_sha224_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -1457,7 +1457,7 @@ static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
static int safexcel_hmac_sha256_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -1522,7 +1522,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
static int safexcel_sha512_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -1579,7 +1579,7 @@ struct safexcel_alg_template safexcel_alg_sha512 = {
static int safexcel_sha384_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -1643,7 +1643,7 @@ static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key,
static int safexcel_hmac_sha512_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -1715,7 +1715,7 @@ static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key,
static int safexcel_hmac_sha384_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -1780,7 +1780,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha384 = {
static int safexcel_md5_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -1837,7 +1837,7 @@ struct safexcel_alg_template safexcel_alg_md5 = {
static int safexcel_hmac_md5_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -1920,7 +1920,7 @@ static int safexcel_crc32_cra_init(struct crypto_tfm *tfm)
static int safexcel_crc32_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -1992,7 +1992,7 @@ struct safexcel_alg_template safexcel_alg_crc32 = {
static int safexcel_cbcmac_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -2252,7 +2252,7 @@ struct safexcel_alg_template safexcel_alg_cmac = {
static int safexcel_sm3_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -2316,7 +2316,7 @@ static int safexcel_hmac_sm3_setkey(struct crypto_ahash *tfm, const u8 *key,
static int safexcel_hmac_sm3_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -2382,7 +2382,7 @@ static int safexcel_sha3_224_init(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -2400,7 +2400,7 @@ static int safexcel_sha3_fbcheck(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *subreq = ahash_request_ctx(req);
+ struct ahash_request *subreq = ahash_request_ctx_dma(req);
int ret = 0;
if (ctx->do_fallback) {
@@ -2437,7 +2437,7 @@ static int safexcel_sha3_update(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *subreq = ahash_request_ctx(req);
+ struct ahash_request *subreq = ahash_request_ctx_dma(req);
ctx->do_fallback = true;
return safexcel_sha3_fbcheck(req) ?: crypto_ahash_update(subreq);
@@ -2447,7 +2447,7 @@ static int safexcel_sha3_final(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *subreq = ahash_request_ctx(req);
+ struct ahash_request *subreq = ahash_request_ctx_dma(req);
ctx->do_fallback = true;
return safexcel_sha3_fbcheck(req) ?: crypto_ahash_final(subreq);
@@ -2457,7 +2457,7 @@ static int safexcel_sha3_finup(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *subreq = ahash_request_ctx(req);
+ struct ahash_request *subreq = ahash_request_ctx_dma(req);
ctx->do_fallback |= !req->nbytes;
if (ctx->do_fallback)
@@ -2472,7 +2472,7 @@ static int safexcel_sha3_digest_fallback(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *subreq = ahash_request_ctx(req);
+ struct ahash_request *subreq = ahash_request_ctx_dma(req);
ctx->do_fallback = true;
ctx->fb_init_done = false;
@@ -2492,7 +2492,7 @@ static int safexcel_sha3_export(struct ahash_request *req, void *out)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *subreq = ahash_request_ctx(req);
+ struct ahash_request *subreq = ahash_request_ctx_dma(req);
ctx->do_fallback = true;
return safexcel_sha3_fbcheck(req) ?: crypto_ahash_export(subreq, out);
@@ -2502,7 +2502,7 @@ static int safexcel_sha3_import(struct ahash_request *req, const void *in)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *subreq = ahash_request_ctx(req);
+ struct ahash_request *subreq = ahash_request_ctx_dma(req);
ctx->do_fallback = true;
return safexcel_sha3_fbcheck(req) ?: crypto_ahash_import(subreq, in);
@@ -2526,9 +2526,10 @@ static int safexcel_sha3_cra_init(struct crypto_tfm *tfm)
/* Update statesize from fallback algorithm! */
crypto_hash_alg_common(ahash)->statesize =
crypto_ahash_statesize(ctx->fback);
- crypto_ahash_set_reqsize(ahash, max(sizeof(struct safexcel_ahash_req),
- sizeof(struct ahash_request) +
- crypto_ahash_reqsize(ctx->fback)));
+ crypto_ahash_set_reqsize_dma(
+ ahash, max(sizeof(struct safexcel_ahash_req),
+ sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(ctx->fback)));
return 0;
}
@@ -2575,7 +2576,7 @@ static int safexcel_sha3_256_init(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -2633,7 +2634,7 @@ static int safexcel_sha3_384_init(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -2691,7 +2692,7 @@ static int safexcel_sha3_512_init(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -2841,7 +2842,7 @@ static int safexcel_hmac_sha3_224_init(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -2912,7 +2913,7 @@ static int safexcel_hmac_sha3_256_init(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -2983,7 +2984,7 @@ static int safexcel_hmac_sha3_384_init(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
@@ -3054,7 +3055,7 @@ static int safexcel_hmac_sha3_512_init(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
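
Every safexcel_hash.c hunk is the same mechanical conversion, and it only works as a pair: a request context the engine DMAs into must be sized with crypto_ahash_set_reqsize_dma() and then always fetched with ahash_request_ctx_dma(), so the crypto core can place it at a DMA-aligned offset now that arm64 kmalloc() no longer guarantees that alignment. The pairing in miniature (hypothetical driver, not safexcel itself):

#include <crypto/internal/hash.h>
#include <linux/string.h>

struct my_req_ctx {
	u8 state[64];	/* written back by the engine via DMA */
};

static int my_cra_init(struct crypto_tfm *tfm)
{
	/* reserve a DMA-aligned request context ... */
	crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
				     sizeof(struct my_req_ctx));
	return 0;
}

static int my_hash_init(struct ahash_request *areq)
{
	/* ... and only ever fetch it through the matching accessor;
	 * a stray ahash_request_ctx() would read the wrong offset
	 */
	struct my_req_ctx *rctx = ahash_request_ctx_dma(areq);

	memset(rctx, 0, sizeof(*rctx));
	return 0;
}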
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index d39a386b31ac..984b3cc0237c 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -420,7 +420,7 @@ static void one_packet(dma_addr_t phys)
break;
case CTL_FLAG_GEN_REVAES:
ctx = crypto_tfm_ctx(crypt->data.tfm);
- *(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
+ *(__be32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
if (atomic_dec_and_test(&ctx->configuring))
complete(&ctx->completion);
break;
@@ -720,7 +720,7 @@ static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
crypt->init_len = init_len;
crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
- buf->next = 0;
+ buf->next = NULL;
buf->buf_len = HMAC_PAD_BLOCKLEN;
buf->pkt_len = 0;
buf->phys_addr = pad_phys;
@@ -751,7 +751,7 @@ static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned int authsize
#ifndef __ARMEB__
cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
- *(u32 *)cinfo = cpu_to_be32(cfgword);
+ *(__be32 *)cinfo = cpu_to_be32(cfgword);
cinfo += sizeof(cfgword);
/* write ICV to cryptinfo */
@@ -788,7 +788,7 @@ static int gen_rev_aes_key(struct crypto_tfm *tfm)
if (!crypt)
return -EAGAIN;
- *(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
+ *(__be32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
crypt->data.tfm = tfm;
crypt->crypt_offs = 0;
@@ -846,7 +846,7 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt, const u8 *key,
return err;
}
/* write cfg word to cryptinfo */
- *(u32 *)cinfo = cpu_to_be32(cipher_cfg);
+ *(__be32 *)cinfo = cpu_to_be32(cipher_cfg);
cinfo += sizeof(cipher_cfg);
/* write cipher key to cryptinfo */
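
The ixp4xx changes are sparse hygiene rather than behaviour: cpu_to_be32() returns a __be32, so the store has to go through a __be32 pointer (and a NULL pointer is NULL, not 0). The annotation rule in isolation, on a hypothetical buffer:

#include <linux/types.h>
#include <asm/byteorder.h>

static void write_cfg_word(u8 *buf, u32 cfgword)
{
	/* the on-the-wire word is big-endian; say so in the pointer type,
	 * or a sparse build (make C=1) reports a __be32/u32 mismatch
	 */
	*(__be32 *)buf = cpu_to_be32(cfgword);
}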
diff --git a/drivers/crypto/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
index 0379dbf32a4c..d4bcbed1f546 100644
--- a/drivers/crypto/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
@@ -226,7 +226,7 @@ static void kmb_ocs_hcu_dma_cleanup(struct ahash_request *req,
*/
static int kmb_ocs_dma_prepare(struct ahash_request *req)
{
- struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
struct device *dev = rctx->hcu_dev->dev;
unsigned int remainder = 0;
unsigned int total;
@@ -356,7 +356,7 @@ cleanup:
static void kmb_ocs_hcu_secure_cleanup(struct ahash_request *req)
{
- struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
/* Clear buffer of any data. */
memzero_explicit(rctx->buffer, sizeof(rctx->buffer));
@@ -374,7 +374,7 @@ static int kmb_ocs_hcu_handle_queue(struct ahash_request *req)
static int prepare_ipad(struct ahash_request *req)
{
- struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
int i;
@@ -414,7 +414,7 @@ static int kmb_ocs_hcu_do_one_request(struct crypto_engine *engine, void *areq)
base);
struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);
int rc;
int i;
@@ -561,7 +561,7 @@ error:
static int kmb_ocs_hcu_init(struct ahash_request *req)
{
struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
- struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
@@ -614,7 +614,7 @@ static int kmb_ocs_hcu_init(struct ahash_request *req)
static int kmb_ocs_hcu_update(struct ahash_request *req)
{
- struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
int rc;
if (!req->nbytes)
@@ -650,7 +650,7 @@ static int kmb_ocs_hcu_update(struct ahash_request *req)
/* Common logic for kmb_ocs_hcu_final() and kmb_ocs_hcu_finup(). */
static int kmb_ocs_hcu_fin_common(struct ahash_request *req)
{
- struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
int rc;
@@ -687,7 +687,7 @@ static int kmb_ocs_hcu_fin_common(struct ahash_request *req)
static int kmb_ocs_hcu_final(struct ahash_request *req)
{
- struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
rctx->sg_data_total = 0;
rctx->sg_data_offset = 0;
@@ -698,7 +698,7 @@ static int kmb_ocs_hcu_final(struct ahash_request *req)
static int kmb_ocs_hcu_finup(struct ahash_request *req)
{
- struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
rctx->sg_data_total = req->nbytes;
rctx->sg_data_offset = 0;
@@ -726,7 +726,7 @@ static int kmb_ocs_hcu_digest(struct ahash_request *req)
static int kmb_ocs_hcu_export(struct ahash_request *req, void *out)
{
- struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
/* Intermediate data is always stored and applied per request. */
memcpy(out, rctx, sizeof(*rctx));
@@ -736,7 +736,7 @@ static int kmb_ocs_hcu_export(struct ahash_request *req, void *out)
static int kmb_ocs_hcu_import(struct ahash_request *req, const void *in)
{
- struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
/* Intermediate data is always stored and applied per request. */
memcpy(rctx, in, sizeof(*rctx));
@@ -822,8 +822,8 @@ err_free_ahash:
/* Set request size and initialize tfm context. */
static void __cra_init(struct crypto_tfm *tfm, struct ocs_hcu_ctx *ctx)
{
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ocs_hcu_rctx));
+ crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
+ sizeof(struct ocs_hcu_rctx));
/* Init context to 0. */
memzero_explicit(ctx, sizeof(*ctx));
diff --git a/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h b/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
index 205eacac4a34..f8aedafdfdc5 100644
--- a/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
+++ b/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
@@ -534,7 +534,7 @@ union otx_cptx_vqx_misc_ena_w1s {
* Word0
* reserved_20_63:44 [63:20] Reserved.
* dbell_cnt:20 [19:0](R/W/H) Number of instruction queue 64-bit words to add
- * to the CPT instruction doorbell count. Readback value is the the
+ * to the CPT instruction doorbell count. Readback value is the
* current number of pending doorbell requests. If counter overflows
* CPT()_VQ()_MISC_INT[DBELL_DOVF] is set. To reset the count back to
* zero, write one to clear CPT()_VQ()_MISC_INT_ENA_W1C[DBELL_DOVF],
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
index df9c2b8747e6..c4250e5fcf8f 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -345,8 +345,7 @@ static void release_tar_archive(struct tar_arch_info_t *tar_arch)
kfree(curr);
}
- if (tar_arch->fw)
- release_firmware(tar_arch->fw);
+ release_firmware(tar_arch->fw);
kfree(tar_arch);
}
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
index 01c48ddc4eeb..80ba77c793a7 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
@@ -103,7 +103,7 @@ static inline int validate_hmac_cipher_null(struct otx_cpt_req_info *cpt_req)
req = container_of(cpt_req->areq, struct aead_request, base);
tfm = crypto_aead_reqtfm(req);
- rctx = aead_request_ctx(req);
+ rctx = aead_request_ctx_dma(req);
if (memcmp(rctx->fctx.hmac.s.hmac_calc,
rctx->fctx.hmac.s.hmac_recv,
crypto_aead_authsize(tfm)) != 0)
@@ -155,7 +155,7 @@ static void output_iv_copyback(struct crypto_async_request *areq)
ctx = crypto_skcipher_ctx(stfm);
if (ctx->cipher_type == OTX_CPT_AES_CBC ||
ctx->cipher_type == OTX_CPT_DES3_CBC) {
- rctx = skcipher_request_ctx(sreq);
+ rctx = skcipher_request_ctx_dma(sreq);
req_info = &rctx->cpt_req;
ivsize = crypto_skcipher_ivsize(stfm);
start = sreq->cryptlen - ivsize;
@@ -233,7 +233,7 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
u32 *argcnt)
{
struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
- struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
struct otx_cpt_enc_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -303,7 +303,7 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
u32 enc_iv_len)
{
- struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
u32 argcnt = 0;
int ret;
@@ -321,7 +321,7 @@ static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
static inline void create_output_list(struct skcipher_request *req,
u32 enc_iv_len)
{
- struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
u32 argcnt = 0;
@@ -340,7 +340,7 @@ static inline void create_output_list(struct skcipher_request *req,
static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
{
struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
- struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
struct pci_dev *pdev;
@@ -501,15 +501,16 @@ static int otx_cpt_enc_dec_init(struct crypto_skcipher *tfm)
* allocated since the cryptd daemon uses
* this memory for request_ctx information
*/
- crypto_skcipher_set_reqsize(tfm, sizeof(struct otx_cpt_req_ctx) +
- sizeof(struct skcipher_request));
+ crypto_skcipher_set_reqsize_dma(
+ tfm, sizeof(struct otx_cpt_req_ctx) +
+ sizeof(struct skcipher_request));
return 0;
}
static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
{
- struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
ctx->cipher_type = cipher_type;
ctx->mac_type = mac_type;
@@ -551,7 +552,7 @@ static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
}
}
- crypto_aead_set_reqsize(tfm, sizeof(struct otx_cpt_req_ctx));
+ crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx));
return 0;
}
@@ -603,7 +604,7 @@ static int otx_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
static void otx_cpt_aead_exit(struct crypto_aead *tfm)
{
- struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
kfree(ctx->ipad);
kfree(ctx->opad);
@@ -619,7 +620,7 @@ static void otx_cpt_aead_exit(struct crypto_aead *tfm)
static int otx_cpt_aead_set_authsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
switch (ctx->mac_type) {
case OTX_CPT_SHA1:
@@ -739,7 +740,7 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
static int aead_hmac_init(struct crypto_aead *cipher)
{
- struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
int state_size = crypto_shash_statesize(ctx->hashalg);
int ds = crypto_shash_digestsize(ctx->hashalg);
int bs = crypto_shash_blocksize(ctx->hashalg);
@@ -837,7 +838,7 @@ static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
- struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
struct crypto_authenc_key_param *param;
int enckeylen = 0, authkeylen = 0;
struct rtattr *rta = (void *)key;
@@ -896,7 +897,7 @@ static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
- struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
struct crypto_authenc_key_param *param;
struct rtattr *rta = (void *)key;
int enckeylen = 0;
@@ -932,7 +933,7 @@ static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
- struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
/*
* For aes gcm we expect to get encryption key (16, 24, 32 bytes)
@@ -965,9 +966,9 @@ static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
u32 *argcnt)
{
- struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
int mac_len = crypto_aead_authsize(tfm);
@@ -1050,9 +1051,9 @@ static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
static inline u32 create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
u32 enc)
{
- struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
@@ -1076,7 +1077,7 @@ static inline u32 create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
static inline u32 create_aead_input_list(struct aead_request *req, u32 enc)
{
- struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
u32 inputlen = req->cryptlen + req->assoclen;
u32 status, argcnt = 0;
@@ -1093,7 +1094,7 @@ static inline u32 create_aead_input_list(struct aead_request *req, u32 enc)
static inline u32 create_aead_output_list(struct aead_request *req, u32 enc,
u32 mac_len)
{
- struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
u32 argcnt = 0, outputlen = 0;
@@ -1111,7 +1112,7 @@ static inline u32 create_aead_output_list(struct aead_request *req, u32 enc,
static inline u32 create_aead_null_input_list(struct aead_request *req,
u32 enc, u32 mac_len)
{
- struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
u32 inputlen, argcnt = 0;
@@ -1130,7 +1131,7 @@ static inline u32 create_aead_null_input_list(struct aead_request *req,
static inline u32 create_aead_null_output_list(struct aead_request *req,
u32 enc, u32 mac_len)
{
- struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
struct scatterlist *dst;
u8 *ptr = NULL;
@@ -1217,7 +1218,7 @@ error:
static u32 cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
{
- struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct pci_dev *pdev;
@@ -1409,7 +1410,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_driver_name = "cpt_hmac_sha1_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
- .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
@@ -1428,7 +1429,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_driver_name = "cpt_hmac_sha256_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
- .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
@@ -1447,7 +1448,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_driver_name = "cpt_hmac_sha384_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
- .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
@@ -1466,7 +1467,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
- .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
@@ -1485,7 +1486,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_driver_name = "cpt_hmac_sha1_ecb_null",
.cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
- .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
@@ -1504,7 +1505,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_driver_name = "cpt_hmac_sha256_ecb_null",
.cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
- .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
@@ -1523,7 +1524,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_driver_name = "cpt_hmac_sha384_ecb_null",
.cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
- .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
@@ -1542,7 +1543,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_driver_name = "cpt_hmac_sha512_ecb_null",
.cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
- .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
@@ -1561,7 +1562,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_driver_name = "cpt_rfc4106_gcm_aes",
.cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
- .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
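
The octeontx hunks (and the octeontx2 ones that follow) are the AEAD side of the same DMA-alignment series: a tfm context obtained with crypto_aead_ctx_dma() must reserve CRYPTO_DMA_PADDING in cra_ctxsize so the aligned pointer still lands inside the allocation, and request contexts move to crypto_aead_set_reqsize_dma()/aead_request_ctx_dma(). The pairing in miniature (hypothetical algorithm, not the CPT driver):

#include <crypto/internal/aead.h>
#include <crypto/algapi.h>
#include <linux/string.h>

struct my_aead_ctx {
	u8 key[32];		/* handed to the engine by DMA */
};

struct my_aead_req_ctx {
	u8 iv[16];
};

static int my_aead_init(struct crypto_aead *tfm)
{
	struct my_aead_ctx *ctx = crypto_aead_ctx_dma(tfm); /* aligned view */

	memset(ctx, 0, sizeof(*ctx));
	crypto_aead_set_reqsize_dma(tfm, sizeof(struct my_aead_req_ctx));
	return 0;
}

static struct aead_alg my_alg = {
	.init = my_aead_init,
	.base = {
		/* pad so the DMA-aligned ctx offset still fits */
		.cra_ctxsize = sizeof(struct my_aead_ctx) + CRYPTO_DMA_PADDING,
	},
};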
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
index 67530e90bbfe..30b423605c9c 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -87,7 +87,7 @@ static inline int validate_hmac_cipher_null(struct otx2_cpt_req_info *cpt_req)
req = container_of(cpt_req->areq, struct aead_request, base);
tfm = crypto_aead_reqtfm(req);
- rctx = aead_request_ctx(req);
+ rctx = aead_request_ctx_dma(req);
if (memcmp(rctx->fctx.hmac.s.hmac_calc,
rctx->fctx.hmac.s.hmac_recv,
crypto_aead_authsize(tfm)) != 0)
@@ -137,7 +137,7 @@ static void output_iv_copyback(struct crypto_async_request *areq)
ctx = crypto_skcipher_ctx(stfm);
if (ctx->cipher_type == OTX2_CPT_AES_CBC ||
ctx->cipher_type == OTX2_CPT_DES3_CBC) {
- rctx = skcipher_request_ctx(sreq);
+ rctx = skcipher_request_ctx_dma(sreq);
req_info = &rctx->cpt_req;
ivsize = crypto_skcipher_ivsize(stfm);
start = sreq->cryptlen - ivsize;
@@ -219,7 +219,7 @@ static inline int create_ctx_hdr(struct skcipher_request *req, u32 enc,
u32 *argcnt)
{
struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
- struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
@@ -288,7 +288,7 @@ static inline int create_ctx_hdr(struct skcipher_request *req, u32 enc,
static inline int create_input_list(struct skcipher_request *req, u32 enc,
u32 enc_iv_len)
{
- struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
u32 argcnt = 0;
int ret;
@@ -306,7 +306,7 @@ static inline int create_input_list(struct skcipher_request *req, u32 enc,
static inline void create_output_list(struct skcipher_request *req,
u32 enc_iv_len)
{
- struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
u32 argcnt = 0;
@@ -325,7 +325,7 @@ static inline void create_output_list(struct skcipher_request *req,
static int skcipher_do_fallback(struct skcipher_request *req, bool is_enc)
{
struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
- struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
int ret;
@@ -348,7 +348,7 @@ static int skcipher_do_fallback(struct skcipher_request *req, bool is_enc)
static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
{
struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
- struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
@@ -537,8 +537,9 @@ static int otx2_cpt_enc_dec_init(struct crypto_skcipher *stfm)
* allocated since the cryptd daemon uses
* this memory for request_ctx information
*/
- crypto_skcipher_set_reqsize(stfm, sizeof(struct otx2_cpt_req_ctx) +
- sizeof(struct skcipher_request));
+ crypto_skcipher_set_reqsize_dma(
+ stfm, sizeof(struct otx2_cpt_req_ctx) +
+ sizeof(struct skcipher_request));
return cpt_skcipher_fallback_init(ctx, alg);
}
@@ -572,7 +573,7 @@ static int cpt_aead_fallback_init(struct otx2_cpt_aead_ctx *ctx,
static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
{
- struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(atfm);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(atfm);
struct crypto_tfm *tfm = crypto_aead_tfm(atfm);
struct crypto_alg *alg = tfm->__crt_alg;
@@ -629,7 +630,7 @@ static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
ctx->enc_align_len = 1;
break;
}
- crypto_aead_set_reqsize(atfm, sizeof(struct otx2_cpt_req_ctx));
+ crypto_aead_set_reqsize_dma(atfm, sizeof(struct otx2_cpt_req_ctx));
return cpt_aead_fallback_init(ctx, alg);
}
@@ -681,7 +682,7 @@ static int otx2_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
{
- struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
kfree(ctx->ipad);
kfree(ctx->opad);
@@ -698,7 +699,7 @@ static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
static int otx2_cpt_aead_gcm_set_authsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
if (crypto_rfc4106_check_authsize(authsize))
return -EINVAL;
@@ -722,7 +723,7 @@ static int otx2_cpt_aead_set_authsize(struct crypto_aead *tfm,
static int otx2_cpt_aead_null_set_authsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
ctx->is_trunc_hmac = true;
tfm->authsize = authsize;
@@ -794,7 +795,7 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
static int aead_hmac_init(struct crypto_aead *cipher)
{
- struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
int state_size = crypto_shash_statesize(ctx->hashalg);
int ds = crypto_shash_digestsize(ctx->hashalg);
int bs = crypto_shash_blocksize(ctx->hashalg);
@@ -892,7 +893,7 @@ static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
- struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
struct crypto_authenc_key_param *param;
int enckeylen = 0, authkeylen = 0;
struct rtattr *rta = (void *)key;
@@ -944,7 +945,7 @@ static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
- struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
struct crypto_authenc_key_param *param;
struct rtattr *rta = (void *)key;
int enckeylen = 0;
@@ -979,7 +980,7 @@ static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
- struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
/*
* For aes gcm we expect to get encryption key (16, 24, 32 bytes)
@@ -1012,9 +1013,9 @@ static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
static inline int create_aead_ctx_hdr(struct aead_request *req, u32 enc,
u32 *argcnt)
{
- struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
int mac_len = crypto_aead_authsize(tfm);
@@ -1103,9 +1104,9 @@ static inline int create_aead_ctx_hdr(struct aead_request *req, u32 enc,
static inline void create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
u32 enc)
{
- struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
@@ -1127,7 +1128,7 @@ static inline void create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
static inline int create_aead_input_list(struct aead_request *req, u32 enc)
{
- struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
u32 inputlen = req->cryptlen + req->assoclen;
u32 status, argcnt = 0;
@@ -1144,7 +1145,7 @@ static inline int create_aead_input_list(struct aead_request *req, u32 enc)
static inline void create_aead_output_list(struct aead_request *req, u32 enc,
u32 mac_len)
{
- struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
u32 argcnt = 0, outputlen = 0;
@@ -1160,7 +1161,7 @@ static inline void create_aead_output_list(struct aead_request *req, u32 enc,
static inline void create_aead_null_input_list(struct aead_request *req,
u32 enc, u32 mac_len)
{
- struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
u32 inputlen, argcnt = 0;
@@ -1177,7 +1178,7 @@ static inline void create_aead_null_input_list(struct aead_request *req,
static inline int create_aead_null_output_list(struct aead_request *req,
u32 enc, u32 mac_len)
{
- struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
struct scatterlist *dst;
u8 *ptr = NULL;
@@ -1257,9 +1258,9 @@ error_free:
static int aead_do_fallback(struct aead_request *req, bool is_enc)
{
- struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(aead);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(aead);
int ret;
if (ctx->fbk_cipher) {
@@ -1281,10 +1282,10 @@ static int aead_do_fallback(struct aead_request *req, bool is_enc)
static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
{
- struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
struct pci_dev *pdev;
int status, cpu_num;
@@ -1458,7 +1459,7 @@ static struct aead_alg otx2_cpt_aeads[] = { {
.cra_driver_name = "cpt_hmac_sha1_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
@@ -1477,7 +1478,7 @@ static struct aead_alg otx2_cpt_aeads[] = { {
.cra_driver_name = "cpt_hmac_sha256_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
@@ -1496,7 +1497,7 @@ static struct aead_alg otx2_cpt_aeads[] = { {
.cra_driver_name = "cpt_hmac_sha384_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
@@ -1515,7 +1516,7 @@ static struct aead_alg otx2_cpt_aeads[] = { {
.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
@@ -1534,7 +1535,7 @@ static struct aead_alg otx2_cpt_aeads[] = { {
.cra_driver_name = "cpt_hmac_sha1_ecb_null",
.cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_ASYNC,
- .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
@@ -1553,7 +1554,7 @@ static struct aead_alg otx2_cpt_aeads[] = { {
.cra_driver_name = "cpt_hmac_sha256_ecb_null",
.cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_ASYNC,
- .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
@@ -1572,7 +1573,7 @@ static struct aead_alg otx2_cpt_aeads[] = { {
.cra_driver_name = "cpt_hmac_sha384_ecb_null",
.cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_ASYNC,
- .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
@@ -1591,7 +1592,7 @@ static struct aead_alg otx2_cpt_aeads[] = { {
.cra_driver_name = "cpt_hmac_sha512_ecb_null",
.cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_ASYNC,
- .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
@@ -1610,7 +1611,7 @@ static struct aead_alg otx2_cpt_aeads[] = { {
.cra_driver_name = "cpt_rfc4106_gcm_aes",
.cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
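
The otx2 hunks above are one instance of a pattern applied across this series: a driver that DMAs directly into its transform or request context stops assuming kmalloc alignment is sufficient, pads cra_ctxsize with CRYPTO_DMA_PADDING, and fetches the context through the _dma accessors so the returned pointer meets the platform's DMA alignment. A minimal sketch of the same pattern for a hypothetical AEAD driver (my_aead_ctx and my_setkey are illustrative names, not part of this patch):

#include <crypto/internal/aead.h>	/* crypto_aead_ctx_dma() */
#include <crypto/algapi.h>		/* CRYPTO_DMA_PADDING */
#include <linux/string.h>

struct my_aead_ctx {
	u8 key[32];			/* buffer the device DMAs from */
	unsigned int keylen;
};

static int my_setkey(struct crypto_aead *tfm, const u8 *key,
		     unsigned int keylen)
{
	/* DMA-aligned context pointer; only valid because cra_ctxsize
	 * below reserves CRYPTO_DMA_PADDING bytes for realignment.
	 */
	struct my_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

	if (keylen > sizeof(ctx->key))
		return -EINVAL;
	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	return 0;
}

static struct aead_alg my_alg = {
	.setkey = my_setkey,
	.base = {
		.cra_ctxsize = sizeof(struct my_aead_ctx) +
			       CRYPTO_DMA_PADDING,
	},
};

Request contexts get the same treatment, which is what the rctx changes above do: the reqsize is declared through the DMA-aware setter at init time and read back with aead_request_ctx_dma() on each request.
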
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 31e24df18877..20d0dcd50344 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1229,6 +1229,7 @@ struct n2_hash_tmpl {
const u8 *hash_init;
u8 hw_op_hashsz;
u8 digest_size;
+ u8 statesize;
u8 block_size;
u8 auth_type;
u8 hmac_type;
@@ -1260,6 +1261,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.hmac_type = AUTH_TYPE_HMAC_MD5,
.hw_op_hashsz = MD5_DIGEST_SIZE,
.digest_size = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct md5_state),
.block_size = MD5_HMAC_BLOCK_SIZE },
{ .name = "sha1",
.hash_zero = sha1_zero_message_hash,
@@ -1268,6 +1270,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.hmac_type = AUTH_TYPE_HMAC_SHA1,
.hw_op_hashsz = SHA1_DIGEST_SIZE,
.digest_size = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
.block_size = SHA1_BLOCK_SIZE },
{ .name = "sha256",
.hash_zero = sha256_zero_message_hash,
@@ -1276,6 +1279,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.hmac_type = AUTH_TYPE_HMAC_SHA256,
.hw_op_hashsz = SHA256_DIGEST_SIZE,
.digest_size = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
.block_size = SHA256_BLOCK_SIZE },
{ .name = "sha224",
.hash_zero = sha224_zero_message_hash,
@@ -1284,6 +1288,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.hmac_type = AUTH_TYPE_RESERVED,
.hw_op_hashsz = SHA256_DIGEST_SIZE,
.digest_size = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
.block_size = SHA224_BLOCK_SIZE },
};
#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
@@ -1424,6 +1429,7 @@ static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
halg = &ahash->halg;
halg->digestsize = tmpl->digest_size;
+ halg->statesize = tmpl->statesize;
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
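
halg->statesize declares how many bytes .export() writes and .import() reads; the n2 templates never set it, so it stayed zero and the state save/restore path could not work for these algorithms. The fix records the size of the generic state struct each algorithm exports, along these lines (a condensed sketch of the registration step, not the driver's full code):

#include <crypto/internal/hash.h>
#include <crypto/sha2.h>

/* condensed sketch: declare the exported state size at registration */
static int register_sha256_ahash(struct ahash_alg *ahash)
{
	struct hash_alg_common *halg = &ahash->halg;

	halg->digestsize = SHA256_DIGEST_SIZE;
	/* .export()/.import() move a struct sha256_state */
	halg->statesize = sizeof(struct sha256_state);
	return crypto_register_ahash(ahash);
}
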
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h
index b66f19ac600f..7590bfb24d79 100644
--- a/drivers/crypto/nx/nx-842.h
+++ b/drivers/crypto/nx/nx-842.h
@@ -3,10 +3,10 @@
#ifndef __NX_842_H__
#define __NX_842_H__
+#include <crypto/algapi.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/crypto.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/io.h>
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 655a7f5a406a..cbeda59c6b19 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2114,7 +2114,7 @@ static int omap_sham_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get sync: %d\n", err);
goto err_pm;
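
pm_runtime_get_sync() raises the device usage counter even when the resume fails, so every error path owes a pm_runtime_put_noidle(); pm_runtime_resume_and_get() drops the reference itself on failure, which removes a whole class of refcount leaks from probe paths like this one. The two call shapes, side by side in a self-contained sketch:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int resume_device(struct device *dev)
{
	int err;

	/* old pattern: the usage count is raised even on failure,
	 * so the error path must put the reference itself:
	 *
	 *	err = pm_runtime_get_sync(dev);
	 *	if (err < 0) {
	 *		pm_runtime_put_noidle(dev);
	 *		return err;
	 *	}
	 */

	/* new pattern: the helper releases the reference on failure */
	err = pm_runtime_resume_and_get(dev);
	if (err < 0)
		return err;

	return 0;
}
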
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
index fda5f699ff57..834a705180c0 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -4,6 +4,7 @@
#include <adf_accel_devices.h>
#include <adf_cfg.h>
#include <adf_common_drv.h>
+#include <adf_gen4_dc.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
@@ -357,10 +358,11 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
- hw_data->dev_config = adf_crypto_dev_config;
+ hw_data->dev_config = adf_gen4_dev_config;
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+ adf_gen4_init_dc_ops(&hw_data->dc_ops);
}
void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
index 9d49248931f6..e98428ba78e2 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -70,6 +70,6 @@ enum icp_qat_4xxx_slice_mask {
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data);
-int adf_crypto_dev_config(struct adf_accel_dev *accel_dev);
+int adf_gen4_dev_config(struct adf_accel_dev *accel_dev);
#endif
diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c
index 2f212561acc4..b3a4c7b23864 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c
@@ -9,6 +9,7 @@
#include <adf_common_drv.h>
#include "adf_4xxx_hw_data.h"
+#include "qat_compression.h"
#include "qat_crypto.h"
#include "adf_transport_access_macros.h"
@@ -19,6 +20,16 @@ static const struct pci_device_id adf_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+enum configs {
+ DEV_CFG_CY = 0,
+ DEV_CFG_DC,
+};
+
+static const char * const services_operations[] = {
+ ADF_CFG_CY,
+ ADF_CFG_DC,
+};
+
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
if (accel_dev->hw_device) {
@@ -53,7 +64,7 @@ static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev)
return 0;
}
-int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
+static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
int banks = GET_MAX_BANKS(accel_dev);
@@ -68,14 +79,6 @@ int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
else
instances = 0;
- ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
- if (ret)
- goto err;
-
- ret = adf_cfg_section_add(accel_dev, "Accelerator0");
- if (ret)
- goto err;
-
for (i = 0; i < instances; i++) {
val = i;
bank = i * 2;
@@ -155,10 +158,128 @@ int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
if (ret)
goto err;
- set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+ val = 0;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC);
+ if (ret)
+ goto err;
+
return 0;
err:
- dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
+ dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
+ return ret;
+}
+
+static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
+{
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ int banks = GET_MAX_BANKS(accel_dev);
+ int cpus = num_online_cpus();
+ unsigned long val;
+ int instances;
+ int ret;
+ int i;
+
+ if (adf_hw_dev_has_compression(accel_dev))
+ instances = min(cpus, banks);
+ else
+ instances = 0;
+
+ for (i = 0; i < instances; i++) {
+ val = i;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 512;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 1;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = ADF_COALESCING_DEF_TIME;
+ snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+ }
+
+ val = i;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+ &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
+ return ret;
+}
+
+int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
+{
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ int ret;
+
+ ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
+ if (ret)
+ goto err;
+
+ ret = adf_cfg_section_add(accel_dev, "Accelerator0");
+ if (ret)
+ goto err;
+
+ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, services);
+ if (ret)
+ goto err;
+
+ ret = sysfs_match_string(services_operations, services);
+ if (ret < 0)
+ goto err;
+
+ switch (ret) {
+ case DEV_CFG_CY:
+ ret = adf_crypto_dev_config(accel_dev);
+ break;
+ case DEV_CFG_DC:
+ ret = adf_comp_dev_config(accel_dev);
+ break;
+ }
+
+ if (ret)
+ goto err;
+
+ set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+ return ret;
+
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
return ret;
}
@@ -261,6 +382,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
if (!hw_data->accel_capabilities_mask) {
dev_err(&pdev->dev, "Failed to get capabilities mask.\n");
+ ret = -EINVAL;
goto out_err;
}
@@ -293,7 +415,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_disable_aer;
- ret = adf_crypto_dev_config(accel_dev);
+ ret = hw_data->dev_config(accel_dev);
if (ret)
goto out_err_disable_aer;
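
With this change the 4xxx probe no longer hard-codes crypto configuration: it calls the per-generation dev_config() hook, and adf_gen4_dev_config() picks a service by matching the ServicesEnabled config string against a table, so adding a service is one table entry plus one switch arm. The matching step is plain sysfs_match_string(), roughly (a sketch mirroring the hunk above; pick_service is an illustrative wrapper):

#include <linux/string.h>
#include "adf_cfg_strings.h"

enum configs { DEV_CFG_CY = 0, DEV_CFG_DC };

static const char * const services_operations[] = {
	ADF_CFG_CY,	/* index DEV_CFG_CY: configure crypto */
	ADF_CFG_DC,	/* index DEV_CFG_DC: configure compression */
};

static int pick_service(const char *services)
{
	/* returns the array index of the matching string,
	 * or -EINVAL when the configured value is unknown
	 */
	return sysfs_match_string(services_operations, services);
}
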
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index 50d5afa26a9b..c55c51a07677 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -2,6 +2,8 @@
/* Copyright(c) 2014 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_c3xxx_hw_data.h"
@@ -124,9 +126,11 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->reset_device = adf_reset_flr;
hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
hw_data->disable_iov = adf_disable_sriov;
+ hw_data->dev_config = adf_gen2_dev_config;
adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen2_init_dc_ops(&hw_data->dc_ops);
}
void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index 2aef0bb791df..1f4fbf4562b2 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -201,7 +201,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err_disable_aer;
}
- ret = qat_crypto_dev_config(accel_dev);
+ ret = hw_data->dev_config(accel_dev);
if (ret)
goto out_err_disable_aer;
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index a9fbe57b32ae..84d9486e04de 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -2,6 +2,8 @@
/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include <adf_pfvf_vf_msg.h>
@@ -86,9 +88,11 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
hw_data->get_sku = get_sku;
hw_data->enable_ints = adf_vf_void_noop;
hw_data->dev_class->instances++;
+ hw_data->dev_config = adf_gen2_dev_config;
adf_devmgr_update_class_index(hw_data);
adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen2_init_dc_ops(&hw_data->dc_ops);
}
void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index fa18d8009f53..cf4ef83e186f 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -177,8 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_dev_shutdown;
- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
ret = adf_dev_start(accel_dev);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
index c00386fe6587..b7aa19d2fa80 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
@@ -2,6 +2,8 @@
/* Copyright(c) 2014 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_c62x_hw_data.h"
@@ -126,9 +128,11 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->reset_device = adf_reset_flr;
hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
hw_data->disable_iov = adf_disable_sriov;
+ hw_data->dev_config = adf_gen2_dev_config;
adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen2_init_dc_ops(&hw_data->dc_ops);
}
void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data)
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 56163083f161..4ccaf298250c 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -201,7 +201,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err_disable_aer;
}
- ret = qat_crypto_dev_config(accel_dev);
+ ret = hw_data->dev_config(accel_dev);
if (ret)
goto out_err_disable_aer;
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
index 0282038fca54..751d7aa57fc7 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -2,6 +2,8 @@
/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include <adf_pfvf_vf_msg.h>
@@ -86,9 +88,11 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
hw_data->get_sku = get_sku;
hw_data->enable_ints = adf_vf_void_noop;
hw_data->dev_class->instances++;
+ hw_data->dev_config = adf_gen2_dev_config;
adf_devmgr_update_class_index(hw_data);
adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen2_init_dc_ops(&hw_data->dc_ops);
}
void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index 686ec752d0e9..0e642c94b929 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -177,8 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_dev_shutdown;
- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
ret = adf_dev_start(accel_dev);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index 80919cfcc29d..1fb8d50f509f 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -12,14 +12,20 @@ intel_qat-objs := adf_cfg.o \
adf_hw_arbiter.o \
adf_sysfs.o \
adf_gen2_hw_data.o \
+ adf_gen2_config.o \
adf_gen4_hw_data.o \
adf_gen4_pm.o \
+ adf_gen2_dc.o \
+ adf_gen4_dc.o \
qat_crypto.o \
+ qat_compression.o \
+ qat_comp_algs.o \
qat_algs.o \
qat_asym_algs.o \
qat_algs_send.o \
qat_uclo.o \
- qat_hal.o
+ qat_hal.o \
+ qat_bl.o
intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 0a55a4f34dcf..284f5aad3ee0 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -163,6 +163,10 @@ struct adf_pfvf_ops {
u32 pfvf_offset, u8 compat_ver);
};
+struct adf_dc_ops {
+ void (*build_deflate_ctx)(void *ctx);
+};
+
struct adf_hw_device_data {
struct adf_hw_device_class *dev_class;
u32 (*get_accel_mask)(struct adf_hw_device_data *self);
@@ -202,6 +206,7 @@ struct adf_hw_device_data {
int (*dev_config)(struct adf_accel_dev *accel_dev);
struct adf_pfvf_ops pfvf_ops;
struct adf_hw_csr_ops csr_ops;
+ struct adf_dc_ops dc_ops;
const char *fw_name;
const char *fw_mmp_name;
u32 fuses;
@@ -247,6 +252,7 @@ struct adf_hw_device_data {
#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
+#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
struct adf_admin_comms;
@@ -266,13 +272,21 @@ struct adf_accel_vf_info {
u8 vf_compat_ver;
};
+struct adf_dc_data {
+ u8 *ovf_buff;
+ size_t ovf_buff_sz;
+ dma_addr_t ovf_buff_p;
+};
+
struct adf_accel_dev {
struct adf_etr_data *transport;
struct adf_hw_device_data *hw_device;
struct adf_cfg_device_data *cfg;
struct adf_fw_loader_data *fw_loader;
struct adf_admin_comms *admin;
+ struct adf_dc_data *dc_data;
struct list_head crypto_list;
+ struct list_head compression_list;
unsigned long status;
atomic_t ref_count;
struct dentry *debugfs_dir;
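
struct adf_dc_ops is the compression counterpart of the existing csr/pfvf ops tables: each hardware generation supplies build_deflate_ctx(), and common code reaches it through GET_DC_OPS(). Both builders added later in this patch fill a compress request template immediately followed by a decompress one, so the caller sizes its buffer for two templates. A sketch under that assumption (alloc_deflate_templates is illustrative, not the driver's helper):

#include <linux/slab.h>
#include "adf_accel_devices.h"
#include "icp_qat_fw_comp.h"

/* sketch: allocate and populate the two back-to-back firmware
 * request templates (compress, then decompress) for one instance
 */
static void *alloc_deflate_templates(struct adf_accel_dev *accel_dev)
{
	struct icp_qat_fw_comp_req *templates;

	templates = kzalloc(2 * sizeof(*templates), GFP_KERNEL);
	if (!templates)
		return NULL;

	GET_DC_OPS(accel_dev)->build_deflate_ctx(templates);
	return templates;
}
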
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
index 655248dbf962..5d8c3bdb258c 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
@@ -20,6 +20,7 @@
#define ADF_ETRMGR_BANK "Bank"
#define ADF_RING_SYM_BANK_NUM "BankSymNumber"
#define ADF_RING_ASYM_BANK_NUM "BankAsymNumber"
+#define ADF_RING_DC_BANK_NUM "BankDcNumber"
#define ADF_CY "Cy"
#define ADF_DC "Dc"
#define ADF_CFG_DC "dc"
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 7bb477c3ce25..7189265573c0 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -110,7 +110,6 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev);
void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev);
int qat_crypto_register(void);
int qat_crypto_unregister(void);
-int qat_crypto_dev_config(struct adf_accel_dev *accel_dev);
int qat_crypto_vf_dev_config(struct adf_accel_dev *accel_dev);
struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
void qat_crypto_put_instance(struct qat_crypto_instance *inst);
@@ -121,6 +120,14 @@ void qat_algs_unregister(void);
int qat_asym_algs_register(void);
void qat_asym_algs_unregister(void);
+struct qat_compression_instance *qat_compression_get_instance_node(int node);
+void qat_compression_put_instance(struct qat_compression_instance *inst);
+int qat_compression_register(void);
+int qat_compression_unregister(void);
+int qat_comp_algs_register(void);
+void qat_comp_algs_unregister(void);
+void qat_comp_alg_callback(void *resp);
+
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 82b69e1f725b..9190532b27eb 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -438,8 +438,13 @@ static int __init adf_register_ctl_device_driver(void)
if (qat_crypto_register())
goto err_crypto_register;
+ if (qat_compression_register())
+ goto err_compression_register;
+
return 0;
+err_compression_register:
+ qat_crypto_unregister();
err_crypto_register:
adf_exit_vf_wq();
err_vf_wq:
@@ -463,6 +468,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
adf_exit_vf_wq();
adf_exit_pf_wq();
qat_crypto_unregister();
+ qat_compression_unregister();
adf_clean_vf_map(false);
mutex_destroy(&adf_ctl_lock);
}
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_config.c b/drivers/crypto/qat/qat_common/adf_gen2_config.c
new file mode 100644
index 000000000000..eeb30da7587a
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen2_config.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_gen2_config.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+#include "qat_compression.h"
+#include "adf_transport_access_macros.h"
+
+static int adf_gen2_crypto_dev_config(struct adf_accel_dev *accel_dev)
+{
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ int banks = GET_MAX_BANKS(accel_dev);
+ int cpus = num_online_cpus();
+ unsigned long val;
+ int instances;
+ int ret;
+ int i;
+
+ if (adf_hw_dev_has_crypto(accel_dev))
+ instances = min(cpus, banks);
+ else
+ instances = 0;
+
+ for (i = 0; i < instances; i++) {
+ val = i;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
+ i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+ val = 128;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 512;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 2;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 8;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 10;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = ADF_COALESCING_DEF_TIME;
+ snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+ }
+
+ val = i;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+ &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ return ret;
+
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
+ return ret;
+}
+
+static int adf_gen2_comp_dev_config(struct adf_accel_dev *accel_dev)
+{
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ int banks = GET_MAX_BANKS(accel_dev);
+ int cpus = num_online_cpus();
+ unsigned long val;
+ int instances;
+ int ret;
+ int i;
+
+ if (adf_hw_dev_has_compression(accel_dev))
+ instances = min(cpus, banks);
+ else
+ instances = 0;
+
+ for (i = 0; i < instances; i++) {
+ val = i;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 512;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 6;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 14;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+ }
+
+ val = i;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC);
+ if (ret)
+ return ret;
+
+ return ret;
+
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
+ return ret;
+}
+
+/**
+ * adf_gen2_dev_config() - create dev config required to create instances
+ *
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function creates the device configuration required to create instances.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_gen2_dev_config(struct adf_accel_dev *accel_dev)
+{
+ int ret;
+
+ ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
+ if (ret)
+ goto err;
+
+ ret = adf_cfg_section_add(accel_dev, "Accelerator0");
+ if (ret)
+ goto err;
+
+ ret = adf_gen2_crypto_dev_config(accel_dev);
+ if (ret)
+ goto err;
+
+ ret = adf_gen2_comp_dev_config(accel_dev);
+ if (ret)
+ goto err;
+
+ set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+ return ret;
+
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_dev_config);
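
The ring numbers in these tables encode the gen2 bank layout: a bank exposes 16 rings and the driver pairs ring N (TX) with ring N + 8 (RX), hence asym on 0/8, sym on 2/10 and compression on 6/14. The rule written out (GEN2_TX_RX_GAP is an illustrative name for the driver's fixed gap, assumed to be 8 on gen2 parts):

/* gen2: 16 rings per bank, RX ring = TX ring + 8 */
#define GEN2_TX_RX_GAP	8	/* illustrative; mirrors the hw gap */

static unsigned int rx_ring_for(unsigned int tx_ring)
{
	return tx_ring + GEN2_TX_RX_GAP;	/* e.g. dc: 6 -> 14 */
}
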
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_config.h b/drivers/crypto/qat/qat_common/adf_gen2_config.h
new file mode 100644
index 000000000000..4bf9da2de68a
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen2_config.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef ADF_GEN2_CONFIG_H_
+#define ADF_GEN2_CONFIG_H_
+
+#include "adf_accel_devices.h"
+
+int adf_gen2_dev_config(struct adf_accel_dev *accel_dev);
+
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_dc.c b/drivers/crypto/qat/qat_common/adf_gen2_dc.c
new file mode 100644
index 000000000000..47261b1c1da6
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen2_dc.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include "adf_accel_devices.h"
+#include "adf_gen2_dc.h"
+#include "icp_qat_fw_comp.h"
+
+static void qat_comp_build_deflate_ctx(void *ctx)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = (struct icp_qat_fw_comp_req *)ctx;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
+ struct icp_qat_fw_comp_cd_hdr *comp_cd_ctrl = &req_tmpl->comp_cd_ctrl;
+
+ memset(req_tmpl, 0, sizeof(*req_tmpl));
+ header->hdr_flags =
+ ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
+ header->comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
+ QAT_COMN_PTR_TYPE_SGL);
+ header->serv_specif_flags =
+ ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
+ ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+ cd_pars->u.sl.comp_slice_cfg_word[0] =
+ ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
+ ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
+ ICP_QAT_HW_COMPRESSION_DEPTH_1,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+ req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
+ req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
+ req_pars->req_par_flags =
+ ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
+ ICP_QAT_FW_COMP_EOP,
+ ICP_QAT_FW_COMP_BFINAL,
+ ICP_QAT_FW_COMP_CNV,
+ ICP_QAT_FW_COMP_CNV_RECOVERY,
+ ICP_QAT_FW_COMP_NO_CNV_DFX,
+ ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
+ ICP_QAT_FW_COMP_NO_XXHASH_ACC,
+ ICP_QAT_FW_COMP_CNV_ERROR_NONE,
+ ICP_QAT_FW_COMP_NO_APPEND_CRC,
+ ICP_QAT_FW_COMP_NO_DROP_DATA);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+ ICP_QAT_FW_COMN_CURR_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_COMP);
+
+ /* Fill second half of the template for decompression */
+ memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
+ req_tmpl++;
+ header = &req_tmpl->comn_hdr;
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+ cd_pars = &req_tmpl->cd_pars;
+ cd_pars->u.sl.comp_slice_cfg_word[0] =
+ ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
+ ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
+ ICP_QAT_HW_COMPRESSION_DEPTH_1,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+}
+
+void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops)
+{
+ dc_ops->build_deflate_ctx = qat_comp_build_deflate_ctx;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_dc_ops);
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_dc.h b/drivers/crypto/qat/qat_common/adf_gen2_dc.h
new file mode 100644
index 000000000000..6eae023354d7
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen2_dc.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef ADF_GEN2_DC_H
+#define ADF_GEN2_DC_H
+
+#include "adf_accel_devices.h"
+
+void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops);
+
+#endif /* ADF_GEN2_DC_H */
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_dc.c b/drivers/crypto/qat/qat_common/adf_gen4_dc.c
new file mode 100644
index 000000000000..5859238e37de
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen4_dc.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_comp.h"
+#include "icp_qat_hw_20_comp.h"
+#include "adf_gen4_dc.h"
+
+static void qat_comp_build_deflate(void *ctx)
+{
+ struct icp_qat_fw_comp_req *req_tmpl =
+ (struct icp_qat_fw_comp_req *)ctx;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
+ struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = {0};
+ struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = {0};
+ struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = {0};
+ u32 upper_val;
+ u32 lower_val;
+
+ memset(req_tmpl, 0, sizeof(*req_tmpl));
+ header->hdr_flags =
+ ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
+ header->comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
+ QAT_COMN_PTR_TYPE_SGL);
+ header->serv_specif_flags =
+ ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
+ ICP_QAT_FW_COMP_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+ hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;
+ hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
+ hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
+ hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
+ hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW;
+ hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED;
+ hw_comp_upper_csr.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL;
+ hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL;
+
+ upper_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr);
+ lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
+ cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val;
+
+ req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
+ req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
+ req_pars->req_par_flags =
+ ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
+ ICP_QAT_FW_COMP_EOP,
+ ICP_QAT_FW_COMP_BFINAL,
+ ICP_QAT_FW_COMP_CNV,
+ ICP_QAT_FW_COMP_CNV_RECOVERY,
+ ICP_QAT_FW_COMP_NO_CNV_DFX,
+ ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
+ ICP_QAT_FW_COMP_NO_XXHASH_ACC,
+ ICP_QAT_FW_COMP_CNV_ERROR_NONE,
+ ICP_QAT_FW_COMP_NO_APPEND_CRC,
+ ICP_QAT_FW_COMP_NO_DROP_DATA);
+
+ /* Fill second half of the template for decompression */
+ memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
+ req_tmpl++;
+ header = &req_tmpl->comn_hdr;
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+ cd_pars = &req_tmpl->cd_pars;
+
+ hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
+ lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr);
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
+ cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
+}
+
+void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops)
+{
+ dc_ops->build_deflate_ctx = qat_comp_build_deflate;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops);
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_dc.h b/drivers/crypto/qat/qat_common/adf_gen4_dc.h
new file mode 100644
index 000000000000..0b1a6774412e
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen4_dc.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef ADF_GEN4_DC_H
+#define ADF_GEN4_DC_H
+
+#include "adf_accel_devices.h"
+
+void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops);
+
+#endif /* ADF_GEN4_DC_H */
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index 33a9a46d6949..cef7bb8ec007 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -209,6 +209,14 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
return -EFAULT;
}
+
+ if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to register compression algs\n");
+ set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+ clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+ return -EFAULT;
+ }
return 0;
}
EXPORT_SYMBOL_GPL(adf_dev_start);
@@ -242,6 +250,9 @@ void adf_dev_stop(struct adf_accel_dev *accel_dev)
qat_asym_algs_unregister();
}
+ if (!list_empty(&accel_dev->compression_list))
+ qat_comp_algs_unregister();
+
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
if (!test_bit(accel_dev->accel_id, service->start_status))
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index b2db1d70d71f..d85a90cc387b 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -170,6 +170,10 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
ADF_NUM_CY, (void *)&val, ADF_DEC))
return -EFAULT;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC);
+ if (ret)
+ return ret;
set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h
index 6dc09d270082..c141160421e1 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw.h
@@ -116,6 +116,10 @@ struct icp_qat_fw_comn_resp {
#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
+#define ICP_QAT_FW_COMN_CNV_FLAG_BITPOS 6
+#define ICP_QAT_FW_COMN_CNV_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS 5
+#define ICP_QAT_FW_COMN_CNVNR_FLAG_MASK 0x1
#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
icp_qat_fw_comn_req_hdr_t.service_type
@@ -132,6 +136,26 @@ struct icp_qat_fw_comn_resp {
#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
+#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_GET(hdr_flags) \
+ QAT_FIELD_GET(hdr_flags, \
+ ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_CNVNR_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_SET(hdr_t, val) \
+ QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+ ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_CNVNR_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(hdr_flags) \
+ QAT_FIELD_GET(hdr_flags, \
+ ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_CNV_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_SET(hdr_t, val) \
+ QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+ ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_CNV_FLAG_MASK)
+
#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
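
The new CNV and CNVNR accessors are thin wrappers around QAT_FIELD_GET/QAT_FIELD_SET, which mask and shift one field in and out of a flags word. Expanded for the CNV bit, the GET/SET pair is roughly:

/* GET: extract the field */
cnv = (hdr_flags >> ICP_QAT_FW_COMN_CNV_FLAG_BITPOS) &
      ICP_QAT_FW_COMN_CNV_FLAG_MASK;

/* SET: clear the field, then OR in the new value */
hdr.hdr_flags &= ~(ICP_QAT_FW_COMN_CNV_FLAG_MASK <<
		   ICP_QAT_FW_COMN_CNV_FLAG_BITPOS);
hdr.hdr_flags |= (val & ICP_QAT_FW_COMN_CNV_FLAG_MASK) <<
		 ICP_QAT_FW_COMN_CNV_FLAG_BITPOS;
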
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/qat/qat_common/icp_qat_fw_comp.h
new file mode 100644
index 000000000000..a03d43fef2b3
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_comp.h
@@ -0,0 +1,404 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef _ICP_QAT_FW_COMP_H_
+#define _ICP_QAT_FW_COMP_H_
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_comp_cmd_id {
+ ICP_QAT_FW_COMP_CMD_STATIC = 0,
+ ICP_QAT_FW_COMP_CMD_DYNAMIC = 1,
+ ICP_QAT_FW_COMP_CMD_DECOMPRESS = 2,
+ ICP_QAT_FW_COMP_CMD_DELIMITER
+};
+
+enum icp_qat_fw_comp_20_cmd_id {
+ ICP_QAT_FW_COMP_20_CMD_LZ4_COMPRESS = 3,
+ ICP_QAT_FW_COMP_20_CMD_LZ4_DECOMPRESS = 4,
+ ICP_QAT_FW_COMP_20_CMD_LZ4S_COMPRESS = 5,
+ ICP_QAT_FW_COMP_20_CMD_LZ4S_DECOMPRESS = 6,
+ ICP_QAT_FW_COMP_20_CMD_XP10_COMPRESS = 7,
+ ICP_QAT_FW_COMP_20_CMD_XP10_DECOMPRESS = 8,
+ ICP_QAT_FW_COMP_20_CMD_RESERVED_9 = 9,
+ ICP_QAT_FW_COMP_23_CMD_ZSTD_COMPRESS = 10,
+ ICP_QAT_FW_COMP_23_CMD_ZSTD_DECOMPRESS = 11,
+ ICP_QAT_FW_COMP_20_CMD_DELIMITER
+};
+
+#define ICP_QAT_FW_COMP_STATELESS_SESSION 0
+#define ICP_QAT_FW_COMP_STATEFUL_SESSION 1
+#define ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST 0
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST 1
+#define ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST 0
+#define ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST 1
+#define ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 0
+#define ICP_QAT_FW_COMP_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 1
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF 1
+#define ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF 0
+#define ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS 2
+#define ICP_QAT_FW_COMP_SESSION_TYPE_MASK 0x1
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS 3
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK 0x1
+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS 4
+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK 0x1
+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS 5
+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1
+
+#define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb, \
+ ret_uncomp, secure_ram) \
+ ((((sesstype) & ICP_QAT_FW_COMP_SESSION_TYPE_MASK) << \
+ ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS) | \
+ (((autoselect) & ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) << \
+ ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS) | \
+ (((enhanced_asb) & ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK) << \
+ ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS) | \
+ (((ret_uncomp) & ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK) << \
+ ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS) | \
+ (((secure_ram) & ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK) << \
+ ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS))
+
+#define ICP_QAT_FW_COMP_SESSION_TYPE_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS, \
+ ICP_QAT_FW_COMP_SESSION_TYPE_MASK)
+
+#define ICP_QAT_FW_COMP_SESSION_TYPE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS, \
+ ICP_QAT_FW_COMP_SESSION_TYPE_MASK)
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS, \
+ ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK)
+
+#define ICP_QAT_FW_COMP_EN_ASB_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS, \
+ ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK)
+
+#define ICP_QAT_FW_COMP_RET_UNCOMP_GET(flags) \
+ QAT_FIELD_GET(flags, \
+ ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS, \
+ ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK)
+
+#define ICP_QAT_FW_COMP_SECURE_RAM_USE_GET(flags) \
+ QAT_FIELD_GET(flags, \
+ ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS, \
+ ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK)
+
+struct icp_qat_fw_comp_req_hdr_cd_pars {
+ union {
+ struct {
+ __u64 content_desc_addr;
+ __u16 content_desc_resrvd1;
+ __u8 content_desc_params_sz;
+ __u8 content_desc_hdr_resrvd2;
+ __u32 content_desc_resrvd3;
+ } s;
+ struct {
+ __u32 comp_slice_cfg_word[ICP_QAT_FW_NUM_LONGWORDS_2];
+ __u32 content_desc_resrvd4;
+ } sl;
+ } u;
+};
+
+struct icp_qat_fw_comp_req_params {
+ __u32 comp_len;
+ __u32 out_buffer_sz;
+ union {
+ struct {
+ __u32 initial_crc32;
+ __u32 initial_adler;
+ } legacy;
+ __u64 crc_data_addr;
+ } crc;
+ __u32 req_par_flags;
+ __u32 rsrvd;
+};
+
+#define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr, \
+ cnvdfx, crc, xxhash_acc, \
+ cnv_error_type, append_crc, \
+ drop_data) \
+ ((((sop) & ICP_QAT_FW_COMP_SOP_MASK) << \
+ ICP_QAT_FW_COMP_SOP_BITPOS) | \
+ (((eop) & ICP_QAT_FW_COMP_EOP_MASK) << \
+ ICP_QAT_FW_COMP_EOP_BITPOS) | \
+ (((bfinal) & ICP_QAT_FW_COMP_BFINAL_MASK) \
+ << ICP_QAT_FW_COMP_BFINAL_BITPOS) | \
+ (((cnv) & ICP_QAT_FW_COMP_CNV_MASK) << \
+ ICP_QAT_FW_COMP_CNV_BITPOS) | \
+ (((cnvnr) & ICP_QAT_FW_COMP_CNVNR_MASK) \
+ << ICP_QAT_FW_COMP_CNVNR_BITPOS) | \
+ (((cnvdfx) & ICP_QAT_FW_COMP_CNV_DFX_MASK) \
+ << ICP_QAT_FW_COMP_CNV_DFX_BITPOS) | \
+ (((crc) & ICP_QAT_FW_COMP_CRC_MODE_MASK) \
+ << ICP_QAT_FW_COMP_CRC_MODE_BITPOS) | \
+ (((xxhash_acc) & ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK) \
+ << ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS) | \
+ (((cnv_error_type) & ICP_QAT_FW_COMP_CNV_ERROR_MASK) \
+ << ICP_QAT_FW_COMP_CNV_ERROR_BITPOS) | \
+ (((append_crc) & ICP_QAT_FW_COMP_APPEND_CRC_MASK) \
+ << ICP_QAT_FW_COMP_APPEND_CRC_BITPOS) | \
+ (((drop_data) & ICP_QAT_FW_COMP_DROP_DATA_MASK) \
+ << ICP_QAT_FW_COMP_DROP_DATA_BITPOS))
+
+#define ICP_QAT_FW_COMP_NOT_SOP 0
+#define ICP_QAT_FW_COMP_SOP 1
+#define ICP_QAT_FW_COMP_NOT_EOP 0
+#define ICP_QAT_FW_COMP_EOP 1
+#define ICP_QAT_FW_COMP_NOT_BFINAL 0
+#define ICP_QAT_FW_COMP_BFINAL 1
+#define ICP_QAT_FW_COMP_NO_CNV 0
+#define ICP_QAT_FW_COMP_CNV 1
+#define ICP_QAT_FW_COMP_NO_CNV_RECOVERY 0
+#define ICP_QAT_FW_COMP_CNV_RECOVERY 1
+#define ICP_QAT_FW_COMP_NO_CNV_DFX 0
+#define ICP_QAT_FW_COMP_CNV_DFX 1
+#define ICP_QAT_FW_COMP_CRC_MODE_LEGACY 0
+#define ICP_QAT_FW_COMP_CRC_MODE_E2E 1
+#define ICP_QAT_FW_COMP_NO_XXHASH_ACC 0
+#define ICP_QAT_FW_COMP_XXHASH_ACC 1
+#define ICP_QAT_FW_COMP_APPEND_CRC 1
+#define ICP_QAT_FW_COMP_NO_APPEND_CRC 0
+#define ICP_QAT_FW_COMP_DROP_DATA 1
+#define ICP_QAT_FW_COMP_NO_DROP_DATA 0
+#define ICP_QAT_FW_COMP_SOP_BITPOS 0
+#define ICP_QAT_FW_COMP_SOP_MASK 0x1
+#define ICP_QAT_FW_COMP_EOP_BITPOS 1
+#define ICP_QAT_FW_COMP_EOP_MASK 0x1
+#define ICP_QAT_FW_COMP_BFINAL_BITPOS 6
+#define ICP_QAT_FW_COMP_BFINAL_MASK 0x1
+#define ICP_QAT_FW_COMP_CNV_BITPOS 16
+#define ICP_QAT_FW_COMP_CNV_MASK 0x1
+#define ICP_QAT_FW_COMP_CNVNR_BITPOS 17
+#define ICP_QAT_FW_COMP_CNVNR_MASK 0x1
+#define ICP_QAT_FW_COMP_CNV_DFX_BITPOS 18
+#define ICP_QAT_FW_COMP_CNV_DFX_MASK 0x1
+#define ICP_QAT_FW_COMP_CRC_MODE_BITPOS 19
+#define ICP_QAT_FW_COMP_CRC_MODE_MASK 0x1
+#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS 20
+#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK 0x1
+#define ICP_QAT_FW_COMP_CNV_ERROR_BITPOS 21
+#define ICP_QAT_FW_COMP_CNV_ERROR_MASK 0b111
+#define ICP_QAT_FW_COMP_CNV_ERROR_NONE 0b000
+#define ICP_QAT_FW_COMP_CNV_ERROR_CHECKSUM 0b001
+#define ICP_QAT_FW_COMP_CNV_ERROR_DCPR_OBC_DIFF 0b010
+#define ICP_QAT_FW_COMP_CNV_ERROR_DCPR 0b011
+#define ICP_QAT_FW_COMP_CNV_ERROR_XLT 0b100
+#define ICP_QAT_FW_COMP_CNV_ERROR_DCPR_IBC_DIFF 0b101
+#define ICP_QAT_FW_COMP_APPEND_CRC_BITPOS 24
+#define ICP_QAT_FW_COMP_APPEND_CRC_MASK 0x1
+#define ICP_QAT_FW_COMP_DROP_DATA_BITPOS 25
+#define ICP_QAT_FW_COMP_DROP_DATA_MASK 0x1
+
+#define ICP_QAT_FW_COMP_SOP_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_SOP_BITPOS, \
+ ICP_QAT_FW_COMP_SOP_MASK)
+
+#define ICP_QAT_FW_COMP_SOP_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_SOP_BITPOS, \
+ ICP_QAT_FW_COMP_SOP_MASK)
+
+#define ICP_QAT_FW_COMP_EOP_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_EOP_BITPOS, \
+ ICP_QAT_FW_COMP_EOP_MASK)
+
+#define ICP_QAT_FW_COMP_EOP_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_EOP_BITPOS, \
+ ICP_QAT_FW_COMP_EOP_MASK)
+
+#define ICP_QAT_FW_COMP_BFINAL_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_BFINAL_BITPOS, \
+ ICP_QAT_FW_COMP_BFINAL_MASK)
+
+#define ICP_QAT_FW_COMP_BFINAL_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_BFINAL_BITPOS, \
+ ICP_QAT_FW_COMP_BFINAL_MASK)
+
+#define ICP_QAT_FW_COMP_CNV_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNV_BITPOS, \
+ ICP_QAT_FW_COMP_CNV_MASK)
+
+#define ICP_QAT_FW_COMP_CNVNR_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNVNR_BITPOS, \
+ ICP_QAT_FW_COMP_CNVNR_MASK)
+
+#define ICP_QAT_FW_COMP_CNV_DFX_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNV_DFX_BITPOS, \
+ ICP_QAT_FW_COMP_CNV_DFX_MASK)
+
+#define ICP_QAT_FW_COMP_CNV_DFX_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_CNV_DFX_BITPOS, \
+ ICP_QAT_FW_COMP_CNV_DFX_MASK)
+
+#define ICP_QAT_FW_COMP_CRC_MODE_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CRC_MODE_BITPOS, \
+ ICP_QAT_FW_COMP_CRC_MODE_MASK)
+
+#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS, \
+ ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK)
+
+#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS, \
+ ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK)
+
+#define ICP_QAT_FW_COMP_CNV_ERROR_TYPE_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNV_ERROR_BITPOS, \
+ ICP_QAT_FW_COMP_CNV_ERROR_MASK)
+
+#define ICP_QAT_FW_COMP_CNV_ERROR_TYPE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_CNV_ERROR_BITPOS, \
+ ICP_QAT_FW_COMP_CNV_ERROR_MASK)
+
+struct icp_qat_fw_xlt_req_params {
+ __u64 inter_buff_ptr;
+};
+
+struct icp_qat_fw_comp_cd_hdr {
+ __u16 ram_bank_flags;
+ __u8 comp_cfg_offset;
+ __u8 next_curr_id;
+ __u32 resrvd;
+ __u64 comp_state_addr;
+ __u64 ram_banks_addr;
+};
+
+#define COMP_CPR_INITIAL_CRC 0
+#define COMP_CPR_INITIAL_ADLER 1
+
+struct icp_qat_fw_xlt_cd_hdr {
+ __u16 resrvd1;
+ __u8 resrvd2;
+ __u8 next_curr_id;
+ __u32 resrvd3;
+};
+
+struct icp_qat_fw_comp_req {
+ struct icp_qat_fw_comn_req_hdr comn_hdr;
+ struct icp_qat_fw_comp_req_hdr_cd_pars cd_pars;
+ struct icp_qat_fw_comn_req_mid comn_mid;
+ struct icp_qat_fw_comp_req_params comp_pars;
+ union {
+ struct icp_qat_fw_xlt_req_params xlt_pars;
+ __u32 resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2];
+ } u1;
+ __u32 resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2];
+ struct icp_qat_fw_comp_cd_hdr comp_cd_ctrl;
+ union {
+ struct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl;
+ __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_2];
+ } u2;
+};
+
+struct icp_qat_fw_resp_comp_pars {
+ __u32 input_byte_counter;
+ __u32 output_byte_counter;
+ union {
+ struct {
+ __u32 curr_crc32;
+ __u32 curr_adler_32;
+ } legacy;
+ __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_2];
+ } crc;
+};
+
+struct icp_qat_fw_comp_state {
+ __u32 rd8_counter;
+ __u32 status_flags;
+ __u32 in_counter;
+ __u32 out_counter;
+ __u64 intermediate_state;
+ __u32 lobc;
+ __u32 replaybc;
+ __u64 pcrc64_poly;
+ __u32 crc32;
+ __u32 adler_xxhash32;
+ __u64 pcrc64_xorout;
+ __u32 out_buf_size;
+ __u32 in_buf_size;
+ __u64 in_pcrc64;
+ __u64 out_pcrc64;
+ __u32 lobs;
+ __u32 libc;
+ __u64 reserved;
+ __u32 xxhash_state[4];
+ __u32 cleartext[4];
+};
+
+struct icp_qat_fw_comp_resp {
+ struct icp_qat_fw_comn_resp_hdr comn_resp;
+ __u64 opaque_data;
+ struct icp_qat_fw_resp_comp_pars comp_resp_pars;
+};
+
+#define QAT_FW_COMP_BANK_FLAG_MASK 0x1
+#define QAT_FW_COMP_BANK_I_BITPOS 8
+#define QAT_FW_COMP_BANK_H_BITPOS 7
+#define QAT_FW_COMP_BANK_G_BITPOS 6
+#define QAT_FW_COMP_BANK_F_BITPOS 5
+#define QAT_FW_COMP_BANK_E_BITPOS 4
+#define QAT_FW_COMP_BANK_D_BITPOS 3
+#define QAT_FW_COMP_BANK_C_BITPOS 2
+#define QAT_FW_COMP_BANK_B_BITPOS 1
+#define QAT_FW_COMP_BANK_A_BITPOS 0
+
+enum icp_qat_fw_comp_bank_enabled {
+ ICP_QAT_FW_COMP_BANK_DISABLED = 0,
+ ICP_QAT_FW_COMP_BANK_ENABLED = 1,
+ ICP_QAT_FW_COMP_BANK_DELIMITER = 2
+};
+
+#define ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(bank_i_enable, bank_h_enable, \
+ bank_g_enable, bank_f_enable, \
+ bank_e_enable, bank_d_enable, \
+ bank_c_enable, bank_b_enable, \
+ bank_a_enable) \
+ ((((bank_i_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+ QAT_FW_COMP_BANK_I_BITPOS) | \
+ (((bank_h_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+ QAT_FW_COMP_BANK_H_BITPOS) | \
+ (((bank_g_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+ QAT_FW_COMP_BANK_G_BITPOS) | \
+ (((bank_f_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+ QAT_FW_COMP_BANK_F_BITPOS) | \
+ (((bank_e_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+ QAT_FW_COMP_BANK_E_BITPOS) | \
+ (((bank_d_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+ QAT_FW_COMP_BANK_D_BITPOS) | \
+ (((bank_c_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+ QAT_FW_COMP_BANK_C_BITPOS) | \
+ (((bank_b_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+ QAT_FW_COMP_BANK_B_BITPOS) | \
+ (((bank_a_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+ QAT_FW_COMP_BANK_A_BITPOS))
+
+struct icp_qat_fw_comp_crc_data_struct {
+ __u32 crc32;
+ union {
+ __u32 adler;
+ __u32 xxhash;
+ } adler_xxhash_u;
+ __u32 cpr_in_crc_lo;
+ __u32 cpr_in_crc_hi;
+ __u32 cpr_out_crc_lo;
+ __u32 cpr_out_crc_hi;
+ __u32 xlt_in_crc_lo;
+ __u32 xlt_in_crc_hi;
+ __u32 xlt_out_crc_lo;
+ __u32 xlt_out_crc_hi;
+ __u32 prog_crc_poly_lo;
+ __u32 prog_crc_poly_hi;
+ __u32 xor_out_lo;
+ __u32 xor_out_hi;
+ __u32 append_crc_lo;
+ __u32 append_crc_hi;
+};
+
+struct xxhash_acc_state_buff {
+ __u32 in_counter;
+ __u32 out_counter;
+ __u32 xxhash_state[4];
+ __u32 clear_txt[4];
+};
+
+#endif
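
This header follows one convention throughout: a *_BUILD macro packs every field of a word at session setup, and per-field GET/SET accessors read or patch a single field later. As a worked example, the serv_specif_flags word built by the gen2 deflate code earlier in this patch packs five inputs that are all defined as 0 above, so the result is 0; the gen4 builder differs only in passing AUTO_SELECT_BEST (bit 3):

/* every input below is 0, so the packed word is 0 */
u32 flags = ICP_QAT_FW_COMP_FLAGS_BUILD(
		ICP_QAT_FW_COMP_STATELESS_SESSION,	  /* bit 2 */
		ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,	  /* bit 3 */
		ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST, /* bit 4 */
		ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
		ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);

/* a later SET touches only its own field, here bit 2 */
ICP_QAT_FW_COMP_SESSION_TYPE_SET(flags,
				 ICP_QAT_FW_COMP_STATEFUL_SESSION);
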
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h
index 433304cad2ed..4042739bb6fa 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_hw.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h
@@ -307,4 +307,70 @@ struct icp_qat_hw_cipher_algo_blk {
struct icp_qat_hw_ucs_cipher_aes256_f8 ucs_aes;
};
} __aligned(64);
+
+enum icp_qat_hw_compression_direction {
+ ICP_QAT_HW_COMPRESSION_DIR_COMPRESS = 0,
+ ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS = 1,
+ ICP_QAT_HW_COMPRESSION_DIR_DELIMITER = 2
+};
+
+enum icp_qat_hw_compression_delayed_match {
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED = 0,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED = 1,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DELIMITER = 2
+};
+
+enum icp_qat_hw_compression_algo {
+ ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0,
+ ICP_QAT_HW_COMPRESSION_ALGO_LZS = 1,
+ ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2
+};
+
+enum icp_qat_hw_compression_depth {
+ ICP_QAT_HW_COMPRESSION_DEPTH_1 = 0,
+ ICP_QAT_HW_COMPRESSION_DEPTH_4 = 1,
+ ICP_QAT_HW_COMPRESSION_DEPTH_8 = 2,
+ ICP_QAT_HW_COMPRESSION_DEPTH_16 = 3,
+ ICP_QAT_HW_COMPRESSION_DEPTH_128 = 4,
+ ICP_QAT_HW_COMPRESSION_DEPTH_DELIMITER = 5
+};
+
+enum icp_qat_hw_compression_file_type {
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_0 = 0,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_1 = 1,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_2 = 2,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_3 = 3,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_4 = 4,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_DELIMITER = 5
+};
+
+struct icp_qat_hw_compression_config {
+ __u32 lower_val;
+ __u32 upper_val;
+};
+
+#define QAT_COMPRESSION_DIR_BITPOS 4
+#define QAT_COMPRESSION_DIR_MASK 0x7
+#define QAT_COMPRESSION_DELAYED_MATCH_BITPOS 16
+#define QAT_COMPRESSION_DELAYED_MATCH_MASK 0x1
+#define QAT_COMPRESSION_ALGO_BITPOS 31
+#define QAT_COMPRESSION_ALGO_MASK 0x1
+#define QAT_COMPRESSION_DEPTH_BITPOS 28
+#define QAT_COMPRESSION_DEPTH_MASK 0x7
+#define QAT_COMPRESSION_FILE_TYPE_BITPOS 24
+#define QAT_COMPRESSION_FILE_TYPE_MASK 0xF
+
+#define ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(dir, delayed, \
+ algo, depth, filetype) \
+ ((((dir) & QAT_COMPRESSION_DIR_MASK) << \
+ QAT_COMPRESSION_DIR_BITPOS) | \
+ (((delayed) & QAT_COMPRESSION_DELAYED_MATCH_MASK) << \
+ QAT_COMPRESSION_DELAYED_MATCH_BITPOS) | \
+ (((algo) & QAT_COMPRESSION_ALGO_MASK) << \
+ QAT_COMPRESSION_ALGO_BITPOS) | \
+ (((depth) & QAT_COMPRESSION_DEPTH_MASK) << \
+ QAT_COMPRESSION_DEPTH_BITPOS) | \
+ (((filetype) & QAT_COMPRESSION_FILE_TYPE_MASK) << \
+ QAT_COMPRESSION_FILE_TYPE_BITPOS))
+
#endif
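ICP_QAT_HW_COMPRESSION_CONFIG_BUILD() packs five enum values into one 32-bit word using the BITPOS/MASK pairs above. As a worked example, using only the enums defined in this header, a deflate compression config with delayed match enabled and search depth 8:

    u32 cfg = ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
            ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,          /* 0 << 4  */
            ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED, /* 1 << 16 */
            ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,          /* 0 << 31 */
            ICP_QAT_HW_COMPRESSION_DEPTH_8,               /* 2 << 28 */
            ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);          /* 0 << 24 */
    /* cfg == 0x20010000 */

The word would typically land in the lower_val field of struct icp_qat_hw_compression_config; where exactly the firmware expects it is device-specific and not shown in this diff.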
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp.h b/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp.h
new file mode 100644
index 000000000000..7ea8962272f2
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef _ICP_QAT_HW_20_COMP_H_
+#define _ICP_QAT_HW_20_COMP_H_
+
+#include "icp_qat_hw_20_comp_defs.h"
+#include "icp_qat_fw.h"
+
+struct icp_qat_hw_comp_20_config_csr_lower {
+ enum icp_qat_hw_comp_20_extended_delay_match_mode edmm;
+ enum icp_qat_hw_comp_20_hw_comp_format algo;
+ enum icp_qat_hw_comp_20_search_depth sd;
+ enum icp_qat_hw_comp_20_hbs_control hbs;
+ enum icp_qat_hw_comp_20_abd abd;
+ enum icp_qat_hw_comp_20_lllbd_ctrl lllbd;
+ enum icp_qat_hw_comp_20_min_match_control mmctrl;
+ enum icp_qat_hw_comp_20_skip_hash_collision hash_col;
+ enum icp_qat_hw_comp_20_skip_hash_update hash_update;
+ enum icp_qat_hw_comp_20_byte_skip skip_ctrl;
+};
+
+static inline __u32
+ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_comp_20_config_csr_lower csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.algo,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK);
+ QAT_FIELD_SET(val32, csr.sd,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK);
+ QAT_FIELD_SET(val32, csr.edmm,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK);
+ QAT_FIELD_SET(val32, csr.hbs,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.lllbd,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK);
+ QAT_FIELD_SET(val32, csr.mmctrl,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.hash_col,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK);
+ QAT_FIELD_SET(val32, csr.hash_update,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK);
+ QAT_FIELD_SET(val32, csr.skip_ctrl,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK);
+ QAT_FIELD_SET(val32, csr.abd, ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK);
+
+ return __builtin_bswap32(val32);
+}
+
+struct icp_qat_hw_comp_20_config_csr_upper {
+ enum icp_qat_hw_comp_20_scb_control scb_ctrl;
+ enum icp_qat_hw_comp_20_rmb_control rmb_ctrl;
+ enum icp_qat_hw_comp_20_som_control som_ctrl;
+ enum icp_qat_hw_comp_20_skip_hash_rd_control skip_hash_ctrl;
+ enum icp_qat_hw_comp_20_scb_unload_control scb_unload_ctrl;
+ enum icp_qat_hw_comp_20_disable_token_fusion_control disable_token_fusion_ctrl;
+ enum icp_qat_hw_comp_20_lbms lbms;
+ enum icp_qat_hw_comp_20_scb_mode_reset_mask scb_mode_reset;
+ __u16 lazy;
+ __u16 nice;
+};
+
+static inline __u32
+ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_comp_20_config_csr_upper csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.scb_ctrl,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.rmb_ctrl,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.som_ctrl,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.skip_hash_ctrl,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.scb_unload_ctrl,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.disable_token_fusion_ctrl,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.lbms,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_MASK);
+ QAT_FIELD_SET(val32, csr.scb_mode_reset,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK);
+ QAT_FIELD_SET(val32, csr.lazy,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK);
+ QAT_FIELD_SET(val32, csr.nice,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK);
+
+ return __builtin_bswap32(val32);
+}
+
+struct icp_qat_hw_decomp_20_config_csr_lower {
+ enum icp_qat_hw_decomp_20_hbs_control hbs;
+ enum icp_qat_hw_decomp_20_lbms lbms;
+ enum icp_qat_hw_decomp_20_hw_comp_format algo;
+ enum icp_qat_hw_decomp_20_min_match_control mmctrl;
+ enum icp_qat_hw_decomp_20_lz4_block_checksum_present lbc;
+};
+
+static inline __u32
+ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_decomp_20_config_csr_lower csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.hbs,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.lbms,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_BITPOS,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_MASK);
+ QAT_FIELD_SET(val32, csr.algo,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK);
+ QAT_FIELD_SET(val32, csr.mmctrl,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.lbc,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK);
+
+ return __builtin_bswap32(val32);
+}
+
+struct icp_qat_hw_decomp_20_config_csr_upper {
+ enum icp_qat_hw_decomp_20_speculative_decoder_control sdc;
+ enum icp_qat_hw_decomp_20_mini_cam_control mcc;
+};
+
+static inline __u32
+ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_decomp_20_config_csr_upper csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.sdc,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.mcc,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK);
+
+ return __builtin_bswap32(val32);
+}
+
+#endif
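Each builder above is a plain accumulate-and-swap: the fields are OR-ed into a host-endian u32 with QAT_FIELD_SET() (assumed here to be the usual read-modify-write bitfield macro from icp_qat_fw.h), then byte-swapped, presumably because the firmware consumes the CSR big-endian. A minimal sketch of building the lower compression CSR from the defaults defined in icp_qat_hw_20_comp_defs.h (the next file in this diff):

    struct icp_qat_hw_comp_20_config_csr_lower lower = {
            .algo  = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE,
            .sd    = ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_DEFAULT_VAL,
            .edmm  = ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_DEFAULT_VAL,
            .hbs   = ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL,
            .abd   = ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_DEFAULT_VAL,
            .lllbd = ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL,
            /* remaining fields left at their zero (default) encodings */
    };
    __u32 lo_csr = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(lower);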
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp_defs.h b/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp_defs.h
new file mode 100644
index 000000000000..208d4554283b
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp_defs.h
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef _ICP_QAT_HW_20_COMP_DEFS_H
+#define _ICP_QAT_HW_20_COMP_DEFS_H
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS 31
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_scb_control {
+ ICP_QAT_HW_COMP_20_SCB_CONTROL_ENABLE = 0x0,
+ ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS 30
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_rmb_control {
+ ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL = 0x0,
+ ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_FC_ONLY = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS 28
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK 0x3
+
+enum icp_qat_hw_comp_20_som_control {
+ ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE = 0x0,
+ ICP_QAT_HW_COMP_20_SOM_CONTROL_REPLAY_MODE = 0x1,
+ ICP_QAT_HW_COMP_20_SOM_CONTROL_INPUT_CRC = 0x2,
+ ICP_QAT_HW_COMP_20_SOM_CONTROL_RESERVED_MODE = 0x3,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS 27
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_skip_hash_rd_control {
+ ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP = 0x0,
+ ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_SKIP_HASH_READS = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS 26
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_scb_unload_control {
+ ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD = 0x0,
+ ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_NO_UNLOAD = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS 21
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_disable_token_fusion_control {
+ ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE = 0x0,
+ ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_DISABLE = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_BITPOS 19
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_MASK 0x3
+
+enum icp_qat_hw_comp_20_lbms {
+ ICP_QAT_HW_COMP_20_LBMS_LBMS_64KB = 0x0,
+ ICP_QAT_HW_COMP_20_LBMS_LBMS_256KB = 0x1,
+ ICP_QAT_HW_COMP_20_LBMS_LBMS_1MB = 0x2,
+ ICP_QAT_HW_COMP_20_LBMS_LBMS_4MB = 0x3,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_LBMS_LBMS_64KB
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS 18
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK 0x1
+
+enum icp_qat_hw_comp_20_scb_mode_reset_mask {
+ ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS = 0x0,
+ ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS_AND_HISTORY = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS 9
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK 0x1ff
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL 258
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS 0
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK 0x1ff
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL 259
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7
+
+enum icp_qat_hw_comp_20_hbs_control {
+ ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0,
+ ICP_QAT_HW_COMP_23_HBS_CONTROL_HBS_IS_64KB = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS 13
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK 0x1
+
+enum icp_qat_hw_comp_20_abd {
+ ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_20_ABD_ABD_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS 12
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK 0x1
+
+enum icp_qat_hw_comp_20_lllbd_ctrl {
+ ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS 8
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK 0xf
+
+enum icp_qat_hw_comp_20_search_depth {
+ ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1 = 0x1,
+ ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_6 = 0x3,
+ ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_9 = 0x4,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS 5
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK 0x7
+
+enum icp_qat_hw_comp_20_hw_comp_format {
+ ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77 = 0x0,
+ ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE = 0x1,
+ ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_LZ4 = 0x2,
+ ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_LZ4S = 0x3,
+ ICP_QAT_HW_COMP_23_HW_COMP_FORMAT_ZSTD = 0x4,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_min_match_control {
+ ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_3B = 0x0,
+ ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_4B = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_3B
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS 3
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK 0x1
+
+enum icp_qat_hw_comp_20_skip_hash_collision {
+ ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW = 0x0,
+ ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_DONT_ALLOW = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS 2
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK 0x1
+
+enum icp_qat_hw_comp_20_skip_hash_update {
+ ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW = 0x0,
+ ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS 1
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK 0x1
+
+enum icp_qat_hw_comp_20_byte_skip {
+ ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN = 0x0,
+ ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS 0
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK 0x1
+
+enum icp_qat_hw_comp_20_extended_delay_match_mode {
+ ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED = 0x0,
+ ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS 31
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK 0x1
+
+enum icp_qat_hw_decomp_20_speculative_decoder_control {
+ ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE = 0x0,
+ ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_DISABLE = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS 30
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK 0x1
+
+enum icp_qat_hw_decomp_20_mini_cam_control {
+ ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE = 0x0,
+ ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_DISABLE = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7
+
+enum icp_qat_hw_decomp_20_hbs_control {
+ ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_BITPOS 8
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_MASK 0x3
+
+enum icp_qat_hw_decomp_20_lbms {
+ ICP_QAT_HW_DECOMP_20_LBMS_LBMS_64KB = 0x0,
+ ICP_QAT_HW_DECOMP_20_LBMS_LBMS_256KB = 0x1,
+ ICP_QAT_HW_DECOMP_20_LBMS_LBMS_1MB = 0x2,
+ ICP_QAT_HW_DECOMP_20_LBMS_LBMS_4MB = 0x3,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_20_LBMS_LBMS_64KB
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS 5
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK 0x7
+
+enum icp_qat_hw_decomp_20_hw_comp_format {
+ ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE = 0x1,
+ ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4 = 0x2,
+ ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4S = 0x3,
+ ICP_QAT_HW_DECOMP_23_HW_DECOMP_FORMAT_ZSTD = 0x4,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK 0x1
+
+enum icp_qat_hw_decomp_20_min_match_control {
+ ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_3B = 0x0,
+ ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_4B = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_3B
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS 3
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK 0x1
+
+enum icp_qat_hw_decomp_20_lz4_block_checksum_present {
+ ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_ABSENT = 0x0,
+ ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_PRESENT = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_ABSENT
+
+#endif
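Note that the search-depth enum above is sparse: only the hardware depths for levels 1, 6 and 9 are encoded (0x1, 0x3, 0x4). A driver mapping zlib-style compression levels onto it would have to bucket the intermediate levels, e.g. (a hypothetical helper, not part of this patch):

    static enum icp_qat_hw_comp_20_search_depth
    qat_level_to_search_depth(int level)
    {
            if (level <= 1)
                    return ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
            if (level <= 6)
                    return ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_6;
            return ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_9;
    }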
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index cad9c58caab1..b4b9f0aa59b9 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -23,6 +23,7 @@
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
+#include "qat_bl.h"
#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
@@ -663,189 +664,6 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
return qat_alg_aead_newkey(tfm, key, keylen);
}
-static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
- struct qat_crypto_request *qat_req)
-{
- struct device *dev = &GET_DEV(inst->accel_dev);
- struct qat_alg_buf_list *bl = qat_req->buf.bl;
- struct qat_alg_buf_list *blout = qat_req->buf.blout;
- dma_addr_t blp = qat_req->buf.blp;
- dma_addr_t blpout = qat_req->buf.bloutp;
- size_t sz = qat_req->buf.sz;
- size_t sz_out = qat_req->buf.sz_out;
- int bl_dma_dir;
- int i;
-
- bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
-
- for (i = 0; i < bl->num_bufs; i++)
- dma_unmap_single(dev, bl->bufers[i].addr,
- bl->bufers[i].len, bl_dma_dir);
-
- dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
-
- if (!qat_req->buf.sgl_src_valid)
- kfree(bl);
-
- if (blp != blpout) {
- /* If out of place operation dma unmap only data */
- int bufless = blout->num_bufs - blout->num_mapped_bufs;
-
- for (i = bufless; i < blout->num_bufs; i++) {
- dma_unmap_single(dev, blout->bufers[i].addr,
- blout->bufers[i].len,
- DMA_FROM_DEVICE);
- }
- dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
-
- if (!qat_req->buf.sgl_dst_valid)
- kfree(blout);
- }
-}
-
-static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
- struct scatterlist *sgl,
- struct scatterlist *sglout,
- struct qat_crypto_request *qat_req,
- gfp_t flags)
-{
- struct device *dev = &GET_DEV(inst->accel_dev);
- int i, sg_nctr = 0;
- int n = sg_nents(sgl);
- struct qat_alg_buf_list *bufl;
- struct qat_alg_buf_list *buflout = NULL;
- dma_addr_t blp = DMA_MAPPING_ERROR;
- dma_addr_t bloutp = DMA_MAPPING_ERROR;
- struct scatterlist *sg;
- size_t sz_out, sz = struct_size(bufl, bufers, n);
- int node = dev_to_node(&GET_DEV(inst->accel_dev));
- int bufl_dma_dir;
-
- if (unlikely(!n))
- return -EINVAL;
-
- qat_req->buf.sgl_src_valid = false;
- qat_req->buf.sgl_dst_valid = false;
-
- if (n > QAT_MAX_BUFF_DESC) {
- bufl = kzalloc_node(sz, flags, node);
- if (unlikely(!bufl))
- return -ENOMEM;
- } else {
- bufl = &qat_req->buf.sgl_src.sgl_hdr;
- memset(bufl, 0, sizeof(struct qat_alg_buf_list));
- qat_req->buf.sgl_src_valid = true;
- }
-
- bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
-
- for_each_sg(sgl, sg, n, i)
- bufl->bufers[i].addr = DMA_MAPPING_ERROR;
-
- for_each_sg(sgl, sg, n, i) {
- int y = sg_nctr;
-
- if (!sg->length)
- continue;
-
- bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
- sg->length,
- bufl_dma_dir);
- bufl->bufers[y].len = sg->length;
- if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
- goto err_in;
- sg_nctr++;
- }
- bufl->num_bufs = sg_nctr;
- blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, blp)))
- goto err_in;
- qat_req->buf.bl = bufl;
- qat_req->buf.blp = blp;
- qat_req->buf.sz = sz;
- /* Handle out of place operation */
- if (sgl != sglout) {
- struct qat_alg_buf *bufers;
-
- n = sg_nents(sglout);
- sz_out = struct_size(buflout, bufers, n);
- sg_nctr = 0;
-
- if (n > QAT_MAX_BUFF_DESC) {
- buflout = kzalloc_node(sz_out, flags, node);
- if (unlikely(!buflout))
- goto err_in;
- } else {
- buflout = &qat_req->buf.sgl_dst.sgl_hdr;
- memset(buflout, 0, sizeof(struct qat_alg_buf_list));
- qat_req->buf.sgl_dst_valid = true;
- }
-
- bufers = buflout->bufers;
- for_each_sg(sglout, sg, n, i)
- bufers[i].addr = DMA_MAPPING_ERROR;
-
- for_each_sg(sglout, sg, n, i) {
- int y = sg_nctr;
-
- if (!sg->length)
- continue;
-
- bufers[y].addr = dma_map_single(dev, sg_virt(sg),
- sg->length,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
- goto err_out;
- bufers[y].len = sg->length;
- sg_nctr++;
- }
- buflout->num_bufs = sg_nctr;
- buflout->num_mapped_bufs = sg_nctr;
- bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, bloutp)))
- goto err_out;
- qat_req->buf.blout = buflout;
- qat_req->buf.bloutp = bloutp;
- qat_req->buf.sz_out = sz_out;
- } else {
- /* Otherwise set the src and dst to the same address */
- qat_req->buf.bloutp = qat_req->buf.blp;
- qat_req->buf.sz_out = 0;
- }
- return 0;
-
-err_out:
- if (!dma_mapping_error(dev, bloutp))
- dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
-
- n = sg_nents(sglout);
- for (i = 0; i < n; i++)
- if (!dma_mapping_error(dev, buflout->bufers[i].addr))
- dma_unmap_single(dev, buflout->bufers[i].addr,
- buflout->bufers[i].len,
- DMA_FROM_DEVICE);
-
- if (!qat_req->buf.sgl_dst_valid)
- kfree(buflout);
-
-err_in:
- if (!dma_mapping_error(dev, blp))
- dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
-
- n = sg_nents(sgl);
- for (i = 0; i < n; i++)
- if (!dma_mapping_error(dev, bufl->bufers[i].addr))
- dma_unmap_single(dev, bufl->bufers[i].addr,
- bufl->bufers[i].len,
- bufl_dma_dir);
-
- if (!qat_req->buf.sgl_src_valid)
- kfree(bufl);
-
- dev_err(dev, "Failed to map buf for dma\n");
- return -ENOMEM;
-}
-
static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
struct qat_crypto_request *qat_req)
{
@@ -855,7 +673,7 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
u8 stat_filed = qat_resp->comn_resp.comn_status;
int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
- qat_alg_free_bufl(inst, qat_req);
+ qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
res = -EBADMSG;
areq->base.complete(&areq->base, res);
@@ -925,7 +743,7 @@ static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
u8 stat_filed = qat_resp->comn_resp.comn_status;
int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
- qat_alg_free_bufl(inst, qat_req);
+ qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
res = -EINVAL;
@@ -981,7 +799,8 @@ static int qat_alg_aead_dec(struct aead_request *areq)
if (cipher_len % AES_BLOCK_SIZE != 0)
return -EINVAL;
- ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f);
+ ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
+ &qat_req->buf, NULL, f);
if (unlikely(ret))
return ret;
@@ -1003,7 +822,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
if (ret == -ENOSPC)
- qat_alg_free_bufl(ctx->inst, qat_req);
+ qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
return ret;
}
@@ -1024,7 +843,8 @@ static int qat_alg_aead_enc(struct aead_request *areq)
if (areq->cryptlen % AES_BLOCK_SIZE != 0)
return -EINVAL;
- ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f);
+ ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
+ &qat_req->buf, NULL, f);
if (unlikely(ret))
return ret;
@@ -1048,7 +868,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
if (ret == -ENOSPC)
- qat_alg_free_bufl(ctx->inst, qat_req);
+ qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
return ret;
}
@@ -1209,7 +1029,8 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
if (req->cryptlen == 0)
return 0;
- ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f);
+ ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
+ &qat_req->buf, NULL, f);
if (unlikely(ret))
return ret;
@@ -1230,7 +1051,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
if (ret == -ENOSPC)
- qat_alg_free_bufl(ctx->inst, qat_req);
+ qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
return ret;
}
@@ -1275,7 +1096,8 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
if (req->cryptlen == 0)
return 0;
- ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f);
+ ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
+ &qat_req->buf, NULL, f);
if (unlikely(ret))
return ret;
@@ -1297,7 +1119,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
if (ret == -ENOSPC)
- qat_alg_free_bufl(ctx->inst, qat_req);
+ qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
return ret;
}
diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.h b/drivers/crypto/qat/qat_common/qat_algs_send.h
index 5ce9f4f69d8f..0baca16e1eff 100644
--- a/drivers/crypto/qat/qat_common/qat_algs_send.h
+++ b/drivers/crypto/qat/qat_common/qat_algs_send.h
@@ -3,7 +3,21 @@
#ifndef QAT_ALGS_SEND_H
#define QAT_ALGS_SEND_H
-#include "qat_crypto.h"
+#include <linux/list.h>
+#include "adf_transport_internal.h"
+
+struct qat_instance_backlog {
+ struct list_head list;
+ spinlock_t lock; /* protects backlog list */
+};
+
+struct qat_alg_req {
+ u32 *fw_req;
+ struct adf_etr_ring_data *tx_ring;
+ struct crypto_async_request *base;
+ struct list_head list;
+ struct qat_instance_backlog *backlog;
+};
int qat_alg_send_message(struct qat_alg_req *req);
void qat_alg_send_backlog(struct qat_instance_backlog *backlog);
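These structures move out of qat_crypto.h so that the new compression service can share the ring-backlog machinery without pulling in the crypto headers. The send pattern, as used by the callers elsewhere in this patch (the -EBUSY behaviour is how qat_algs_send.c handles CRYPTO_TFM_REQ_MAY_BACKLOG requests; that file is not shown here, so treat this as an assumption):

    ret = qat_alg_send_message(&qat_req->alg_req);
    if (ret == -ENOSPC)
            /* ring full, request not backlog-able: unwind DMA mappings */
            qat_bl_free_bufl(accel_dev, &qat_req->buf);
    /* -EBUSY: the request was parked on the instance backlog and will be
     * resubmitted later via qat_alg_send_backlog()
     */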
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 94a26702aeae..935a7e012946 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -494,6 +494,8 @@ static int qat_dh_init_tfm(struct crypto_kpp *tfm)
if (!inst)
return -EINVAL;
+ kpp_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);
+
ctx->p_size = 0;
ctx->g2 = false;
ctx->inst = inst;
@@ -1230,6 +1232,8 @@ static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
if (!inst)
return -EINVAL;
+ akcipher_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);
+
ctx->key_sz = 0;
ctx->inst = inst;
return 0;
@@ -1252,7 +1256,6 @@ static struct akcipher_alg rsa = {
.max_size = qat_rsa_max_size,
.init = qat_rsa_init_tfm,
.exit = qat_rsa_exit_tfm,
- .reqsize = sizeof(struct qat_asym_request) + 64,
.base = {
.cra_name = "rsa",
.cra_driver_name = "qat-rsa",
@@ -1269,7 +1272,6 @@ static struct kpp_alg dh = {
.max_size = qat_dh_max_size,
.init = qat_dh_init_tfm,
.exit = qat_dh_exit_tfm,
- .reqsize = sizeof(struct qat_asym_request) + 64,
.base = {
.cra_name = "dh",
.cra_driver_name = "qat-dh",
diff --git a/drivers/crypto/qat/qat_common/qat_bl.c b/drivers/crypto/qat/qat_common/qat_bl.c
new file mode 100644
index 000000000000..2e89ff08041b
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_bl.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2014 - 2022 Intel Corporation */
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "qat_bl.h"
+#include "qat_crypto.h"
+
+void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
+ struct qat_request_buffs *buf)
+{
+ struct device *dev = &GET_DEV(accel_dev);
+ struct qat_alg_buf_list *bl = buf->bl;
+ struct qat_alg_buf_list *blout = buf->blout;
+ dma_addr_t blp = buf->blp;
+ dma_addr_t blpout = buf->bloutp;
+ size_t sz = buf->sz;
+ size_t sz_out = buf->sz_out;
+ int bl_dma_dir;
+ int i;
+
+ bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
+ for (i = 0; i < bl->num_bufs; i++)
+ dma_unmap_single(dev, bl->bufers[i].addr,
+ bl->bufers[i].len, bl_dma_dir);
+
+ dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+
+ if (!buf->sgl_src_valid)
+ kfree(bl);
+
+ if (blp != blpout) {
+ for (i = 0; i < blout->num_mapped_bufs; i++) {
+ dma_unmap_single(dev, blout->bufers[i].addr,
+ blout->bufers[i].len,
+ DMA_FROM_DEVICE);
+ }
+ dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
+
+ if (!buf->sgl_dst_valid)
+ kfree(blout);
+ }
+}
+
+static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
+ struct scatterlist *sgl,
+ struct scatterlist *sglout,
+ struct qat_request_buffs *buf,
+ dma_addr_t extra_dst_buff,
+ size_t sz_extra_dst_buff,
+ gfp_t flags)
+{
+ struct device *dev = &GET_DEV(accel_dev);
+ int i, sg_nctr = 0;
+ int n = sg_nents(sgl);
+ struct qat_alg_buf_list *bufl;
+ struct qat_alg_buf_list *buflout = NULL;
+ dma_addr_t blp = DMA_MAPPING_ERROR;
+ dma_addr_t bloutp = DMA_MAPPING_ERROR;
+ struct scatterlist *sg;
+ size_t sz_out, sz = struct_size(bufl, bufers, n);
+ int node = dev_to_node(&GET_DEV(accel_dev));
+ int bufl_dma_dir;
+
+ if (unlikely(!n))
+ return -EINVAL;
+
+ buf->sgl_src_valid = false;
+ buf->sgl_dst_valid = false;
+
+ if (n > QAT_MAX_BUFF_DESC) {
+ bufl = kzalloc_node(sz, flags, node);
+ if (unlikely(!bufl))
+ return -ENOMEM;
+ } else {
+ bufl = &buf->sgl_src.sgl_hdr;
+ memset(bufl, 0, sizeof(struct qat_alg_buf_list));
+ buf->sgl_src_valid = true;
+ }
+
+ bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
+ for (i = 0; i < n; i++)
+ bufl->bufers[i].addr = DMA_MAPPING_ERROR;
+
+ for_each_sg(sgl, sg, n, i) {
+ int y = sg_nctr;
+
+ if (!sg->length)
+ continue;
+
+ bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
+ sg->length,
+ bufl_dma_dir);
+ bufl->bufers[y].len = sg->length;
+ if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+ goto err_in;
+ sg_nctr++;
+ }
+ bufl->num_bufs = sg_nctr;
+ blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, blp)))
+ goto err_in;
+ buf->bl = bufl;
+ buf->blp = blp;
+ buf->sz = sz;
+ /* Handle out of place operation */
+ if (sgl != sglout) {
+ struct qat_alg_buf *bufers;
+ int extra_buff = extra_dst_buff ? 1 : 0;
+ int n_sglout = sg_nents(sglout);
+
+ n = n_sglout + extra_buff;
+ sz_out = struct_size(buflout, bufers, n);
+ sg_nctr = 0;
+
+ if (n > QAT_MAX_BUFF_DESC) {
+ buflout = kzalloc_node(sz_out, flags, node);
+ if (unlikely(!buflout))
+ goto err_in;
+ } else {
+ buflout = &buf->sgl_dst.sgl_hdr;
+ memset(buflout, 0, sizeof(struct qat_alg_buf_list));
+ buf->sgl_dst_valid = true;
+ }
+
+ bufers = buflout->bufers;
+ for (i = 0; i < n; i++)
+ bufers[i].addr = DMA_MAPPING_ERROR;
+
+ for_each_sg(sglout, sg, n_sglout, i) {
+ int y = sg_nctr;
+
+ if (!sg->length)
+ continue;
+
+ bufers[y].addr = dma_map_single(dev, sg_virt(sg),
+ sg->length,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
+ goto err_out;
+ bufers[y].len = sg->length;
+ sg_nctr++;
+ }
+ if (extra_buff) {
+ bufers[sg_nctr].addr = extra_dst_buff;
+ bufers[sg_nctr].len = sz_extra_dst_buff;
+ }
+
+ buflout->num_bufs = sg_nctr;
+ buflout->num_bufs += extra_buff;
+ buflout->num_mapped_bufs = sg_nctr;
+ bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, bloutp)))
+ goto err_out;
+ buf->blout = buflout;
+ buf->bloutp = bloutp;
+ buf->sz_out = sz_out;
+ } else {
+ /* Otherwise set the src and dst to the same address */
+ buf->bloutp = buf->blp;
+ buf->sz_out = 0;
+ }
+ return 0;
+
+err_out:
+ if (!dma_mapping_error(dev, bloutp))
+ dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
+
+ n = sg_nents(sglout);
+ for (i = 0; i < n; i++) {
+ if (buflout->bufers[i].addr == extra_dst_buff)
+ break;
+ if (!dma_mapping_error(dev, buflout->bufers[i].addr))
+ dma_unmap_single(dev, buflout->bufers[i].addr,
+ buflout->bufers[i].len,
+ DMA_FROM_DEVICE);
+ }
+
+ if (!buf->sgl_dst_valid)
+ kfree(buflout);
+
+err_in:
+ if (!dma_mapping_error(dev, blp))
+ dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+
+ n = sg_nents(sgl);
+ for (i = 0; i < n; i++)
+ if (!dma_mapping_error(dev, bufl->bufers[i].addr))
+ dma_unmap_single(dev, bufl->bufers[i].addr,
+ bufl->bufers[i].len,
+ bufl_dma_dir);
+
+ if (!buf->sgl_src_valid)
+ kfree(bufl);
+
+ dev_err(dev, "Failed to map buf for dma\n");
+ return -ENOMEM;
+}
+
+int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
+ struct scatterlist *sgl,
+ struct scatterlist *sglout,
+ struct qat_request_buffs *buf,
+ struct qat_sgl_to_bufl_params *params,
+ gfp_t flags)
+{
+ dma_addr_t extra_dst_buff = 0;
+ size_t sz_extra_dst_buff = 0;
+
+ if (params) {
+ extra_dst_buff = params->extra_dst_buff;
+ sz_extra_dst_buff = params->sz_extra_dst_buff;
+ }
+
+ return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf,
+ extra_dst_buff, sz_extra_dst_buff,
+ flags);
+}
+
+static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
+ struct qat_alg_buf_list *bl)
+{
+ struct device *dev = &GET_DEV(accel_dev);
+ int n = bl->num_bufs;
+ int i;
+
+ for (i = 0; i < n; i++)
+ if (!dma_mapping_error(dev, bl->bufers[i].addr))
+ dma_unmap_single(dev, bl->bufers[i].addr,
+ bl->bufers[i].len, DMA_FROM_DEVICE);
+}
+
+static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
+ struct scatterlist *sgl,
+ struct qat_alg_buf_list **bl)
+{
+ struct device *dev = &GET_DEV(accel_dev);
+ struct qat_alg_buf_list *bufl;
+ int node = dev_to_node(dev);
+ struct scatterlist *sg;
+ int n, i, sg_nctr;
+ size_t sz;
+
+ n = sg_nents(sgl);
+ sz = struct_size(bufl, bufers, n);
+ bufl = kzalloc_node(sz, GFP_KERNEL, node);
+ if (unlikely(!bufl))
+ return -ENOMEM;
+
+ for (i = 0; i < n; i++)
+ bufl->bufers[i].addr = DMA_MAPPING_ERROR;
+
+ sg_nctr = 0;
+ for_each_sg(sgl, sg, n, i) {
+ int y = sg_nctr;
+
+ if (!sg->length)
+ continue;
+
+ bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
+ sg->length,
+ DMA_FROM_DEVICE);
+ bufl->bufers[y].len = sg->length;
+ if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+ goto err_map;
+ sg_nctr++;
+ }
+ bufl->num_bufs = sg_nctr;
+ bufl->num_mapped_bufs = sg_nctr;
+
+ *bl = bufl;
+
+ return 0;
+
+err_map:
+ for (i = 0; i < n; i++)
+ if (!dma_mapping_error(dev, bufl->bufers[i].addr))
+ dma_unmap_single(dev, bufl->bufers[i].addr,
+ bufl->bufers[i].len,
+ DMA_FROM_DEVICE);
+ kfree(bufl);
+ *bl = NULL;
+
+ return -ENOMEM;
+}
+
+static void qat_bl_sgl_free_unmap(struct adf_accel_dev *accel_dev,
+ struct scatterlist *sgl,
+ struct qat_alg_buf_list *bl,
+ bool free_bl)
+{
+ if (bl) {
+ qat_bl_sgl_unmap(accel_dev, bl);
+
+ if (free_bl)
+ kfree(bl);
+ }
+ if (sgl)
+ sgl_free(sgl);
+}
+
+static int qat_bl_sgl_alloc_map(struct adf_accel_dev *accel_dev,
+ struct scatterlist **sgl,
+ struct qat_alg_buf_list **bl,
+ unsigned int dlen,
+ gfp_t gfp)
+{
+ struct scatterlist *dst;
+ int ret;
+
+ dst = sgl_alloc(dlen, gfp, NULL);
+ if (!dst) {
+		dev_err(&GET_DEV(accel_dev), "sgl_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ ret = qat_bl_sgl_map(accel_dev, dst, bl);
+ if (ret)
+ goto err;
+
+ *sgl = dst;
+
+ return 0;
+
+err:
+ sgl_free(dst);
+ *sgl = NULL;
+ return ret;
+}
+
+int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
+ struct scatterlist **sg,
+ unsigned int dlen,
+ struct qat_request_buffs *qat_bufs,
+ gfp_t gfp)
+{
+ struct device *dev = &GET_DEV(accel_dev);
+ dma_addr_t new_blp = DMA_MAPPING_ERROR;
+ struct qat_alg_buf_list *new_bl;
+ struct scatterlist *new_sg;
+ size_t new_bl_size;
+ int ret;
+
+ ret = qat_bl_sgl_alloc_map(accel_dev, &new_sg, &new_bl, dlen, gfp);
+ if (ret)
+ return ret;
+
+ new_bl_size = struct_size(new_bl, bufers, new_bl->num_bufs);
+
+ /* Map new firmware SGL descriptor */
+ new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, new_blp)))
+ goto err;
+
+ /* Unmap old firmware SGL descriptor */
+ dma_unmap_single(dev, qat_bufs->bloutp, qat_bufs->sz_out, DMA_TO_DEVICE);
+
+ /* Free and unmap old scatterlist */
+ qat_bl_sgl_free_unmap(accel_dev, *sg, qat_bufs->blout,
+ !qat_bufs->sgl_dst_valid);
+
+ qat_bufs->sgl_dst_valid = false;
+ qat_bufs->blout = new_bl;
+ qat_bufs->bloutp = new_blp;
+ qat_bufs->sz_out = new_bl_size;
+
+ *sg = new_sg;
+
+ return 0;
+err:
+ qat_bl_sgl_free_unmap(accel_dev, new_sg, new_bl, true);
+
+ if (!dma_mapping_error(dev, new_blp))
+ dma_unmap_single(dev, new_blp, new_bl_size, DMA_TO_DEVICE);
+
+ return -ENOMEM;
+}
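qat_bl_sgl_to_bufl() is the generalised replacement for the crypto-only helper removed from qat_algs.c earlier in this patch: passing params == NULL gives the old behaviour, while a non-NULL params appends one pre-mapped "extra" buffer to the destination list (used below by the compression service as an overflow skid buffer). A minimal caller sketch, assuming an already DMA-mapped overflow buffer ovf_p of size ovf_sz (hypothetical names):

    struct qat_sgl_to_bufl_params params = {
            .extra_dst_buff    = ovf_p,   /* already DMA-mapped */
            .sz_extra_dst_buff = ovf_sz,
    };
    int ret = qat_bl_sgl_to_bufl(accel_dev, req->src, req->dst,
                                 &qat_req->buf, &params, GFP_KERNEL);
    if (ret)
            return ret;
    /* ... submit to firmware, then on completion: */
    qat_bl_free_bufl(accel_dev, &qat_req->buf);

Note the extra buffer is counted in num_bufs but not in num_mapped_bufs, so qat_bl_free_bufl() never unmaps it; its lifetime belongs to the caller.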
diff --git a/drivers/crypto/qat/qat_common/qat_bl.h b/drivers/crypto/qat/qat_common/qat_bl.h
new file mode 100644
index 000000000000..8ca5e52ee9e2
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_bl.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2014 - 2022 Intel Corporation */
+#ifndef QAT_BL_H
+#define QAT_BL_H
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+#define QAT_MAX_BUFF_DESC 4
+
+struct qat_alg_buf {
+ u32 len;
+ u32 resrvd;
+ u64 addr;
+} __packed;
+
+struct qat_alg_buf_list {
+ u64 resrvd;
+ u32 num_bufs;
+ u32 num_mapped_bufs;
+ struct qat_alg_buf bufers[];
+} __packed;
+
+struct qat_alg_fixed_buf_list {
+ struct qat_alg_buf_list sgl_hdr;
+ struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
+} __packed __aligned(64);
+
+struct qat_request_buffs {
+ struct qat_alg_buf_list *bl;
+ dma_addr_t blp;
+ struct qat_alg_buf_list *blout;
+ dma_addr_t bloutp;
+ size_t sz;
+ size_t sz_out;
+ bool sgl_src_valid;
+ bool sgl_dst_valid;
+ struct qat_alg_fixed_buf_list sgl_src;
+ struct qat_alg_fixed_buf_list sgl_dst;
+};
+
+struct qat_sgl_to_bufl_params {
+ dma_addr_t extra_dst_buff;
+ size_t sz_extra_dst_buff;
+};
+
+void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
+ struct qat_request_buffs *buf);
+int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
+ struct scatterlist *sgl,
+ struct scatterlist *sglout,
+ struct qat_request_buffs *buf,
+ struct qat_sgl_to_bufl_params *params,
+ gfp_t flags);
+
+static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req)
+{
+ return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+}
+
+int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
+ struct scatterlist **newd,
+ unsigned int dlen,
+ struct qat_request_buffs *qat_bufs,
+ gfp_t gfp);
+
+#endif
diff --git a/drivers/crypto/qat/qat_common/qat_comp_algs.c b/drivers/crypto/qat/qat_common/qat_comp_algs.c
new file mode 100644
index 000000000000..1480d36a8d2b
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_comp_algs.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include <linux/crypto.h>
+#include <crypto/acompress.h>
+#include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "qat_bl.h"
+#include "qat_comp_req.h"
+#include "qat_compression.h"
+#include "qat_algs_send.h"
+
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+enum direction {
+ DECOMPRESSION = 0,
+ COMPRESSION = 1,
+};
+
+struct qat_compression_ctx {
+ u8 comp_ctx[QAT_COMP_CTX_SIZE];
+ struct qat_compression_instance *inst;
+};
+
+struct qat_dst {
+ bool is_null;
+ int resubmitted;
+};
+
+struct qat_compression_req {
+ u8 req[QAT_COMP_REQ_SIZE];
+ struct qat_compression_ctx *qat_compression_ctx;
+ struct acomp_req *acompress_req;
+ struct qat_request_buffs buf;
+ enum direction dir;
+ int actual_dlen;
+ struct qat_alg_req alg_req;
+ struct work_struct resubmit;
+ struct qat_dst dst;
+};
+
+static int qat_alg_send_dc_message(struct qat_compression_req *qat_req,
+ struct qat_compression_instance *inst,
+ struct crypto_async_request *base)
+{
+ struct qat_alg_req *alg_req = &qat_req->alg_req;
+
+ alg_req->fw_req = (u32 *)&qat_req->req;
+ alg_req->tx_ring = inst->dc_tx;
+ alg_req->base = base;
+ alg_req->backlog = &inst->backlog;
+
+ return qat_alg_send_message(alg_req);
+}
+
+static void qat_comp_resubmit(struct work_struct *work)
+{
+ struct qat_compression_req *qat_req =
+ container_of(work, struct qat_compression_req, resubmit);
+ struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
+ struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
+ struct qat_request_buffs *qat_bufs = &qat_req->buf;
+ struct qat_compression_instance *inst = ctx->inst;
+ struct acomp_req *areq = qat_req->acompress_req;
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
+ unsigned int dlen = CRYPTO_ACOMP_DST_MAX;
+ u8 *req = qat_req->req;
+ dma_addr_t dfbuf;
+ int ret;
+
+ areq->dlen = dlen;
+
+ dev_dbg(&GET_DEV(accel_dev), "[%s][%s] retry NULL dst request - dlen = %d\n",
+ crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
+ qat_req->dir == COMPRESSION ? "comp" : "decomp", dlen);
+
+ ret = qat_bl_realloc_map_new_dst(accel_dev, &areq->dst, dlen, qat_bufs,
+ qat_algs_alloc_flags(&areq->base));
+ if (ret)
+ goto err;
+
+ qat_req->dst.resubmitted = true;
+
+ dfbuf = qat_req->buf.bloutp;
+ qat_comp_override_dst(req, dfbuf, dlen);
+
+ ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
+ if (ret != -ENOSPC)
+ return;
+
+err:
+ qat_bl_free_bufl(accel_dev, qat_bufs);
+ areq->base.complete(&areq->base, ret);
+}
+
+static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
+ void *resp)
+{
+ struct acomp_req *areq = qat_req->acompress_req;
+ struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
+ struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
+ struct qat_compression_instance *inst = ctx->inst;
+ int consumed, produced;
+ s8 cmp_err, xlt_err;
+ int res = -EBADMSG;
+ int status;
+ u8 cnv;
+
+ status = qat_comp_get_cmp_status(resp);
+ status |= qat_comp_get_xlt_status(resp);
+ cmp_err = qat_comp_get_cmp_err(resp);
+ xlt_err = qat_comp_get_xlt_err(resp);
+
+ consumed = qat_comp_get_consumed_ctr(resp);
+ produced = qat_comp_get_produced_ctr(resp);
+
+ dev_dbg(&GET_DEV(accel_dev),
+ "[%s][%s][%s] slen = %8d dlen = %8d consumed = %8d produced = %8d cmp_err = %3d xlt_err = %3d",
+ crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
+ qat_req->dir == COMPRESSION ? "comp " : "decomp",
+ status ? "ERR" : "OK ",
+ areq->slen, areq->dlen, consumed, produced, cmp_err, xlt_err);
+
+ areq->dlen = 0;
+
+ if (qat_req->dir == DECOMPRESSION && qat_req->dst.is_null) {
+ if (cmp_err == ERR_CODE_OVERFLOW_ERROR) {
+ if (qat_req->dst.resubmitted) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Output does not fit destination buffer\n");
+ res = -EOVERFLOW;
+ goto end;
+ }
+
+ INIT_WORK(&qat_req->resubmit, qat_comp_resubmit);
+ adf_misc_wq_queue_work(&qat_req->resubmit);
+ return;
+ }
+ }
+
+ if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+ goto end;
+
+ if (qat_req->dir == COMPRESSION) {
+ cnv = qat_comp_get_cmp_cnv_flag(resp);
+ if (unlikely(!cnv)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Verified compression not supported\n");
+ goto end;
+ }
+
+ if (unlikely(produced > qat_req->actual_dlen)) {
+ memset(inst->dc_data->ovf_buff, 0,
+ inst->dc_data->ovf_buff_sz);
+ dev_dbg(&GET_DEV(accel_dev),
+ "Actual buffer overflow: produced=%d, dlen=%d\n",
+ produced, qat_req->actual_dlen);
+ goto end;
+ }
+ }
+
+ res = 0;
+ areq->dlen = produced;
+
+end:
+ qat_bl_free_bufl(accel_dev, &qat_req->buf);
+ areq->base.complete(&areq->base, res);
+}
+
+void qat_comp_alg_callback(void *resp)
+{
+ struct qat_compression_req *qat_req =
+ (void *)(__force long)qat_comp_get_opaque(resp);
+ struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
+
+ qat_comp_generic_callback(qat_req, resp);
+
+ qat_alg_send_backlog(backlog);
+}
+
+static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
+{
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+ struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_compression_instance *inst;
+ int node;
+
+ if (tfm->node == NUMA_NO_NODE)
+ node = numa_node_id();
+ else
+ node = tfm->node;
+
+ memset(ctx, 0, sizeof(*ctx));
+ inst = qat_compression_get_instance_node(node);
+ if (!inst)
+ return -EINVAL;
+ ctx->inst = inst;
+
+ ctx->inst->build_deflate_ctx(ctx->comp_ctx);
+
+ return 0;
+}
+
+static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
+{
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+ struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ qat_compression_put_instance(ctx->inst);
+ memset(ctx, 0, sizeof(*ctx));
+}
+
+static int qat_comp_alg_compress_decompress(struct acomp_req *areq,
+ enum direction dir)
+{
+ struct qat_compression_req *qat_req = acomp_request_ctx(areq);
+ struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(areq);
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+ struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_compression_instance *inst = ctx->inst;
+ struct qat_sgl_to_bufl_params *p_params = NULL;
+ gfp_t f = qat_algs_alloc_flags(&areq->base);
+ struct qat_sgl_to_bufl_params params;
+ unsigned int slen = areq->slen;
+ unsigned int dlen = areq->dlen;
+ dma_addr_t sfbuf, dfbuf;
+ u8 *req = qat_req->req;
+ size_t ovf_buff_sz;
+ int ret;
+
+ if (!areq->src || !slen)
+ return -EINVAL;
+
+ if (areq->dst && !dlen)
+ return -EINVAL;
+
+ qat_req->dst.is_null = false;
+
+ /* Handle acomp requests that require the allocation of a destination
+ * buffer. The size of the destination buffer is double the source
+ * buffer (rounded up to the size of a page) to fit the decompressed
+ * output or an expansion on the data for compression.
+ */
+ if (!areq->dst) {
+ qat_req->dst.is_null = true;
+
+ dlen = round_up(2 * slen, PAGE_SIZE);
+ areq->dst = sgl_alloc(dlen, f, NULL);
+ if (!areq->dst)
+ return -ENOMEM;
+
+ areq->dlen = dlen;
+ qat_req->dst.resubmitted = false;
+ }
+
+ if (dir == COMPRESSION) {
+ params.extra_dst_buff = inst->dc_data->ovf_buff_p;
+ ovf_buff_sz = inst->dc_data->ovf_buff_sz;
+ params.sz_extra_dst_buff = ovf_buff_sz;
+ p_params = &params;
+ }
+
+ ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
+ &qat_req->buf, p_params, f);
+ if (unlikely(ret))
+ return ret;
+
+ sfbuf = qat_req->buf.blp;
+ dfbuf = qat_req->buf.bloutp;
+ qat_req->qat_compression_ctx = ctx;
+ qat_req->acompress_req = areq;
+ qat_req->dir = dir;
+
+ if (dir == COMPRESSION) {
+ qat_req->actual_dlen = dlen;
+ dlen += ovf_buff_sz;
+ qat_comp_create_compression_req(ctx->comp_ctx, req,
+ (u64)(__force long)sfbuf, slen,
+ (u64)(__force long)dfbuf, dlen,
+ (u64)(__force long)qat_req);
+ } else {
+ qat_comp_create_decompression_req(ctx->comp_ctx, req,
+ (u64)(__force long)sfbuf, slen,
+ (u64)(__force long)dfbuf, dlen,
+ (u64)(__force long)qat_req);
+ }
+
+ ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
+ if (ret == -ENOSPC)
+ qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
+
+ return ret;
+}
+
+static int qat_comp_alg_compress(struct acomp_req *req)
+{
+ return qat_comp_alg_compress_decompress(req, COMPRESSION);
+}
+
+static int qat_comp_alg_decompress(struct acomp_req *req)
+{
+ return qat_comp_alg_compress_decompress(req, DECOMPRESSION);
+}
+
+static struct acomp_alg qat_acomp[] = { {
+ .base = {
+ .cra_name = "deflate",
+ .cra_driver_name = "qat_deflate",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_ctxsize = sizeof(struct qat_compression_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = qat_comp_alg_init_tfm,
+ .exit = qat_comp_alg_exit_tfm,
+ .compress = qat_comp_alg_compress,
+ .decompress = qat_comp_alg_decompress,
+ .dst_free = sgl_free,
+ .reqsize = sizeof(struct qat_compression_req),
+} };
+
+int qat_comp_algs_register(void)
+{
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (++active_devs == 1)
+ ret = crypto_register_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
+ mutex_unlock(&algs_lock);
+ return ret;
+}
+
+void qat_comp_algs_unregister(void)
+{
+ mutex_lock(&algs_lock);
+ if (--active_devs == 0)
+ crypto_unregister_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
+ mutex_unlock(&algs_lock);
+}
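For the NULL-destination path above, the sizing rule is easiest to see with numbers: for slen = 3000 bytes, dlen = round_up(2 * 3000, PAGE_SIZE) = round_up(6000, 4096) = 8192 on a 4 KiB-page system. If decompression still overflows that guess, the completion handler queues qat_comp_resubmit(), which retries exactly once with dlen = CRYPTO_ACOMP_DST_MAX (128 KiB); a second overflow is reported to the caller as -EOVERFLOW. A condensed sketch of the first-attempt rule:

    static unsigned int qat_null_dst_len(unsigned int slen)
    {
            /* first attempt: twice the input, page aligned */
            return round_up(2 * slen, PAGE_SIZE);
    }

The dst_free = sgl_free hook in the acomp_alg definition lets the acomp core release the driver-allocated destination scatterlist once the caller is done with it.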
diff --git a/drivers/crypto/qat/qat_common/qat_comp_req.h b/drivers/crypto/qat/qat_common/qat_comp_req.h
new file mode 100644
index 000000000000..404e32c5e778
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_comp_req.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef _QAT_COMP_REQ_H_
+#define _QAT_COMP_REQ_H_
+
+#include "icp_qat_fw_comp.h"
+
+#define QAT_COMP_REQ_SIZE (sizeof(struct icp_qat_fw_comp_req))
+#define QAT_COMP_CTX_SIZE (QAT_COMP_REQ_SIZE * 2)
+
+static inline void qat_comp_create_req(void *ctx, void *req, u64 src, u32 slen,
+ u64 dst, u32 dlen, u64 opaque)
+{
+ struct icp_qat_fw_comp_req *fw_tmpl = ctx;
+ struct icp_qat_fw_comp_req *fw_req = req;
+ struct icp_qat_fw_comp_req_params *req_pars = &fw_req->comp_pars;
+
+ memcpy(fw_req, fw_tmpl, sizeof(*fw_req));
+ fw_req->comn_mid.src_data_addr = src;
+ fw_req->comn_mid.src_length = slen;
+ fw_req->comn_mid.dest_data_addr = dst;
+ fw_req->comn_mid.dst_length = dlen;
+ fw_req->comn_mid.opaque_data = opaque;
+ req_pars->comp_len = slen;
+ req_pars->out_buffer_sz = dlen;
+}
+
+static inline void qat_comp_override_dst(void *req, u64 dst, u32 dlen)
+{
+ struct icp_qat_fw_comp_req *fw_req = req;
+ struct icp_qat_fw_comp_req_params *req_pars = &fw_req->comp_pars;
+
+ fw_req->comn_mid.dest_data_addr = dst;
+ fw_req->comn_mid.dst_length = dlen;
+ req_pars->out_buffer_sz = dlen;
+}
+
+static inline void qat_comp_create_compression_req(void *ctx, void *req,
+ u64 src, u32 slen,
+ u64 dst, u32 dlen,
+ u64 opaque)
+{
+ qat_comp_create_req(ctx, req, src, slen, dst, dlen, opaque);
+}
+
+static inline void qat_comp_create_decompression_req(void *ctx, void *req,
+ u64 src, u32 slen,
+ u64 dst, u32 dlen,
+ u64 opaque)
+{
+ struct icp_qat_fw_comp_req *fw_tmpl = ctx;
+
+ fw_tmpl++;
+ qat_comp_create_req(fw_tmpl, req, src, slen, dst, dlen, opaque);
+}
+
+static inline u32 qat_comp_get_consumed_ctr(void *resp)
+{
+ struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+ return qat_resp->comp_resp_pars.input_byte_counter;
+}
+
+static inline u32 qat_comp_get_produced_ctr(void *resp)
+{
+ struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+ return qat_resp->comp_resp_pars.output_byte_counter;
+}
+
+static inline u32 qat_comp_get_produced_adler32(void *resp)
+{
+ struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+ return qat_resp->comp_resp_pars.crc.legacy.curr_adler_32;
+}
+
+static inline u64 qat_comp_get_opaque(void *resp)
+{
+ struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+ return qat_resp->opaque_data;
+}
+
+static inline s8 qat_comp_get_cmp_err(void *resp)
+{
+ struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+ return qat_resp->comn_resp.comn_error.cmp_err_code;
+}
+
+static inline s8 qat_comp_get_xlt_err(void *resp)
+{
+ struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+ return qat_resp->comn_resp.comn_error.xlat_err_code;
+}
+
+static inline s8 qat_comp_get_cmp_status(void *resp)
+{
+	struct icp_qat_fw_comp_resp *qat_resp = resp;
+	u8 stat_field = qat_resp->comn_resp.comn_status;
+
+	return ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(stat_field);
+}
+
+static inline s8 qat_comp_get_xlt_status(void *resp)
+{
+	struct icp_qat_fw_comp_resp *qat_resp = resp;
+	u8 stat_field = qat_resp->comn_resp.comn_status;
+
+	return ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(stat_field);
+}
+
+static inline u8 qat_comp_get_cmp_cnv_flag(void *resp)
+{
+ struct icp_qat_fw_comp_resp *qat_resp = resp;
+ u8 flags = qat_resp->comn_resp.hdr_flags;
+
+ return ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(flags);
+}
+
+#endif
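The QAT_COMP_CTX_SIZE = 2 * QAT_COMP_REQ_SIZE sizing encodes the context layout: the per-tfm context holds two pre-built firmware request templates back to back, compression first, decompression second, which is why qat_comp_create_decompression_req() selects its template simply by stepping past the first (fw_tmpl++). Equivalently:

    struct icp_qat_fw_comp_req *tmpls = ctx;  /* ctx is QAT_COMP_CTX_SIZE bytes */
    struct icp_qat_fw_comp_req *comp_tmpl   = &tmpls[0];
    struct icp_qat_fw_comp_req *decomp_tmpl = &tmpls[1];

Both templates are expected to be filled in by the device-specific build_deflate_ctx() hook at tfm init time (see qat_comp_algs.c above), so per-request setup reduces to a memcpy() plus patching in the buffer addresses and lengths.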
diff --git a/drivers/crypto/qat/qat_common/qat_compression.c b/drivers/crypto/qat/qat_common/qat_compression.c
new file mode 100644
index 000000000000..9fd10f4242f8
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_compression.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_transport.h"
+#include "adf_transport_access_macros.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "qat_compression.h"
+#include "icp_qat_fw.h"
+
+#define SEC ADF_KERNEL_SEC
+
+static struct service_hndl qat_compression;
+
+void qat_compression_put_instance(struct qat_compression_instance *inst)
+{
+ atomic_dec(&inst->refctr);
+ adf_dev_put(inst->accel_dev);
+}
+
+static int qat_compression_free_instances(struct adf_accel_dev *accel_dev)
+{
+ struct qat_compression_instance *inst;
+ struct list_head *list_ptr, *tmp;
+ int i;
+
+ list_for_each_safe(list_ptr, tmp, &accel_dev->compression_list) {
+ inst = list_entry(list_ptr,
+ struct qat_compression_instance, list);
+
+ for (i = 0; i < atomic_read(&inst->refctr); i++)
+ qat_compression_put_instance(inst);
+
+ if (inst->dc_tx)
+ adf_remove_ring(inst->dc_tx);
+
+ if (inst->dc_rx)
+ adf_remove_ring(inst->dc_rx);
+
+ list_del(list_ptr);
+ kfree(inst);
+ }
+ return 0;
+}
+
+struct qat_compression_instance *qat_compression_get_instance_node(int node)
+{
+ struct qat_compression_instance *inst = NULL;
+ struct adf_accel_dev *accel_dev = NULL;
+ unsigned long best = ~0;
+ struct list_head *itr;
+
+ list_for_each(itr, adf_devmgr_get_head()) {
+ struct adf_accel_dev *tmp_dev;
+ unsigned long ctr;
+ int tmp_dev_node;
+
+ tmp_dev = list_entry(itr, struct adf_accel_dev, list);
+ tmp_dev_node = dev_to_node(&GET_DEV(tmp_dev));
+
+ if ((node == tmp_dev_node || tmp_dev_node < 0) &&
+ adf_dev_started(tmp_dev) && !list_empty(&tmp_dev->compression_list)) {
+ ctr = atomic_read(&tmp_dev->ref_count);
+ if (best > ctr) {
+ accel_dev = tmp_dev;
+ best = ctr;
+ }
+ }
+ }
+
+ if (!accel_dev) {
+ pr_info("QAT: Could not find a device on node %d\n", node);
+ /* Get any started device */
+ list_for_each(itr, adf_devmgr_get_head()) {
+ struct adf_accel_dev *tmp_dev;
+
+ tmp_dev = list_entry(itr, struct adf_accel_dev, list);
+ if (adf_dev_started(tmp_dev) &&
+ !list_empty(&tmp_dev->compression_list)) {
+ accel_dev = tmp_dev;
+ break;
+ }
+ }
+ }
+
+ if (!accel_dev)
+ return NULL;
+
+ best = ~0;
+ list_for_each(itr, &accel_dev->compression_list) {
+ struct qat_compression_instance *tmp_inst;
+ unsigned long ctr;
+
+ tmp_inst = list_entry(itr, struct qat_compression_instance, list);
+ ctr = atomic_read(&tmp_inst->refctr);
+ if (best > ctr) {
+ inst = tmp_inst;
+ best = ctr;
+ }
+ }
+ if (inst) {
+ if (adf_dev_get(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
+ return NULL;
+ }
+ atomic_inc(&inst->refctr);
+ }
+ return inst;
+}
+
+static int qat_compression_create_instances(struct adf_accel_dev *accel_dev)
+{
+ struct qat_compression_instance *inst;
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ unsigned long num_inst, num_msg_dc;
+ unsigned long bank;
+ int msg_size;
+ int ret;
+ int i;
+
+ INIT_LIST_HEAD(&accel_dev->compression_list);
+ strscpy(key, ADF_NUM_DC, sizeof(key));
+ ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+ if (ret)
+ return ret;
+
+ ret = kstrtoul(val, 10, &num_inst);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_inst; i++) {
+ inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
+ if (!inst) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ list_add_tail(&inst->list, &accel_dev->compression_list);
+ inst->id = i;
+ atomic_set(&inst->refctr, 0);
+ inst->accel_dev = accel_dev;
+ inst->build_deflate_ctx = GET_DC_OPS(accel_dev)->build_deflate_ctx;
+
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
+ ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+ if (ret)
+ return ret;
+
+ ret = kstrtoul(val, 10, &bank);
+ if (ret)
+ return ret;
+
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
+ ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+ if (ret)
+ return ret;
+
+ ret = kstrtoul(val, 10, &num_msg_dc);
+ if (ret)
+ return ret;
+
+ msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
+ ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc,
+ msg_size, key, NULL, 0, &inst->dc_tx);
+ if (ret)
+ return ret;
+
+ msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
+ ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc,
+ msg_size, key, qat_comp_alg_callback, 0,
+ &inst->dc_rx);
+ if (ret)
+ return ret;
+
+ inst->dc_data = accel_dev->dc_data;
+ INIT_LIST_HEAD(&inst->backlog.list);
+ spin_lock_init(&inst->backlog.lock);
+ }
+ return 0;
+err:
+ qat_compression_free_instances(accel_dev);
+ return ret;
+}
+
+static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
+{
+ struct device *dev = &GET_DEV(accel_dev);
+ dma_addr_t obuff_p = DMA_MAPPING_ERROR;
+ size_t ovf_buff_sz = QAT_COMP_MAX_SKID;
+ struct adf_dc_data *dc_data = NULL;
+ u8 *obuff = NULL;
+
+ dc_data = devm_kzalloc(dev, sizeof(*dc_data), GFP_KERNEL);
+ if (!dc_data)
+ goto err;
+
+ obuff = kzalloc_node(ovf_buff_sz, GFP_KERNEL, dev_to_node(dev));
+ if (!obuff)
+ goto err;
+
+ obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, obuff_p)))
+ goto err;
+
+ dc_data->ovf_buff = obuff;
+ dc_data->ovf_buff_p = obuff_p;
+ dc_data->ovf_buff_sz = ovf_buff_sz;
+
+ accel_dev->dc_data = dc_data;
+
+ return 0;
+
+err:
+ accel_dev->dc_data = NULL;
+ kfree(obuff);
+ devm_kfree(dev, dc_data);
+ return -ENOMEM;
+}
+
+static void qat_free_dc_data(struct adf_accel_dev *accel_dev)
+{
+ struct adf_dc_data *dc_data = accel_dev->dc_data;
+ struct device *dev = &GET_DEV(accel_dev);
+
+ if (!dc_data)
+ return;
+
+ dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz,
+ DMA_FROM_DEVICE);
+ memset(dc_data->ovf_buff, 0, dc_data->ovf_buff_sz);
+ kfree(dc_data->ovf_buff);
+ devm_kfree(dev, dc_data);
+ accel_dev->dc_data = NULL;
+}
+
+static int qat_compression_init(struct adf_accel_dev *accel_dev)
+{
+ int ret;
+
+ ret = qat_compression_alloc_dc_data(accel_dev);
+ if (ret)
+ return ret;
+
+ ret = qat_compression_create_instances(accel_dev);
+ if (ret)
+ qat_free_dc_data(accel_dev);
+
+ return ret;
+}
+
+static int qat_compression_shutdown(struct adf_accel_dev *accel_dev)
+{
+ qat_free_dc_data(accel_dev);
+ return qat_compression_free_instances(accel_dev);
+}
+
+static int qat_compression_event_handler(struct adf_accel_dev *accel_dev,
+ enum adf_event event)
+{
+ int ret;
+
+ switch (event) {
+ case ADF_EVENT_INIT:
+ ret = qat_compression_init(accel_dev);
+ break;
+ case ADF_EVENT_SHUTDOWN:
+ ret = qat_compression_shutdown(accel_dev);
+ break;
+ case ADF_EVENT_RESTARTING:
+ case ADF_EVENT_RESTARTED:
+ case ADF_EVENT_START:
+ case ADF_EVENT_STOP:
+ default:
+ ret = 0;
+ }
+ return ret;
+}
+
+int qat_compression_register(void)
+{
+ memset(&qat_compression, 0, sizeof(qat_compression));
+ qat_compression.event_hld = qat_compression_event_handler;
+ qat_compression.name = "qat_compression";
+ return adf_service_register(&qat_compression);
+}
+
+int qat_compression_unregister(void)
+{
+ return adf_service_unregister(&qat_compression);
+}
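
qat_compression_register()/unregister() only install and remove the event handler; the per-device instances are created later, when the ADF core delivers ADF_EVENT_INIT. A hedged sketch of how a caller pairs the two (example_init/example_exit are hypothetical names; in the tree the calls sit in qat_common's module init path):

	#include <linux/module.h>

	static int __init example_init(void)
	{
		return qat_compression_register();
	}

	static void __exit example_exit(void)
	{
		qat_compression_unregister();
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");
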
diff --git a/drivers/crypto/qat/qat_common/qat_compression.h b/drivers/crypto/qat/qat_common/qat_compression.h
new file mode 100644
index 000000000000..aebac2302dcf
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_compression.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef _QAT_COMPRESSION_H_
+#define _QAT_COMPRESSION_H_
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "qat_algs_send.h"
+
+#define QAT_COMP_MAX_SKID 4096
+
+struct qat_compression_instance {
+ struct adf_etr_ring_data *dc_tx;
+ struct adf_etr_ring_data *dc_rx;
+ struct adf_accel_dev *accel_dev;
+ struct list_head list;
+ unsigned long state;
+ int id;
+ atomic_t refctr;
+ struct qat_instance_backlog backlog;
+ struct adf_dc_data *dc_data;
+ void (*build_deflate_ctx)(void *ctx);
+};
+
+static inline bool adf_hw_dev_has_compression(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ u32 mask = ~hw_device->accel_capabilities_mask;
+
+ if (mask & ADF_ACCEL_CAPABILITIES_COMPRESSION)
+ return false;
+
+ return true;
+}
+
+#endif
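
The get/put pair above hands out a NUMA-aware, reference-counted instance handle. A minimal usage sketch, assuming process context; submit_one() is a hypothetical caller and the request-building details are elided:

	#include <linux/topology.h>

	static int submit_one(void)
	{
		struct qat_compression_instance *inst;

		inst = qat_compression_get_instance_node(numa_node_id());
		if (!inst)
			return -ENODEV;	/* no started device offers compression */

		/* ... build a firmware request and send it on inst->dc_tx;
		 * the response arrives on inst->dc_rx via qat_comp_alg_callback ... */

		qat_compression_put_instance(inst);
		return 0;
	}
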
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index 9341d892533a..e31199eade5b 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -5,7 +5,6 @@
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport.h"
-#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_gen2_hw_data.h"
@@ -126,126 +125,9 @@ int qat_crypto_vf_dev_config(struct adf_accel_dev *accel_dev)
return -EFAULT;
}
- return qat_crypto_dev_config(accel_dev);
+ return GET_HW_DATA(accel_dev)->dev_config(accel_dev);
}
-/**
- * qat_crypto_dev_config() - create dev config required to create crypto inst.
- *
- * @accel_dev: Pointer to acceleration device.
- *
- * Function creates device configuration required to create crypto instances
- *
- * Return: 0 on success, error code otherwise.
- */
-int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
-{
- char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
- int banks = GET_MAX_BANKS(accel_dev);
- int cpus = num_online_cpus();
- unsigned long val;
- int instances;
- int ret;
- int i;
-
- if (adf_hw_dev_has_crypto(accel_dev))
- instances = min(cpus, banks);
- else
- instances = 0;
-
- ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
- if (ret)
- goto err;
-
- ret = adf_cfg_section_add(accel_dev, "Accelerator0");
- if (ret)
- goto err;
-
- for (i = 0; i < instances; i++) {
- val = i;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
- i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
- val = 128;
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 512;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 0;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 2;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 8;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 10;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = ADF_COALESCING_DEF_TIME;
- snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
- ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
- key, &val, ADF_DEC);
- if (ret)
- goto err;
- }
-
- val = i;
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
- &val, ADF_DEC);
- if (ret)
- goto err;
-
- set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
- return 0;
-err:
- dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
- return ret;
-}
-EXPORT_SYMBOL_GPL(qat_crypto_dev_config);
-
static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
{
unsigned long num_inst, num_msg_sym, num_msg_asym;
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
index df3c738ce323..6a0e961bb9dc 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -8,19 +8,8 @@
#include <linux/slab.h>
#include "adf_accel_devices.h"
#include "icp_qat_fw_la.h"
-
-struct qat_instance_backlog {
- struct list_head list;
- spinlock_t lock; /* protects backlog list */
-};
-
-struct qat_alg_req {
- u32 *fw_req;
- struct adf_etr_ring_data *tx_ring;
- struct crypto_async_request *base;
- struct list_head list;
- struct qat_instance_backlog *backlog;
-};
+#include "qat_algs_send.h"
+#include "qat_bl.h"
struct qat_crypto_instance {
struct adf_etr_ring_data *sym_tx;
@@ -35,39 +24,6 @@ struct qat_crypto_instance {
struct qat_instance_backlog backlog;
};
-#define QAT_MAX_BUFF_DESC 4
-
-struct qat_alg_buf {
- u32 len;
- u32 resrvd;
- u64 addr;
-} __packed;
-
-struct qat_alg_buf_list {
- u64 resrvd;
- u32 num_bufs;
- u32 num_mapped_bufs;
- struct qat_alg_buf bufers[];
-} __packed;
-
-struct qat_alg_fixed_buf_list {
- struct qat_alg_buf_list sgl_hdr;
- struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
-} __packed __aligned(64);
-
-struct qat_crypto_request_buffs {
- struct qat_alg_buf_list *bl;
- dma_addr_t blp;
- struct qat_alg_buf_list *blout;
- dma_addr_t bloutp;
- size_t sz;
- size_t sz_out;
- bool sgl_src_valid;
- bool sgl_dst_valid;
- struct qat_alg_fixed_buf_list sgl_src;
- struct qat_alg_fixed_buf_list sgl_dst;
-};
-
struct qat_crypto_request;
struct qat_crypto_request {
@@ -80,7 +36,7 @@ struct qat_crypto_request {
struct aead_request *aead_req;
struct skcipher_request *skcipher_req;
};
- struct qat_crypto_request_buffs buf;
+ struct qat_request_buffs buf;
void (*cb)(struct icp_qat_fw_la_resp *resp,
struct qat_crypto_request *req);
union {
@@ -109,9 +65,4 @@ static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev)
return true;
}
-static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req)
-{
- return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
-}
-
#endif
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index cb3bdd3618fb..bc80bb475118 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -2,6 +2,8 @@
/* Copyright(c) 2014 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_dh895xcc_hw_data.h"
@@ -234,12 +236,14 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->enable_ints = adf_gen2_enable_ints;
hw_data->reset_device = adf_reset_sbr;
hw_data->disable_iov = adf_disable_sriov;
+ hw_data->dev_config = adf_gen2_dev_config;
adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts;
hw_data->pfvf_ops.disable_all_vf2pf_interrupts = disable_all_vf2pf_interrupts;
hw_data->pfvf_ops.disable_pending_vf2pf_interrupts = disable_pending_vf2pf_interrupts;
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen2_init_dc_ops(&hw_data->dc_ops);
}
void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index acca56752aa0..ebeb17b67fcd 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -201,7 +201,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err_disable_aer;
}
- ret = qat_crypto_dev_config(accel_dev);
+ ret = hw_data->dev_config(accel_dev);
if (ret)
goto out_err_disable_aer;
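
Both hunks above replace the direct qat_crypto_dev_config() call with the hw_data->dev_config hook, so each device generation supplies its own configuration routine (adf_gen2_dev_config for this family). The indirection, reduced to a sketch with the real struct trimmed down:

	/* Sketch only; adf_hw_device_data has many more members
	 * (see adf_accel_devices.h). */
	struct hw_ops_sketch {
		int (*dev_config)(struct adf_accel_dev *accel_dev);
	};

	static int probe_sketch(struct adf_accel_dev *accel_dev,
				const struct hw_ops_sketch *hw_data)
	{
		/* gen2 parts point this at adf_gen2_dev_config();
		 * newer generations install their own routine */
		return hw_data->dev_config(accel_dev);
	}
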
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
index 31c14d7e1c11..70e56cc16ece 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -2,6 +2,8 @@
/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include <adf_pfvf_vf_msg.h>
@@ -86,9 +88,11 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
hw_data->get_sku = get_sku;
hw_data->enable_ints = adf_vf_void_noop;
hw_data->dev_class->instances++;
+ hw_data->dev_config = adf_gen2_dev_config;
adf_devmgr_update_class_index(hw_data);
adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen2_init_dc_ops(&hw_data->dc_ops);
}
void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index 18756b2e1c91..c1485e702b3e 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -177,8 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_dev_shutdown;
- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
ret = adf_dev_start(accel_dev);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
index 6eb4d2e35629..7d811728f047 100644
--- a/drivers/crypto/qce/aead.c
+++ b/drivers/crypto/qce/aead.c
@@ -24,7 +24,7 @@ static void qce_aead_done(void *data)
{
struct crypto_async_request *async_req = data;
struct aead_request *req = aead_request_cast(async_req);
- struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
struct qce_device *qce = tmpl->qce;
@@ -92,7 +92,7 @@ static void qce_aead_done(void *data)
static struct scatterlist *
qce_aead_prepare_result_buf(struct sg_table *tbl, struct aead_request *req)
{
- struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
struct qce_device *qce = tmpl->qce;
@@ -103,7 +103,7 @@ qce_aead_prepare_result_buf(struct sg_table *tbl, struct aead_request *req)
static struct scatterlist *
qce_aead_prepare_ccm_result_buf(struct sg_table *tbl, struct aead_request *req)
{
- struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
sg_init_one(&rctx->result_sg, rctx->ccmresult_buf, QCE_BAM_BURST_SIZE);
return qce_sgtable_add(tbl, &rctx->result_sg, QCE_BAM_BURST_SIZE);
@@ -112,7 +112,7 @@ qce_aead_prepare_ccm_result_buf(struct sg_table *tbl, struct aead_request *req)
static struct scatterlist *
qce_aead_prepare_dst_buf(struct aead_request *req)
{
- struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
struct qce_device *qce = tmpl->qce;
struct scatterlist *sg, *msg_sg, __sg[2];
@@ -186,7 +186,7 @@ qce_aead_ccm_prepare_buf_assoclen(struct aead_request *req)
{
struct scatterlist *sg, *msg_sg, __sg[2];
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
unsigned int assoclen = rctx->assoclen;
unsigned int adata_header_len, cryptlen, totallen;
@@ -300,7 +300,7 @@ err_free:
static int qce_aead_prepare_buf(struct aead_request *req)
{
- struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
struct qce_device *qce = tmpl->qce;
struct scatterlist *sg;
@@ -328,7 +328,7 @@ static int qce_aead_prepare_buf(struct aead_request *req)
static int qce_aead_ccm_prepare_buf(struct aead_request *req)
{
- struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct scatterlist *sg;
@@ -408,7 +408,7 @@ static int
qce_aead_async_req_handle(struct crypto_async_request *async_req)
{
struct aead_request *req = aead_request_cast(async_req);
- struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
@@ -502,7 +502,7 @@ error_free:
static int qce_aead_crypt(struct aead_request *req, int encrypt)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct qce_alg_template *tmpl = to_aead_tmpl(tfm);
unsigned int blocksize = crypto_aead_blocksize(tfm);
@@ -675,8 +675,8 @@ static int qce_aead_init(struct crypto_aead *tfm)
if (IS_ERR(ctx->fallback))
return PTR_ERR(ctx->fallback);
- crypto_aead_set_reqsize(tfm, sizeof(struct qce_aead_reqctx) +
- crypto_aead_reqsize(ctx->fallback));
+ crypto_aead_set_reqsize_dma(tfm, sizeof(struct qce_aead_reqctx) +
+ crypto_aead_reqsize(ctx->fallback));
return 0;
}
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 7c612ba5068f..04253a8d3340 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -3,6 +3,7 @@
* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*/
+#include <crypto/internal/hash.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/types.h>
@@ -147,7 +148,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
{
struct ahash_request *req = ahash_request_cast(async_req);
struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
- struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
struct qce_device *qce = tmpl->qce;
unsigned int digestsize = crypto_ahash_digestsize(ahash);
@@ -419,7 +420,7 @@ static unsigned int qce_be32_to_cpu_array(u32 *dst, const u8 *src, unsigned int
static int qce_setup_regs_aead(struct crypto_async_request *async_req)
{
struct aead_request *req = aead_request_cast(async_req);
- struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
struct qce_device *qce = tmpl->qce;
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 37bafd7aeb79..fc72af8aa9a7 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -38,7 +38,7 @@ static void qce_ahash_done(void *data)
struct crypto_async_request *async_req = data;
struct ahash_request *req = ahash_request_cast(async_req);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
struct qce_device *qce = tmpl->qce;
struct qce_result_dump *result = qce->dma.result_buf;
@@ -75,7 +75,7 @@ static void qce_ahash_done(void *data)
static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
{
struct ahash_request *req = ahash_request_cast(async_req);
- struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
struct qce_device *qce = tmpl->qce;
@@ -132,7 +132,7 @@ error_unmap_src:
static int qce_ahash_init(struct ahash_request *req)
{
- struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
const u32 *std_iv = tmpl->std_iv;
@@ -147,7 +147,7 @@ static int qce_ahash_init(struct ahash_request *req)
static int qce_ahash_export(struct ahash_request *req, void *out)
{
- struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
struct qce_sha_saved_state *export_state = out;
memcpy(export_state->pending_buf, rctx->buf, rctx->buflen);
@@ -164,7 +164,7 @@ static int qce_ahash_export(struct ahash_request *req, void *out)
static int qce_ahash_import(struct ahash_request *req, const void *in)
{
- struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
const struct qce_sha_saved_state *import_state = in;
memset(rctx, 0, sizeof(*rctx));
@@ -183,7 +183,7 @@ static int qce_ahash_import(struct ahash_request *req, const void *in)
static int qce_ahash_update(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
struct qce_device *qce = tmpl->qce;
struct scatterlist *sg_last, *sg;
@@ -275,7 +275,7 @@ static int qce_ahash_update(struct ahash_request *req)
static int qce_ahash_final(struct ahash_request *req)
{
- struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
struct qce_device *qce = tmpl->qce;
@@ -302,7 +302,7 @@ static int qce_ahash_final(struct ahash_request *req)
static int qce_ahash_digest(struct ahash_request *req)
{
- struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
struct qce_device *qce = tmpl->qce;
int ret;
@@ -395,7 +395,7 @@ static int qce_ahash_cra_init(struct crypto_tfm *tfm)
struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_ahash_set_reqsize(ahash, sizeof(struct qce_sha_reqctx));
+ crypto_ahash_set_reqsize_dma(ahash, sizeof(struct qce_sha_reqctx));
memset(ctx, 0, sizeof(*ctx));
return 0;
}
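
All three qce files make the same mechanical change: request contexts the hardware DMAs into are now sized with the _dma setters and fetched with the matching _dma accessors, letting the crypto API pad the context to the DMA alignment now that kmalloc() no longer guarantees it. The pairing in sketch form; my_reqctx and both functions are placeholder names:

	#include <crypto/internal/hash.h>

	struct my_reqctx {
		u32 state[8];	/* placeholder DMA-visible state */
	};

	static int my_cra_init(struct crypto_tfm *tfm)
	{
		/* reserve a request context padded for DMA ... */
		crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
					     sizeof(struct my_reqctx));
		return 0;
	}

	static int my_digest(struct ahash_request *req)
	{
		/* ... and always fetch it with the matching accessor */
		struct my_reqctx *rctx = ahash_request_ctx_dma(req);

		memset(rctx, 0, sizeof(*rctx));
		return 0;
	}
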
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index 35d73061d156..9f6ba770a90a 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -14,235 +14,162 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/reset.h>
-static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
-{
- int err;
-
- err = clk_prepare_enable(dev->sclk);
- if (err) {
- dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n",
- __func__, __LINE__);
- goto err_return;
- }
- err = clk_prepare_enable(dev->aclk);
- if (err) {
- dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n",
- __func__, __LINE__);
- goto err_aclk;
- }
- err = clk_prepare_enable(dev->hclk);
- if (err) {
- dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n",
- __func__, __LINE__);
- goto err_hclk;
- }
- err = clk_prepare_enable(dev->dmaclk);
- if (err) {
- dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n",
- __func__, __LINE__);
- goto err_dmaclk;
- }
- return err;
-err_dmaclk:
- clk_disable_unprepare(dev->hclk);
-err_hclk:
- clk_disable_unprepare(dev->aclk);
-err_aclk:
- clk_disable_unprepare(dev->sclk);
-err_return:
- return err;
-}
+static struct rockchip_ip rocklist = {
+ .dev_list = LIST_HEAD_INIT(rocklist.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(rocklist.lock),
+};
-static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
+struct rk_crypto_info *get_rk_crypto(void)
{
- clk_disable_unprepare(dev->dmaclk);
- clk_disable_unprepare(dev->hclk);
- clk_disable_unprepare(dev->aclk);
- clk_disable_unprepare(dev->sclk);
+ struct rk_crypto_info *first;
+
+ spin_lock(&rocklist.lock);
+ first = list_first_entry_or_null(&rocklist.dev_list,
+ struct rk_crypto_info, list);
+ list_rotate_left(&rocklist.dev_list);
+ spin_unlock(&rocklist.lock);
+ return first;
}
-static int check_alignment(struct scatterlist *sg_src,
- struct scatterlist *sg_dst,
- int align_mask)
-{
- int in, out, align;
+static const struct rk_variant rk3288_variant = {
+ .num_clks = 4,
+ .rkclks = {
+ { "sclk", 150000000},
+ }
+};
- in = IS_ALIGNED((uint32_t)sg_src->offset, 4) &&
- IS_ALIGNED((uint32_t)sg_src->length, align_mask);
- if (!sg_dst)
- return in;
- out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) &&
- IS_ALIGNED((uint32_t)sg_dst->length, align_mask);
- align = in && out;
+static const struct rk_variant rk3328_variant = {
+ .num_clks = 3,
+};
- return (align && (sg_src->length == sg_dst->length));
-}
+static const struct rk_variant rk3399_variant = {
+ .num_clks = 3,
+};
-static int rk_load_data(struct rk_crypto_info *dev,
- struct scatterlist *sg_src,
- struct scatterlist *sg_dst)
+static int rk_crypto_get_clks(struct rk_crypto_info *dev)
{
- unsigned int count;
-
- dev->aligned = dev->aligned ?
- check_alignment(sg_src, sg_dst, dev->align_size) :
- dev->aligned;
- if (dev->aligned) {
- count = min(dev->left_bytes, sg_src->length);
- dev->left_bytes -= count;
-
- if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
- dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n",
- __func__, __LINE__);
- return -EINVAL;
- }
- dev->addr_in = sg_dma_address(sg_src);
-
- if (sg_dst) {
- if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
- dev_err(dev->dev,
- "[%s:%d] dma_map_sg(dst) error\n",
- __func__, __LINE__);
- dma_unmap_sg(dev->dev, sg_src, 1,
- DMA_TO_DEVICE);
- return -EINVAL;
- }
- dev->addr_out = sg_dma_address(sg_dst);
- }
- } else {
- count = (dev->left_bytes > PAGE_SIZE) ?
- PAGE_SIZE : dev->left_bytes;
-
- if (!sg_pcopy_to_buffer(dev->first, dev->src_nents,
- dev->addr_vir, count,
- dev->total - dev->left_bytes)) {
- dev_err(dev->dev, "[%s:%d] pcopy err\n",
- __func__, __LINE__);
- return -EINVAL;
- }
- dev->left_bytes -= count;
- sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
- if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
- dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n",
- __func__, __LINE__);
- return -ENOMEM;
- }
- dev->addr_in = sg_dma_address(&dev->sg_tmp);
-
- if (sg_dst) {
- if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
- DMA_FROM_DEVICE)) {
- dev_err(dev->dev,
- "[%s:%d] dma_map_sg(sg_tmp) error\n",
- __func__, __LINE__);
- dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
- DMA_TO_DEVICE);
- return -ENOMEM;
+ int i, j, err;
+ unsigned long cr;
+
+ dev->num_clks = devm_clk_bulk_get_all(dev->dev, &dev->clks);
+ if (dev->num_clks < dev->variant->num_clks) {
+ dev_err(dev->dev, "Missing clocks, got %d instead of %d\n",
+ dev->num_clks, dev->variant->num_clks);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dev->num_clks; i++) {
+ cr = clk_get_rate(dev->clks[i].clk);
+ for (j = 0; j < ARRAY_SIZE(dev->variant->rkclks); j++) {
+ if (dev->variant->rkclks[j].max == 0)
+ continue;
+ if (strcmp(dev->variant->rkclks[j].name, dev->clks[i].id))
+ continue;
+ if (cr > dev->variant->rkclks[j].max) {
+ err = clk_set_rate(dev->clks[i].clk,
+ dev->variant->rkclks[j].max);
+ if (err)
+				dev_err(dev->dev, "Failed to downclock %s from %lu to %lu\n",
+ dev->variant->rkclks[j].name, cr,
+ dev->variant->rkclks[j].max);
+ else
+ dev_info(dev->dev, "Downclocking %s from %lu to %lu\n",
+ dev->variant->rkclks[j].name, cr,
+ dev->variant->rkclks[j].max);
}
- dev->addr_out = sg_dma_address(&dev->sg_tmp);
}
}
- dev->count = count;
return 0;
}
-static void rk_unload_data(struct rk_crypto_info *dev)
+static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
{
- struct scatterlist *sg_in, *sg_out;
+ int err;
- sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp;
- dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE);
+ err = clk_bulk_prepare_enable(dev->num_clks, dev->clks);
+ if (err)
+		dev_err(dev->dev, "Could not enable clocks\n");
- if (dev->sg_dst) {
- sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp;
- dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE);
- }
+ return err;
}
-static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
+static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
{
- struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
- u32 interrupt_status;
+ clk_bulk_disable_unprepare(dev->num_clks, dev->clks);
+}
- spin_lock(&dev->lock);
- interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
- CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
+/*
+ * Power management strategy: the device is suspended until a request
+ * needs handling. To avoid suspend/resume thrashing, autosuspend is set to 2s.
+ */
+static int rk_crypto_pm_suspend(struct device *dev)
+{
+ struct rk_crypto_info *rkdev = dev_get_drvdata(dev);
- if (interrupt_status & 0x0a) {
- dev_warn(dev->dev, "DMA Error\n");
- dev->err = -EFAULT;
- }
- tasklet_schedule(&dev->done_task);
+ rk_crypto_disable_clk(rkdev);
+ reset_control_assert(rkdev->rst);
- spin_unlock(&dev->lock);
- return IRQ_HANDLED;
+ return 0;
}
-static int rk_crypto_enqueue(struct rk_crypto_info *dev,
- struct crypto_async_request *async_req)
+static int rk_crypto_pm_resume(struct device *dev)
{
- unsigned long flags;
+ struct rk_crypto_info *rkdev = dev_get_drvdata(dev);
int ret;
- spin_lock_irqsave(&dev->lock, flags);
- ret = crypto_enqueue_request(&dev->queue, async_req);
- if (dev->busy) {
- spin_unlock_irqrestore(&dev->lock, flags);
+ ret = rk_crypto_enable_clk(rkdev);
+ if (ret)
return ret;
- }
- dev->busy = true;
- spin_unlock_irqrestore(&dev->lock, flags);
- tasklet_schedule(&dev->queue_task);
- return ret;
-}
+ reset_control_deassert(rkdev->rst);
+ return 0;
-static void rk_crypto_queue_task_cb(unsigned long data)
-{
- struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
- struct crypto_async_request *async_req, *backlog;
- unsigned long flags;
- int err = 0;
+}
- dev->err = 0;
- spin_lock_irqsave(&dev->lock, flags);
- backlog = crypto_get_backlog(&dev->queue);
- async_req = crypto_dequeue_request(&dev->queue);
+static const struct dev_pm_ops rk_crypto_pm_ops = {
+ SET_RUNTIME_PM_OPS(rk_crypto_pm_suspend, rk_crypto_pm_resume, NULL)
+};
- if (!async_req) {
- dev->busy = false;
- spin_unlock_irqrestore(&dev->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&dev->lock, flags);
+static int rk_crypto_pm_init(struct rk_crypto_info *rkdev)
+{
+ int err;
- if (backlog) {
- backlog->complete(backlog, -EINPROGRESS);
- backlog = NULL;
- }
+ pm_runtime_use_autosuspend(rkdev->dev);
+ pm_runtime_set_autosuspend_delay(rkdev->dev, 2000);
- dev->async_req = async_req;
- err = dev->start(dev);
+ err = pm_runtime_set_suspended(rkdev->dev);
if (err)
- dev->complete(dev->async_req, err);
+ return err;
+ pm_runtime_enable(rkdev->dev);
+ return err;
}
-static void rk_crypto_done_task_cb(unsigned long data)
+static void rk_crypto_pm_exit(struct rk_crypto_info *rkdev)
{
- struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
+ pm_runtime_disable(rkdev->dev);
+}
- if (dev->err) {
- dev->complete(dev->async_req, dev->err);
- return;
+static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
+{
+ struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
+ u32 interrupt_status;
+
+ interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
+ CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
+
+ dev->status = 1;
+ if (interrupt_status & 0x0a) {
+ dev_warn(dev->dev, "DMA Error\n");
+ dev->status = 0;
}
+ complete(&dev->complete);
- dev->err = dev->update(dev);
- if (dev->err)
- dev->complete(dev->async_req, dev->err);
+ return IRQ_HANDLED;
}
static struct rk_crypto_tmp *rk_cipher_algs[] = {
@@ -257,6 +184,62 @@ static struct rk_crypto_tmp *rk_cipher_algs[] = {
&rk_ahash_md5,
};
+#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+static int rk_crypto_debugfs_show(struct seq_file *seq, void *v)
+{
+ struct rk_crypto_info *dd;
+ unsigned int i;
+
+ spin_lock(&rocklist.lock);
+ list_for_each_entry(dd, &rocklist.dev_list, list) {
+ seq_printf(seq, "%s %s requests: %lu\n",
+ dev_driver_string(dd->dev), dev_name(dd->dev),
+ dd->nreq);
+ }
+ spin_unlock(&rocklist.lock);
+
+ for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
+ if (!rk_cipher_algs[i]->dev)
+ continue;
+ switch (rk_cipher_algs[i]->type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
+ rk_cipher_algs[i]->alg.skcipher.base.cra_driver_name,
+ rk_cipher_algs[i]->alg.skcipher.base.cra_name,
+ rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb);
+ seq_printf(seq, "\tfallback due to length: %lu\n",
+ rk_cipher_algs[i]->stat_fb_len);
+ seq_printf(seq, "\tfallback due to alignment: %lu\n",
+ rk_cipher_algs[i]->stat_fb_align);
+ seq_printf(seq, "\tfallback due to SGs: %lu\n",
+ rk_cipher_algs[i]->stat_fb_sgdiff);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
+ rk_cipher_algs[i]->alg.hash.halg.base.cra_driver_name,
+ rk_cipher_algs[i]->alg.hash.halg.base.cra_name,
+ rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb);
+ break;
+ }
+ }
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(rk_crypto_debugfs);
+#endif
+
+static void register_debugfs(struct rk_crypto_info *crypto_info)
+{
+#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+	/* Ignore errors from debugfs */
+ rocklist.dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL);
+ rocklist.dbgfs_stats = debugfs_create_file("stats", 0444,
+ rocklist.dbgfs_dir,
+ &rocklist,
+ &rk_crypto_debugfs_fops);
+#endif
+}
+
static int rk_crypto_register(struct rk_crypto_info *crypto_info)
{
unsigned int i, k;
@@ -264,12 +247,22 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
rk_cipher_algs[i]->dev = crypto_info;
- if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
- err = crypto_register_skcipher(
- &rk_cipher_algs[i]->alg.skcipher);
- else
- err = crypto_register_ahash(
- &rk_cipher_algs[i]->alg.hash);
+ switch (rk_cipher_algs[i]->type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ dev_info(crypto_info->dev, "Register %s as %s\n",
+ rk_cipher_algs[i]->alg.skcipher.base.cra_name,
+ rk_cipher_algs[i]->alg.skcipher.base.cra_driver_name);
+ err = crypto_register_skcipher(&rk_cipher_algs[i]->alg.skcipher);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ dev_info(crypto_info->dev, "Register %s as %s\n",
+ rk_cipher_algs[i]->alg.hash.halg.base.cra_name,
+ rk_cipher_algs[i]->alg.hash.halg.base.cra_driver_name);
+ err = crypto_register_ahash(&rk_cipher_algs[i]->alg.hash);
+ break;
+ default:
+ dev_err(crypto_info->dev, "unknown algorithm\n");
+ }
if (err)
goto err_cipher_algs;
}
@@ -277,7 +270,7 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
err_cipher_algs:
for (k = 0; k < i; k++) {
- if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
+		if (rk_cipher_algs[k]->type == CRYPTO_ALG_TYPE_SKCIPHER)
crypto_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher);
else
crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
@@ -290,22 +283,23 @@ static void rk_crypto_unregister(void)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
- if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
+ if (rk_cipher_algs[i]->type == CRYPTO_ALG_TYPE_SKCIPHER)
crypto_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher);
else
crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
}
}
-static void rk_crypto_action(void *data)
-{
- struct rk_crypto_info *crypto_info = data;
-
- reset_control_assert(crypto_info->rst);
-}
-
static const struct of_device_id crypto_of_id_table[] = {
- { .compatible = "rockchip,rk3288-crypto" },
+ { .compatible = "rockchip,rk3288-crypto",
+ .data = &rk3288_variant,
+ },
+ { .compatible = "rockchip,rk3328-crypto",
+ .data = &rk3328_variant,
+ },
+ { .compatible = "rockchip,rk3399-crypto",
+ .data = &rk3399_variant,
+ },
{}
};
MODULE_DEVICE_TABLE(of, crypto_of_id_table);
@@ -313,7 +307,7 @@ MODULE_DEVICE_TABLE(of, crypto_of_id_table);
static int rk_crypto_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct rk_crypto_info *crypto_info;
+ struct rk_crypto_info *crypto_info, *first;
int err = 0;
crypto_info = devm_kzalloc(&pdev->dev,
@@ -323,7 +317,16 @@ static int rk_crypto_probe(struct platform_device *pdev)
goto err_crypto;
}
- crypto_info->rst = devm_reset_control_get(dev, "crypto-rst");
+ crypto_info->dev = &pdev->dev;
+ platform_set_drvdata(pdev, crypto_info);
+
+ crypto_info->variant = of_device_get_match_data(&pdev->dev);
+ if (!crypto_info->variant) {
+ dev_err(&pdev->dev, "Missing variant\n");
+ return -EINVAL;
+ }
+
+ crypto_info->rst = devm_reset_control_array_get_exclusive(dev);
if (IS_ERR(crypto_info->rst)) {
err = PTR_ERR(crypto_info->rst);
goto err_crypto;
@@ -333,46 +336,18 @@ static int rk_crypto_probe(struct platform_device *pdev)
usleep_range(10, 20);
reset_control_deassert(crypto_info->rst);
- err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info);
- if (err)
- goto err_crypto;
-
- spin_lock_init(&crypto_info->lock);
-
crypto_info->reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(crypto_info->reg)) {
err = PTR_ERR(crypto_info->reg);
goto err_crypto;
}
- crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk");
- if (IS_ERR(crypto_info->aclk)) {
- err = PTR_ERR(crypto_info->aclk);
- goto err_crypto;
- }
-
- crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk");
- if (IS_ERR(crypto_info->hclk)) {
- err = PTR_ERR(crypto_info->hclk);
- goto err_crypto;
- }
-
- crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk");
- if (IS_ERR(crypto_info->sclk)) {
- err = PTR_ERR(crypto_info->sclk);
- goto err_crypto;
- }
-
- crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk");
- if (IS_ERR(crypto_info->dmaclk)) {
- err = PTR_ERR(crypto_info->dmaclk);
+ err = rk_crypto_get_clks(crypto_info);
+ if (err)
goto err_crypto;
- }
crypto_info->irq = platform_get_irq(pdev, 0);
if (crypto_info->irq < 0) {
- dev_warn(crypto_info->dev,
- "control Interrupt is not available.\n");
err = crypto_info->irq;
goto err_crypto;
}
@@ -382,49 +357,64 @@ static int rk_crypto_probe(struct platform_device *pdev)
"rk-crypto", pdev);
if (err) {
- dev_err(crypto_info->dev, "irq request failed.\n");
+ dev_err(&pdev->dev, "irq request failed.\n");
goto err_crypto;
}
- crypto_info->dev = &pdev->dev;
- platform_set_drvdata(pdev, crypto_info);
-
- tasklet_init(&crypto_info->queue_task,
- rk_crypto_queue_task_cb, (unsigned long)crypto_info);
- tasklet_init(&crypto_info->done_task,
- rk_crypto_done_task_cb, (unsigned long)crypto_info);
- crypto_init_queue(&crypto_info->queue, 50);
+ crypto_info->engine = crypto_engine_alloc_init(&pdev->dev, true);
+ crypto_engine_start(crypto_info->engine);
+ init_completion(&crypto_info->complete);
- crypto_info->enable_clk = rk_crypto_enable_clk;
- crypto_info->disable_clk = rk_crypto_disable_clk;
- crypto_info->load_data = rk_load_data;
- crypto_info->unload_data = rk_unload_data;
- crypto_info->enqueue = rk_crypto_enqueue;
- crypto_info->busy = false;
+ err = rk_crypto_pm_init(crypto_info);
+ if (err)
+ goto err_pm;
+
+ spin_lock(&rocklist.lock);
+ first = list_first_entry_or_null(&rocklist.dev_list,
+ struct rk_crypto_info, list);
+ list_add_tail(&crypto_info->list, &rocklist.dev_list);
+ spin_unlock(&rocklist.lock);
+
+ if (!first) {
+ err = rk_crypto_register(crypto_info);
+ if (err) {
+			dev_err(dev, "Failed to register crypto algorithms\n");
+ goto err_register_alg;
+ }
- err = rk_crypto_register(crypto_info);
- if (err) {
- dev_err(dev, "err in register alg");
- goto err_register_alg;
+ register_debugfs(crypto_info);
}
- dev_info(dev, "Crypto Accelerator successfully registered\n");
return 0;
err_register_alg:
- tasklet_kill(&crypto_info->queue_task);
- tasklet_kill(&crypto_info->done_task);
+ rk_crypto_pm_exit(crypto_info);
+err_pm:
+ crypto_engine_exit(crypto_info->engine);
err_crypto:
+	dev_err(dev, "Failed to register Crypto Accelerator\n");
return err;
}
static int rk_crypto_remove(struct platform_device *pdev)
{
struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);
-
- rk_crypto_unregister();
- tasklet_kill(&crypto_tmp->done_task);
- tasklet_kill(&crypto_tmp->queue_task);
+ struct rk_crypto_info *first;
+
+ spin_lock_bh(&rocklist.lock);
+ list_del(&crypto_tmp->list);
+ first = list_first_entry_or_null(&rocklist.dev_list,
+ struct rk_crypto_info, list);
+ spin_unlock_bh(&rocklist.lock);
+
+ if (!first) {
+#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+ debugfs_remove_recursive(rocklist.dbgfs_dir);
+#endif
+ rk_crypto_unregister();
+ }
+ rk_crypto_pm_exit(crypto_tmp);
+ crypto_engine_exit(crypto_tmp->engine);
return 0;
}
@@ -433,6 +423,7 @@ static struct platform_driver crypto_driver = {
.remove = rk_crypto_remove,
.driver = {
.name = "rk3288-crypto",
+ .pm = &rk_crypto_pm_ops,
.of_match_table = crypto_of_id_table,
},
};
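
The new power strategy shows up as a bracket around every hardware access: resume before touching registers, put with autosuspend afterwards, and let the 2-second delay absorb request bursts. A hedged sketch of that per-request bracket:

	#include <linux/pm_runtime.h>

	static int do_one_request_sketch(struct rk_crypto_info *rkc)
	{
		int err;

		err = pm_runtime_resume_and_get(rkc->dev);
		if (err)
			return err;

		/* ... program registers, run the DMA, wait for the IRQ
		 * to signal rkc->complete ... */

		pm_runtime_put_autosuspend(rkc->dev);
		return 0;
	}
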
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
index 97278c2574ff..b2695258cade 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -5,9 +5,13 @@
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/algapi.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
+#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
@@ -184,85 +188,91 @@
#define CRYPTO_WRITE(dev, offset, val) \
writel_relaxed((val), ((dev)->reg + (offset)))
+#define RK_MAX_CLKS 4
+
+/*
+ * struct rockchip_ip - struct for managing a list of RK crypto instance
+ * @dev_list: Used for doing a list of rk_crypto_info
+ * @lock: Control access to dev_list
+ * @dbgfs_dir: Debugfs dentry for statistic directory
+ * @dbgfs_stats: Debugfs dentry for statistic counters
+ */
+struct rockchip_ip {
+ struct list_head dev_list;
+ spinlock_t lock; /* Control access to dev_list */
+ struct dentry *dbgfs_dir;
+ struct dentry *dbgfs_stats;
+};
+
+struct rk_clks {
+ const char *name;
+ unsigned long max;
+};
+
+struct rk_variant {
+ int num_clks;
+ struct rk_clks rkclks[RK_MAX_CLKS];
+};
+
struct rk_crypto_info {
+ struct list_head list;
struct device *dev;
- struct clk *aclk;
- struct clk *hclk;
- struct clk *sclk;
- struct clk *dmaclk;
+ struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control *rst;
void __iomem *reg;
int irq;
- struct crypto_queue queue;
- struct tasklet_struct queue_task;
- struct tasklet_struct done_task;
- struct crypto_async_request *async_req;
- int err;
- /* device lock */
- spinlock_t lock;
-
- /* the public variable */
- struct scatterlist *sg_src;
- struct scatterlist *sg_dst;
- struct scatterlist sg_tmp;
- struct scatterlist *first;
- unsigned int left_bytes;
- void *addr_vir;
- int aligned;
- int align_size;
- size_t src_nents;
- size_t dst_nents;
- unsigned int total;
- unsigned int count;
- dma_addr_t addr_in;
- dma_addr_t addr_out;
- bool busy;
- int (*start)(struct rk_crypto_info *dev);
- int (*update)(struct rk_crypto_info *dev);
- void (*complete)(struct crypto_async_request *base, int err);
- int (*enable_clk)(struct rk_crypto_info *dev);
- void (*disable_clk)(struct rk_crypto_info *dev);
- int (*load_data)(struct rk_crypto_info *dev,
- struct scatterlist *sg_src,
- struct scatterlist *sg_dst);
- void (*unload_data)(struct rk_crypto_info *dev);
- int (*enqueue)(struct rk_crypto_info *dev,
- struct crypto_async_request *async_req);
+ const struct rk_variant *variant;
+ unsigned long nreq;
+ struct crypto_engine *engine;
+ struct completion complete;
+ int status;
};
/* the private variable of hash */
struct rk_ahash_ctx {
- struct rk_crypto_info *dev;
+ struct crypto_engine_ctx enginectx;
/* for fallback */
struct crypto_ahash *fallback_tfm;
};
-/* the privete variable of hash for fallback */
+/* the private variable of hash for fallback */
struct rk_ahash_rctx {
+ struct rk_crypto_info *dev;
struct ahash_request fallback_req;
u32 mode;
+ int nrsg;
};
/* the private variable of cipher */
struct rk_cipher_ctx {
- struct rk_crypto_info *dev;
+ struct crypto_engine_ctx enginectx;
unsigned int keylen;
- u32 mode;
+ u8 key[AES_MAX_KEY_SIZE];
u8 iv[AES_BLOCK_SIZE];
+ struct crypto_skcipher *fallback_tfm;
};
-enum alg_type {
- ALG_TYPE_HASH,
- ALG_TYPE_CIPHER,
+struct rk_cipher_rctx {
+ struct rk_crypto_info *dev;
+ u8 backup_iv[AES_BLOCK_SIZE];
+ u32 mode;
+ struct skcipher_request fallback_req; // keep at the end
};
struct rk_crypto_tmp {
- struct rk_crypto_info *dev;
+ u32 type;
+ struct rk_crypto_info *dev;
union {
struct skcipher_alg skcipher;
struct ahash_alg hash;
} alg;
- enum alg_type type;
+ unsigned long stat_req;
+ unsigned long stat_fb;
+ unsigned long stat_fb_len;
+ unsigned long stat_fb_sglen;
+ unsigned long stat_fb_align;
+ unsigned long stat_fb_sgdiff;
};
extern struct rk_crypto_tmp rk_ecb_aes_alg;
@@ -276,4 +286,5 @@ extern struct rk_crypto_tmp rk_ahash_sha1;
extern struct rk_crypto_tmp rk_ahash_sha256;
extern struct rk_crypto_tmp rk_ahash_md5;
+struct rk_crypto_info *get_rk_crypto(void);
#endif
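
get_rk_crypto(), declared above, is how multi-controller support spreads the load: it returns the current list head and rotates the list, so consecutive callers land on different controllers. Usage sketch; pick_device() is a hypothetical caller:

	static struct rk_crypto_info *pick_device(void)
	{
		struct rk_crypto_info *rkc = get_rk_crypto();

		/* rkc was the head of rocklist; the rotation means the
		 * next caller is handed the following controller. */
		return rkc;	/* may be NULL if nothing probed */
	}
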
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index ed03058497bc..a78ff3dcd0b1 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -9,6 +9,8 @@
* Some ideas are from marvell/cesa.c and s5p-sss.c driver.
*/
#include <linux/device.h>
+#include <asm/unaligned.h>
+#include <linux/iopoll.h>
#include "rk3288_crypto.h"
/*
@@ -16,6 +18,44 @@
* so we put the fixed hash out when met zero message.
*/
+static bool rk_ahash_need_fallback(struct ahash_request *req)
+{
+ struct scatterlist *sg;
+
+ sg = req->src;
+ while (sg) {
+ if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
+ return true;
+ }
+ if (sg->length % 4) {
+ return true;
+ }
+ sg = sg_next(sg);
+ }
+ return false;
+}
+
+static int rk_ahash_digest_fb(struct ahash_request *areq)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+
+ algt->stat_fb++;
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = areq->nbytes;
+ rctx->fallback_req.src = areq->src;
+ rctx->fallback_req.result = areq->result;
+
+ return crypto_ahash_digest(&rctx->fallback_req);
+}
+
static int zero_message_process(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -38,15 +78,9 @@ static int zero_message_process(struct ahash_request *req)
return 0;
}
-static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err)
+static void rk_ahash_reg_init(struct ahash_request *req,
+ struct rk_crypto_info *dev)
{
- if (base->complete)
- base->complete(base, err);
-}
-
-static void rk_ahash_reg_init(struct rk_crypto_info *dev)
-{
- struct ahash_request *req = ahash_request_cast(dev->async_req);
struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
int reg_status;
@@ -74,7 +108,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev)
RK_CRYPTO_BYTESWAP_BRFIFO |
RK_CRYPTO_BYTESWAP_BTFIFO);
- CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total);
+ CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, req->nbytes);
}
static int rk_ahash_init(struct ahash_request *req)
@@ -164,51 +198,80 @@ static int rk_ahash_export(struct ahash_request *req, void *out)
static int rk_ahash_digest(struct ahash_request *req)
{
- struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
- struct rk_crypto_info *dev = tctx->dev;
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct rk_crypto_info *dev;
+ struct crypto_engine *engine;
+
+ if (rk_ahash_need_fallback(req))
+ return rk_ahash_digest_fb(req);
if (!req->nbytes)
return zero_message_process(req);
- else
- return dev->enqueue(dev, &req->base);
+
+ dev = get_rk_crypto();
+
+ rctx->dev = dev;
+ engine = dev->engine;
+
+ return crypto_transfer_hash_request_to_engine(engine, req);
}
-static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
+static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg)
{
- CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in);
- CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4);
+ CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, sg_dma_address(sg));
+ CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, sg_dma_len(sg) / 4);
CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
(RK_CRYPTO_HASH_START << 16));
}
-static int rk_ahash_set_data_start(struct rk_crypto_info *dev)
+static int rk_hash_prepare(struct crypto_engine *engine, void *breq)
{
- int err;
+ struct ahash_request *areq = container_of(breq, struct ahash_request, base);
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+ struct rk_crypto_info *rkc = rctx->dev;
+ int ret;
- err = dev->load_data(dev, dev->sg_src, NULL);
- if (!err)
- crypto_ahash_dma_start(dev);
- return err;
+ ret = dma_map_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+ if (ret <= 0)
+ return -EINVAL;
+
+ rctx->nrsg = ret;
+
+ return 0;
}
-static int rk_ahash_start(struct rk_crypto_info *dev)
+static int rk_hash_unprepare(struct crypto_engine *engine, void *breq)
{
- struct ahash_request *req = ahash_request_cast(dev->async_req);
- struct crypto_ahash *tfm;
- struct rk_ahash_rctx *rctx;
-
- dev->total = req->nbytes;
- dev->left_bytes = req->nbytes;
- dev->aligned = 0;
- dev->align_size = 4;
- dev->sg_dst = NULL;
- dev->sg_src = req->src;
- dev->first = req->src;
- dev->src_nents = sg_nents(req->src);
- rctx = ahash_request_ctx(req);
+ struct ahash_request *areq = container_of(breq, struct ahash_request, base);
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+ struct rk_crypto_info *rkc = rctx->dev;
+
+ dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
+ return 0;
+}
+
+static int rk_hash_run(struct crypto_engine *engine, void *breq)
+{
+ struct ahash_request *areq = container_of(breq, struct ahash_request, base);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+ struct scatterlist *sg = areq->src;
+ struct rk_crypto_info *rkc = rctx->dev;
+ int err = 0;
+ int i;
+ u32 v;
+
+ err = pm_runtime_resume_and_get(rkc->dev);
+ if (err)
+ return err;
+
rctx->mode = 0;
- tfm = crypto_ahash_reqtfm(req);
+ algt->stat_req++;
+ rkc->nreq++;
+
switch (crypto_ahash_digestsize(tfm)) {
case SHA1_DIGEST_SIZE:
rctx->mode = RK_CRYPTO_HASH_SHA1;
@@ -220,100 +283,88 @@ static int rk_ahash_start(struct rk_crypto_info *dev)
rctx->mode = RK_CRYPTO_HASH_MD5;
break;
default:
- return -EINVAL;
+ err = -EINVAL;
+ goto theend;
}
- rk_ahash_reg_init(dev);
- return rk_ahash_set_data_start(dev);
-}
-
-static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
-{
- int err = 0;
- struct ahash_request *req = ahash_request_cast(dev->async_req);
- struct crypto_ahash *tfm;
-
- dev->unload_data(dev);
- if (dev->left_bytes) {
- if (dev->aligned) {
- if (sg_is_last(dev->sg_src)) {
- dev_warn(dev->dev, "[%s:%d], Lack of data\n",
- __func__, __LINE__);
- err = -ENOMEM;
- goto out_rx;
- }
- dev->sg_src = sg_next(dev->sg_src);
+ rk_ahash_reg_init(areq, rkc);
+
+ while (sg) {
+ reinit_completion(&rkc->complete);
+ rkc->status = 0;
+ crypto_ahash_dma_start(rkc, sg);
+ wait_for_completion_interruptible_timeout(&rkc->complete,
+ msecs_to_jiffies(2000));
+ if (!rkc->status) {
+ dev_err(rkc->dev, "DMA timeout\n");
+ err = -EFAULT;
+ goto theend;
}
- err = rk_ahash_set_data_start(dev);
- } else {
- /*
- * it will take some time to process date after last dma
- * transmission.
- *
- * waiting time is relative with the last date len,
- * so cannot set a fixed time here.
- * 10us makes system not call here frequently wasting
- * efficiency, and make it response quickly when dma
- * complete.
- */
- while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
- udelay(10);
-
- tfm = crypto_ahash_reqtfm(req);
- memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
- crypto_ahash_digestsize(tfm));
- dev->complete(dev->async_req, 0);
- tasklet_schedule(&dev->queue_task);
+ sg = sg_next(sg);
}
-out_rx:
- return err;
+ /*
+	 * It takes the hardware some time to process data after the last
+	 * DMA transfer completes.
+	 *
+	 * The wait depends on the length of that final transfer, so a
+	 * fixed delay cannot be used here.
+	 * Polling every 10us keeps the CPU from hammering the status
+	 * register while still letting the driver respond quickly once
+	 * the hash is done.
+ */
+ readl_poll_timeout(rkc->reg + RK_CRYPTO_HASH_STS, v, v == 0, 10, 1000);
+
+ for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) {
+ v = readl(rkc->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4);
+ put_unaligned_le32(v, areq->result + i * 4);
+ }
+
+theend:
+ pm_runtime_put_autosuspend(rkc->dev);
+
+ local_bh_disable();
+ crypto_finalize_hash_request(engine, breq, err);
+ local_bh_enable();
+
+ return 0;
}
static int rk_cra_hash_init(struct crypto_tfm *tfm)
{
struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
- struct rk_crypto_tmp *algt;
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
-
const char *alg_name = crypto_tfm_alg_name(tfm);
-
- algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
-
- tctx->dev = algt->dev;
- tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL);
- if (!tctx->dev->addr_vir) {
- dev_err(tctx->dev->dev, "failed to kmalloc for addr_vir\n");
- return -ENOMEM;
- }
- tctx->dev->start = rk_ahash_start;
- tctx->dev->update = rk_ahash_crypto_rx;
- tctx->dev->complete = rk_ahash_crypto_complete;
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
/* for fallback */
tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(tctx->fallback_tfm)) {
- dev_err(tctx->dev->dev, "Could not load fallback driver.\n");
+ dev_err(algt->dev->dev, "Could not load fallback driver.\n");
return PTR_ERR(tctx->fallback_tfm);
}
+
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct rk_ahash_rctx) +
crypto_ahash_reqsize(tctx->fallback_tfm));
- return tctx->dev->enable_clk(tctx->dev);
+ tctx->enginectx.op.do_one_request = rk_hash_run;
+ tctx->enginectx.op.prepare_request = rk_hash_prepare;
+ tctx->enginectx.op.unprepare_request = rk_hash_unprepare;
+
+ return 0;
}
static void rk_cra_hash_exit(struct crypto_tfm *tfm)
{
struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
- free_page((unsigned long)tctx->dev->addr_vir);
- return tctx->dev->disable_clk(tctx->dev);
+ crypto_free_ahash(tctx->fallback_tfm);
}
struct rk_crypto_tmp rk_ahash_sha1 = {
- .type = ALG_TYPE_HASH,
+ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = rk_ahash_init,
.update = rk_ahash_update,
@@ -337,13 +388,13 @@ struct rk_crypto_tmp rk_ahash_sha1 = {
.cra_init = rk_cra_hash_init,
.cra_exit = rk_cra_hash_exit,
.cra_module = THIS_MODULE,
- }
- }
+ }
+ }
}
};
struct rk_crypto_tmp rk_ahash_sha256 = {
- .type = ALG_TYPE_HASH,
+ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = rk_ahash_init,
.update = rk_ahash_update,
@@ -367,13 +418,13 @@ struct rk_crypto_tmp rk_ahash_sha256 = {
.cra_init = rk_cra_hash_init,
.cra_exit = rk_cra_hash_exit,
.cra_module = THIS_MODULE,
- }
- }
+ }
+ }
}
};
struct rk_crypto_tmp rk_ahash_md5 = {
- .type = ALG_TYPE_HASH,
+ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = rk_ahash_init,
.update = rk_ahash_update,
@@ -397,7 +448,7 @@ struct rk_crypto_tmp rk_ahash_md5 = {
.cra_init = rk_cra_hash_init,
.cra_exit = rk_cra_hash_exit,
.cra_module = THIS_MODULE,
- }
}
+ }
}
};
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index 5bbf0d2722e1..59069457582b 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -9,23 +9,94 @@
* Some ideas are from marvell-cesa.c and s5p-sss.c driver.
*/
#include <linux/device.h>
+#include <crypto/scatterwalk.h>
#include "rk3288_crypto.h"
#define RK_CRYPTO_DEC BIT(0)
-static void rk_crypto_complete(struct crypto_async_request *base, int err)
+static int rk_cipher_need_fallback(struct skcipher_request *req)
{
- if (base->complete)
- base->complete(base, err);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+ struct scatterlist *sgs, *sgd;
+ unsigned int stodo, dtodo, len;
+ unsigned int bs = crypto_skcipher_blocksize(tfm);
+
+ if (!req->cryptlen)
+ return true;
+
+ len = req->cryptlen;
+ sgs = req->src;
+ sgd = req->dst;
+ while (sgs && sgd) {
+ if (!IS_ALIGNED(sgs->offset, sizeof(u32))) {
+ algt->stat_fb_align++;
+ return true;
+ }
+ if (!IS_ALIGNED(sgd->offset, sizeof(u32))) {
+ algt->stat_fb_align++;
+ return true;
+ }
+ stodo = min(len, sgs->length);
+ if (stodo % bs) {
+ algt->stat_fb_len++;
+ return true;
+ }
+ dtodo = min(len, sgd->length);
+ if (dtodo % bs) {
+ algt->stat_fb_len++;
+ return true;
+ }
+ if (stodo != dtodo) {
+ algt->stat_fb_sgdiff++;
+ return true;
+ }
+ len -= stodo;
+ sgs = sg_next(sgs);
+ sgd = sg_next(sgd);
+ }
+ return false;
}
-static int rk_handle_req(struct rk_crypto_info *dev,
- struct skcipher_request *req)
+static int rk_cipher_fallback(struct skcipher_request *areq)
{
- if (!IS_ALIGNED(req->cryptlen, dev->align_size))
- return -EINVAL;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+ int err;
+
+ algt->stat_fb++;
+
+ skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
+ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+ areq->base.complete, areq->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (rctx->mode & RK_CRYPTO_DEC)
+ err = crypto_skcipher_decrypt(&rctx->fallback_req);
else
- return dev->enqueue(dev, &req->base);
+ err = crypto_skcipher_encrypt(&rctx->fallback_req);
+ return err;
+}
+
+static int rk_cipher_handle_req(struct skcipher_request *req)
+{
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *rkc;
+ struct crypto_engine *engine;
+
+ if (rk_cipher_need_fallback(req))
+ return rk_cipher_fallback(req);
+
+ rkc = get_rk_crypto();
+
+ engine = rkc->engine;
+ rctx->dev = rkc;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, req);
}
static int rk_aes_setkey(struct crypto_skcipher *cipher,
@@ -38,8 +109,9 @@ static int rk_aes_setkey(struct crypto_skcipher *cipher,
keylen != AES_KEYSIZE_256)
return -EINVAL;
ctx->keylen = keylen;
- memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
- return 0;
+ memcpy(ctx->key, key, keylen);
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}
static int rk_des_setkey(struct crypto_skcipher *cipher,
@@ -53,8 +125,9 @@ static int rk_des_setkey(struct crypto_skcipher *cipher,
return err;
ctx->keylen = keylen;
- memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
- return 0;
+ memcpy(ctx->key, key, keylen);
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}
static int rk_tdes_setkey(struct crypto_skcipher *cipher,
@@ -68,161 +141,136 @@ static int rk_tdes_setkey(struct crypto_skcipher *cipher,
return err;
ctx->keylen = keylen;
- memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
- return 0;
+ memcpy(ctx->key, key, keylen);
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}
static int rk_aes_ecb_encrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_AES_ECB_MODE;
- return rk_handle_req(dev, req);
+ rctx->mode = RK_CRYPTO_AES_ECB_MODE;
+ return rk_cipher_handle_req(req);
}
static int rk_aes_ecb_decrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
+ rctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
+ return rk_cipher_handle_req(req);
}
static int rk_aes_cbc_encrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_AES_CBC_MODE;
- return rk_handle_req(dev, req);
+ rctx->mode = RK_CRYPTO_AES_CBC_MODE;
+ return rk_cipher_handle_req(req);
}
static int rk_aes_cbc_decrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
+ rctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
+ return rk_cipher_handle_req(req);
}
static int rk_des_ecb_encrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = 0;
- return rk_handle_req(dev, req);
+ rctx->mode = 0;
+ return rk_cipher_handle_req(req);
}
static int rk_des_ecb_decrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
+ rctx->mode = RK_CRYPTO_DEC;
+ return rk_cipher_handle_req(req);
}
static int rk_des_cbc_encrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
- return rk_handle_req(dev, req);
+ rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
+ return rk_cipher_handle_req(req);
}
static int rk_des_cbc_decrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
+ rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
+ return rk_cipher_handle_req(req);
}
static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_TDES_SELECT;
- return rk_handle_req(dev, req);
+ rctx->mode = RK_CRYPTO_TDES_SELECT;
+ return rk_cipher_handle_req(req);
}
static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
+ rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
+ return rk_cipher_handle_req(req);
}
static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
- return rk_handle_req(dev, req);
+ rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
+ return rk_cipher_handle_req(req);
}
static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
+ rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
+ return rk_cipher_handle_req(req);
}
-static void rk_ablk_hw_init(struct rk_crypto_info *dev)
+static void rk_cipher_hw_init(struct rk_crypto_info *dev, struct skcipher_request *req)
{
- struct skcipher_request *req =
- skcipher_request_cast(dev->async_req);
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
- u32 ivsize, block, conf_reg = 0;
+ u32 block, conf_reg = 0;
block = crypto_tfm_alg_blocksize(tfm);
- ivsize = crypto_skcipher_ivsize(cipher);
if (block == DES_BLOCK_SIZE) {
- ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
+ rctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
RK_CRYPTO_TDES_BYTESWAP_KEY |
RK_CRYPTO_TDES_BYTESWAP_IV;
- CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->iv, ivsize);
+ CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, rctx->mode);
+ memcpy_toio(dev->reg + RK_CRYPTO_TDES_KEY1_0, ctx->key, ctx->keylen);
conf_reg = RK_CRYPTO_DESSEL;
} else {
- ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
+ rctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
RK_CRYPTO_AES_KEY_CHANGE |
RK_CRYPTO_AES_BYTESWAP_KEY |
RK_CRYPTO_AES_BYTESWAP_IV;
if (ctx->keylen == AES_KEYSIZE_192)
- ctx->mode |= RK_CRYPTO_AES_192BIT_key;
+ rctx->mode |= RK_CRYPTO_AES_192BIT_key;
else if (ctx->keylen == AES_KEYSIZE_256)
- ctx->mode |= RK_CRYPTO_AES_256BIT_key;
- CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
- memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->iv, ivsize);
+ rctx->mode |= RK_CRYPTO_AES_256BIT_key;
+ CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, rctx->mode);
+ memcpy_toio(dev->reg + RK_CRYPTO_AES_KEY_0, ctx->key, ctx->keylen);
}
conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
RK_CRYPTO_BYTESWAP_BRFIFO;
@@ -231,189 +279,196 @@ static void rk_ablk_hw_init(struct rk_crypto_info *dev)
RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
}
-static void crypto_dma_start(struct rk_crypto_info *dev)
+static void crypto_dma_start(struct rk_crypto_info *dev,
+ struct scatterlist *sgs,
+ struct scatterlist *sgd, unsigned int todo)
{
- CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in);
- CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4);
- CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out);
+ CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, sg_dma_address(sgs));
+ CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, todo);
+ CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, sg_dma_address(sgd));
CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
_SBF(RK_CRYPTO_BLOCK_START, 16));
}
-static int rk_set_data_start(struct rk_crypto_info *dev)
+static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
{
- int err;
- struct skcipher_request *req =
- skcipher_request_cast(dev->async_req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- u32 ivsize = crypto_skcipher_ivsize(tfm);
- u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
- dev->sg_src->offset + dev->sg_src->length - ivsize;
-
- /* Store the iv that need to be updated in chain mode.
- * And update the IV buffer to contain the next IV for decryption mode.
- */
- if (ctx->mode & RK_CRYPTO_DEC) {
- memcpy(ctx->iv, src_last_blk, ivsize);
- sg_pcopy_to_buffer(dev->first, dev->src_nents, req->iv,
- ivsize, dev->total - ivsize);
- }
-
- err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
- if (!err)
- crypto_dma_start(dev);
- return err;
-}
-
-static int rk_ablk_start(struct rk_crypto_info *dev)
-{
- struct skcipher_request *req =
- skcipher_request_cast(dev->async_req);
- unsigned long flags;
+ struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
+ struct scatterlist *sgs, *sgd;
int err = 0;
+ int ivsize = crypto_skcipher_ivsize(tfm);
+ int offset;
+ u8 iv[AES_BLOCK_SIZE];
+ u8 biv[AES_BLOCK_SIZE];
+ u8 *ivtouse = areq->iv;
+ unsigned int len = areq->cryptlen;
+ unsigned int todo;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+ struct rk_crypto_info *rkc = rctx->dev;
- dev->left_bytes = req->cryptlen;
- dev->total = req->cryptlen;
- dev->sg_src = req->src;
- dev->first = req->src;
- dev->src_nents = sg_nents(req->src);
- dev->sg_dst = req->dst;
- dev->dst_nents = sg_nents(req->dst);
- dev->aligned = 1;
-
- spin_lock_irqsave(&dev->lock, flags);
- rk_ablk_hw_init(dev);
- err = rk_set_data_start(dev);
- spin_unlock_irqrestore(&dev->lock, flags);
- return err;
-}
+ err = pm_runtime_resume_and_get(rkc->dev);
+ if (err)
+ return err;
-static void rk_iv_copyback(struct rk_crypto_info *dev)
-{
- struct skcipher_request *req =
- skcipher_request_cast(dev->async_req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- u32 ivsize = crypto_skcipher_ivsize(tfm);
+ algt->stat_req++;
+ rkc->nreq++;
- /* Update the IV buffer to contain the next IV for encryption mode. */
- if (!(ctx->mode & RK_CRYPTO_DEC)) {
- if (dev->aligned) {
- memcpy(req->iv, sg_virt(dev->sg_dst) +
- dev->sg_dst->length - ivsize, ivsize);
- } else {
- memcpy(req->iv, dev->addr_vir +
- dev->count - ivsize, ivsize);
+ ivsize = crypto_skcipher_ivsize(tfm);
+ if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
+ if (rctx->mode & RK_CRYPTO_DEC) {
+ offset = areq->cryptlen - ivsize;
+ scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
+ offset, ivsize, 0);
}
}
-}
-static void rk_update_iv(struct rk_crypto_info *dev)
-{
- struct skcipher_request *req =
- skcipher_request_cast(dev->async_req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- u32 ivsize = crypto_skcipher_ivsize(tfm);
- u8 *new_iv = NULL;
+ sgs = areq->src;
+ sgd = areq->dst;
- if (ctx->mode & RK_CRYPTO_DEC) {
- new_iv = ctx->iv;
- } else {
- new_iv = page_address(sg_page(dev->sg_dst)) +
- dev->sg_dst->offset + dev->sg_dst->length - ivsize;
+ while (sgs && sgd && len) {
+ if (!sgs->length) {
+ sgs = sg_next(sgs);
+ sgd = sg_next(sgd);
+ continue;
+ }
+ if (rctx->mode & RK_CRYPTO_DEC) {
+			/* back up the last block of source; it is used as the IV for the next step */
+ offset = sgs->length - ivsize;
+ scatterwalk_map_and_copy(biv, sgs, offset, ivsize, 0);
+ }
+ if (sgs == sgd) {
+ err = dma_map_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL);
+ if (err <= 0) {
+ err = -EINVAL;
+ goto theend_iv;
+ }
+ } else {
+ err = dma_map_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE);
+ if (err <= 0) {
+ err = -EINVAL;
+ goto theend_iv;
+ }
+ err = dma_map_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE);
+ if (err <= 0) {
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+ }
+ err = 0;
+ rk_cipher_hw_init(rkc, areq);
+ if (ivsize) {
+ if (ivsize == DES_BLOCK_SIZE)
+ memcpy_toio(rkc->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize);
+ else
+ memcpy_toio(rkc->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize);
+ }
+ reinit_completion(&rkc->complete);
+ rkc->status = 0;
+
+ todo = min(sg_dma_len(sgs), len);
+ len -= todo;
+ crypto_dma_start(rkc, sgs, sgd, todo / 4);
+ wait_for_completion_interruptible_timeout(&rkc->complete,
+ msecs_to_jiffies(2000));
+ if (!rkc->status) {
+ dev_err(rkc->dev, "DMA timeout\n");
+ err = -EFAULT;
+ goto theend;
+ }
+ if (sgs == sgd) {
+ dma_unmap_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE);
+ dma_unmap_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE);
+ }
+ if (rctx->mode & RK_CRYPTO_DEC) {
+ memcpy(iv, biv, ivsize);
+ ivtouse = iv;
+ } else {
+ offset = sgd->length - ivsize;
+ scatterwalk_map_and_copy(iv, sgd, offset, ivsize, 0);
+ ivtouse = iv;
+ }
+ sgs = sg_next(sgs);
+ sgd = sg_next(sgd);
}
- if (ivsize == DES_BLOCK_SIZE)
- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
- else if (ivsize == AES_BLOCK_SIZE)
- memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
-}
-
-/* return:
- * true some err was occurred
- * fault no err, continue
- */
-static int rk_ablk_rx(struct rk_crypto_info *dev)
-{
- int err = 0;
- struct skcipher_request *req =
- skcipher_request_cast(dev->async_req);
-
- dev->unload_data(dev);
- if (!dev->aligned) {
- if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
- dev->addr_vir, dev->count,
- dev->total - dev->left_bytes -
- dev->count)) {
- err = -EINVAL;
- goto out_rx;
+ if (areq->iv && ivsize > 0) {
+ offset = areq->cryptlen - ivsize;
+ if (rctx->mode & RK_CRYPTO_DEC) {
+ memcpy(areq->iv, rctx->backup_iv, ivsize);
+ memzero_explicit(rctx->backup_iv, ivsize);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+ ivsize, 0);
}
}
- if (dev->left_bytes) {
- rk_update_iv(dev);
- if (dev->aligned) {
- if (sg_is_last(dev->sg_src)) {
- dev_err(dev->dev, "[%s:%d] Lack of data\n",
- __func__, __LINE__);
- err = -ENOMEM;
- goto out_rx;
- }
- dev->sg_src = sg_next(dev->sg_src);
- dev->sg_dst = sg_next(dev->sg_dst);
- }
- err = rk_set_data_start(dev);
+
+theend:
+ pm_runtime_put_autosuspend(rkc->dev);
+
+ local_bh_disable();
+ crypto_finalize_skcipher_request(engine, areq, err);
+ local_bh_enable();
+ return 0;
+
+theend_sgs:
+ if (sgs == sgd) {
+ dma_unmap_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL);
} else {
- rk_iv_copyback(dev);
- /* here show the calculation is over without any err */
- dev->complete(dev->async_req, 0);
- tasklet_schedule(&dev->queue_task);
+ dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE);
+ dma_unmap_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE);
}
-out_rx:
+theend_iv:
return err;
}
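
Two points in the loop above are worth spelling out. In-place requests (sgs == sgd) are mapped once with DMA_BIDIRECTIONAL rather than with two unidirectional mappings, and the IV handling implements standard CBC chaining: the IV for segment n+1 is the last ciphertext block of segment n, which on decryption must be saved (into biv) before the hardware overwrites the source in place. A hedged sketch of the chaining rule, not driver code:

#include <linux/string.h>

/*
 * CBC chaining:
 *   encrypt: C[i] = E(K, P[i] ^ IV),  next IV = C[i]
 *   decrypt: P[i] = D(K, C[i]) ^ IV,  next IV = C[i]
 * so the next IV is always the last ciphertext block processed.
 */
static void cbc_next_iv(const u8 *ciphertext, unsigned int len,
			unsigned int ivsize, u8 *iv)
{
	memcpy(iv, ciphertext + len - ivsize, ivsize);
}
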
-static int rk_ablk_init_tfm(struct crypto_skcipher *tfm)
+static int rk_cipher_tfm_init(struct crypto_skcipher *tfm)
{
struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct rk_crypto_tmp *algt;
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
- algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+ ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback_tfm)) {
+ dev_err(algt->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(ctx->fallback_tfm));
+ return PTR_ERR(ctx->fallback_tfm);
+ }
+
+ tfm->reqsize = sizeof(struct rk_cipher_rctx) +
+ crypto_skcipher_reqsize(ctx->fallback_tfm);
- ctx->dev = algt->dev;
- ctx->dev->align_size = crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)) + 1;
- ctx->dev->start = rk_ablk_start;
- ctx->dev->update = rk_ablk_rx;
- ctx->dev->complete = rk_crypto_complete;
- ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL);
+ ctx->enginectx.op.do_one_request = rk_cipher_run;
- return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM;
+ return 0;
}
-static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm)
+static void rk_cipher_tfm_exit(struct crypto_skcipher *tfm)
{
struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- free_page((unsigned long)ctx->dev->addr_vir);
- ctx->dev->disable_clk(ctx->dev);
+ memzero_explicit(ctx->key, ctx->keylen);
+ crypto_free_skcipher(ctx->fallback_tfm);
}
struct rk_crypto_tmp rk_ecb_aes_alg = {
- .type = ALG_TYPE_CIPHER,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "ecb(aes)",
.base.cra_driver_name = "ecb-aes-rk",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.base.cra_alignmask = 0x0f,
.base.cra_module = THIS_MODULE,
- .init = rk_ablk_init_tfm,
- .exit = rk_ablk_exit_tfm,
+ .init = rk_cipher_tfm_init,
+ .exit = rk_cipher_tfm_exit,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = rk_aes_setkey,
@@ -423,19 +478,19 @@ struct rk_crypto_tmp rk_ecb_aes_alg = {
};
struct rk_crypto_tmp rk_cbc_aes_alg = {
- .type = ALG_TYPE_CIPHER,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "cbc-aes-rk",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.base.cra_alignmask = 0x0f,
.base.cra_module = THIS_MODULE,
- .init = rk_ablk_init_tfm,
- .exit = rk_ablk_exit_tfm,
+ .init = rk_cipher_tfm_init,
+ .exit = rk_cipher_tfm_exit,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@@ -446,19 +501,19 @@ struct rk_crypto_tmp rk_cbc_aes_alg = {
};
struct rk_crypto_tmp rk_ecb_des_alg = {
- .type = ALG_TYPE_CIPHER,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "ecb(des)",
.base.cra_driver_name = "ecb-des-rk",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.base.cra_alignmask = 0x07,
.base.cra_module = THIS_MODULE,
- .init = rk_ablk_init_tfm,
- .exit = rk_ablk_exit_tfm,
+ .init = rk_cipher_tfm_init,
+ .exit = rk_cipher_tfm_exit,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = rk_des_setkey,
@@ -468,19 +523,19 @@ struct rk_crypto_tmp rk_ecb_des_alg = {
};
struct rk_crypto_tmp rk_cbc_des_alg = {
- .type = ALG_TYPE_CIPHER,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "cbc(des)",
.base.cra_driver_name = "cbc-des-rk",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.base.cra_alignmask = 0x07,
.base.cra_module = THIS_MODULE,
- .init = rk_ablk_init_tfm,
- .exit = rk_ablk_exit_tfm,
+ .init = rk_cipher_tfm_init,
+ .exit = rk_cipher_tfm_exit,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -491,19 +546,19 @@ struct rk_crypto_tmp rk_cbc_des_alg = {
};
struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
- .type = ALG_TYPE_CIPHER,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "ecb-des3-ede-rk",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.base.cra_alignmask = 0x07,
.base.cra_module = THIS_MODULE,
- .init = rk_ablk_init_tfm,
- .exit = rk_ablk_exit_tfm,
+ .init = rk_cipher_tfm_init,
+ .exit = rk_cipher_tfm_exit,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = rk_tdes_setkey,
@@ -513,19 +568,19 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
};
struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
- .type = ALG_TYPE_CIPHER,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cbc-des3-ede-rk",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.base.cra_alignmask = 0x07,
.base.cra_module = THIS_MODULE,
- .init = rk_ablk_init_tfm,
- .exit = rk_ablk_exit_tfm,
+ .init = rk_cipher_tfm_init,
+ .exit = rk_cipher_tfm_exit,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 4a4c3284ae1f..4fc581e9e595 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -10,7 +10,7 @@ config CRYPTO_DEV_STM32_CRC
config CRYPTO_DEV_STM32_HASH
tristate "Support for STM32 hash accelerators"
- depends on ARCH_STM32
+ depends on ARCH_STM32 || ARCH_U8500
depends on HAS_DMA
select CRYPTO_HASH
select CRYPTO_MD5
@@ -23,7 +23,7 @@ config CRYPTO_DEV_STM32_HASH
config CRYPTO_DEV_STM32_CRYP
tristate "Support for STM32 cryp accelerators"
- depends on ARCH_STM32
+ depends on ARCH_STM32 || ARCH_U8500
select CRYPTO_HASH
select CRYPTO_ENGINE
select CRYPTO_LIB_DES
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 59ef541123ae..4208338e72b6 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) STMicroelectronics SA 2017
* Author: Fabien Dessenne <fabien.dessenne@st.com>
+ * Ux500 support taken from snippets in the old Ux500 cryp driver
*/
#include <linux/clk.h>
@@ -62,6 +63,29 @@
#define CRYP_CSGCMCCM0R 0x00000050
#define CRYP_CSGCM0R 0x00000070
+#define UX500_CRYP_CR 0x00000000
+#define UX500_CRYP_SR 0x00000004
+#define UX500_CRYP_DIN 0x00000008
+#define UX500_CRYP_DINSIZE 0x0000000C
+#define UX500_CRYP_DOUT 0x00000010
+#define UX500_CRYP_DOUSIZE 0x00000014
+#define UX500_CRYP_DMACR 0x00000018
+#define UX500_CRYP_IMSC 0x0000001C
+#define UX500_CRYP_RIS 0x00000020
+#define UX500_CRYP_MIS 0x00000024
+#define UX500_CRYP_K1L 0x00000028
+#define UX500_CRYP_K1R 0x0000002C
+#define UX500_CRYP_K2L 0x00000030
+#define UX500_CRYP_K2R 0x00000034
+#define UX500_CRYP_K3L 0x00000038
+#define UX500_CRYP_K3R 0x0000003C
+#define UX500_CRYP_K4L 0x00000040
+#define UX500_CRYP_K4R 0x00000044
+#define UX500_CRYP_IV0L 0x00000048
+#define UX500_CRYP_IV0R 0x0000004C
+#define UX500_CRYP_IV1L 0x00000050
+#define UX500_CRYP_IV1R 0x00000054
+
/* Registers values */
#define CR_DEC_NOT_ENC 0x00000004
#define CR_TDES_ECB 0x00000000
@@ -71,7 +95,8 @@
#define CR_AES_ECB 0x00000020
#define CR_AES_CBC 0x00000028
#define CR_AES_CTR 0x00000030
-#define CR_AES_KP 0x00000038
+#define CR_AES_KP 0x00000038 /* Not on Ux500 */
+#define CR_AES_XTS 0x00000038 /* Only on Ux500 */
#define CR_AES_GCM 0x00080000
#define CR_AES_CCM 0x00080008
#define CR_AES_UNKNOWN 0xFFFFFFFF
@@ -83,6 +108,8 @@
#define CR_KEY128 0x00000000
#define CR_KEY192 0x00000100
#define CR_KEY256 0x00000200
+#define CR_KEYRDEN 0x00000400 /* Only on Ux500 */
+#define CR_KSE 0x00000800 /* Only on Ux500 */
#define CR_FFLUSH 0x00004000
#define CR_CRYPEN 0x00008000
#define CR_PH_INIT 0x00000000
@@ -107,8 +134,25 @@
#define CRYP_AUTOSUSPEND_DELAY 50
struct stm32_cryp_caps {
- bool swap_final;
- bool padding_wa;
+ bool aeads_support;
+ bool linear_aes_key;
+ bool kp_mode;
+ bool iv_protection;
+ bool swap_final;
+ bool padding_wa;
+ u32 cr;
+ u32 sr;
+ u32 din;
+ u32 dout;
+ u32 imsc;
+ u32 mis;
+ u32 k1l;
+ u32 k1r;
+ u32 k3r;
+ u32 iv0l;
+ u32 iv0r;
+ u32 iv1l;
+ u32 iv1r;
};
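
The new fields turn every register access into a per-variant lookup: instead of hard-coded CRYP_* offsets, the rest of the patch reads the offset from cryp->caps, letting one code base drive both the STM32 and the older Ux500 register layouts. A sketch of the resulting access pattern (illustrative; the helper name is hypothetical):

/* sketch: the same accessor works for both register layouts */
static inline u32 cryp_read_cr(struct stm32_cryp *cryp)
{
	return readl_relaxed(cryp->regs + cryp->caps->cr);
}
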
struct stm32_cryp_ctx {
@@ -228,20 +272,21 @@ static inline int stm32_cryp_wait_busy(struct stm32_cryp *cryp)
{
u32 status;
- return readl_relaxed_poll_timeout(cryp->regs + CRYP_SR, status,
+ return readl_relaxed_poll_timeout(cryp->regs + cryp->caps->sr, status,
!(status & SR_BUSY), 10, 100000);
}
static inline void stm32_cryp_enable(struct stm32_cryp *cryp)
{
- writel_relaxed(readl_relaxed(cryp->regs + CRYP_CR) | CR_CRYPEN, cryp->regs + CRYP_CR);
+ writel_relaxed(readl_relaxed(cryp->regs + cryp->caps->cr) | CR_CRYPEN,
+ cryp->regs + cryp->caps->cr);
}
static inline int stm32_cryp_wait_enable(struct stm32_cryp *cryp)
{
u32 status;
- return readl_relaxed_poll_timeout(cryp->regs + CRYP_CR, status,
+ return readl_relaxed_poll_timeout(cryp->regs + cryp->caps->cr, status,
!(status & CR_CRYPEN), 10, 100000);
}
@@ -249,10 +294,22 @@ static inline int stm32_cryp_wait_output(struct stm32_cryp *cryp)
{
u32 status;
- return readl_relaxed_poll_timeout(cryp->regs + CRYP_SR, status,
+ return readl_relaxed_poll_timeout(cryp->regs + cryp->caps->sr, status,
status & SR_OFNE, 10, 100000);
}
+static inline void stm32_cryp_key_read_enable(struct stm32_cryp *cryp)
+{
+ writel_relaxed(readl_relaxed(cryp->regs + cryp->caps->cr) | CR_KEYRDEN,
+ cryp->regs + cryp->caps->cr);
+}
+
+static inline void stm32_cryp_key_read_disable(struct stm32_cryp *cryp)
+{
+ writel_relaxed(readl_relaxed(cryp->regs + cryp->caps->cr) & ~CR_KEYRDEN,
+ cryp->regs + cryp->caps->cr);
+}
+
static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp);
static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err);
@@ -281,12 +338,12 @@ static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, __be32 *iv)
if (!iv)
return;
- stm32_cryp_write(cryp, CRYP_IV0LR, be32_to_cpu(*iv++));
- stm32_cryp_write(cryp, CRYP_IV0RR, be32_to_cpu(*iv++));
+ stm32_cryp_write(cryp, cryp->caps->iv0l, be32_to_cpu(*iv++));
+ stm32_cryp_write(cryp, cryp->caps->iv0r, be32_to_cpu(*iv++));
if (is_aes(cryp)) {
- stm32_cryp_write(cryp, CRYP_IV1LR, be32_to_cpu(*iv++));
- stm32_cryp_write(cryp, CRYP_IV1RR, be32_to_cpu(*iv++));
+ stm32_cryp_write(cryp, cryp->caps->iv1l, be32_to_cpu(*iv++));
+ stm32_cryp_write(cryp, cryp->caps->iv1r, be32_to_cpu(*iv++));
}
}
@@ -298,12 +355,102 @@ static void stm32_cryp_get_iv(struct stm32_cryp *cryp)
if (!tmp)
return;
- *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0LR));
- *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0RR));
+ if (cryp->caps->iv_protection)
+ stm32_cryp_key_read_enable(cryp);
+
+ *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv0l));
+ *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv0r));
if (is_aes(cryp)) {
- *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1LR));
- *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1RR));
+ *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv1l));
+ *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv1r));
+ }
+
+ if (cryp->caps->iv_protection)
+ stm32_cryp_key_read_disable(cryp);
+}
+
+/**
+ * ux500_swap_bits_in_byte() - mirror the bits in a byte
+ * @b: the byte to be mirrored
+ *
+ * The bits are swapped the following way:
+ * Byte b includes bits 0-7, nibble 1 (n1) includes bits 0-3 and
+ * nibble 2 (n2) bits 4-7.
+ *
+ * Nibble 1 (n1):
+ * (The "old" (moved) bit is replaced with a zero)
+ * 1. Move bit 6 and 7, 4 positions to the left.
+ * 2. Move bit 3 and 5, 2 positions to the left.
+ * 3. Move bit 1-4, 1 position to the left.
+ *
+ * Nibble 2 (n2):
+ * 1. Move bit 0 and 1, 4 positions to the right.
+ * 2. Move bit 2 and 4, 2 positions to the right.
+ * 3. Move bit 3-6, 1 position to the right.
+ *
+ * Combine the two nibbles to a complete and swapped byte.
+ */
+static inline u8 ux500_swap_bits_in_byte(u8 b)
+{
+#define R_SHIFT_4_MASK 0xc0 /* Bits 6 and 7, right shift 4 */
+#define R_SHIFT_2_MASK 0x28 /* (After right shift 4) Bits 3 and 5,
+ right shift 2 */
+#define R_SHIFT_1_MASK 0x1e /* (After right shift 2) Bits 1-4,
+ right shift 1 */
+#define L_SHIFT_4_MASK 0x03 /* Bits 0 and 1, left shift 4 */
+#define L_SHIFT_2_MASK 0x14 /* (After left shift 4) Bits 2 and 4,
+ left shift 2 */
+#define L_SHIFT_1_MASK 0x78 /* (After left shift 1) Bits 3-6,
+ left shift 1 */
+
+ u8 n1;
+ u8 n2;
+
+ /* Swap most significant nibble */
+ /* Right shift 4, bits 6 and 7 */
+ n1 = ((b & R_SHIFT_4_MASK) >> 4) | (b & ~(R_SHIFT_4_MASK >> 4));
+ /* Right shift 2, bits 3 and 5 */
+ n1 = ((n1 & R_SHIFT_2_MASK) >> 2) | (n1 & ~(R_SHIFT_2_MASK >> 2));
+ /* Right shift 1, bits 1-4 */
+ n1 = (n1 & R_SHIFT_1_MASK) >> 1;
+
+ /* Swap least significant nibble */
+ /* Left shift 4, bits 0 and 1 */
+ n2 = ((b & L_SHIFT_4_MASK) << 4) | (b & ~(L_SHIFT_4_MASK << 4));
+ /* Left shift 2, bits 2 and 4 */
+ n2 = ((n2 & L_SHIFT_2_MASK) << 2) | (n2 & ~(L_SHIFT_2_MASK << 2));
+ /* Left shift 1, bits 3-6 */
+ n2 = (n2 & L_SHIFT_1_MASK) << 1;
+
+ return n1 | n2;
+}
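
As far as this editor can tell, the transform above is plain bit reversal of a byte (bit 0 swaps with bit 7, bit 1 with bit 6, and so on), which the kernel already provides as bitrev8() in <linux/bitrev.h>. A hedged equivalence sketch (an assumption, not something the patch asserts):

#include <linux/bitrev.h>

/* assumed: ux500_swap_bits_in_byte(b) == bitrev8(b) for every b */
static inline u8 mirror_byte(u8 b)
{
	return bitrev8(b);	/* table-driven 8-bit reversal */
}
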
+
+/**
+ * ux500_swizzle_key() - Shuffle around words and bits in the AES key
+ * @in: key to swizzle
+ * @out: swizzled key
+ * @len: length of key, in bytes
+ *
+ * This "key swizzling procedure" is described in the examples in the
+ * DB8500 design specification. There is no real description of why
+ * the bits have been arranged like this in the hardware.
+ */
+static inline void ux500_swizzle_key(const u8 *in, u8 *out, u32 len)
+{
+ int i = 0;
+ int bpw = sizeof(u32);
+ int j;
+ int index = 0;
+
+ j = len - bpw;
+ while (j >= 0) {
+ for (i = 0; i < bpw; i++) {
+ index = len - j - bpw + i;
+ out[j + i] =
+ ux500_swap_bits_in_byte(in[index]);
+ }
+ j -= bpw;
}
}
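
Working through the index arithmetic for a 128-bit key (len = 16, bpw = 4): the first pass has j = 12 and index = i, so out[12..15] = mirror(in[0..3]); the next pass gives out[8..11] = mirror(in[4..7]), and so on. The 32-bit words are therefore written in reverse order while the bytes inside each word keep their positions, each individually bit-mirrored. A small illustrative check (hypothetical helper, not driver code):

/* sketch: word order reversed, bytes within a word kept, bits mirrored */
static void swizzle_demo(void)
{
	u8 in[16], out[16];
	int i;

	for (i = 0; i < 16; i++)
		in[i] = i;
	ux500_swizzle_key(in, out, sizeof(in));
	/* now out[12] == ux500_swap_bits_in_byte(in[0]), out[13] == in[1] mirrored, ... */
}
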
@@ -313,14 +460,33 @@ static void stm32_cryp_hw_write_key(struct stm32_cryp *c)
int r_id;
if (is_des(c)) {
- stm32_cryp_write(c, CRYP_K1LR, be32_to_cpu(c->ctx->key[0]));
- stm32_cryp_write(c, CRYP_K1RR, be32_to_cpu(c->ctx->key[1]));
- } else {
- r_id = CRYP_K3RR;
- for (i = c->ctx->keylen / sizeof(u32); i > 0; i--, r_id -= 4)
- stm32_cryp_write(c, r_id,
- be32_to_cpu(c->ctx->key[i - 1]));
+ stm32_cryp_write(c, c->caps->k1l, be32_to_cpu(c->ctx->key[0]));
+ stm32_cryp_write(c, c->caps->k1r, be32_to_cpu(c->ctx->key[1]));
+ return;
}
+
+ /*
+	 * On the Ux500 the AES key is treated as a single bit sequence
+	 * of 128, 192 or 256 bits. It is written linearly into the
+	 * registers from K1L downward, and needs to be processed to become
+	 * a proper big-endian bit sequence.
+ */
+ if (is_aes(c) && c->caps->linear_aes_key) {
+ u32 tmpkey[8];
+
+ ux500_swizzle_key((u8 *)c->ctx->key,
+ (u8 *)tmpkey, c->ctx->keylen);
+
+ r_id = c->caps->k1l;
+ for (i = 0; i < c->ctx->keylen / sizeof(u32); i++, r_id += 4)
+ stm32_cryp_write(c, r_id, tmpkey[i]);
+
+ return;
+ }
+
+ r_id = c->caps->k3r;
+ for (i = c->ctx->keylen / sizeof(u32); i > 0; i--, r_id -= 4)
+ stm32_cryp_write(c, r_id, be32_to_cpu(c->ctx->key[i - 1]));
}
static u32 stm32_cryp_get_hw_mode(struct stm32_cryp *cryp)
@@ -373,7 +539,7 @@ static int stm32_cryp_gcm_init(struct stm32_cryp *cryp, u32 cfg)
cryp->gcm_ctr = GCM_CTR_INIT;
stm32_cryp_hw_write_iv(cryp, iv);
- stm32_cryp_write(cryp, CRYP_CR, cfg | CR_PH_INIT | CR_CRYPEN);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg | CR_PH_INIT | CR_CRYPEN);
/* Wait for end of processing */
ret = stm32_cryp_wait_enable(cryp);
@@ -385,10 +551,10 @@ static int stm32_cryp_gcm_init(struct stm32_cryp *cryp, u32 cfg)
/* Prepare next phase */
if (cryp->areq->assoclen) {
cfg |= CR_PH_HEADER;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
} else if (stm32_cryp_get_input_text_len(cryp)) {
cfg |= CR_PH_PAYLOAD;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
}
return 0;
@@ -405,20 +571,20 @@ static void stm32_crypt_gcmccm_end_header(struct stm32_cryp *cryp)
err = stm32_cryp_wait_busy(cryp);
if (err) {
dev_err(cryp->dev, "Timeout (gcm/ccm header)\n");
- stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+ stm32_cryp_write(cryp, cryp->caps->imsc, 0);
stm32_cryp_finish_req(cryp, err);
return;
}
if (stm32_cryp_get_input_text_len(cryp)) {
/* Phase 3 : payload */
- cfg = stm32_cryp_read(cryp, CRYP_CR);
+ cfg = stm32_cryp_read(cryp, cryp->caps->cr);
cfg &= ~CR_CRYPEN;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
cfg &= ~CR_PH_MASK;
cfg |= CR_PH_PAYLOAD | CR_CRYPEN;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
} else {
/*
* Phase 4 : tag.
@@ -458,7 +624,7 @@ static void stm32_cryp_write_ccm_first_header(struct stm32_cryp *cryp)
scatterwalk_copychunks((char *)block + len, &cryp->in_walk, written, 0);
for (i = 0; i < AES_BLOCK_32; i++)
- stm32_cryp_write(cryp, CRYP_DIN, block[i]);
+ stm32_cryp_write(cryp, cryp->caps->din, block[i]);
cryp->header_in -= written;
@@ -494,7 +660,7 @@ static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg)
b0[AES_BLOCK_SIZE - 1] = textlen & 0xFF;
/* Enable HW */
- stm32_cryp_write(cryp, CRYP_CR, cfg | CR_PH_INIT | CR_CRYPEN);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg | CR_PH_INIT | CR_CRYPEN);
/* Write B0 */
d = (u32 *)b0;
@@ -505,7 +671,7 @@ static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg)
if (!cryp->caps->padding_wa)
xd = be32_to_cpu(bd[i]);
- stm32_cryp_write(cryp, CRYP_DIN, xd);
+ stm32_cryp_write(cryp, cryp->caps->din, xd);
}
/* Wait for end of processing */
@@ -518,13 +684,13 @@ static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg)
/* Prepare next phase */
if (cryp->areq->assoclen) {
cfg |= CR_PH_HEADER | CR_CRYPEN;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
/* Write first (special) block (may move to next phase [payload]) */
stm32_cryp_write_ccm_first_header(cryp);
} else if (stm32_cryp_get_input_text_len(cryp)) {
cfg |= CR_PH_PAYLOAD;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
}
return 0;
@@ -538,7 +704,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
pm_runtime_get_sync(cryp->dev);
/* Disable interrupt */
- stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+ stm32_cryp_write(cryp, cryp->caps->imsc, 0);
/* Set configuration */
cfg = CR_DATA8 | CR_FFLUSH;
@@ -566,7 +732,12 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
if (is_decrypt(cryp) &&
((hw_mode == CR_AES_ECB) || (hw_mode == CR_AES_CBC))) {
/* Configure in key preparation mode */
- stm32_cryp_write(cryp, CRYP_CR, cfg | CR_AES_KP);
+ if (cryp->caps->kp_mode)
+ stm32_cryp_write(cryp, cryp->caps->cr,
+ cfg | CR_AES_KP);
+ else
+ stm32_cryp_write(cryp,
+ cryp->caps->cr, cfg | CR_AES_ECB | CR_KSE);
/* Set key only after full configuration done */
stm32_cryp_hw_write_key(cryp);
@@ -583,14 +754,14 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
cfg |= hw_mode | CR_DEC_NOT_ENC;
/* Apply updated config (Decrypt + algo) and flush */
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
} else {
cfg |= hw_mode;
if (is_decrypt(cryp))
cfg |= CR_DEC_NOT_ENC;
/* Apply config and flush */
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
/* Set key only after configuration done */
stm32_cryp_hw_write_key(cryp);
@@ -649,7 +820,7 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
{
/* Enable interrupt and let the IRQ handler do everything */
- stm32_cryp_write(cryp, CRYP_IMSCR, IMSCR_IN | IMSCR_OUT);
+ stm32_cryp_write(cryp, cryp->caps->imsc, IMSCR_IN | IMSCR_OUT);
return 0;
}
@@ -1137,14 +1308,14 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
int ret = 0;
/* Update Config */
- cfg = stm32_cryp_read(cryp, CRYP_CR);
+ cfg = stm32_cryp_read(cryp, cryp->caps->cr);
cfg &= ~CR_PH_MASK;
cfg |= CR_PH_FINAL;
cfg &= ~CR_DEC_NOT_ENC;
cfg |= CR_CRYPEN;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
if (is_gcm(cryp)) {
/* GCM: write aad and payload size (in bits) */
@@ -1152,8 +1323,8 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
if (cryp->caps->swap_final)
size_bit = (__force u32)cpu_to_be32(size_bit);
- stm32_cryp_write(cryp, CRYP_DIN, 0);
- stm32_cryp_write(cryp, CRYP_DIN, size_bit);
+ stm32_cryp_write(cryp, cryp->caps->din, 0);
+ stm32_cryp_write(cryp, cryp->caps->din, size_bit);
size_bit = is_encrypt(cryp) ? cryp->areq->cryptlen :
cryp->areq->cryptlen - cryp->authsize;
@@ -1161,8 +1332,8 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
if (cryp->caps->swap_final)
size_bit = (__force u32)cpu_to_be32(size_bit);
- stm32_cryp_write(cryp, CRYP_DIN, 0);
- stm32_cryp_write(cryp, CRYP_DIN, size_bit);
+ stm32_cryp_write(cryp, cryp->caps->din, 0);
+ stm32_cryp_write(cryp, cryp->caps->din, size_bit);
} else {
/* CCM: write CTR0 */
u32 iv32[AES_BLOCK_32];
@@ -1177,7 +1348,7 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
if (!cryp->caps->padding_wa)
xiv = be32_to_cpu(biv[i]);
- stm32_cryp_write(cryp, CRYP_DIN, xiv);
+ stm32_cryp_write(cryp, cryp->caps->din, xiv);
}
}
@@ -1193,7 +1364,7 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
/* Get and write tag */
for (i = 0; i < AES_BLOCK_32; i++)
- out_tag[i] = stm32_cryp_read(cryp, CRYP_DOUT);
+ out_tag[i] = stm32_cryp_read(cryp, cryp->caps->dout);
scatterwalk_copychunks(out_tag, &cryp->out_walk, cryp->authsize, 1);
} else {
@@ -1203,7 +1374,7 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
scatterwalk_copychunks(in_tag, &cryp->in_walk, cryp->authsize, 0);
for (i = 0; i < AES_BLOCK_32; i++)
- out_tag[i] = stm32_cryp_read(cryp, CRYP_DOUT);
+ out_tag[i] = stm32_cryp_read(cryp, cryp->caps->dout);
if (crypto_memneq(in_tag, out_tag, cryp->authsize))
ret = -EBADMSG;
@@ -1211,7 +1382,7 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
/* Disable cryp */
cfg &= ~CR_CRYPEN;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
return ret;
}
@@ -1227,19 +1398,19 @@ static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp)
*/
crypto_inc((u8 *)cryp->last_ctr, sizeof(cryp->last_ctr));
- cr = stm32_cryp_read(cryp, CRYP_CR);
- stm32_cryp_write(cryp, CRYP_CR, cr & ~CR_CRYPEN);
+ cr = stm32_cryp_read(cryp, cryp->caps->cr);
+ stm32_cryp_write(cryp, cryp->caps->cr, cr & ~CR_CRYPEN);
stm32_cryp_hw_write_iv(cryp, cryp->last_ctr);
- stm32_cryp_write(cryp, CRYP_CR, cr);
+ stm32_cryp_write(cryp, cryp->caps->cr, cr);
}
/* The IV registers are BE */
- cryp->last_ctr[0] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0LR));
- cryp->last_ctr[1] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0RR));
- cryp->last_ctr[2] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1LR));
- cryp->last_ctr[3] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1RR));
+ cryp->last_ctr[0] = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv0l));
+ cryp->last_ctr[1] = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv0r));
+ cryp->last_ctr[2] = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv1l));
+ cryp->last_ctr[3] = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv1r));
}
static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
@@ -1248,7 +1419,7 @@ static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
u32 block[AES_BLOCK_32];
for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++)
- block[i] = stm32_cryp_read(cryp, CRYP_DOUT);
+ block[i] = stm32_cryp_read(cryp, cryp->caps->dout);
scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize,
cryp->payload_out), 1);
@@ -1264,7 +1435,7 @@ static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize,
cryp->payload_in), 0);
for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++)
- stm32_cryp_write(cryp, CRYP_DIN, block[i]);
+ stm32_cryp_write(cryp, cryp->caps->din, block[i]);
cryp->payload_in -= min_t(size_t, cryp->hw_blocksize, cryp->payload_in);
}
@@ -1278,22 +1449,22 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
/* 'Special workaround' procedure described in the datasheet */
/* a) disable ip */
- stm32_cryp_write(cryp, CRYP_IMSCR, 0);
- cfg = stm32_cryp_read(cryp, CRYP_CR);
+ stm32_cryp_write(cryp, cryp->caps->imsc, 0);
+ cfg = stm32_cryp_read(cryp, cryp->caps->cr);
cfg &= ~CR_CRYPEN;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
/* b) Update IV1R */
- stm32_cryp_write(cryp, CRYP_IV1RR, cryp->gcm_ctr - 2);
+ stm32_cryp_write(cryp, cryp->caps->iv1r, cryp->gcm_ctr - 2);
/* c) change mode to CTR */
cfg &= ~CR_ALGO_MASK;
cfg |= CR_AES_CTR;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
/* a) enable IP */
cfg |= CR_CRYPEN;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
/* b) pad and write the last block */
stm32_cryp_irq_write_block(cryp);
@@ -1310,7 +1481,7 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
* block value
*/
for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++)
- block[i] = stm32_cryp_read(cryp, CRYP_DOUT);
+ block[i] = stm32_cryp_read(cryp, cryp->caps->dout);
scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize,
cryp->payload_out), 1);
@@ -1320,16 +1491,16 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
/* d) change mode back to AES GCM */
cfg &= ~CR_ALGO_MASK;
cfg |= CR_AES_GCM;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
/* e) change phase to Final */
cfg &= ~CR_PH_MASK;
cfg |= CR_PH_FINAL;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
/* f) write padded data */
for (i = 0; i < AES_BLOCK_32; i++)
- stm32_cryp_write(cryp, CRYP_DIN, block[i]);
+ stm32_cryp_write(cryp, cryp->caps->din, block[i]);
/* g) Empty fifo out */
err = stm32_cryp_wait_output(cryp);
@@ -1339,7 +1510,7 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
}
for (i = 0; i < AES_BLOCK_32; i++)
- stm32_cryp_read(cryp, CRYP_DOUT);
+ stm32_cryp_read(cryp, cryp->caps->dout);
	/* h) run the normal Final phase */
stm32_cryp_finish_req(cryp, 0);
@@ -1350,13 +1521,13 @@ static void stm32_cryp_irq_set_npblb(struct stm32_cryp *cryp)
u32 cfg;
	/* disable ip, set NPBLB and re-enable ip */
- cfg = stm32_cryp_read(cryp, CRYP_CR);
+ cfg = stm32_cryp_read(cryp, cryp->caps->cr);
cfg &= ~CR_CRYPEN;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
cfg |= (cryp->hw_blocksize - cryp->payload_in) << CR_NBPBL_SHIFT;
cfg |= CR_CRYPEN;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
}
static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
@@ -1370,11 +1541,11 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
/* 'Special workaround' procedure described in the datasheet */
/* a) disable ip */
- stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+ stm32_cryp_write(cryp, cryp->caps->imsc, 0);
- cfg = stm32_cryp_read(cryp, CRYP_CR);
+ cfg = stm32_cryp_read(cryp, cryp->caps->cr);
cfg &= ~CR_CRYPEN;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
/* b) get IV1 from CRYP_CSGCMCCM7 */
iv1tmp = stm32_cryp_read(cryp, CRYP_CSGCMCCM0R + 7 * 4);
@@ -1384,23 +1555,23 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
cstmp1[i] = stm32_cryp_read(cryp, CRYP_CSGCMCCM0R + i * 4);
/* d) Write IV1R */
- stm32_cryp_write(cryp, CRYP_IV1RR, iv1tmp);
+ stm32_cryp_write(cryp, cryp->caps->iv1r, iv1tmp);
/* e) change mode to CTR */
cfg &= ~CR_ALGO_MASK;
cfg |= CR_AES_CTR;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
/* a) enable IP */
cfg |= CR_CRYPEN;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
/* b) pad and write the last block */
stm32_cryp_irq_write_block(cryp);
/* wait end of process */
err = stm32_cryp_wait_output(cryp);
if (err) {
- dev_err(cryp->dev, "Timeout (wite ccm padded data)\n");
+ dev_err(cryp->dev, "Timeout (write ccm padded data)\n");
return stm32_cryp_finish_req(cryp, err);
}
@@ -1410,7 +1581,7 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
* block value
*/
for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++)
- block[i] = stm32_cryp_read(cryp, CRYP_DOUT);
+ block[i] = stm32_cryp_read(cryp, cryp->caps->dout);
scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize,
cryp->payload_out), 1);
@@ -1423,24 +1594,24 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
/* e) change mode back to AES CCM */
cfg &= ~CR_ALGO_MASK;
cfg |= CR_AES_CCM;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
/* f) change phase to header */
cfg &= ~CR_PH_MASK;
cfg |= CR_PH_HEADER;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ stm32_cryp_write(cryp, cryp->caps->cr, cfg);
/* g) XOR and write padded data */
for (i = 0; i < ARRAY_SIZE(block); i++) {
block[i] ^= cstmp1[i];
block[i] ^= cstmp2[i];
- stm32_cryp_write(cryp, CRYP_DIN, block[i]);
+ stm32_cryp_write(cryp, cryp->caps->din, block[i]);
}
/* h) wait for completion */
err = stm32_cryp_wait_busy(cryp);
if (err)
- dev_err(cryp->dev, "Timeout (wite ccm padded data)\n");
+ dev_err(cryp->dev, "Timeout (write ccm padded data)\n");
	/* i) run the normal Final phase */
stm32_cryp_finish_req(cryp, err);
@@ -1497,7 +1668,7 @@ static void stm32_cryp_irq_write_gcmccm_header(struct stm32_cryp *cryp)
scatterwalk_copychunks(block, &cryp->in_walk, written, 0);
for (i = 0; i < AES_BLOCK_32; i++)
- stm32_cryp_write(cryp, CRYP_DIN, block[i]);
+ stm32_cryp_write(cryp, cryp->caps->din, block[i]);
cryp->header_in -= written;
@@ -1508,7 +1679,7 @@ static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg)
{
struct stm32_cryp *cryp = arg;
u32 ph;
- u32 it_mask = stm32_cryp_read(cryp, CRYP_IMSCR);
+ u32 it_mask = stm32_cryp_read(cryp, cryp->caps->imsc);
if (cryp->irq_status & MISR_OUT)
/* Output FIFO IRQ: read data */
@@ -1516,7 +1687,7 @@ static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg)
if (cryp->irq_status & MISR_IN) {
if (is_gcm(cryp) || is_ccm(cryp)) {
- ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK;
+ ph = stm32_cryp_read(cryp, cryp->caps->cr) & CR_PH_MASK;
if (unlikely(ph == CR_PH_HEADER))
/* Write Header */
stm32_cryp_irq_write_gcmccm_header(cryp);
@@ -1536,7 +1707,7 @@ static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg)
it_mask &= ~IMSCR_IN;
if (!cryp->payload_out)
it_mask &= ~IMSCR_OUT;
- stm32_cryp_write(cryp, CRYP_IMSCR, it_mask);
+ stm32_cryp_write(cryp, cryp->caps->imsc, it_mask);
if (!cryp->payload_in && !cryp->header_in && !cryp->payload_out)
stm32_cryp_finish_req(cryp, 0);
@@ -1548,7 +1719,7 @@ static irqreturn_t stm32_cryp_irq(int irq, void *arg)
{
struct stm32_cryp *cryp = arg;
- cryp->irq_status = stm32_cryp_read(cryp, CRYP_MISR);
+ cryp->irq_status = stm32_cryp_read(cryp, cryp->caps->mis);
return IRQ_WAKE_THREAD;
}
@@ -1722,17 +1893,74 @@ static struct aead_alg aead_algs[] = {
},
};
+static const struct stm32_cryp_caps ux500_data = {
+ .aeads_support = false,
+ .linear_aes_key = true,
+ .kp_mode = false,
+ .iv_protection = true,
+ .swap_final = true,
+ .padding_wa = true,
+ .cr = UX500_CRYP_CR,
+ .sr = UX500_CRYP_SR,
+ .din = UX500_CRYP_DIN,
+ .dout = UX500_CRYP_DOUT,
+ .imsc = UX500_CRYP_IMSC,
+ .mis = UX500_CRYP_MIS,
+ .k1l = UX500_CRYP_K1L,
+ .k1r = UX500_CRYP_K1R,
+ .k3r = UX500_CRYP_K3R,
+ .iv0l = UX500_CRYP_IV0L,
+ .iv0r = UX500_CRYP_IV0R,
+ .iv1l = UX500_CRYP_IV1L,
+ .iv1r = UX500_CRYP_IV1R,
+};
+
static const struct stm32_cryp_caps f7_data = {
+ .aeads_support = true,
+ .linear_aes_key = false,
+ .kp_mode = true,
+ .iv_protection = false,
.swap_final = true,
.padding_wa = true,
+ .cr = CRYP_CR,
+ .sr = CRYP_SR,
+ .din = CRYP_DIN,
+ .dout = CRYP_DOUT,
+ .imsc = CRYP_IMSCR,
+ .mis = CRYP_MISR,
+ .k1l = CRYP_K1LR,
+ .k1r = CRYP_K1RR,
+ .k3r = CRYP_K3RR,
+ .iv0l = CRYP_IV0LR,
+ .iv0r = CRYP_IV0RR,
+ .iv1l = CRYP_IV1LR,
+ .iv1r = CRYP_IV1RR,
};
static const struct stm32_cryp_caps mp1_data = {
+ .aeads_support = true,
+ .linear_aes_key = false,
+ .kp_mode = true,
+ .iv_protection = false,
.swap_final = false,
.padding_wa = false,
+ .cr = CRYP_CR,
+ .sr = CRYP_SR,
+ .din = CRYP_DIN,
+ .dout = CRYP_DOUT,
+ .imsc = CRYP_IMSCR,
+ .mis = CRYP_MISR,
+ .k1l = CRYP_K1LR,
+ .k1r = CRYP_K1RR,
+ .k3r = CRYP_K3RR,
+ .iv0l = CRYP_IV0LR,
+ .iv0r = CRYP_IV0RR,
+ .iv1l = CRYP_IV1LR,
+ .iv1r = CRYP_IV1RR,
};
static const struct of_device_id stm32_dt_ids[] = {
+ { .compatible = "stericsson,ux500-cryp", .data = &ux500_data},
{ .compatible = "st,stm32f756-cryp", .data = &f7_data},
{ .compatible = "st,stm32mp1-cryp", .data = &mp1_data},
{},
@@ -1829,9 +2057,11 @@ static int stm32_cryp_probe(struct platform_device *pdev)
goto err_algs;
}
- ret = crypto_register_aeads(aead_algs, ARRAY_SIZE(aead_algs));
- if (ret)
- goto err_aead_algs;
+ if (cryp->caps->aeads_support) {
+ ret = crypto_register_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+ if (ret)
+ goto err_aead_algs;
+ }
dev_info(dev, "Initialized\n");
@@ -1869,7 +2099,8 @@ static int stm32_cryp_remove(struct platform_device *pdev)
if (ret < 0)
return ret;
- crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+ if (cryp->caps->aeads_support)
+ crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
crypto_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
crypto_engine_exit(cryp->engine);
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index c9ad6c213090..71db6450b6aa 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1999,7 +1999,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
/* Buffer up to one whole block */
nents = sg_nents_for_len(areq->src, nbytes);
if (nents < 0) {
- dev_err(ctx->dev, "Invalid number of src SG.\n");
+ dev_err(dev, "Invalid number of src SG.\n");
return nents;
}
sg_copy_to_buffer(areq->src, nents,
@@ -2040,7 +2040,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
offset = nbytes_to_hash - req_ctx->nbuf;
nents = sg_nents_for_len(areq->src, offset);
if (nents < 0) {
- dev_err(ctx->dev, "Invalid number of src SG.\n");
+ dev_err(dev, "Invalid number of src SG.\n");
return nents;
}
sg_copy_to_buffer(areq->src, nents,
@@ -2054,7 +2054,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
if (to_hash_later) {
nents = sg_nents_for_len(areq->src, nbytes);
if (nents < 0) {
- dev_err(ctx->dev, "Invalid number of src SG.\n");
+ dev_err(dev, "Invalid number of src SG.\n");
return nents;
}
sg_pcopy_to_buffer(areq->src, nents,
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index 32825119e880..1a93ee355929 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -65,8 +65,8 @@ struct talitos_edesc {
dma_addr_t dma_link_tbl;
struct talitos_desc desc;
union {
- struct talitos_ptr link_tbl[0];
- u8 buf[0];
+ DECLARE_FLEX_ARRAY(struct talitos_ptr, link_tbl);
+ DECLARE_FLEX_ARRAY(u8, buf);
};
};
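
DECLARE_FLEX_ARRAY() is needed here because ISO C does not allow a flexible array member to be the sole member of a struct, which is what each arm of this union would otherwise be. The helper (from <linux/stddef.h>) wraps the array together with an empty struct; roughly:

/* simplified form of the kernel helper, for illustration */
#define DECLARE_FLEX_ARRAY(TYPE, NAME)		\
	struct {				\
		struct { } __empty_ ## NAME;	\
		TYPE NAME[];			\
	}
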
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig
index f56d65c56ccf..dcbd7404768f 100644
--- a/drivers/crypto/ux500/Kconfig
+++ b/drivers/crypto/ux500/Kconfig
@@ -4,16 +4,6 @@
# Author: Shujuan Chen (shujuan.chen@stericsson.com)
#
-config CRYPTO_DEV_UX500_CRYP
- tristate "UX500 crypto driver for CRYP block"
- depends on CRYPTO_DEV_UX500
- select CRYPTO_ALGAPI
- select CRYPTO_SKCIPHER
- select CRYPTO_LIB_DES
- help
- This selects the crypto driver for the UX500_CRYP hardware. It supports
- AES-ECB, CBC and CTR with keys sizes of 128, 192 and 256 bit sizes.
-
config CRYPTO_DEV_UX500_HASH
tristate "UX500 crypto driver for HASH block"
depends on CRYPTO_DEV_UX500
diff --git a/drivers/crypto/ux500/Makefile b/drivers/crypto/ux500/Makefile
index f014eb01710a..f1aa4edf66f4 100644
--- a/drivers/crypto/ux500/Makefile
+++ b/drivers/crypto/ux500/Makefile
@@ -5,4 +5,3 @@
#
obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += hash/
-obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += cryp/
diff --git a/drivers/crypto/ux500/cryp/Makefile b/drivers/crypto/ux500/cryp/Makefile
deleted file mode 100644
index 3e67531f484c..000000000000
--- a/drivers/crypto/ux500/cryp/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#/*
-# * Copyright (C) ST-Ericsson SA 2010
-# * Author: shujuan.chen@stericsson.com for ST-Ericsson.
-# */
-
-ccflags-$(CONFIG_CRYPTO_DEV_UX500_DEBUG) += -DDEBUG
-
-obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o
-ux500_cryp-objs := cryp.o cryp_irq.o cryp_core.o
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c
deleted file mode 100644
index 759d0d9786fd..000000000000
--- a/drivers/crypto/ux500/cryp/cryp.c
+++ /dev/null
@@ -1,394 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson SA 2010
- * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
- * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
- * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
- * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
- * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
- */
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "cryp_p.h"
-#include "cryp.h"
-
-/*
- * cryp_wait_until_done - wait until the device logic is not busy
- */
-void cryp_wait_until_done(struct cryp_device_data *device_data)
-{
- while (cryp_is_logic_busy(device_data))
- cpu_relax();
-}
-
-/**
- * cryp_check - This routine checks Peripheral and PCell Id
- * @device_data: Pointer to the device data struct for base address.
- */
-int cryp_check(struct cryp_device_data *device_data)
-{
- int peripheralid2 = 0;
-
- if (NULL == device_data)
- return -EINVAL;
-
- peripheralid2 = readl_relaxed(&device_data->base->periphId2);
-
- if (peripheralid2 != CRYP_PERIPHERAL_ID2_DB8500)
- return -EPERM;
-
- /* Check Peripheral and Pcell Id Register for CRYP */
- if ((CRYP_PERIPHERAL_ID0 ==
- readl_relaxed(&device_data->base->periphId0))
- && (CRYP_PERIPHERAL_ID1 ==
- readl_relaxed(&device_data->base->periphId1))
- && (CRYP_PERIPHERAL_ID3 ==
- readl_relaxed(&device_data->base->periphId3))
- && (CRYP_PCELL_ID0 ==
- readl_relaxed(&device_data->base->pcellId0))
- && (CRYP_PCELL_ID1 ==
- readl_relaxed(&device_data->base->pcellId1))
- && (CRYP_PCELL_ID2 ==
- readl_relaxed(&device_data->base->pcellId2))
- && (CRYP_PCELL_ID3 ==
- readl_relaxed(&device_data->base->pcellId3))) {
- return 0;
- }
-
- return -EPERM;
-}
-
-/**
- * cryp_activity - This routine enables/disable the cryptography function.
- * @device_data: Pointer to the device data struct for base address.
- * @cryp_crypen: Enable/Disable functionality
- */
-void cryp_activity(struct cryp_device_data *device_data,
- enum cryp_crypen cryp_crypen)
-{
- CRYP_PUT_BITS(&device_data->base->cr,
- cryp_crypen,
- CRYP_CR_CRYPEN_POS,
- CRYP_CR_CRYPEN_MASK);
-}
-
-/**
- * cryp_flush_inoutfifo - Resets both the input and the output FIFOs
- * @device_data: Pointer to the device data struct for base address.
- */
-void cryp_flush_inoutfifo(struct cryp_device_data *device_data)
-{
- /*
- * We always need to disable the hardware before trying to flush the
- * FIFO. This is something that isn't written in the design
- * specification, but we have been informed by the hardware designers
- * that this must be done.
- */
- cryp_activity(device_data, CRYP_CRYPEN_DISABLE);
- cryp_wait_until_done(device_data);
-
- CRYP_SET_BITS(&device_data->base->cr, CRYP_CR_FFLUSH_MASK);
- /*
- * CRYP_SR_INFIFO_READY_MASK is the expected value on the status
- * register when starting a new calculation, which means the input
- * FIFO is not full and the input FIFO is empty.
- */
- while (readl_relaxed(&device_data->base->sr) !=
- CRYP_SR_INFIFO_READY_MASK)
- cpu_relax();
-}
-
-/**
- * cryp_set_configuration - This routine sets the control register of the CRYP IP
- * @device_data: Pointer to the device data struct for base address.
- * @cryp_config: Pointer to the configuration parameter
- * @control_register: The control register to be written later on.
- */
-int cryp_set_configuration(struct cryp_device_data *device_data,
- struct cryp_config *cryp_config,
- u32 *control_register)
-{
- u32 cr_for_kse;
-
- if (NULL == device_data || NULL == cryp_config)
- return -EINVAL;
-
- *control_register |= (cryp_config->keysize << CRYP_CR_KEYSIZE_POS);
-
- /* Prepare key for decryption in AES_ECB and AES_CBC mode. */
- if ((CRYP_ALGORITHM_DECRYPT == cryp_config->algodir) &&
- ((CRYP_ALGO_AES_ECB == cryp_config->algomode) ||
- (CRYP_ALGO_AES_CBC == cryp_config->algomode))) {
- cr_for_kse = *control_register;
- /*
- * This seems a bit odd, but it is indeed needed to set this to
- * encrypt even though it is a decryption that we are doing. It
- * is also mentioned in the design spec that you need to do this.
- * After the key preparation for decrypting is done you should set
- * algodir back to decryption, which is done outside this if
- * statement.
- *
- * According to the design specification we should set mode ECB
- * during key preparation even though we might be running CBC
- * when entering this function.
- *
- * Writing to KSE_ENABLED will drop CRYPEN when key preparation
- * is done. Therefore we need to set CRYPEN again outside this
- * if statement when running decryption.
- */
- cr_for_kse |= ((CRYP_ALGORITHM_ENCRYPT << CRYP_CR_ALGODIR_POS) |
- (CRYP_ALGO_AES_ECB << CRYP_CR_ALGOMODE_POS) |
- (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS) |
- (KSE_ENABLED << CRYP_CR_KSE_POS));
-
- writel_relaxed(cr_for_kse, &device_data->base->cr);
- cryp_wait_until_done(device_data);
- }
-
- *control_register |=
- ((cryp_config->algomode << CRYP_CR_ALGOMODE_POS) |
- (cryp_config->algodir << CRYP_CR_ALGODIR_POS));
-
- return 0;
-}
-
-/**
- * cryp_configure_protection - set the protection bits in the CRYP logic.
- * @device_data: Pointer to the device data struct for base address.
- * @p_protect_config: Pointer to the protection mode and
- * secure mode configuration
- */
-int cryp_configure_protection(struct cryp_device_data *device_data,
- struct cryp_protection_config *p_protect_config)
-{
- if (NULL == p_protect_config)
- return -EINVAL;
-
- CRYP_WRITE_BIT(&device_data->base->cr,
- (u32) p_protect_config->secure_access,
- CRYP_CR_SECURE_MASK);
- CRYP_PUT_BITS(&device_data->base->cr,
- p_protect_config->privilege_access,
- CRYP_CR_PRLG_POS,
- CRYP_CR_PRLG_MASK);
-
- return 0;
-}
-
-/**
- * cryp_is_logic_busy - returns the busy status of the CRYP logic
- * @device_data: Pointer to the device data struct for base address.
- */
-int cryp_is_logic_busy(struct cryp_device_data *device_data)
-{
- return CRYP_TEST_BITS(&device_data->base->sr,
- CRYP_SR_BUSY_MASK);
-}
-
-/**
- * cryp_configure_for_dma - configures the CRYP IP for DMA operation
- * @device_data: Pointer to the device data struct for base address.
- * @dma_req: Specifies the DMA request type value.
- */
-void cryp_configure_for_dma(struct cryp_device_data *device_data,
- enum cryp_dma_req_type dma_req)
-{
- CRYP_SET_BITS(&device_data->base->dmacr,
- (u32) dma_req);
-}
-
-/**
- * cryp_configure_key_values - configures the key values for CRYP operations
- * @device_data: Pointer to the device data struct for base address.
- * @key_reg_index: Key value index register
- * @key_value: The key value struct
- */
-int cryp_configure_key_values(struct cryp_device_data *device_data,
- enum cryp_key_reg_index key_reg_index,
- struct cryp_key_value key_value)
-{
- while (cryp_is_logic_busy(device_data))
- cpu_relax();
-
- switch (key_reg_index) {
- case CRYP_KEY_REG_1:
- writel_relaxed(key_value.key_value_left,
- &device_data->base->key_1_l);
- writel_relaxed(key_value.key_value_right,
- &device_data->base->key_1_r);
- break;
- case CRYP_KEY_REG_2:
- writel_relaxed(key_value.key_value_left,
- &device_data->base->key_2_l);
- writel_relaxed(key_value.key_value_right,
- &device_data->base->key_2_r);
- break;
- case CRYP_KEY_REG_3:
- writel_relaxed(key_value.key_value_left,
- &device_data->base->key_3_l);
- writel_relaxed(key_value.key_value_right,
- &device_data->base->key_3_r);
- break;
- case CRYP_KEY_REG_4:
- writel_relaxed(key_value.key_value_left,
- &device_data->base->key_4_l);
- writel_relaxed(key_value.key_value_right,
- &device_data->base->key_4_r);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * cryp_configure_init_vector - configures the initialization vector register
- * @device_data: Pointer to the device data struct for base address.
- * @init_vector_index: Specifies the index of the init vector.
- * @init_vector_value: Specifies the value for the init vector.
- */
-int cryp_configure_init_vector(struct cryp_device_data *device_data,
- enum cryp_init_vector_index
- init_vector_index,
- struct cryp_init_vector_value
- init_vector_value)
-{
- while (cryp_is_logic_busy(device_data))
- cpu_relax();
-
- switch (init_vector_index) {
- case CRYP_INIT_VECTOR_INDEX_0:
- writel_relaxed(init_vector_value.init_value_left,
- &device_data->base->init_vect_0_l);
- writel_relaxed(init_vector_value.init_value_right,
- &device_data->base->init_vect_0_r);
- break;
- case CRYP_INIT_VECTOR_INDEX_1:
- writel_relaxed(init_vector_value.init_value_left,
- &device_data->base->init_vect_1_l);
- writel_relaxed(init_vector_value.init_value_right,
- &device_data->base->init_vect_1_r);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * cryp_save_device_context - Store hardware registers and
- * other device context parameter
- * @device_data: Pointer to the device data struct for base address.
- * @ctx: Crypto device context
- * @cryp_mode: Mode: Polling, Interrupt or DMA
- */
-void cryp_save_device_context(struct cryp_device_data *device_data,
- struct cryp_device_context *ctx,
- int cryp_mode)
-{
- enum cryp_algo_mode algomode;
- struct cryp_register __iomem *src_reg = device_data->base;
- struct cryp_config *config =
- (struct cryp_config *)device_data->current_ctx;
-
- /*
- * Always start by disabling the hardware and waiting for it to finish the
- * ongoing calculations before trying to reprogram it.
- */
- cryp_activity(device_data, CRYP_CRYPEN_DISABLE);
- cryp_wait_until_done(device_data);
-
- if (cryp_mode == CRYP_MODE_DMA)
- cryp_configure_for_dma(device_data, CRYP_DMA_DISABLE_BOTH);
-
- if (CRYP_TEST_BITS(&src_reg->sr, CRYP_SR_IFEM_MASK) == 0)
- ctx->din = readl_relaxed(&src_reg->din);
-
- ctx->cr = readl_relaxed(&src_reg->cr) & CRYP_CR_CONTEXT_SAVE_MASK;
-
- switch (config->keysize) {
- case CRYP_KEY_SIZE_256:
- ctx->key_4_l = readl_relaxed(&src_reg->key_4_l);
- ctx->key_4_r = readl_relaxed(&src_reg->key_4_r);
- fallthrough;
-
- case CRYP_KEY_SIZE_192:
- ctx->key_3_l = readl_relaxed(&src_reg->key_3_l);
- ctx->key_3_r = readl_relaxed(&src_reg->key_3_r);
- fallthrough;
-
- case CRYP_KEY_SIZE_128:
- ctx->key_2_l = readl_relaxed(&src_reg->key_2_l);
- ctx->key_2_r = readl_relaxed(&src_reg->key_2_r);
- fallthrough;
-
- default:
- ctx->key_1_l = readl_relaxed(&src_reg->key_1_l);
- ctx->key_1_r = readl_relaxed(&src_reg->key_1_r);
- }
-
- /* Save IV for CBC mode for both AES and DES. */
- algomode = ((ctx->cr & CRYP_CR_ALGOMODE_MASK) >> CRYP_CR_ALGOMODE_POS);
- if (algomode == CRYP_ALGO_TDES_CBC ||
- algomode == CRYP_ALGO_DES_CBC ||
- algomode == CRYP_ALGO_AES_CBC) {
- ctx->init_vect_0_l = readl_relaxed(&src_reg->init_vect_0_l);
- ctx->init_vect_0_r = readl_relaxed(&src_reg->init_vect_0_r);
- ctx->init_vect_1_l = readl_relaxed(&src_reg->init_vect_1_l);
- ctx->init_vect_1_r = readl_relaxed(&src_reg->init_vect_1_r);
- }
-}
-
-/**
- * cryp_restore_device_context - Restore hardware registers and
- * other device context parameter
- * @device_data: Pointer to the device data struct for base address.
- * @ctx: Crypto device context
- */
-void cryp_restore_device_context(struct cryp_device_data *device_data,
- struct cryp_device_context *ctx)
-{
- struct cryp_register __iomem *reg = device_data->base;
- struct cryp_config *config =
- (struct cryp_config *)device_data->current_ctx;
-
- /*
- * Fall through for all items in switch statement. DES is captured in
- * the default.
- */
- switch (config->keysize) {
- case CRYP_KEY_SIZE_256:
- writel_relaxed(ctx->key_4_l, &reg->key_4_l);
- writel_relaxed(ctx->key_4_r, &reg->key_4_r);
- fallthrough;
-
- case CRYP_KEY_SIZE_192:
- writel_relaxed(ctx->key_3_l, &reg->key_3_l);
- writel_relaxed(ctx->key_3_r, &reg->key_3_r);
- fallthrough;
-
- case CRYP_KEY_SIZE_128:
- writel_relaxed(ctx->key_2_l, &reg->key_2_l);
- writel_relaxed(ctx->key_2_r, &reg->key_2_r);
- fallthrough;
-
- default:
- writel_relaxed(ctx->key_1_l, &reg->key_1_l);
- writel_relaxed(ctx->key_1_r, &reg->key_1_r);
- }
-
- /* Restore IV for CBC mode for AES and DES. */
- if (config->algomode == CRYP_ALGO_TDES_CBC ||
- config->algomode == CRYP_ALGO_DES_CBC ||
- config->algomode == CRYP_ALGO_AES_CBC) {
- writel_relaxed(ctx->init_vect_0_l, &reg->init_vect_0_l);
- writel_relaxed(ctx->init_vect_0_r, &reg->init_vect_0_r);
- writel_relaxed(ctx->init_vect_1_l, &reg->init_vect_1_l);
- writel_relaxed(ctx->init_vect_1_r, &reg->init_vect_1_r);
- }
-}
diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h
deleted file mode 100644
index 59e1557a620a..000000000000
--- a/drivers/crypto/ux500/cryp/cryp.h
+++ /dev/null
@@ -1,315 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2010
- * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
- * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
- * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
- * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
- * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
- */
-
-#ifndef _CRYP_H_
-#define _CRYP_H_
-
-#include <linux/completion.h>
-#include <linux/dmaengine.h>
-#include <linux/klist.h>
-#include <linux/mutex.h>
-
-#define DEV_DBG_NAME "crypX crypX:"
-
-/* CRYP enable/disable */
-enum cryp_crypen {
- CRYP_CRYPEN_DISABLE = 0,
- CRYP_CRYPEN_ENABLE = 1
-};
-
-/* CRYP Start Computation enable/disable */
-enum cryp_start {
- CRYP_START_DISABLE = 0,
- CRYP_START_ENABLE = 1
-};
-
-/* CRYP Init Signal enable/disable */
-enum cryp_init {
- CRYP_INIT_DISABLE = 0,
- CRYP_INIT_ENABLE = 1
-};
-
-/* Cryp State enable/disable */
-enum cryp_state {
- CRYP_STATE_DISABLE = 0,
- CRYP_STATE_ENABLE = 1
-};
-
-/* Key preparation bit enable */
-enum cryp_key_prep {
- KSE_DISABLED = 0,
- KSE_ENABLED = 1
-};
-
-/* Key size for AES */
-#define CRYP_KEY_SIZE_128 (0)
-#define CRYP_KEY_SIZE_192 (1)
-#define CRYP_KEY_SIZE_256 (2)
-
-/* AES modes */
-enum cryp_algo_mode {
- CRYP_ALGO_TDES_ECB,
- CRYP_ALGO_TDES_CBC,
- CRYP_ALGO_DES_ECB,
- CRYP_ALGO_DES_CBC,
- CRYP_ALGO_AES_ECB,
- CRYP_ALGO_AES_CBC,
- CRYP_ALGO_AES_CTR,
- CRYP_ALGO_AES_XTS
-};
-
-/* Cryp Encryption or Decryption */
-enum cryp_algorithm_dir {
- CRYP_ALGORITHM_ENCRYPT,
- CRYP_ALGORITHM_DECRYPT
-};
-
-/* Hardware access method */
-enum cryp_mode {
- CRYP_MODE_POLLING,
- CRYP_MODE_INTERRUPT,
- CRYP_MODE_DMA
-};
-
-/**
- * struct cryp_config -
- * @keysize: Key size for AES
- * @algomode: AES modes
- * @algodir: Cryp Encryption or Decryption
- *
- * CRYP configuration structure to be passed to set configuration
- */
-struct cryp_config {
- int keysize;
- enum cryp_algo_mode algomode;
- enum cryp_algorithm_dir algodir;
-};
-
-/**
- * struct cryp_protection_config -
- * @privilege_access: Privileged cryp state enable/disable
- * @secure_access: Secure cryp state enable/disable
- *
- * Protection configuration structure for setting privilege access
- */
-struct cryp_protection_config {
- enum cryp_state privilege_access;
- enum cryp_state secure_access;
-};
-
-/* Cryp status */
-enum cryp_status_id {
- CRYP_STATUS_BUSY = 0x10,
- CRYP_STATUS_OUTPUT_FIFO_FULL = 0x08,
- CRYP_STATUS_OUTPUT_FIFO_NOT_EMPTY = 0x04,
- CRYP_STATUS_INPUT_FIFO_NOT_FULL = 0x02,
- CRYP_STATUS_INPUT_FIFO_EMPTY = 0x01
-};
-
-/* Cryp DMA interface */
-#define CRYP_DMA_TX_FIFO 0x08
-#define CRYP_DMA_RX_FIFO 0x10
-
-enum cryp_dma_req_type {
- CRYP_DMA_DISABLE_BOTH,
- CRYP_DMA_ENABLE_IN_DATA,
- CRYP_DMA_ENABLE_OUT_DATA,
- CRYP_DMA_ENABLE_BOTH_DIRECTIONS
-};
-
-enum cryp_dma_channel {
- CRYP_DMA_RX = 0,
- CRYP_DMA_TX
-};
-
-/* Key registers */
-enum cryp_key_reg_index {
- CRYP_KEY_REG_1,
- CRYP_KEY_REG_2,
- CRYP_KEY_REG_3,
- CRYP_KEY_REG_4
-};
-
-/* Key register left and right */
-struct cryp_key_value {
- u32 key_value_left;
- u32 key_value_right;
-};
-
-/* Cryp Initialization structure */
-enum cryp_init_vector_index {
- CRYP_INIT_VECTOR_INDEX_0,
- CRYP_INIT_VECTOR_INDEX_1
-};
-
-/**
- * struct cryp_init_vector_value - initialization vector register halves
- * @init_value_left: left half of the initialization vector
- * @init_value_right: right half of the initialization vector
- */
-struct cryp_init_vector_value {
- u32 init_value_left;
- u32 init_value_right;
-};
-
-/**
- * struct cryp_device_context - structure for a cryp context.
- * @cr: control register
- * @dmacr: DMA control register
- * @imsc: Interrupt mask set/clear register
- * @key_1_l: Key 1l register
- * @key_1_r: Key 1r register
- * @key_2_l: Key 2l register
- * @key_2_r: Key 2r register
- * @key_3_l: Key 3l register
- * @key_3_r: Key 3r register
- * @key_4_l: Key 4l register
- * @key_4_r: Key 4r register
- * @init_vect_0_l: Initialization vector 0l register
- * @init_vect_0_r: Initialization vector 0r register
- * @init_vect_1_l: Initialization vector 1l register
- * @init_vect_1_r: Initialization vector 1r register
- * @din: Data in register
- * @dout: Data out register
- *
- * CRYP power management specific structure.
- */
-struct cryp_device_context {
- u32 cr;
- u32 dmacr;
- u32 imsc;
-
- u32 key_1_l;
- u32 key_1_r;
- u32 key_2_l;
- u32 key_2_r;
- u32 key_3_l;
- u32 key_3_r;
- u32 key_4_l;
- u32 key_4_r;
-
- u32 init_vect_0_l;
- u32 init_vect_0_r;
- u32 init_vect_1_l;
- u32 init_vect_1_r;
-
- u32 din;
- u32 dout;
-};
-
-struct cryp_dma {
- dma_cap_mask_t mask;
- struct completion cryp_dma_complete;
- struct dma_chan *chan_cryp2mem;
- struct dma_chan *chan_mem2cryp;
- struct stedma40_chan_cfg *cfg_cryp2mem;
- struct stedma40_chan_cfg *cfg_mem2cryp;
- int sg_src_len;
- int sg_dst_len;
- struct scatterlist *sg_src;
- struct scatterlist *sg_dst;
- int nents_src;
- int nents_dst;
-};
-
-/**
- * struct cryp_device_data - structure for a cryp device.
- * @base: Pointer to virtual base address of the cryp device.
- * @phybase: Pointer to physical memory location of the cryp device.
- * @dev: Pointer to the devices dev structure.
- * @clk: Pointer to the device's clock control.
- * @irq: IRQ number
- * @pwr_regulator: Pointer to the device's power control.
- * @power_status: Current status of the power.
- * @ctx_lock: Lock for current_ctx.
- * @current_ctx: Pointer to the currently allocated context.
- * @list_node: For inclusion into a klist.
- * @dma: The dma structure holding channel configuration.
- * @power_state: TRUE = power state on, FALSE = power state off.
- * @power_state_spinlock: Spinlock for power_state.
- * @restore_dev_ctx: TRUE = saved ctx, FALSE = no saved ctx.
- */
-struct cryp_device_data {
- struct cryp_register __iomem *base;
- phys_addr_t phybase;
- struct device *dev;
- struct clk *clk;
- int irq;
- struct regulator *pwr_regulator;
- int power_status;
- spinlock_t ctx_lock;
- struct cryp_ctx *current_ctx;
- struct klist_node list_node;
- struct cryp_dma dma;
- bool power_state;
- spinlock_t power_state_spinlock;
- bool restore_dev_ctx;
-};
-
-void cryp_wait_until_done(struct cryp_device_data *device_data);
-
-/* Initialization functions */
-
-int cryp_check(struct cryp_device_data *device_data);
-
-void cryp_activity(struct cryp_device_data *device_data,
- enum cryp_crypen cryp_crypen);
-
-void cryp_flush_inoutfifo(struct cryp_device_data *device_data);
-
-int cryp_set_configuration(struct cryp_device_data *device_data,
- struct cryp_config *cryp_config,
- u32 *control_register);
-
-void cryp_configure_for_dma(struct cryp_device_data *device_data,
- enum cryp_dma_req_type dma_req);
-
-int cryp_configure_key_values(struct cryp_device_data *device_data,
- enum cryp_key_reg_index key_reg_index,
- struct cryp_key_value key_value);
-
-int cryp_configure_init_vector(struct cryp_device_data *device_data,
- enum cryp_init_vector_index
- init_vector_index,
- struct cryp_init_vector_value
- init_vector_value);
-
-int cryp_configure_protection(struct cryp_device_data *device_data,
- struct cryp_protection_config *p_protect_config);
-
-/* Power management functions */
-void cryp_save_device_context(struct cryp_device_data *device_data,
- struct cryp_device_context *ctx,
- int cryp_mode);
-
-void cryp_restore_device_context(struct cryp_device_data *device_data,
- struct cryp_device_context *ctx);
-
-/* Data transfer and status bits. */
-int cryp_is_logic_busy(struct cryp_device_data *device_data);
-
-int cryp_get_status(struct cryp_device_data *device_data);
-
-/**
- * cryp_write_indata - This routine writes 32 bit data into the data input
- * register of the cryptography IP.
- * @device_data: Pointer to the device data struct for base address.
- * @write_data: Data to write.
- */
-int cryp_write_indata(struct cryp_device_data *device_data, u32 write_data);
-
-/**
- * cryp_read_outdata - This routine reads the data from the data output
- * register of the CRYP logic
- * @device_data: Pointer to the device data struct for base address.
- * @read_data: Read the data from the output FIFO.
- */
-int cryp_read_outdata(struct cryp_device_data *device_data, u32 *read_data);
-
-#endif /* _CRYP_H_ */
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
deleted file mode 100644
index 5a57c9afd8c8..000000000000
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ /dev/null
@@ -1,1600 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson SA 2010
- * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
- * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
- * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
- * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
- * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
- * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
- */
-
-#include <linux/clk.h>
-#include <linux/completion.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmaengine.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irqreturn.h>
-#include <linux/kernel.h>
-#include <linux/klist.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-#include <linux/semaphore.h>
-#include <linux/platform_data/dma-ste-dma40.h>
-
-#include <crypto/aes.h>
-#include <crypto/ctr.h>
-#include <crypto/internal/des.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
-
-#include <linux/platform_data/crypto-ux500.h>
-
-#include "cryp_p.h"
-#include "cryp.h"
-
-#define CRYP_MAX_KEY_SIZE 32
-#define BYTES_PER_WORD 4
-
-static int cryp_mode;
-static atomic_t session_id;
-
-static struct stedma40_chan_cfg *mem_to_engine;
-static struct stedma40_chan_cfg *engine_to_mem;
-
-/**
- * struct cryp_driver_data - data specific to the driver.
- *
- * @device_list: A list of registered devices to choose from.
- * @device_allocation: A semaphore initialized with number of devices.
- */
-struct cryp_driver_data {
- struct klist device_list;
- struct semaphore device_allocation;
-};
-
-/**
- * struct cryp_ctx - Crypto context
- * @config: Crypto mode.
- * @key: Key array.
- * @keylen: Length of key.
- * @iv: Pointer to initialization vector.
- * @indata: Pointer to indata.
- * @outdata: Pointer to outdata.
- * @datalen: Length of indata.
- * @outlen: Length of outdata.
- * @blocksize: Size of blocks.
- * @updated: Updated flag.
- * @dev_ctx: Device dependent context.
- * @device: Pointer to the device.
- * @session_id: Atomic session ID.
- */
-struct cryp_ctx {
- struct cryp_config config;
- u8 key[CRYP_MAX_KEY_SIZE];
- u32 keylen;
- u8 *iv;
- const u8 *indata;
- u8 *outdata;
- u32 datalen;
- u32 outlen;
- u32 blocksize;
- u8 updated;
- struct cryp_device_context dev_ctx;
- struct cryp_device_data *device;
- u32 session_id;
-};
-
-static struct cryp_driver_data driver_data;
-
-/**
- * swap_bits_in_byte - mirror the bits in a byte
- * @b: the byte to be mirrored
- *
- * The bits are swapped the following way:
- * Byte b includes bits 0-7, nibble 1 (n1) includes bits 0-3 and
- * nibble 2 (n2) bits 4-7.
- *
- * Nibble 1 (n1):
- * (The "old" (moved) bit is replaced with a zero)
- * 1. Move bit 6 and 7, 4 positions to the left.
- * 2. Move bit 3 and 5, 2 positions to the left.
- * 3. Move bit 1-4, 1 position to the left.
- *
- * Nibble 2 (n2):
- * 1. Move bit 0 and 1, 4 positions to the right.
- * 2. Move bit 2 and 4, 2 positions to the right.
- * 3. Move bit 3-6, 1 position to the right.
- *
- * Combine the two nibbles to a complete and swapped byte.
- */
-
-static inline u8 swap_bits_in_byte(u8 b)
-{
-#define R_SHIFT_4_MASK 0xc0 /* Bits 6 and 7, right shift 4 */
-#define R_SHIFT_2_MASK 0x28 /* (After right shift 4) Bits 3 and 5,
- right shift 2 */
-#define R_SHIFT_1_MASK 0x1e /* (After right shift 2) Bits 1-4,
- right shift 1 */
-#define L_SHIFT_4_MASK 0x03 /* Bits 0 and 1, left shift 4 */
-#define L_SHIFT_2_MASK 0x14 /* (After left shift 4) Bits 2 and 4,
- left shift 2 */
-#define L_SHIFT_1_MASK 0x78 /* (After left shift 1) Bits 3-6,
- left shift 1 */
-
- u8 n1;
- u8 n2;
-
- /* Swap most significant nibble */
- /* Right shift 4, bits 6 and 7 */
- n1 = ((b & R_SHIFT_4_MASK) >> 4) | (b & ~(R_SHIFT_4_MASK >> 4));
- /* Right shift 2, bits 3 and 5 */
- n1 = ((n1 & R_SHIFT_2_MASK) >> 2) | (n1 & ~(R_SHIFT_2_MASK >> 2));
- /* Right shift 1, bits 1-4 */
- n1 = (n1 & R_SHIFT_1_MASK) >> 1;
-
- /* Swap least significant nibble */
- /* Left shift 4, bits 0 and 1 */
- n2 = ((b & L_SHIFT_4_MASK) << 4) | (b & ~(L_SHIFT_4_MASK << 4));
- /* Left shift 2, bits 2 and 4 */
- n2 = ((n2 & L_SHIFT_2_MASK) << 2) | (n2 & ~(L_SHIFT_2_MASK << 2));
- /* Left shift 1, bits 3-6 */
- n2 = (n2 & L_SHIFT_1_MASK) << 1;
-
- return n1 | n2;
-}
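
The routine above mirrors a byte by shifting masked groups within each nibble and recombining the halves. For comparison, the classic divide-and-conquer reversal reaches the same result in three swap steps; this is illustrative only (the kernel also ships a ready-made bitrev8() in <linux/bitrev.h>):

/* Sketch: reverse the bits of a byte by swapping nibbles,
 * then 2-bit pairs, then adjacent bits. */
static inline u8 bitrev8_sketch(u8 b)
{
        b = (b & 0xf0) >> 4 | (b & 0x0f) << 4;  /* swap nibbles */
        b = (b & 0xcc) >> 2 | (b & 0x33) << 2;  /* swap 2-bit pairs */
        b = (b & 0xaa) >> 1 | (b & 0x55) << 1;  /* swap adjacent bits */
        return b;
}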
-
-static inline void swap_words_in_key_and_bits_in_byte(const u8 *in,
- u8 *out, u32 len)
-{
- unsigned int i = 0;
- int j;
- int index = 0;
-
- j = len - BYTES_PER_WORD;
- while (j >= 0) {
- for (i = 0; i < BYTES_PER_WORD; i++) {
- index = len - j - BYTES_PER_WORD + i;
- out[j + i] =
- swap_bits_in_byte(in[index]);
- }
- j -= BYTES_PER_WORD;
- }
-}
-
-static void add_session_id(struct cryp_ctx *ctx)
-{
- /*
- * We never want 0 to be a valid value, since this is the default value
- * for the software context.
- */
- if (unlikely(atomic_inc_and_test(&session_id)))
- atomic_inc(&session_id);
-
- ctx->session_id = atomic_read(&session_id);
-}
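
add_session_id() leans on atomic_inc_and_test() returning true exactly when the increment wraps the counter to zero, so the reserved value 0 (the default in a fresh software context) is never handed out. The same non-zero-ID pattern in isolation, as a sketch rather than the driver's code:

static atomic_t ctr;

/* Sketch: monotonically increasing IDs that skip the reserved value 0. */
static u32 next_nonzero_id(void)
{
        if (unlikely(atomic_inc_and_test(&ctr)))        /* wrapped to 0 */
                atomic_inc(&ctr);                       /* skip over it */
        return atomic_read(&ctr);
}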
-
-static irqreturn_t cryp_interrupt_handler(int irq, void *param)
-{
- struct cryp_ctx *ctx;
- int count;
- struct cryp_device_data *device_data;
-
- if (param == NULL) {
- BUG_ON(!param);
- return IRQ_HANDLED;
- }
-
- /* The device is coming from the one found in hw_crypt_noxts. */
- device_data = (struct cryp_device_data *)param;
-
- ctx = device_data->current_ctx;
-
- if (ctx == NULL) {
- BUG_ON(!ctx);
- return IRQ_HANDLED;
- }
-
- dev_dbg(ctx->device->dev, "[%s] (len: %d) %s, ", __func__, ctx->outlen,
- cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO) ?
- "out" : "in");
-
- if (cryp_pending_irq_src(device_data,
- CRYP_IRQ_SRC_OUTPUT_FIFO)) {
- if (ctx->outlen / ctx->blocksize > 0) {
- count = ctx->blocksize / 4;
-
- readsl(&device_data->base->dout, ctx->outdata, count);
- ctx->outdata += count;
- ctx->outlen -= count;
-
- if (ctx->outlen == 0) {
- cryp_disable_irq_src(device_data,
- CRYP_IRQ_SRC_OUTPUT_FIFO);
- }
- }
- } else if (cryp_pending_irq_src(device_data,
- CRYP_IRQ_SRC_INPUT_FIFO)) {
- if (ctx->datalen / ctx->blocksize > 0) {
- count = ctx->blocksize / 4;
-
- writesl(&device_data->base->din, ctx->indata, count);
-
- ctx->indata += count;
- ctx->datalen -= count;
-
- if (ctx->datalen == 0)
- cryp_disable_irq_src(device_data,
- CRYP_IRQ_SRC_INPUT_FIFO);
-
- if (ctx->config.algomode == CRYP_ALGO_AES_XTS) {
- CRYP_PUT_BITS(&device_data->base->cr,
- CRYP_START_ENABLE,
- CRYP_CR_START_POS,
- CRYP_CR_START_MASK);
-
- cryp_wait_until_done(device_data);
- }
- }
- }
-
- return IRQ_HANDLED;
-}
-
-static int mode_is_aes(enum cryp_algo_mode mode)
-{
- return CRYP_ALGO_AES_ECB == mode ||
- CRYP_ALGO_AES_CBC == mode ||
- CRYP_ALGO_AES_CTR == mode ||
- CRYP_ALGO_AES_XTS == mode;
-}
-
-static int cfg_iv(struct cryp_device_data *device_data, u32 left, u32 right,
- enum cryp_init_vector_index index)
-{
- struct cryp_init_vector_value vector_value;
-
- dev_dbg(device_data->dev, "[%s]", __func__);
-
- vector_value.init_value_left = left;
- vector_value.init_value_right = right;
-
- return cryp_configure_init_vector(device_data,
- index,
- vector_value);
-}
-
-static int cfg_ivs(struct cryp_device_data *device_data, struct cryp_ctx *ctx)
-{
- int i;
- int status = 0;
- int num_of_regs = ctx->blocksize / 8;
- __be32 *civ = (__be32 *)ctx->iv;
- u32 iv[AES_BLOCK_SIZE / 4];
-
- dev_dbg(device_data->dev, "[%s]", __func__);
-
- /*
- * Since we loop on num_of_regs we need to have a check in case
- * someone provides an incorrect blocksize which would force calling
- * cfg_iv with i greater than 2 which is an error.
- */
- if (num_of_regs > 2) {
- dev_err(device_data->dev, "[%s] Incorrect blocksize %d",
- __func__, ctx->blocksize);
- return -EINVAL;
- }
-
- for (i = 0; i < ctx->blocksize / 4; i++)
- iv[i] = be32_to_cpup(civ + i);
-
- for (i = 0; i < num_of_regs; i++) {
- status = cfg_iv(device_data, iv[i*2], iv[i*2+1],
- (enum cryp_init_vector_index) i);
- if (status != 0)
- return status;
- }
- return status;
-}
-
-static int set_key(struct cryp_device_data *device_data,
- u32 left_key,
- u32 right_key,
- enum cryp_key_reg_index index)
-{
- struct cryp_key_value key_value;
- int cryp_error;
-
- dev_dbg(device_data->dev, "[%s]", __func__);
-
- key_value.key_value_left = left_key;
- key_value.key_value_right = right_key;
-
- cryp_error = cryp_configure_key_values(device_data,
- index,
- key_value);
- if (cryp_error != 0)
- dev_err(device_data->dev, "[%s]: "
- "cryp_configure_key_values() failed!", __func__);
-
- return cryp_error;
-}
-
-static int cfg_keys(struct cryp_ctx *ctx)
-{
- int i;
- int num_of_regs = ctx->keylen / 8;
- u32 swapped_key[CRYP_MAX_KEY_SIZE / 4];
- __be32 *ckey = (__be32 *)ctx->key;
- int cryp_error = 0;
-
- dev_dbg(ctx->device->dev, "[%s]", __func__);
-
- if (mode_is_aes(ctx->config.algomode)) {
- swap_words_in_key_and_bits_in_byte((u8 *)ckey,
- (u8 *)swapped_key,
- ctx->keylen);
- } else {
- for (i = 0; i < ctx->keylen / 4; i++)
- swapped_key[i] = be32_to_cpup(ckey + i);
- }
-
- for (i = 0; i < num_of_regs; i++) {
- cryp_error = set_key(ctx->device,
- swapped_key[i * 2],
- swapped_key[i * 2 + 1],
- (enum cryp_key_reg_index) i);
-
- if (cryp_error != 0) {
- dev_err(ctx->device->dev, "[%s]: set_key() failed!",
- __func__);
- return cryp_error;
- }
- }
- return cryp_error;
-}
-
-static int cryp_setup_context(struct cryp_ctx *ctx,
- struct cryp_device_data *device_data)
-{
- u32 control_register = CRYP_CR_DEFAULT;
-
- switch (cryp_mode) {
- case CRYP_MODE_INTERRUPT:
- writel_relaxed(CRYP_IMSC_DEFAULT, &device_data->base->imsc);
- break;
-
- case CRYP_MODE_DMA:
- writel_relaxed(CRYP_DMACR_DEFAULT, &device_data->base->dmacr);
- break;
-
- default:
- break;
- }
-
- if (ctx->updated == 0) {
- cryp_flush_inoutfifo(device_data);
- if (cfg_keys(ctx) != 0) {
- dev_err(ctx->device->dev, "[%s]: cfg_keys failed!",
- __func__);
- return -EINVAL;
- }
-
- if (ctx->iv &&
- CRYP_ALGO_AES_ECB != ctx->config.algomode &&
- CRYP_ALGO_DES_ECB != ctx->config.algomode &&
- CRYP_ALGO_TDES_ECB != ctx->config.algomode) {
- if (cfg_ivs(device_data, ctx) != 0)
- return -EPERM;
- }
-
- cryp_set_configuration(device_data, &ctx->config,
- &control_register);
- add_session_id(ctx);
- } else if (ctx->updated == 1 &&
- ctx->session_id != atomic_read(&session_id)) {
- cryp_flush_inoutfifo(device_data);
- cryp_restore_device_context(device_data, &ctx->dev_ctx);
-
- add_session_id(ctx);
- control_register = ctx->dev_ctx.cr;
- } else
- control_register = ctx->dev_ctx.cr;
-
- writel(control_register |
- (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS),
- &device_data->base->cr);
-
- return 0;
-}
-
-static int cryp_get_device_data(struct cryp_ctx *ctx,
- struct cryp_device_data **device_data)
-{
- int ret;
- struct klist_iter device_iterator;
- struct klist_node *device_node;
- struct cryp_device_data *local_device_data = NULL;
- pr_debug(DEV_DBG_NAME " [%s]", __func__);
-
- /* Wait until a device is available */
- ret = down_interruptible(&driver_data.device_allocation);
- if (ret)
- return ret; /* Interrupted */
-
- /* Select a device */
- klist_iter_init(&driver_data.device_list, &device_iterator);
-
- device_node = klist_next(&device_iterator);
- while (device_node) {
- local_device_data = container_of(device_node,
- struct cryp_device_data, list_node);
- spin_lock(&local_device_data->ctx_lock);
- /* current_ctx allocates a device, NULL = unallocated */
- if (local_device_data->current_ctx) {
- device_node = klist_next(&device_iterator);
- } else {
- local_device_data->current_ctx = ctx;
- ctx->device = local_device_data;
- spin_unlock(&local_device_data->ctx_lock);
- break;
- }
- spin_unlock(&local_device_data->ctx_lock);
- }
- klist_iter_exit(&device_iterator);
-
- if (!device_node) {
- /*
- * No free device found.
- * Since we allocated a device with down_interruptible, this
- * should not be able to happen.
- * Number of available devices, which are contained in
- * device_allocation, is therefore decremented by not doing
- * an up(device_allocation).
- */
- return -EBUSY;
- }
-
- *device_data = local_device_data;
-
- return 0;
-}
-
-static void cryp_dma_setup_channel(struct cryp_device_data *device_data,
- struct device *dev)
-{
- struct dma_slave_config mem2cryp = {
- .direction = DMA_MEM_TO_DEV,
- .dst_addr = device_data->phybase + CRYP_DMA_TX_FIFO,
- .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
- .dst_maxburst = 4,
- };
- struct dma_slave_config cryp2mem = {
- .direction = DMA_DEV_TO_MEM,
- .src_addr = device_data->phybase + CRYP_DMA_RX_FIFO,
- .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
- .src_maxburst = 4,
- };
-
- dma_cap_zero(device_data->dma.mask);
- dma_cap_set(DMA_SLAVE, device_data->dma.mask);
-
- device_data->dma.cfg_mem2cryp = mem_to_engine;
- device_data->dma.chan_mem2cryp =
- dma_request_channel(device_data->dma.mask,
- stedma40_filter,
- device_data->dma.cfg_mem2cryp);
-
- device_data->dma.cfg_cryp2mem = engine_to_mem;
- device_data->dma.chan_cryp2mem =
- dma_request_channel(device_data->dma.mask,
- stedma40_filter,
- device_data->dma.cfg_cryp2mem);
-
- dmaengine_slave_config(device_data->dma.chan_mem2cryp, &mem2cryp);
- dmaengine_slave_config(device_data->dma.chan_cryp2mem, &cryp2mem);
-
- init_completion(&device_data->dma.cryp_dma_complete);
-}
-
-static void cryp_dma_out_callback(void *data)
-{
- struct cryp_ctx *ctx = (struct cryp_ctx *) data;
- dev_dbg(ctx->device->dev, "[%s]: ", __func__);
-
- complete(&ctx->device->dma.cryp_dma_complete);
-}
-
-static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
- struct scatterlist *sg,
- int len,
- enum dma_data_direction direction)
-{
- struct dma_async_tx_descriptor *desc;
- struct dma_chan *channel = NULL;
- dma_cookie_t cookie;
-
- dev_dbg(ctx->device->dev, "[%s]: ", __func__);
-
- if (unlikely(!IS_ALIGNED((unsigned long)sg, 4))) {
- dev_err(ctx->device->dev, "[%s]: Data in sg list isn't "
- "aligned! Addr: 0x%08lx", __func__, (unsigned long)sg);
- return -EFAULT;
- }
-
- switch (direction) {
- case DMA_TO_DEVICE:
- channel = ctx->device->dma.chan_mem2cryp;
- ctx->device->dma.sg_src = sg;
- ctx->device->dma.sg_src_len = dma_map_sg(channel->device->dev,
- ctx->device->dma.sg_src,
- ctx->device->dma.nents_src,
- direction);
-
- if (!ctx->device->dma.sg_src_len) {
- dev_dbg(ctx->device->dev,
- "[%s]: Could not map the sg list (TO_DEVICE)",
- __func__);
- return -EFAULT;
- }
-
- dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
- "(TO_DEVICE)", __func__);
-
- desc = dmaengine_prep_slave_sg(channel,
- ctx->device->dma.sg_src,
- ctx->device->dma.sg_src_len,
- DMA_MEM_TO_DEV, DMA_CTRL_ACK);
- break;
-
- case DMA_FROM_DEVICE:
- channel = ctx->device->dma.chan_cryp2mem;
- ctx->device->dma.sg_dst = sg;
- ctx->device->dma.sg_dst_len = dma_map_sg(channel->device->dev,
- ctx->device->dma.sg_dst,
- ctx->device->dma.nents_dst,
- direction);
-
- if (!ctx->device->dma.sg_dst_len) {
- dev_dbg(ctx->device->dev,
- "[%s]: Could not map the sg list (FROM_DEVICE)",
- __func__);
- return -EFAULT;
- }
-
- dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
- "(FROM_DEVICE)", __func__);
-
- desc = dmaengine_prep_slave_sg(channel,
- ctx->device->dma.sg_dst,
- ctx->device->dma.sg_dst_len,
- DMA_DEV_TO_MEM,
- DMA_CTRL_ACK |
- DMA_PREP_INTERRUPT);
-
- desc->callback = cryp_dma_out_callback;
- desc->callback_param = ctx;
- break;
-
- default:
- dev_dbg(ctx->device->dev, "[%s]: Invalid DMA direction",
- __func__);
- return -EFAULT;
- }
-
- cookie = dmaengine_submit(desc);
- if (dma_submit_error(cookie)) {
- dev_dbg(ctx->device->dev, "[%s]: DMA submission failed\n",
- __func__);
- return cookie;
- }
-
- dma_async_issue_pending(channel);
-
- return 0;
-}
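
Both branches above follow the canonical dmaengine slave flow: map the scatterlist, prepare a slave descriptor, optionally attach a completion callback, submit, then kick the channel. Stripped to a skeleton (a hypothetical helper; the unmap path and driver-specific bookkeeping are elided):

/* Illustrative dmaengine slave-transfer skeleton. */
static int dma_xfer_sketch(struct dma_chan *chan, struct scatterlist *sg,
                           int nents, enum dma_transfer_direction dir,
                           dma_async_tx_callback done_cb, void *param)
{
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;

        nents = dma_map_sg(chan->device->dev, sg, nents,
                           dir == DMA_MEM_TO_DEV ? DMA_TO_DEVICE
                                                 : DMA_FROM_DEVICE);
        if (!nents)
                return -EFAULT;

        desc = dmaengine_prep_slave_sg(chan, sg, nents, dir,
                                       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
        if (!desc)
                return -ENOMEM;

        desc->callback = done_cb;               /* completion notification */
        desc->callback_param = param;

        cookie = dmaengine_submit(desc);        /* queue the descriptor */
        if (dma_submit_error(cookie))
                return -EIO;

        dma_async_issue_pending(chan);          /* start the transfer */
        return 0;
}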
-
-static void cryp_dma_done(struct cryp_ctx *ctx)
-{
- struct dma_chan *chan;
-
- dev_dbg(ctx->device->dev, "[%s]: ", __func__);
-
- chan = ctx->device->dma.chan_mem2cryp;
- dmaengine_terminate_all(chan);
- dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
- ctx->device->dma.nents_src, DMA_TO_DEVICE);
-
- chan = ctx->device->dma.chan_cryp2mem;
- dmaengine_terminate_all(chan);
- dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
- ctx->device->dma.nents_dst, DMA_FROM_DEVICE);
-}
-
-static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg,
- int len)
-{
- int error = cryp_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
- dev_dbg(ctx->device->dev, "[%s]: ", __func__);
-
- if (error) {
- dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
- "failed", __func__);
- return error;
- }
-
- return len;
-}
-
-static int cryp_dma_read(struct cryp_ctx *ctx, struct scatterlist *sg, int len)
-{
- int error = cryp_set_dma_transfer(ctx, sg, len, DMA_FROM_DEVICE);
- if (error) {
- dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
- "failed", __func__);
- return error;
- }
-
- return len;
-}
-
-static void cryp_polling_mode(struct cryp_ctx *ctx,
- struct cryp_device_data *device_data)
-{
- int len = ctx->blocksize / BYTES_PER_WORD;
- int remaining_length = ctx->datalen;
- u32 *indata = (u32 *)ctx->indata;
- u32 *outdata = (u32 *)ctx->outdata;
-
- while (remaining_length > 0) {
- writesl(&device_data->base->din, indata, len);
- indata += len;
- remaining_length -= (len * BYTES_PER_WORD);
- cryp_wait_until_done(device_data);
-
- readsl(&device_data->base->dout, outdata, len);
- outdata += len;
- cryp_wait_until_done(device_data);
- }
-}
-
-static int cryp_disable_power(struct device *dev,
- struct cryp_device_data *device_data,
- bool save_device_context)
-{
- int ret = 0;
-
- dev_dbg(dev, "[%s]", __func__);
-
- spin_lock(&device_data->power_state_spinlock);
- if (!device_data->power_state)
- goto out;
-
- spin_lock(&device_data->ctx_lock);
- if (save_device_context && device_data->current_ctx) {
- cryp_save_device_context(device_data,
- &device_data->current_ctx->dev_ctx,
- cryp_mode);
- device_data->restore_dev_ctx = true;
- }
- spin_unlock(&device_data->ctx_lock);
-
- clk_disable(device_data->clk);
- ret = regulator_disable(device_data->pwr_regulator);
- if (ret)
- dev_err(dev, "[%s]: "
- "regulator_disable() failed!",
- __func__);
-
- device_data->power_state = false;
-
-out:
- spin_unlock(&device_data->power_state_spinlock);
-
- return ret;
-}
-
-static int cryp_enable_power(
- struct device *dev,
- struct cryp_device_data *device_data,
- bool restore_device_context)
-{
- int ret = 0;
-
- dev_dbg(dev, "[%s]", __func__);
-
- spin_lock(&device_data->power_state_spinlock);
- if (!device_data->power_state) {
- ret = regulator_enable(device_data->pwr_regulator);
- if (ret) {
- dev_err(dev, "[%s]: regulator_enable() failed!",
- __func__);
- goto out;
- }
-
- ret = clk_enable(device_data->clk);
- if (ret) {
- dev_err(dev, "[%s]: clk_enable() failed!",
- __func__);
- regulator_disable(device_data->pwr_regulator);
- goto out;
- }
- device_data->power_state = true;
- }
-
- if (device_data->restore_dev_ctx) {
- spin_lock(&device_data->ctx_lock);
- if (restore_device_context && device_data->current_ctx) {
- device_data->restore_dev_ctx = false;
- cryp_restore_device_context(device_data,
- &device_data->current_ctx->dev_ctx);
- }
- spin_unlock(&device_data->ctx_lock);
- }
-out:
- spin_unlock(&device_data->power_state_spinlock);
-
- return ret;
-}
-
-static int hw_crypt_noxts(struct cryp_ctx *ctx,
- struct cryp_device_data *device_data)
-{
- int ret = 0;
-
- const u8 *indata = ctx->indata;
- u8 *outdata = ctx->outdata;
- u32 datalen = ctx->datalen;
- u32 outlen = datalen;
-
- pr_debug(DEV_DBG_NAME " [%s]", __func__);
-
- ctx->outlen = ctx->datalen;
-
- if (unlikely(!IS_ALIGNED((unsigned long)indata, 4))) {
- pr_debug(DEV_DBG_NAME " [%s]: Data isn't aligned! Addr: "
- "0x%08lx", __func__, (unsigned long)indata);
- return -EINVAL;
- }
-
- ret = cryp_setup_context(ctx, device_data);
-
- if (ret)
- goto out;
-
- if (cryp_mode == CRYP_MODE_INTERRUPT) {
- cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO |
- CRYP_IRQ_SRC_OUTPUT_FIFO);
-
- /*
- * ctx->outlen is decremented in the cryp_interrupt_handler
- * function. We had to add cpu_relax() (barrier) to make sure
- * that gcc didn't optimize away this variable.
- */
- while (ctx->outlen > 0)
- cpu_relax();
- } else if (cryp_mode == CRYP_MODE_POLLING ||
- cryp_mode == CRYP_MODE_DMA) {
- /*
- * The reason for handling DMA in this if case is that when running
- * cryp_mode = 2 (DMA), we use separate DMA routines for handling
- * cipher/plaintext larger than the blocksize, except for the normal
- * CRYPTO_ALG_TYPE_CIPHER case, where we still use polling mode: the
- * overhead of the DMA setup eats up the benefit of using it.
- */
- cryp_polling_mode(ctx, device_data);
- } else {
- dev_err(ctx->device->dev, "[%s]: Invalid operation mode!",
- __func__);
- ret = -EPERM;
- goto out;
- }
-
- cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
- ctx->updated = 1;
-
-out:
- ctx->indata = indata;
- ctx->outdata = outdata;
- ctx->datalen = datalen;
- ctx->outlen = outlen;
-
- return ret;
-}
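
The interrupt-mode busy wait in hw_crypt_noxts() relies on cpu_relax() acting as a compiler barrier so that ctx->outlen is re-read on every iteration. The more explicit modern idiom marks the load itself; a sketch of the equivalent loop:

/* Equivalent wait with the re-read made explicit: */
while (READ_ONCE(ctx->outlen) > 0)
        cpu_relax();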
-
-static int get_nents(struct scatterlist *sg, int nbytes)
-{
- int nents = 0;
-
- while (nbytes > 0) {
- nbytes -= sg->length;
- sg = sg_next(sg);
- nents++;
- }
-
- return nents;
-}
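
get_nents() open-codes a walk that the generic sg_nents_for_len() helper (used by the talitos hunks earlier in this diff) performs with validation, returning -EINVAL when the scatterlist is shorter than the requested length. A sketch of the drop-in use:

/* Sketch: generic helper instead of the open-coded walk. */
nents = sg_nents_for_len(areq->src, ctx->datalen);
if (nents < 0)
        return nents;   /* scatterlist shorter than requested length */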
-
-static int ablk_dma_crypt(struct skcipher_request *areq)
-{
- struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
- struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
- struct cryp_device_data *device_data;
-
- int bytes_written = 0;
- int bytes_read = 0;
- int ret;
-
- pr_debug(DEV_DBG_NAME " [%s]", __func__);
-
- ctx->datalen = areq->cryptlen;
- ctx->outlen = areq->cryptlen;
-
- ret = cryp_get_device_data(ctx, &device_data);
- if (ret)
- return ret;
-
- ret = cryp_setup_context(ctx, device_data);
- if (ret)
- goto out;
-
- /* We have the device now, so store the nents in the dma struct. */
- ctx->device->dma.nents_src = get_nents(areq->src, ctx->datalen);
- ctx->device->dma.nents_dst = get_nents(areq->dst, ctx->outlen);
-
- /* Enable DMA in- and output. */
- cryp_configure_for_dma(device_data, CRYP_DMA_ENABLE_BOTH_DIRECTIONS);
-
- bytes_written = cryp_dma_write(ctx, areq->src, ctx->datalen);
- bytes_read = cryp_dma_read(ctx, areq->dst, bytes_written);
-
- wait_for_completion(&ctx->device->dma.cryp_dma_complete);
- cryp_dma_done(ctx);
-
- cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
- ctx->updated = 1;
-
-out:
- spin_lock(&device_data->ctx_lock);
- device_data->current_ctx = NULL;
- ctx->device = NULL;
- spin_unlock(&device_data->ctx_lock);
-
- /*
- * The down_interruptible part for this semaphore is called in
- * cryp_get_device_data.
- */
- up(&driver_data.device_allocation);
-
- if (unlikely(bytes_written != bytes_read))
- return -EPERM;
-
- return 0;
-}
-
-static int ablk_crypt(struct skcipher_request *areq)
-{
- struct skcipher_walk walk;
- struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
- struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
- struct cryp_device_data *device_data;
- unsigned long src_paddr;
- unsigned long dst_paddr;
- int ret;
- int nbytes;
-
- pr_debug(DEV_DBG_NAME " [%s]", __func__);
-
- ret = cryp_get_device_data(ctx, &device_data);
- if (ret)
- goto out;
-
- ret = skcipher_walk_async(&walk, areq);
-
- if (ret) {
- pr_err(DEV_DBG_NAME "[%s]: skcipher_walk_async() failed!",
- __func__);
- goto out;
- }
-
- while ((nbytes = walk.nbytes) > 0) {
- ctx->iv = walk.iv;
- src_paddr = (page_to_phys(walk.src.phys.page) + walk.src.phys.offset);
- ctx->indata = phys_to_virt(src_paddr);
-
- dst_paddr = (page_to_phys(walk.dst.phys.page) + walk.dst.phys.offset);
- ctx->outdata = phys_to_virt(dst_paddr);
-
- ctx->datalen = nbytes - (nbytes % ctx->blocksize);
-
- ret = hw_crypt_noxts(ctx, device_data);
- if (ret)
- goto out;
-
- nbytes -= ctx->datalen;
- ret = skcipher_walk_done(&walk, nbytes);
- if (ret)
- goto out;
- }
-
-out:
- /* Release the device */
- spin_lock(&device_data->ctx_lock);
- device_data->current_ctx = NULL;
- ctx->device = NULL;
- spin_unlock(&device_data->ctx_lock);
-
- /*
- * The down_interruptible part for this semaphore is called in
- * cryp_get_device_data.
- */
- up(&driver_data.device_allocation);
-
- return ret;
-}
-
-static int aes_skcipher_setkey(struct crypto_skcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
-
- pr_debug(DEV_DBG_NAME " [%s]", __func__);
-
- switch (keylen) {
- case AES_KEYSIZE_128:
- ctx->config.keysize = CRYP_KEY_SIZE_128;
- break;
-
- case AES_KEYSIZE_192:
- ctx->config.keysize = CRYP_KEY_SIZE_192;
- break;
-
- case AES_KEYSIZE_256:
- ctx->config.keysize = CRYP_KEY_SIZE_256;
- break;
-
- default:
- pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__);
- return -EINVAL;
- }
-
- memcpy(ctx->key, key, keylen);
- ctx->keylen = keylen;
-
- ctx->updated = 0;
-
- return 0;
-}
-
-static int des_skcipher_setkey(struct crypto_skcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
- int err;
-
- pr_debug(DEV_DBG_NAME " [%s]", __func__);
-
- err = verify_skcipher_des_key(cipher, key);
- if (err)
- return err;
-
- memcpy(ctx->key, key, keylen);
- ctx->keylen = keylen;
-
- ctx->updated = 0;
- return 0;
-}
-
-static int des3_skcipher_setkey(struct crypto_skcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
- int err;
-
- pr_debug(DEV_DBG_NAME " [%s]", __func__);
-
- err = verify_skcipher_des3_key(cipher, key);
- if (err)
- return err;
-
- memcpy(ctx->key, key, keylen);
- ctx->keylen = keylen;
-
- ctx->updated = 0;
- return 0;
-}
-
-static int cryp_blk_encrypt(struct skcipher_request *areq)
-{
- struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
- struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
-
- pr_debug(DEV_DBG_NAME " [%s]", __func__);
-
- ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
-
- /* DMA does not work for DES due to a hw bug */
- if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode))
- return ablk_dma_crypt(areq);
-
- /* For everything except DMA, we run the non DMA version. */
- return ablk_crypt(areq);
-}
-
-static int cryp_blk_decrypt(struct skcipher_request *areq)
-{
- struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
- struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
-
- pr_debug(DEV_DBG_NAME " [%s]", __func__);
-
- ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
-
- /* DMA does not work for DES due to a hw bug */
- if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode))
- return ablk_dma_crypt(areq);
-
- /* For everything except DMA, we run the non DMA version. */
- return ablk_crypt(areq);
-}
-
-struct cryp_algo_template {
- enum cryp_algo_mode algomode;
- struct skcipher_alg skcipher;
-};
-
-static int cryp_init_tfm(struct crypto_skcipher *tfm)
-{
- struct cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct cryp_algo_template *cryp_alg = container_of(alg,
- struct cryp_algo_template,
- skcipher);
-
- ctx->config.algomode = cryp_alg->algomode;
- ctx->blocksize = crypto_skcipher_blocksize(tfm);
-
- return 0;
-}
-
-static struct cryp_algo_template cryp_algs[] = {
- {
- .algomode = CRYP_ALGO_AES_ECB,
- .skcipher = {
- .base.cra_name = "ecb(aes)",
- .base.cra_driver_name = "ecb-aes-ux500",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct cryp_ctx),
- .base.cra_alignmask = 3,
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_skcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- .init = cryp_init_tfm,
- }
- },
- {
- .algomode = CRYP_ALGO_AES_CBC,
- .skcipher = {
- .base.cra_name = "cbc(aes)",
- .base.cra_driver_name = "cbc-aes-ux500",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct cryp_ctx),
- .base.cra_alignmask = 3,
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_skcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- .init = cryp_init_tfm,
- .ivsize = AES_BLOCK_SIZE,
- }
- },
- {
- .algomode = CRYP_ALGO_AES_CTR,
- .skcipher = {
- .base.cra_name = "ctr(aes)",
- .base.cra_driver_name = "ctr-aes-ux500",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct cryp_ctx),
- .base.cra_alignmask = 3,
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_skcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- .init = cryp_init_tfm,
- .ivsize = AES_BLOCK_SIZE,
- .chunksize = AES_BLOCK_SIZE,
- }
- },
- {
- .algomode = CRYP_ALGO_DES_ECB,
- .skcipher = {
- .base.cra_name = "ecb(des)",
- .base.cra_driver_name = "ecb-des-ux500",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct cryp_ctx),
- .base.cra_alignmask = 3,
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = des_skcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- .init = cryp_init_tfm,
- }
- },
- {
- .algomode = CRYP_ALGO_TDES_ECB,
- .skcipher = {
- .base.cra_name = "ecb(des3_ede)",
- .base.cra_driver_name = "ecb-des3_ede-ux500",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct cryp_ctx),
- .base.cra_alignmask = 3,
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = des3_skcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- .init = cryp_init_tfm,
- }
- },
- {
- .algomode = CRYP_ALGO_DES_CBC,
- .skcipher = {
- .base.cra_name = "cbc(des)",
- .base.cra_driver_name = "cbc-des-ux500",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct cryp_ctx),
- .base.cra_alignmask = 3,
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = des_skcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- .ivsize = DES_BLOCK_SIZE,
- .init = cryp_init_tfm,
- }
- },
- {
- .algomode = CRYP_ALGO_TDES_CBC,
- .skcipher = {
- .base.cra_name = "cbc(des3_ede)",
- .base.cra_driver_name = "cbc-des3_ede-ux500",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct cryp_ctx),
- .base.cra_alignmask = 3,
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = des3_skcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .init = cryp_init_tfm,
- }
- }
-};
-
-/**
- * cryp_algs_register_all - register all supported algorithms
- */
-static int cryp_algs_register_all(void)
-{
- int ret;
- int i;
- int count;
-
- pr_debug("[%s]", __func__);
-
- for (i = 0; i < ARRAY_SIZE(cryp_algs); i++) {
- ret = crypto_register_skcipher(&cryp_algs[i].skcipher);
- if (ret) {
- count = i;
- pr_err("[%s] alg registration failed",
- cryp_algs[i].skcipher.base.cra_driver_name);
- goto unreg;
- }
- }
- return 0;
-unreg:
- for (i = 0; i < count; i++)
- crypto_unregister_skcipher(&cryp_algs[i].skcipher);
- return ret;
-}
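
The loop above is the standard register-then-unwind idiom for bulk registration. The crypto core provides crypto_register_skciphers() for a plain array of struct skcipher_alg, though it cannot be dropped in here because each alg is embedded in a struct cryp_algo_template. The bare idiom, for reference (register_one(), unregister_one() and items[] are hypothetical):

static int register_all_sketch(void)
{
        int i, ret;

        for (i = 0; i < ARRAY_SIZE(items); i++) {
                ret = register_one(&items[i]);
                if (ret)
                        goto unreg;     /* unwind what succeeded */
        }
        return 0;
unreg:
        while (--i >= 0)
                unregister_one(&items[i]);
        return ret;
}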
-
-/**
- * cryp_algs_unregister_all - unregister all supported algorithms
- */
-static void cryp_algs_unregister_all(void)
-{
- int i;
-
- pr_debug(DEV_DBG_NAME " [%s]", __func__);
-
- for (i = 0; i < ARRAY_SIZE(cryp_algs); i++)
- crypto_unregister_skcipher(&cryp_algs[i].skcipher);
-}
-
-static int ux500_cryp_probe(struct platform_device *pdev)
-{
- int ret;
- struct resource *res;
- struct cryp_device_data *device_data;
- struct cryp_protection_config prot = {
- .privilege_access = CRYP_STATE_ENABLE
- };
- struct device *dev = &pdev->dev;
-
- dev_dbg(dev, "[%s]", __func__);
- device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_KERNEL);
- if (!device_data) {
- ret = -ENOMEM;
- goto out;
- }
-
- device_data->dev = dev;
- device_data->current_ctx = NULL;
-
- /* Grab the DMA configuration from platform data. */
- mem_to_engine = &((struct cryp_platform_data *)
- dev->platform_data)->mem_to_engine;
- engine_to_mem = &((struct cryp_platform_data *)
- dev->platform_data)->engine_to_mem;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "[%s]: platform_get_resource() failed",
- __func__);
- ret = -ENODEV;
- goto out;
- }
-
- device_data->phybase = res->start;
- device_data->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(device_data->base)) {
- ret = PTR_ERR(device_data->base);
- goto out;
- }
-
- spin_lock_init(&device_data->ctx_lock);
- spin_lock_init(&device_data->power_state_spinlock);
-
- /* Enable power for CRYP hardware block */
- device_data->pwr_regulator = regulator_get(&pdev->dev, "v-ape");
- if (IS_ERR(device_data->pwr_regulator)) {
- dev_err(dev, "[%s]: could not get cryp regulator", __func__);
- ret = PTR_ERR(device_data->pwr_regulator);
- device_data->pwr_regulator = NULL;
- goto out;
- }
-
- /* Enable the clk for CRYP hardware block */
- device_data->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(device_data->clk)) {
- dev_err(dev, "[%s]: clk_get() failed!", __func__);
- ret = PTR_ERR(device_data->clk);
- goto out_regulator;
- }
-
- ret = clk_prepare(device_data->clk);
- if (ret) {
- dev_err(dev, "[%s]: clk_prepare() failed!", __func__);
- goto out_regulator;
- }
-
- /* Enable device power (and clock) */
- ret = cryp_enable_power(device_data->dev, device_data, false);
- if (ret) {
- dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
- goto out_clk_unprepare;
- }
-
- if (cryp_check(device_data)) {
- dev_err(dev, "[%s]: cryp_check() failed!", __func__);
- ret = -EINVAL;
- goto out_power;
- }
-
- if (cryp_configure_protection(device_data, &prot)) {
- dev_err(dev, "[%s]: cryp_configure_protection() failed!",
- __func__);
- ret = -EINVAL;
- goto out_power;
- }
-
- device_data->irq = platform_get_irq(pdev, 0);
- if (device_data->irq <= 0) {
- ret = device_data->irq ? device_data->irq : -ENXIO;
- goto out_power;
- }
-
- ret = devm_request_irq(&pdev->dev, device_data->irq,
- cryp_interrupt_handler, 0, "cryp1", device_data);
- if (ret) {
- dev_err(dev, "[%s]: Unable to request IRQ", __func__);
- goto out_power;
- }
-
- if (cryp_mode == CRYP_MODE_DMA)
- cryp_dma_setup_channel(device_data, dev);
-
- platform_set_drvdata(pdev, device_data);
-
- /* Put the new device into the device list... */
- klist_add_tail(&device_data->list_node, &driver_data.device_list);
-
- /* ... and signal that a new device is available. */
- up(&driver_data.device_allocation);
-
- atomic_set(&session_id, 1);
-
- ret = cryp_algs_register_all();
- if (ret) {
- dev_err(dev, "[%s]: cryp_algs_register_all() failed!",
- __func__);
- goto out_power;
- }
-
- dev_info(dev, "successfully registered\n");
-
- return 0;
-
-out_power:
- cryp_disable_power(device_data->dev, device_data, false);
-
-out_clk_unprepare:
- clk_unprepare(device_data->clk);
-
-out_regulator:
- regulator_put(device_data->pwr_regulator);
-
-out:
- return ret;
-}
-
-static int ux500_cryp_remove(struct platform_device *pdev)
-{
- struct cryp_device_data *device_data;
-
- dev_dbg(&pdev->dev, "[%s]", __func__);
- device_data = platform_get_drvdata(pdev);
- if (!device_data) {
- dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
- __func__);
- return -ENODEV;
- }
-
- /* Try to decrease the number of available devices. */
- if (down_trylock(&driver_data.device_allocation))
- return -EBUSY;
-
- /* Check that the device is free */
- spin_lock(&device_data->ctx_lock);
- /* current_ctx allocates a device, NULL = unallocated */
- if (device_data->current_ctx) {
- /* The device is busy */
- spin_unlock(&device_data->ctx_lock);
- /* Return the device to the pool. */
- up(&driver_data.device_allocation);
- return -EBUSY;
- }
-
- spin_unlock(&device_data->ctx_lock);
-
- /* Remove the device from the list */
- if (klist_node_attached(&device_data->list_node))
- klist_remove(&device_data->list_node);
-
- /* If this was the last device, remove the services */
- if (list_empty(&driver_data.device_list.k_list))
- cryp_algs_unregister_all();
-
- if (cryp_disable_power(&pdev->dev, device_data, false))
- dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
- __func__);
-
- clk_unprepare(device_data->clk);
- regulator_put(device_data->pwr_regulator);
-
- return 0;
-}
-
-static void ux500_cryp_shutdown(struct platform_device *pdev)
-{
- struct cryp_device_data *device_data;
-
- dev_dbg(&pdev->dev, "[%s]", __func__);
-
- device_data = platform_get_drvdata(pdev);
- if (!device_data) {
- dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
- __func__);
- return;
- }
-
- /* Check that the device is free */
- spin_lock(&device_data->ctx_lock);
- /* current_ctx allocates a device, NULL = unallocated */
- if (!device_data->current_ctx) {
- if (down_trylock(&driver_data.device_allocation))
- dev_dbg(&pdev->dev, "[%s]: Cryp still in use! "
- "Shutting down anyway...", __func__);
- /*
-  * Allocate the device: set current_ctx to a non-NULL (dummy)
-  * value to prevent its use during context switching.
-  */
- device_data->current_ctx++;
- }
- spin_unlock(&device_data->ctx_lock);
-
- /* Remove the device from the list */
- if (klist_node_attached(&device_data->list_node))
- klist_remove(&device_data->list_node);
-
- /* If this was the last device, remove the services */
- if (list_empty(&driver_data.device_list.k_list))
- cryp_algs_unregister_all();
-
- if (cryp_disable_power(&pdev->dev, device_data, false))
- dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
- __func__);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int ux500_cryp_suspend(struct device *dev)
-{
- int ret;
- struct platform_device *pdev = to_platform_device(dev);
- struct cryp_device_data *device_data;
- struct cryp_ctx *temp_ctx = NULL;
-
- dev_dbg(dev, "[%s]", __func__);
-
- /* Handle state? */
- device_data = platform_get_drvdata(pdev);
- if (!device_data) {
- dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__);
- return -ENODEV;
- }
-
- disable_irq(device_data->irq);
-
- spin_lock(&device_data->ctx_lock);
- if (!device_data->current_ctx)
- device_data->current_ctx++;
- spin_unlock(&device_data->ctx_lock);
-
- if (device_data->current_ctx == ++temp_ctx) {
- if (down_interruptible(&driver_data.device_allocation))
- dev_dbg(dev, "[%s]: down_interruptible() failed",
- __func__);
- ret = cryp_disable_power(dev, device_data, false);
- } else {
- ret = cryp_disable_power(dev, device_data, true);
- }
-
- if (ret)
- dev_err(dev, "[%s]: cryp_disable_power() failed", __func__);
-
- return ret;
-}
-
-static int ux500_cryp_resume(struct device *dev)
-{
- int ret = 0;
- struct platform_device *pdev = to_platform_device(dev);
- struct cryp_device_data *device_data;
- struct cryp_ctx *temp_ctx = NULL;
-
- dev_dbg(dev, "[%s]", __func__);
-
- device_data = platform_get_drvdata(pdev);
- if (!device_data) {
- dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__);
- return -ENODEV;
- }
-
- spin_lock(&device_data->ctx_lock);
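- /*
-  * A match against the dummy (NULL + 1) context set at suspend
-  * means the device was idle; clear it so the pool slot can be
-  * released below.
-  */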
- if (device_data->current_ctx == ++temp_ctx)
- device_data->current_ctx = NULL;
- spin_unlock(&device_data->ctx_lock);
-
- if (!device_data->current_ctx)
- up(&driver_data.device_allocation);
- else
- ret = cryp_enable_power(dev, device_data, true);
-
- if (ret)
- dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
- else
- enable_irq(device_data->irq);
-
- return ret;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume);
-
-static const struct of_device_id ux500_cryp_match[] = {
- { .compatible = "stericsson,ux500-cryp" },
- { },
-};
-MODULE_DEVICE_TABLE(of, ux500_cryp_match);
-
-static struct platform_driver cryp_driver = {
- .probe = ux500_cryp_probe,
- .remove = ux500_cryp_remove,
- .shutdown = ux500_cryp_shutdown,
- .driver = {
- .name = "cryp1",
- .of_match_table = ux500_cryp_match,
- .pm = &ux500_cryp_pm,
- }
-};
-
-static int __init ux500_cryp_mod_init(void)
-{
- pr_debug("[%s] is called!", __func__);
- klist_init(&driver_data.device_list, NULL, NULL);
- /* Initialize the semaphore to 0 devices (locked state) */
- sema_init(&driver_data.device_allocation, 0);
- return platform_driver_register(&cryp_driver);
-}
-
-static void __exit ux500_cryp_mod_fini(void)
-{
- pr_debug("[%s] is called!", __func__);
- platform_driver_unregister(&cryp_driver);
-}
-
-module_init(ux500_cryp_mod_init);
-module_exit(ux500_cryp_mod_fini);
-
-module_param(cryp_mode, int, 0);
-
-MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine.");
-MODULE_ALIAS_CRYPTO("aes-all");
-MODULE_ALIAS_CRYPTO("des-all");
-
-MODULE_LICENSE("GPL");
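Note on the pattern being removed: ux500_cryp_probe() publishes each new device by up()ing driver_data.device_allocation, which ux500_cryp_mod_init() initialised to zero, and ux500_cryp_remove() reclaims a slot with down_trylock() so a busy pool fails with -EBUSY. A minimal user-space sketch of that counting-semaphore device pool, using POSIX semaphores and illustrative names only (not driver code):

#include <semaphore.h>
#include <stdio.h>

static sem_t device_allocation;

static void fake_probe(void)
{
	sem_post(&device_allocation);		/* one more device available */
}

static int fake_remove(void)
{
	/* Claim a free device or bail out, like down_trylock() above. */
	if (sem_trywait(&device_allocation))
		return -1;			/* pool busy/empty: -EBUSY in the driver */
	return 0;
}

int main(void)
{
	sem_init(&device_allocation, 0, 0);	/* start locked: zero devices */
	fake_probe();
	printf("first remove: %d\n", fake_remove());	/* 0: a device was free */
	printf("second remove: %d\n", fake_remove());	/* -1: nothing left */
	sem_destroy(&device_allocation);
	return 0;
}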
diff --git a/drivers/crypto/ux500/cryp/cryp_irq.c b/drivers/crypto/ux500/cryp/cryp_irq.c
deleted file mode 100644
index 6d2f07bec98a..000000000000
--- a/drivers/crypto/ux500/cryp/cryp_irq.c
+++ /dev/null
@@ -1,45 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson SA 2010
- * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
- * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
- * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
- * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
- * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
- */
-
-#include <linux/kernel.h>
-#include <linux/bitmap.h>
-#include <linux/device.h>
-
-#include "cryp.h"
-#include "cryp_p.h"
-#include "cryp_irq.h"
-#include "cryp_irqp.h"
-
-void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src)
-{
- u32 i;
-
- dev_dbg(device_data->dev, "[%s]", __func__);
-
- i = readl_relaxed(&device_data->base->imsc);
- i = i | irq_src;
- writel_relaxed(i, &device_data->base->imsc);
-}
-
-void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src)
-{
- u32 i;
-
- dev_dbg(device_data->dev, "[%s]", __func__);
-
- i = readl_relaxed(&device_data->base->imsc);
- i = i & ~irq_src;
- writel_relaxed(i, &device_data->base->imsc);
-}
-
-bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src)
-{
- return (readl_relaxed(&device_data->base->mis) & irq_src) > 0;
-}
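The three helpers above are plain read-modify-write updates of the interrupt mask (imsc) and masked-status (mis) registers. The same set/clear logic on an ordinary variable, as a stand-alone sketch (the constants mirror enum cryp_irq_src_id in cryp_irq.h; nothing here is kernel API):

#include <stdint.h>
#include <stdio.h>

#define IRQ_SRC_INPUT_FIFO	0x1u
#define IRQ_SRC_OUTPUT_FIFO	0x2u

int main(void)
{
	uint32_t imsc = 0;

	imsc |= IRQ_SRC_INPUT_FIFO;	/* cryp_enable_irq_src(): set the source bit */
	imsc |= IRQ_SRC_OUTPUT_FIFO;
	imsc &= ~IRQ_SRC_INPUT_FIFO;	/* cryp_disable_irq_src(): clear it again */
	printf("imsc = %#x\n", imsc);	/* 0x2: only the output FIFO source is left */
	return 0;
}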
diff --git a/drivers/crypto/ux500/cryp/cryp_irq.h b/drivers/crypto/ux500/cryp/cryp_irq.h
deleted file mode 100644
index da90029ea141..000000000000
--- a/drivers/crypto/ux500/cryp/cryp_irq.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2010
- * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
- * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
- * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
- * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
- * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
- */
-
-#ifndef _CRYP_IRQ_H_
-#define _CRYP_IRQ_H_
-
-#include "cryp.h"
-
-enum cryp_irq_src_id {
- CRYP_IRQ_SRC_INPUT_FIFO = 0x1,
- CRYP_IRQ_SRC_OUTPUT_FIFO = 0x2,
- CRYP_IRQ_SRC_ALL = 0x3
-};
-
-/*
- * M0 Functions
- */
-void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src);
-
-void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src);
-
-bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src);
-
-#endif /* _CRYP_IRQ_H_ */
diff --git a/drivers/crypto/ux500/cryp/cryp_irqp.h b/drivers/crypto/ux500/cryp/cryp_irqp.h
deleted file mode 100644
index 4981a3f461e5..000000000000
--- a/drivers/crypto/ux500/cryp/cryp_irqp.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2010
- * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
- * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
- * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
- * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
- * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
- */
-
-#ifndef __CRYP_IRQP_H_
-#define __CRYP_IRQP_H_
-
-#include "cryp_irq.h"
-
-/*
- *
- * CRYP Registers - Offset mapping
- * +-----------------+
- * 00h | CRYP_CR | Configuration register
- * +-----------------+
- * 04h | CRYP_SR | Status register
- * +-----------------+
- * 08h | CRYP_DIN | Data In register
- * +-----------------+
- * 0ch | CRYP_DOUT | Data out register
- * +-----------------+
- * 10h | CRYP_DMACR | DMA control register
- * +-----------------+
- * 14h | CRYP_IMSC | IMSC
- * +-----------------+
- * 18h | CRYP_RIS | Raw interrupt status
- * +-----------------+
- * 1ch | CRYP_MIS | Masked interrupt status.
- * +-----------------+
- * Key registers
- * IVR registers
- * Peripheral
- * Cell IDs
- *
- * Refer data structure for other register map
- */
-
-/**
- * struct cryp_register
- * @cr - Configuration register
- * @status - Status register
- * @din - Data input register
- * @din_size - Data input size register
- * @dout - Data output register
- * @dout_size - Data output size register
- * @dmacr - DMA control register
- * @imsc - Interrupt mask set/clear register
- * @ris - Raw interrupt status
- * @mis - Masked interrupt status register
- * @key_1_l - Key register 1 L
- * @key_1_r - Key register 1 R
- * @key_2_l - Key register 2 L
- * @key_2_r - Key register 2 R
- * @key_3_l - Key register 3 L
- * @key_3_r - Key register 3 R
- * @key_4_l - Key register 4 L
- * @key_4_r - Key register 4 R
- * @init_vect_0_l - init vector 0 L
- * @init_vect_0_r - init vector 0 R
- * @init_vect_1_l - init vector 1 L
- * @init_vect_1_r - init vector 1 R
- * @cryp_unused1 - unused registers
- * @itcr - Integration test control register
- * @itip - Integration test input register
- * @itop - Integration test output register
- * @cryp_unused2 - unused registers
- * @periphId0 - FE0 CRYP Peripheral Identification Register
- * @periphId1 - FE4
- * @periphId2 - FE8
- * @periphId3 - FEC
- * @pcellId0 - FF0 CRYP PCell Identification Register
- * @pcellId1 - FF4
- * @pcellId2 - FF8
- * @pcellId3 - FFC
- */
-struct cryp_register {
- u32 cr; /* Configuration register */
- u32 sr; /* Status register */
- u32 din; /* Data input register */
- u32 din_size; /* Data input size register */
- u32 dout; /* Data output register */
- u32 dout_size; /* Data output size register */
- u32 dmacr; /* DMA control register */
- u32 imsc; /* Interrupt mask set/clear register */
- u32 ris; /* Raw interrupt status */
- u32 mis; /* Masked interrupt status register */
-
- u32 key_1_l; /* Key register 1 L */
- u32 key_1_r; /* Key register 1 R */
- u32 key_2_l; /* Key register 2 L */
- u32 key_2_r; /* Key register 2 R */
- u32 key_3_l; /* Key register 3 L */
- u32 key_3_r; /* Key register 3 R */
- u32 key_4_l; /* Key register 4 L */
- u32 key_4_r; /* Key register 4 R */
-
- u32 init_vect_0_l; /* init vector 0 L */
- u32 init_vect_0_r; /* init vector 0 R */
- u32 init_vect_1_l; /* init vector 1 L */
- u32 init_vect_1_r; /* init vector 1 R */
-
- u32 cryp_unused1[(0x80 - 0x58) / sizeof(u32)]; /* unused registers */
- u32 itcr; /* Integration test control register */
- u32 itip; /* Integration test input register */
- u32 itop; /* Integration test output register */
- u32 cryp_unused2[(0xFE0 - 0x8C) / sizeof(u32)]; /* unused registers */
-
- u32 periphId0; /* FE0 CRYP Peripheral Identification Register */
- u32 periphId1; /* FE4 */
- u32 periphId2; /* FE8 */
- u32 periphId3; /* FEC */
-
- u32 pcellId0; /* FF0 CRYP PCell Identification Register */
- u32 pcellId1; /* FF4 */
- u32 pcellId2; /* FF8 */
- u32 pcellId3; /* FFC */
-};
-
-#endif
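The cryp_unused1/cryp_unused2 arrays exist purely to push the later registers onto their documented offsets: (0x80 - 0x58) / 4 = 10 padding words put itcr at 0x80, and (0xFE0 - 0x8C) / 4 = 981 words put periphId0 at 0xFE0, giving a 4 KiB window overall. A compile-time sketch of that arithmetic, collapsing the 22 named registers before the first gap into a single array (a hypothetical struct for illustration, not the driver's):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct layout {
	uint32_t regs[22];	/* cr .. init_vect_1_r: 0x00-0x57 */
	uint32_t unused1[(0x80 - 0x58) / sizeof(uint32_t)];
	uint32_t itcr;		/* 0x80 */
	uint32_t itip;		/* 0x84 */
	uint32_t itop;		/* 0x88 */
	uint32_t unused2[(0xFE0 - 0x8C) / sizeof(uint32_t)];
	uint32_t periph_id[4];	/* 0xFE0-0xFEC */
	uint32_t pcell_id[4];	/* 0xFF0-0xFFC */
};

static_assert(offsetof(struct layout, itcr) == 0x80, "itcr lands at 0x80");
static_assert(offsetof(struct layout, periph_id) == 0xFE0, "periphId0 at 0xFE0");
static_assert(sizeof(struct layout) == 0x1000, "4 KiB register window");

int main(void) { return 0; }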
diff --git a/drivers/crypto/ux500/cryp/cryp_p.h b/drivers/crypto/ux500/cryp/cryp_p.h
deleted file mode 100644
index 60b47fe4de35..000000000000
--- a/drivers/crypto/ux500/cryp/cryp_p.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2010
- * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
- * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
- * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
- * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
- * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
- */
-
-#ifndef _CRYP_P_H_
-#define _CRYP_P_H_
-
-#include <linux/io.h>
-#include <linux/bitops.h>
-
-#include "cryp.h"
-#include "cryp_irqp.h"
-
-/*
- * Generic Macros
- */
-#define CRYP_SET_BITS(reg_name, mask) \
- writel_relaxed((readl_relaxed(reg_name) | mask), reg_name)
-
-#define CRYP_WRITE_BIT(reg_name, val, mask) \
- writel_relaxed(((readl_relaxed(reg_name) & ~(mask)) |\
- ((val) & (mask))), reg_name)
-
-#define CRYP_TEST_BITS(reg_name, val) \
- (readl_relaxed(reg_name) & (val))
-
-#define CRYP_PUT_BITS(reg, val, shift, mask) \
- writel_relaxed(((readl_relaxed(reg) & ~(mask)) | \
- (((u32)val << shift) & (mask))), reg)
-
-/*
- * CRYP specific Macros
- */
-#define CRYP_PERIPHERAL_ID0 0xE3
-#define CRYP_PERIPHERAL_ID1 0x05
-
-#define CRYP_PERIPHERAL_ID2_DB8500 0x28
-#define CRYP_PERIPHERAL_ID3 0x00
-
-#define CRYP_PCELL_ID0 0x0D
-#define CRYP_PCELL_ID1 0xF0
-#define CRYP_PCELL_ID2 0x05
-#define CRYP_PCELL_ID3 0xB1
-
-/*
- * CRYP register default values
- */
-#define MAX_DEVICE_SUPPORT 2
-
-/* Priv set, keyrden set and datatype 8bits swapped set as default. */
-#define CRYP_CR_DEFAULT 0x0482
-#define CRYP_DMACR_DEFAULT 0x0
-#define CRYP_IMSC_DEFAULT 0x0
-#define CRYP_DIN_DEFAULT 0x0
-#define CRYP_DOUT_DEFAULT 0x0
-#define CRYP_KEY_DEFAULT 0x0
-#define CRYP_INIT_VECT_DEFAULT 0x0
-
-/*
- * CRYP Control register specific mask
- */
-#define CRYP_CR_SECURE_MASK BIT(0)
-#define CRYP_CR_PRLG_MASK BIT(1)
-#define CRYP_CR_ALGODIR_MASK BIT(2)
-#define CRYP_CR_ALGOMODE_MASK (BIT(5) | BIT(4) | BIT(3))
-#define CRYP_CR_DATATYPE_MASK (BIT(7) | BIT(6))
-#define CRYP_CR_KEYSIZE_MASK (BIT(9) | BIT(8))
-#define CRYP_CR_KEYRDEN_MASK BIT(10)
-#define CRYP_CR_KSE_MASK BIT(11)
-#define CRYP_CR_START_MASK BIT(12)
-#define CRYP_CR_INIT_MASK BIT(13)
-#define CRYP_CR_FFLUSH_MASK BIT(14)
-#define CRYP_CR_CRYPEN_MASK BIT(15)
-#define CRYP_CR_CONTEXT_SAVE_MASK (CRYP_CR_SECURE_MASK |\
- CRYP_CR_PRLG_MASK |\
- CRYP_CR_ALGODIR_MASK |\
- CRYP_CR_ALGOMODE_MASK |\
- CRYP_CR_KEYSIZE_MASK |\
- CRYP_CR_KEYRDEN_MASK |\
- CRYP_CR_DATATYPE_MASK)
-
-#define CRYP_SR_INFIFO_READY_MASK (BIT(0) | BIT(1))
-#define CRYP_SR_IFEM_MASK BIT(0)
-#define CRYP_SR_BUSY_MASK BIT(4)
-
-/*
- * Bit position used while setting bits in register
- */
-#define CRYP_CR_PRLG_POS 1
-#define CRYP_CR_ALGODIR_POS 2
-#define CRYP_CR_ALGOMODE_POS 3
-#define CRYP_CR_DATATYPE_POS 6
-#define CRYP_CR_KEYSIZE_POS 8
-#define CRYP_CR_KEYRDEN_POS 10
-#define CRYP_CR_KSE_POS 11
-#define CRYP_CR_START_POS 12
-#define CRYP_CR_INIT_POS 13
-#define CRYP_CR_CRYPEN_POS 15
-
-#define CRYP_SR_BUSY_POS 4
-
-/*
- * CRYP DMA request control bit masks
- */
-#define CRYP_DMA_REQ_MASK (BIT(1) | BIT(0))
-#define CRYP_DMA_REQ_MASK_POS 0
-
-struct cryp_system_context {
- /* CRYP Register structure */
- struct cryp_register *p_cryp_reg[MAX_DEVICE_SUPPORT];
-};
-
-#endif
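CRYP_PUT_BITS() above is the standard masked field update: clear the field with ~mask, then OR in the shifted value. The same step on a plain variable instead of readl_relaxed()/writel_relaxed(), as a small sketch (field names reuse the KEYSIZE definitions above; the value written is arbitrary):

#include <stdint.h>
#include <stdio.h>

#define CR_KEYSIZE_POS	8
#define CR_KEYSIZE_MASK	(0x3u << CR_KEYSIZE_POS)	/* BIT(9) | BIT(8) */

/* CRYP_PUT_BITS() minus the MMIO accessors. */
static void put_bits(uint32_t *reg, uint32_t val, int shift, uint32_t mask)
{
	*reg = (*reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t cr = 0x0482;	/* CRYP_CR_DEFAULT */

	put_bits(&cr, 0x2, CR_KEYSIZE_POS, CR_KEYSIZE_MASK);
	printf("cr = %#x\n", cr);	/* 0x682: KEYSIZE field now holds 2 */
	return 0;
}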
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index 168195672e2e..b2979be613b8 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -479,6 +479,9 @@ static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm)
ctx->enginectx.op.prepare_request = NULL;
ctx->enginectx.op.unprepare_request = NULL;
+ akcipher_set_reqsize(tfm,
+ sizeof(struct virtio_crypto_akcipher_request));
+
return 0;
}
@@ -505,7 +508,6 @@ static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
.max_size = virtio_crypto_rsa_max_size,
.init = virtio_crypto_rsa_init_tfm,
.exit = virtio_crypto_rsa_exit_tfm,
- .reqsize = sizeof(struct virtio_crypto_akcipher_request),
.base = {
.cra_name = "rsa",
.cra_driver_name = "virtio-crypto-rsa",
@@ -528,7 +530,6 @@ static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
.max_size = virtio_crypto_rsa_max_size,
.init = virtio_crypto_rsa_init_tfm,
.exit = virtio_crypto_rsa_exit_tfm,
- .reqsize = sizeof(struct virtio_crypto_akcipher_request),
.base = {
.cra_name = "pkcs1pad(rsa,sha1)",
.cra_driver_name = "virtio-pkcs1-rsa-with-sha1",