Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/hw_random/Kconfig | 2
-rw-r--r--  drivers/char/hw_random/iproc-rng200.c | 1
-rw-r--r--  drivers/crypto/Kconfig | 89
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c | 1
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c | 24
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c | 5
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h | 9
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 6
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 6
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h | 8
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 2
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 4
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c | 31
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 37
-rw-r--r--  drivers/crypto/amlogic/Kconfig | 1
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl-cipher.c | 1
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl-core.c | 4
-rw-r--r--  drivers/crypto/atmel-aes.c | 359
-rw-r--r--  drivers/crypto/atmel-authenc.h | 3
-rw-r--r--  drivers/crypto/atmel-sha.c | 473
-rw-r--r--  drivers/crypto/atmel-tdes.c | 375
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c | 10
-rw-r--r--  drivers/crypto/bcm/cipher.c | 17
-rw-r--r--  drivers/crypto/caam/Kconfig | 14
-rw-r--r--  drivers/crypto/caam/caamalg.c | 33
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 44
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c | 206
-rw-r--r--  drivers/crypto/caam/caamhash.c | 167
-rw-r--r--  drivers/crypto/caam/ctrl.c | 15
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_algs.c | 2
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_aead.c | 4
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_skcipher.c | 12
-rw-r--r--  drivers/crypto/ccp/Makefile | 4
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-galois.c | 1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes.c | 1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c | 4
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v3.c | 1
-rw-r--r--  drivers/crypto/ccp/psp-dev.c | 1042
-rw-r--r--  drivers/crypto/ccp/psp-dev.h | 51
-rw-r--r--  drivers/crypto/ccp/sev-dev.c | 1077
-rw-r--r--  drivers/crypto/ccp/sev-dev.h | 63
-rw-r--r--  drivers/crypto/ccp/sp-dev.h | 17
-rw-r--r--  drivers/crypto/ccp/sp-pci.c | 43
-rw-r--r--  drivers/crypto/ccp/tee-dev.c | 375
-rw-r--r--  drivers/crypto/ccp/tee-dev.h | 110
-rw-r--r--  drivers/crypto/ccree/cc_aead.c | 43
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c | 58
-rw-r--r--  drivers/crypto/ccree/cc_driver.c | 24
-rw-r--r--  drivers/crypto/ccree/cc_driver.h | 6
-rw-r--r--  drivers/crypto/ccree/cc_fips.c | 2
-rw-r--r--  drivers/crypto/ccree/cc_hash.c | 8
-rw-r--r--  drivers/crypto/ccree/cc_pm.c | 39
-rw-r--r--  drivers/crypto/ccree/cc_pm.h | 17
-rw-r--r--  drivers/crypto/ccree/cc_request_mgr.c | 103
-rw-r--r--  drivers/crypto/ccree/cc_request_mgr.h | 8
-rw-r--r--  drivers/crypto/chelsio/Kconfig | 30
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 53
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c | 10
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls.h | 7
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_cm.c | 57
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_cm.h | 21
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_hw.c | 65
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_main.c | 28
-rw-r--r--  drivers/crypto/geode-aes.c | 24
-rw-r--r--  drivers/crypto/hisilicon/Kconfig | 11
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_crypto.c | 141
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c | 60
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec.h | 49
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c | 963
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.h | 22
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c | 23
-rw-r--r--  drivers/crypto/hisilicon/sgl.c | 17
-rw-r--r--  drivers/crypto/hisilicon/zip/zip.h | 4
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_crypto.c | 92
-rw-r--r--  drivers/crypto/img-hash.c | 6
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 12
-rw-r--r--  drivers/crypto/inside-secure/safexcel.h | 34
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 600
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c | 36
-rw-r--r--  drivers/crypto/inside-secure/safexcel_ring.c | 130
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 31
-rw-r--r--  drivers/crypto/marvell/cipher.c | 4
-rw-r--r--  drivers/crypto/mediatek/mtk-aes.c | 4
-rw-r--r--  drivers/crypto/mxs-dcp.c | 12
-rw-r--r--  drivers/crypto/n2_core.c | 1
-rw-r--r--  drivers/crypto/omap-aes-gcm.c | 223
-rw-r--r--  drivers/crypto/omap-aes.c | 142
-rw-r--r--  drivers/crypto/omap-aes.h | 12
-rw-r--r--  drivers/crypto/omap-crypto.c | 37
-rw-r--r--  drivers/crypto/omap-des.c | 13
-rw-r--r--  drivers/crypto/omap-sham.c | 191
-rw-r--r--  drivers/crypto/padlock-aes.c | 9
-rw-r--r--  drivers/crypto/padlock-sha.c | 26
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 30
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 6
-rw-r--r--  drivers/crypto/qce/Makefile | 7
-rw-r--r--  drivers/crypto/qce/common.c | 244
-rw-r--r--  drivers/crypto/qce/core.c | 4
-rw-r--r--  drivers/crypto/qce/dma.c | 6
-rw-r--r--  drivers/crypto/qce/dma.h | 3
-rw-r--r--  drivers/crypto/qce/sha.c | 2
-rw-r--r--  drivers/crypto/qce/skcipher.c | 41
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_skcipher.c | 4
-rw-r--r--  drivers/crypto/sahara.c | 9
-rw-r--r--  drivers/crypto/stm32/Kconfig | 6
-rw-r--r--  drivers/crypto/stm32/stm32-crc32.c | 4
-rw-r--r--  drivers/crypto/stm32/stm32-hash.c | 6
-rw-r--r--  drivers/crypto/talitos.c | 15
-rw-r--r--  drivers/crypto/ux500/Kconfig | 16
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c | 2
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_algs.c | 8
-rw-r--r--  drivers/crypto/vmx/aes_xts.c | 3
-rw-r--r--  drivers/tee/Kconfig | 4
-rw-r--r--  drivers/tee/Makefile | 1
-rw-r--r--  drivers/tee/amdtee/Kconfig | 8
-rw-r--r--  drivers/tee/amdtee/Makefile | 5
-rw-r--r--  drivers/tee/amdtee/amdtee_if.h | 183
-rw-r--r--  drivers/tee/amdtee/amdtee_private.h | 159
-rw-r--r--  drivers/tee/amdtee/call.c | 373
-rw-r--r--  drivers/tee/amdtee/core.c | 518
-rw-r--r--  drivers/tee/amdtee/shm_pool.c | 93
122 files changed, 6106 insertions, 3917 deletions
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 8486c29d8324..914e293ba62b 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -90,7 +90,7 @@ config HW_RANDOM_BCM2835
config HW_RANDOM_IPROC_RNG200
tristate "Broadcom iProc/STB RNG200 support"
- depends on ARCH_BCM_IPROC || ARCH_BRCMSTB
+ depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB
default HW_RANDOM
---help---
This driver provides kernel-side support for the RNG200
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
index 899ff25f4f28..32d9fe61a225 100644
--- a/drivers/char/hw_random/iproc-rng200.c
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -213,6 +213,7 @@ static int iproc_rng200_probe(struct platform_device *pdev)
}
static const struct of_device_id iproc_rng200_of_match[] = {
+ { .compatible = "brcm,bcm2711-rng200", },
{ .compatible = "brcm,bcm7211-rng200", },
{ .compatible = "brcm,bcm7278-rng200", },
{ .compatible = "brcm,iproc-rng200", },
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 91eb768d4221..c2767ed54dfe 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -248,15 +248,15 @@ config CRYPTO_DEV_MARVELL_CESA
This driver supports CPU offload through DMA transfers.
config CRYPTO_DEV_NIAGARA2
- tristate "Niagara2 Stream Processing Unit driver"
- select CRYPTO_LIB_DES
- select CRYPTO_SKCIPHER
- select CRYPTO_HASH
- select CRYPTO_MD5
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- depends on SPARC64
- help
+ tristate "Niagara2 Stream Processing Unit driver"
+ select CRYPTO_LIB_DES
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ depends on SPARC64
+ help
Each core of a Niagara2 processor contains a Stream
Processing Unit, which itself contains several cryptographic
sub-units. One set provides the Modular Arithmetic Unit,
@@ -357,7 +357,7 @@ config CRYPTO_DEV_OMAP
depends on ARCH_OMAP2PLUS
help
OMAP processors have various crypto HW accelerators. Select this if
- you want to use the OMAP modules for any of the crypto algorithms.
+ you want to use the OMAP modules for any of the crypto algorithms.
if CRYPTO_DEV_OMAP
@@ -430,7 +430,7 @@ config CRYPTO_DEV_SAHARA
found in some Freescale i.MX chips.
config CRYPTO_DEV_EXYNOS_RNG
- tristate "EXYNOS HW pseudo random number generator support"
+ tristate "Exynos HW pseudo random number generator support"
depends on ARCH_EXYNOS || COMPILE_TEST
depends on HAS_IOMEM
select CRYPTO_RNG
@@ -618,6 +618,14 @@ config CRYPTO_DEV_QCE
tristate "Qualcomm crypto engine accelerator"
depends on ARCH_QCOM || COMPILE_TEST
depends on HAS_IOMEM
+ help
+ This driver supports Qualcomm crypto engine accelerator
+ hardware. To compile this driver as a module, choose M here. The
+ module will be called qcrypto.
+
+config CRYPTO_DEV_QCE_SKCIPHER
+ bool
+ depends on CRYPTO_DEV_QCE
select CRYPTO_AES
select CRYPTO_LIB_DES
select CRYPTO_ECB
@@ -625,10 +633,57 @@ config CRYPTO_DEV_QCE
select CRYPTO_XTS
select CRYPTO_CTR
select CRYPTO_SKCIPHER
- help
- This driver supports Qualcomm crypto engine accelerator
- hardware. To compile this driver as a module, choose M here. The
- module will be called qcrypto.
+
+config CRYPTO_DEV_QCE_SHA
+ bool
+ depends on CRYPTO_DEV_QCE
+
+choice
+ prompt "Algorithms enabled for QCE acceleration"
+ default CRYPTO_DEV_QCE_ENABLE_ALL
+ depends on CRYPTO_DEV_QCE
+ help
+ This option allows you to choose whether to build support for all algorithms
+ (default), hashes-only, or skciphers-only.
+
+ The QCE engine does not appear to scale as well as the CPU to handle
+ multiple crypto requests. While the ipq40xx chips have 4-core CPUs, the
+ QCE handles only 2 requests in parallel.
+
+ IPsec throughput seems to improve when disabling either family of
+ algorithms, sharing the load with the CPU. Enabling skciphers-only
+ appears to work best.
+
+ config CRYPTO_DEV_QCE_ENABLE_ALL
+ bool "All supported algorithms"
+ select CRYPTO_DEV_QCE_SKCIPHER
+ select CRYPTO_DEV_QCE_SHA
+ help
+ Enable all supported algorithms:
+ - AES (CBC, CTR, ECB, XTS)
+ - 3DES (CBC, ECB)
+ - DES (CBC, ECB)
+ - SHA1, HMAC-SHA1
+ - SHA256, HMAC-SHA256
+
+ config CRYPTO_DEV_QCE_ENABLE_SKCIPHER
+ bool "Symmetric-key ciphers only"
+ select CRYPTO_DEV_QCE_SKCIPHER
+ help
+ Enable symmetric-key ciphers only:
+ - AES (CBC, CTR, ECB, XTS)
+ - 3DES (ECB, CBC)
+ - DES (ECB, CBC)
+
+ config CRYPTO_DEV_QCE_ENABLE_SHA
+ bool "Hash/HMAC only"
+ select CRYPTO_DEV_QCE_SHA
+ help
+ Enable hashes/HMAC algorithms only:
+ - SHA1, HMAC-SHA1
+ - SHA256, HMAC-SHA256
+
+endchoice
config CRYPTO_DEV_QCOM_RNG
tristate "Qualcomm Random Number Generator Driver"
@@ -639,7 +694,7 @@ config CRYPTO_DEV_QCOM_RNG
Generator hardware found on Qualcomm SoCs.
To compile this driver as a module, choose M here. The
- module will be called qcom-rng. If unsure, say N.
+ module will be called qcom-rng. If unsure, say N.
config CRYPTO_DEV_VMX
bool "Support for VMX cryptographic acceleration instructions"
@@ -716,7 +771,7 @@ source "drivers/crypto/stm32/Kconfig"
config CRYPTO_DEV_SAFEXCEL
tristate "Inside Secure's SafeXcel cryptographic engine driver"
- depends on OF || PCI || COMPILE_TEST
+ depends on (OF || PCI || COMPILE_TEST) && HAS_IOMEM
select CRYPTO_LIB_AES
select CRYPTO_AUTHENC
select CRYPTO_SKCIPHER
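
The CRYPTO_DEV_QCE_SKCIPHER and CRYPTO_DEV_QCE_SHA bools introduced in the Kconfig hunk above let a build carry only one family of QCE algorithms. As a hedged illustration only (this is not the driver's actual code, and qce_register_skciphers()/qce_register_ahashes() are hypothetical placeholder names), registration could be gated on those symbols with the real IS_ENABLED() macro:

    #include <linux/kernel.h>

    /* Sketch: compile-time selection of which QCE algorithm families
     * get registered, driven by the new Kconfig bools. */
    static int qce_register_algs_sketch(struct qce_device *qce)
    {
            int ret = 0;

            if (IS_ENABLED(CONFIG_CRYPTO_DEV_QCE_SKCIPHER))
                    ret = qce_register_skciphers(qce);  /* AES/DES/3DES modes */
            if (!ret && IS_ENABLED(CONFIG_CRYPTO_DEV_QCE_SHA))
                    ret = qce_register_ahashes(qce);    /* SHA1/SHA256, HMAC */

            return ret;
    }

Whichever choice entry is selected, it simply selects one or both of the hidden bools, so only the corresponding registration path above would be compiled in.
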
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
index cb2b0874f68f..7f22d305178e 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
@@ -541,7 +541,6 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
break;
default:
dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
op->keylen = keylen;
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 814cd12149a9..a2b67f7f8a81 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
@@ -22,6 +23,14 @@
#include "sun4i-ss.h"
+static const struct ss_variant ss_a10_variant = {
+ .sha1_in_be = false,
+};
+
+static const struct ss_variant ss_a33_variant = {
+ .sha1_in_be = true,
+};
+
static struct sun4i_ss_alg_template ss_algs[] = {
{ .type = CRYPTO_ALG_TYPE_AHASH,
.mode = SS_OP_MD5,
@@ -273,7 +282,7 @@ err_enable:
return err;
}
-const struct dev_pm_ops sun4i_ss_pm_ops = {
+static const struct dev_pm_ops sun4i_ss_pm_ops = {
SET_RUNTIME_PM_OPS(sun4i_ss_pm_suspend, sun4i_ss_pm_resume, NULL)
};
@@ -323,6 +332,12 @@ static int sun4i_ss_probe(struct platform_device *pdev)
return PTR_ERR(ss->base);
}
+ ss->variant = of_device_get_match_data(&pdev->dev);
+ if (!ss->variant) {
+ dev_err(&pdev->dev, "Missing Security System variant\n");
+ return -EINVAL;
+ }
+
ss->ssclk = devm_clk_get(&pdev->dev, "mod");
if (IS_ERR(ss->ssclk)) {
err = PTR_ERR(ss->ssclk);
@@ -484,7 +499,12 @@ static int sun4i_ss_remove(struct platform_device *pdev)
}
static const struct of_device_id a20ss_crypto_of_match_table[] = {
- { .compatible = "allwinner,sun4i-a10-crypto" },
+ { .compatible = "allwinner,sun4i-a10-crypto",
+ .data = &ss_a10_variant
+ },
+ { .compatible = "allwinner,sun8i-a33-crypto",
+ .data = &ss_a33_variant
+ },
{}
};
MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table);
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
index fdc0e6cdbb85..dc35edd90034 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
@@ -479,7 +479,10 @@ hash_final:
/* Get the hash from the device */
if (op->mode == SS_OP_SHA1) {
for (i = 0; i < 5; i++) {
- v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
+ if (ss->variant->sha1_in_be)
+ v = cpu_to_le32(readl(ss->base + SS_MD0 + i * 4));
+ else
+ v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
memcpy(areq->result + i * 4, &v, 4);
}
} else {
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
index 60425ac75d90..2b4c6333eb67 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
@@ -131,7 +131,16 @@
#define SS_SEED_LEN 192
#define SS_DATA_LEN 160
+/*
+ * struct ss_variant - Describe SS hardware variant
+ * @sha1_in_be: The SHA1 digest is given by the SS in big endian, and so needs to be byte-swapped.
+ */
+struct ss_variant {
+ bool sha1_in_be;
+};
+
struct sun4i_ss_ctx {
+ const struct ss_variant *variant;
void __iomem *base;
int irq;
struct clk *busclk;
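
The sun4i-ss hunks above introduce a per-compatible ss_variant that is attached through the of_device_id .data pointer and fetched with of_device_get_match_data() in probe. A minimal standalone sketch of that pattern (my_variant/my_probe are hypothetical names, not driver symbols) looks like this:

    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    struct my_variant {
            bool sha1_in_be;
    };

    static const struct my_variant a10_variant = { .sha1_in_be = false };
    static const struct my_variant a33_variant = { .sha1_in_be = true };

    static const struct of_device_id my_of_match[] = {
            { .compatible = "allwinner,sun4i-a10-crypto", .data = &a10_variant },
            { .compatible = "allwinner,sun8i-a33-crypto", .data = &a33_variant },
            { /* sentinel */ }
    };

    static int my_probe(struct platform_device *pdev)
    {
            const struct my_variant *variant;

            /* Returns the .data pointer of the matched of_device_id entry. */
            variant = of_device_get_match_data(&pdev->dev);
            if (!variant)
                    return -EINVAL;

            /* variant->sha1_in_be then selects the digest byte-order handling. */
            return 0;
    }

This is why the probe hunk above errors out with "Missing Security System variant" when no match data is found: every supported compatible must carry a variant description.
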
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 37d0b6c386a0..a5fd8975f3d3 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -144,11 +144,6 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
cet->t_sym_ctl = cpu_to_le32(sym);
cet->t_asym_ctl = 0;
- chan->op_mode = ce->variant->op_mode[algt->ce_blockmode];
- chan->op_dir = rctx->op_dir;
- chan->method = ce->variant->alg_cipher[algt->ce_algo_id];
- chan->keylen = op->keylen;
-
addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
cet->t_key = cpu_to_le32(addr_key);
if (dma_mapping_error(ce->dev, addr_key)) {
@@ -394,7 +389,6 @@ int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
break;
default:
dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
if (op->key) {
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 73a7649f915d..f72346a44e69 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -555,7 +555,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
return -EINVAL;
}
- ce->base = devm_platform_ioremap_resource(pdev, 0);;
+ ce->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ce->base))
return PTR_ERR(ce->base);
@@ -624,7 +624,7 @@ error_alg:
error_irq:
sun8i_ce_pm_exit(ce);
error_pm:
- sun8i_ce_free_chanlist(ce, MAXFLOW);
+ sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
return err;
}
@@ -638,7 +638,7 @@ static int sun8i_ce_remove(struct platform_device *pdev)
debugfs_remove_recursive(ce->dbgfs_dir);
#endif
- sun8i_ce_free_chanlist(ce, MAXFLOW);
+ sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
sun8i_ce_pm_exit(ce);
return 0;
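
The error and remove paths above now pass MAXFLOW - 1, the index of the last flow, instead of the flow count MAXFLOW. The free helper itself is not part of this diff, so the sketch below is an assumption about its shape, included only to show why a last-valid-index argument is the right thing to pass: a helper that walks indices i down to 0 must be handed MAXFLOW - 1 when all MAXFLOW flows were initialised.

    #include <linux/slab.h>
    #include <crypto/engine.h>

    #define SKETCH_MAXFLOW 4    /* stand-in for the driver's MAXFLOW */

    struct sketch_flow {
            struct crypto_engine *engine;
            void *bounce_iv;
    };

    struct sketch_dev {
            struct sketch_flow chanlist[SKETCH_MAXFLOW];
    };

    /* Hypothetical: frees chanlist[0..i] inclusive, so callers pass the
     * last valid index, not the number of flows. */
    static void sketch_free_chanlist(struct sketch_dev *dev, int i)
    {
            while (i >= 0) {
                    crypto_engine_exit(dev->chanlist[i].engine);
                    kfree(dev->chanlist[i].bounce_iv);
                    i--;
            }
    }
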
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 43db49ceafe4..8f8404c84a4d 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -131,12 +131,8 @@ struct ce_task {
* @engine: ptr to the crypto_engine for this flow
* @bounce_iv: buffer which contain the IV
* @ivlen: size of bounce_iv
- * @keylen: keylen for this flow operation
* @complete: completion for the current task on this flow
* @status: set to 1 by interrupt if task is done
- * @method: current method for flow
- * @op_dir: direction (encrypt vs decrypt) of this flow
- * @op_mode: op_mode for this flow
* @t_phy: Physical address of task
* @tl: pointer to the current ce_task for this flow
* @stat_req: number of request done by this flow
@@ -145,12 +141,8 @@ struct sun8i_ce_flow {
struct crypto_engine *engine;
void *bounce_iv;
unsigned int ivlen;
- unsigned int keylen;
struct completion complete;
int status;
- u32 method;
- u32 op_dir;
- u32 op_mode;
dma_addr_t t_phy;
int timeout;
struct ce_task *tl;
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index f222979a5623..84d52fc3a2da 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -390,7 +390,6 @@ int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
break;
default:
dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
if (op->key) {
@@ -416,7 +415,6 @@ int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
dev_dbg(ss->dev, "Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 90997cc509b8..6b301afffd11 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -595,7 +595,7 @@ error_alg:
error_irq:
sun8i_ss_pm_exit(ss);
error_pm:
- sun8i_ss_free_flows(ss, MAXFLOW);
+ sun8i_ss_free_flows(ss, MAXFLOW - 1);
return err;
}
@@ -609,7 +609,7 @@ static int sun8i_ss_remove(struct platform_device *pdev)
debugfs_remove_recursive(ss->dbgfs_dir);
#endif
- sun8i_ss_free_flows(ss, MAXFLOW);
+ sun8i_ss_free_flows(ss, MAXFLOW - 1);
sun8i_ss_pm_exit(ss);
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index a42f8619589d..f7fc0c464125 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -128,12 +128,9 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
struct dynamic_sa_ctl *sa;
int rc;
- if (keylen != AES_KEYSIZE_256 &&
- keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_128) {
- crypto_skcipher_set_flags(cipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != AES_KEYSIZE_256 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_128)
return -EINVAL;
- }
/* Create SA */
if (ctx->sa_in || ctx->sa_out)
@@ -292,19 +289,11 @@ static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
const u8 *key,
unsigned int keylen)
{
- int rc;
-
crypto_sync_skcipher_clear_flags(ctx->sw_cipher.cipher,
CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(ctx->sw_cipher.cipher,
crypto_skcipher_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
- rc = crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
- crypto_skcipher_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
- crypto_skcipher_set_flags(cipher,
- crypto_sync_skcipher_get_flags(ctx->sw_cipher.cipher) &
- CRYPTO_TFM_RES_MASK);
-
- return rc;
+ return crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
}
int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
@@ -379,18 +368,10 @@ static int crypto4xx_aead_setup_fallback(struct crypto4xx_ctx *ctx,
const u8 *key,
unsigned int keylen)
{
- int rc;
-
crypto_aead_clear_flags(ctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(ctx->sw_cipher.aead,
crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
- rc = crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
- crypto_aead_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(cipher,
- crypto_aead_get_flags(ctx->sw_cipher.aead) &
- CRYPTO_TFM_RES_MASK);
-
- return rc;
+ return crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
}
/**
@@ -551,10 +532,8 @@ int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
struct dynamic_sa_ctl *sa;
int rc = 0;
- if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0) {
- crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0)
return -EINVAL;
- }
rc = crypto4xx_aead_setup_fallback(ctx, cipher, key, keylen);
if (rc)
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 7d6b695c4ab3..981de43ea5e2 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -169,7 +169,7 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
int i;
dev->pdr = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
- &dev->pdr_pa, GFP_ATOMIC);
+ &dev->pdr_pa, GFP_KERNEL);
if (!dev->pdr)
return -ENOMEM;
@@ -185,13 +185,13 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
&dev->shadow_sa_pool_pa,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!dev->shadow_sa_pool)
return -ENOMEM;
dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
- &dev->shadow_sr_pool_pa, GFP_ATOMIC);
+ &dev->shadow_sr_pool_pa, GFP_KERNEL);
if (!dev->shadow_sr_pool)
return -ENOMEM;
for (i = 0; i < PPC4XX_NUM_PD; i++) {
@@ -277,7 +277,7 @@ static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
dev->gdr = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct ce_gd) * PPC4XX_NUM_GD,
- &dev->gdr_pa, GFP_ATOMIC);
+ &dev->gdr_pa, GFP_KERNEL);
if (!dev->gdr)
return -ENOMEM;
@@ -286,7 +286,8 @@ static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
{
- dma_free_coherent(dev->core_dev->device,
+ if (dev->gdr)
+ dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_gd) * PPC4XX_NUM_GD,
dev->gdr, dev->gdr_pa);
}
@@ -354,20 +355,20 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
int i;
- /* alloc memory for scatter descriptor ring */
- dev->sdr = dma_alloc_coherent(dev->core_dev->device,
- sizeof(struct ce_sd) * PPC4XX_NUM_SD,
- &dev->sdr_pa, GFP_ATOMIC);
- if (!dev->sdr)
- return -ENOMEM;
-
dev->scatter_buffer_va =
dma_alloc_coherent(dev->core_dev->device,
PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
- &dev->scatter_buffer_pa, GFP_ATOMIC);
+ &dev->scatter_buffer_pa, GFP_KERNEL);
if (!dev->scatter_buffer_va)
return -ENOMEM;
+ /* alloc memory for scatter descriptor ring */
+ dev->sdr = dma_alloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_sd) * PPC4XX_NUM_SD,
+ &dev->sdr_pa, GFP_KERNEL);
+ if (!dev->sdr)
+ return -ENOMEM;
+
for (i = 0; i < PPC4XX_NUM_SD; i++) {
dev->sdr[i].ptr = dev->scatter_buffer_pa +
PPC4XX_SD_BUFFER_SIZE * i;
@@ -1439,16 +1440,15 @@ static int crypto4xx_probe(struct platform_device *ofdev)
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
ratelimit_default_init(&core_dev->dev->aead_ratelimit);
+ rc = crypto4xx_build_sdr(core_dev->dev);
+ if (rc)
+ goto err_build_sdr;
rc = crypto4xx_build_pdr(core_dev->dev);
if (rc)
- goto err_build_pdr;
+ goto err_build_sdr;
rc = crypto4xx_build_gdr(core_dev->dev);
if (rc)
- goto err_build_pdr;
-
- rc = crypto4xx_build_sdr(core_dev->dev);
- if (rc)
goto err_build_sdr;
/* Init tasklet for bottom half processing */
@@ -1493,7 +1493,6 @@ err_iomap:
err_build_sdr:
crypto4xx_destroy_sdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
-err_build_pdr:
crypto4xx_destroy_pdr(core_dev->dev);
kfree(core_dev->dev);
err_alloc_dev:
diff --git a/drivers/crypto/amlogic/Kconfig b/drivers/crypto/amlogic/Kconfig
index b90850d18965..cf9547602670 100644
--- a/drivers/crypto/amlogic/Kconfig
+++ b/drivers/crypto/amlogic/Kconfig
@@ -1,5 +1,6 @@
config CRYPTO_DEV_AMLOGIC_GXL
tristate "Support for amlogic cryptographic offloader"
+ depends on HAS_IOMEM
default y if ARCH_MESON
select CRYPTO_SKCIPHER
select CRYPTO_ENGINE
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index e589015aac1c..9819dd50fbad 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -366,7 +366,6 @@ int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
break;
default:
dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
if (op->key) {
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index fa05fce1c0de..9d4ead2f7ebb 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -289,7 +289,7 @@ static int meson_crypto_probe(struct platform_device *pdev)
error_alg:
meson_unregister_algs(mc);
error_flow:
- meson_free_chanlist(mc, MAXFLOW);
+ meson_free_chanlist(mc, MAXFLOW - 1);
clk_disable_unprepare(mc->busclk);
return err;
}
@@ -304,7 +304,7 @@ static int meson_crypto_remove(struct platform_device *pdev)
meson_unregister_algs(mc);
- meson_free_chanlist(mc, MAXFLOW);
+ meson_free_chanlist(mc, MAXFLOW - 1);
clk_disable_unprepare(mc->busclk);
return 0;
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 91092504bc96..a6e14491e080 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -37,8 +38,6 @@
#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
-#include <linux/platform_data/crypto-atmel.h>
-#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"
#include "atmel-authenc.h"
@@ -89,7 +88,6 @@
struct atmel_aes_caps {
bool has_dualbuff;
bool has_cfb64;
- bool has_ctr32;
bool has_gcm;
bool has_xts;
bool has_authenc;
@@ -122,6 +120,7 @@ struct atmel_aes_ctr_ctx {
size_t offset;
struct scatterlist src[2];
struct scatterlist dst[2];
+ u32 blocks;
};
struct atmel_aes_gcm_ctx {
@@ -514,8 +513,37 @@ static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
}
}
+static inline struct atmel_aes_ctr_ctx *
+atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
+{
+ return container_of(ctx, struct atmel_aes_ctr_ctx, base);
+}
+
+static void atmel_aes_ctr_update_req_iv(struct atmel_aes_dev *dd)
+{
+ struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+ int i;
+
+ /*
+ * The CTR transfer works in fragments of data of at most 1 MByte because
+ * of the 16-bit CTR counter embedded in the IP. When reaching here,
+ * ctx->blocks contains the number of blocks of the last fragment processed,
+ * so there is no need to explicitly cast it to u16.
+ */
+ for (i = 0; i < ctx->blocks; i++)
+ crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
+
+ memcpy(req->iv, ctx->iv, ivsize);
+}
+
static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
+
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (dd->ctx->is_aead)
atmel_aes_authenc_complete(dd, err);
@@ -524,8 +552,13 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
clk_disable(dd->iclk);
dd->flags &= ~AES_FLAGS_BUSY;
- if (!dd->ctx->is_aead)
- atmel_aes_set_iv_as_last_ciphertext_block(dd);
+ if (!err && !dd->ctx->is_aead &&
+ (rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB) {
+ if ((rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_CTR)
+ atmel_aes_set_iv_as_last_ciphertext_block(dd);
+ else
+ atmel_aes_ctr_update_req_iv(dd);
+ }
if (dd->is_async)
dd->areq->complete(dd->areq, err);
@@ -790,7 +823,6 @@ static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
int err;
memset(&config, 0, sizeof(config));
- config.direction = dir;
config.src_addr_width = addr_width;
config.dst_addr_width = addr_width;
config.src_maxburst = maxburst;
@@ -830,27 +862,6 @@ static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
return 0;
}
-static void atmel_aes_dma_transfer_stop(struct atmel_aes_dev *dd,
- enum dma_transfer_direction dir)
-{
- struct atmel_aes_dma *dma;
-
- switch (dir) {
- case DMA_MEM_TO_DEV:
- dma = &dd->src;
- break;
-
- case DMA_DEV_TO_MEM:
- dma = &dd->dst;
- break;
-
- default:
- return;
- }
-
- dmaengine_terminate_all(dma->chan);
-}
-
static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
struct scatterlist *src,
struct scatterlist *dst,
@@ -909,25 +920,18 @@ static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
return -EINPROGRESS;
output_transfer_stop:
- atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
+ dmaengine_terminate_sync(dd->dst.chan);
unmap:
atmel_aes_unmap(dd);
exit:
return atmel_aes_complete(dd, err);
}
-static void atmel_aes_dma_stop(struct atmel_aes_dev *dd)
-{
- atmel_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV);
- atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
- atmel_aes_unmap(dd);
-}
-
static void atmel_aes_dma_callback(void *data)
{
struct atmel_aes_dev *dd = data;
- atmel_aes_dma_stop(dd);
+ atmel_aes_unmap(dd);
dd->is_async = true;
(void)dd->resume(dd);
}
@@ -1004,19 +1008,14 @@ static int atmel_aes_start(struct atmel_aes_dev *dd)
atmel_aes_transfer_complete);
}
-static inline struct atmel_aes_ctr_ctx *
-atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
-{
- return container_of(ctx, struct atmel_aes_ctr_ctx, base);
-}
-
static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
{
struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
struct skcipher_request *req = skcipher_request_cast(dd->areq);
struct scatterlist *src, *dst;
- u32 ctr, blocks;
size_t datalen;
+ u32 ctr;
+ u16 start, end;
bool use_dma, fragmented = false;
/* Check for transfer completion. */
@@ -1026,29 +1025,19 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
/* Compute data length. */
datalen = req->cryptlen - ctx->offset;
- blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
+ ctx->blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
ctr = be32_to_cpu(ctx->iv[3]);
- if (dd->caps.has_ctr32) {
- /* Check 32bit counter overflow. */
- u32 start = ctr;
- u32 end = start + blocks - 1;
-
- if (end < start) {
- ctr |= 0xffffffff;
- datalen = AES_BLOCK_SIZE * -start;
- fragmented = true;
- }
- } else {
- /* Check 16bit counter overflow. */
- u16 start = ctr & 0xffff;
- u16 end = start + (u16)blocks - 1;
-
- if (blocks >> 16 || end < start) {
- ctr |= 0xffff;
- datalen = AES_BLOCK_SIZE * (0x10000-start);
- fragmented = true;
- }
+
+ /* Check 16bit counter overflow. */
+ start = ctr & 0xffff;
+ end = start + ctx->blocks - 1;
+
+ if (ctx->blocks >> 16 || end < start) {
+ ctr |= 0xffff;
+ datalen = AES_BLOCK_SIZE * (0x10000 - start);
+ fragmented = true;
}
+
use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);
/* Jump to offset. */
@@ -1131,7 +1120,8 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
rctx = skcipher_request_ctx(req);
rctx->mode = mode;
- if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
+ if ((mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB &&
+ !(mode & AES_FLAGS_ENCRYPT) && req->src == req->dst) {
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
if (req->cryptlen >= ivsize)
@@ -1150,10 +1140,8 @@ static int atmel_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (keylen != AES_KEYSIZE_128 &&
keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256) {
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ keylen != AES_KEYSIZE_256)
return -EINVAL;
- }
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
@@ -1275,12 +1263,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "ecb(aes)",
.base.cra_driver_name = "atmel-ecb-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1292,12 +1276,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "atmel-cbc-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1310,12 +1290,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "ofb(aes)",
.base.cra_driver_name = "atmel-ofb-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1328,12 +1304,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb(aes)",
.base.cra_driver_name = "atmel-cfb-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1346,12 +1318,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb32(aes)",
.base.cra_driver_name = "atmel-cfb32-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB32_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1364,12 +1332,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb16(aes)",
.base.cra_driver_name = "atmel-cfb16-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB16_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1382,12 +1346,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb8(aes)",
.base.cra_driver_name = "atmel-cfb8-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB8_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1400,12 +1360,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "atmel-ctr-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctr_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_ctr_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1420,12 +1376,8 @@ static struct skcipher_alg aes_algs[] = {
static struct skcipher_alg aes_cfb64_alg = {
.base.cra_name = "cfb64(aes)",
.base.cra_driver_name = "atmel-cfb64-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB64_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1762,10 +1714,8 @@ static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
if (keylen != AES_KEYSIZE_256 &&
keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_128) {
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ keylen != AES_KEYSIZE_128)
return -EINVAL;
- }
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
@@ -1776,21 +1726,7 @@ static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- /* Same as crypto_gcm_authsize() from crypto/gcm.c */
- switch (authsize) {
- case 4:
- case 8:
- case 12:
- case 13:
- case 14:
- case 15:
- case 16:
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
+ return crypto_gcm_check_authsize(authsize);
}
static int atmel_aes_gcm_encrypt(struct aead_request *req)
@@ -1825,12 +1761,8 @@ static struct aead_alg aes_gcm_alg = {
.base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "atmel-gcm-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct atmel_aes_gcm_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
};
@@ -1947,12 +1879,8 @@ static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm)
static struct skcipher_alg aes_xts_alg = {
.base.cra_name = "xts(aes)",
.base.cra_driver_name = "atmel-xts-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
@@ -2113,7 +2041,6 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
{
struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_authenc_keys keys;
- u32 flags;
int err;
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
@@ -2123,11 +2050,9 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
goto badkey;
/* Save auth key. */
- flags = crypto_aead_get_flags(tfm);
err = atmel_sha_authenc_setkey(ctx->auth,
keys.authkey, keys.authkeylen,
- &flags);
- crypto_aead_set_flags(tfm, flags & CRYPTO_TFM_RES_MASK);
+ crypto_aead_get_flags(tfm));
if (err) {
memzero_explicit(&keys, sizeof(keys));
return err;
@@ -2141,7 +2066,6 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
return 0;
badkey:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -2252,12 +2176,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha1-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
{
@@ -2272,12 +2192,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha224),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha224-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
{
@@ -2292,12 +2208,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha256-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
{
@@ -2312,12 +2224,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha384),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha384-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
{
@@ -2332,12 +2240,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha512),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha512-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
};
@@ -2364,47 +2268,30 @@ static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
free_page((unsigned long)dd->buf);
}
-static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
+static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
{
- struct at_dma_slave *sl = slave;
-
- if (sl && sl->dma_dev == chan->device->dev) {
- chan->private = sl;
- return true;
- } else {
- return false;
- }
-}
-
-static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
- struct crypto_platform_data *pdata)
-{
- struct at_dma_slave *slave;
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
+ int ret;
/* Try to grab 2 DMA channels */
- slave = &pdata->dma_slave->rxdata;
- dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
- slave, dd->dev, "tx");
- if (!dd->src.chan)
+ dd->src.chan = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->src.chan)) {
+ ret = PTR_ERR(dd->src.chan);
goto err_dma_in;
+ }
- slave = &pdata->dma_slave->txdata;
- dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
- slave, dd->dev, "rx");
- if (!dd->dst.chan)
+ dd->dst.chan = dma_request_chan(dd->dev, "rx");
+ if (IS_ERR(dd->dst.chan)) {
+ ret = PTR_ERR(dd->dst.chan);
goto err_dma_out;
+ }
return 0;
err_dma_out:
dma_release_channel(dd->src.chan);
err_dma_in:
- dev_warn(dd->dev, "no DMA channel available\n");
- return -ENODEV;
+ dev_err(dd->dev, "no DMA channel available\n");
+ return ret;
}
static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
@@ -2469,29 +2356,45 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
crypto_unregister_skcipher(&aes_algs[i]);
}
+static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
+{
+ alg->cra_flags = CRYPTO_ALG_ASYNC;
+ alg->cra_alignmask = 0xf;
+ alg->cra_priority = ATMEL_AES_PRIORITY;
+ alg->cra_module = THIS_MODULE;
+}
+
static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
int err, i, j;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+ atmel_aes_crypto_alg_init(&aes_algs[i].base);
+
err = crypto_register_skcipher(&aes_algs[i]);
if (err)
goto err_aes_algs;
}
if (dd->caps.has_cfb64) {
+ atmel_aes_crypto_alg_init(&aes_cfb64_alg.base);
+
err = crypto_register_skcipher(&aes_cfb64_alg);
if (err)
goto err_aes_cfb64_alg;
}
if (dd->caps.has_gcm) {
+ atmel_aes_crypto_alg_init(&aes_gcm_alg.base);
+
err = crypto_register_aead(&aes_gcm_alg);
if (err)
goto err_aes_gcm_alg;
}
if (dd->caps.has_xts) {
+ atmel_aes_crypto_alg_init(&aes_xts_alg.base);
+
err = crypto_register_skcipher(&aes_xts_alg);
if (err)
goto err_aes_xts_alg;
@@ -2500,6 +2403,8 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (dd->caps.has_authenc) {
for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) {
+ atmel_aes_crypto_alg_init(&aes_authenc_algs[i].base);
+
err = crypto_register_aead(&aes_authenc_algs[i]);
if (err)
goto err_aes_authenc_alg;
@@ -2533,7 +2438,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
dd->caps.has_dualbuff = 0;
dd->caps.has_cfb64 = 0;
- dd->caps.has_ctr32 = 0;
dd->caps.has_gcm = 0;
dd->caps.has_xts = 0;
dd->caps.has_authenc = 0;
@@ -2544,7 +2448,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
case 0x500:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
- dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
dd->caps.has_xts = 1;
dd->caps.has_authenc = 1;
@@ -2553,7 +2456,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
case 0x200:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
- dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
dd->caps.max_burst_size = 4;
break;
@@ -2577,65 +2479,18 @@ static const struct of_device_id atmel_aes_dt_ids[] = {
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);
-
-static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct crypto_platform_data *pdata;
-
- if (!np) {
- dev_err(&pdev->dev, "device node not found\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->dma_slave = devm_kzalloc(&pdev->dev,
- sizeof(*(pdata->dma_slave)),
- GFP_KERNEL);
- if (!pdata->dma_slave) {
- devm_kfree(&pdev->dev, pdata);
- return ERR_PTR(-ENOMEM);
- }
-
- return pdata;
-}
-#else
-static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
-{
- return ERR_PTR(-EINVAL);
-}
#endif
static int atmel_aes_probe(struct platform_device *pdev)
{
struct atmel_aes_dev *aes_dd;
- struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *aes_res;
int err;
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- pdata = atmel_aes_of_init(pdev);
- if (IS_ERR(pdata)) {
- err = PTR_ERR(pdata);
- goto aes_dd_err;
- }
- }
-
- if (!pdata->dma_slave) {
- err = -ENXIO;
- goto aes_dd_err;
- }
-
aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
- if (aes_dd == NULL) {
- err = -ENOMEM;
- goto aes_dd_err;
- }
+ if (!aes_dd)
+ return -ENOMEM;
aes_dd->dev = dev;
@@ -2656,7 +2511,7 @@ static int atmel_aes_probe(struct platform_device *pdev)
if (!aes_res) {
dev_err(dev, "no MEM resource info\n");
err = -ENODEV;
- goto res_err;
+ goto err_tasklet_kill;
}
aes_dd->phys_base = aes_res->start;
@@ -2664,14 +2519,14 @@ static int atmel_aes_probe(struct platform_device *pdev)
aes_dd->irq = platform_get_irq(pdev, 0);
if (aes_dd->irq < 0) {
err = aes_dd->irq;
- goto res_err;
+ goto err_tasklet_kill;
}
err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
IRQF_SHARED, "atmel-aes", aes_dd);
if (err) {
dev_err(dev, "unable to request aes irq.\n");
- goto res_err;
+ goto err_tasklet_kill;
}
/* Initializing the clock */
@@ -2679,40 +2534,40 @@ static int atmel_aes_probe(struct platform_device *pdev)
if (IS_ERR(aes_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(aes_dd->iclk);
- goto res_err;
+ goto err_tasklet_kill;
}
aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
if (IS_ERR(aes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
err = PTR_ERR(aes_dd->io_base);
- goto res_err;
+ goto err_tasklet_kill;
}
err = clk_prepare(aes_dd->iclk);
if (err)
- goto res_err;
+ goto err_tasklet_kill;
err = atmel_aes_hw_version_init(aes_dd);
if (err)
- goto iclk_unprepare;
+ goto err_iclk_unprepare;
atmel_aes_get_cap(aes_dd);
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
err = -EPROBE_DEFER;
- goto iclk_unprepare;
+ goto err_iclk_unprepare;
}
#endif
err = atmel_aes_buff_init(aes_dd);
if (err)
- goto err_aes_buff;
+ goto err_iclk_unprepare;
- err = atmel_aes_dma_init(aes_dd, pdata);
+ err = atmel_aes_dma_init(aes_dd);
if (err)
- goto err_aes_dma;
+ goto err_buff_cleanup;
spin_lock(&atmel_aes.lock);
list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
@@ -2733,17 +2588,13 @@ err_algs:
list_del(&aes_dd->list);
spin_unlock(&atmel_aes.lock);
atmel_aes_dma_cleanup(aes_dd);
-err_aes_dma:
+err_buff_cleanup:
atmel_aes_buff_cleanup(aes_dd);
-err_aes_buff:
-iclk_unprepare:
+err_iclk_unprepare:
clk_unprepare(aes_dd->iclk);
-res_err:
+err_tasklet_kill:
tasklet_kill(&aes_dd->done_task);
tasklet_kill(&aes_dd->queue_task);
-aes_dd_err:
- if (err != -EPROBE_DEFER)
- dev_err(dev, "initialization failed.\n");
return err;
}
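
With has_ctr32 removed in the hunks above, the Atmel CTR path always fragments transfers on the 16-bit counter embedded in the IP, and atmel_aes_ctr_update_req_iv() later advances the request IV by ctx->blocks. A small standalone sketch of the same overflow arithmetic (a hypothetical helper, not driver code) makes the fragment-length computation explicit:

    #include <linux/kernel.h>   /* DIV_ROUND_UP */
    #include <crypto/aes.h>     /* AES_BLOCK_SIZE */

    /*
     * Returns how many bytes of @datalen can be processed before the low
     * 16 bits of @ctr wrap, and sets *fragmented when the request must be
     * split, mirroring the check in atmel_aes_ctr_transfer() above.
     */
    static size_t ctr16_fragment_len(u32 ctr, size_t datalen, bool *fragmented)
    {
            u32 blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
            u16 start = ctr & 0xffff;
            u16 end = start + blocks - 1;

            *fragmented = false;
            if (blocks >> 16 || end < start) {
                    *fragmented = true;
                    return (size_t)AES_BLOCK_SIZE * (0x10000 - start);
            }
            return datalen;
    }

Once the final fragment completes, incrementing the saved IV once per processed block (as the new atmel_aes_ctr_update_req_iv() does with crypto_inc()) leaves req->iv pointing at the counter value a software CTR implementation would have produced.
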
diff --git a/drivers/crypto/atmel-authenc.h b/drivers/crypto/atmel-authenc.h
index d6de810df44f..c6530a1c8c20 100644
--- a/drivers/crypto/atmel-authenc.h
+++ b/drivers/crypto/atmel-authenc.h
@@ -30,8 +30,7 @@ unsigned int atmel_sha_authenc_get_reqsize(void);
struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode);
void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth);
int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth,
- const u8 *key, unsigned int keylen,
- u32 *flags);
+ const u8 *key, unsigned int keylen, u32 flags);
int atmel_sha_authenc_schedule(struct ahash_request *req,
struct atmel_sha_authenc_ctx *auth,
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 8ea0e4bcde0d..e536e2a6bbd8 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -36,10 +37,11 @@
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
-#include <linux/platform_data/crypto-atmel.h>
#include "atmel-sha-regs.h"
#include "atmel-authenc.h"
+#define ATMEL_SHA_PRIORITY 300
+
/* SHA flags */
#define SHA_FLAGS_BUSY BIT(0)
#define SHA_FLAGS_FINAL BIT(1)
@@ -134,7 +136,6 @@ struct atmel_sha_dev {
void __iomem *io_base;
spinlock_t lock;
- int err;
struct tasklet_struct done_task;
struct tasklet_struct queue_task;
@@ -851,7 +852,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
0, final);
}
-static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
+static void atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
{
struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
@@ -870,8 +871,6 @@ static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
ctx->block_size, DMA_TO_DEVICE);
}
-
- return 0;
}
static int atmel_sha_update_req(struct atmel_sha_dev *dd)
@@ -1025,7 +1024,6 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
if (!(SHA_FLAGS_INIT & dd->flags)) {
atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
dd->flags |= SHA_FLAGS_INIT;
- dd->err = 0;
}
return 0;
@@ -1036,9 +1034,13 @@ static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
}
-static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
+static int atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
{
- atmel_sha_hw_init(dd);
+ int err;
+
+ err = atmel_sha_hw_init(dd);
+ if (err)
+ return err;
dd->hw_version = atmel_sha_get_version(dd);
@@ -1046,6 +1048,8 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
"version: 0x%x\n", dd->hw_version);
clk_disable(dd->iclk);
+
+ return 0;
}
static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
@@ -1248,130 +1252,66 @@ static int atmel_sha_cra_init(struct crypto_tfm *tfm)
return 0;
}
+static void atmel_sha_alg_init(struct ahash_alg *alg)
+{
+ alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
+ alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_ctx);
+ alg->halg.base.cra_module = THIS_MODULE;
+ alg->halg.base.cra_init = atmel_sha_cra_init;
+
+ alg->halg.statesize = sizeof(struct atmel_sha_reqctx);
+
+ alg->init = atmel_sha_init;
+ alg->update = atmel_sha_update;
+ alg->final = atmel_sha_final;
+ alg->finup = atmel_sha_finup;
+ alg->digest = atmel_sha_digest;
+ alg->export = atmel_sha_export;
+ alg->import = atmel_sha_import;
+}
+
static struct ahash_alg sha_1_256_algs[] = {
{
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha1",
- .cra_driver_name = "atmel-sha1",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha1",
+ .halg.base.cra_driver_name = "atmel-sha1",
+ .halg.base.cra_blocksize = SHA1_BLOCK_SIZE,
+
+ .halg.digestsize = SHA1_DIGEST_SIZE,
},
{
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "atmel-sha256",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha256",
+ .halg.base.cra_driver_name = "atmel-sha256",
+ .halg.base.cra_blocksize = SHA256_BLOCK_SIZE,
+
+ .halg.digestsize = SHA256_DIGEST_SIZE,
},
};
static struct ahash_alg sha_224_alg = {
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name = "atmel-sha224",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha224",
+ .halg.base.cra_driver_name = "atmel-sha224",
+ .halg.base.cra_blocksize = SHA224_BLOCK_SIZE,
+
+ .halg.digestsize = SHA224_DIGEST_SIZE,
};
static struct ahash_alg sha_384_512_algs[] = {
{
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA384_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha384",
- .cra_driver_name = "atmel-sha384",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0x3,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha384",
+ .halg.base.cra_driver_name = "atmel-sha384",
+ .halg.base.cra_blocksize = SHA384_BLOCK_SIZE,
+ .halg.base.cra_alignmask = 0x3,
+
+ .halg.digestsize = SHA384_DIGEST_SIZE,
},
{
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha512",
- .cra_driver_name = "atmel-sha512",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0x3,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha512",
+ .halg.base.cra_driver_name = "atmel-sha512",
+ .halg.base.cra_blocksize = SHA512_BLOCK_SIZE,
+ .halg.base.cra_alignmask = 0x3,
+
+ .halg.digestsize = SHA512_DIGEST_SIZE,
},
};
@@ -1395,10 +1335,6 @@ static int atmel_sha_done(struct atmel_sha_dev *dd)
if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
atmel_sha_update_dma_stop(dd);
- if (dd->err) {
- err = dd->err;
- goto finish;
- }
}
if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
/* hash or semi-hash ready */
@@ -1493,7 +1429,6 @@ static void atmel_sha_dma_callback2(void *data)
struct scatterlist *sg;
int nents;
- dmaengine_terminate_all(dma->chan);
dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
sg = dma->sg;
@@ -1918,12 +1853,7 @@ static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
{
struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
- if (atmel_sha_hmac_key_set(&hmac->hkey, key, keylen)) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
- return 0;
+ return atmel_sha_hmac_key_set(&hmac->hkey, key, keylen);
}
static int atmel_sha_hmac_init(struct ahash_request *req)
@@ -2084,131 +2014,61 @@ static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm)
atmel_sha_hmac_key_release(&hmac->hkey);
}
+static void atmel_sha_hmac_alg_init(struct ahash_alg *alg)
+{
+ alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
+ alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx);
+ alg->halg.base.cra_module = THIS_MODULE;
+ alg->halg.base.cra_init = atmel_sha_hmac_cra_init;
+ alg->halg.base.cra_exit = atmel_sha_hmac_cra_exit;
+
+ alg->halg.statesize = sizeof(struct atmel_sha_reqctx);
+
+ alg->init = atmel_sha_hmac_init;
+ alg->update = atmel_sha_update;
+ alg->final = atmel_sha_final;
+ alg->digest = atmel_sha_hmac_digest;
+ alg->setkey = atmel_sha_hmac_setkey;
+ alg->export = atmel_sha_export;
+ alg->import = atmel_sha_import;
+}
+
static struct ahash_alg sha_hmac_algs[] = {
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha1)",
- .cra_driver_name = "atmel-hmac-sha1",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha1)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha1",
+ .halg.base.cra_blocksize = SHA1_BLOCK_SIZE,
+
+ .halg.digestsize = SHA1_DIGEST_SIZE,
},
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha224)",
- .cra_driver_name = "atmel-hmac-sha224",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha224)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha224",
+ .halg.base.cra_blocksize = SHA224_BLOCK_SIZE,
+
+ .halg.digestsize = SHA224_DIGEST_SIZE,
},
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha256)",
- .cra_driver_name = "atmel-hmac-sha256",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha256)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha256",
+ .halg.base.cra_blocksize = SHA256_BLOCK_SIZE,
+
+ .halg.digestsize = SHA256_DIGEST_SIZE,
},
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA384_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha384)",
- .cra_driver_name = "atmel-hmac-sha384",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha384)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha384",
+ .halg.base.cra_blocksize = SHA384_BLOCK_SIZE,
+
+ .halg.digestsize = SHA384_DIGEST_SIZE,
},
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha512)",
- .cra_driver_name = "atmel-hmac-sha512",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha512)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha512",
+ .halg.base.cra_blocksize = SHA512_BLOCK_SIZE,
+
+ .halg.digestsize = SHA512_DIGEST_SIZE,
},
};
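The sha_hmac_algs[] table above now carries only the per-algorithm fields (names, block size, digest size); everything shared is filled in by atmel_sha_hmac_alg_init() right before registration. A minimal sketch of that table-plus-helper pattern, with my_alg_init()/my_register() as illustrative names rather than code from this patch:

#include <crypto/internal/hash.h>

static void my_alg_init(struct ahash_alg *alg)
{
	/* shared fields, filled once for every table entry */
	alg->halg.base.cra_priority = 300;
	alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
	alg->halg.base.cra_module = THIS_MODULE;
}

static int my_register(struct ahash_alg *algs, unsigned int n)
{
	unsigned int i;
	int err;

	for (i = 0; i < n; i++) {
		my_alg_init(&algs[i]);		/* fill shared fields first */
		err = crypto_register_ahash(&algs[i]);
		if (err)
			return err;
	}
	return 0;
}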
@@ -2347,18 +2207,13 @@ void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth)
EXPORT_SYMBOL_GPL(atmel_sha_authenc_free);
int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth,
- const u8 *key, unsigned int keylen,
- u32 *flags)
+ const u8 *key, unsigned int keylen, u32 flags)
{
struct crypto_ahash *tfm = auth->tfm;
- int err;
crypto_ahash_clear_flags(tfm, CRYPTO_TFM_REQ_MASK);
- crypto_ahash_set_flags(tfm, *flags & CRYPTO_TFM_REQ_MASK);
- err = crypto_ahash_setkey(tfm, key, keylen);
- *flags = crypto_ahash_get_flags(tfm);
-
- return err;
+ crypto_ahash_set_flags(tfm, flags & CRYPTO_TFM_REQ_MASK);
+ return crypto_ahash_setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_setkey);
@@ -2561,12 +2416,16 @@ static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
int err, i, j;
for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
+ atmel_sha_alg_init(&sha_1_256_algs[i]);
+
err = crypto_register_ahash(&sha_1_256_algs[i]);
if (err)
goto err_sha_1_256_algs;
}
if (dd->caps.has_sha224) {
+ atmel_sha_alg_init(&sha_224_alg);
+
err = crypto_register_ahash(&sha_224_alg);
if (err)
goto err_sha_224_algs;
@@ -2574,6 +2433,8 @@ static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
if (dd->caps.has_sha_384_512) {
for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
+ atmel_sha_alg_init(&sha_384_512_algs[i]);
+
err = crypto_register_ahash(&sha_384_512_algs[i]);
if (err)
goto err_sha_384_512_algs;
@@ -2582,6 +2443,8 @@ static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
if (dd->caps.has_hmac) {
for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++) {
+ atmel_sha_hmac_alg_init(&sha_hmac_algs[i]);
+
err = crypto_register_ahash(&sha_hmac_algs[i]);
if (err)
goto err_sha_hmac_algs;
@@ -2608,35 +2471,14 @@ err_sha_1_256_algs:
return err;
}
-static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
+static int atmel_sha_dma_init(struct atmel_sha_dev *dd)
{
- struct at_dma_slave *sl = slave;
-
- if (sl && sl->dma_dev == chan->device->dev) {
- chan->private = sl;
- return true;
- } else {
- return false;
+ dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->dma_lch_in.chan)) {
+ dev_err(dd->dev, "DMA channel is not available\n");
+ return PTR_ERR(dd->dma_lch_in.chan);
}
-}
-static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
- struct crypto_platform_data *pdata)
-{
- dma_cap_mask_t mask_in;
-
- /* Try to grab DMA channel */
- dma_cap_zero(mask_in);
- dma_cap_set(DMA_SLAVE, mask_in);
-
- dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
- atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
- if (!dd->dma_lch_in.chan) {
- dev_warn(dd->dev, "no DMA channel available\n");
- return -ENODEV;
- }
-
- dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
SHA_REG_DIN(0);
dd->dma_lch_in.dma_conf.src_maxburst = 1;
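The hunk above swaps the filter-based dma_request_slave_channel_compat() lookup for dma_request_chan(), which resolves the channel by its "tx" name from the DT binding and returns a real error code that the caller can propagate. A minimal sketch of that calling pattern, with example_request_tx() as an illustrative name:

#include <linux/dmaengine.h>
#include <linux/err.h>

static int example_request_tx(struct device *dev, struct dma_chan **chan)
{
	*chan = dma_request_chan(dev, "tx");
	if (IS_ERR(*chan))
		return PTR_ERR(*chan);	/* may be -EPROBE_DEFER, not just -ENODEV */

	return 0;
}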
@@ -2709,49 +2551,18 @@ static const struct of_device_id atmel_sha_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
-
-static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct crypto_platform_data *pdata;
-
- if (!np) {
- dev_err(&pdev->dev, "device node not found\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->dma_slave = devm_kzalloc(&pdev->dev,
- sizeof(*(pdata->dma_slave)),
- GFP_KERNEL);
- if (!pdata->dma_slave)
- return ERR_PTR(-ENOMEM);
-
- return pdata;
-}
-#else /* CONFIG_OF */
-static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
-{
- return ERR_PTR(-EINVAL);
-}
#endif
static int atmel_sha_probe(struct platform_device *pdev)
{
struct atmel_sha_dev *sha_dd;
- struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *sha_res;
int err;
sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
- if (sha_dd == NULL) {
- err = -ENOMEM;
- goto sha_dd_err;
- }
+ if (!sha_dd)
+ return -ENOMEM;
sha_dd->dev = dev;
@@ -2772,7 +2583,7 @@ static int atmel_sha_probe(struct platform_device *pdev)
if (!sha_res) {
dev_err(dev, "no MEM resource info\n");
err = -ENODEV;
- goto res_err;
+ goto err_tasklet_kill;
}
sha_dd->phys_base = sha_res->start;
@@ -2780,14 +2591,14 @@ static int atmel_sha_probe(struct platform_device *pdev)
sha_dd->irq = platform_get_irq(pdev, 0);
if (sha_dd->irq < 0) {
err = sha_dd->irq;
- goto res_err;
+ goto err_tasklet_kill;
}
err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
IRQF_SHARED, "atmel-sha", sha_dd);
if (err) {
dev_err(dev, "unable to request sha irq.\n");
- goto res_err;
+ goto err_tasklet_kill;
}
/* Initializing the clock */
@@ -2795,41 +2606,30 @@ static int atmel_sha_probe(struct platform_device *pdev)
if (IS_ERR(sha_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(sha_dd->iclk);
- goto res_err;
+ goto err_tasklet_kill;
}
sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
if (IS_ERR(sha_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
err = PTR_ERR(sha_dd->io_base);
- goto res_err;
+ goto err_tasklet_kill;
}
err = clk_prepare(sha_dd->iclk);
if (err)
- goto res_err;
+ goto err_tasklet_kill;
- atmel_sha_hw_version_init(sha_dd);
+ err = atmel_sha_hw_version_init(sha_dd);
+ if (err)
+ goto err_iclk_unprepare;
atmel_sha_get_cap(sha_dd);
if (sha_dd->caps.has_dma) {
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- pdata = atmel_sha_of_init(pdev);
- if (IS_ERR(pdata)) {
- dev_err(&pdev->dev, "platform data not available\n");
- err = PTR_ERR(pdata);
- goto iclk_unprepare;
- }
- }
- if (!pdata->dma_slave) {
- err = -ENXIO;
- goto iclk_unprepare;
- }
- err = atmel_sha_dma_init(sha_dd, pdata);
+ err = atmel_sha_dma_init(sha_dd);
if (err)
- goto err_sha_dma;
+ goto err_iclk_unprepare;
dev_info(dev, "using %s for DMA transfers\n",
dma_chan_name(sha_dd->dma_lch_in.chan));
@@ -2855,14 +2655,11 @@ err_algs:
spin_unlock(&atmel_sha.lock);
if (sha_dd->caps.has_dma)
atmel_sha_dma_cleanup(sha_dd);
-err_sha_dma:
-iclk_unprepare:
+err_iclk_unprepare:
clk_unprepare(sha_dd->iclk);
-res_err:
+err_tasklet_kill:
tasklet_kill(&sha_dd->queue_task);
tasklet_kill(&sha_dd->done_task);
-sha_dd_err:
- dev_err(dev, "initialization failed.\n");
return err;
}
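The reworked probe error path names each goto label after the first cleanup it performs (err_tasklet_kill, err_iclk_unprepare) and simply returns when nothing has been set up yet. A compressed, self-contained sketch of that convention; all names and the clock/resource choices are illustrative, not taken from the driver:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;
	struct clk *iclk;
	int err;

	iclk = devm_clk_get(&pdev->dev, "example_clk");	/* clock name is illustrative */
	if (IS_ERR(iclk))
		return PTR_ERR(iclk);			/* nothing to unwind yet */

	err = clk_prepare(iclk);
	if (err)
		return err;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base)) {
		err = PTR_ERR(base);
		goto err_clk_unprepare;			/* label named after its cleanup */
	}

	return 0;

err_clk_unprepare:
	clk_unprepare(iclk);
	return err;
}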
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 0c1f79b30fc1..ed40dbb98c6b 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -30,31 +31,32 @@
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
-#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
-#include <crypto/hash.h>
-#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
-#include <linux/platform_data/crypto-atmel.h>
#include "atmel-tdes-regs.h"
+#define ATMEL_TDES_PRIORITY 300
+
/* TDES flags */
-#define TDES_FLAGS_MODE_MASK 0x00ff
-#define TDES_FLAGS_ENCRYPT BIT(0)
-#define TDES_FLAGS_CBC BIT(1)
-#define TDES_FLAGS_CFB BIT(2)
-#define TDES_FLAGS_CFB8 BIT(3)
-#define TDES_FLAGS_CFB16 BIT(4)
-#define TDES_FLAGS_CFB32 BIT(5)
-#define TDES_FLAGS_CFB64 BIT(6)
-#define TDES_FLAGS_OFB BIT(7)
-
-#define TDES_FLAGS_INIT BIT(16)
-#define TDES_FLAGS_FAST BIT(17)
-#define TDES_FLAGS_BUSY BIT(18)
-#define TDES_FLAGS_DMA BIT(19)
+/* Reserve bits [17:16], [13:12], [2:0] for the TDES Mode Register */
+#define TDES_FLAGS_ENCRYPT TDES_MR_CYPHER_ENC
+#define TDES_FLAGS_OPMODE_MASK (TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK)
+#define TDES_FLAGS_ECB TDES_MR_OPMOD_ECB
+#define TDES_FLAGS_CBC TDES_MR_OPMOD_CBC
+#define TDES_FLAGS_OFB TDES_MR_OPMOD_OFB
+#define TDES_FLAGS_CFB64 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_64b)
+#define TDES_FLAGS_CFB32 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_32b)
+#define TDES_FLAGS_CFB16 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_16b)
+#define TDES_FLAGS_CFB8 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_8b)
+
+#define TDES_FLAGS_MODE_MASK (TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT)
+
+#define TDES_FLAGS_INIT BIT(3)
+#define TDES_FLAGS_FAST BIT(4)
+#define TDES_FLAGS_BUSY BIT(5)
+#define TDES_FLAGS_DMA BIT(6)
#define ATMEL_TDES_QUEUE_LENGTH 50
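The new TDES_FLAGS_* definitions above are no longer arbitrary bit positions: each flag carries the exact value of the Mode Register field it selects, so atmel_tdes_write_ctrl() can program the register with one mask-and-or instead of a chain of tests. A generic sketch of that encoding trick; the EX_* names are illustrative and not part of the patch:

#include <linux/bits.h>
#include <linux/types.h>

#define EX_MR_OPMOD_MASK	GENMASK(13, 12)	/* hardware Mode Register field */
#define EX_FLAGS_ECB		(0x0 << 12)	/* flag value == field value */
#define EX_FLAGS_CBC		(0x1 << 12)
#define EX_FLAGS_MODE_MASK	EX_MR_OPMOD_MASK

static inline u32 ex_mode_reg(u32 base, unsigned long flags)
{
	/* a single mask-and-or replaces the old if/else chain over each mode */
	return base | (flags & EX_FLAGS_MODE_MASK);
}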
@@ -100,7 +102,6 @@ struct atmel_tdes_dev {
int irq;
unsigned long flags;
- int err;
spinlock_t lock;
struct crypto_queue queue;
@@ -189,7 +190,7 @@ static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
}
static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
- u32 *value, int count)
+ const u32 *value, int count)
{
for (; count--; value++, offset += 4)
atmel_tdes_write(dd, offset, *value);
@@ -226,7 +227,6 @@ static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
if (!(dd->flags & TDES_FLAGS_INIT)) {
atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
dd->flags |= TDES_FLAGS_INIT;
- dd->err = 0;
}
return 0;
@@ -237,9 +237,13 @@ static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
}
-static void atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
+static int atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
{
- atmel_tdes_hw_init(dd);
+ int err;
+
+ err = atmel_tdes_hw_init(dd);
+ if (err)
+ return err;
dd->hw_version = atmel_tdes_get_version(dd);
@@ -247,6 +251,8 @@ static void atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
"version: 0x%x\n", dd->hw_version);
clk_disable_unprepare(dd->iclk);
+
+ return 0;
}
static void atmel_tdes_dma_callback(void *data)
@@ -260,7 +266,7 @@ static void atmel_tdes_dma_callback(void *data)
static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
{
int err;
- u32 valcr = 0, valmr = TDES_MR_SMOD_PDC;
+ u32 valmr = TDES_MR_SMOD_PDC;
err = atmel_tdes_hw_init(dd);
@@ -282,36 +288,15 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
valmr |= TDES_MR_TDESMOD_DES;
}
- if (dd->flags & TDES_FLAGS_CBC) {
- valmr |= TDES_MR_OPMOD_CBC;
- } else if (dd->flags & TDES_FLAGS_CFB) {
- valmr |= TDES_MR_OPMOD_CFB;
-
- if (dd->flags & TDES_FLAGS_CFB8)
- valmr |= TDES_MR_CFBS_8b;
- else if (dd->flags & TDES_FLAGS_CFB16)
- valmr |= TDES_MR_CFBS_16b;
- else if (dd->flags & TDES_FLAGS_CFB32)
- valmr |= TDES_MR_CFBS_32b;
- else if (dd->flags & TDES_FLAGS_CFB64)
- valmr |= TDES_MR_CFBS_64b;
- } else if (dd->flags & TDES_FLAGS_OFB) {
- valmr |= TDES_MR_OPMOD_OFB;
- }
-
- if ((dd->flags & TDES_FLAGS_ENCRYPT) || (dd->flags & TDES_FLAGS_OFB))
- valmr |= TDES_MR_CYPHER_ENC;
+ valmr |= dd->flags & TDES_FLAGS_MODE_MASK;
- atmel_tdes_write(dd, TDES_CR, valcr);
atmel_tdes_write(dd, TDES_MR, valmr);
atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
dd->ctx->keylen >> 2);
- if (((dd->flags & TDES_FLAGS_CBC) || (dd->flags & TDES_FLAGS_CFB) ||
- (dd->flags & TDES_FLAGS_OFB)) && dd->req->iv) {
+ if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB)
atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);
- }
return 0;
}
@@ -397,11 +382,11 @@ static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
free_page((unsigned long)dd->buf_in);
}
-static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
- dma_addr_t dma_addr_out, int length)
+static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
+ dma_addr_t dma_addr_in,
+ dma_addr_t dma_addr_out, int length)
{
- struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
- struct atmel_tdes_dev *dd = ctx->dd;
+ struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
int len32;
dd->dma_size = length;
@@ -411,12 +396,19 @@ static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
DMA_TO_DEVICE);
}
- if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB8))
+ switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
+ case TDES_FLAGS_CFB8:
len32 = DIV_ROUND_UP(length, sizeof(u8));
- else if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB16))
+ break;
+
+ case TDES_FLAGS_CFB16:
len32 = DIV_ROUND_UP(length, sizeof(u16));
- else
+ break;
+
+ default:
len32 = DIV_ROUND_UP(length, sizeof(u32));
+ break;
+ }
atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
@@ -433,13 +425,14 @@ static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
return 0;
}
-static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
- dma_addr_t dma_addr_out, int length)
+static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
+ dma_addr_t dma_addr_in,
+ dma_addr_t dma_addr_out, int length)
{
- struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
- struct atmel_tdes_dev *dd = ctx->dd;
+ struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
struct scatterlist sg[2];
struct dma_async_tx_descriptor *in_desc, *out_desc;
+ enum dma_slave_buswidth addr_width;
dd->dma_size = length;
@@ -448,23 +441,23 @@ static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
DMA_TO_DEVICE);
}
- if (dd->flags & TDES_FLAGS_CFB8) {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_1_BYTE;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_1_BYTE;
- } else if (dd->flags & TDES_FLAGS_CFB16) {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_2_BYTES;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_2_BYTES;
- } else {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
+ switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
+ case TDES_FLAGS_CFB8:
+ addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+
+ case TDES_FLAGS_CFB16:
+ addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+
+ default:
+ addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
}
+ dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
+ dd->dma_lch_out.dma_conf.src_addr_width = addr_width;
+
dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
@@ -504,8 +497,6 @@ static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(
- crypto_skcipher_reqtfm(dd->req));
int err, fast = 0, in, out;
size_t count;
dma_addr_t addr_in, addr_out;
@@ -561,9 +552,9 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
dd->total -= count;
if (dd->caps.has_dma)
- err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count);
+ err = atmel_tdes_crypt_dma(dd, addr_in, addr_out, count);
else
- err = atmel_tdes_crypt_pdc(tfm, addr_in, addr_out, count);
+ err = atmel_tdes_crypt_pdc(dd, addr_in, addr_out, count);
if (err && (dd->flags & TDES_FLAGS_FAST)) {
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
@@ -600,12 +591,14 @@ atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
{
struct skcipher_request *req = dd->req;
+ struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
clk_disable_unprepare(dd->iclk);
dd->flags &= ~TDES_FLAGS_BUSY;
- atmel_tdes_set_iv_as_last_ciphertext_block(dd);
+ if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB)
+ atmel_tdes_set_iv_as_last_ciphertext_block(dd);
req->base.complete(&req->base, err);
}
@@ -699,35 +692,44 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
- if (mode & TDES_FLAGS_CFB8) {
+ switch (mode & TDES_FLAGS_OPMODE_MASK) {
+ case TDES_FLAGS_CFB8:
if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB8 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB8_BLOCK_SIZE;
- } else if (mode & TDES_FLAGS_CFB16) {
+ break;
+
+ case TDES_FLAGS_CFB16:
if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB16 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB16_BLOCK_SIZE;
- } else if (mode & TDES_FLAGS_CFB32) {
+ break;
+
+ case TDES_FLAGS_CFB32:
if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB32 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB32_BLOCK_SIZE;
- } else {
+ break;
+
+ default:
if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
pr_err("request size is not exact amount of DES blocks\n");
return -EINVAL;
}
ctx->block_size = DES_BLOCK_SIZE;
+ break;
}
rctx->mode = mode;
- if (!(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
+ if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
+ !(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
if (req->cryptlen >= ivsize)
@@ -739,33 +741,17 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
return atmel_tdes_handle_queue(ctx->dd, req);
}
-static bool atmel_tdes_filter(struct dma_chan *chan, void *slave)
+static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
{
- struct at_dma_slave *sl = slave;
-
- if (sl && sl->dma_dev == chan->device->dev) {
- chan->private = sl;
- return true;
- } else {
- return false;
- }
-}
-
-static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
- struct crypto_platform_data *pdata)
-{
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
+ int ret;
/* Try to grab 2 DMA channels */
- dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
- atmel_tdes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
- if (!dd->dma_lch_in.chan)
+ dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->dma_lch_in.chan)) {
+ ret = PTR_ERR(dd->dma_lch_in.chan);
goto err_dma_in;
+ }
- dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
TDES_IDATA1R;
dd->dma_lch_in.dma_conf.src_maxburst = 1;
@@ -776,12 +762,12 @@ static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
DMA_SLAVE_BUSWIDTH_4_BYTES;
dd->dma_lch_in.dma_conf.device_fc = false;
- dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
- atmel_tdes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
- if (!dd->dma_lch_out.chan)
+ dd->dma_lch_out.chan = dma_request_chan(dd->dev, "rx");
+ if (IS_ERR(dd->dma_lch_out.chan)) {
+ ret = PTR_ERR(dd->dma_lch_out.chan);
goto err_dma_out;
+ }
- dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
TDES_ODATA1R;
dd->dma_lch_out.dma_conf.src_maxburst = 1;
@@ -797,8 +783,8 @@ static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
err_dma_out:
dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
- dev_warn(dd->dev, "no DMA channel available\n");
- return -ENODEV;
+ dev_err(dd->dev, "no DMA channel available\n");
+ return ret;
}
static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
@@ -841,17 +827,17 @@ static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT);
+ return atmel_tdes_crypt(req, TDES_FLAGS_ECB | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, 0);
+ return atmel_tdes_crypt(req, TDES_FLAGS_ECB);
}
static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CBC);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CBC | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
@@ -860,50 +846,47 @@ static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
}
static int atmel_tdes_cfb_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB64 | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cfb_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB64);
}
static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
- TDES_FLAGS_CFB8);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB8 | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB8);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB8);
}
static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
- TDES_FLAGS_CFB16);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB16 | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB16);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB16);
}
static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
- TDES_FLAGS_CFB32);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB32 | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB32);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB32);
}
static int atmel_tdes_ofb_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_OFB);
+ return atmel_tdes_crypt(req, TDES_FLAGS_OFB | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
@@ -925,18 +908,23 @@ static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
return 0;
}
+static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
+{
+ alg->base.cra_priority = ATMEL_TDES_PRIORITY;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC;
+	alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx);
+ alg->base.cra_module = THIS_MODULE;
+
+ alg->init = atmel_tdes_init_tfm;
+}
+
static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "ecb(des)",
.base.cra_driver_name = "atmel-ecb-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = atmel_des_setkey,
@@ -946,14 +934,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cbc(des)",
.base.cra_driver_name = "atmel-cbc-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -964,14 +947,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cfb(des)",
.base.cra_driver_name = "atmel-cfb-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -982,14 +960,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cfb8(des)",
.base.cra_driver_name = "atmel-cfb8-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB8_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1000,14 +973,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cfb16(des)",
.base.cra_driver_name = "atmel-cfb16-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB16_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x1,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1018,14 +986,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cfb32(des)",
.base.cra_driver_name = "atmel-cfb32-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB32_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x3,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1036,14 +999,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "ofb(des)",
.base.cra_driver_name = "atmel-ofb-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1054,14 +1012,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "atmel-ecb-tdes",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = atmel_tdes_setkey,
@@ -1071,14 +1024,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "atmel-cbc-tdes",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = atmel_tdes_setkey,
@@ -1089,14 +1037,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "ofb(des3_ede)",
.base.cra_driver_name = "atmel-ofb-tdes",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = atmel_tdes_setkey,
@@ -1123,8 +1066,6 @@ static void atmel_tdes_done_task(unsigned long data)
else
err = atmel_tdes_crypt_dma_stop(dd);
- err = dd->err ? : err;
-
if (dd->total && !err) {
if (dd->flags & TDES_FLAGS_FAST) {
dd->in_sg = sg_next(dd->in_sg);
@@ -1173,6 +1114,8 @@ static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
int err, i, j;
for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
+ atmel_tdes_skcipher_alg_init(&tdes_algs[i]);
+
err = crypto_register_skcipher(&tdes_algs[i]);
if (err)
goto err_tdes_algs;
@@ -1214,49 +1157,18 @@ static const struct of_device_id atmel_tdes_dt_ids[] = {
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
-
-static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct crypto_platform_data *pdata;
-
- if (!np) {
- dev_err(&pdev->dev, "device node not found\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->dma_slave = devm_kzalloc(&pdev->dev,
- sizeof(*(pdata->dma_slave)),
- GFP_KERNEL);
- if (!pdata->dma_slave)
- return ERR_PTR(-ENOMEM);
-
- return pdata;
-}
-#else /* CONFIG_OF */
-static inline struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
-{
- return ERR_PTR(-EINVAL);
-}
#endif
static int atmel_tdes_probe(struct platform_device *pdev)
{
struct atmel_tdes_dev *tdes_dd;
- struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *tdes_res;
int err;
tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
- if (tdes_dd == NULL) {
- err = -ENOMEM;
- goto tdes_dd_err;
- }
+ if (!tdes_dd)
+ return -ENOMEM;
tdes_dd->dev = dev;
@@ -1277,7 +1189,7 @@ static int atmel_tdes_probe(struct platform_device *pdev)
if (!tdes_res) {
dev_err(dev, "no MEM resource info\n");
err = -ENODEV;
- goto res_err;
+ goto err_tasklet_kill;
}
tdes_dd->phys_base = tdes_res->start;
@@ -1285,14 +1197,14 @@ static int atmel_tdes_probe(struct platform_device *pdev)
tdes_dd->irq = platform_get_irq(pdev, 0);
if (tdes_dd->irq < 0) {
err = tdes_dd->irq;
- goto res_err;
+ goto err_tasklet_kill;
}
err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
IRQF_SHARED, "atmel-tdes", tdes_dd);
if (err) {
dev_err(dev, "unable to request tdes irq.\n");
- goto res_err;
+ goto err_tasklet_kill;
}
/* Initializing the clock */
@@ -1300,41 +1212,30 @@ static int atmel_tdes_probe(struct platform_device *pdev)
if (IS_ERR(tdes_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(tdes_dd->iclk);
- goto res_err;
+ goto err_tasklet_kill;
}
tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
if (IS_ERR(tdes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
err = PTR_ERR(tdes_dd->io_base);
- goto res_err;
+ goto err_tasklet_kill;
}
- atmel_tdes_hw_version_init(tdes_dd);
+ err = atmel_tdes_hw_version_init(tdes_dd);
+ if (err)
+ goto err_tasklet_kill;
atmel_tdes_get_cap(tdes_dd);
err = atmel_tdes_buff_init(tdes_dd);
if (err)
- goto err_tdes_buff;
+ goto err_tasklet_kill;
if (tdes_dd->caps.has_dma) {
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- pdata = atmel_tdes_of_init(pdev);
- if (IS_ERR(pdata)) {
- dev_err(&pdev->dev, "platform data not available\n");
- err = PTR_ERR(pdata);
- goto err_pdata;
- }
- }
- if (!pdata->dma_slave) {
- err = -ENXIO;
- goto err_pdata;
- }
- err = atmel_tdes_dma_init(tdes_dd, pdata);
+ err = atmel_tdes_dma_init(tdes_dd);
if (err)
- goto err_tdes_dma;
+ goto err_buff_cleanup;
dev_info(dev, "using %s, %s for DMA transfers\n",
dma_chan_name(tdes_dd->dma_lch_in.chan),
@@ -1359,15 +1260,11 @@ err_algs:
spin_unlock(&atmel_tdes.lock);
if (tdes_dd->caps.has_dma)
atmel_tdes_dma_cleanup(tdes_dd);
-err_tdes_dma:
-err_pdata:
+err_buff_cleanup:
atmel_tdes_buff_cleanup(tdes_dd);
-err_tdes_buff:
-res_err:
+err_tasklet_kill:
tasklet_kill(&tdes_dd->done_task);
tasklet_kill(&tdes_dd->queue_task);
-tdes_dd_err:
- dev_err(dev, "initialization failed.\n");
return err;
}
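With the operating mode stored in rctx->mode, atmel_tdes_crypt_dma() derives the DMA bus width from it directly: CFB8 and CFB16 move one and two bytes per beat, everything else four. A standalone sketch of that mapping, with ex_buswidth() as an illustrative helper keyed on the CFB block size:

#include <linux/dmaengine.h>

static enum dma_slave_buswidth ex_buswidth(unsigned int cfb_block_size)
{
	switch (cfb_block_size) {
	case 1:
		return DMA_SLAVE_BUSWIDTH_1_BYTE;	/* CFB8 */
	case 2:
		return DMA_SLAVE_BUSWIDTH_2_BYTES;	/* CFB16 */
	default:
		return DMA_SLAVE_BUSWIDTH_4_BYTES;	/* CFB32/64, ECB, CBC, OFB */
	}
}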
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 4b20606983a4..fcf1effc7661 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -1249,10 +1249,8 @@ static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
{
struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);
- if (len != 16 && len != 24 && len != 32) {
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -1;
- }
+ if (len != 16 && len != 24 && len != 32)
+ return -EINVAL;
ctx->key_length = len;
@@ -1606,8 +1604,6 @@ artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
case 32:
break;
default:
- crypto_skcipher_set_flags(cipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -1634,8 +1630,6 @@ artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
case 64:
break;
default:
- crypto_skcipher_set_flags(cipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
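As in the Atmel drivers above, the artpec6 setkey paths stop signalling bad key lengths through CRYPTO_TFM_RES_BAD_KEY_LEN and just return -EINVAL, since the crypto core no longer consumes those result flags. A minimal setkey in that shape; the context layout and names are placeholders:

#include <crypto/internal/skcipher.h>
#include <linux/string.h>

static int ex_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	u8 *ctx = crypto_skcipher_ctx(tfm);	/* placeholder context layout */

	if (keylen != 16 && keylen != 24 && keylen != 32)
		return -EINVAL;			/* no tfm result flag to set */

	memcpy(ctx, key, keylen);
	return 0;
}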
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 1564a6f8c9cb..c8b9408541a9 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -1846,7 +1846,6 @@ static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
ctx->cipher_type = CIPHER_TYPE_AES256;
break;
default:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
@@ -2894,13 +2893,8 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
ctx->fallback_cipher->base.crt_flags |=
tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
- if (ret) {
+ if (ret)
flow_log(" fallback setkey() returned:%d\n", ret);
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- (ctx->fallback_cipher->base.crt_flags &
- CRYPTO_TFM_RES_MASK);
- }
}
ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
@@ -2916,7 +2910,6 @@ badkey:
ctx->authkeylen = 0;
ctx->digestsize = 0;
- crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -2967,13 +2960,8 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
ret = crypto_aead_setkey(ctx->fallback_cipher, key,
keylen + ctx->salt_len);
- if (ret) {
+ if (ret)
flow_log(" fallback setkey() returned:%d\n", ret);
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- (ctx->fallback_cipher->base.crt_flags &
- CRYPTO_TFM_RES_MASK);
- }
}
ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
@@ -2992,7 +2980,6 @@ badkey:
ctx->authkeylen = 0;
ctx->digestsize = 0;
- crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 87053e46c788..fac5b2e26610 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -130,13 +130,13 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
scatterlist crypto API to the SEC4 via job ring.
config CRYPTO_DEV_FSL_CAAM_PKC_API
- bool "Register public key cryptography implementations with Crypto API"
- default y
- select CRYPTO_RSA
- help
- Selecting this will allow SEC Public key support for RSA.
- Supported cryptographic primitives: encryption, decryption,
- signature and verification.
+ bool "Register public key cryptography implementations with Crypto API"
+ default y
+ select CRYPTO_RSA
+ help
+ Selecting this will allow SEC Public key support for RSA.
+ Supported cryptographic primitives: encryption, decryption,
+ signature and verification.
config CRYPTO_DEV_FSL_CAAM_RNG_API
bool "Register caam device for hwrng API"
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 2912006b946b..ef1a65f4fc92 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -548,10 +548,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int ivsize = crypto_aead_ivsize(aead);
unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
- if (keylen != CHACHA_KEY_SIZE + saltlen) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != CHACHA_KEY_SIZE + saltlen)
return -EINVAL;
- }
ctx->cdata.key_virt = key;
ctx->cdata.keylen = keylen - saltlen;
@@ -619,7 +617,6 @@ skip_split_key:
memzero_explicit(&keys, sizeof(keys));
return aead_set_sh_desc(aead);
badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -649,10 +646,8 @@ static int gcm_setkey(struct crypto_aead *aead,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -672,10 +667,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
int err;
err = aes_check_keylen(keylen - 4);
- if (err) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -700,10 +693,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
int err;
err = aes_check_keylen(keylen - 4);
- if (err) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -762,11 +753,8 @@ static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, 0);
}
@@ -786,11 +774,8 @@ static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -809,11 +794,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
ctx1_iv_off = 16;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -846,7 +828,6 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
u32 *desc;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
dev_err(jrdev, "key size mismatch\n");
return -EINVAL;
}
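The caam setkey helpers above now delegate AES key-length validation to aes_check_keylen() and propagate its error unchanged, again without touching any tfm flags. A short sketch of that shape; ex_setkey() is illustrative and omits the descriptor updates a real driver performs:

#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

static int ex_setkey(struct crypto_skcipher *skcipher, const u8 *key,
		     unsigned int keylen)
{
	int err;

	err = aes_check_keylen(keylen);	/* -EINVAL unless 16, 24 or 32 bytes */
	if (err)
		return err;

	return 0;	/* a real driver would program the key and descriptors here */
}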
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 8e3449670d2f..4a29e0ef9d63 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -268,7 +268,6 @@ skip_split_key:
memzero_explicit(&keys, sizeof(keys));
return ret;
badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -356,10 +355,8 @@ static int gcm_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -462,10 +459,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen - 4);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -570,10 +565,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen - 4);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -644,7 +637,7 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->sh_desc_enc);
if (ret) {
dev_err(jrdev, "driver enc context update failed\n");
- goto badkey;
+ return -EINVAL;
}
}
@@ -653,14 +646,11 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->sh_desc_dec);
if (ret) {
dev_err(jrdev, "driver dec context update failed\n");
- goto badkey;
+ return -EINVAL;
}
}
return ret;
-badkey:
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
@@ -669,11 +659,8 @@ static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, 0);
}
@@ -693,11 +680,8 @@ static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -716,11 +700,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
ctx1_iv_off = 16;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -748,7 +729,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
dev_err(jrdev, "key size mismatch\n");
- goto badkey;
+ return -EINVAL;
}
ctx->cdata.keylen = keylen;
@@ -765,7 +746,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->sh_desc_enc);
if (ret) {
dev_err(jrdev, "driver enc context update failed\n");
- goto badkey;
+ return -EINVAL;
}
}
@@ -774,14 +755,11 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->sh_desc_dec);
if (ret) {
dev_err(jrdev, "driver dec context update failed\n");
- goto badkey;
+ return -EINVAL;
}
}
return ret;
-badkey:
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
/*
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 3443f6d6dd83..28669cbecf77 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -313,7 +313,6 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
memzero_explicit(&keys, sizeof(keys));
return aead_set_sh_desc(aead);
badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -326,11 +325,11 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
err = crypto_authenc_extractkeys(&keys, key, keylen);
if (unlikely(err))
- goto badkey;
+ goto out;
err = -EINVAL;
if (keys.enckeylen != DES3_EDE_KEY_SIZE)
- goto badkey;
+ goto out;
err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
aead_setkey(aead, key, keylen);
@@ -338,10 +337,6 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
out:
memzero_explicit(&keys, sizeof(keys));
return err;
-
-badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
- goto out;
}
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
@@ -634,10 +629,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int ivsize = crypto_aead_ivsize(aead);
unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
- if (keylen != CHACHA_KEY_SIZE + saltlen) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != CHACHA_KEY_SIZE + saltlen)
return -EINVAL;
- }
ctx->cdata.key_virt = key;
ctx->cdata.keylen = keylen - saltlen;
@@ -725,10 +718,8 @@ static int gcm_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -822,10 +813,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen - 4);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -923,10 +912,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen - 4);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -992,11 +979,8 @@ static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, 0);
}
@@ -1016,11 +1000,8 @@ static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -1039,11 +1020,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
ctx1_iv_off = 16;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -1051,11 +1029,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
const u8 *key, unsigned int keylen)
{
- if (keylen != CHACHA_KEY_SIZE) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != CHACHA_KEY_SIZE)
return -EINVAL;
- }
return skcipher_setkey(skcipher, key, keylen, 0);
}
@@ -1084,7 +1059,6 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
dev_err(dev, "key size mismatch\n");
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -2481,7 +2455,7 @@ static struct caam_aead_alg driver_aeads[] = {
.cra_name = "echainiv(authenc(hmac(sha256),"
"cbc(des)))",
.cra_driver_name = "echainiv-authenc-"
- "hmac-sha256-cbc-desi-"
+ "hmac-sha256-cbc-des-"
"caam-qi2",
.cra_blocksize = DES_BLOCK_SIZE,
},
@@ -2998,15 +2972,13 @@ struct caam_hash_state {
dma_addr_t buf_dma;
dma_addr_t ctx_dma;
int ctx_dma_len;
- u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_0;
- u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_1;
+ u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen;
+ int next_buflen;
u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*finup)(struct ahash_request *req);
- int current_buf;
};
struct caam_export_state {
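The reworked caam_hash_state above drops the buf_0/buf_1 ping-pong: a single buffer, buflen for what it currently holds, and next_buflen for the residue that will be copied out of req->src once the job completes, as the completion callbacks further down do. A sketch of that completion-time copy, using the fields defined in this hunk:

#include <crypto/scatterwalk.h>

static void ex_save_residue(struct caam_hash_state *state,
			    struct ahash_request *req)
{
	/* copy the trailing, not-yet-hashed bytes into the single buffer */
	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;
}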
@@ -3018,42 +2990,17 @@ struct caam_export_state {
int (*finup)(struct ahash_request *req);
};
-static inline void switch_buf(struct caam_hash_state *state)
-{
- state->current_buf ^= 1;
-}
-
-static inline u8 *current_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_1 : state->buf_0;
-}
-
-static inline u8 *alt_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_0 : state->buf_1;
-}
-
-static inline int *current_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_1 : &state->buflen_0;
-}
-
-static inline int *alt_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_0 : &state->buflen_1;
-}
-
/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_qm_sg(struct device *dev,
struct dpaa2_sg_entry *qm_sg,
struct caam_hash_state *state)
{
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
if (!buflen)
return 0;
- state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
+ state->buf_dma = dma_map_single(dev, state->buf, buflen,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, state->buf_dma)) {
dev_err(dev, "unable to map buf\n");
@@ -3304,7 +3251,6 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
return ret;
bad_free_key:
kfree(hashed_key);
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -3321,7 +3267,7 @@ static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
DMA_TO_DEVICE);
if (state->buf_dma) {
- dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+ dma_unmap_single(dev, state->buf_dma, state->buflen,
DMA_TO_DEVICE);
state->buf_dma = 0;
}
@@ -3383,9 +3329,17 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
ecode = caam_qi2_strstatus(ctx->dev, status);
ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
- switch_buf(state);
qi_cache_free(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
@@ -3440,9 +3394,17 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
ecode = caam_qi2_strstatus(ctx->dev, status);
ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
- switch_buf(state);
qi_cache_free(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
@@ -3464,16 +3426,14 @@ static int ahash_update_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state), last_buflen;
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int in_len = *buflen + req->nbytes, to_hash;
int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
struct ahash_edesc *edesc;
int ret = 0;
- last_buflen = *next_buflen;
*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
to_hash = in_len - *next_buflen;
@@ -3524,10 +3484,6 @@ static int ahash_update_ctx(struct ahash_request *req)
if (mapped_nents) {
sg_to_qm_sg_last(req->src, src_len,
sg_table + qm_sg_src_index, 0);
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
} else {
dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
true);
@@ -3566,14 +3522,11 @@ static int ahash_update_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = last_buflen;
- }
- print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -3592,7 +3545,7 @@ static int ahash_final_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
int qm_sg_bytes;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
@@ -3663,7 +3616,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
int qm_sg_bytes, qm_sg_src_index;
int src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
@@ -3852,8 +3805,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int buflen = *current_buflen(state);
+ u8 *buf = state->buf;
+ int buflen = state->buflen;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret = -ENOMEM;
@@ -3925,10 +3878,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int in_len = *buflen + req->nbytes, to_hash;
int qm_sg_bytes, src_nents, mapped_nents;
struct ahash_edesc *edesc;
@@ -3977,11 +3929,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
-
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
qm_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
@@ -4029,14 +3976,11 @@ static int ahash_update_no_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = 0;
- }
- print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -4055,7 +3999,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
int qm_sg_bytes, src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
@@ -4151,8 +4095,9 @@ static int ahash_update_first(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int to_hash;
int src_nents, mapped_nents;
struct ahash_edesc *edesc;
@@ -4220,10 +4165,6 @@ static int ahash_update_first(struct ahash_request *req)
dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
}
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src, to_hash,
- *next_buflen, 0);
-
state->ctx_dma_len = ctx->ctx_len;
state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
ctx->ctx_len, DMA_FROM_DEVICE);
@@ -4257,14 +4198,14 @@ static int ahash_update_first(struct ahash_request *req)
state->update = ahash_update_no_ctx;
state->finup = ahash_finup_no_ctx;
state->final = ahash_final_no_ctx;
- scatterwalk_map_and_copy(next_buf, req->src, 0,
+ scatterwalk_map_and_copy(buf, req->src, 0,
req->nbytes, 0);
- switch_buf(state);
- }
+ *buflen = *next_buflen;
- print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -4288,10 +4229,9 @@ static int ahash_init(struct ahash_request *req)
state->ctx_dma = 0;
state->ctx_dma_len = 0;
- state->current_buf = 0;
state->buf_dma = 0;
- state->buflen_0 = 0;
- state->buflen_1 = 0;
+ state->buflen = 0;
+ state->next_buflen = 0;
return 0;
}
@@ -4321,16 +4261,8 @@ static int ahash_export(struct ahash_request *req, void *out)
{
struct caam_hash_state *state = ahash_request_ctx(req);
struct caam_export_state *export = out;
- int len;
- u8 *buf;
-
- if (state->current_buf) {
- buf = state->buf_1;
- len = state->buflen_1;
- } else {
- buf = state->buf_0;
- len = state->buflen_0;
- }
+ u8 *buf = state->buf;
+ int len = state->buflen;
memcpy(export->buf, buf, len);
memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
@@ -4348,9 +4280,9 @@ static int ahash_import(struct ahash_request *req, const void *in)
const struct caam_export_state *export = in;
memset(state, 0, sizeof(*state));
- memcpy(state->buf_0, export->buf, export->buflen);
+ memcpy(state->buf, export->buf, export->buflen);
memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
- state->buflen_0 = export->buflen;
+ state->buflen = export->buflen;
state->update = export->update;
state->final = export->final;
state->finup = export->finup;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 65399cb2a770..8d9143407fc5 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -107,15 +107,13 @@ struct caam_hash_state {
dma_addr_t buf_dma;
dma_addr_t ctx_dma;
int ctx_dma_len;
- u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_0;
- u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_1;
+ u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen;
+ int next_buflen;
u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*finup)(struct ahash_request *req);
- int current_buf;
};
struct caam_export_state {
@@ -127,31 +125,6 @@ struct caam_export_state {
int (*finup)(struct ahash_request *req);
};
-static inline void switch_buf(struct caam_hash_state *state)
-{
- state->current_buf ^= 1;
-}
-
-static inline u8 *current_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_1 : state->buf_0;
-}
-
-static inline u8 *alt_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_0 : state->buf_1;
-}
-
-static inline int *current_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_1 : &state->buflen_0;
-}
-
-static inline int *alt_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_0 : &state->buflen_1;
-}
-
static inline bool is_cmac_aes(u32 algtype)
{
return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
@@ -183,12 +156,12 @@ static inline int buf_map_to_sec4_sg(struct device *jrdev,
struct sec4_sg_entry *sec4_sg,
struct caam_hash_state *state)
{
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
if (!buflen)
return 0;
- state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
+ state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, state->buf_dma)) {
dev_err(jrdev, "unable to map buf\n");
@@ -500,7 +473,6 @@ static int ahash_setkey(struct crypto_ahash *ahash,
return ahash_set_sh_desc(ahash);
bad_free_key:
kfree(hashed_key);
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -510,10 +482,8 @@ static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct device *jrdev = ctx->jrdev;
- if (keylen != AES_KEYSIZE_128) {
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != AES_KEYSIZE_128)
return -EINVAL;
- }
memcpy(ctx->key, key, keylen);
dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
@@ -533,10 +503,8 @@ static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
/* key is immediate data for all cmac shared descriptors */
ctx->adata.key_virt = key;
@@ -578,7 +546,7 @@ static inline void ahash_unmap(struct device *dev,
edesc->sec4_sg_bytes, DMA_TO_DEVICE);
if (state->buf_dma) {
- dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+ dma_unmap_single(dev, state->buf_dma, state->buflen,
DMA_TO_DEVICE);
state->buf_dma = 0;
}
@@ -643,9 +611,17 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
- switch_buf(state);
kfree(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
@@ -703,9 +679,17 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
- switch_buf(state);
kfree(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
@@ -786,18 +770,16 @@ static int ahash_update_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
- u8 *next_buf = alt_buf(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int blocksize = crypto_ahash_blocksize(ahash);
- int *next_buflen = alt_buflen(state), last_buflen;
int in_len = *buflen + req->nbytes, to_hash;
u32 *desc;
int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
struct ahash_edesc *edesc;
int ret = 0;
- last_buflen = *next_buflen;
*next_buflen = in_len & (blocksize - 1);
to_hash = in_len - *next_buflen;
@@ -868,10 +850,6 @@ static int ahash_update_ctx(struct ahash_request *req)
sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
1);
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -901,14 +879,11 @@ static int ahash_update_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = last_buflen;
- }
- print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
- *next_buflen, 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -925,7 +900,7 @@ static int ahash_final_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
u32 *desc;
int sec4_sg_bytes;
int digestsize = crypto_ahash_digestsize(ahash);
@@ -991,7 +966,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
u32 *desc;
int sec4_sg_src_index;
int src_nents, mapped_nents;
@@ -1148,8 +1123,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int buflen = *current_buflen(state);
+ u8 *buf = state->buf;
+ int buflen = state->buflen;
u32 *desc;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
@@ -1207,11 +1182,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int blocksize = crypto_ahash_blocksize(ahash);
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
int in_len = *buflen + req->nbytes, to_hash;
int sec4_sg_bytes, src_nents, mapped_nents;
struct ahash_edesc *edesc;
@@ -1278,12 +1252,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
- if (*next_buflen) {
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
- }
-
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -1317,14 +1285,11 @@ static int ahash_update_no_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = 0;
- }
- print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -1342,7 +1307,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
u32 *desc;
int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
@@ -1428,8 +1393,9 @@ static int ahash_update_first(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int to_hash;
int blocksize = crypto_ahash_blocksize(ahash);
u32 *desc;
@@ -1491,10 +1457,6 @@ static int ahash_update_first(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src, to_hash,
- *next_buflen, 0);
-
desc = edesc->hw_desc;
ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
@@ -1517,14 +1479,14 @@ static int ahash_update_first(struct ahash_request *req)
state->update = ahash_update_no_ctx;
state->finup = ahash_finup_no_ctx;
state->final = ahash_final_no_ctx;
- scatterwalk_map_and_copy(next_buf, req->src, 0,
+ scatterwalk_map_and_copy(buf, req->src, 0,
req->nbytes, 0);
- switch_buf(state);
- }
+ *buflen = *next_buflen;
- print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -1548,10 +1510,9 @@ static int ahash_init(struct ahash_request *req)
state->ctx_dma = 0;
state->ctx_dma_len = 0;
- state->current_buf = 0;
state->buf_dma = 0;
- state->buflen_0 = 0;
- state->buflen_1 = 0;
+ state->buflen = 0;
+ state->next_buflen = 0;
return 0;
}
@@ -1581,16 +1542,8 @@ static int ahash_export(struct ahash_request *req, void *out)
{
struct caam_hash_state *state = ahash_request_ctx(req);
struct caam_export_state *export = out;
- int len;
- u8 *buf;
-
- if (state->current_buf) {
- buf = state->buf_1;
- len = state->buflen_1;
- } else {
- buf = state->buf_0;
- len = state->buflen_0;
- }
+ u8 *buf = state->buf;
+ int len = state->buflen;
memcpy(export->buf, buf, len);
memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
@@ -1608,9 +1561,9 @@ static int ahash_import(struct ahash_request *req, const void *in)
const struct caam_export_state *export = in;
memset(state, 0, sizeof(*state));
- memcpy(state->buf_0, export->buf, export->buflen);
+ memcpy(state->buf, export->buf, export->buflen);
memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
- state->buflen_0 = export->buflen;
+ state->buflen = export->buflen;
state->update = export->update;
state->final = export->final;
state->finup = export->finup;
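The caamalg_qi2.c and caamhash.c hunks above implement the same refactor: the ping-pong buffer pair (buf_0/buf_1 plus the current_buf selector) is collapsed into a single buf with buflen/next_buflen, and the copy of the trailing partial block out of req->src is moved from submission time into the completion callbacks (ahash_done_bi/ahash_done_ctx_dst). A minimal sketch of the deferred-copy step, using the field names introduced by the patch (the function name is illustrative only, not from the patch):

	/* runs in the completion path, after the descriptor has been freed */
	static void example_update_done(struct ahash_request *req,
					struct caam_hash_state *state)
	{
		int rem = state->next_buflen;	/* bytes not hashed this round */

		/* stash the unhashed tail of req->src for the next update */
		scatterwalk_map_and_copy(state->buf, req->src,
					 req->nbytes - rem, rem, 0);
		state->buflen = rem;
	}

Because only one buffer is ever live, ahash_export()/ahash_import() and the DMA unmap in ahash_unmap() no longer need to consult a current_buf flag.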
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index d7c3c3805693..7139366da016 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -99,10 +99,13 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
if (ctrlpriv->virt_en == 1 ||
/*
- * Apparently on i.MX8MQ it doesn't matter if virt_en == 1
+ * Apparently on i.MX8M{Q,M,N,P} it doesn't matter if virt_en == 1
* and the following steps should be performed regardless
*/
- of_machine_is_compatible("fsl,imx8mq")) {
+ of_machine_is_compatible("fsl,imx8mq") ||
+ of_machine_is_compatible("fsl,imx8mm") ||
+ of_machine_is_compatible("fsl,imx8mn") ||
+ of_machine_is_compatible("fsl,imx8mp")) {
clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);
while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
@@ -508,7 +511,7 @@ static const struct soc_device_attribute caam_imx_soc_table[] = {
{ .soc_id = "i.MX6UL", .data = &caam_imx6ul_data },
{ .soc_id = "i.MX6*", .data = &caam_imx6_data },
{ .soc_id = "i.MX7*", .data = &caam_imx7_data },
- { .soc_id = "i.MX8MQ", .data = &caam_imx7_data },
+ { .soc_id = "i.MX8M*", .data = &caam_imx7_data },
{ .family = "Freescale i.MX" },
{ /* sentinel */ }
};
@@ -671,11 +674,9 @@ static int caam_probe(struct platform_device *pdev)
of_node_put(np);
if (!ctrlpriv->mc_en)
- clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
+ clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK,
MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
- MCFGR_WDENABLE | MCFGR_LARGE_BURST |
- (sizeof(dma_addr_t) == sizeof(u64) ?
- MCFGR_LONG_PTR : 0));
+ MCFGR_WDENABLE | MCFGR_LARGE_BURST);
handle_imx6_err005766(&ctrl->mcr);
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 1ad66677d88e..1be1adffff1d 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -295,8 +295,6 @@ static int cvm_setkey(struct crypto_skcipher *cipher, const u8 *key,
memcpy(ctx->enc_key, key, keylen);
return 0;
} else {
- crypto_skcipher_set_flags(cipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
}
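This hunk, and the nitrox and ccp setkey hunks that follow, all drop the same thing: drivers no longer report bad key lengths through crypto_*_set_flags(..., CRYPTO_TFM_RES_BAD_KEY_LEN), since returning -EINVAL alone is expected to be sufficient once that result flag is retired from the crypto API. A hedged sketch of the resulting pattern (the function name and the particular key-size check are illustrative, not taken from any one driver):

	static int example_setkey(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
	{
		/* just return the error; no tfm result flag to set anymore */
		if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256)
			return -EINVAL;

		/* ... program the key into the driver context ... */
		return 0;
	}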
diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c
index 6f80cc3b5c84..dce5423a5883 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_aead.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c
@@ -40,10 +40,8 @@ static int nitrox_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
union fc_ctx_flags flags;
aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (aes_keylen < 0)
return -EINVAL;
- }
/* fill crypto context */
fctx = nctx->u.fctx;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index 97af4d50d003..18088b0a2257 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -200,10 +200,8 @@ static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
int aes_keylen;
aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (aes_keylen < 0)
return -EINVAL;
- }
return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
}
@@ -351,10 +349,8 @@ static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher,
keylen /= 2;
aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (aes_keylen < 0)
return -EINVAL;
- }
fctx = nctx->u.fctx;
/* copy KEY2 */
@@ -382,10 +378,8 @@ static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (aes_keylen < 0)
return -EINVAL;
- }
return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
}
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 6b86f1e6d634..db362fe472ea 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -8,7 +8,9 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \
ccp-dmaengine.o
ccp-$(CONFIG_CRYPTO_DEV_CCP_DEBUGFS) += ccp-debugfs.o
ccp-$(CONFIG_PCI) += sp-pci.o
-ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o
+ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
+ sev-dev.o \
+ tee-dev.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 32f19f402073..5eba7ee49e81 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -276,7 +276,6 @@ static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ctx->u.aes.type = CCP_AES_TYPE_256;
break;
default:
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->u.aes.mode = alg->mode;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index ff50ee80d223..9e8f07c1afac 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -42,7 +42,6 @@ static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
ctx->u.aes.type = CCP_AES_TYPE_256;
break;
default:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index 33328a153225..51e12fbd1159 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -51,7 +51,6 @@ static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
ctx->u.aes.type = CCP_AES_TYPE_256;
break;
default:
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->u.aes.mode = alg->mode;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 453b9797f93f..474e6f1a6a84 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -293,10 +293,8 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
ret = crypto_shash_digest(sdesc, key, key_len,
ctx->u.sha.key);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return -EINVAL;
- }
key_len = digest_size;
} else {
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 0186b3df4c87..0d5576f6ad21 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -586,6 +586,7 @@ const struct ccp_vdata ccpv3_platform = {
.setup = NULL,
.perform = &ccp3_actions,
.offset = 0,
+ .rsamax = CCP_RSA_MAX_WIDTH,
};
const struct ccp_vdata ccpv3 = {
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 7ca2d3408e7a..e95e7aa5dbf1 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -2,57 +2,20 @@
/*
* AMD Platform Security Processor (PSP) interface
*
- * Copyright (C) 2016,2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
*
* Author: Brijesh Singh <brijesh.singh@amd.com>
*/
-#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/kthread.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/spinlock_types.h>
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/hw_random.h>
-#include <linux/ccp.h>
-#include <linux/firmware.h>
-
-#include <asm/smp.h>
+#include <linux/irqreturn.h>
#include "sp-dev.h"
#include "psp-dev.h"
+#include "sev-dev.h"
+#include "tee-dev.h"
-#define DEVICE_NAME "sev"
-#define SEV_FW_FILE "amd/sev.fw"
-#define SEV_FW_NAME_SIZE 64
-
-static DEFINE_MUTEX(sev_cmd_mutex);
-static struct sev_misc_dev *misc_dev;
-static struct psp_device *psp_master;
-
-static int psp_cmd_timeout = 100;
-module_param(psp_cmd_timeout, int, 0644);
-MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");
-
-static int psp_probe_timeout = 5;
-module_param(psp_probe_timeout, int, 0644);
-MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
-
-static bool psp_dead;
-static int psp_timeout;
-
-static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
-{
- if (psp_master->api_major > maj)
- return true;
- if (psp_master->api_major == maj && psp_master->api_minor >= min)
- return true;
- return false;
-}
+struct psp_device *psp_master;
static struct psp_device *psp_alloc_struct(struct sp_device *sp)
{
@@ -75,902 +38,95 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
{
struct psp_device *psp = data;
unsigned int status;
- int reg;
/* Read the interrupt status: */
status = ioread32(psp->io_regs + psp->vdata->intsts_reg);
- /* Check if it is command completion: */
- if (!(status & PSP_CMD_COMPLETE))
- goto done;
+ /* invoke subdevice interrupt handlers */
+ if (status) {
+ if (psp->sev_irq_handler)
+ psp->sev_irq_handler(irq, psp->sev_irq_data, status);
- /* Check if it is SEV command completion: */
- reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
- if (reg & PSP_CMDRESP_RESP) {
- psp->sev_int_rcvd = 1;
- wake_up(&psp->sev_int_queue);
+ if (psp->tee_irq_handler)
+ psp->tee_irq_handler(irq, psp->tee_irq_data, status);
}
-done:
/* Clear the interrupt status by writing the same value we read. */
iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
return IRQ_HANDLED;
}
-static int sev_wait_cmd_ioc(struct psp_device *psp,
- unsigned int *reg, unsigned int timeout)
-{
- int ret;
-
- ret = wait_event_timeout(psp->sev_int_queue,
- psp->sev_int_rcvd, timeout * HZ);
- if (!ret)
- return -ETIMEDOUT;
-
- *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
-
- return 0;
-}
-
-static int sev_cmd_buffer_len(int cmd)
-{
- switch (cmd) {
- case SEV_CMD_INIT: return sizeof(struct sev_data_init);
- case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status);
- case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr);
- case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import);
- case SEV_CMD_PDH_CERT_EXPORT: return sizeof(struct sev_data_pdh_cert_export);
- case SEV_CMD_LAUNCH_START: return sizeof(struct sev_data_launch_start);
- case SEV_CMD_LAUNCH_UPDATE_DATA: return sizeof(struct sev_data_launch_update_data);
- case SEV_CMD_LAUNCH_UPDATE_VMSA: return sizeof(struct sev_data_launch_update_vmsa);
- case SEV_CMD_LAUNCH_FINISH: return sizeof(struct sev_data_launch_finish);
- case SEV_CMD_LAUNCH_MEASURE: return sizeof(struct sev_data_launch_measure);
- case SEV_CMD_ACTIVATE: return sizeof(struct sev_data_activate);
- case SEV_CMD_DEACTIVATE: return sizeof(struct sev_data_deactivate);
- case SEV_CMD_DECOMMISSION: return sizeof(struct sev_data_decommission);
- case SEV_CMD_GUEST_STATUS: return sizeof(struct sev_data_guest_status);
- case SEV_CMD_DBG_DECRYPT: return sizeof(struct sev_data_dbg);
- case SEV_CMD_DBG_ENCRYPT: return sizeof(struct sev_data_dbg);
- case SEV_CMD_SEND_START: return sizeof(struct sev_data_send_start);
- case SEV_CMD_SEND_UPDATE_DATA: return sizeof(struct sev_data_send_update_data);
- case SEV_CMD_SEND_UPDATE_VMSA: return sizeof(struct sev_data_send_update_vmsa);
- case SEV_CMD_SEND_FINISH: return sizeof(struct sev_data_send_finish);
- case SEV_CMD_RECEIVE_START: return sizeof(struct sev_data_receive_start);
- case SEV_CMD_RECEIVE_FINISH: return sizeof(struct sev_data_receive_finish);
- case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data);
- case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa);
- case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret);
- case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware);
- case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
- default: return 0;
- }
-
- return 0;
-}
-
-static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
-{
- struct psp_device *psp = psp_master;
- unsigned int phys_lsb, phys_msb;
- unsigned int reg, ret = 0;
-
- if (!psp)
- return -ENODEV;
-
- if (psp_dead)
- return -EBUSY;
-
- /* Get the physical address of the command buffer */
- phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
- phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
-
- dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
- cmd, phys_msb, phys_lsb, psp_timeout);
-
- print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
- sev_cmd_buffer_len(cmd), false);
-
- iowrite32(phys_lsb, psp->io_regs + psp->vdata->cmdbuff_addr_lo_reg);
- iowrite32(phys_msb, psp->io_regs + psp->vdata->cmdbuff_addr_hi_reg);
-
- psp->sev_int_rcvd = 0;
-
- reg = cmd;
- reg <<= PSP_CMDRESP_CMD_SHIFT;
- reg |= PSP_CMDRESP_IOC;
- iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg);
-
- /* wait for command completion */
- ret = sev_wait_cmd_ioc(psp, &reg, psp_timeout);
- if (ret) {
- if (psp_ret)
- *psp_ret = 0;
-
- dev_err(psp->dev, "sev command %#x timed out, disabling PSP \n", cmd);
- psp_dead = true;
-
- return ret;
- }
-
- psp_timeout = psp_cmd_timeout;
-
- if (psp_ret)
- *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
-
- if (reg & PSP_CMDRESP_ERR_MASK) {
- dev_dbg(psp->dev, "sev command %#x failed (%#010x)\n",
- cmd, reg & PSP_CMDRESP_ERR_MASK);
- ret = -EIO;
- }
-
- print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
- sev_cmd_buffer_len(cmd), false);
-
- return ret;
-}
-
-static int sev_do_cmd(int cmd, void *data, int *psp_ret)
-{
- int rc;
-
- mutex_lock(&sev_cmd_mutex);
- rc = __sev_do_cmd_locked(cmd, data, psp_ret);
- mutex_unlock(&sev_cmd_mutex);
-
- return rc;
-}
-
-static int __sev_platform_init_locked(int *error)
-{
- struct psp_device *psp = psp_master;
- int rc = 0;
-
- if (!psp)
- return -ENODEV;
-
- if (psp->sev_state == SEV_STATE_INIT)
- return 0;
-
- rc = __sev_do_cmd_locked(SEV_CMD_INIT, &psp->init_cmd_buf, error);
- if (rc)
- return rc;
-
- psp->sev_state = SEV_STATE_INIT;
-
- /* Prepare for first SEV guest launch after INIT */
- wbinvd_on_all_cpus();
- rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
- if (rc)
- return rc;
-
- dev_dbg(psp->dev, "SEV firmware initialized\n");
-
- return rc;
-}
-
-int sev_platform_init(int *error)
-{
- int rc;
-
- mutex_lock(&sev_cmd_mutex);
- rc = __sev_platform_init_locked(error);
- mutex_unlock(&sev_cmd_mutex);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(sev_platform_init);
-
-static int __sev_platform_shutdown_locked(int *error)
-{
- int ret;
-
- ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
- if (ret)
- return ret;
-
- psp_master->sev_state = SEV_STATE_UNINIT;
- dev_dbg(psp_master->dev, "SEV firmware shutdown\n");
-
- return ret;
-}
-
-static int sev_platform_shutdown(int *error)
+static unsigned int psp_get_capability(struct psp_device *psp)
{
- int rc;
-
- mutex_lock(&sev_cmd_mutex);
- rc = __sev_platform_shutdown_locked(NULL);
- mutex_unlock(&sev_cmd_mutex);
-
- return rc;
-}
-
-static int sev_get_platform_state(int *state, int *error)
-{
- int rc;
-
- rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS,
- &psp_master->status_cmd_buf, error);
- if (rc)
- return rc;
-
- *state = psp_master->status_cmd_buf.state;
- return rc;
-}
-
-static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
-{
- int state, rc;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ unsigned int val = ioread32(psp->io_regs + psp->vdata->feature_reg);
/*
- * The SEV spec requires that FACTORY_RESET must be issued in
- * UNINIT state. Before we go further lets check if any guest is
- * active.
- *
- * If FW is in WORKING state then deny the request otherwise issue
- * SHUTDOWN command do INIT -> UNINIT before issuing the FACTORY_RESET.
- *
- */
- rc = sev_get_platform_state(&state, &argp->error);
- if (rc)
- return rc;
-
- if (state == SEV_STATE_WORKING)
- return -EBUSY;
-
- if (state == SEV_STATE_INIT) {
- rc = __sev_platform_shutdown_locked(&argp->error);
- if (rc)
- return rc;
- }
-
- return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
-}
-
-static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
-{
- struct sev_user_data_status *data = &psp_master->status_cmd_buf;
- int ret;
-
- ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error);
- if (ret)
- return ret;
-
- if (copy_to_user((void __user *)argp->data, data, sizeof(*data)))
- ret = -EFAULT;
-
- return ret;
-}
-
-static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
-{
- int rc;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (psp_master->sev_state == SEV_STATE_UNINIT) {
- rc = __sev_platform_init_locked(&argp->error);
- if (rc)
- return rc;
- }
-
- return __sev_do_cmd_locked(cmd, NULL, &argp->error);
-}
-
-static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
-{
- struct sev_user_data_pek_csr input;
- struct sev_data_pek_csr *data;
- void *blob = NULL;
- int ret;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
- return -EFAULT;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- /* userspace wants to query CSR length */
- if (!input.address || !input.length)
- goto cmd;
-
- /* allocate a physically contiguous buffer to store the CSR blob */
- if (!access_ok(input.address, input.length) ||
- input.length > SEV_FW_BLOB_MAX_SIZE) {
- ret = -EFAULT;
- goto e_free;
- }
-
- blob = kmalloc(input.length, GFP_KERNEL);
- if (!blob) {
- ret = -ENOMEM;
- goto e_free;
- }
-
- data->address = __psp_pa(blob);
- data->len = input.length;
-
-cmd:
- if (psp_master->sev_state == SEV_STATE_UNINIT) {
- ret = __sev_platform_init_locked(&argp->error);
- if (ret)
- goto e_free_blob;
- }
-
- ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error);
-
- /* If we query the CSR length, FW responded with expected data. */
- input.length = data->len;
-
- if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
- ret = -EFAULT;
- goto e_free_blob;
- }
-
- if (blob) {
- if (copy_to_user((void __user *)input.address, blob, input.length))
- ret = -EFAULT;
- }
-
-e_free_blob:
- kfree(blob);
-e_free:
- kfree(data);
- return ret;
-}
-
-void *psp_copy_user_blob(u64 __user uaddr, u32 len)
-{
- if (!uaddr || !len)
- return ERR_PTR(-EINVAL);
-
- /* verify that blob length does not exceed our limit */
- if (len > SEV_FW_BLOB_MAX_SIZE)
- return ERR_PTR(-EINVAL);
-
- return memdup_user((void __user *)(uintptr_t)uaddr, len);
-}
-EXPORT_SYMBOL_GPL(psp_copy_user_blob);
-
-static int sev_get_api_version(void)
-{
- struct sev_user_data_status *status;
- int error = 0, ret;
-
- status = &psp_master->status_cmd_buf;
- ret = sev_platform_status(status, &error);
- if (ret) {
- dev_err(psp_master->dev,
- "SEV: failed to get status. Error: %#x\n", error);
- return 1;
- }
-
- psp_master->api_major = status->api_major;
- psp_master->api_minor = status->api_minor;
- psp_master->build = status->build;
- psp_master->sev_state = status->state;
-
- return 0;
-}
-
-static int sev_get_firmware(struct device *dev,
- const struct firmware **firmware)
-{
- char fw_name_specific[SEV_FW_NAME_SIZE];
- char fw_name_subset[SEV_FW_NAME_SIZE];
-
- snprintf(fw_name_specific, sizeof(fw_name_specific),
- "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
-
- snprintf(fw_name_subset, sizeof(fw_name_subset),
- "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
- boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);
-
- /* Check for SEV FW for a particular model.
- * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
- *
- * or
- *
- * Check for SEV FW common to a subset of models.
- * Ex. amd_sev_fam17h_model0xh.sbin for
- * Family 17h Model 00h -- Family 17h Model 0Fh
- *
- * or
- *
- * Fall-back to using generic name: sev.fw
+	 * Check for access to the registers. If this read returns
+ * 0xffffffff, it's likely that the system is running a broken
+ * BIOS which disallows access to the device. Stop here and
+ * fail the PSP initialization (but not the load, as the CCP
+ * could get properly initialized).
*/
- if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
- (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
- (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
+ if (val == 0xffffffff) {
+ dev_notice(psp->dev, "psp: unable to access the device: you might be running a broken BIOS.\n");
return 0;
-
- return -ENOENT;
-}
-
-/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
-static int sev_update_firmware(struct device *dev)
-{
- struct sev_data_download_firmware *data;
- const struct firmware *firmware;
- int ret, error, order;
- struct page *p;
- u64 data_size;
-
- if (sev_get_firmware(dev, &firmware) == -ENOENT) {
- dev_dbg(dev, "No SEV firmware file present\n");
- return -1;
- }
-
- /*
- * SEV FW expects the physical address given to it to be 32
- * byte aligned. Memory allocated has structure placed at the
- * beginning followed by the firmware being passed to the SEV
- * FW. Allocate enough memory for data structure + alignment
- * padding + SEV FW.
- */
- data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);
-
- order = get_order(firmware->size + data_size);
- p = alloc_pages(GFP_KERNEL, order);
- if (!p) {
- ret = -1;
- goto fw_err;
}
- /*
- * Copy firmware data to a kernel allocated contiguous
- * memory region.
- */
- data = page_address(p);
- memcpy(page_address(p) + data_size, firmware->data, firmware->size);
-
- data->address = __psp_pa(page_address(p) + data_size);
- data->len = firmware->size;
-
- ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
- if (ret)
- dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
- else
- dev_info(dev, "SEV firmware update successful\n");
-
- __free_pages(p, order);
-
-fw_err:
- release_firmware(firmware);
-
- return ret;
+ return val;
}
-static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
+static int psp_check_sev_support(struct psp_device *psp,
+ unsigned int capability)
{
- struct sev_user_data_pek_cert_import input;
- struct sev_data_pek_cert_import *data;
- void *pek_blob, *oca_blob;
- int ret;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
- return -EFAULT;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- /* copy PEK certificate blobs from userspace */
- pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
- if (IS_ERR(pek_blob)) {
- ret = PTR_ERR(pek_blob);
- goto e_free;
- }
-
- data->pek_cert_address = __psp_pa(pek_blob);
- data->pek_cert_len = input.pek_cert_len;
-
- /* copy PEK certificate blobs from userspace */
- oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
- if (IS_ERR(oca_blob)) {
- ret = PTR_ERR(oca_blob);
- goto e_free_pek;
- }
-
- data->oca_cert_address = __psp_pa(oca_blob);
- data->oca_cert_len = input.oca_cert_len;
-
- /* If platform is not in INIT state then transition it to INIT */
- if (psp_master->sev_state != SEV_STATE_INIT) {
- ret = __sev_platform_init_locked(&argp->error);
- if (ret)
- goto e_free_oca;
+ /* Check if device supports SEV feature */
+ if (!(capability & 1)) {
+ dev_dbg(psp->dev, "psp does not support SEV\n");
+ return -ENODEV;
}
- ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error);
-
-e_free_oca:
- kfree(oca_blob);
-e_free_pek:
- kfree(pek_blob);
-e_free:
- kfree(data);
- return ret;
+ return 0;
}
-static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
+static int psp_check_tee_support(struct psp_device *psp,
+ unsigned int capability)
{
- struct sev_user_data_get_id2 input;
- struct sev_data_get_id *data;
- void *id_blob = NULL;
- int ret;
-
- /* SEV GET_ID is available from SEV API v0.16 and up */
- if (!sev_version_greater_or_equal(0, 16))
- return -ENOTSUPP;
-
- if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
- return -EFAULT;
-
- /* Check if we have write access to the userspace buffer */
- if (input.address &&
- input.length &&
- !access_ok(input.address, input.length))
- return -EFAULT;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- if (input.address && input.length) {
- id_blob = kmalloc(input.length, GFP_KERNEL);
- if (!id_blob) {
- kfree(data);
- return -ENOMEM;
- }
-
- data->address = __psp_pa(id_blob);
- data->len = input.length;
- }
-
- ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
-
- /*
- * Firmware will return the length of the ID value (either the minimum
- * required length or the actual length written), return it to the user.
- */
- input.length = data->len;
-
- if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
- ret = -EFAULT;
- goto e_free;
- }
-
- if (id_blob) {
- if (copy_to_user((void __user *)input.address,
- id_blob, data->len)) {
- ret = -EFAULT;
- goto e_free;
- }
+ /* Check if device supports TEE feature */
+ if (!(capability & 2)) {
+ dev_dbg(psp->dev, "psp does not support TEE\n");
+ return -ENODEV;
}
-e_free:
- kfree(id_blob);
- kfree(data);
-
- return ret;
+ return 0;
}
-static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
+static int psp_check_support(struct psp_device *psp,
+ unsigned int capability)
{
- struct sev_data_get_id *data;
- u64 data_size, user_size;
- void *id_blob, *mem;
- int ret;
+ int sev_support = psp_check_sev_support(psp, capability);
+ int tee_support = psp_check_tee_support(psp, capability);
- /* SEV GET_ID available from SEV API v0.16 and up */
- if (!sev_version_greater_or_equal(0, 16))
- return -ENOTSUPP;
-
- /* SEV FW expects the buffer it fills with the ID to be
- * 8-byte aligned. Memory allocated should be enough to
- * hold data structure + alignment padding + memory
- * where SEV FW writes the ID.
- */
- data_size = ALIGN(sizeof(struct sev_data_get_id), 8);
- user_size = sizeof(struct sev_user_data_get_id);
-
- mem = kzalloc(data_size + user_size, GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
-
- data = mem;
- id_blob = mem + data_size;
-
- data->address = __psp_pa(id_blob);
- data->len = user_size;
-
- ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
- if (!ret) {
- if (copy_to_user((void __user *)argp->data, id_blob, data->len))
- ret = -EFAULT;
- }
-
- kfree(mem);
+ /* Return error if device neither supports SEV nor TEE */
+ if (sev_support && tee_support)
+ return -ENODEV;
- return ret;
+ return 0;
}
-static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
+static int psp_init(struct psp_device *psp, unsigned int capability)
{
- struct sev_user_data_pdh_cert_export input;
- void *pdh_blob = NULL, *cert_blob = NULL;
- struct sev_data_pdh_cert_export *data;
int ret;
- /* If platform is not in INIT state then transition it to INIT. */
- if (psp_master->sev_state != SEV_STATE_INIT) {
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- ret = __sev_platform_init_locked(&argp->error);
+ if (!psp_check_sev_support(psp, capability)) {
+ ret = sev_dev_init(psp);
if (ret)
return ret;
}
- if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
- return -EFAULT;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- /* Userspace wants to query the certificate length. */
- if (!input.pdh_cert_address ||
- !input.pdh_cert_len ||
- !input.cert_chain_address)
- goto cmd;
-
- /* Allocate a physically contiguous buffer to store the PDH blob. */
- if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) ||
- !access_ok(input.pdh_cert_address, input.pdh_cert_len)) {
- ret = -EFAULT;
- goto e_free;
- }
-
- /* Allocate a physically contiguous buffer to store the cert chain blob. */
- if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) ||
- !access_ok(input.cert_chain_address, input.cert_chain_len)) {
- ret = -EFAULT;
- goto e_free;
- }
-
- pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL);
- if (!pdh_blob) {
- ret = -ENOMEM;
- goto e_free;
- }
-
- data->pdh_cert_address = __psp_pa(pdh_blob);
- data->pdh_cert_len = input.pdh_cert_len;
-
- cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL);
- if (!cert_blob) {
- ret = -ENOMEM;
- goto e_free_pdh;
- }
-
- data->cert_chain_address = __psp_pa(cert_blob);
- data->cert_chain_len = input.cert_chain_len;
-
-cmd:
- ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error);
-
- /* If we query the length, FW responded with expected data. */
- input.cert_chain_len = data->cert_chain_len;
- input.pdh_cert_len = data->pdh_cert_len;
-
- if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
- ret = -EFAULT;
- goto e_free_cert;
- }
-
- if (pdh_blob) {
- if (copy_to_user((void __user *)input.pdh_cert_address,
- pdh_blob, input.pdh_cert_len)) {
- ret = -EFAULT;
- goto e_free_cert;
- }
- }
-
- if (cert_blob) {
- if (copy_to_user((void __user *)input.cert_chain_address,
- cert_blob, input.cert_chain_len))
- ret = -EFAULT;
- }
-
-e_free_cert:
- kfree(cert_blob);
-e_free_pdh:
- kfree(pdh_blob);
-e_free:
- kfree(data);
- return ret;
-}
-
-static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- struct sev_issue_cmd input;
- int ret = -EFAULT;
-
- if (!psp_master)
- return -ENODEV;
-
- if (ioctl != SEV_ISSUE_CMD)
- return -EINVAL;
-
- if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd)))
- return -EFAULT;
-
- if (input.cmd > SEV_MAX)
- return -EINVAL;
-
- mutex_lock(&sev_cmd_mutex);
-
- switch (input.cmd) {
-
- case SEV_FACTORY_RESET:
- ret = sev_ioctl_do_reset(&input);
- break;
- case SEV_PLATFORM_STATUS:
- ret = sev_ioctl_do_platform_status(&input);
- break;
- case SEV_PEK_GEN:
- ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input);
- break;
- case SEV_PDH_GEN:
- ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input);
- break;
- case SEV_PEK_CSR:
- ret = sev_ioctl_do_pek_csr(&input);
- break;
- case SEV_PEK_CERT_IMPORT:
- ret = sev_ioctl_do_pek_import(&input);
- break;
- case SEV_PDH_CERT_EXPORT:
- ret = sev_ioctl_do_pdh_export(&input);
- break;
- case SEV_GET_ID:
- pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
- ret = sev_ioctl_do_get_id(&input);
- break;
- case SEV_GET_ID2:
- ret = sev_ioctl_do_get_id2(&input);
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
-
- if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
- ret = -EFAULT;
-out:
- mutex_unlock(&sev_cmd_mutex);
-
- return ret;
-}
-
-static const struct file_operations sev_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = sev_ioctl,
-};
-
-int sev_platform_status(struct sev_user_data_status *data, int *error)
-{
- return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error);
-}
-EXPORT_SYMBOL_GPL(sev_platform_status);
-
-int sev_guest_deactivate(struct sev_data_deactivate *data, int *error)
-{
- return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error);
-}
-EXPORT_SYMBOL_GPL(sev_guest_deactivate);
-
-int sev_guest_activate(struct sev_data_activate *data, int *error)
-{
- return sev_do_cmd(SEV_CMD_ACTIVATE, data, error);
-}
-EXPORT_SYMBOL_GPL(sev_guest_activate);
-
-int sev_guest_decommission(struct sev_data_decommission *data, int *error)
-{
- return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error);
-}
-EXPORT_SYMBOL_GPL(sev_guest_decommission);
-
-int sev_guest_df_flush(int *error)
-{
- return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
-}
-EXPORT_SYMBOL_GPL(sev_guest_df_flush);
-
-static void sev_exit(struct kref *ref)
-{
- struct sev_misc_dev *misc_dev = container_of(ref, struct sev_misc_dev, refcount);
-
- misc_deregister(&misc_dev->misc);
-}
-
-static int sev_misc_init(struct psp_device *psp)
-{
- struct device *dev = psp->dev;
- int ret;
-
- /*
- * SEV feature support can be detected on multiple devices but the SEV
- * FW commands must be issued on the master. During probe, we do not
- * know the master hence we create /dev/sev on the first device probe.
- * sev_do_cmd() finds the right master device to which to issue the
- * command to the firmware.
- */
- if (!misc_dev) {
- struct miscdevice *misc;
-
- misc_dev = devm_kzalloc(dev, sizeof(*misc_dev), GFP_KERNEL);
- if (!misc_dev)
- return -ENOMEM;
-
- misc = &misc_dev->misc;
- misc->minor = MISC_DYNAMIC_MINOR;
- misc->name = DEVICE_NAME;
- misc->fops = &sev_fops;
-
- ret = misc_register(misc);
+ if (!psp_check_tee_support(psp, capability)) {
+ ret = tee_dev_init(psp);
if (ret)
return ret;
-
- kref_init(&misc_dev->refcount);
- } else {
- kref_get(&misc_dev->refcount);
- }
-
- init_waitqueue_head(&psp->sev_int_queue);
- psp->sev_misc = misc_dev;
- dev_dbg(dev, "registered SEV device\n");
-
- return 0;
-}
-
-static int psp_check_sev_support(struct psp_device *psp)
-{
- unsigned int val = ioread32(psp->io_regs + psp->vdata->feature_reg);
-
- /*
-	 * Check for access to the registers. If this read returns
- * 0xffffffff, it's likely that the system is running a broken
- * BIOS which disallows access to the device. Stop here and
- * fail the PSP initialization (but not the load, as the CCP
- * could get properly initialized).
- */
- if (val == 0xffffffff) {
- dev_notice(psp->dev, "psp: unable to access the device: you might be running a broken BIOS.\n");
- return -ENODEV;
- }
-
- if (!(val & 1)) {
- /* Device does not support the SEV feature */
- dev_dbg(psp->dev, "psp does not support SEV\n");
- return -ENODEV;
}
return 0;
@@ -980,6 +136,7 @@ int psp_dev_init(struct sp_device *sp)
{
struct device *dev = sp->dev;
struct psp_device *psp;
+ unsigned int capability;
int ret;
ret = -ENOMEM;
@@ -998,7 +155,11 @@ int psp_dev_init(struct sp_device *sp)
psp->io_regs = sp->io_map;
- ret = psp_check_sev_support(psp);
+ capability = psp_get_capability(psp);
+ if (!capability)
+ goto e_disable;
+
+ ret = psp_check_support(psp, capability);
if (ret)
goto e_disable;
@@ -1013,7 +174,7 @@ int psp_dev_init(struct sp_device *sp)
goto e_err;
}
- ret = sev_misc_init(psp);
+ ret = psp_init(psp, capability);
if (ret)
goto e_irq;
@@ -1049,83 +210,52 @@ void psp_dev_destroy(struct sp_device *sp)
if (!psp)
return;
- if (psp->sev_misc)
- kref_put(&misc_dev->refcount, sev_exit);
+ sev_dev_destroy(psp);
+
+ tee_dev_destroy(psp);
sp_free_psp_irq(sp, psp);
}
-int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
- void *data, int *error)
+void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
+ void *data)
{
- if (!filep || filep->f_op != &sev_fops)
- return -EBADF;
-
- return sev_do_cmd(cmd, data, error);
+ psp->sev_irq_data = data;
+ psp->sev_irq_handler = handler;
}
-EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
-void psp_pci_init(void)
+void psp_clear_sev_irq_handler(struct psp_device *psp)
{
- struct sp_device *sp;
- int error, rc;
-
- sp = sp_get_psp_master_device();
- if (!sp)
- return;
-
- psp_master = sp->psp_data;
-
- psp_timeout = psp_probe_timeout;
+ psp_set_sev_irq_handler(psp, NULL, NULL);
+}
- if (sev_get_api_version())
- goto err;
+void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
+ void *data)
+{
+ psp->tee_irq_data = data;
+ psp->tee_irq_handler = handler;
+}
- /*
- * If platform is not in UNINIT state then firmware upgrade and/or
- * platform INIT command will fail. These command require UNINIT state.
- *
- * In a normal boot we should never run into case where the firmware
- * is not in UNINIT state on boot. But in case of kexec boot, a reboot
- * may not go through a typical shutdown sequence and may leave the
- * firmware in INIT or WORKING state.
- */
+void psp_clear_tee_irq_handler(struct psp_device *psp)
+{
+ psp_set_tee_irq_handler(psp, NULL, NULL);
+}
- if (psp_master->sev_state != SEV_STATE_UNINIT) {
- sev_platform_shutdown(NULL);
- psp_master->sev_state = SEV_STATE_UNINIT;
- }
+struct psp_device *psp_get_master_device(void)
+{
+ struct sp_device *sp = sp_get_psp_master_device();
- if (sev_version_greater_or_equal(0, 15) &&
- sev_update_firmware(psp_master->dev) == 0)
- sev_get_api_version();
+ return sp ? sp->psp_data : NULL;
+}
- /* Initialize the platform */
- rc = sev_platform_init(&error);
- if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
- /*
- * INIT command returned an integrity check failure
- * status code, meaning that firmware load and
- * validation of SEV related persistent data has
- * failed and persistent state has been erased.
- * Retrying INIT command here should succeed.
- */
- dev_dbg(sp->dev, "SEV: retrying INIT command");
- rc = sev_platform_init(&error);
- }
+void psp_pci_init(void)
+{
+ psp_master = psp_get_master_device();
- if (rc) {
- dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
+ if (!psp_master)
return;
- }
-
- dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major,
- psp_master->api_minor, psp_master->build);
-
- return;
-err:
- psp_master = NULL;
+ sev_pci_init();
}
void psp_pci_exit(void)
@@ -1133,5 +263,5 @@ void psp_pci_exit(void)
if (!psp_master)
return;
- sev_platform_shutdown(NULL);
+ sev_pci_exit();
}
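After this split, psp-dev.c only reads the feature register and dispatches; the two capability bits it tests map to the sub-devices as follows (descriptive macro names only, the patch tests the raw masks 1 and 2 directly):

	#define EXAMPLE_PSP_CAP_SEV	BIT(0)	/* psp_check_sev_support(): capability & 1 */
	#define EXAMPLE_PSP_CAP_TEE	BIT(1)	/* psp_check_tee_support(): capability & 2 */

A capability value of 0, including the all-ones read from a broken BIOS, aborts PSP initialization without failing the CCP load.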
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index dd516b35ba86..ef38e4135d81 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -2,7 +2,7 @@
/*
* AMD Platform Security Processor (PSP) interface driver
*
- * Copyright (C) 2017-2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2017-2019 Advanced Micro Devices, Inc.
*
* Author: Brijesh Singh <brijesh.singh@amd.com>
*/
@@ -11,35 +11,20 @@
#define __PSP_DEV_H__
#include <linux/device.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
#include <linux/list.h>
-#include <linux/wait.h>
-#include <linux/dmapool.h>
-#include <linux/hw_random.h>
-#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/interrupt.h>
-#include <linux/irqreturn.h>
-#include <linux/dmaengine.h>
-#include <linux/psp-sev.h>
-#include <linux/miscdevice.h>
-#include <linux/capability.h>
#include "sp-dev.h"
-#define PSP_CMD_COMPLETE BIT(1)
-
-#define PSP_CMDRESP_CMD_SHIFT 16
-#define PSP_CMDRESP_IOC BIT(0)
#define PSP_CMDRESP_RESP BIT(31)
#define PSP_CMDRESP_ERR_MASK 0xffff
#define MAX_PSP_NAME_LEN 16
-struct sev_misc_dev {
- struct kref refcount;
- struct miscdevice misc;
-};
+extern struct psp_device *psp_master;
+
+typedef void (*psp_irq_handler_t)(int, void *, unsigned int);
struct psp_device {
struct list_head entry;
@@ -52,16 +37,24 @@ struct psp_device {
void __iomem *io_regs;
- int sev_state;
- unsigned int sev_int_rcvd;
- wait_queue_head_t sev_int_queue;
- struct sev_misc_dev *sev_misc;
- struct sev_user_data_status status_cmd_buf;
- struct sev_data_init init_cmd_buf;
+ psp_irq_handler_t sev_irq_handler;
+ void *sev_irq_data;
+
+ psp_irq_handler_t tee_irq_handler;
+ void *tee_irq_data;
- u8 api_major;
- u8 api_minor;
- u8 build;
+ void *sev_data;
+ void *tee_data;
};
+void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
+ void *data);
+void psp_clear_sev_irq_handler(struct psp_device *psp);
+
+void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
+ void *data);
+void psp_clear_tee_irq_handler(struct psp_device *psp);
+
+struct psp_device *psp_get_master_device(void);
+
#endif /* __PSP_DEV_H */
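The helpers declared above let each sub-device hook the shared PSP interrupt instead of psp-dev.c decoding SEV command completion itself. A hedged sketch of how a sub-device could wire itself up during its *_dev_init() (my_dev, my_irq_handler and MY_CMD_COMPLETE are illustrative names; the real SEV side uses SEV_CMD_COMPLETE and lives in sev-dev.c below):

	static void my_irq_handler(int irq, void *data, unsigned int status)
	{
		struct my_dev *d = data;

		if (status & MY_CMD_COMPLETE) {		/* cf. SEV_CMD_COMPLETE */
			d->int_rcvd = 1;
			wake_up(&d->int_queue);
		}
	}

	static int my_dev_init(struct psp_device *psp)
	{
		struct my_dev *d = devm_kzalloc(psp->dev, sizeof(*d), GFP_KERNEL);

		if (!d)
			return -ENOMEM;

		init_waitqueue_head(&d->int_queue);
		psp_set_sev_irq_handler(psp, my_irq_handler, d);
		psp->sev_data = d;

		return 0;
	}

psp_irq_handler() in psp-dev.c then forwards the raw interrupt status to whichever handlers are registered and clears the status afterwards.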
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
new file mode 100644
index 000000000000..e467860f797d
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -0,0 +1,1077 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Secure Encrypted Virtualization (SEV) interface
+ *
+ * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/ccp.h>
+#include <linux/firmware.h>
+
+#include <asm/smp.h>
+
+#include "psp-dev.h"
+#include "sev-dev.h"
+
+#define DEVICE_NAME "sev"
+#define SEV_FW_FILE "amd/sev.fw"
+#define SEV_FW_NAME_SIZE 64
+
+static DEFINE_MUTEX(sev_cmd_mutex);
+static struct sev_misc_dev *misc_dev;
+
+static int psp_cmd_timeout = 100;
+module_param(psp_cmd_timeout, int, 0644);
+MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");
+
+static int psp_probe_timeout = 5;
+module_param(psp_probe_timeout, int, 0644);
+MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
+
+static bool psp_dead;
+static int psp_timeout;
+
+static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
+{
+ struct sev_device *sev = psp_master->sev_data;
+
+ if (sev->api_major > maj)
+ return true;
+
+ if (sev->api_major == maj && sev->api_minor >= min)
+ return true;
+
+ return false;
+}
+
+static void sev_irq_handler(int irq, void *data, unsigned int status)
+{
+ struct sev_device *sev = data;
+ int reg;
+
+ /* Check if it is command completion: */
+ if (!(status & SEV_CMD_COMPLETE))
+ return;
+
+ /* Check if it is SEV command completion: */
+ reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
+ if (reg & PSP_CMDRESP_RESP) {
+ sev->int_rcvd = 1;
+ wake_up(&sev->int_queue);
+ }
+}
+
+static int sev_wait_cmd_ioc(struct sev_device *sev,
+ unsigned int *reg, unsigned int timeout)
+{
+ int ret;
+
+ ret = wait_event_timeout(sev->int_queue,
+ sev->int_rcvd, timeout * HZ);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ *reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
+
+ return 0;
+}
+
+static int sev_cmd_buffer_len(int cmd)
+{
+ switch (cmd) {
+ case SEV_CMD_INIT: return sizeof(struct sev_data_init);
+ case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status);
+ case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr);
+ case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import);
+ case SEV_CMD_PDH_CERT_EXPORT: return sizeof(struct sev_data_pdh_cert_export);
+ case SEV_CMD_LAUNCH_START: return sizeof(struct sev_data_launch_start);
+ case SEV_CMD_LAUNCH_UPDATE_DATA: return sizeof(struct sev_data_launch_update_data);
+ case SEV_CMD_LAUNCH_UPDATE_VMSA: return sizeof(struct sev_data_launch_update_vmsa);
+ case SEV_CMD_LAUNCH_FINISH: return sizeof(struct sev_data_launch_finish);
+ case SEV_CMD_LAUNCH_MEASURE: return sizeof(struct sev_data_launch_measure);
+ case SEV_CMD_ACTIVATE: return sizeof(struct sev_data_activate);
+ case SEV_CMD_DEACTIVATE: return sizeof(struct sev_data_deactivate);
+ case SEV_CMD_DECOMMISSION: return sizeof(struct sev_data_decommission);
+ case SEV_CMD_GUEST_STATUS: return sizeof(struct sev_data_guest_status);
+ case SEV_CMD_DBG_DECRYPT: return sizeof(struct sev_data_dbg);
+ case SEV_CMD_DBG_ENCRYPT: return sizeof(struct sev_data_dbg);
+ case SEV_CMD_SEND_START: return sizeof(struct sev_data_send_start);
+ case SEV_CMD_SEND_UPDATE_DATA: return sizeof(struct sev_data_send_update_data);
+ case SEV_CMD_SEND_UPDATE_VMSA: return sizeof(struct sev_data_send_update_vmsa);
+ case SEV_CMD_SEND_FINISH: return sizeof(struct sev_data_send_finish);
+ case SEV_CMD_RECEIVE_START: return sizeof(struct sev_data_receive_start);
+ case SEV_CMD_RECEIVE_FINISH: return sizeof(struct sev_data_receive_finish);
+ case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data);
+ case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa);
+ case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret);
+ case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware);
+ case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
+ default: return 0;
+ }
+
+ return 0;
+}
+
+static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
+{
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
+ unsigned int phys_lsb, phys_msb;
+ unsigned int reg, ret = 0;
+
+ if (!psp || !psp->sev_data)
+ return -ENODEV;
+
+ if (psp_dead)
+ return -EBUSY;
+
+ sev = psp->sev_data;
+
+ /* Get the physical address of the command buffer */
+ phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
+ phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
+
+ dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
+ cmd, phys_msb, phys_lsb, psp_timeout);
+
+ print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
+ sev_cmd_buffer_len(cmd), false);
+
+ iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg);
+ iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);
+
+ sev->int_rcvd = 0;
+
+ reg = cmd;
+ reg <<= SEV_CMDRESP_CMD_SHIFT;
+ reg |= SEV_CMDRESP_IOC;
+ iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);
+
+ /* wait for command completion */
+ ret = sev_wait_cmd_ioc(sev, &reg, psp_timeout);
+ if (ret) {
+ if (psp_ret)
+ *psp_ret = 0;
+
+ dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd);
+ psp_dead = true;
+
+ return ret;
+ }
+
+ psp_timeout = psp_cmd_timeout;
+
+ if (psp_ret)
+ *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
+
+ if (reg & PSP_CMDRESP_ERR_MASK) {
+ dev_dbg(sev->dev, "sev command %#x failed (%#010x)\n",
+ cmd, reg & PSP_CMDRESP_ERR_MASK);
+ ret = -EIO;
+ }
+
+ print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
+ sev_cmd_buffer_len(cmd), false);
+
+ return ret;
+}
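[Editorial aside, not part of this patch: a worked example of the mailbox encoding above, using an illustrative command id. Kicking off a command with id 0x4 writes (0x4 << SEV_CMDRESP_CMD_SHIFT) | SEV_CMDRESP_IOC = 0x00040001 to the cmdresp register. On completion the register reads back with PSP_CMDRESP_RESP (bit 31) set; the driver only inspects that bit and the low 16 bits, so a success looks like 0x8004_0000-ish with bits [15:0] clear, while a failure carries the SEV status code in those low 16 bits.]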
+
+static int sev_do_cmd(int cmd, void *data, int *psp_ret)
+{
+ int rc;
+
+ mutex_lock(&sev_cmd_mutex);
+ rc = __sev_do_cmd_locked(cmd, data, psp_ret);
+ mutex_unlock(&sev_cmd_mutex);
+
+ return rc;
+}
+
+static int __sev_platform_init_locked(int *error)
+{
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
+ int rc = 0;
+
+ if (!psp || !psp->sev_data)
+ return -ENODEV;
+
+ sev = psp->sev_data;
+
+ if (sev->state == SEV_STATE_INIT)
+ return 0;
+
+ rc = __sev_do_cmd_locked(SEV_CMD_INIT, &sev->init_cmd_buf, error);
+ if (rc)
+ return rc;
+
+ sev->state = SEV_STATE_INIT;
+
+ /* Prepare for first SEV guest launch after INIT */
+ wbinvd_on_all_cpus();
+ rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
+ if (rc)
+ return rc;
+
+ dev_dbg(sev->dev, "SEV firmware initialized\n");
+
+ return rc;
+}
+
+int sev_platform_init(int *error)
+{
+ int rc;
+
+ mutex_lock(&sev_cmd_mutex);
+ rc = __sev_platform_init_locked(error);
+ mutex_unlock(&sev_cmd_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(sev_platform_init);
+
+static int __sev_platform_shutdown_locked(int *error)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ int ret;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
+ if (ret)
+ return ret;
+
+ sev->state = SEV_STATE_UNINIT;
+ dev_dbg(sev->dev, "SEV firmware shutdown\n");
+
+ return ret;
+}
+
+static int sev_platform_shutdown(int *error)
+{
+ int rc;
+
+ mutex_lock(&sev_cmd_mutex);
+ rc = __sev_platform_shutdown_locked(NULL);
+ mutex_unlock(&sev_cmd_mutex);
+
+ return rc;
+}
+
+static int sev_get_platform_state(int *state, int *error)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ int rc;
+
+ rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS,
+ &sev->status_cmd_buf, error);
+ if (rc)
+ return rc;
+
+ *state = sev->status_cmd_buf.state;
+ return rc;
+}
+
+static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
+{
+ int state, rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+	/*
+	 * The SEV spec requires that FACTORY_RESET must be issued in
+	 * UNINIT state. Before we go further, let's check if any guest is
+	 * active.
+	 *
+	 * If the FW is in WORKING state then deny the request; otherwise
+	 * issue a SHUTDOWN command (INIT -> UNINIT) before issuing the
+	 * FACTORY_RESET.
+	 */
+ rc = sev_get_platform_state(&state, &argp->error);
+ if (rc)
+ return rc;
+
+ if (state == SEV_STATE_WORKING)
+ return -EBUSY;
+
+ if (state == SEV_STATE_INIT) {
+ rc = __sev_platform_shutdown_locked(&argp->error);
+ if (rc)
+ return rc;
+ }
+
+ return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
+}
+
+static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_status *data = &sev->status_cmd_buf;
+ int ret;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)argp->data, data, sizeof(*data)))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ int rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (sev->state == SEV_STATE_UNINIT) {
+ rc = __sev_platform_init_locked(&argp->error);
+ if (rc)
+ return rc;
+ }
+
+ return __sev_do_cmd_locked(cmd, NULL, &argp->error);
+}
+
+static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_pek_csr input;
+ struct sev_data_pek_csr *data;
+ void *blob = NULL;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+	/* Userspace wants to query the CSR length. */
+ if (!input.address || !input.length)
+ goto cmd;
+
+ /* allocate a physically contiguous buffer to store the CSR blob */
+ if (!access_ok(input.address, input.length) ||
+ input.length > SEV_FW_BLOB_MAX_SIZE) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ blob = kmalloc(input.length, GFP_KERNEL);
+ if (!blob) {
+ ret = -ENOMEM;
+ goto e_free;
+ }
+
+ data->address = __psp_pa(blob);
+ data->len = input.length;
+
+cmd:
+ if (sev->state == SEV_STATE_UNINIT) {
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ goto e_free_blob;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error);
+
+	/* If we queried the CSR length, the FW responded with the required length. */
+ input.length = data->len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free_blob;
+ }
+
+ if (blob) {
+ if (copy_to_user((void __user *)input.address, blob, input.length))
+ ret = -EFAULT;
+ }
+
+e_free_blob:
+ kfree(blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+void *psp_copy_user_blob(u64 __user uaddr, u32 len)
+{
+ if (!uaddr || !len)
+ return ERR_PTR(-EINVAL);
+
+ /* verify that blob length does not exceed our limit */
+ if (len > SEV_FW_BLOB_MAX_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ return memdup_user((void __user *)(uintptr_t)uaddr, len);
+}
+EXPORT_SYMBOL_GPL(psp_copy_user_blob);
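[Editorial aside, not part of this patch: a sketch of the typical in-kernel consumer of this helper. KVM's SEV code uses it roughly as below when it needs a userspace-supplied blob in kernel memory; params.uaddr and params.len are placeholders for the caller's own fields.]

	void *blob;

	blob = psp_copy_user_blob(params.uaddr, params.len);
	if (IS_ERR(blob))
		return PTR_ERR(blob);
	/* ... hand __psp_pa(blob) to the firmware, kfree(blob) when done ... */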
+
+static int sev_get_api_version(void)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_status *status;
+ int error = 0, ret;
+
+ status = &sev->status_cmd_buf;
+ ret = sev_platform_status(status, &error);
+ if (ret) {
+ dev_err(sev->dev,
+ "SEV: failed to get status. Error: %#x\n", error);
+ return 1;
+ }
+
+ sev->api_major = status->api_major;
+ sev->api_minor = status->api_minor;
+ sev->build = status->build;
+ sev->state = status->state;
+
+ return 0;
+}
+
+static int sev_get_firmware(struct device *dev,
+ const struct firmware **firmware)
+{
+ char fw_name_specific[SEV_FW_NAME_SIZE];
+ char fw_name_subset[SEV_FW_NAME_SIZE];
+
+ snprintf(fw_name_specific, sizeof(fw_name_specific),
+ "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+
+ snprintf(fw_name_subset, sizeof(fw_name_subset),
+ "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
+ boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);
+
+ /* Check for SEV FW for a particular model.
+ * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
+ *
+ * or
+ *
+ * Check for SEV FW common to a subset of models.
+ * Ex. amd_sev_fam17h_model0xh.sbin for
+ * Family 17h Model 00h -- Family 17h Model 0Fh
+ *
+ * or
+ *
+	 * Fall back to the generic name: sev.fw
+ */
+ if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
+ (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
+ (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
+ return 0;
+
+ return -ENOENT;
+}
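[Editorial aside, not part of this patch: to make the name construction above concrete for a hypothetical Family 17h (0x17), Model 0x31 part, the driver would first try "amd/amd_sev_fam17h_model31h.sbin", then the subset name "amd/amd_sev_fam17h_model3xh.sbin" (since (0x31 & 0xf0) >> 4 = 3), and finally the generic "amd/sev.fw".]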
+
+/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
+static int sev_update_firmware(struct device *dev)
+{
+ struct sev_data_download_firmware *data;
+ const struct firmware *firmware;
+ int ret, error, order;
+ struct page *p;
+ u64 data_size;
+
+ if (sev_get_firmware(dev, &firmware) == -ENOENT) {
+ dev_dbg(dev, "No SEV firmware file present\n");
+ return -1;
+ }
+
+	/*
+	 * The SEV FW expects the physical address given to it to be
+	 * 32-byte aligned. The allocated memory has the command structure
+	 * placed at the beginning, followed by the firmware image being
+	 * passed to the SEV FW. Allocate enough memory for the data
+	 * structure + alignment padding + SEV FW image.
+	 */
+ data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);
+
+ order = get_order(firmware->size + data_size);
+ p = alloc_pages(GFP_KERNEL, order);
+ if (!p) {
+ ret = -1;
+ goto fw_err;
+ }
+
+ /*
+ * Copy firmware data to a kernel allocated contiguous
+ * memory region.
+ */
+ data = page_address(p);
+ memcpy(page_address(p) + data_size, firmware->data, firmware->size);
+
+ data->address = __psp_pa(page_address(p) + data_size);
+ data->len = firmware->size;
+
+ ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
+ if (ret)
+ dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
+ else
+ dev_info(dev, "SEV firmware update successful\n");
+
+ __free_pages(p, order);
+
+fw_err:
+ release_firmware(firmware);
+
+ return ret;
+}
+
+static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_pek_cert_import input;
+ struct sev_data_pek_cert_import *data;
+ void *pek_blob, *oca_blob;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+	/* copy the PEK certificate blob from userspace */
+ pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
+ if (IS_ERR(pek_blob)) {
+ ret = PTR_ERR(pek_blob);
+ goto e_free;
+ }
+
+ data->pek_cert_address = __psp_pa(pek_blob);
+ data->pek_cert_len = input.pek_cert_len;
+
+	/* copy the OCA certificate blob from userspace */
+ oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
+ if (IS_ERR(oca_blob)) {
+ ret = PTR_ERR(oca_blob);
+ goto e_free_pek;
+ }
+
+ data->oca_cert_address = __psp_pa(oca_blob);
+ data->oca_cert_len = input.oca_cert_len;
+
+ /* If platform is not in INIT state then transition it to INIT */
+ if (sev->state != SEV_STATE_INIT) {
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ goto e_free_oca;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error);
+
+e_free_oca:
+ kfree(oca_blob);
+e_free_pek:
+ kfree(pek_blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
+{
+ struct sev_user_data_get_id2 input;
+ struct sev_data_get_id *data;
+ void *id_blob = NULL;
+ int ret;
+
+ /* SEV GET_ID is available from SEV API v0.16 and up */
+ if (!sev_version_greater_or_equal(0, 16))
+ return -ENOTSUPP;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ /* Check if we have write access to the userspace buffer */
+ if (input.address &&
+ input.length &&
+ !access_ok(input.address, input.length))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (input.address && input.length) {
+ id_blob = kmalloc(input.length, GFP_KERNEL);
+ if (!id_blob) {
+ kfree(data);
+ return -ENOMEM;
+ }
+
+ data->address = __psp_pa(id_blob);
+ data->len = input.length;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
+
+ /*
+ * Firmware will return the length of the ID value (either the minimum
+ * required length or the actual length written), return it to the user.
+ */
+ input.length = data->len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ if (id_blob) {
+ if (copy_to_user((void __user *)input.address,
+ id_blob, data->len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+ }
+
+e_free:
+ kfree(id_blob);
+ kfree(data);
+
+ return ret;
+}
+
+static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
+{
+ struct sev_data_get_id *data;
+ u64 data_size, user_size;
+ void *id_blob, *mem;
+ int ret;
+
+ /* SEV GET_ID available from SEV API v0.16 and up */
+ if (!sev_version_greater_or_equal(0, 16))
+ return -ENOTSUPP;
+
+ /* SEV FW expects the buffer it fills with the ID to be
+ * 8-byte aligned. Memory allocated should be enough to
+ * hold data structure + alignment padding + memory
+ * where SEV FW writes the ID.
+ */
+ data_size = ALIGN(sizeof(struct sev_data_get_id), 8);
+ user_size = sizeof(struct sev_user_data_get_id);
+
+ mem = kzalloc(data_size + user_size, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ data = mem;
+ id_blob = mem + data_size;
+
+ data->address = __psp_pa(id_blob);
+ data->len = user_size;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
+ if (!ret) {
+ if (copy_to_user((void __user *)argp->data, id_blob, data->len))
+ ret = -EFAULT;
+ }
+
+ kfree(mem);
+
+ return ret;
+}
+
+static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_pdh_cert_export input;
+ void *pdh_blob = NULL, *cert_blob = NULL;
+ struct sev_data_pdh_cert_export *data;
+ int ret;
+
+ /* If platform is not in INIT state then transition it to INIT. */
+ if (sev->state != SEV_STATE_INIT) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ return ret;
+ }
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* Userspace wants to query the certificate length. */
+ if (!input.pdh_cert_address ||
+ !input.pdh_cert_len ||
+ !input.cert_chain_address)
+ goto cmd;
+
+ /* Allocate a physically contiguous buffer to store the PDH blob. */
+ if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) ||
+ !access_ok(input.pdh_cert_address, input.pdh_cert_len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ /* Allocate a physically contiguous buffer to store the cert chain blob. */
+ if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) ||
+ !access_ok(input.cert_chain_address, input.cert_chain_len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL);
+ if (!pdh_blob) {
+ ret = -ENOMEM;
+ goto e_free;
+ }
+
+ data->pdh_cert_address = __psp_pa(pdh_blob);
+ data->pdh_cert_len = input.pdh_cert_len;
+
+ cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL);
+ if (!cert_blob) {
+ ret = -ENOMEM;
+ goto e_free_pdh;
+ }
+
+ data->cert_chain_address = __psp_pa(cert_blob);
+ data->cert_chain_len = input.cert_chain_len;
+
+cmd:
+ ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error);
+
+	/* If we only queried the lengths, the FW responded with the required lengths. */
+ input.cert_chain_len = data->cert_chain_len;
+ input.pdh_cert_len = data->pdh_cert_len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free_cert;
+ }
+
+ if (pdh_blob) {
+ if (copy_to_user((void __user *)input.pdh_cert_address,
+ pdh_blob, input.pdh_cert_len)) {
+ ret = -EFAULT;
+ goto e_free_cert;
+ }
+ }
+
+ if (cert_blob) {
+ if (copy_to_user((void __user *)input.cert_chain_address,
+ cert_blob, input.cert_chain_len))
+ ret = -EFAULT;
+ }
+
+e_free_cert:
+ kfree(cert_blob);
+e_free_pdh:
+ kfree(pdh_blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct sev_issue_cmd input;
+ int ret = -EFAULT;
+
+ if (!psp_master || !psp_master->sev_data)
+ return -ENODEV;
+
+ if (ioctl != SEV_ISSUE_CMD)
+ return -EINVAL;
+
+ if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd)))
+ return -EFAULT;
+
+ if (input.cmd > SEV_MAX)
+ return -EINVAL;
+
+ mutex_lock(&sev_cmd_mutex);
+
+ switch (input.cmd) {
+
+ case SEV_FACTORY_RESET:
+ ret = sev_ioctl_do_reset(&input);
+ break;
+ case SEV_PLATFORM_STATUS:
+ ret = sev_ioctl_do_platform_status(&input);
+ break;
+ case SEV_PEK_GEN:
+ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input);
+ break;
+ case SEV_PDH_GEN:
+ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input);
+ break;
+ case SEV_PEK_CSR:
+ ret = sev_ioctl_do_pek_csr(&input);
+ break;
+ case SEV_PEK_CERT_IMPORT:
+ ret = sev_ioctl_do_pek_import(&input);
+ break;
+ case SEV_PDH_CERT_EXPORT:
+ ret = sev_ioctl_do_pdh_export(&input);
+ break;
+ case SEV_GET_ID:
+ pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
+ ret = sev_ioctl_do_get_id(&input);
+ break;
+ case SEV_GET_ID2:
+ ret = sev_ioctl_do_get_id2(&input);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
+ ret = -EFAULT;
+out:
+ mutex_unlock(&sev_cmd_mutex);
+
+ return ret;
+}
+
+static const struct file_operations sev_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sev_ioctl,
+};
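[Editorial aside, not part of this patch: a minimal userspace sketch of how the ioctl interface above is driven, assuming the SEV uapi exported in <linux/psp-sev.h>; error handling is trimmed.]

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/psp-sev.h>

	int main(void)
	{
		struct sev_user_data_status status = {};
		struct sev_issue_cmd arg = {
			.cmd  = SEV_PLATFORM_STATUS,
			.data = (__u64)(unsigned long)&status,
		};
		int fd = open("/dev/sev", O_RDWR);

		if (fd < 0 || ioctl(fd, SEV_ISSUE_CMD, &arg) < 0) {
			/* on command failure arg.error holds the SEV status code */
			perror("SEV_PLATFORM_STATUS");
			return 1;
		}

		printf("SEV API %u.%u build %u state %u\n",
		       status.api_major, status.api_minor, status.build, status.state);
		close(fd);
		return 0;
	}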
+
+int sev_platform_status(struct sev_user_data_status *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_platform_status);
+
+int sev_guest_deactivate(struct sev_data_deactivate *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_deactivate);
+
+int sev_guest_activate(struct sev_data_activate *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_ACTIVATE, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_activate);
+
+int sev_guest_decommission(struct sev_data_decommission *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_decommission);
+
+int sev_guest_df_flush(int *error)
+{
+ return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_df_flush);
+
+static void sev_exit(struct kref *ref)
+{
+ struct sev_misc_dev *misc_dev = container_of(ref, struct sev_misc_dev, refcount);
+
+ misc_deregister(&misc_dev->misc);
+}
+
+static int sev_misc_init(struct sev_device *sev)
+{
+ struct device *dev = sev->dev;
+ int ret;
+
+	/*
+	 * SEV feature support can be detected on multiple devices, but the
+	 * SEV FW commands must be issued on the master. During probe we do
+	 * not yet know the master, hence we create /dev/sev on the first
+	 * device probe. sev_do_cmd() then finds the right master device on
+	 * which to issue the command to the firmware.
+	 */
+ if (!misc_dev) {
+ struct miscdevice *misc;
+
+ misc_dev = devm_kzalloc(dev, sizeof(*misc_dev), GFP_KERNEL);
+ if (!misc_dev)
+ return -ENOMEM;
+
+ misc = &misc_dev->misc;
+ misc->minor = MISC_DYNAMIC_MINOR;
+ misc->name = DEVICE_NAME;
+ misc->fops = &sev_fops;
+
+ ret = misc_register(misc);
+ if (ret)
+ return ret;
+
+ kref_init(&misc_dev->refcount);
+ } else {
+ kref_get(&misc_dev->refcount);
+ }
+
+ init_waitqueue_head(&sev->int_queue);
+ sev->misc = misc_dev;
+ dev_dbg(dev, "registered SEV device\n");
+
+ return 0;
+}
+
+int sev_dev_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ struct sev_device *sev;
+ int ret = -ENOMEM;
+
+ sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL);
+ if (!sev)
+ goto e_err;
+
+ psp->sev_data = sev;
+
+ sev->dev = dev;
+ sev->psp = psp;
+
+ sev->io_regs = psp->io_regs;
+
+ sev->vdata = (struct sev_vdata *)psp->vdata->sev;
+ if (!sev->vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "sev: missing driver data\n");
+ goto e_err;
+ }
+
+ psp_set_sev_irq_handler(psp, sev_irq_handler, sev);
+
+ ret = sev_misc_init(sev);
+ if (ret)
+ goto e_irq;
+
+ dev_notice(dev, "sev enabled\n");
+
+ return 0;
+
+e_irq:
+ psp_clear_sev_irq_handler(psp);
+e_err:
+ psp->sev_data = NULL;
+
+ dev_notice(dev, "sev initialization failed\n");
+
+ return ret;
+}
+
+void sev_dev_destroy(struct psp_device *psp)
+{
+ struct sev_device *sev = psp->sev_data;
+
+ if (!sev)
+ return;
+
+ if (sev->misc)
+ kref_put(&misc_dev->refcount, sev_exit);
+
+ psp_clear_sev_irq_handler(psp);
+}
+
+int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
+ void *data, int *error)
+{
+ if (!filep || filep->f_op != &sev_fops)
+ return -EBADF;
+
+ return sev_do_cmd(cmd, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
+
+void sev_pci_init(void)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ int error, rc;
+
+ if (!sev)
+ return;
+
+ psp_timeout = psp_probe_timeout;
+
+ if (sev_get_api_version())
+ goto err;
+
+	/*
+	 * If the platform is not in UNINIT state then the firmware upgrade
+	 * and/or the platform INIT command will fail; these commands require
+	 * the UNINIT state.
+	 *
+	 * In a normal boot we should never run into a case where the firmware
+	 * is not in UNINIT state. But in a kexec boot, the reboot may not go
+	 * through a typical shutdown sequence and may leave the firmware in
+	 * the INIT or WORKING state.
+	 */
+
+ if (sev->state != SEV_STATE_UNINIT) {
+ sev_platform_shutdown(NULL);
+ sev->state = SEV_STATE_UNINIT;
+ }
+
+ if (sev_version_greater_or_equal(0, 15) &&
+ sev_update_firmware(sev->dev) == 0)
+ sev_get_api_version();
+
+ /* Initialize the platform */
+ rc = sev_platform_init(&error);
+ if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
+ /*
+ * INIT command returned an integrity check failure
+ * status code, meaning that firmware load and
+ * validation of SEV related persistent data has
+ * failed and persistent state has been erased.
+ * Retrying INIT command here should succeed.
+ */
+ dev_dbg(sev->dev, "SEV: retrying INIT command");
+ rc = sev_platform_init(&error);
+ }
+
+ if (rc) {
+ dev_err(sev->dev, "SEV: failed to INIT error %#x\n", error);
+ return;
+ }
+
+ dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major,
+ sev->api_minor, sev->build);
+
+ return;
+
+err:
+ psp_master->sev_data = NULL;
+}
+
+void sev_pci_exit(void)
+{
+ if (!psp_master->sev_data)
+ return;
+
+ sev_platform_shutdown(NULL);
+}
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
new file mode 100644
index 000000000000..dd5c4fe82914
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Platform Security Processor (PSP) interface driver
+ *
+ * Copyright (C) 2017-2019 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ */
+
+#ifndef __SEV_DEV_H__
+#define __SEV_DEV_H__
+
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/dmapool.h>
+#include <linux/hw_random.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/dmaengine.h>
+#include <linux/psp-sev.h>
+#include <linux/miscdevice.h>
+#include <linux/capability.h>
+
+#define SEV_CMD_COMPLETE BIT(1)
+#define SEV_CMDRESP_CMD_SHIFT 16
+#define SEV_CMDRESP_IOC BIT(0)
+
+struct sev_misc_dev {
+ struct kref refcount;
+ struct miscdevice misc;
+};
+
+struct sev_device {
+ struct device *dev;
+ struct psp_device *psp;
+
+ void __iomem *io_regs;
+
+ struct sev_vdata *vdata;
+
+ int state;
+ unsigned int int_rcvd;
+ wait_queue_head_t int_queue;
+ struct sev_misc_dev *misc;
+ struct sev_user_data_status status_cmd_buf;
+ struct sev_data_init init_cmd_buf;
+
+ u8 api_major;
+ u8 api_minor;
+ u8 build;
+};
+
+int sev_dev_init(struct psp_device *psp);
+void sev_dev_destroy(struct psp_device *psp);
+
+void sev_pci_init(void);
+void sev_pci_exit(void);
+
+#endif /* __SEV_DEV_H */
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 53c12562d31e..423594608ad1 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -2,7 +2,7 @@
/*
* AMD Secure Processor driver
*
- * Copyright (C) 2017-2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2017-2019 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -39,10 +39,23 @@ struct ccp_vdata {
const unsigned int rsamax;
};
-struct psp_vdata {
+struct sev_vdata {
const unsigned int cmdresp_reg;
const unsigned int cmdbuff_addr_lo_reg;
const unsigned int cmdbuff_addr_hi_reg;
+};
+
+struct tee_vdata {
+ const unsigned int cmdresp_reg;
+ const unsigned int cmdbuff_addr_lo_reg;
+ const unsigned int cmdbuff_addr_hi_reg;
+ const unsigned int ring_wptr_reg;
+ const unsigned int ring_rptr_reg;
+};
+
+struct psp_vdata {
+ const struct sev_vdata *sev;
+ const struct tee_vdata *tee;
const unsigned int feature_reg;
const unsigned int inten_reg;
const unsigned int intsts_reg;
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index b29d2e663e10..56c1f61c0f84 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -2,7 +2,7 @@
/*
* AMD Secure Processor device driver
*
- * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2019 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -262,19 +262,42 @@ static int sp_pci_resume(struct pci_dev *pdev)
#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
-static const struct psp_vdata pspv1 = {
+static const struct sev_vdata sevv1 = {
.cmdresp_reg = 0x10580,
.cmdbuff_addr_lo_reg = 0x105e0,
.cmdbuff_addr_hi_reg = 0x105e4,
+};
+
+static const struct sev_vdata sevv2 = {
+ .cmdresp_reg = 0x10980,
+ .cmdbuff_addr_lo_reg = 0x109e0,
+ .cmdbuff_addr_hi_reg = 0x109e4,
+};
+
+static const struct tee_vdata teev1 = {
+ .cmdresp_reg = 0x10544,
+ .cmdbuff_addr_lo_reg = 0x10548,
+ .cmdbuff_addr_hi_reg = 0x1054c,
+ .ring_wptr_reg = 0x10550,
+ .ring_rptr_reg = 0x10554,
+};
+
+static const struct psp_vdata pspv1 = {
+ .sev = &sevv1,
.feature_reg = 0x105fc,
.inten_reg = 0x10610,
.intsts_reg = 0x10614,
};
static const struct psp_vdata pspv2 = {
- .cmdresp_reg = 0x10980,
- .cmdbuff_addr_lo_reg = 0x109e0,
- .cmdbuff_addr_hi_reg = 0x109e4,
+ .sev = &sevv2,
+ .feature_reg = 0x109fc,
+ .inten_reg = 0x10690,
+ .intsts_reg = 0x10694,
+};
+
+static const struct psp_vdata pspv3 = {
+ .tee = &teev1,
.feature_reg = 0x109fc,
.inten_reg = 0x10690,
.intsts_reg = 0x10694,
@@ -312,12 +335,22 @@ static const struct sp_dev_vdata dev_vdata[] = {
.psp_vdata = &pspv2,
#endif
},
+ { /* 4 */
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv5a,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &pspv3,
+#endif
+ },
};
static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] },
{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
+ { PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c
new file mode 100644
index 000000000000..5e697a90ea7f
--- /dev/null
+++ b/drivers/crypto/ccp/tee-dev.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: MIT
+/*
+ * AMD Trusted Execution Environment (TEE) interface
+ *
+ * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
+ * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
+ *
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/psp-sev.h>
+#include <linux/psp-tee.h>
+
+#include "psp-dev.h"
+#include "tee-dev.h"
+
+static bool psp_dead;
+
+static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size)
+{
+ struct ring_buf_manager *rb_mgr = &tee->rb_mgr;
+ void *start_addr;
+
+ if (!ring_size)
+ return -EINVAL;
+
+	/* We need the actual physical address instead of a DMA address,
+	 * since the Trusted OS running on the AMD Secure Processor will
+	 * map this region.
+	 */
+ start_addr = (void *)__get_free_pages(GFP_KERNEL, get_order(ring_size));
+ if (!start_addr)
+ return -ENOMEM;
+
+ rb_mgr->ring_start = start_addr;
+ rb_mgr->ring_size = ring_size;
+ rb_mgr->ring_pa = __psp_pa(start_addr);
+ mutex_init(&rb_mgr->mutex);
+
+ return 0;
+}
+
+static void tee_free_ring(struct psp_tee_device *tee)
+{
+ struct ring_buf_manager *rb_mgr = &tee->rb_mgr;
+
+ if (!rb_mgr->ring_start)
+ return;
+
+ free_pages((unsigned long)rb_mgr->ring_start,
+ get_order(rb_mgr->ring_size));
+
+ rb_mgr->ring_start = NULL;
+ rb_mgr->ring_size = 0;
+ rb_mgr->ring_pa = 0;
+ mutex_destroy(&rb_mgr->mutex);
+}
+
+static int tee_wait_cmd_poll(struct psp_tee_device *tee, unsigned int timeout,
+ unsigned int *reg)
+{
+ /* ~10ms sleep per loop => nloop = timeout * 100 */
+ int nloop = timeout * 100;
+
+ while (--nloop) {
+ *reg = ioread32(tee->io_regs + tee->vdata->cmdresp_reg);
+ if (*reg & PSP_CMDRESP_RESP)
+ return 0;
+
+ usleep_range(10000, 10100);
+ }
+
+ dev_err(tee->dev, "tee: command timed out, disabling PSP\n");
+ psp_dead = true;
+
+ return -ETIMEDOUT;
+}
+
+static
+struct tee_init_ring_cmd *tee_alloc_cmd_buffer(struct psp_tee_device *tee)
+{
+ struct tee_init_ring_cmd *cmd;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->hi_addr = upper_32_bits(tee->rb_mgr.ring_pa);
+ cmd->low_addr = lower_32_bits(tee->rb_mgr.ring_pa);
+ cmd->size = tee->rb_mgr.ring_size;
+
+ dev_dbg(tee->dev, "tee: ring address: high = 0x%x low = 0x%x size = %u\n",
+ cmd->hi_addr, cmd->low_addr, cmd->size);
+
+ return cmd;
+}
+
+static inline void tee_free_cmd_buffer(struct tee_init_ring_cmd *cmd)
+{
+ kfree(cmd);
+}
+
+static int tee_init_ring(struct psp_tee_device *tee)
+{
+ int ring_size = MAX_RING_BUFFER_ENTRIES * sizeof(struct tee_ring_cmd);
+ struct tee_init_ring_cmd *cmd;
+ phys_addr_t cmd_buffer;
+ unsigned int reg;
+ int ret;
+
+ BUILD_BUG_ON(sizeof(struct tee_ring_cmd) != 1024);
+
+ ret = tee_alloc_ring(tee, ring_size);
+ if (ret) {
+ dev_err(tee->dev, "tee: ring allocation failed %d\n", ret);
+ return ret;
+ }
+
+ tee->rb_mgr.wptr = 0;
+
+ cmd = tee_alloc_cmd_buffer(tee);
+ if (!cmd) {
+ tee_free_ring(tee);
+ return -ENOMEM;
+ }
+
+ cmd_buffer = __psp_pa((void *)cmd);
+
+ /* Send command buffer details to Trusted OS by writing to
+ * CPU-PSP message registers
+ */
+
+ iowrite32(lower_32_bits(cmd_buffer),
+ tee->io_regs + tee->vdata->cmdbuff_addr_lo_reg);
+ iowrite32(upper_32_bits(cmd_buffer),
+ tee->io_regs + tee->vdata->cmdbuff_addr_hi_reg);
+ iowrite32(TEE_RING_INIT_CMD,
+ tee->io_regs + tee->vdata->cmdresp_reg);
+
+ ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg);
+ if (ret) {
+ dev_err(tee->dev, "tee: ring init command timed out\n");
+ tee_free_ring(tee);
+ goto free_buf;
+ }
+
+ if (reg & PSP_CMDRESP_ERR_MASK) {
+ dev_err(tee->dev, "tee: ring init command failed (%#010x)\n",
+ reg & PSP_CMDRESP_ERR_MASK);
+ tee_free_ring(tee);
+ ret = -EIO;
+ }
+
+free_buf:
+ tee_free_cmd_buffer(cmd);
+
+ return ret;
+}
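[Editorial aside, not part of this patch: for scale, assuming 4 KiB pages, the ring allocated above is MAX_RING_BUFFER_ENTRIES * sizeof(struct tee_ring_cmd) = 32 * 1024 bytes = 32 KiB, so tee_alloc_ring() requests get_order(32768) = 3, i.e. eight contiguous pages whose physical address is handed to the Trusted OS in the init command.]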
+
+static void tee_destroy_ring(struct psp_tee_device *tee)
+{
+ unsigned int reg;
+ int ret;
+
+ if (!tee->rb_mgr.ring_start)
+ return;
+
+ if (psp_dead)
+ goto free_ring;
+
+ iowrite32(TEE_RING_DESTROY_CMD,
+ tee->io_regs + tee->vdata->cmdresp_reg);
+
+ ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg);
+ if (ret) {
+ dev_err(tee->dev, "tee: ring destroy command timed out\n");
+ } else if (reg & PSP_CMDRESP_ERR_MASK) {
+ dev_err(tee->dev, "tee: ring destroy command failed (%#010x)\n",
+ reg & PSP_CMDRESP_ERR_MASK);
+ }
+
+free_ring:
+ tee_free_ring(tee);
+}
+
+int tee_dev_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ struct psp_tee_device *tee;
+ int ret;
+
+ ret = -ENOMEM;
+ tee = devm_kzalloc(dev, sizeof(*tee), GFP_KERNEL);
+ if (!tee)
+ goto e_err;
+
+ psp->tee_data = tee;
+
+ tee->dev = dev;
+ tee->psp = psp;
+
+ tee->io_regs = psp->io_regs;
+
+ tee->vdata = (struct tee_vdata *)psp->vdata->tee;
+ if (!tee->vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "tee: missing driver data\n");
+ goto e_err;
+ }
+
+ ret = tee_init_ring(tee);
+ if (ret) {
+ dev_err(dev, "tee: failed to init ring buffer\n");
+ goto e_err;
+ }
+
+ dev_notice(dev, "tee enabled\n");
+
+ return 0;
+
+e_err:
+ psp->tee_data = NULL;
+
+ dev_notice(dev, "tee initialization failed\n");
+
+ return ret;
+}
+
+void tee_dev_destroy(struct psp_device *psp)
+{
+ struct psp_tee_device *tee = psp->tee_data;
+
+ if (!tee)
+ return;
+
+ tee_destroy_ring(tee);
+}
+
+static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
+ void *buf, size_t len, struct tee_ring_cmd **resp)
+{
+ struct tee_ring_cmd *cmd;
+ u32 rptr, wptr;
+ int nloop = 1000, ret = 0;
+
+ *resp = NULL;
+
+ mutex_lock(&tee->rb_mgr.mutex);
+
+ wptr = tee->rb_mgr.wptr;
+
+ /* Check if ring buffer is full */
+ do {
+ rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg);
+
+ if (!(wptr + sizeof(struct tee_ring_cmd) == rptr))
+ break;
+
+ dev_info(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
+ rptr, wptr);
+
+ /* Wait if ring buffer is full */
+ mutex_unlock(&tee->rb_mgr.mutex);
+ schedule_timeout_interruptible(msecs_to_jiffies(10));
+ mutex_lock(&tee->rb_mgr.mutex);
+
+ } while (--nloop);
+
+ if (!nloop && (wptr + sizeof(struct tee_ring_cmd) == rptr)) {
+ dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
+ rptr, wptr);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ /* Pointer to empty data entry in ring buffer */
+ cmd = (struct tee_ring_cmd *)(tee->rb_mgr.ring_start + wptr);
+
+ /* Write command data into ring buffer */
+ cmd->cmd_id = cmd_id;
+ cmd->cmd_state = TEE_CMD_STATE_INIT;
+ memset(&cmd->buf[0], 0, sizeof(cmd->buf));
+ memcpy(&cmd->buf[0], buf, len);
+
+ /* Update local copy of write pointer */
+ tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd);
+ if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size)
+ tee->rb_mgr.wptr = 0;
+
+ /* Trigger interrupt to Trusted OS */
+ iowrite32(tee->rb_mgr.wptr, tee->io_regs + tee->vdata->ring_wptr_reg);
+
+	/* The Trusted OS provides its response in the same ring buffer
+	 * location as the submitted data entry.
+	 */
+ *resp = cmd;
+
+unlock:
+ mutex_unlock(&tee->rb_mgr.mutex);
+
+ return ret;
+}
+
+static int tee_wait_cmd_completion(struct psp_tee_device *tee,
+ struct tee_ring_cmd *resp,
+ unsigned int timeout)
+{
+ /* ~5ms sleep per loop => nloop = timeout * 200 */
+ int nloop = timeout * 200;
+
+ while (--nloop) {
+ if (resp->cmd_state == TEE_CMD_STATE_COMPLETED)
+ return 0;
+
+ usleep_range(5000, 5100);
+ }
+
+ dev_err(tee->dev, "tee: command 0x%x timed out, disabling PSP\n",
+ resp->cmd_id);
+
+ psp_dead = true;
+
+ return -ETIMEDOUT;
+}
+
+int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
+ u32 *status)
+{
+ struct psp_device *psp = psp_get_master_device();
+ struct psp_tee_device *tee;
+ struct tee_ring_cmd *resp;
+ int ret;
+
+ if (!buf || !status || !len || len > sizeof(resp->buf))
+ return -EINVAL;
+
+ *status = 0;
+
+ if (!psp || !psp->tee_data)
+ return -ENODEV;
+
+ if (psp_dead)
+ return -EBUSY;
+
+ tee = psp->tee_data;
+
+ ret = tee_submit_cmd(tee, cmd_id, buf, len, &resp);
+ if (ret)
+ return ret;
+
+ ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ memcpy(buf, &resp->buf[0], len);
+ *status = resp->status;
+
+ return 0;
+}
+EXPORT_SYMBOL(psp_tee_process_cmd);
+
+int psp_check_tee_status(void)
+{
+ struct psp_device *psp = psp_get_master_device();
+
+ if (!psp || !psp->tee_data)
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL(psp_check_tee_status);
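[Editorial aside, not part of this patch: a rough sketch of how a TEE client driver might consume the two exports above. The command id and payload layout are illustrative placeholders, not the real AMD-TEE protocol.]

	/* hypothetical caller, for illustration only */
	static int example_tee_ping(void)
	{
		u8 payload[64] = {};	/* command-specific buffer, <= MAX_BUFFER_SIZE */
		u32 tee_status;
		int ret;

		if (psp_check_tee_status())
			return -ENODEV;		/* no TEE-capable PSP present */

		/* cmd id 1 is a placeholder; real ids come from the Trusted OS ABI */
		ret = psp_tee_process_cmd(1, payload, sizeof(payload), &tee_status);
		if (ret)
			return ret;		/* transport failure (timeout, dead PSP, ...) */

		/* on success the Trusted OS result code is returned in tee_status */
		return tee_status ? -EIO : 0;
	}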
diff --git a/drivers/crypto/ccp/tee-dev.h b/drivers/crypto/ccp/tee-dev.h
new file mode 100644
index 000000000000..f09960112115
--- /dev/null
+++ b/drivers/crypto/ccp/tee-dev.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
+ * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
+ *
+ */
+
+/* This file describes the TEE communication interface between the host
+ * and the AMD Secure Processor.
+ */
+
+#ifndef __TEE_DEV_H__
+#define __TEE_DEV_H__
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+
+#define TEE_DEFAULT_TIMEOUT 10
+#define MAX_BUFFER_SIZE 992
+
+/**
+ * enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration
+ * @TEE_RING_INIT_CMD: Initialize ring buffer
+ * @TEE_RING_DESTROY_CMD: Destroy ring buffer
+ * @TEE_RING_MAX_CMD: Maximum command id
+ */
+enum tee_ring_cmd_id {
+ TEE_RING_INIT_CMD = 0x00010000,
+ TEE_RING_DESTROY_CMD = 0x00020000,
+ TEE_RING_MAX_CMD = 0x000F0000,
+};
+
+/**
+ * struct tee_init_ring_cmd - Command to init TEE ring buffer
+ * @low_addr: bits [31:0] of the physical address of ring buffer
+ * @hi_addr: bits [63:32] of the physical address of ring buffer
+ * @size: size of ring buffer in bytes
+ */
+struct tee_init_ring_cmd {
+ u32 low_addr;
+ u32 hi_addr;
+ u32 size;
+};
+
+#define MAX_RING_BUFFER_ENTRIES 32
+
+/**
+ * struct ring_buf_manager - Helper structure to manage ring buffer.
+ * @ring_start: starting address of ring buffer
+ * @ring_size: size of ring buffer in bytes
+ * @ring_pa: physical address of ring buffer
+ * @wptr: index to the last written entry in ring buffer
+ */
+struct ring_buf_manager {
+ struct mutex mutex; /* synchronizes access to ring buffer */
+ void *ring_start;
+ u32 ring_size;
+ phys_addr_t ring_pa;
+ u32 wptr;
+};
+
+struct psp_tee_device {
+ struct device *dev;
+ struct psp_device *psp;
+ void __iomem *io_regs;
+ struct tee_vdata *vdata;
+ struct ring_buf_manager rb_mgr;
+};
+
+/**
+ * enum tee_cmd_state - TEE command states for the ring buffer interface
+ * @TEE_CMD_STATE_INIT: initial state of command when sent from host
+ * @TEE_CMD_STATE_PROCESS: command being processed by TEE environment
+ * @TEE_CMD_STATE_COMPLETED: command processing completed
+ */
+enum tee_cmd_state {
+ TEE_CMD_STATE_INIT,
+ TEE_CMD_STATE_PROCESS,
+ TEE_CMD_STATE_COMPLETED,
+};
+
+/**
+ * struct tee_ring_cmd - Structure of the command buffer in TEE ring
+ * @cmd_id: refers to &enum tee_cmd_id. Command id for the ring buffer
+ * interface
+ * @cmd_state: refers to &enum tee_cmd_state
+ * @status: status of TEE command execution
+ * @res0: reserved region
+ * @pdata: private data (currently unused)
+ * @res1: reserved region
+ * @buf: TEE command specific buffer
+ */
+struct tee_ring_cmd {
+ u32 cmd_id;
+ u32 cmd_state;
+ u32 status;
+ u32 res0[1];
+ u64 pdata;
+ u32 res1[2];
+ u8 buf[MAX_BUFFER_SIZE];
+
+ /* Total size: 1024 bytes */
+} __packed;
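[Editorial aside, not part of this patch: a quick sanity check on the layout above: four u32 fields (16 bytes) + the u64 pdata (8) + two reserved u32s (8) + the 992-byte buffer add up to exactly 1024 bytes, matching the BUILD_BUG_ON() in tee_init_ring() and the MAX_RING_BUFFER_ENTRIES * 1024 ring sizing.]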
+
+int tee_dev_init(struct psp_device *psp);
+void tee_dev_destroy(struct psp_device *psp);
+
+#endif /* __TEE_DEV_H__ */
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 64d318dc0d47..2fc0e0da790b 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -237,7 +237,7 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
* revealed the decrypted message --> zero its memory.
*/
sg_zero_buffer(areq->dst, sg_nents(areq->dst),
- areq->cryptlen, 0);
+ areq->cryptlen, areq->assoclen);
err = -EBADMSG;
}
/*ENCRYPT*/
@@ -385,13 +385,13 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
return -EINVAL;
break;
default:
- dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
+ dev_dbg(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
return -EINVAL;
}
/* Check cipher key size */
if (ctx->flow_mode == S_DIN_to_DES) {
if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
- dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
+ dev_dbg(dev, "Invalid cipher(3DES) key size: %u\n",
ctx->enc_keylen);
return -EINVAL;
}
@@ -399,7 +399,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
if (ctx->enc_keylen != AES_KEYSIZE_128 &&
ctx->enc_keylen != AES_KEYSIZE_192 &&
ctx->enc_keylen != AES_KEYSIZE_256) {
- dev_err(dev, "Invalid cipher(AES) key size: %u\n",
+ dev_dbg(dev, "Invalid cipher(AES) key size: %u\n",
ctx->enc_keylen);
return -EINVAL;
}
@@ -562,7 +562,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
rc = crypto_authenc_extractkeys(&keys, key, keylen);
if (rc)
- goto badkey;
+ return rc;
enckey = keys.enckey;
authkey = keys.authkey;
ctx->enc_keylen = keys.enckeylen;
@@ -570,10 +570,9 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
if (ctx->cipher_mode == DRV_CIPHER_CTR) {
/* the nonce is stored in bytes at end of key */
- rc = -EINVAL;
if (ctx->enc_keylen <
(AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
- goto badkey;
+ return -EINVAL;
/* Copy nonce from last 4 bytes in CTR key to
* first 4 bytes in CTR IV
*/
@@ -591,7 +590,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
rc = validate_keys_sizes(ctx);
if (rc)
- goto badkey;
+ return rc;
/* STAT_PHASE_1: Copy key to ctx */
@@ -605,7 +604,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
if (rc)
- goto badkey;
+ return rc;
}
/* STAT_PHASE_2: Create sequence */
@@ -622,8 +621,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
break; /* No auth. key setup */
default:
dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
- rc = -ENOTSUPP;
- goto badkey;
+ return -ENOTSUPP;
}
/* STAT_PHASE_3: Submit sequence to HW */
@@ -632,18 +630,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
if (rc) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- goto setkey_error;
+ return rc;
}
}
/* Update STAT_PHASE_3 */
return rc;
-
-badkey:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
-setkey_error:
- return rc;
}
static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
@@ -1567,7 +1559,7 @@ static int config_ccm_adata(struct aead_request *req)
/* taken from crypto/ccm.c */
/* 2 <= L <= 8, so 1 <= L' <= 7. */
if (l < 2 || l > 8) {
- dev_err(dev, "illegal iv value %X\n", req->iv[0]);
+ dev_dbg(dev, "illegal iv value %X\n", req->iv[0]);
return -EINVAL;
}
memcpy(b0, req->iv, AES_BLOCK_SIZE);
@@ -1925,7 +1917,6 @@ static int cc_proc_aead(struct aead_request *req,
if (validate_data_size(ctx, direct, req)) {
dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
req->cryptlen, areq_ctx->assoclen);
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
return -EINVAL;
}
@@ -2065,7 +2056,7 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2115,7 +2106,7 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2234,7 +2225,7 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2265,7 +2256,7 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2299,7 +2290,7 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2330,7 +2321,7 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 3112b58d0bb1..7d6252d892d7 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -291,7 +291,6 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
/* This check the size of the protected key token */
if (keylen != sizeof(hki)) {
dev_err(dev, "Unsupported protected key size %d.\n", keylen);
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -303,8 +302,7 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
keylen = hki.keylen;
if (validate_keys_sizes(ctx_p, keylen)) {
- dev_err(dev, "Unsupported key size %d.\n", keylen);
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ dev_dbg(dev, "Unsupported key size %d.\n", keylen);
return -EINVAL;
}
@@ -394,8 +392,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
/* STAT_PHASE_0: Init and sanity checks */
if (validate_keys_sizes(ctx_p, keylen)) {
- dev_err(dev, "Unsupported key size %d.\n", keylen);
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ dev_dbg(dev, "Unsupported key size %d.\n", keylen);
return -EINVAL;
}
@@ -523,6 +520,7 @@ static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
}
}
+
static void cc_setup_state_desc(struct crypto_tfm *tfm,
struct cipher_req_ctx *req_ctx,
unsigned int ivsize, unsigned int nbytes,
@@ -534,8 +532,6 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
int cipher_mode = ctx_p->cipher_mode;
int flow_mode = ctx_p->flow_mode;
int direction = req_ctx->gen_ctx.op_type;
- dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
- unsigned int key_len = ctx_p->keylen;
dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
unsigned int du_size = nbytes;
@@ -571,6 +567,47 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
case DRV_CIPHER_BITLOCKER:
+ break;
+ default:
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+ }
+}
+
+
+static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = ctx_p->flow_mode;
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+ unsigned int key_len = ctx_p->keylen;
+ dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+ unsigned int du_size = nbytes;
+
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg,
+ skcipher_alg.base);
+
+ if (cc_alg->data_unit)
+ du_size = cc_alg->data_unit;
+
+ switch (cipher_mode) {
+ case DRV_CIPHER_ECB:
+ break;
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+ case DRV_CIPHER_BITLOCKER:
/* load XEX key */
hw_desc_init(&desc[*seq_size]);
set_cipher_mode(&desc[*seq_size], cipher_mode);
@@ -836,8 +873,7 @@ static int cc_cipher_process(struct skcipher_request *req,
/* TODO: check data length according to mode */
if (validate_data_size(ctx_p, nbytes)) {
- dev_err(dev, "Unsupported data size %d.\n", nbytes);
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
+ dev_dbg(dev, "Unsupported data size %d.\n", nbytes);
rc = -EINVAL;
goto exit_process;
}
@@ -881,12 +917,14 @@ static int cc_cipher_process(struct skcipher_request *req,
/* STAT_PHASE_2: Create sequence */
- /* Setup IV and XEX key used */
+ /* Setup state (IV) */
cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
/* Setup MLLI line, if needed */
cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
/* Setup key */
cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
+ /* Setup state (IV and XEX key) */
+ cc_setup_xex_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
/* Data processing */
cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len);
/* Read next IV */
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 8b8eee513c27..532bc95a8373 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -133,7 +133,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
u32 imr;
/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
- /* if driver suspended return, probebly shared interrupt */
+ /* if driver suspended return, probably shared interrupt */
if (cc_pm_is_dev_suspended(dev))
return IRQ_NONE;
@@ -271,6 +271,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
const struct cc_hw_data *hw_rev;
const struct of_device_id *dev_id;
struct clk *clk;
+ int irq;
int rc = 0;
new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
@@ -337,9 +338,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
&req_mem_cc_regs->start, new_drvdata->cc_base);
/* Then IRQ */
- new_drvdata->irq = platform_get_irq(plat_dev, 0);
- if (new_drvdata->irq < 0)
- return new_drvdata->irq;
+ irq = platform_get_irq(plat_dev, 0);
+ if (irq < 0)
+ return irq;
init_completion(&new_drvdata->hw_queue_avail);
@@ -442,14 +443,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n",
hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
/* register the driver isr function */
- rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
- IRQF_SHARED, "ccree", new_drvdata);
+ rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "ccree",
+ new_drvdata);
if (rc) {
- dev_err(dev, "Could not register to interrupt %d\n",
- new_drvdata->irq);
+ dev_err(dev, "Could not register to interrupt %d\n", irq);
goto post_clk_err;
}
- dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
+ dev_dbg(dev, "Registered to IRQ: %d\n", irq);
rc = init_cc_regs(new_drvdata, true);
if (rc) {
@@ -465,7 +465,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
rc = cc_fips_init(new_drvdata);
if (rc) {
- dev_err(dev, "CC_FIPS_INIT failed 0x%x\n", rc);
+ dev_err(dev, "cc_fips_init failed 0x%x\n", rc);
goto post_debugfs_err;
}
rc = cc_sram_mgr_init(new_drvdata);
@@ -490,13 +490,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
rc = cc_buffer_mgr_init(new_drvdata);
if (rc) {
- dev_err(dev, "buffer_mgr_init failed\n");
+ dev_err(dev, "cc_buffer_mgr_init failed\n");
goto post_req_mgr_err;
}
rc = cc_pm_init(new_drvdata);
if (rc) {
- dev_err(dev, "ssi_power_mgr_init failed\n");
+ dev_err(dev, "cc_pm_init failed\n");
goto post_buf_mgr_err;
}
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index ab31d4a68c80..c227718ba992 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -28,7 +28,6 @@
/* Registers definitions from shared/hw/ree_include */
#include "cc_host_regs.h"
-#define CC_DEV_SHA_MAX 512
#include "cc_crypto_ctx.h"
#include "cc_hw_queue_defs.h"
#include "cc_sram_mgr.h"
@@ -133,13 +132,11 @@ struct cc_crypto_req {
/**
* struct cc_drvdata - driver private data context
* @cc_base: virt address of the CC registers
- * @irq: device IRQ number
- * @irq_mask: Interrupt mask shadow (1 for masked interrupts)
+ * @irq: bitmap indicating source of last interrupt
*/
struct cc_drvdata {
void __iomem *cc_base;
int irq;
- u32 irq_mask;
struct completion hw_queue_avail; /* wait for HW queue availability */
struct platform_device *plat_dev;
cc_sram_addr_t mlli_sram_addr;
@@ -161,6 +158,7 @@ struct cc_drvdata {
int std_bodies;
bool sec_disabled;
u32 comp_mask;
+ bool pm_on;
};
struct cc_crypto_alg {
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
index 4c8bce33abcf..702aefc21447 100644
--- a/drivers/crypto/ccree/cc_fips.c
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -120,7 +120,7 @@ static void fips_dsr(unsigned long devarg)
cc_tee_handle_fips_error(drvdata);
}
- /* after verifing that there is nothing to do,
+ /* after verifying that there is nothing to do,
* unmask AXI completion interrupt.
*/
val = (CC_REG(HOST_IMR) & ~irq);
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index bc71bdf44a9f..912e5ce5079d 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -899,9 +899,6 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
out:
- if (rc)
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
if (ctx->key_params.key_dma_addr) {
dma_unmap_single(dev, ctx->key_params.key_dma_addr,
ctx->key_params.keylen, DMA_TO_DEVICE);
@@ -990,9 +987,6 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
- if (rc)
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
dma_unmap_single(dev, ctx->key_params.key_dma_addr,
ctx->key_params.keylen, DMA_TO_DEVICE);
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -2358,11 +2352,9 @@ cc_digest_len_addr(void *drvdata, u32 mode)
case DRV_HASH_SHA256:
case DRV_HASH_MD5:
return digest_len_addr;
-#if (CC_DEV_SHA_MAX > 256)
case DRV_HASH_SHA384:
case DRV_HASH_SHA512:
return digest_len_addr + sizeof(cc_digest_len_init);
-#endif
default:
return digest_len_addr; /*to avoid kernel crash*/
}
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index dbc508fb719b..24c368b866f6 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -22,14 +22,8 @@ const struct dev_pm_ops ccree_pm = {
int cc_pm_suspend(struct device *dev)
{
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- int rc;
dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
- rc = cc_suspend_req_queue(drvdata);
- if (rc) {
- dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
- return rc;
- }
fini_cc_regs(drvdata);
cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
cc_clk_off(drvdata);
@@ -48,7 +42,7 @@ int cc_pm_resume(struct device *dev)
dev_err(dev, "failed getting clock back on. We're toast.\n");
return rc;
}
- /* wait for Crytpcell reset completion */
+ /* wait for Cryptocell reset completion */
if (!cc_wait_for_reset_completion(drvdata)) {
dev_err(dev, "Cryptocell reset not completed");
return -EBUSY;
@@ -63,13 +57,6 @@ int cc_pm_resume(struct device *dev)
/* check if tee fips error occurred during power down */
cc_tee_handle_fips_error(drvdata);
- rc = cc_resume_req_queue(drvdata);
- if (rc) {
- dev_err(dev, "cc_resume_req_queue (%x)\n", rc);
- return rc;
- }
-
- /* must be after the queue resuming as it uses the HW queue*/
cc_init_hash_sram(drvdata);
return 0;
@@ -80,28 +67,20 @@ int cc_pm_get(struct device *dev)
int rc = 0;
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- if (cc_req_queue_suspended(drvdata))
+ if (drvdata->pm_on)
rc = pm_runtime_get_sync(dev);
- else
- pm_runtime_get_noresume(dev);
- return rc;
+ return (rc == 1 ? 0 : rc);
}
-int cc_pm_put_suspend(struct device *dev)
+void cc_pm_put_suspend(struct device *dev)
{
- int rc = 0;
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- if (!cc_req_queue_suspended(drvdata)) {
+ if (drvdata->pm_on) {
pm_runtime_mark_last_busy(dev);
- rc = pm_runtime_put_autosuspend(dev);
- } else {
- /* Something wrong happens*/
- dev_err(dev, "request to suspend already suspended queue");
- rc = -EBUSY;
+ pm_runtime_put_autosuspend(dev);
}
- return rc;
}
bool cc_pm_is_dev_suspended(struct device *dev)
@@ -114,10 +93,10 @@ int cc_pm_init(struct cc_drvdata *drvdata)
{
struct device *dev = drvdata_to_dev(drvdata);
- /* must be before the enabling to avoid resdundent suspending */
+ /* must be before the enabling to avoid redundant suspending */
pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(dev);
- /* activate the PM module */
+ /* set us as active - note we won't do PM ops until cc_pm_go()! */
return pm_runtime_set_active(dev);
}
@@ -125,9 +104,11 @@ int cc_pm_init(struct cc_drvdata *drvdata)
void cc_pm_go(struct cc_drvdata *drvdata)
{
pm_runtime_enable(drvdata_to_dev(drvdata));
+ drvdata->pm_on = true;
}
void cc_pm_fini(struct cc_drvdata *drvdata)
{
pm_runtime_disable(drvdata_to_dev(drvdata));
+ drvdata->pm_on = false;
}
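A note on the cc_pm.c hunks above: cc_pm_get() now folds pm_runtime_get_sync()'s "already active" return value of 1 into 0, and cc_pm_put_suspend() becomes void, so callers no longer have to handle a failing put. A minimal sketch of the resulting caller contract, using a hypothetical helper name (not part of the patch):

static int cc_submit_one(struct device *dev)
{
	int rc = cc_pm_get(dev);	/* 0 on success, even if already resumed */

	if (rc) {
		dev_err(dev, "cc_pm_get returned %x\n", rc);
		return rc;
	}

	/* ... push descriptors to the HW queue ... */

	cc_pm_put_suspend(dev);		/* re-arms autosuspend; can no longer fail */
	return 0;
}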
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index a7d98a5da2e1..80a18e11cae4 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -21,7 +21,7 @@ void cc_pm_fini(struct cc_drvdata *drvdata);
int cc_pm_suspend(struct device *dev);
int cc_pm_resume(struct device *dev);
int cc_pm_get(struct device *dev);
-int cc_pm_put_suspend(struct device *dev);
+void cc_pm_put_suspend(struct device *dev);
bool cc_pm_is_dev_suspended(struct device *dev);
#else
@@ -35,25 +35,12 @@ static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
-static inline int cc_pm_suspend(struct device *dev)
-{
- return 0;
-}
-
-static inline int cc_pm_resume(struct device *dev)
-{
- return 0;
-}
-
static inline int cc_pm_get(struct device *dev)
{
return 0;
}
-static inline int cc_pm_put_suspend(struct device *dev)
-{
- return 0;
-}
+static inline void cc_pm_put_suspend(struct device *dev) {}
static inline bool cc_pm_is_dev_suspended(struct device *dev)
{
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
index a947d5a2cf35..9d61e6f12478 100644
--- a/drivers/crypto/ccree/cc_request_mgr.c
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -41,7 +41,6 @@ struct cc_req_mgr_handle {
#else
struct tasklet_struct comptask;
#endif
- bool is_runtime_suspended;
};
struct cc_bl_item {
@@ -230,7 +229,7 @@ static int cc_queues_status(struct cc_drvdata *drvdata,
struct device *dev = drvdata_to_dev(drvdata);
/* SW queue is checked only once as it will not
- * be chaned during the poll because the spinlock_bh
+ * be changed during the poll because the spinlock_bh
* is held by the thread
*/
if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
@@ -275,12 +274,11 @@ static int cc_queues_status(struct cc_drvdata *drvdata,
* \param len The crypto sequence length
* \param add_comp If "true": add an artificial dout DMA to mark completion
*
- * \return int Returns -EINPROGRESS or error code
*/
-static int cc_do_send_request(struct cc_drvdata *drvdata,
- struct cc_crypto_req *cc_req,
- struct cc_hw_desc *desc, unsigned int len,
- bool add_comp)
+static void cc_do_send_request(struct cc_drvdata *drvdata,
+ struct cc_crypto_req *cc_req,
+ struct cc_hw_desc *desc, unsigned int len,
+ bool add_comp)
{
struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
unsigned int used_sw_slots;
@@ -303,8 +301,8 @@ static int cc_do_send_request(struct cc_drvdata *drvdata,
/*
* We are about to push command to the HW via the command registers
- * that may refernece hsot memory. We need to issue a memory barrier
- * to make sure there are no outstnading memory writes
+ * that may reference host memory. We need to issue a memory barrier
+ * to make sure there are no outstanding memory writes
*/
wmb();
@@ -328,9 +326,6 @@ static int cc_do_send_request(struct cc_drvdata *drvdata,
/* Update the free slots in HW queue */
req_mgr_h->q_free_slots -= total_seq_len;
}
-
- /* Operation still in process */
- return -EINPROGRESS;
}
static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
@@ -390,20 +385,15 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
return;
}
- rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
- bli->len, false);
-
+ cc_do_send_request(drvdata, &bli->creq, bli->desc, bli->len,
+ false);
spin_unlock(&mgr->hw_lock);
- if (rc != -EINPROGRESS) {
- cc_pm_put_suspend(dev);
- creq->user_cb(dev, req, rc);
- }
-
/* Remove ourselves from the backlog list */
spin_lock(&mgr->bl_lock);
list_del(&bli->list);
--mgr->bl_len;
+ kfree(bli);
}
spin_unlock(&mgr->bl_lock);
@@ -422,7 +412,7 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
rc = cc_pm_get(dev);
if (rc) {
- dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+ dev_err(dev, "cc_pm_get returned %x\n", rc);
return rc;
}
@@ -451,8 +441,10 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
return -EBUSY;
}
- if (!rc)
- rc = cc_do_send_request(drvdata, cc_req, desc, len, false);
+ if (!rc) {
+ cc_do_send_request(drvdata, cc_req, desc, len, false);
+ rc = -EINPROGRESS;
+ }
spin_unlock_bh(&mgr->hw_lock);
return rc;
@@ -472,7 +464,7 @@ int cc_send_sync_request(struct cc_drvdata *drvdata,
rc = cc_pm_get(dev);
if (rc) {
- dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+ dev_err(dev, "cc_pm_get returned %x\n", rc);
return rc;
}
@@ -492,14 +484,8 @@ int cc_send_sync_request(struct cc_drvdata *drvdata,
reinit_completion(&drvdata->hw_queue_avail);
}
- rc = cc_do_send_request(drvdata, cc_req, desc, len, true);
+ cc_do_send_request(drvdata, cc_req, desc, len, true);
spin_unlock_bh(&mgr->hw_lock);
-
- if (rc != -EINPROGRESS) {
- cc_pm_put_suspend(dev);
- return rc;
- }
-
wait_for_completion(&cc_req->seq_compl);
return 0;
}
@@ -532,8 +518,8 @@ int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
/*
* We are about to push command to the HW via the command registers
- * that may refernece hsot memory. We need to issue a memory barrier
- * to make sure there are no outstnading memory writes
+ * that may reference host memory. We need to issue a memory barrier
+ * to make sure there are no outstanding memory writes
*/
wmb();
enqueue_seq(drvdata, desc, len);
@@ -668,7 +654,7 @@ static void comp_handler(unsigned long devarg)
request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
}
- /* after verifing that there is nothing to do,
+ /* after verifying that there is nothing to do,
* unmask AXI completion interrupt
*/
cc_iowrite(drvdata, CC_REG(HOST_IMR),
@@ -677,52 +663,3 @@ static void comp_handler(unsigned long devarg)
cc_proc_backlog(drvdata);
dev_dbg(dev, "Comp. handler done.\n");
}
-
-/*
- * resume the queue configuration - no need to take the lock as this happens
- * inside the spin lock protection
- */
-#if defined(CONFIG_PM)
-int cc_resume_req_queue(struct cc_drvdata *drvdata)
-{
- struct cc_req_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-
- spin_lock_bh(&request_mgr_handle->hw_lock);
- request_mgr_handle->is_runtime_suspended = false;
- spin_unlock_bh(&request_mgr_handle->hw_lock);
-
- return 0;
-}
-
-/*
- * suspend the queue configuration. Since it is used for the runtime suspend
- * only verify that the queue can be suspended.
- */
-int cc_suspend_req_queue(struct cc_drvdata *drvdata)
-{
- struct cc_req_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-
- /* lock the send_request */
- spin_lock_bh(&request_mgr_handle->hw_lock);
- if (request_mgr_handle->req_queue_head !=
- request_mgr_handle->req_queue_tail) {
- spin_unlock_bh(&request_mgr_handle->hw_lock);
- return -EBUSY;
- }
- request_mgr_handle->is_runtime_suspended = true;
- spin_unlock_bh(&request_mgr_handle->hw_lock);
-
- return 0;
-}
-
-bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
-{
- struct cc_req_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-
- return request_mgr_handle->is_runtime_suspended;
-}
-
-#endif
diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h
index f46cf766fe4d..ff7746aaaf35 100644
--- a/drivers/crypto/ccree/cc_request_mgr.h
+++ b/drivers/crypto/ccree/cc_request_mgr.h
@@ -40,12 +40,4 @@ void complete_request(struct cc_drvdata *drvdata);
void cc_req_mgr_fini(struct cc_drvdata *drvdata);
-#if defined(CONFIG_PM)
-int cc_resume_req_queue(struct cc_drvdata *drvdata);
-
-int cc_suspend_req_queue(struct cc_drvdata *drvdata);
-
-bool cc_req_queue_suspended(struct cc_drvdata *drvdata);
-#endif
-
#endif /*__REQUEST_MGR_H__*/
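With cc_do_send_request() made void, the request-queue suspend/resume helpers removed above lose their only users, and the asynchronous status is now fixed at the call site instead. A condensed sketch of the resulting cc_send_request() shape, with variable names abbreviated from the hunks above and the backlog path omitted:

	spin_lock_bh(&mgr->hw_lock);
	rc = cc_queues_status(drvdata, mgr, total_len);	/* the only remaining failure point */
	if (!rc) {
		cc_do_send_request(drvdata, cc_req, desc, len, false);
		rc = -EINPROGRESS;	/* completion is reported via comp_handler() */
	}
	spin_unlock_bh(&mgr->hw_lock);
	return rc;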
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 91e424378217..f078b2686418 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -23,22 +23,22 @@ config CRYPTO_DEV_CHELSIO
will be called chcr.
config CHELSIO_IPSEC_INLINE
- bool "Chelsio IPSec XFRM Tx crypto offload"
- depends on CHELSIO_T4
+ bool "Chelsio IPSec XFRM Tx crypto offload"
+ depends on CHELSIO_T4
depends on CRYPTO_DEV_CHELSIO
- depends on XFRM_OFFLOAD
- depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
- default n
- ---help---
- Enable support for IPSec Tx Inline.
+ depends on XFRM_OFFLOAD
+ depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
+ default n
+ ---help---
+ Enable support for IPSec Tx Inline.
config CRYPTO_DEV_CHELSIO_TLS
- tristate "Chelsio Crypto Inline TLS Driver"
- depends on CHELSIO_T4
- depends on TLS_TOE
- select CRYPTO_DEV_CHELSIO
- ---help---
- Support Chelsio Inline TLS with Chelsio crypto accelerator.
+ tristate "Chelsio Crypto Inline TLS Driver"
+ depends on CHELSIO_T4
+ depends on TLS_TOE
+ select CRYPTO_DEV_CHELSIO
+ ---help---
+ Support Chelsio Inline TLS with Chelsio crypto accelerator.
- To compile this driver as a module, choose M here: the module
- will be called chtls.
+ To compile this driver as a module, choose M here: the module
+ will be called chtls.
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 1b4a5664e604..b4b9b22125d1 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -870,20 +870,13 @@ static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
- int err = 0;
crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK;
- return err;
+ return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}
static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
@@ -912,7 +905,6 @@ static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
return 0;
badkey_err:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -943,7 +935,6 @@ static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
return 0;
badkey_err:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -981,7 +972,6 @@ static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
return 0;
badkey_err:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -1379,7 +1369,8 @@ static int chcr_device_init(struct chcr_context *ctx)
txq_perchan = ntxq / u_ctx->lldi.nchan;
spin_lock(&ctx->dev->lock_chcr_dev);
ctx->tx_chan_id = ctx->dev->tx_channel_id;
- ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
+ ctx->dev->tx_channel_id =
+ (ctx->dev->tx_channel_id + 1) % u_ctx->lldi.nchan;
spin_unlock(&ctx->dev->lock_chcr_dev);
rxq_idx = ctx->tx_chan_id * rxq_perchan;
rxq_idx += id % rxq_perchan;
@@ -2173,7 +2164,6 @@ static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
return 0;
badkey_err:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -3195,9 +3185,6 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
aeadctx->mayverify = VERIFY_SW;
break;
default:
-
- crypto_tfm_set_flags((struct crypto_tfm *) tfm,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
@@ -3222,8 +3209,6 @@ static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
aeadctx->mayverify = VERIFY_HW;
break;
default:
- crypto_tfm_set_flags((struct crypto_tfm *)tfm,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
@@ -3264,8 +3249,6 @@ static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
aeadctx->mayverify = VERIFY_HW;
break;
default:
- crypto_tfm_set_flags((struct crypto_tfm *)tfm,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
@@ -3290,8 +3273,6 @@ static int chcr_ccm_common_setkey(struct crypto_aead *aead,
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
} else {
- crypto_tfm_set_flags((struct crypto_tfm *)aead,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
aeadctx->enckey_len = 0;
return -EINVAL;
}
@@ -3314,9 +3295,6 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
if (error)
return error;
return chcr_ccm_common_setkey(aead, key, keylen);
@@ -3329,8 +3307,6 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
int error;
if (keylen < 3) {
- crypto_tfm_set_flags((struct crypto_tfm *)aead,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
aeadctx->enckey_len = 0;
return -EINVAL;
}
@@ -3338,9 +3314,6 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
if (error)
return error;
keylen -= 3;
@@ -3362,9 +3335,6 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
& CRYPTO_TFM_REQ_MASK);
ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
if (ret)
goto out;
@@ -3380,8 +3350,6 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
} else if (keylen == AES_KEYSIZE_256) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
} else {
- crypto_tfm_set_flags((struct crypto_tfm *)aead,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
pr_err("GCM: Invalid key length %d\n", keylen);
ret = -EINVAL;
goto out;
@@ -3432,16 +3400,11 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
& CRYPTO_TFM_REQ_MASK);
err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
- & CRYPTO_TFM_RES_MASK);
if (err)
goto out;
- if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto out;
- }
if (get_alg_config(&param, max_authsize)) {
pr_err("chcr : Unsupported digest size\n");
@@ -3562,16 +3525,12 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
& CRYPTO_TFM_REQ_MASK);
err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
- & CRYPTO_TFM_RES_MASK);
if (err)
goto out;
- if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto out;
- }
+
subtype = get_aead_subtype(authenc);
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 029a7354f541..e937605670ac 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -132,8 +132,6 @@ static void chcr_dev_init(struct uld_ctx *u_ctx)
static int chcr_dev_move(struct uld_ctx *u_ctx)
{
- struct adapter *adap;
-
mutex_lock(&drv_data.drv_mutex);
if (drv_data.last_dev == u_ctx) {
if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
@@ -146,8 +144,6 @@ static int chcr_dev_move(struct uld_ctx *u_ctx)
list_move(&u_ctx->entry, &drv_data.inact_dev);
if (list_empty(&drv_data.act_dev))
drv_data.last_dev = NULL;
- adap = padap(&u_ctx->dev);
- memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
atomic_dec(&drv_data.dev_count);
mutex_unlock(&drv_data.drv_mutex);
@@ -299,17 +295,21 @@ static int __init chcr_crypto_init(void)
static void __exit chcr_crypto_exit(void)
{
struct uld_ctx *u_ctx, *tmp;
+ struct adapter *adap;
stop_crypto();
-
cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
/* Remove all devices from list */
mutex_lock(&drv_data.drv_mutex);
list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
+ adap = padap(&u_ctx->dev);
+ memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
list_del(&u_ctx->entry);
kfree(u_ctx);
}
list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
+ adap = padap(&u_ctx->dev);
+ memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
list_del(&u_ctx->entry);
kfree(u_ctx);
}
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h
index d2bc655ab931..459442704eb1 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -179,7 +179,10 @@ struct chtls_hws {
u32 copied_seq;
u64 tx_seq_no;
struct tls_scmd scmd;
- struct tls12_crypto_info_aes_gcm_128 crypto_info;
+ union {
+ struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
+ struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
+ } crypto_info;
};
struct chtls_sock {
@@ -482,7 +485,7 @@ int send_tx_flowc_wr(struct sock *sk, int compl,
void chtls_tcp_push(struct sock *sk, int flags);
int chtls_push_frames(struct chtls_sock *csk, int comp);
int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
-int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode);
+int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode, int cipher_type);
void skb_entail(struct sock *sk, struct sk_buff *skb, int flags);
unsigned int keyid_to_addr(int start_addr, int keyid);
void free_tls_keyid(struct sock *sk);
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index aca75237bbcf..d4674589e1aa 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -727,6 +727,14 @@ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
return 0;
}
+static void chtls_purge_wr_queue(struct sock *sk)
+{
+ struct sk_buff *skb;
+
+ while ((skb = dequeue_wr(sk)) != NULL)
+ kfree_skb(skb);
+}
+
static void chtls_release_resources(struct sock *sk)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
@@ -741,6 +749,11 @@ static void chtls_release_resources(struct sock *sk)
kfree_skb(csk->txdata_skb_cache);
csk->txdata_skb_cache = NULL;
+ if (csk->wr_credits != csk->wr_max_credits) {
+ chtls_purge_wr_queue(sk);
+ chtls_reset_wr_list(csk);
+ }
+
if (csk->l2t_entry) {
cxgb4_l2t_release(csk->l2t_entry);
csk->l2t_entry = NULL;
@@ -1735,6 +1748,7 @@ static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
else
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}
+ kfree_skb(skb);
}
static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
@@ -1815,6 +1829,20 @@ static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
kfree_skb(skb);
}
+/*
+ * Add an skb to the deferred skb queue for processing from process context.
+ */
+static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
+ defer_handler_t handler)
+{
+ DEFERRED_SKB_CB(skb)->handler = handler;
+ spin_lock_bh(&cdev->deferq.lock);
+ __skb_queue_tail(&cdev->deferq, skb);
+ if (skb_queue_len(&cdev->deferq) == 1)
+ schedule_work(&cdev->deferq_task);
+ spin_unlock_bh(&cdev->deferq.lock);
+}
+
static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
struct chtls_dev *cdev, int status, int queue)
{
@@ -1829,7 +1857,7 @@ static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
if (!reply_skb) {
req->status = (queue << 1);
- send_defer_abort_rpl(cdev, skb);
+ t4_defer_reply(skb, cdev, send_defer_abort_rpl);
return;
}
@@ -1848,20 +1876,6 @@ static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
}
-/*
- * Add an skb to the deferred skb queue for processing from process context.
- */
-static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
- defer_handler_t handler)
-{
- DEFERRED_SKB_CB(skb)->handler = handler;
- spin_lock_bh(&cdev->deferq.lock);
- __skb_queue_tail(&cdev->deferq, skb);
- if (skb_queue_len(&cdev->deferq) == 1)
- schedule_work(&cdev->deferq_task);
- spin_unlock_bh(&cdev->deferq.lock);
-}
-
static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
struct chtls_dev *cdev,
int status, int queue)
@@ -2062,19 +2076,6 @@ rel_skb:
return 0;
}
-static struct sk_buff *dequeue_wr(struct sock *sk)
-{
- struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
- struct sk_buff *skb = csk->wr_skb_head;
-
- if (likely(skb)) {
- /* Don't bother clearing the tail */
- csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
- WR_SKB_CB(skb)->next_wr = NULL;
- }
- return skb;
-}
-
static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
{
struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR;
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/crypto/chelsio/chtls/chtls_cm.h
index 129d7ac649a9..3fac0c74a41f 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.h
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.h
@@ -185,6 +185,12 @@ static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
kfree_skb(skb);
}
+static inline void chtls_reset_wr_list(struct chtls_sock *csk)
+{
+ csk->wr_skb_head = NULL;
+ csk->wr_skb_tail = NULL;
+}
+
static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
{
WR_SKB_CB(skb)->next_wr = NULL;
@@ -197,4 +203,19 @@ static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb;
csk->wr_skb_tail = skb;
}
+
+static inline struct sk_buff *dequeue_wr(struct sock *sk)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ struct sk_buff *skb = NULL;
+
+ skb = csk->wr_skb_head;
+
+ if (likely(skb)) {
+ /* Don't bother clearing the tail */
+ csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
+ WR_SKB_CB(skb)->next_wr = NULL;
+ }
+ return skb;
+}
#endif
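Moving dequeue_wr() into this header is what allows the new teardown code in chtls_cm.c to drop work requests whose credits never returned. Roughly, the pairing looks like this (condensed from the chtls_release_resources() and chtls_purge_wr_queue() hunks above):

	/* outstanding credits mean WRs are still queued on the socket;
	 * free them and reset the list so nothing dangles after release
	 */
	if (csk->wr_credits != csk->wr_max_credits) {
		while ((skb = dequeue_wr(sk)) != NULL)
			kfree_skb(skb);
		chtls_reset_wr_list(csk);
	}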
diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c
index 2a34035d3cfb..f1820aca0d33 100644
--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
+++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
@@ -208,28 +208,53 @@ static void chtls_rxkey_ivauth(struct _key_ctx *kctx)
static int chtls_key_info(struct chtls_sock *csk,
struct _key_ctx *kctx,
- u32 keylen, u32 optname)
+ u32 keylen, u32 optname,
+ int cipher_type)
{
- unsigned char key[AES_KEYSIZE_128];
- struct tls12_crypto_info_aes_gcm_128 *gcm_ctx;
+ unsigned char key[AES_MAX_KEY_SIZE];
+ unsigned char *key_p, *salt;
unsigned char ghash_h[AEAD_H_SIZE];
- int ck_size, key_ctx_size;
+ int ck_size, key_ctx_size, kctx_mackey_size, salt_size;
struct crypto_aes_ctx aes;
int ret;
- gcm_ctx = (struct tls12_crypto_info_aes_gcm_128 *)
- &csk->tlshws.crypto_info;
-
key_ctx_size = sizeof(struct _key_ctx) +
roundup(keylen, 16) + AEAD_H_SIZE;
- if (keylen == AES_KEYSIZE_128) {
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
- } else {
+ /* GCM mode of AES supports 128 and 256 bit encryption, so
+ * prepare key context based on GCM cipher type
+ */
+ switch (cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *gcm_ctx_128 =
+ (struct tls12_crypto_info_aes_gcm_128 *)
+ &csk->tlshws.crypto_info;
+ memcpy(key, gcm_ctx_128->key, keylen);
+
+ key_p = gcm_ctx_128->key;
+ salt = gcm_ctx_128->salt;
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
+ kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *gcm_ctx_256 =
+ (struct tls12_crypto_info_aes_gcm_256 *)
+ &csk->tlshws.crypto_info;
+ memcpy(key, gcm_ctx_256->key, keylen);
+
+ key_p = gcm_ctx_256->key;
+ salt = gcm_ctx_256->salt;
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
+ kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+ break;
+ }
+ default:
pr_err("GCM: Invalid key length %d\n", keylen);
return -EINVAL;
}
- memcpy(key, gcm_ctx->key, keylen);
/* Calculate the H = CIPH(K, 0 repeated 16 times).
* It will go in key context
@@ -249,20 +274,20 @@ static int chtls_key_info(struct chtls_sock *csk,
key_ctx = ((key_ctx_size >> 4) << 3);
kctx->ctx_hdr = FILL_KEY_CRX_HDR(ck_size,
- CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ kctx_mackey_size,
0, 0, key_ctx);
chtls_rxkey_ivauth(kctx);
} else {
kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
- CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ kctx_mackey_size,
0, 0, key_ctx_size >> 4);
}
- memcpy(kctx->salt, gcm_ctx->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
- memcpy(kctx->key, gcm_ctx->key, keylen);
+ memcpy(kctx->salt, salt, salt_size);
+ memcpy(kctx->key, key_p, keylen);
memcpy(kctx->key + keylen, ghash_h, AEAD_H_SIZE);
/* erase key info from driver */
- memset(gcm_ctx->key, 0, keylen);
+ memset(key_p, 0, keylen);
return 0;
}
@@ -288,7 +313,8 @@ static void chtls_set_scmd(struct chtls_sock *csk)
SCMD_TLS_FRAG_ENABLE_V(1);
}
-int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
+int chtls_setkey(struct chtls_sock *csk, u32 keylen,
+ u32 optname, int cipher_type)
{
struct tls_key_req *kwr;
struct chtls_dev *cdev;
@@ -350,9 +376,10 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
kwr->sc_imm.cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
kwr->sc_imm.len = cpu_to_be32(klen);
+ lock_sock(sk);
/* key info */
kctx = (struct _key_ctx *)(kwr + 1);
- ret = chtls_key_info(csk, kctx, keylen, optname);
+ ret = chtls_key_info(csk, kctx, keylen, optname, cipher_type);
if (ret)
goto out_notcb;
@@ -388,8 +415,10 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
csk->tlshws.txkey = keyid;
}
+ release_sock(sk);
return ret;
out_notcb:
+ release_sock(sk);
free_tls_keyid(sk);
out_nokey:
kfree_skb(skb);
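The chtls_key_info() switch above mostly selects per-cipher constants; key material handling is otherwise identical for both GCM variants. As an illustration only, a hypothetical helper that isolates those choices (sizes per the uapi TLS definitions: 16- or 32-byte key, 4-byte salt in both cases):

struct chtls_gcm_params {
	int ck_size;	/* CHCR_KEYCTX_CIPHER_KEY_SIZE_* */
	int mk_size;	/* CHCR_KEYCTX_MAC_KEY_SIZE_* */
	int salt_size;
};

static int chtls_gcm_params_get(int cipher_type, struct chtls_gcm_params *p)
{
	switch (cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		p->ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		p->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
		p->salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
		return 0;
	case TLS_CIPHER_AES_GCM_256:
		p->ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		p->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		p->salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
		return 0;
	default:
		return -EINVAL;
	}
}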
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index 18996935d8ba..a038de90b2ea 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -84,7 +84,6 @@ static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
{
struct chtls_listen *clisten;
- int err;
if (sk->sk_protocol != IPPROTO_TCP)
return -EPROTONOSUPPORT;
@@ -100,10 +99,10 @@ static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
clisten->cdev = cdev;
clisten->sk = sk;
mutex_lock(&notify_mutex);
- err = raw_notifier_call_chain(&listen_notify_list,
+ raw_notifier_call_chain(&listen_notify_list,
CHTLS_LISTEN_START, clisten);
mutex_unlock(&notify_mutex);
- return err;
+ return 0;
}
static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
@@ -486,6 +485,7 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
struct tls_crypto_info *crypto_info, tmp_crypto_info;
struct chtls_sock *csk;
int keylen;
+ int cipher_type;
int rc = 0;
csk = rcu_dereference_sk_user_data(sk);
@@ -509,6 +509,9 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;
+ /* GCM mode of AES supports 128 and 256 bit encryption, so
+ * copy keys from user based on GCM cipher type.
+ */
switch (tmp_crypto_info.cipher_type) {
case TLS_CIPHER_AES_GCM_128: {
/* Obtain version and type from previous copy */
@@ -525,13 +528,30 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
}
keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
- rc = chtls_setkey(csk, keylen, optname);
+ cipher_type = TLS_CIPHER_AES_GCM_128;
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ crypto_info[0] = tmp_crypto_info;
+ rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
+ optval + sizeof(*crypto_info),
+ sizeof(struct tls12_crypto_info_aes_gcm_256)
+ - sizeof(*crypto_info));
+
+ if (rc) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ keylen = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
+ cipher_type = TLS_CIPHER_AES_GCM_256;
break;
}
default:
rc = -EINVAL;
goto out;
}
+ rc = chtls_setkey(csk, keylen, optname, cipher_type);
out:
return rc;
}
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 73a899e6f837..f4f18bfc2247 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -110,7 +110,6 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
unsigned int len)
{
struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
- unsigned int ret;
tctx->keylen = len;
@@ -119,11 +118,9 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
return 0;
}
- if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+ if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
/* not supported at all */
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
- }
/*
* The requested key size is not supported by HW, do a fallback
@@ -132,20 +129,13 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
tctx->fallback.cip->base.crt_flags |=
(tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(tctx->fallback.cip, key, len);
- if (ret) {
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= (tctx->fallback.cip->base.crt_flags &
- CRYPTO_TFM_RES_MASK);
- }
- return ret;
+ return crypto_cipher_setkey(tctx->fallback.cip, key, len);
}
static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
- unsigned int ret;
tctx->keylen = len;
@@ -154,11 +144,9 @@ static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
return 0;
}
- if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+ if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
/* not supported at all */
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
- }
/*
* The requested key size is not supported by HW, do a fallback
@@ -168,11 +156,7 @@ static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
crypto_skcipher_set_flags(tctx->fallback.skcipher,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
- crypto_skcipher_set_flags(tfm,
- crypto_skcipher_get_flags(tctx->fallback.skcipher) &
- CRYPTO_TFM_RES_MASK);
- return ret;
+ return crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
}
static void
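The geode change follows the same pattern as the chcr and ccree setkey hunks earlier in this diff: with the CRYPTO_TFM_RES_* plumbing gone from the crypto API, a setkey that defers to a fallback simply propagates the fallback's return value. A reduced sketch of that shape under a hypothetical name; the native 128-bit key copy is omitted:

static int example_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int len)
{
	struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

	tctx->keylen = len;

	if (len == AES_KEYSIZE_128)
		return 0;	/* handled natively by the HW */

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
		return -EINVAL;	/* no CRYPTO_TFM_RES_BAD_KEY_LEN flag any more */

	/* unsupported by HW: hand the key to the software fallback */
	crypto_skcipher_set_flags(tctx->fallback.skcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
}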
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index c0e7a85fe129..8851161f722f 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -16,16 +16,22 @@ config CRYPTO_DEV_HISI_SEC
config CRYPTO_DEV_HISI_SEC2
tristate "Support for HiSilicon SEC2 crypto block cipher accelerator"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_ALGAPI
select CRYPTO_LIB_DES
select CRYPTO_DEV_HISI_QM
+ select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
+ select CRYPTO_HMAC
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
depends on PCI && PCI_MSI
depends on ARM64 || (COMPILE_TEST && 64BIT)
help
Support for HiSilicon SEC Engine of version 2 in crypto subsystem.
It provides AES, SM4, and 3DES algorithms with ECB
- CBC, and XTS cipher mode.
+ CBC, and XTS cipher mode, and AEAD algorithms.
To compile this as a module, choose M here: the module
will be called hisi_sec2.
@@ -44,7 +50,6 @@ config CRYPTO_DEV_HISI_ZIP
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on !CPU_BIG_ENDIAN || COMPILE_TEST
select CRYPTO_DEV_HISI_QM
- select SG_SPLIT
help
Support for HiSilicon ZIP Driver
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 98f037e6ea3e..5d400d69e8e4 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -123,7 +123,7 @@ static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
ctx = hpre_req->ctx;
id = hpre_alloc_req_id(ctx);
- if (id < 0)
+ if (unlikely(id < 0))
return -EINVAL;
ctx->req_list[id] = hpre_req;
@@ -174,8 +174,8 @@ static struct hisi_qp *hpre_get_qp_and_start(void)
}
static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len,
- int is_src, dma_addr_t *tmp)
+ struct scatterlist *data, unsigned int len,
+ int is_src, dma_addr_t *tmp)
{
struct hpre_ctx *ctx = hpre_req->ctx;
struct device *dev = HPRE_DEV(ctx);
@@ -190,7 +190,7 @@ static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
}
*tmp = dma_map_single(dev, sg_virt(data),
len, dma_dir);
- if (dma_mapping_error(dev, *tmp)) {
+ if (unlikely(dma_mapping_error(dev, *tmp))) {
dev_err(dev, "dma map data err!\n");
return -ENOMEM;
}
@@ -199,8 +199,8 @@ static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
}
static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len,
- int is_src, dma_addr_t *tmp)
+ struct scatterlist *data, unsigned int len,
+ int is_src, dma_addr_t *tmp)
{
struct hpre_ctx *ctx = hpre_req->ctx;
struct device *dev = HPRE_DEV(ctx);
@@ -208,11 +208,11 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
int shift;
shift = ctx->key_sz - len;
- if (shift < 0)
+ if (unlikely(shift < 0))
return -EINVAL;
ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
- if (!ptr)
+ if (unlikely(!ptr))
return -ENOMEM;
if (is_src) {
@@ -226,12 +226,12 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
}
static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len,
- int is_src, int is_dh)
+ struct scatterlist *data, unsigned int len,
+ int is_src, int is_dh)
{
struct hpre_sqe *msg = &hpre_req->req;
struct hpre_ctx *ctx = hpre_req->ctx;
- dma_addr_t tmp;
+ dma_addr_t tmp = 0;
int ret;
/* when the data is dh's source, we should format it */
@@ -241,7 +241,7 @@ static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
else
ret = hpre_prepare_dma_buf(hpre_req, data, len,
is_src, &tmp);
- if (ret)
+ if (unlikely(ret))
return ret;
if (is_src)
@@ -253,15 +253,16 @@ static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
}
static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
- struct hpre_asym_request *req,
- struct scatterlist *dst, struct scatterlist *src)
+ struct hpre_asym_request *req,
+ struct scatterlist *dst,
+ struct scatterlist *src)
{
struct device *dev = HPRE_DEV(ctx);
struct hpre_sqe *sqe = &req->req;
dma_addr_t tmp;
tmp = le64_to_cpu(sqe->in);
- if (!tmp)
+ if (unlikely(!tmp))
return;
if (src) {
@@ -274,7 +275,7 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
}
tmp = le64_to_cpu(sqe->out);
- if (!tmp)
+ if (unlikely(!tmp))
return;
if (req->dst) {
@@ -288,7 +289,7 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
}
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
- void **kreq)
+ void **kreq)
{
struct hpre_asym_request *req;
int err, id, done;
@@ -308,7 +309,7 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
HREE_SQE_DONE_MASK;
- if (err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE)
+ if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
return 0;
return -EINVAL;
@@ -375,7 +376,7 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
struct hpre_ctx *ctx = qp->qp_ctx;
struct hpre_sqe *sqe = resp;
- ctx->req_list[sqe->tag]->cb(ctx, resp);
+ ctx->req_list[le16_to_cpu(sqe->tag)]->cb(ctx, resp);
}
static int hpre_ctx_init(struct hpre_ctx *ctx)
@@ -454,33 +455,30 @@ static int hpre_dh_compute_value(struct kpp_request *req)
int ctr = 0;
int ret;
- if (!ctx)
- return -EINVAL;
-
ret = hpre_msg_request_set(ctx, req, false);
- if (ret)
+ if (unlikely(ret))
return ret;
if (req->src) {
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
}
ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
if (ctx->crt_g2_mode && !req->src)
- msg->dw0 |= HPRE_ALG_DH_G2;
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
else
- msg->dw0 |= HPRE_ALG_DH;
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
do {
ret = hisi_qp_send(ctx->qp, msg);
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
- if (!ret)
+ if (likely(!ret))
return -EINPROGRESS;
clear_all:
@@ -520,12 +518,12 @@ static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
return -EINVAL;
if (hpre_is_dh_params_length_valid(params->p_size <<
- HPRE_BITS_2_BYTES_SHIFT))
+ HPRE_BITS_2_BYTES_SHIFT))
return -EINVAL;
sz = ctx->key_sz = params->p_size;
ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
- &ctx->dh.dma_xa_p, GFP_KERNEL);
+ &ctx->dh.dma_xa_p, GFP_KERNEL);
if (!ctx->dh.xa_p)
return -ENOMEM;
@@ -559,13 +557,12 @@ static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
hisi_qm_stop_qp(ctx->qp);
if (ctx->dh.g) {
- memset(ctx->dh.g, 0, sz);
dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
ctx->dh.g = NULL;
}
if (ctx->dh.xa_p) {
- memset(ctx->dh.xa_p, 0, sz);
+ memzero_explicit(ctx->dh.xa_p, sz);
dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
ctx->dh.dma_xa_p);
ctx->dh.xa_p = NULL;
@@ -661,9 +658,6 @@ static int hpre_rsa_enc(struct akcipher_request *req)
int ctr = 0;
int ret;
- if (!ctx)
- return -EINVAL;
-
/* For 512 and 1536 bits key size, use soft tfm instead */
if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
@@ -673,22 +667,22 @@ static int hpre_rsa_enc(struct akcipher_request *req)
return ret;
}
- if (!ctx->rsa.pubkey)
+ if (unlikely(!ctx->rsa.pubkey))
return -EINVAL;
ret = hpre_msg_request_set(ctx, req, true);
- if (ret)
+ if (unlikely(ret))
return ret;
- msg->dw0 |= HPRE_ALG_NC_NCRT;
+ msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
msg->key = cpu_to_le64((u64)ctx->rsa.dma_pubkey);
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
do {
@@ -696,7 +690,7 @@ static int hpre_rsa_enc(struct akcipher_request *req)
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
- if (!ret)
+ if (likely(!ret))
return -EINPROGRESS;
clear_all:
@@ -716,9 +710,6 @@ static int hpre_rsa_dec(struct akcipher_request *req)
int ctr = 0;
int ret;
- if (!ctx)
- return -EINVAL;
-
/* For 512 and 1536 bits key size, use soft tfm instead */
if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
@@ -728,27 +719,29 @@ static int hpre_rsa_dec(struct akcipher_request *req)
return ret;
}
- if (!ctx->rsa.prikey)
+ if (unlikely(!ctx->rsa.prikey))
return -EINVAL;
ret = hpre_msg_request_set(ctx, req, true);
- if (ret)
+ if (unlikely(ret))
return ret;
if (ctx->crt_g2_mode) {
msg->key = cpu_to_le64((u64)ctx->rsa.dma_crt_prikey);
- msg->dw0 |= HPRE_ALG_NC_CRT;
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
+ HPRE_ALG_NC_CRT);
} else {
msg->key = cpu_to_le64((u64)ctx->rsa.dma_prikey);
- msg->dw0 |= HPRE_ALG_NC_NCRT;
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
+ HPRE_ALG_NC_NCRT);
}
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
do {
@@ -756,7 +749,7 @@ static int hpre_rsa_dec(struct akcipher_request *req)
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
- if (!ret)
+ if (likely(!ret))
return -EINPROGRESS;
clear_all:
@@ -811,10 +804,8 @@ static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
hpre_rsa_drop_leading_zeros(&ptr, &vlen);
- if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
- ctx->rsa.pubkey = NULL;
+ if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
return -EINVAL;
- }
memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);
@@ -836,17 +827,17 @@ static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
return 0;
}
-static int hpre_crt_para_get(char *para, const char *raw,
- unsigned int raw_sz, unsigned int para_size)
+static int hpre_crt_para_get(char *para, size_t para_sz,
+ const char *raw, size_t raw_sz)
{
const char *ptr = raw;
size_t len = raw_sz;
hpre_rsa_drop_leading_zeros(&ptr, &len);
- if (!len || len > para_size)
+ if (!len || len > para_sz)
return -EINVAL;
- memcpy(para + para_size - len, ptr, len);
+ memcpy(para + para_sz - len, ptr, len);
return 0;
}
@@ -864,32 +855,32 @@ static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
if (!ctx->rsa.crt_prikey)
return -ENOMEM;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey, rsa_key->dq,
- rsa_key->dq_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
+ rsa_key->dq, rsa_key->dq_sz);
if (ret)
goto free_key;
offset = hlf_ksz;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, rsa_key->dp,
- rsa_key->dp_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+ rsa_key->dp, rsa_key->dp_sz);
if (ret)
goto free_key;
offset = hlf_ksz * HPRE_CRT_Q;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
- rsa_key->q, rsa_key->q_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+ rsa_key->q, rsa_key->q_sz);
if (ret)
goto free_key;
offset = hlf_ksz * HPRE_CRT_P;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
- rsa_key->p, rsa_key->p_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+ rsa_key->p, rsa_key->p_sz);
if (ret)
goto free_key;
offset = hlf_ksz * HPRE_CRT_INV;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
- rsa_key->qinv, rsa_key->qinv_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+ rsa_key->qinv, rsa_key->qinv_sz);
if (ret)
goto free_key;
@@ -899,7 +890,7 @@ static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
free_key:
offset = hlf_ksz * HPRE_CRT_PRMS;
- memset(ctx->rsa.crt_prikey, 0, offset);
+ memzero_explicit(ctx->rsa.crt_prikey, offset);
dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
ctx->rsa.dma_crt_prikey);
ctx->rsa.crt_prikey = NULL;
@@ -924,14 +915,15 @@ static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
}
if (ctx->rsa.crt_prikey) {
- memset(ctx->rsa.crt_prikey, 0, half_key_sz * HPRE_CRT_PRMS);
+ memzero_explicit(ctx->rsa.crt_prikey,
+ half_key_sz * HPRE_CRT_PRMS);
dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
ctx->rsa.crt_prikey = NULL;
}
if (ctx->rsa.prikey) {
- memset(ctx->rsa.prikey, 0, ctx->key_sz);
+ memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
ctx->rsa.dma_prikey);
ctx->rsa.prikey = NULL;
@@ -1043,6 +1035,7 @@ static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
{
struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int ret;
ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
if (IS_ERR(ctx->rsa.soft_tfm)) {
@@ -1050,7 +1043,11 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
return PTR_ERR(ctx->rsa.soft_tfm);
}
- return hpre_ctx_init(ctx);
+ ret = hpre_ctx_init(ctx);
+ if (ret)
+ crypto_free_akcipher(ctx->rsa.soft_tfm);
+
+ return ret;
}
static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
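Two recurring fixes in the hpre_crypto.c hunks above deserve a call-out: key buffers are wiped with memzero_explicit() (which the compiler cannot optimise away) before dma_free_coherent(), and the __le32 dw0 field of the SQE is updated with an explicit convert-modify-convert sequence rather than a bare OR. The latter, shown with the HPRE_ALG_DH flag from the diff:

	/* broken on big-endian hosts: ORs a CPU-endian constant into a __le32 */
	msg->dw0 |= HPRE_ALG_DH;

	/* endian-safe read-modify-write, as done in the hunks above */
	msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);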
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 34e0424410bf..401747de67a8 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -73,7 +73,7 @@
#define HPRE_DBGFS_VAL_MAX_LEN 20
#define HPRE_PCI_DEVICE_ID 0xa258
#define HPRE_PCI_VF_DEVICE_ID 0xa259
-#define HPRE_ADDR(qm, offset) (qm->io_base + (offset))
+#define HPRE_ADDR(qm, offset) ((qm)->io_base + (offset))
#define HPRE_QM_USR_CFG_MASK 0xfffffffe
#define HPRE_QM_AXI_CFG_MASK 0xffff
#define HPRE_QM_VFG_AX_MASK 0xff
@@ -106,18 +106,18 @@ static const char * const hpre_debug_file_name[] = {
};
static const struct hpre_hw_error hpre_hw_errors[] = {
- { .int_msk = BIT(0), .msg = "hpre_ecc_1bitt_err" },
- { .int_msk = BIT(1), .msg = "hpre_ecc_2bit_err" },
- { .int_msk = BIT(2), .msg = "hpre_data_wr_err" },
- { .int_msk = BIT(3), .msg = "hpre_data_rd_err" },
- { .int_msk = BIT(4), .msg = "hpre_bd_rd_err" },
- { .int_msk = BIT(5), .msg = "hpre_ooo_2bit_ecc_err" },
- { .int_msk = BIT(6), .msg = "hpre_cltr1_htbt_tm_out_err" },
- { .int_msk = BIT(7), .msg = "hpre_cltr2_htbt_tm_out_err" },
- { .int_msk = BIT(8), .msg = "hpre_cltr3_htbt_tm_out_err" },
- { .int_msk = BIT(9), .msg = "hpre_cltr4_htbt_tm_out_err" },
- { .int_msk = GENMASK(15, 10), .msg = "hpre_ooo_rdrsp_err" },
- { .int_msk = GENMASK(21, 16), .msg = "hpre_ooo_wrrsp_err" },
+ { .int_msk = BIT(0), .msg = "core_ecc_1bit_err_int_set" },
+ { .int_msk = BIT(1), .msg = "core_ecc_2bit_err_int_set" },
+ { .int_msk = BIT(2), .msg = "dat_wb_poison_int_set" },
+ { .int_msk = BIT(3), .msg = "dat_rd_poison_int_set" },
+ { .int_msk = BIT(4), .msg = "bd_rd_poison_int_set" },
+ { .int_msk = BIT(5), .msg = "ooo_ecc_2bit_err_int_set" },
+ { .int_msk = BIT(6), .msg = "cluster1_shb_timeout_int_set" },
+ { .int_msk = BIT(7), .msg = "cluster2_shb_timeout_int_set" },
+ { .int_msk = BIT(8), .msg = "cluster3_shb_timeout_int_set" },
+ { .int_msk = BIT(9), .msg = "cluster4_shb_timeout_int_set" },
+ { .int_msk = GENMASK(15, 10), .msg = "ooo_rdrsp_err_int_set" },
+ { .int_msk = GENMASK(21, 16), .msg = "ooo_wrrsp_err_int_set" },
{ /* sentinel */ }
};
@@ -490,7 +490,7 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
return -EINVAL;
}
spin_unlock_irq(&file->lock);
- ret = sprintf(tbuf, "%u\n", val);
+ ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val);
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
@@ -557,7 +557,7 @@ static const struct file_operations hpre_ctrl_debug_fops = {
static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
- struct dentry *tmp, *file_dir;
+ struct dentry *file_dir;
if (dir)
file_dir = dir;
@@ -571,10 +571,8 @@ static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
dbg->files[indx].debug = dbg;
dbg->files[indx].type = type;
dbg->files[indx].index = indx;
- tmp = debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
- dbg->files + indx, &hpre_ctrl_debug_fops);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
+ dbg->files + indx, &hpre_ctrl_debug_fops);
return 0;
}
@@ -585,7 +583,6 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
struct hisi_qm *qm = &hpre->qm;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
- struct dentry *tmp;
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -595,10 +592,7 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
regset->base = qm->io_base;
- tmp = debugfs_create_regset32("regs", 0444, debug->debug_root, regset);
- if (!tmp)
- return -ENOENT;
-
+ debugfs_create_regset32("regs", 0444, debug->debug_root, regset);
return 0;
}
@@ -609,15 +603,14 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
struct device *dev = &qm->pdev->dev;
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
- struct dentry *tmp_d, *tmp;
+ struct dentry *tmp_d;
int i, ret;
for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
- sprintf(buf, "cluster%d", i);
-
+ ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
+ if (ret < 0)
+ return -EINVAL;
tmp_d = debugfs_create_dir(buf, debug->debug_root);
- if (!tmp_d)
- return -ENOENT;
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -627,9 +620,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
regset->base = qm->io_base + hpre_cluster_offsets[i];
- tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_regset32("regs", 0444, tmp_d, regset);
ret = hpre_create_debugfs_file(debug, tmp_d, HPRE_CLUSTER_CTRL,
i + HPRE_CLUSTER_CTRL);
if (ret)
@@ -668,9 +659,6 @@ static int hpre_debugfs_init(struct hpre *hpre)
int ret;
dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root);
- if (!dir)
- return -ENOENT;
-
qm->debug.debug_root = dir;
ret = hisi_qm_debug_init(qm);
@@ -1014,8 +1002,6 @@ static void hpre_register_debugfs(void)
return;
hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL);
- if (IS_ERR_OR_NULL(hpre_debugfs_root))
- hpre_debugfs_root = NULL;
}
static void hpre_unregister_debugfs(void)
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index b846d73d9a85..13e2d8d7be94 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -9,10 +9,12 @@
#include "../qm.h"
#include "sec_crypto.h"
-/* Cipher resource per hardware SEC queue */
-struct sec_cipher_res {
+/* Algorithm resource per hardware SEC queue */
+struct sec_alg_res {
u8 *c_ivin;
dma_addr_t c_ivin_dma;
+ u8 *out_mac;
+ dma_addr_t out_mac_dma;
};
/* Cipher request of SEC private */
@@ -21,33 +23,35 @@ struct sec_cipher_req {
dma_addr_t c_in_dma;
struct hisi_acc_hw_sgl *c_out;
dma_addr_t c_out_dma;
- u8 *c_ivin;
- dma_addr_t c_ivin_dma;
struct skcipher_request *sk_req;
u32 c_len;
bool encrypt;
};
+struct sec_aead_req {
+ u8 *out_mac;
+ dma_addr_t out_mac_dma;
+ struct aead_request *aead_req;
+};
+
/* SEC request of Crypto */
struct sec_req {
struct sec_sqe sec_sqe;
struct sec_ctx *ctx;
struct sec_qp_ctx *qp_ctx;
- /* Cipher supported only at present */
struct sec_cipher_req c_req;
+ struct sec_aead_req aead_req;
+
int err_type;
int req_id;
/* Status of the SEC request */
- atomic_t fake_busy;
+ bool fake_busy;
};
/**
* struct sec_req_op - Operations for SEC request
- * @get_res: Get resources for TFM on the SEC device
- * @resource_alloc: Allocate resources for queue context on the SEC device
- * @resource_free: Free resources for queue context on the SEC device
* @buf_map: DMA map the SGL buffers of the request
* @buf_unmap: DMA unmap the SGL buffers of the request
* @bd_fill: Fill the SEC queue BD
@@ -56,18 +60,25 @@ struct sec_req {
* @process: Main processing logic of Skcipher
*/
struct sec_req_op {
- int (*get_res)(struct sec_ctx *ctx, struct sec_req *req);
- int (*resource_alloc)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx);
- void (*resource_free)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx);
int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
- void (*callback)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
int (*process)(struct sec_ctx *ctx, struct sec_req *req);
};
+/* SEC auth context */
+struct sec_auth_ctx {
+ dma_addr_t a_key_dma;
+ u8 *a_key;
+ u8 a_key_len;
+ u8 mac_len;
+ u8 a_alg;
+ struct crypto_shash *hash_tfm;
+};
+
/* SEC cipher context which cipher's relatives */
struct sec_cipher_ctx {
u8 *c_key;
@@ -83,9 +94,9 @@ struct sec_cipher_ctx {
/* SEC queue context which defines queue's relatives */
struct sec_qp_ctx {
struct hisi_qp *qp;
- struct sec_req **req_list;
+ struct sec_req *req_list[QM_Q_DEPTH];
struct idr req_idr;
- void *alg_meta_data;
+ struct sec_alg_res res[QM_Q_DEPTH];
struct sec_ctx *ctx;
struct mutex req_lock;
struct hisi_acc_sgl_pool *c_in_pool;
@@ -93,6 +104,11 @@ struct sec_qp_ctx {
atomic_t pending_reqs;
};
+enum sec_alg_type {
+ SEC_SKCIPHER,
+ SEC_AEAD
+};
+
/* SEC Crypto TFM context which defines queue and cipher .etc relatives */
struct sec_ctx {
struct sec_qp_ctx *qp_ctx;
@@ -110,7 +126,10 @@ struct sec_ctx {
/* Currrent cyclic index to select a queue for decipher */
atomic_t dec_qcyclic;
+
+ enum sec_alg_type alg_type;
struct sec_cipher_ctx c_ctx;
+ struct sec_auth_ctx a_ctx;
};
enum sec_endian {
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 0a5391fff485..a2cfcc9ccd94 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -3,7 +3,11 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
+#include <crypto/authenc.h>
#include <crypto/des.h>
+#include <crypto/hash.h>
+#include <crypto/internal/aead.h>
+#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
@@ -27,6 +31,10 @@
#define SEC_SRC_SGL_OFFSET 7
#define SEC_CKEY_OFFSET 9
#define SEC_CMODE_OFFSET 12
+#define SEC_AKEY_OFFSET 5
+#define SEC_AEAD_ALG_OFFSET 11
+#define SEC_AUTH_OFFSET 6
+
#define SEC_FLAG_OFFSET 7
#define SEC_FLAG_MASK 0x0780
#define SEC_TYPE_MASK 0x0F
@@ -35,12 +43,19 @@
#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR 128
#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev)
+#define SEC_CIPHER_AUTH 0xfe
+#define SEC_AUTH_CIPHER 0x1
+#define SEC_MAX_MAC_LEN 64
+#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
+#define SEC_SQE_LEN_RATE 4
+#define SEC_SQE_CFLAG 2
+#define SEC_SQE_AEAD_FLAG 3
+#define SEC_SQE_DONE 0x1
-static DEFINE_MUTEX(sec_algs_lock);
-static unsigned int sec_active_devs;
+static atomic_t sec_active_devs;
/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
-static inline int sec_get_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
if (req->c_req.encrypt)
return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
@@ -50,7 +65,7 @@ static inline int sec_get_queue_id(struct sec_ctx *ctx, struct sec_req *req)
ctx->hlf_q_num;
}
-static inline void sec_put_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
if (req->c_req.encrypt)
atomic_dec(&ctx->enc_qcyclic);
@@ -67,7 +82,7 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
0, QM_Q_DEPTH, GFP_ATOMIC);
mutex_unlock(&qp_ctx->req_lock);
- if (req_id < 0) {
+ if (unlikely(req_id < 0)) {
dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
return req_id;
}
@@ -82,7 +97,7 @@ static void sec_free_req_id(struct sec_req *req)
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
int req_id = req->req_id;
- if (req_id < 0 || req_id >= QM_Q_DEPTH) {
+ if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
return;
}
@@ -95,36 +110,66 @@ static void sec_free_req_id(struct sec_req *req)
mutex_unlock(&qp_ctx->req_lock);
}
+static int sec_aead_verify(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
+{
+ struct aead_request *aead_req = req->aead_req.aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+ u8 *mac_out = qp_ctx->res[req->req_id].out_mac;
+ size_t authsize = crypto_aead_authsize(tfm);
+ u8 *mac = mac_out + SEC_MAX_MAC_LEN;
+ struct scatterlist *sgl = aead_req->src;
+ size_t sz;
+
+ sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize,
+ aead_req->cryptlen + aead_req->assoclen -
+ authsize);
+ if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
+ dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n");
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
struct sec_sqe *bd = resp;
+ struct sec_ctx *ctx;
+ struct sec_req *req;
u16 done, flag;
+ int err = 0;
u8 type;
- struct sec_req *req;
type = bd->type_cipher_auth & SEC_TYPE_MASK;
- if (type == SEC_BD_TYPE2) {
- req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
- req->err_type = bd->type2.error_type;
-
- done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
- flag = (le16_to_cpu(bd->type2.done_flag) &
- SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
- if (req->err_type || done != 0x1 || flag != 0x2)
- dev_err(SEC_CTX_DEV(req->ctx),
- "err_type[%d],done[%d],flag[%d]\n",
- req->err_type, done, flag);
- } else {
+ if (unlikely(type != SEC_BD_TYPE2)) {
pr_err("err bd type [%d]\n", type);
return;
}
- atomic64_inc(&req->ctx->sec->debug.dfx.recv_cnt);
+ req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
+ req->err_type = bd->type2.error_type;
+ ctx = req->ctx;
+ done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
+ flag = (le16_to_cpu(bd->type2.done_flag) &
+ SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
+ if (unlikely(req->err_type || done != SEC_SQE_DONE ||
+ (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
+ (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
+ dev_err(SEC_CTX_DEV(ctx),
+ "err_type[%d],done[%d],flag[%d]\n",
+ req->err_type, done, flag);
+ err = -EIO;
+ }
+
+ if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
+ err = sec_aead_verify(req, qp_ctx);
- req->ctx->req_op->buf_unmap(req->ctx, req);
+ atomic64_inc(&ctx->sec->debug.dfx.recv_cnt);
- req->ctx->req_op->callback(req->ctx, req);
+ ctx->req_op->buf_unmap(ctx, req);
+
+ ctx->req_op->callback(ctx, req, err);
}
static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
@@ -137,11 +182,11 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
mutex_unlock(&qp_ctx->req_lock);
atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
- if (ret == -EBUSY)
+ if (unlikely(ret == -EBUSY))
return -ENOBUFS;
if (!ret) {
- if (atomic_read(&req->fake_busy))
+ if (req->fake_busy)
ret = -EBUSY;
else
ret = -EINPROGRESS;
@@ -150,6 +195,91 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
return ret;
}
+/* Get DMA memory resources */
+static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
+{
+ int i;
+
+ res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ &res->c_ivin_dma, GFP_KERNEL);
+ if (!res->c_ivin)
+ return -ENOMEM;
+
+ for (i = 1; i < QM_Q_DEPTH; i++) {
+ res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
+ res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
+ }
+
+ return 0;
+}
+
+static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
+{
+ if (res->c_ivin)
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+ res->c_ivin, res->c_ivin_dma);
+}
+
+static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
+{
+ int i;
+
+ res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
+ &res->out_mac_dma, GFP_KERNEL);
+ if (!res->out_mac)
+ return -ENOMEM;
+
+ for (i = 1; i < QM_Q_DEPTH; i++) {
+ res[i].out_mac_dma = res->out_mac_dma +
+ i * (SEC_MAX_MAC_LEN << 1);
+ res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
+ }
+
+ return 0;
+}
+
+static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
+{
+ if (res->out_mac)
+ dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
+ res->out_mac, res->out_mac_dma);
+}
+
+static int sec_alg_resource_alloc(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_alg_res *res = qp_ctx->res;
+ int ret;
+
+ ret = sec_alloc_civ_resource(dev, res);
+ if (ret)
+ return ret;
+
+ if (ctx->alg_type == SEC_AEAD) {
+ ret = sec_alloc_mac_resource(dev, res);
+ if (ret)
+ goto get_fail;
+ }
+
+ return 0;
+get_fail:
+ sec_free_civ_resource(dev, res);
+
+ return ret;
+}
+
+static void sec_alg_resource_free(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+
+ sec_free_civ_resource(dev, qp_ctx->res);
+
+ if (ctx->alg_type == SEC_AEAD)
+ sec_free_mac_resource(dev, qp_ctx->res);
+}
+
static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
int qp_ctx_id, int alg_type)
{
@@ -173,15 +303,11 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
atomic_set(&qp_ctx->pending_reqs, 0);
idr_init(&qp_ctx->req_idr);
- qp_ctx->req_list = kcalloc(QM_Q_DEPTH, sizeof(void *), GFP_ATOMIC);
- if (!qp_ctx->req_list)
- goto err_destroy_idr;
-
qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
SEC_SGL_SGE_NR);
if (IS_ERR(qp_ctx->c_in_pool)) {
dev_err(dev, "fail to create sgl pool for input!\n");
- goto err_free_req_list;
+ goto err_destroy_idr;
}
qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
@@ -191,7 +317,7 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
goto err_free_c_in_pool;
}
- ret = ctx->req_op->resource_alloc(ctx, qp_ctx);
+ ret = sec_alg_resource_alloc(ctx, qp_ctx);
if (ret)
goto err_free_c_out_pool;
@@ -202,13 +328,11 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
return 0;
err_queue_free:
- ctx->req_op->resource_free(ctx, qp_ctx);
+ sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
-err_free_req_list:
- kfree(qp_ctx->req_list);
err_destroy_idr:
idr_destroy(&qp_ctx->req_idr);
hisi_qm_release_qp(qp);
@@ -222,66 +346,42 @@ static void sec_release_qp_ctx(struct sec_ctx *ctx,
struct device *dev = SEC_CTX_DEV(ctx);
hisi_qm_stop_qp(qp_ctx->qp);
- ctx->req_op->resource_free(ctx, qp_ctx);
+ sec_alg_resource_free(ctx, qp_ctx);
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
idr_destroy(&qp_ctx->req_idr);
- kfree(qp_ctx->req_list);
hisi_qm_release_qp(qp_ctx->qp);
}
-static int sec_skcipher_init(struct crypto_skcipher *tfm)
+static int sec_ctx_base_init(struct sec_ctx *ctx)
{
- struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct sec_cipher_ctx *c_ctx;
struct sec_dev *sec;
- struct device *dev;
- struct hisi_qm *qm;
int i, ret;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
-
sec = sec_find_device(cpu_to_node(smp_processor_id()));
if (!sec) {
- pr_err("find no Hisilicon SEC device!\n");
+ pr_err("Cannot find a proper Hisilicon SEC device!\n");
return -ENODEV;
}
ctx->sec = sec;
- qm = &sec->qm;
- dev = &qm->pdev->dev;
- ctx->hlf_q_num = sec->ctx_q_num >> 0x1;
+ ctx->hlf_q_num = sec->ctx_q_num >> 1;
/* Half of queue depth is taken as fake requests limit in the queue. */
- ctx->fake_req_limit = QM_Q_DEPTH >> 0x1;
+ ctx->fake_req_limit = QM_Q_DEPTH >> 1;
ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
GFP_KERNEL);
if (!ctx->qp_ctx)
return -ENOMEM;
for (i = 0; i < sec->ctx_q_num; i++) {
- ret = sec_create_qp_ctx(qm, ctx, i, 0);
+ ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
if (ret)
goto err_sec_release_qp_ctx;
}
- c_ctx = &ctx->c_ctx;
- c_ctx->ivsize = crypto_skcipher_ivsize(tfm);
- if (c_ctx->ivsize > SEC_IV_SIZE) {
- dev_err(dev, "get error iv size!\n");
- ret = -EINVAL;
- goto err_sec_release_qp_ctx;
- }
- c_ctx->c_key = dma_alloc_coherent(dev, SEC_MAX_KEY_SIZE,
- &c_ctx->c_key_dma, GFP_KERNEL);
- if (!c_ctx->c_key) {
- ret = -ENOMEM;
- goto err_sec_release_qp_ctx;
- }
-
return 0;
-
err_sec_release_qp_ctx:
for (i = i - 1; i >= 0; i--)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
@@ -290,17 +390,9 @@ err_sec_release_qp_ctx:
return ret;
}
-static void sec_skcipher_exit(struct crypto_skcipher *tfm)
+static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
- struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- int i = 0;
-
- if (c_ctx->c_key) {
- dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
- c_ctx->c_key, c_ctx->c_key_dma);
- c_ctx->c_key = NULL;
- }
+ int i;
for (i = 0; i < ctx->sec->ctx_q_num; i++)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
@@ -308,6 +400,85 @@ static void sec_skcipher_exit(struct crypto_skcipher *tfm)
kfree(ctx->qp_ctx);
}
+static int sec_cipher_init(struct sec_ctx *ctx)
+{
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+
+ c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ &c_ctx->c_key_dma, GFP_KERNEL);
+ if (!c_ctx->c_key)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void sec_cipher_uninit(struct sec_ctx *ctx)
+{
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+
+ memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
+ dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ c_ctx->c_key, c_ctx->c_key_dma);
+}
+
+static int sec_auth_init(struct sec_ctx *ctx)
+{
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+
+ a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ &a_ctx->a_key_dma, GFP_KERNEL);
+ if (!a_ctx->a_key)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void sec_auth_uninit(struct sec_ctx *ctx)
+{
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+
+ memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
+ dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ a_ctx->a_key, a_ctx->a_key_dma);
+}
+
+static int sec_skcipher_init(struct crypto_skcipher *tfm)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
+
+ ctx->alg_type = SEC_SKCIPHER;
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
+ ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
+ if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
+ dev_err(SEC_CTX_DEV(ctx), "invalid skcipher iv size!\n");
+ return -EINVAL;
+ }
+
+ ret = sec_ctx_base_init(ctx);
+ if (ret)
+ return ret;
+
+ ret = sec_cipher_init(ctx);
+ if (ret)
+ goto err_cipher_init;
+
+ return 0;
+err_cipher_init:
+ sec_ctx_base_uninit(ctx);
+
+ return ret;
+}
+
+static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ sec_cipher_uninit(ctx);
+ sec_ctx_base_uninit(ctx);
+}
+
static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
const u32 keylen,
const enum sec_cmode c_mode)
@@ -420,62 +591,8 @@ GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
-static int sec_skcipher_get_res(struct sec_ctx *ctx,
- struct sec_req *req)
-{
- struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct sec_cipher_res *c_res = qp_ctx->alg_meta_data;
- struct sec_cipher_req *c_req = &req->c_req;
- int req_id = req->req_id;
-
- c_req->c_ivin = c_res[req_id].c_ivin;
- c_req->c_ivin_dma = c_res[req_id].c_ivin_dma;
-
- return 0;
-}
-
-static int sec_skcipher_resource_alloc(struct sec_ctx *ctx,
- struct sec_qp_ctx *qp_ctx)
-{
- struct device *dev = SEC_CTX_DEV(ctx);
- struct sec_cipher_res *res;
- int i;
-
- res = kcalloc(QM_Q_DEPTH, sizeof(struct sec_cipher_res), GFP_KERNEL);
- if (!res)
- return -ENOMEM;
-
- res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
- &res->c_ivin_dma, GFP_KERNEL);
- if (!res->c_ivin) {
- kfree(res);
- return -ENOMEM;
- }
-
- for (i = 1; i < QM_Q_DEPTH; i++) {
- res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
- res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
- }
- qp_ctx->alg_meta_data = res;
-
- return 0;
-}
-
-static void sec_skcipher_resource_free(struct sec_ctx *ctx,
- struct sec_qp_ctx *qp_ctx)
-{
- struct sec_cipher_res *res = qp_ctx->alg_meta_data;
- struct device *dev = SEC_CTX_DEV(ctx);
-
- if (!res)
- return;
-
- dma_free_coherent(dev, SEC_TOTAL_IV_SZ, res->c_ivin, res->c_ivin_dma);
- kfree(res);
-}
-
-static int sec_skcipher_map(struct device *dev, struct sec_req *req,
- struct scatterlist *src, struct scatterlist *dst)
+static int sec_cipher_map(struct device *dev, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
{
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
@@ -509,12 +626,20 @@ static int sec_skcipher_map(struct device *dev, struct sec_req *req,
return 0;
}
+static void sec_cipher_unmap(struct device *dev, struct sec_cipher_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
+{
+ if (dst != src)
+ hisi_acc_sg_buf_unmap(dev, src, req->c_in);
+
+ hisi_acc_sg_buf_unmap(dev, dst, req->c_out);
+}
+
static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
- struct sec_cipher_req *c_req = &req->c_req;
+ struct skcipher_request *sq = req->c_req.sk_req;
- return sec_skcipher_map(SEC_CTX_DEV(ctx), req,
- c_req->sk_req->src, c_req->sk_req->dst);
+ return sec_cipher_map(SEC_CTX_DEV(ctx), req, sq->src, sq->dst);
}
static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
@@ -523,10 +648,127 @@ static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
struct sec_cipher_req *c_req = &req->c_req;
struct skcipher_request *sk_req = c_req->sk_req;
- if (sk_req->dst != sk_req->src)
- hisi_acc_sg_buf_unmap(dev, sk_req->src, c_req->c_in);
+ sec_cipher_unmap(dev, c_req, sk_req->src, sk_req->dst);
+}
+
+static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
+ struct crypto_authenc_keys *keys)
+{
+ switch (keys->enckeylen) {
+ case AES_KEYSIZE_128:
+ c_ctx->c_key_len = SEC_CKEY_128BIT;
+ break;
+ case AES_KEYSIZE_192:
+ c_ctx->c_key_len = SEC_CKEY_192BIT;
+ break;
+ case AES_KEYSIZE_256:
+ c_ctx->c_key_len = SEC_CKEY_256BIT;
+ break;
+ default:
+ pr_err("hisi_sec2: aead aes key error!\n");
+ return -EINVAL;
+ }
+ memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);
+
+ return 0;
+}
+
+static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
+ struct crypto_authenc_keys *keys)
+{
+ struct crypto_shash *hash_tfm = ctx->hash_tfm;
+ SHASH_DESC_ON_STACK(shash, hash_tfm);
+ int blocksize, ret;
- hisi_acc_sg_buf_unmap(dev, sk_req->dst, c_req->c_out);
+ if (!keys->authkeylen) {
+ pr_err("hisi_sec2: aead auth key error!\n");
+ return -EINVAL;
+ }
+
+ blocksize = crypto_shash_blocksize(hash_tfm);
+ if (keys->authkeylen > blocksize) {
+ ret = crypto_shash_digest(shash, keys->authkey,
+ keys->authkeylen, ctx->a_key);
+ if (ret) {
+ pr_err("hisi_sec2: aead auth digest error!\n");
+ return -EINVAL;
+ }
+ ctx->a_key_len = blocksize;
+ } else {
+ memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
+ ctx->a_key_len = keys->authkeylen;
+ }
+
+ return 0;
+}
+
+static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ const u32 keylen, const enum sec_hash_alg a_alg,
+ const enum sec_calg c_alg,
+ const enum sec_mac_len mac_len,
+ const enum sec_cmode c_mode)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct crypto_authenc_keys keys;
+ int ret;
+
+ ctx->a_ctx.a_alg = a_alg;
+ ctx->c_ctx.c_alg = c_alg;
+ ctx->a_ctx.mac_len = mac_len;
+ c_ctx->c_mode = c_mode;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen))
+ goto bad_key;
+
+ ret = sec_aead_aes_set_key(c_ctx, &keys);
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n");
+ goto bad_key;
+ }
+
+ ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n");
+ goto bad_key;
+ }
+
+ return 0;
+bad_key:
+ memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
+
+ return -EINVAL;
+}
+
+#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode) \
+static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, \
+ u32 keylen) \
+{ \
+ return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
+}
+
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
+ SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
+ SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
+ SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
+
+static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct aead_request *aq = req->aead_req.aead_req;
+
+ return sec_cipher_map(SEC_CTX_DEV(ctx), req, aq->src, aq->dst);
+}
+
+static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_cipher_req *cq = &req->c_req;
+ struct aead_request *aq = req->aead_req.aead_req;
+
+ sec_cipher_unmap(dev, cq, aq->src, aq->dst);
}
static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
@@ -534,13 +776,13 @@ static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
int ret;
ret = ctx->req_op->buf_map(ctx, req);
- if (ret)
+ if (unlikely(ret))
return ret;
ctx->req_op->do_transfer(ctx, req);
ret = ctx->req_op->bd_fill(ctx, req);
- if (ret)
+ if (unlikely(ret))
goto unmap_req_buf;
return ret;
@@ -559,10 +801,9 @@ static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct skcipher_request *sk_req = req->c_req.sk_req;
- struct sec_cipher_req *c_req = &req->c_req;
+ u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin;
- c_req->c_len = sk_req->cryptlen;
- memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
+ memcpy(c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}
static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
@@ -570,14 +811,15 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_sqe *sec_sqe = &req->sec_sqe;
- u8 de = 0;
u8 scene, sa_type, da_type;
u8 bd_type, cipher;
+ u8 de = 0;
memset(sec_sqe, 0, sizeof(struct sec_sqe));
sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
- sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
+ sec_sqe->type2.c_ivin_addr =
+ cpu_to_le64(req->qp_ctx->res[req->req_id].c_ivin_dma);
sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
@@ -611,25 +853,37 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
return 0;
}
-static void sec_update_iv(struct sec_req *req)
+static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
+ struct aead_request *aead_req = req->aead_req.aead_req;
struct skcipher_request *sk_req = req->c_req.sk_req;
u32 iv_size = req->ctx->c_ctx.ivsize;
struct scatterlist *sgl;
+ unsigned int cryptlen;
size_t sz;
+ u8 *iv;
if (req->c_req.encrypt)
- sgl = sk_req->dst;
+ sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
else
- sgl = sk_req->src;
+ sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;
- sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), sk_req->iv,
- iv_size, sk_req->cryptlen - iv_size);
- if (sz != iv_size)
+ if (alg_type == SEC_SKCIPHER) {
+ iv = sk_req->iv;
+ cryptlen = sk_req->cryptlen;
+ } else {
+ iv = aead_req->iv;
+ cryptlen = aead_req->cryptlen;
+ }
+
+ sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
+ cryptlen - iv_size);
+ if (unlikely(sz != iv_size))
dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}
-static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req)
+static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
+ int err)
{
struct skcipher_request *sk_req = req->c_req.sk_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
@@ -638,13 +892,109 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req)
sec_free_req_id(req);
/* IV output at encryption of CBC mode */
- if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
- sec_update_iv(req);
+ if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
+ sec_update_iv(req, SEC_SKCIPHER);
- if (atomic_cmpxchg(&req->fake_busy, 1, 0) != 1)
+ if (req->fake_busy)
sk_req->base.complete(&sk_req->base, -EINPROGRESS);
- sk_req->base.complete(&sk_req->base, req->err_type);
+ sk_req->base.complete(&sk_req->base, err);
+}
+
+static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct aead_request *aead_req = req->aead_req.aead_req;
+ u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin;
+
+ memcpy(c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
+}
+
+static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
+ struct sec_req *req, struct sec_sqe *sec_sqe)
+{
+ struct sec_aead_req *a_req = &req->aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct aead_request *aq = a_req->aead_req;
+
+ sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
+
+ sec_sqe->type2.mac_key_alg =
+ cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);
+
+ sec_sqe->type2.mac_key_alg |=
+ cpu_to_le32((u32)((ctx->a_key_len) /
+ SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);
+
+ sec_sqe->type2.mac_key_alg |=
+ cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
+
+ sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
+
+ if (dir)
+ sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
+ else
+ sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
+
+ sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);
+
+ sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
+
+ sec_sqe->type2.mac_addr =
+ cpu_to_le64(req->qp_ctx->res[req->req_id].out_mac_dma);
+}
+
+static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
+ struct sec_sqe *sec_sqe = &req->sec_sqe;
+ int ret;
+
+ ret = sec_skcipher_bd_fill(ctx, req);
+ if (unlikely(ret)) {
+ dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill error!\n");
+ return ret;
+ }
+
+ sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);
+
+ return 0;
+}
+
+static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
+{
+ struct aead_request *a_req = req->aead_req.aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+ struct sec_cipher_req *c_req = &req->c_req;
+ size_t authsize = crypto_aead_authsize(tfm);
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ size_t sz;
+
+ atomic_dec(&qp_ctx->pending_reqs);
+
+ if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
+ sec_update_iv(req, SEC_AEAD);
+
+ /* Copy output mac */
+ if (!err && c_req->encrypt) {
+ struct scatterlist *sgl = a_req->dst;
+
+ sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
+ qp_ctx->res[req->req_id].out_mac,
+ authsize, a_req->cryptlen +
+ a_req->assoclen);
+
+ if (unlikely(sz != authsize)) {
+ dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n");
+ err = -EINVAL;
+ }
+ }
+
+ sec_free_req_id(req);
+
+ if (req->fake_busy)
+ a_req->base.complete(&a_req->base, -EINPROGRESS);
+
+ a_req->base.complete(&a_req->base, err);
}
static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
@@ -653,37 +1003,30 @@ static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
atomic_dec(&qp_ctx->pending_reqs);
sec_free_req_id(req);
- sec_put_queue_id(ctx, req);
+ sec_free_queue_id(ctx, req);
}
static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
struct sec_qp_ctx *qp_ctx;
- int issue_id, ret;
+ int queue_id;
/* To load balance */
- issue_id = sec_get_queue_id(ctx, req);
- qp_ctx = &ctx->qp_ctx[issue_id];
+ queue_id = sec_alloc_queue_id(ctx, req);
+ qp_ctx = &ctx->qp_ctx[queue_id];
req->req_id = sec_alloc_req_id(req, qp_ctx);
- if (req->req_id < 0) {
- sec_put_queue_id(ctx, req);
+ if (unlikely(req->req_id < 0)) {
+ sec_free_queue_id(ctx, req);
return req->req_id;
}
if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
- atomic_set(&req->fake_busy, 1);
+ req->fake_busy = true;
else
- atomic_set(&req->fake_busy, 0);
-
- ret = ctx->req_op->get_res(ctx, req);
- if (ret) {
- atomic_dec(&qp_ctx->pending_reqs);
- sec_request_uninit(ctx, req);
- dev_err(SEC_CTX_DEV(ctx), "get resources failed!\n");
- }
+ req->fake_busy = false;
- return ret;
+ return 0;
}
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
@@ -691,20 +1034,20 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
int ret;
ret = sec_request_init(ctx, req);
- if (ret)
+ if (unlikely(ret))
return ret;
ret = sec_request_transfer(ctx, req);
- if (ret)
+ if (unlikely(ret))
goto err_uninit_req;
/* Output IV for decryption */
if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
- sec_update_iv(req);
+ sec_update_iv(req, ctx->alg_type);
ret = ctx->req_op->bd_send(ctx, req);
- if (ret != -EBUSY && ret != -EINPROGRESS) {
- dev_err(SEC_CTX_DEV(ctx), "send sec request failed!\n");
+ if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
+ dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
goto err_send_req;
}
@@ -712,9 +1055,16 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
err_send_req:
/* On failure, restore the IV from the user */
- if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
- memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin,
- ctx->c_ctx.ivsize);
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
+ if (ctx->alg_type == SEC_SKCIPHER)
+ memcpy(req->c_req.sk_req->iv,
+ req->qp_ctx->res[req->req_id].c_ivin,
+ ctx->c_ctx.ivsize);
+ else
+ memcpy(req->aead_req.aead_req->iv,
+ req->qp_ctx->res[req->req_id].c_ivin,
+ ctx->c_ctx.ivsize);
+ }
sec_request_untransfer(ctx, req);
err_uninit_req:
@@ -723,10 +1073,7 @@ err_uninit_req:
return ret;
}
-static struct sec_req_op sec_req_ops_tbl = {
- .get_res = sec_skcipher_get_res,
- .resource_alloc = sec_skcipher_resource_alloc,
- .resource_free = sec_skcipher_resource_free,
+static const struct sec_req_op sec_skcipher_req_ops = {
.buf_map = sec_skcipher_sgl_map,
.buf_unmap = sec_skcipher_sgl_unmap,
.do_transfer = sec_skcipher_copy_iv,
@@ -736,39 +1083,139 @@ static struct sec_req_op sec_req_ops_tbl = {
.process = sec_process,
};
+static const struct sec_req_op sec_aead_req_ops = {
+ .buf_map = sec_aead_sgl_map,
+ .buf_unmap = sec_aead_sgl_unmap,
+ .do_transfer = sec_aead_copy_iv,
+ .bd_fill = sec_aead_bd_fill,
+ .bd_send = sec_bd_send,
+ .callback = sec_aead_callback,
+ .process = sec_process,
+};
+
static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- ctx->req_op = &sec_req_ops_tbl;
+ ctx->req_op = &sec_skcipher_req_ops;
return sec_skcipher_init(tfm);
}
static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
- sec_skcipher_exit(tfm);
+ sec_skcipher_uninit(tfm);
}
-static int sec_skcipher_param_check(struct sec_ctx *ctx,
- struct skcipher_request *sk_req)
+static int sec_aead_init(struct crypto_aead *tfm)
{
- u8 c_alg = ctx->c_ctx.c_alg;
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
+ ctx->alg_type = SEC_AEAD;
+ ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
+ if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
+ dev_err(SEC_CTX_DEV(ctx), "invalid aead iv size!\n");
+ return -EINVAL;
+ }
+
+ ctx->req_op = &sec_aead_req_ops;
+ ret = sec_ctx_base_init(ctx);
+ if (ret)
+ return ret;
+
+ ret = sec_auth_init(ctx);
+ if (ret)
+ goto err_auth_init;
+
+ ret = sec_cipher_init(ctx);
+ if (ret)
+ goto err_cipher_init;
+
+ return ret;
+
+err_cipher_init:
+ sec_auth_uninit(ctx);
+err_auth_init:
+ sec_ctx_base_uninit(ctx);
+
+ return ret;
+}
+
+static void sec_aead_exit(struct crypto_aead *tfm)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+
+ sec_cipher_uninit(ctx);
+ sec_auth_uninit(ctx);
+ sec_ctx_base_uninit(ctx);
+}
+
+static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
+ int ret;
+
+ ret = sec_aead_init(tfm);
+ if (ret) {
+ pr_err("hisi_sec2: aead init error!\n");
+ return ret;
+ }
+
+ auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(auth_ctx->hash_tfm)) {
+ dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n");
+ sec_aead_exit(tfm);
+ return PTR_ERR(auth_ctx->hash_tfm);
+ }
+
+ return 0;
+}
+
+static void sec_aead_ctx_exit(struct crypto_aead *tfm)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_free_shash(ctx->a_ctx.hash_tfm);
+ sec_aead_exit(tfm);
+}
+
+static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
+{
+ return sec_aead_ctx_init(tfm, "sha1");
+}
+
+static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
+{
+ return sec_aead_ctx_init(tfm, "sha256");
+}
+
+static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
+{
+ return sec_aead_ctx_init(tfm, "sha512");
+}
+
+static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+{
+ struct skcipher_request *sk_req = sreq->c_req.sk_req;
struct device *dev = SEC_CTX_DEV(ctx);
+ u8 c_alg = ctx->c_ctx.c_alg;
- if (!sk_req->src || !sk_req->dst) {
+ if (unlikely(!sk_req->src || !sk_req->dst)) {
dev_err(dev, "skcipher input param error!\n");
return -EINVAL;
}
-
+ sreq->c_req.c_len = sk_req->cryptlen;
if (c_alg == SEC_CALG_3DES) {
- if (sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1)) {
+ if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
dev_err(dev, "skcipher 3des input length error!\n");
return -EINVAL;
}
return 0;
} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
- if (sk_req->cryptlen & (AES_BLOCK_SIZE - 1)) {
+ if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
dev_err(dev, "skcipher aes input length error!\n");
return -EINVAL;
}
@@ -789,14 +1236,14 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
if (!sk_req->cryptlen)
return 0;
- ret = sec_skcipher_param_check(ctx, sk_req);
- if (ret)
- return ret;
-
req->c_req.sk_req = sk_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
+ ret = sec_skcipher_param_check(ctx, req);
+ if (unlikely(ret))
+ return -EINVAL;
+
return ctx->req_op->process(ctx, req);
}
@@ -837,7 +1284,7 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
-static struct skcipher_alg sec_algs[] = {
+static struct skcipher_alg sec_skciphers[] = {
SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
AES_BLOCK_SIZE, 0)
@@ -867,23 +1314,133 @@ static struct skcipher_alg sec_algs[] = {
AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};
+static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+{
+ u8 c_alg = ctx->c_ctx.c_alg;
+ struct aead_request *req = sreq->aead_req.aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ size_t authsize = crypto_aead_authsize(tfm);
+
+ if (unlikely(!req->src || !req->dst || !req->cryptlen)) {
+ dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n");
+ return -EINVAL;
+ }
+
+ /* Support AES only */
+ if (unlikely(c_alg != SEC_CALG_AES)) {
+ dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n");
+ return -EINVAL;
+ }
+
+ if (sreq->c_req.encrypt)
+ sreq->c_req.c_len = req->cryptlen;
+ else
+ sreq->c_req.c_len = req->cryptlen - authsize;
+
+ if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
+ dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+ struct sec_req *req = aead_request_ctx(a_req);
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ req->aead_req.aead_req = a_req;
+ req->c_req.encrypt = encrypt;
+ req->ctx = ctx;
+
+ ret = sec_aead_param_check(ctx, req);
+ if (unlikely(ret))
+ return -EINVAL;
+
+ return ctx->req_op->process(ctx, req);
+}
+
+static int sec_aead_encrypt(struct aead_request *a_req)
+{
+ return sec_aead_crypto(a_req, true);
+}
+
+static int sec_aead_decrypt(struct aead_request *a_req)
+{
+ return sec_aead_crypto(a_req, false);
+}
+
+#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\
+ ctx_exit, blk_size, iv_size, max_authsize)\
+{\
+ .base = {\
+ .cra_name = sec_cra_name,\
+ .cra_driver_name = "hisi_sec_"sec_cra_name,\
+ .cra_priority = SEC_PRIORITY,\
+ .cra_flags = CRYPTO_ALG_ASYNC,\
+ .cra_blocksize = blk_size,\
+ .cra_ctxsize = sizeof(struct sec_ctx),\
+ .cra_module = THIS_MODULE,\
+ },\
+ .init = ctx_init,\
+ .exit = ctx_exit,\
+ .setkey = sec_set_key,\
+ .decrypt = sec_aead_decrypt,\
+ .encrypt = sec_aead_encrypt,\
+ .ivsize = iv_size,\
+ .maxauthsize = max_authsize,\
+}
+
+#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\
+ SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\
+ sec_aead_ctx_exit, blksize, ivsize, authsize)
+
+static struct aead_alg sec_aeads[] = {
+ SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
+ sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
+
+ SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
+ sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
+
+ SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
+ sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+};
+
int sec_register_to_crypto(void)
{
int ret = 0;
/* To avoid repeat register */
- mutex_lock(&sec_algs_lock);
- if (++sec_active_devs == 1)
- ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
- mutex_unlock(&sec_algs_lock);
+ if (atomic_add_return(1, &sec_active_devs) == 1) {
+ ret = crypto_register_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
+ if (ret)
+ return ret;
+
+ ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+ if (ret)
+ goto reg_aead_fail;
+ }
+
+ return ret;
+
+reg_aead_fail:
+ crypto_unregister_skciphers(sec_skciphers, ARRAY_SIZE(sec_skciphers));
return ret;
}
void sec_unregister_from_crypto(void)
{
- mutex_lock(&sec_algs_lock);
- if (--sec_active_devs == 0)
- crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
- mutex_unlock(&sec_algs_lock);
+ if (atomic_sub_return(1, &sec_active_devs) == 0) {
+ crypto_unregister_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
+ crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+ }
}
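A quick way to exercise the newly registered AEAD modes from the generic crypto API: the sketch below is illustrative only (demo_authenc_encrypt() is a hypothetical helper, not part of the driver). It assumes the caller has already packed the authentication and encryption keys into the authenc key blob that crypto_authenc_extractkeys() expects, and that buf holds the associated data, plaintext and room for the MAC in one contiguous buffer.

#include <crypto/aead.h>
#include <crypto/sha.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical example; @key must be an authenc-format key blob. */
static int demo_authenc_encrypt(void *buf, unsigned int assoclen,
				unsigned int cryptlen, const u8 *key,
				unsigned int keylen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, keylen);
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);
	if (ret)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	/* buf = assoc data | plaintext | room for the MAC, in one buffer */
	sg_init_one(&sg, buf, assoclen + cryptlen + SHA256_DIGEST_SIZE);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	/* iv must be AES_BLOCK_SIZE (16) bytes for cbc(aes) */
	aead_request_set_crypt(req, &sg, &sg, cryptlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return ret;
}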
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index 097dce828340..b2786e17d8fe 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -14,6 +14,18 @@ enum sec_calg {
SEC_CALG_SM4 = 0x3,
};
+enum sec_hash_alg {
+ SEC_A_HMAC_SHA1 = 0x10,
+ SEC_A_HMAC_SHA256 = 0x11,
+ SEC_A_HMAC_SHA512 = 0x15,
+};
+
+enum sec_mac_len {
+ SEC_HMAC_SHA1_MAC = 20,
+ SEC_HMAC_SHA256_MAC = 32,
+ SEC_HMAC_SHA512_MAC = 64,
+};
+
enum sec_cmode {
SEC_CMODE_ECB = 0x0,
SEC_CMODE_CBC = 0x1,
@@ -34,6 +46,12 @@ enum sec_bd_type {
SEC_BD_TYPE2 = 0x2,
};
+enum sec_auth {
+ SEC_NO_AUTH = 0x0,
+ SEC_AUTH_TYPE1 = 0x1,
+ SEC_AUTH_TYPE2 = 0x2,
+};
+
enum sec_cipher_dir {
SEC_CIPHER_ENC = 0x1,
SEC_CIPHER_DEC = 0x2,
@@ -48,8 +66,8 @@ enum sec_addr_type {
struct sec_sqe_type2 {
/*
- * mac_len: 0~5 bits
- * a_key_len: 6~10 bits
+ * mac_len: 0~4 bits
+ * a_key_len: 5~10 bits
* a_alg: 11~16 bits
*/
__le32 mac_key_alg;
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index ab742dfbab99..2bbaf1e2dae7 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -32,6 +32,7 @@
#define SEC_PF_DEF_Q_NUM 64
#define SEC_PF_DEF_Q_BASE 0
#define SEC_CTX_Q_NUM_DEF 24
+#define SEC_CTX_Q_NUM_MAX 32
#define SEC_CTRL_CNT_CLR_CE 0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
@@ -221,7 +222,7 @@ static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
if (ret)
return -EINVAL;
- if (!ctx_q_num || ctx_q_num > QM_Q_DEPTH || ctx_q_num & 0x1) {
+ if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
return -EINVAL;
}
@@ -235,7 +236,7 @@ static const struct kernel_param_ops sec_ctx_q_num_ops = {
};
static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
-MODULE_PARM_DESC(ctx_q_num, "Number of queue in ctx (2, 4, 6, ..., 1024)");
+MODULE_PARM_DESC(ctx_q_num, "Number of queues in ctx (2, 4, ..., 32; default 24)");
static const struct pci_device_id sec_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
@@ -608,13 +609,13 @@ static const struct file_operations sec_dbg_fops = {
.write = sec_debug_write,
};
-static int debugfs_atomic64_t_get(void *data, u64 *val)
+static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
- *val = atomic64_read((atomic64_t *)data);
- return 0;
+ *val = atomic64_read((atomic64_t *)data);
+ return 0;
}
-DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic64_t_ro, debugfs_atomic64_t_get, NULL,
- "%lld\n");
+DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
+ NULL, "%lld\n");
static int sec_core_debug_init(struct sec_dev *sec)
{
@@ -636,11 +637,11 @@ static int sec_core_debug_init(struct sec_dev *sec)
debugfs_create_regset32("regs", 0444, tmp_d, regset);
- debugfs_create_file("send_cnt", 0444, tmp_d, &dfx->send_cnt,
- &fops_atomic64_t_ro);
+ debugfs_create_file("send_cnt", 0444, tmp_d,
+ &dfx->send_cnt, &sec_atomic64_ops);
- debugfs_create_file("recv_cnt", 0444, tmp_d, &dfx->recv_cnt,
- &fops_atomic64_t_ro);
+ debugfs_create_file("recv_cnt", 0444, tmp_d,
+ &dfx->recv_cnt, &sec_atomic64_ops);
return 0;
}
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 012023c347b1..0e8c7e324fb4 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -202,18 +202,21 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
dma_addr_t curr_sgl_dma = 0;
struct acc_hw_sge *curr_hw_sge;
struct scatterlist *sg;
- int i, ret, sg_n;
+ int i, sg_n, sg_n_mapped;
if (!dev || !sgl || !pool || !hw_sgl_dma)
return ERR_PTR(-EINVAL);
sg_n = sg_nents(sgl);
- if (sg_n > pool->sge_nr)
+
+ sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
+ if (!sg_n_mapped)
return ERR_PTR(-EINVAL);
- ret = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
- if (!ret)
+ if (sg_n_mapped > pool->sge_nr) {
+ dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
return ERR_PTR(-EINVAL);
+ }
curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
if (IS_ERR(curr_hw_sgl)) {
@@ -224,7 +227,7 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
curr_hw_sge = curr_hw_sgl->sge_entries;
- for_each_sg(sgl, sg, sg_n, i) {
+ for_each_sg(sgl, sg, sg_n_mapped, i) {
sg_map_to_hw_sg(sg, curr_hw_sge);
inc_hw_sgl_sge(curr_hw_sgl);
curr_hw_sge++;
@@ -260,7 +263,3 @@ void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
hw_sgl->entry_length_in_sgl = 0;
}
EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_unmap);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
-MODULE_DESCRIPTION("HiSilicon Accelerator SGL support");
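The sgl.c change above follows the standard DMA-mapping contract: iterate over however many entries dma_map_sg() actually mapped, but always unmap with the nents value that was originally passed in. A minimal sketch of that pattern, with a hypothetical helper name (demo_map_sgl):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int demo_map_sgl(struct device *dev, struct scatterlist *sgl,
			int max_sges)
{
	int nents = sg_nents(sgl);
	int mapped = dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);

	if (!mapped)
		return -EINVAL;

	if (mapped > max_sges) {
		/* Unmap with the original nents, not the mapped count */
		dma_unmap_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
		return -EINVAL;
	}

	/* Callers walk the list with for_each_sg(sgl, sg, mapped, i) */
	return mapped;
}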
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 79fc4dd3fe00..bc1db26598bb 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -11,6 +11,10 @@
/* hisi_zip_sqe dw3 */
#define HZIP_BD_STATUS_M GENMASK(7, 0)
+/* hisi_zip_sqe dw7 */
+#define HZIP_IN_SGE_DATA_OFFSET_M GENMASK(23, 0)
+/* hisi_zip_sqe dw8 */
+#define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0)
/* hisi_zip_sqe dw9 */
#define HZIP_REQ_TYPE_M GENMASK(7, 0)
#define HZIP_ALG_TYPE_ZLIB 0x02
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 795428c1d07e..9815d5e3ccd0 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -46,10 +46,8 @@ enum hisi_zip_alg_type {
struct hisi_zip_req {
struct acomp_req *req;
- struct scatterlist *src;
- struct scatterlist *dst;
- size_t slen;
- size_t dlen;
+ int sskip;
+ int dskip;
struct hisi_acc_hw_sgl *hw_src;
struct hisi_acc_hw_sgl *hw_dst;
dma_addr_t dma_src;
@@ -119,13 +117,15 @@ static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag)
static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type,
dma_addr_t s_addr, dma_addr_t d_addr, u32 slen,
- u32 dlen)
+ u32 dlen, int sskip, int dskip)
{
memset(sqe, 0, sizeof(struct hisi_zip_sqe));
- sqe->input_data_length = slen;
+ sqe->input_data_length = slen - sskip;
+ sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, sskip);
+ sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, dskip);
sqe->dw9 = FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
- sqe->dest_avail_out = dlen;
+ sqe->dest_avail_out = dlen - dskip;
sqe->source_addr_l = lower_32_bits(s_addr);
sqe->source_addr_h = upper_32_bits(s_addr);
sqe->dest_addr_l = lower_32_bits(d_addr);
@@ -327,11 +327,6 @@ static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
{
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
- if (qp_ctx->qp->alg_type == HZIP_ALG_TYPE_COMP)
- kfree(req->dst);
- else
- kfree(req->src);
-
write_lock(&req_q->req_lock);
clear_bit(req->req_id, req_q->req_bitmap);
memset(req, 0, sizeof(struct hisi_zip_req));
@@ -359,8 +354,8 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
}
dlen = sqe->produced;
- hisi_acc_sg_buf_unmap(dev, req->src, req->hw_src);
- hisi_acc_sg_buf_unmap(dev, req->dst, req->hw_dst);
+ hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
+ hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);
head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0;
acomp_req->dlen = dlen + head_size;
@@ -454,20 +449,6 @@ static size_t get_comp_head_size(struct scatterlist *src, u8 req_type)
}
}
-static int get_sg_skip_bytes(struct scatterlist *sgl, size_t bytes,
- size_t remains, struct scatterlist **out)
-{
-#define SPLIT_NUM 2
- size_t split_sizes[SPLIT_NUM];
- int out_mapped_nents[SPLIT_NUM];
-
- split_sizes[0] = bytes;
- split_sizes[1] = remains;
-
- return sg_split(sgl, 0, 0, SPLIT_NUM, split_sizes, out,
- out_mapped_nents, GFP_KERNEL);
-}
-
static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
struct hisi_zip_qp_ctx *qp_ctx,
size_t head_size, bool is_comp)
@@ -475,31 +456,7 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
struct hisi_zip_req *q = req_q->q;
struct hisi_zip_req *req_cache;
- struct scatterlist *out[2];
- struct scatterlist *sgl;
- size_t len;
- int ret, req_id;
-
- /*
- * remove/add zlib/gzip head, as hardware operations do not include
- * comp head. so split req->src to get sgl without heads in acomp, or
- * add comp head to req->dst ahead of that hardware output compressed
- * data in sgl splited from req->dst without comp head.
- */
- if (is_comp) {
- sgl = req->dst;
- len = req->dlen - head_size;
- } else {
- sgl = req->src;
- len = req->slen - head_size;
- }
-
- ret = get_sg_skip_bytes(sgl, head_size, len, out);
- if (ret)
- return ERR_PTR(ret);
-
- /* sgl for comp head is useless, so free it now */
- kfree(out[0]);
+ int req_id;
write_lock(&req_q->req_lock);
@@ -507,7 +464,6 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
if (req_id >= req_q->size) {
write_unlock(&req_q->req_lock);
dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
- kfree(out[1]);
return ERR_PTR(-EBUSY);
}
set_bit(req_id, req_q->req_bitmap);
@@ -515,16 +471,13 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
req_cache = q + req_id;
req_cache->req_id = req_id;
req_cache->req = req;
+
if (is_comp) {
- req_cache->src = req->src;
- req_cache->dst = out[1];
- req_cache->slen = req->slen;
- req_cache->dlen = req->dlen - head_size;
+ req_cache->sskip = 0;
+ req_cache->dskip = head_size;
} else {
- req_cache->src = out[1];
- req_cache->dst = req->dst;
- req_cache->slen = req->slen - head_size;
- req_cache->dlen = req->dlen;
+ req_cache->sskip = head_size;
+ req_cache->dskip = 0;
}
write_unlock(&req_q->req_lock);
@@ -536,6 +489,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
struct hisi_zip_qp_ctx *qp_ctx)
{
struct hisi_zip_sqe *zip_sqe = &qp_ctx->zip_sqe;
+ struct acomp_req *a_req = req->req;
struct hisi_qp *qp = qp_ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
@@ -543,16 +497,16 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
dma_addr_t output;
int ret;
- if (!req->src || !req->slen || !req->dst || !req->dlen)
+ if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)
return -EINVAL;
- req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->src, pool,
+ req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
req->req_id << 1, &input);
if (IS_ERR(req->hw_src))
return PTR_ERR(req->hw_src);
req->dma_src = input;
- req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->dst, pool,
+ req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
(req->req_id << 1) + 1,
&output);
if (IS_ERR(req->hw_dst)) {
@@ -561,8 +515,8 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
}
req->dma_dst = output;
- hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, req->slen,
- req->dlen);
+ hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, a_req->slen,
+ a_req->dlen, req->sskip, req->dskip);
hisi_zip_config_buf_type(zip_sqe, HZIP_SGL);
hisi_zip_config_tag(zip_sqe, req->req_id);
@@ -574,9 +528,9 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
return -EINPROGRESS;
err_unmap_output:
- hisi_acc_sg_buf_unmap(dev, req->dst, req->hw_dst);
+ hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
err_unmap_input:
- hisi_acc_sg_buf_unmap(dev, req->src, req->hw_src);
+ hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
return ret;
}
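The header handling above no longer splits the scatterlist: the BD simply tells the engine how many bytes to skip at the start of the source and destination data. A condensed, hypothetical helper showing only the offset packing (demo_fill_offsets() is not in the driver; it relies on the driver's struct hisi_zip_sqe and the HZIP_*_SGE_DATA_OFFSET_M masks from zip.h):

#include <linux/bitfield.h>
#include "zip.h"	/* driver-local: hisi_zip_sqe, HZIP_*_OFFSET_M */

static void demo_fill_offsets(struct hisi_zip_sqe *sqe, u32 slen, u32 dlen,
			      u32 sskip, u32 dskip)
{
	/* Lengths exclude the zlib/gzip header that the engine skips */
	sqe->input_data_length = slen - sskip;
	sqe->dest_avail_out = dlen - dskip;
	sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, sskip);
	sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, dskip);
}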
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index fe4cc8babe1c..25d5227f74a1 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -332,10 +332,10 @@ static int img_hash_dma_init(struct img_hash_dev *hdev)
struct dma_slave_config dma_conf;
int err = -EINVAL;
- hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
- if (!hdev->dma_lch) {
+ hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
+ if (IS_ERR(hdev->dma_lch)) {
dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
- return -EBUSY;
+ return PTR_ERR(hdev->dma_lch);
}
dma_conf.direction = DMA_MEM_TO_DEV;
dma_conf.dst_addr = hdev->bus_addr;
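dma_request_chan() returns an ERR_PTR() rather than NULL on failure, which is what lets the probe path above propagate the real error (for example -EPROBE_DEFER) instead of a fixed -EBUSY. A small sketch of that acquisition pattern, with a hypothetical helper name (demo_request_tx_chan):

#include <linux/dmaengine.h>
#include <linux/err.h>

static int demo_request_tx_chan(struct device *dev, struct dma_chan **chan)
{
	struct dma_chan *lch = dma_request_chan(dev, "tx");

	if (IS_ERR(lch))
		return PTR_ERR(lch);	/* may be -EPROBE_DEFER */

	*chan = lch;
	return 0;
}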
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 64894d8b442a..2cb53fbae841 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -501,8 +501,8 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
writel(upper_32_bits(priv->ring[i].cdr.base_dma),
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
- writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 14) |
- priv->config.cd_size,
+ writel(EIP197_xDR_DESC_MODE_64BIT | EIP197_CDR_DESC_MODE_ADCP |
+ (priv->config.cd_offset << 14) | priv->config.cd_size,
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((cd_fetch_cnt *
(cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
@@ -974,16 +974,18 @@ int safexcel_invalidate_cache(struct crypto_async_request *async,
{
struct safexcel_command_desc *cdesc;
struct safexcel_result_desc *rdesc;
+ struct safexcel_token *dmmy;
int ret = 0;
/* Prepare command descriptor */
- cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
+ cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma,
+ &dmmy);
if (IS_ERR(cdesc))
return PTR_ERR(cdesc);
cdesc->control_data.type = EIP197_TYPE_EXTENDED;
cdesc->control_data.options = 0;
- cdesc->control_data.refresh = 0;
+ cdesc->control_data.context_lo &= ~EIP197_CONTEXT_SIZE_MASK;
cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;
/* Prepare result descriptor */
@@ -1331,6 +1333,7 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
priv->config.cd_size = EIP197_CD64_FETCH_SIZE;
priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
+ priv->config.cdsh_offset = (EIP197_MAX_TOKENS + mask) & ~mask;
/* res token is behind the descr, but ofs must be rounded to buswdth */
priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask;
@@ -1341,6 +1344,7 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
/* convert dwords to bytes */
priv->config.cd_offset *= sizeof(u32);
+ priv->config.cdsh_offset *= sizeof(u32);
priv->config.rd_offset *= sizeof(u32);
priv->config.res_offset *= sizeof(u32);
}
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index b4624b5687ce..94016c505abb 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -40,7 +40,8 @@
/* Static configuration */
#define EIP197_DEFAULT_RING_SIZE 400
-#define EIP197_MAX_TOKENS 19
+#define EIP197_EMB_TOKENS 4 /* Pad CD to 16 dwords */
+#define EIP197_MAX_TOKENS 16
#define EIP197_MAX_RINGS 4
#define EIP197_FETCH_DEPTH 2
#define EIP197_MAX_BATCH_SZ 64
@@ -207,6 +208,7 @@
/* EIP197_HIA_xDR_DESC_SIZE */
#define EIP197_xDR_DESC_MODE_64BIT BIT(31)
+#define EIP197_CDR_DESC_MODE_ADCP BIT(30)
/* EIP197_HIA_xDR_DMA_CFG */
#define EIP197_HIA_xDR_WR_RES_BUF BIT(22)
@@ -277,9 +279,9 @@
#define EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(n) ((n) << 16)
#define EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(n) (((n) & 0x7) << 20)
#define EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(n) ((n) << 24)
-#define EIP197_HIA_DFE_CFG_DIS_DEBUG (BIT(31) | BIT(29))
+#define EIP197_HIA_DFE_CFG_DIS_DEBUG GENMASK(31, 29)
#define EIP197_HIA_DSE_CFG_EN_SINGLE_WR BIT(29)
-#define EIP197_HIA_DSE_CFG_DIS_DEBUG BIT(31)
+#define EIP197_HIA_DSE_CFG_DIS_DEBUG GENMASK(31, 30)
/* EIP197_HIA_DFE/DSE_THR_CTRL */
#define EIP197_DxE_THR_CTRL_EN BIT(30)
@@ -553,6 +555,8 @@ static inline void eip197_noop_token(struct safexcel_token *token)
{
token->opcode = EIP197_TOKEN_OPCODE_NOOP;
token->packet_length = BIT(2);
+ token->stat = 0;
+ token->instructions = 0;
}
/* Instructions */
@@ -574,14 +578,13 @@ struct safexcel_control_data_desc {
u16 application_id;
u16 rsvd;
- u8 refresh:2;
- u32 context_lo:30;
+ u32 context_lo;
u32 context_hi;
u32 control0;
u32 control1;
- u32 token[EIP197_MAX_TOKENS];
+ u32 token[EIP197_EMB_TOKENS];
} __packed;
#define EIP197_OPTION_MAGIC_VALUE BIT(0)
@@ -591,7 +594,10 @@ struct safexcel_control_data_desc {
#define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10)
#define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9)
+#define EIP197_TYPE_BCLA 0x0
#define EIP197_TYPE_EXTENDED 0x3
+#define EIP197_CONTEXT_SMALL 0x2
+#define EIP197_CONTEXT_SIZE_MASK 0x3
/* Basic Command Descriptor format */
struct safexcel_command_desc {
@@ -599,13 +605,16 @@ struct safexcel_command_desc {
u8 rsvd0:5;
u8 last_seg:1;
u8 first_seg:1;
- u16 additional_cdata_size:8;
+ u8 additional_cdata_size:8;
u32 rsvd1;
u32 data_lo;
u32 data_hi;
+ u32 atok_lo;
+ u32 atok_hi;
+
struct safexcel_control_data_desc control_data;
} __packed;
@@ -629,15 +638,20 @@ enum eip197_fw {
struct safexcel_desc_ring {
void *base;
+ void *shbase;
void *base_end;
+ void *shbase_end;
dma_addr_t base_dma;
+ dma_addr_t shbase_dma;
/* write and read pointers */
void *write;
+ void *shwrite;
void *read;
/* descriptor element offset */
- unsigned offset;
+ unsigned int offset;
+ unsigned int shoffset;
};
enum safexcel_alg_type {
@@ -652,6 +666,7 @@ struct safexcel_config {
u32 cd_size;
u32 cd_offset;
+ u32 cdsh_offset;
u32 rd_size;
u32 rd_offset;
@@ -862,7 +877,8 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
bool first, bool last,
dma_addr_t data, u32 len,
u32 full_data_len,
- dma_addr_t context);
+ dma_addr_t context,
+ struct safexcel_token **atoken);
struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
int ring_id,
bool first, bool last,
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index c02995694b41..0c5e80c3f6e3 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -47,8 +47,12 @@ struct safexcel_cipher_ctx {
u32 mode;
enum safexcel_cipher_alg alg;
- char aead; /* !=0=AEAD, 2=IPSec ESP AEAD, 3=IPsec ESP GMAC */
- char xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
+ u8 aead; /* !=0=AEAD, 2=IPSec ESP AEAD, 3=IPsec ESP GMAC */
+ u8 xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
+ u8 aadskip;
+ u8 blocksz;
+ u32 ivmask;
+ u32 ctrinit;
__le32 key[16];
u32 nonce;
@@ -72,251 +76,298 @@ struct safexcel_cipher_req {
int nr_src, nr_dst;
};
-static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
- struct safexcel_command_desc *cdesc)
+static int safexcel_skcipher_iv(struct safexcel_cipher_ctx *ctx, u8 *iv,
+ struct safexcel_command_desc *cdesc)
{
- u32 block_sz = 0;
-
- if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD ||
- ctx->aead & EIP197_AEAD_TYPE_IPSEC_ESP) { /* _ESP and _ESP_GMAC */
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
-
/* 32 bit nonce */
cdesc->control_data.token[0] = ctx->nonce;
/* 64 bit IV part */
memcpy(&cdesc->control_data.token[1], iv, 8);
-
- if (ctx->alg == SAFEXCEL_CHACHA20 ||
- ctx->xcm == EIP197_XCM_MODE_CCM) {
- /* 32 bit counter, starting at 0 */
- cdesc->control_data.token[3] = 0;
- } else {
- /* 32 bit counter, start at 1 (big endian!) */
- cdesc->control_data.token[3] =
- (__force u32)cpu_to_be32(1);
- }
-
- return;
- } else if (ctx->xcm == EIP197_XCM_MODE_GCM ||
- (ctx->aead && ctx->alg == SAFEXCEL_CHACHA20)) {
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
-
- /* 96 bit IV part */
- memcpy(&cdesc->control_data.token[0], iv, 12);
-
- if (ctx->alg == SAFEXCEL_CHACHA20) {
- /* 32 bit counter, starting at 0 */
- cdesc->control_data.token[3] = 0;
- } else {
- /* 32 bit counter, start at 1 (big endian!) */
- *(__be32 *)&cdesc->control_data.token[3] =
- cpu_to_be32(1);
- }
-
- return;
- } else if (ctx->alg == SAFEXCEL_CHACHA20) {
+ /* 32 bit counter, start at 0 or 1 (big endian!) */
+ cdesc->control_data.token[3] =
+ (__force u32)cpu_to_be32(ctx->ctrinit);
+ return 4;
+ }
+ if (ctx->alg == SAFEXCEL_CHACHA20) {
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
-
/* 96 bit nonce part */
memcpy(&cdesc->control_data.token[0], &iv[4], 12);
/* 32 bit counter */
cdesc->control_data.token[3] = *(u32 *)iv;
-
- return;
- } else if (ctx->xcm == EIP197_XCM_MODE_CCM) {
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
-
- /* Variable length IV part */
- memcpy(&cdesc->control_data.token[0], iv, 15 - iv[0]);
- /* Start variable length counter at 0 */
- memset((u8 *)&cdesc->control_data.token[0] + 15 - iv[0],
- 0, iv[0] + 1);
-
- return;
+ return 4;
}
- if (ctx->mode != CONTEXT_CONTROL_CRYPTO_MODE_ECB) {
- switch (ctx->alg) {
- case SAFEXCEL_DES:
- block_sz = DES_BLOCK_SIZE;
- cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
- break;
- case SAFEXCEL_3DES:
- block_sz = DES3_EDE_BLOCK_SIZE;
- cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
- break;
- case SAFEXCEL_SM4:
- block_sz = SM4_BLOCK_SIZE;
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
- break;
- case SAFEXCEL_AES:
- block_sz = AES_BLOCK_SIZE;
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
- break;
- default:
- break;
- }
- memcpy(cdesc->control_data.token, iv, block_sz);
- }
+ cdesc->control_data.options |= ctx->ivmask;
+ memcpy(cdesc->control_data.token, iv, ctx->blocksz);
+ return ctx->blocksz / sizeof(u32);
}
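For CTR-type modes the function above lays the RFC 3686 style nonce, IV and counter out across the four token dwords, with the counter stored big endian and seeded from ctx->ctrinit. A self-contained sketch of that 16-byte layout (host-side, made-up values; only the ordering mirrors the code above):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Build the 16-byte CTR IV block: dword 0 is the 32-bit nonce,
 * dwords 1-2 the 64-bit IV, dword 3 the big-endian initial counter. */
static void build_ctr_token(uint32_t token[4], uint32_t nonce,
                            const uint8_t iv[8], uint32_t ctrinit)
{
        uint8_t *ctr = (uint8_t *)&token[3];

        token[0] = nonce;              /* nonce is used as-is */
        memcpy(&token[1], iv, 8);      /* 64-bit IV fills dwords 1 and 2 */
        ctr[0] = (uint8_t)(ctrinit >> 24);   /* big-endian counter init: */
        ctr[1] = (uint8_t)(ctrinit >> 16);   /* 0 for CCM/ChaCha20, */
        ctr[2] = (uint8_t)(ctrinit >> 8);    /* 1 otherwise */
        ctr[3] = (uint8_t)ctrinit;
}

int main(void)
{
        uint32_t tok[4];
        uint8_t iv[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

        build_ctr_token(tok, 0xdeadbeefu, iv, 1);
        printf("%08x %08x %08x %08x\n", tok[0], tok[1], tok[2], tok[3]);
        return 0;
}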
static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
struct safexcel_command_desc *cdesc,
+ struct safexcel_token *atoken,
u32 length)
{
struct safexcel_token *token;
+ int ivlen;
- safexcel_cipher_token(ctx, iv, cdesc);
+ ivlen = safexcel_skcipher_iv(ctx, iv, cdesc);
+ if (ivlen == 4) {
+ /* No space in cdesc, instruction moves to atoken */
+ cdesc->additional_cdata_size = 1;
+ token = atoken;
+ } else {
+ /* Everything fits in cdesc */
+ token = (struct safexcel_token *)(cdesc->control_data.token + 2);
+ /* Need to pad with NOP */
+ eip197_noop_token(&token[1]);
+ }
- /* skip over worst case IV of 4 dwords, no need to be exact */
- token = (struct safexcel_token *)(cdesc->control_data.token + 4);
+ token->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ token->packet_length = length;
+ token->stat = EIP197_TOKEN_STAT_LAST_PACKET |
+ EIP197_TOKEN_STAT_LAST_HASH;
+ token->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_CRYPTO |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
+}
- token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[0].packet_length = length;
- token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET |
- EIP197_TOKEN_STAT_LAST_HASH;
- token[0].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_CRYPTO |
- EIP197_TOKEN_INS_TYPE_OUTPUT;
+static void safexcel_aead_iv(struct safexcel_cipher_ctx *ctx, u8 *iv,
+ struct safexcel_command_desc *cdesc)
+{
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD ||
+ ctx->aead & EIP197_AEAD_TYPE_IPSEC_ESP) { /* _ESP and _ESP_GMAC */
+ /* 32 bit nonce */
+ cdesc->control_data.token[0] = ctx->nonce;
+ /* 64 bit IV part */
+ memcpy(&cdesc->control_data.token[1], iv, 8);
+ /* 32 bit counter, start at 0 or 1 (big endian!) */
+ cdesc->control_data.token[3] =
+ (__force u32)cpu_to_be32(ctx->ctrinit);
+ return;
+ }
+ if (ctx->xcm == EIP197_XCM_MODE_GCM || ctx->alg == SAFEXCEL_CHACHA20) {
+ /* 96 bit IV part */
+ memcpy(&cdesc->control_data.token[0], iv, 12);
+ /* 32 bit counter, start at 0 or 1 (big endian!) */
+ cdesc->control_data.token[3] =
+ (__force u32)cpu_to_be32(ctx->ctrinit);
+ return;
+ }
+ /* CBC */
+ memcpy(cdesc->control_data.token, iv, ctx->blocksz);
}
static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
struct safexcel_command_desc *cdesc,
+ struct safexcel_token *atoken,
enum safexcel_cipher_direction direction,
u32 cryptlen, u32 assoclen, u32 digestsize)
{
- struct safexcel_token *token;
+ struct safexcel_token *aadref;
+ int atoksize = 2; /* Start with minimum size */
+ int assocadj = assoclen - ctx->aadskip, aadalign;
- safexcel_cipher_token(ctx, iv, cdesc);
+ /* Always 4 dwords of embedded IV for AEAD modes */
+ cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
- if (direction == SAFEXCEL_ENCRYPT) {
- /* align end of instruction sequence to end of token */
- token = (struct safexcel_token *)(cdesc->control_data.token +
- EIP197_MAX_TOKENS - 14);
-
- token[13].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[13].packet_length = digestsize;
- token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
- EIP197_TOKEN_STAT_LAST_PACKET;
- token[13].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
- EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
- } else {
+ if (direction == SAFEXCEL_DECRYPT)
cryptlen -= digestsize;
- /* align end of instruction sequence to end of token */
- token = (struct safexcel_token *)(cdesc->control_data.token +
- EIP197_MAX_TOKENS - 15);
-
- token[13].opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
- token[13].packet_length = digestsize;
- token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
- EIP197_TOKEN_STAT_LAST_PACKET;
- token[13].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
-
- token[14].opcode = EIP197_TOKEN_OPCODE_VERIFY;
- token[14].packet_length = digestsize |
- EIP197_TOKEN_HASH_RESULT_VERIFY;
- token[14].stat = EIP197_TOKEN_STAT_LAST_HASH |
- EIP197_TOKEN_STAT_LAST_PACKET;
- token[14].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
- }
-
- if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
- /* For ESP mode (and not GMAC), skip over the IV */
- token[8].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[8].packet_length = EIP197_AEAD_IPSEC_IV_SIZE;
-
- assoclen -= EIP197_AEAD_IPSEC_IV_SIZE;
- }
+ if (unlikely(ctx->xcm == EIP197_XCM_MODE_CCM)) {
+ /* Construct IV block B0 for the CBC-MAC */
+ u8 *final_iv = (u8 *)cdesc->control_data.token;
+ u8 *cbcmaciv = (u8 *)&atoken[1];
+ __le32 *aadlen = (__le32 *)&atoken[5];
+
+ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
+ /* Length + nonce */
+ cdesc->control_data.token[0] = ctx->nonce;
+ /* Fixup flags byte */
+ *(__le32 *)cbcmaciv =
+ cpu_to_le32(ctx->nonce |
+ ((assocadj > 0) << 6) |
+ ((digestsize - 2) << 2));
+ /* 64 bit IV part */
+ memcpy(&cdesc->control_data.token[1], iv, 8);
+ memcpy(cbcmaciv + 4, iv, 8);
+ /* Start counter at 0 */
+ cdesc->control_data.token[3] = 0;
+ /* Message length */
+ *(__be32 *)(cbcmaciv + 12) = cpu_to_be32(cryptlen);
+ } else {
+ /* Variable length IV part */
+ memcpy(final_iv, iv, 15 - iv[0]);
+ memcpy(cbcmaciv, iv, 15 - iv[0]);
+ /* Start variable length counter at 0 */
+ memset(final_iv + 15 - iv[0], 0, iv[0] + 1);
+ memset(cbcmaciv + 15 - iv[0], 0, iv[0] - 1);
+ /* fixup flags byte */
+ cbcmaciv[0] |= ((assocadj > 0) << 6) |
+ ((digestsize - 2) << 2);
+ /* insert lower 2 bytes of message length */
+ cbcmaciv[14] = cryptlen >> 8;
+ cbcmaciv[15] = cryptlen & 255;
+ }
- token[6].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[6].packet_length = assoclen;
- token[6].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_HASH;
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = AES_BLOCK_SIZE +
+ ((assocadj > 0) << 1);
+ atoken->stat = 0;
+ atoken->instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN |
+ EIP197_TOKEN_INS_TYPE_HASH;
+
+ if (likely(assocadj)) {
+ *aadlen = cpu_to_le32((assocadj >> 8) |
+ (assocadj & 255) << 8);
+ atoken += 6;
+ atoksize += 7;
+ } else {
+ atoken += 5;
+ atoksize += 6;
+ }
- if (likely(cryptlen || ctx->alg == SAFEXCEL_CHACHA20)) {
- token[11].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[11].packet_length = cryptlen;
- token[11].stat = EIP197_TOKEN_STAT_LAST_HASH;
- if (unlikely(ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC)) {
- token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
- /* Do not send to crypt engine in case of GMAC */
- token[11].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_HASH |
- EIP197_TOKEN_INS_TYPE_OUTPUT;
+ /* Process AAD data */
+ aadref = atoken;
+ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ atoken->packet_length = assocadj;
+ atoken->stat = 0;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ atoken++;
+
+ /* For CCM only, align AAD data towards hash engine */
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ aadalign = (assocadj + 2) & 15;
+ atoken->packet_length = assocadj && aadalign ?
+ 16 - aadalign :
+ 0;
+ if (likely(cryptlen)) {
+ atoken->stat = 0;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH;
} else {
- token[11].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_CRYPTO |
- EIP197_TOKEN_INS_TYPE_HASH |
- EIP197_TOKEN_INS_TYPE_OUTPUT;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
+ atoken->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_HASH;
}
- } else if (ctx->xcm != EIP197_XCM_MODE_CCM) {
- token[6].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ } else {
+ safexcel_aead_iv(ctx, iv, cdesc);
+
+ /* Process AAD data */
+ aadref = atoken;
+ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ atoken->packet_length = assocadj;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
+ atoken->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_HASH;
}
+ atoken++;
- if (!ctx->xcm)
- return;
+ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
+ /* For ESP mode (and not GMAC), skip over the IV */
+ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ atoken->packet_length = EIP197_AEAD_IPSEC_IV_SIZE;
+ atoken->stat = 0;
+ atoken->instructions = 0;
+ atoken++;
+ atoksize++;
+ } else if (unlikely(ctx->alg == SAFEXCEL_CHACHA20 &&
+ direction == SAFEXCEL_DECRYPT)) {
+ /* Poly-chacha decryption needs a dummy NOP here ... */
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = 16; /* According to Op Manual */
+ atoken->stat = 0;
+ atoken->instructions = 0;
+ atoken++;
+ atoksize++;
+ }
- token[9].opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES;
- token[9].packet_length = 0;
- token[9].instructions = AES_BLOCK_SIZE;
+ if (ctx->xcm) {
+ /* For GCM and CCM, obtain enc(Y0) */
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES;
+ atoken->packet_length = 0;
+ atoken->stat = 0;
+ atoken->instructions = AES_BLOCK_SIZE;
+ atoken++;
+
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = AES_BLOCK_SIZE;
+ atoken->stat = 0;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+ EIP197_TOKEN_INS_TYPE_CRYPTO;
+ atoken++;
+ atoksize += 2;
+ }
- token[10].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[10].packet_length = AES_BLOCK_SIZE;
- token[10].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
- EIP197_TOKEN_INS_TYPE_CRYPTO;
+ if (likely(cryptlen || ctx->alg == SAFEXCEL_CHACHA20)) {
+ /* Fixup stat field for AAD direction instruction */
+ aadref->stat = 0;
- if (ctx->xcm != EIP197_XCM_MODE_GCM) {
- u8 *final_iv = (u8 *)cdesc->control_data.token;
- u8 *cbcmaciv = (u8 *)&token[1];
- __le32 *aadlen = (__le32 *)&token[5];
+ /* Process crypto data */
+ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ atoken->packet_length = cryptlen;
- /* Construct IV block B0 for the CBC-MAC */
- token[0].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[0].packet_length = AES_BLOCK_SIZE +
- ((assoclen > 0) << 1);
- token[0].instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN |
- EIP197_TOKEN_INS_TYPE_HASH;
- /* Variable length IV part */
- memcpy(cbcmaciv, final_iv, 15 - final_iv[0]);
- /* fixup flags byte */
- cbcmaciv[0] |= ((assoclen > 0) << 6) | ((digestsize - 2) << 2);
- /* Clear upper bytes of variable message length to 0 */
- memset(cbcmaciv + 15 - final_iv[0], 0, final_iv[0] - 1);
- /* insert lower 2 bytes of message length */
- cbcmaciv[14] = cryptlen >> 8;
- cbcmaciv[15] = cryptlen & 255;
+ if (unlikely(ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC)) {
+ /* Fixup instruction field for AAD dir instruction */
+ aadref->instructions = EIP197_TOKEN_INS_TYPE_HASH;
- if (assoclen) {
- *aadlen = cpu_to_le32((assoclen >> 8) |
- ((assoclen & 0xff) << 8));
- assoclen += 2;
+ /* Do not send to crypt engine in case of GMAC */
+ atoken->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_HASH |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
+ } else {
+ atoken->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_CRYPTO |
+ EIP197_TOKEN_INS_TYPE_HASH |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
}
- token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
-
- /* Align AAD data towards hash engine */
- token[7].opcode = EIP197_TOKEN_OPCODE_INSERT;
- assoclen &= 15;
- token[7].packet_length = assoclen ? 16 - assoclen : 0;
-
- if (likely(cryptlen)) {
- token[7].instructions = EIP197_TOKEN_INS_TYPE_HASH;
-
- /* Align crypto data towards hash engine */
- token[11].stat = 0;
-
- token[12].opcode = EIP197_TOKEN_OPCODE_INSERT;
- cryptlen &= 15;
- token[12].packet_length = cryptlen ? 16 - cryptlen : 0;
- token[12].stat = EIP197_TOKEN_STAT_LAST_HASH;
- token[12].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ cryptlen &= 15;
+ if (unlikely(ctx->xcm == EIP197_XCM_MODE_CCM && cryptlen)) {
+ atoken->stat = 0;
+ /* For CCM only, pad crypto data to the hash engine */
+ atoken++;
+ atoksize++;
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = 16 - cryptlen;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH;
} else {
- token[7].stat = EIP197_TOKEN_STAT_LAST_HASH;
- token[7].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_HASH;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
}
+ atoken++;
+ atoksize++;
}
+
+ if (direction == SAFEXCEL_ENCRYPT) {
+ /* Append ICV */
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = digestsize;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+ EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+ } else {
+ /* Extract ICV */
+ atoken->opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
+ atoken->packet_length = digestsize;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+ atoken->instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+ atoken++;
+ atoksize++;
+
+ /* Verify ICV */
+ atoken->opcode = EIP197_TOKEN_OPCODE_VERIFY;
+ atoken->packet_length = digestsize |
+ EIP197_TOKEN_HASH_RESULT_VERIFY;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
+ }
+
+ /* Fixup length of the token in the command descriptor */
+ cdesc->additional_cdata_size = atoksize;
}
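The CCM branch above builds the B0 block that seeds the CBC-MAC: a flags byte, the nonce, and a big-endian message length. A standalone version in plain RFC 3610 form follows; for even digest sizes the ((tag_len - 2) / 2) << 3 term equals the driver's ((digestsize - 2) << 2) encoding, and all inputs below are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Build the CCM B0 block (RFC 3610): nonce_len is 15 - q, tag_len the
 * ICV size, have_aad mirrors "assocadj > 0" in the code above. */
static void ccm_build_b0(uint8_t b0[16], const uint8_t *nonce,
                         unsigned int nonce_len, unsigned int tag_len,
                         int have_aad, uint64_t msg_len)
{
        unsigned int q = 15 - nonce_len;   /* width of the length field */
        unsigned int i;

        memset(b0, 0, 16);
        b0[0] = (have_aad ? 0x40 : 0) |            /* Adata flag */
                (((tag_len - 2) / 2) << 3) |
                (q - 1);
        memcpy(&b0[1], nonce, nonce_len);
        for (i = 0; i < q; i++)                    /* big-endian msg length */
                b0[15 - i] = (uint8_t)(msg_len >> (8 * i));
}

int main(void)
{
        uint8_t nonce[13] = { 0 }, b0[16];
        unsigned int i;

        ccm_build_b0(b0, nonce, sizeof(nonce), 16, 1, 256);
        for (i = 0; i < 16; i++)
                printf("%02x", b0[i]);
        printf("\n");
        return 0;
}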
static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
@@ -329,10 +380,8 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
int ret, i;
ret = aes_expandkey(&aes, key, len);
- if (ret) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
@@ -382,12 +431,12 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
case SAFEXCEL_DES:
err = verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen);
if (unlikely(err))
- goto badkey_expflags;
+ goto badkey;
break;
case SAFEXCEL_3DES:
err = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen);
if (unlikely(err))
- goto badkey_expflags;
+ goto badkey;
break;
case SAFEXCEL_AES:
err = aes_expandkey(&aes, keys.enckey, keys.enckeylen);
@@ -450,9 +499,6 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
goto badkey;
}
- crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) &
- CRYPTO_TFM_RES_MASK);
-
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
(memcmp(ctx->ipad, istate.state, ctx->state_sz) ||
memcmp(ctx->opad, ostate.state, ctx->state_sz)))
@@ -470,8 +516,6 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
return 0;
badkey:
- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-badkey_expflags:
memzero_explicit(&keys, sizeof(keys));
return err;
}
@@ -656,6 +700,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
unsigned int totlen;
unsigned int totlen_src = cryptlen + assoclen;
unsigned int totlen_dst = totlen_src;
+ struct safexcel_token *atoken;
int n_cdesc = 0, n_rdesc = 0;
int queued, i, ret = 0;
bool first = true;
@@ -730,56 +775,60 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
- /* The EIP cannot deal with zero length input packets! */
- if (totlen == 0)
- totlen = 1;
+ if (!totlen) {
+ /*
+ * The EIP97 cannot deal with zero length input packets!
+ * So stuff a dummy command descriptor indicating a 1 byte
+ * (dummy) input packet, using the context record as source.
+ */
+ first_cdesc = safexcel_add_cdesc(priv, ring,
+ 1, 1, ctx->base.ctxr_dma,
+ 1, 1, ctx->base.ctxr_dma,
+ &atoken);
+ if (IS_ERR(first_cdesc)) {
+ /* No space left in the command descriptor ring */
+ ret = PTR_ERR(first_cdesc);
+ goto cdesc_rollback;
+ }
+ n_cdesc = 1;
+ goto skip_cdesc;
+ }
/* command descriptors */
for_each_sg(src, sg, sreq->nr_src, i) {
int len = sg_dma_len(sg);
/* Do not overflow the request */
- if (queued - len < 0)
+ if (queued < len)
len = queued;
cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
!(queued - len),
sg_dma_address(sg), len, totlen,
- ctx->base.ctxr_dma);
+ ctx->base.ctxr_dma, &atoken);
if (IS_ERR(cdesc)) {
/* No space left in the command descriptor ring */
ret = PTR_ERR(cdesc);
goto cdesc_rollback;
}
- n_cdesc++;
- if (n_cdesc == 1) {
+ if (!n_cdesc)
first_cdesc = cdesc;
- }
+ n_cdesc++;
queued -= len;
if (!queued)
break;
}
-
- if (unlikely(!n_cdesc)) {
- /*
- * Special case: zero length input buffer.
- * The engine always needs the 1st command descriptor, however!
- */
- first_cdesc = safexcel_add_cdesc(priv, ring, 1, 1, 0, 0, totlen,
- ctx->base.ctxr_dma);
- n_cdesc = 1;
- }
-
+skip_cdesc:
/* Add context control words and token to first command descriptor */
safexcel_context_control(ctx, base, sreq, first_cdesc);
if (ctx->aead)
- safexcel_aead_token(ctx, iv, first_cdesc,
+ safexcel_aead_token(ctx, iv, first_cdesc, atoken,
sreq->direction, cryptlen,
assoclen, digestsize);
else
- safexcel_skcipher_token(ctx, iv, first_cdesc,
+ safexcel_skcipher_token(ctx, iv, first_cdesc, atoken,
cryptlen);
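As a side note on the command-descriptor loop earlier in this function: each scatterlist segment is capped at the bytes still queued for the request, and the first/last flags fall out of the loop position. A toy model of that bookkeeping, with hypothetical segment sizes:

#include <stdio.h>

/* Toy model of the cdesc loop: cap each segment at the remaining
 * queued bytes, mark the first and last descriptors. */
int main(void)
{
        unsigned int seg_len[] = { 4096, 4096, 4096 };  /* hypothetical sg list */
        unsigned int queued = 6000;                     /* bytes in this request */
        unsigned int i, n_cdesc = 0;

        for (i = 0; i < sizeof(seg_len) / sizeof(seg_len[0]) && queued; i++) {
                unsigned int len = seg_len[i];

                if (queued < len)        /* do not overflow the request */
                        len = queued;

                printf("cdesc %u: len=%u first=%d last=%d\n",
                       n_cdesc, len, n_cdesc == 0, queued - len == 0);
                n_cdesc++;
                queued -= len;
        }
        return 0;
}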
/* result descriptors */
@@ -1166,6 +1215,8 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
ctx->base.send = safexcel_skcipher_send;
ctx->base.handle_result = safexcel_skcipher_handle_result;
+ ctx->ivmask = EIP197_OPTION_4_TOKEN_IV_CMD;
+ ctx->ctrinit = 1;
return 0;
}
@@ -1230,6 +1281,8 @@ static int safexcel_skcipher_aes_ecb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ ctx->blocksz = 0;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1264,6 +1317,7 @@ static int safexcel_skcipher_aes_cbc_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
@@ -1300,6 +1354,7 @@ static int safexcel_skcipher_aes_cfb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
return 0;
}
@@ -1336,6 +1391,7 @@ static int safexcel_skcipher_aes_ofb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
return 0;
}
@@ -1381,10 +1437,8 @@ static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm,
/* exclude the nonce here */
keylen = len - CTR_RFC3686_NONCE_SIZE;
ret = aes_expandkey(&aes, key, keylen);
- if (ret) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
@@ -1410,6 +1464,7 @@ static int safexcel_skcipher_aes_ctr_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
return 0;
}
@@ -1445,6 +1500,7 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
unsigned int len)
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
ret = verify_skcipher_des_key(ctfm, key);
@@ -1452,7 +1508,7 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
return ret;
/* if context exits and key changed, need to invalidate it */
- if (ctx->base.ctxr_dma)
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, len))
ctx->base.needs_inv = true;
@@ -1468,6 +1524,8 @@ static int safexcel_skcipher_des_cbc_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_DES;
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
@@ -1505,6 +1563,8 @@ static int safexcel_skcipher_des_ecb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_DES;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ ctx->blocksz = 0;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1537,6 +1597,7 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
int err;
err = verify_skcipher_des3_key(ctfm, key);
@@ -1544,7 +1605,7 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
return err;
/* if context exits and key changed, need to invalidate it */
- if (ctx->base.ctxr_dma)
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, len))
ctx->base.needs_inv = true;
@@ -1560,6 +1621,8 @@ static int safexcel_skcipher_des3_cbc_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES;
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
@@ -1597,6 +1660,8 @@ static int safexcel_skcipher_des3_ecb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ ctx->blocksz = 0;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1652,6 +1717,9 @@ static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
ctx->priv = tmpl->priv;
ctx->alg = SAFEXCEL_AES; /* default */
+ ctx->blocksz = AES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_4_TOKEN_IV_CMD;
+ ctx->ctrinit = 1;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; /* default */
ctx->aead = true;
ctx->base.send = safexcel_aead_send;
@@ -1840,6 +1908,8 @@ static int safexcel_aead_sha1_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha1_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1874,6 +1944,8 @@ static int safexcel_aead_sha256_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha256_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1908,6 +1980,8 @@ static int safexcel_aead_sha224_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha224_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1942,6 +2016,8 @@ static int safexcel_aead_sha512_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha512_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1976,6 +2052,8 @@ static int safexcel_aead_sha384_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha384_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2010,6 +2088,8 @@ static int safexcel_aead_sha1_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha1_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2044,6 +2124,8 @@ static int safexcel_aead_sha256_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha256_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2078,6 +2160,8 @@ static int safexcel_aead_sha224_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha224_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2112,6 +2196,8 @@ static int safexcel_aead_sha512_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha512_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2146,6 +2232,8 @@ static int safexcel_aead_sha384_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha384_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2362,10 +2450,8 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
/* Only half of the key data is cipher key */
keylen = (len >> 1);
ret = aes_expandkey(&aes, key, keylen);
- if (ret) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
@@ -2381,10 +2467,8 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
/* The other half is the tweak key */
ret = aes_expandkey(&aes, (u8 *)(key + keylen), keylen);
- if (ret) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
@@ -2412,6 +2496,7 @@ static int safexcel_skcipher_aes_xts_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->xts = 1;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XTS;
return 0;
@@ -2472,7 +2557,6 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
ret = aes_expandkey(&aes, key, len);
if (ret) {
- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&aes, sizeof(aes));
return ret;
}
@@ -2496,8 +2580,6 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
crypto_cipher_set_flags(ctx->hkaes, crypto_aead_get_flags(ctfm) &
CRYPTO_TFM_REQ_MASK);
ret = crypto_cipher_setkey(ctx->hkaes, key, len);
- crypto_aead_set_flags(ctfm, crypto_cipher_get_flags(ctx->hkaes) &
- CRYPTO_TFM_RES_MASK);
if (ret)
return ret;
@@ -2532,10 +2614,7 @@ static int safexcel_aead_gcm_cra_init(struct crypto_tfm *tfm)
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
ctx->hkaes = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->hkaes))
- return PTR_ERR(ctx->hkaes);
-
- return 0;
+ return PTR_ERR_OR_ZERO(ctx->hkaes);
}
static void safexcel_aead_gcm_cra_exit(struct crypto_tfm *tfm)
@@ -2589,7 +2668,6 @@ static int safexcel_aead_ccm_setkey(struct crypto_aead *ctfm, const u8 *key,
ret = aes_expandkey(&aes, key, len);
if (ret) {
- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&aes, sizeof(aes));
return ret;
}
@@ -2632,6 +2710,7 @@ static int safexcel_aead_ccm_cra_init(struct crypto_tfm *tfm)
ctx->state_sz = 3 * AES_BLOCK_SIZE;
ctx->xcm = EIP197_XCM_MODE_CCM;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
+ ctx->ctrinit = 0;
return 0;
}
@@ -2719,10 +2798,9 @@ static int safexcel_skcipher_chacha20_setkey(struct crypto_skcipher *ctfm,
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
- if (len != CHACHA_KEY_SIZE) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (len != CHACHA_KEY_SIZE)
return -EINVAL;
- }
+
safexcel_chacha20_setkey(ctx, key);
return 0;
@@ -2734,6 +2812,7 @@ static int safexcel_skcipher_chacha20_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_CHACHA20;
+ ctx->ctrinit = 0;
ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32;
return 0;
}
@@ -2775,10 +2854,9 @@ static int safexcel_aead_chachapoly_setkey(struct crypto_aead *ctfm,
len -= EIP197_AEAD_IPSEC_NONCE_SIZE;
ctx->nonce = *(u32 *)(key + len);
}
- if (len != CHACHA_KEY_SIZE) {
- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (len != CHACHA_KEY_SIZE)
return -EINVAL;
- }
+
safexcel_chacha20_setkey(ctx, key);
return 0;
@@ -2885,6 +2963,7 @@ static int safexcel_aead_chachapoly_cra_init(struct crypto_tfm *tfm)
ctx->alg = SAFEXCEL_CHACHA20;
ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32 |
CONTEXT_CONTROL_CHACHA20_MODE_CALC_OTK;
+ ctx->ctrinit = 0;
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_POLY1305;
ctx->state_sz = 0; /* Precomputed by HW */
return 0;
@@ -2933,6 +3012,7 @@ static int safexcel_aead_chachapolyesp_cra_init(struct crypto_tfm *tfm)
ret = safexcel_aead_chachapoly_cra_init(tfm);
ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE;
return ret;
}
@@ -2971,10 +3051,8 @@ static int safexcel_skcipher_sm4_setkey(struct crypto_skcipher *ctfm,
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
- if (len != SM4_KEY_SIZE) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (len != SM4_KEY_SIZE)
return -EINVAL;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, SM4_KEY_SIZE))
@@ -3013,6 +3091,8 @@ static int safexcel_skcipher_sm4_ecb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ ctx->blocksz = 0;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -3047,6 +3127,7 @@ static int safexcel_skcipher_sm4_cbc_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
@@ -3083,6 +3164,7 @@ static int safexcel_skcipher_sm4_ofb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
return 0;
}
@@ -3119,6 +3201,7 @@ static int safexcel_skcipher_sm4_cfb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
return 0;
}
@@ -3169,6 +3252,7 @@ static int safexcel_skcipher_sm4_ctr_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
return 0;
}
@@ -3228,6 +3312,7 @@ static int safexcel_aead_sm4cbc_sha1_cra_init(struct crypto_tfm *tfm)
safexcel_aead_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
ctx->state_sz = SHA1_DIGEST_SIZE;
return 0;
@@ -3335,6 +3420,7 @@ static int safexcel_aead_sm4cbc_sm3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_fallback_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
ctx->state_sz = SM3_DIGEST_SIZE;
return 0;
@@ -3473,6 +3559,7 @@ static int safexcel_rfc4106_gcm_cra_init(struct crypto_tfm *tfm)
ret = safexcel_aead_gcm_cra_init(tfm);
ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE;
return ret;
}
@@ -3607,6 +3694,7 @@ static int safexcel_rfc4309_ccm_cra_init(struct crypto_tfm *tfm)
ret = safexcel_aead_ccm_cra_init(tfm);
ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE;
return ret;
}
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 2134daef24f6..43962bc709c6 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -87,12 +87,14 @@ static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
input_length &= 15;
if (unlikely(cbcmac && input_length)) {
+ token[0].stat = 0;
token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
token[1].packet_length = 16 - input_length;
token[1].stat = EIP197_TOKEN_STAT_LAST_HASH;
token[1].instructions = EIP197_TOKEN_INS_TYPE_HASH;
} else {
token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ eip197_noop_token(&token[1]);
}
token[2].opcode = EIP197_TOKEN_OPCODE_INSERT;
@@ -101,6 +103,8 @@ static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
token[2].packet_length = result_length;
token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+
+ eip197_noop_token(&token[3]);
}
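The CBC-MAC case above inserts zero padding so the hashed data always ends on the engine's 16-byte block boundary; complete blocks need no pad. The padding amount reduces to a one-liner, sketched here standalone:

#include <stdio.h>

/* Zero padding needed to reach the next 16-byte hash block. */
static unsigned int cbcmac_pad_len(unsigned int input_length)
{
        unsigned int tail = input_length & 15;

        return tail ? 16 - tail : 0;
}

int main(void)
{
        unsigned int lens[] = { 0, 1, 15, 16, 17, 32 };
        unsigned int i;

        for (i = 0; i < 6; i++)
                printf("len=%2u pad=%2u\n", lens[i], cbcmac_pad_len(lens[i]));
        return 0;
}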
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
@@ -111,6 +115,7 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
u64 count = 0;
cdesc->control_data.control0 = ctx->alg;
+ cdesc->control_data.control1 = 0;
/*
* Copy the input digest if needed, and setup the context
@@ -277,7 +282,8 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
sreq->processed = sreq->block_sz;
sreq->hmac = 0;
- ctx->base.needs_inv = true;
+ if (priv->flags & EIP197_TRC_CACHE)
+ ctx->base.needs_inv = true;
areq->nbytes = 0;
safexcel_ahash_enqueue(areq);
@@ -314,6 +320,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
+ struct safexcel_token *dmmy;
int i, extra = 0, n_cdesc = 0, ret = 0, cache_len, skip = 0;
u64 queued, len;
@@ -397,7 +404,8 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
first_cdesc = safexcel_add_cdesc(priv, ring, 1,
(cache_len == len),
req->cache_dma, cache_len,
- len, ctx->base.ctxr_dma);
+ len, ctx->base.ctxr_dma,
+ &dmmy);
if (IS_ERR(first_cdesc)) {
ret = PTR_ERR(first_cdesc);
goto unmap_cache;
@@ -436,7 +444,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
!(queued - sglen),
sg_dma_address(sg) + skip, sglen,
- len, ctx->base.ctxr_dma);
+ len, ctx->base.ctxr_dma, &dmmy);
if (IS_ERR(cdesc)) {
ret = PTR_ERR(cdesc);
goto unmap_sg;
@@ -1911,10 +1919,8 @@ static int safexcel_crc32_setkey(struct crypto_ahash *tfm, const u8 *key,
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- if (keylen != sizeof(u32)) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != sizeof(u32))
return -EINVAL;
- }
memcpy(ctx->ipad, key, sizeof(u32));
return 0;
@@ -1987,10 +1993,8 @@ static int safexcel_cbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
int ret, i;
ret = aes_expandkey(&aes, key, len);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
memset(ctx->ipad, 0, 2 * AES_BLOCK_SIZE);
for (i = 0; i < len / sizeof(u32); i++)
@@ -2057,18 +2061,14 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
int ret, i;
ret = aes_expandkey(&aes, key, len);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
/* precompute the XCBC key material */
crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
ret = crypto_cipher_setkey(ctx->kaes, key, len);
- crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
- CRYPTO_TFM_RES_MASK);
if (ret)
return ret;
@@ -2088,8 +2088,6 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ret = crypto_cipher_setkey(ctx->kaes,
(u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
AES_MIN_KEY_SIZE);
- crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
- CRYPTO_TFM_RES_MASK);
if (ret)
return ret;
@@ -2160,10 +2158,8 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
int ret, i;
ret = aes_expandkey(&aes, key, len);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
for (i = 0; i < len / sizeof(u32); i++)
ctx->ipad[i + 8] =
@@ -2174,8 +2170,6 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
ret = crypto_cipher_setkey(ctx->kaes, key, len);
- crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
- CRYPTO_TFM_RES_MASK);
if (ret)
return ret;
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index 9237ba745c2f..e454c3d44f07 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -14,6 +14,11 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
struct safexcel_desc_ring *cdr,
struct safexcel_desc_ring *rdr)
{
+ int i;
+ struct safexcel_command_desc *cdesc;
+ dma_addr_t atok;
+
+ /* Actual command descriptor ring */
cdr->offset = priv->config.cd_offset;
cdr->base = dmam_alloc_coherent(priv->dev,
cdr->offset * EIP197_DEFAULT_RING_SIZE,
@@ -24,7 +29,34 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
cdr->read = cdr->base;
+ /* Command descriptor shadow ring for storing additional token data */
+ cdr->shoffset = priv->config.cdsh_offset;
+ cdr->shbase = dmam_alloc_coherent(priv->dev,
+ cdr->shoffset *
+ EIP197_DEFAULT_RING_SIZE,
+ &cdr->shbase_dma, GFP_KERNEL);
+ if (!cdr->shbase)
+ return -ENOMEM;
+ cdr->shwrite = cdr->shbase;
+ cdr->shbase_end = cdr->shbase + cdr->shoffset *
+ (EIP197_DEFAULT_RING_SIZE - 1);
+
+ /*
+ * Populate command descriptors with physical pointers to shadow descs.
+ * Note that we only need to do this once if we don't overwrite them.
+ */
+ cdesc = cdr->base;
+ atok = cdr->shbase_dma;
+ for (i = 0; i < EIP197_DEFAULT_RING_SIZE; i++) {
+ cdesc->atok_lo = lower_32_bits(atok);
+ cdesc->atok_hi = upper_32_bits(atok);
+ cdesc = (void *)cdesc + cdr->offset;
+ atok += cdr->shoffset;
+ }
+
rdr->offset = priv->config.rd_offset;
+ /* Use shoffset for result token offset here */
+ rdr->shoffset = priv->config.res_offset;
rdr->base = dmam_alloc_coherent(priv->dev,
rdr->offset * EIP197_DEFAULT_RING_SIZE,
&rdr->base_dma, GFP_KERNEL);
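Because every command descriptor has a fixed shadow slot, the atok pointers can be written once at ring init and never touched per request, which is what the population loop above does. A host-side model of that one-time linking (sizes and addresses are made up):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4

/* Hypothetical descriptor: only the pointer to its shadow token slot. */
struct demo_cdesc {
        uint32_t atok_lo;
        uint32_t atok_hi;
};

/* Hypothetical per-descriptor shadow token block. */
struct demo_token {
        uint32_t words[8];
};

int main(void)
{
        static struct demo_cdesc ring[RING_SIZE];
        uint64_t shbase_dma = 0x80000000ULL;   /* stand-in shadow ring base */
        unsigned int i;

        /* Done once at init: descriptor i always points at shadow slot i,
         * so per-request code never rewrites atok_lo/atok_hi. */
        for (i = 0; i < RING_SIZE; i++) {
                uint64_t atok = shbase_dma + i * sizeof(struct demo_token);

                ring[i].atok_lo = (uint32_t)atok;
                ring[i].atok_hi = (uint32_t)(atok >> 32);
        }

        for (i = 0; i < RING_SIZE; i++)
                printf("cdesc %u -> %08x:%08x\n", i,
                       ring[i].atok_hi, ring[i].atok_lo);
        return 0;
}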
@@ -42,11 +74,40 @@ inline int safexcel_select_ring(struct safexcel_crypto_priv *priv)
return (atomic_inc_return(&priv->ring_used) % priv->config.rings);
}
-static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv,
- struct safexcel_desc_ring *ring)
+static void *safexcel_ring_next_cwptr(struct safexcel_crypto_priv *priv,
+ struct safexcel_desc_ring *ring,
+ bool first,
+ struct safexcel_token **atoken)
{
void *ptr = ring->write;
+ if (first)
+ *atoken = ring->shwrite;
+
+ if ((ring->write == ring->read - ring->offset) ||
+ (ring->read == ring->base && ring->write == ring->base_end))
+ return ERR_PTR(-ENOMEM);
+
+ if (ring->write == ring->base_end) {
+ ring->write = ring->base;
+ ring->shwrite = ring->shbase;
+ } else {
+ ring->write += ring->offset;
+ ring->shwrite += ring->shoffset;
+ }
+
+ return ptr;
+}
+
+static void *safexcel_ring_next_rwptr(struct safexcel_crypto_priv *priv,
+ struct safexcel_desc_ring *ring,
+ struct result_data_desc **rtoken)
+{
+ void *ptr = ring->write;
+
+ /* Result token at relative offset shoffset */
+ *rtoken = ring->write + ring->shoffset;
+
if ((ring->write == ring->read - ring->offset) ||
(ring->read == ring->base && ring->write == ring->base_end))
return ERR_PTR(-ENOMEM);
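Both write-pointer helpers share the same full test: the ring is full when write sits one element behind read, with the wrap-around case spelled out separately, so one slot is deliberately left unused to tell full from empty. A standalone model follows; the driver folds the non-wrapped case into plain pointer arithmetic, while this sketch guards it explicitly to stay within the array:

#include <stdio.h>

#define ELEM    8      /* element size in bytes (stand-in for cd_offset) */
#define NELEM   4      /* ring entries */

struct ring {
        char *base, *base_end, *read, *write;
};

static void *ring_next_wptr(struct ring *r)
{
        void *ptr = r->write;

        if ((r->read != r->base && r->write == r->read - ELEM) ||
            (r->read == r->base && r->write == r->base_end))
                return NULL;                    /* ring full */

        if (r->write == r->base_end)
                r->write = r->base;             /* wrap to the start */
        else
                r->write += ELEM;
        return ptr;
}

int main(void)
{
        static char mem[ELEM * NELEM];
        struct ring r = {
                .base = mem, .base_end = mem + ELEM * (NELEM - 1),
                .read = mem, .write = mem,
        };
        int i;

        /* With nothing consumed, only NELEM - 1 slots can be claimed. */
        for (i = 0; i < NELEM + 1; i++)
                printf("alloc %d -> %s\n", i, ring_next_wptr(&r) ? "ok" : "full");
        return 0;
}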
@@ -106,10 +167,13 @@ void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
if (ring->write == ring->read)
return;
- if (ring->write == ring->base)
+ if (ring->write == ring->base) {
ring->write = ring->base_end;
- else
+ ring->shwrite = ring->shbase_end;
+ } else {
ring->write -= ring->offset;
+ ring->shwrite -= ring->shoffset;
+ }
}
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
@@ -117,26 +181,26 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
bool first, bool last,
dma_addr_t data, u32 data_len,
u32 full_data_len,
- dma_addr_t context) {
+ dma_addr_t context,
+ struct safexcel_token **atoken)
+{
struct safexcel_command_desc *cdesc;
- int i;
- cdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].cdr);
+ cdesc = safexcel_ring_next_cwptr(priv, &priv->ring[ring_id].cdr,
+ first, atoken);
if (IS_ERR(cdesc))
return cdesc;
- memset(cdesc, 0, sizeof(struct safexcel_command_desc));
-
- cdesc->first_seg = first;
- cdesc->last_seg = last;
cdesc->particle_size = data_len;
+ cdesc->rsvd0 = 0;
+ cdesc->last_seg = last;
+ cdesc->first_seg = first;
+ cdesc->additional_cdata_size = 0;
+ cdesc->rsvd1 = 0;
cdesc->data_lo = lower_32_bits(data);
cdesc->data_hi = upper_32_bits(data);
- if (first && context) {
- struct safexcel_token *token =
- (struct safexcel_token *)cdesc->control_data.token;
-
+ if (first) {
/*
* Note that the length here MUST be >0 or else the EIP(1)97
* may hang. Newer EIP197 firmware actually incorporates this
@@ -146,20 +210,12 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
cdesc->control_data.packet_length = full_data_len ?: 1;
cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE |
EIP197_OPTION_64BIT_CTX |
- EIP197_OPTION_CTX_CTRL_IN_CMD;
- cdesc->control_data.context_lo =
- (lower_32_bits(context) & GENMASK(31, 2)) >> 2;
+ EIP197_OPTION_CTX_CTRL_IN_CMD |
+ EIP197_OPTION_RC_AUTO;
+ cdesc->control_data.type = EIP197_TYPE_BCLA;
+ cdesc->control_data.context_lo = lower_32_bits(context) |
+ EIP197_CONTEXT_SMALL;
cdesc->control_data.context_hi = upper_32_bits(context);
-
- if (priv->version == EIP197B_MRVL ||
- priv->version == EIP197D_MRVL)
- cdesc->control_data.options |= EIP197_OPTION_RC_AUTO;
-
- /* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */
- cdesc->control_data.refresh = 2;
-
- for (i = 0; i < EIP197_MAX_TOKENS; i++)
- eip197_noop_token(&token[i]);
}
return cdesc;
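The low bits of the context pointer are reused here: since the context record is word aligned, context_lo can carry a small size code (EIP197_CONTEXT_SMALL) that the engine separates out again via EIP197_CONTEXT_SIZE_MASK. A rough sketch of that packing, with a hypothetical code value:

#include <stdint.h>
#include <stdio.h>

#define CONTEXT_SIZE_MASK 0x3u   /* low 2 bits, cf. EIP197_CONTEXT_SIZE_MASK */

int main(void)
{
        uint64_t ctxr_dma = 0x12345670ULL;   /* aligned context address */
        uint32_t size_code = 0x2;            /* hypothetical "small" code */
        uint32_t context_lo = (uint32_t)ctxr_dma | size_code;

        printf("context_lo  = %08x\n", context_lo);
        printf("address bits = %08x, size code = %u\n",
               context_lo & ~CONTEXT_SIZE_MASK, context_lo & CONTEXT_SIZE_MASK);
        return 0;
}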
@@ -171,19 +227,27 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri
dma_addr_t data, u32 len)
{
struct safexcel_result_desc *rdesc;
+ struct result_data_desc *rtoken;
- rdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].rdr);
+ rdesc = safexcel_ring_next_rwptr(priv, &priv->ring[ring_id].rdr,
+ &rtoken);
if (IS_ERR(rdesc))
return rdesc;
- memset(rdesc, 0, sizeof(struct safexcel_result_desc));
-
- rdesc->first_seg = first;
+ rdesc->particle_size = len;
+ rdesc->rsvd0 = 0;
+ rdesc->descriptor_overflow = 0;
+ rdesc->buffer_overflow = 0;
rdesc->last_seg = last;
+ rdesc->first_seg = first;
rdesc->result_size = EIP197_RD64_RESULT_SIZE;
- rdesc->particle_size = len;
+ rdesc->rsvd1 = 0;
rdesc->data_lo = lower_32_bits(data);
rdesc->data_hi = upper_32_bits(data);
+ /* Clear length & error code in result token */
+ rtoken->packet_length = 0;
+ rtoken->error_code = 0;
+
return rdesc;
}
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 391e3b4df364..ad73fc946682 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -740,7 +740,7 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
u32 keylen_cfg = 0;
struct ix_sa_dir *dir;
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
+ int err;
dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
cinfo = dir->npe_ctx;
@@ -757,12 +757,13 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
case 24: keylen_cfg = MOD_AES192; break;
case 32: keylen_cfg = MOD_AES256; break;
default:
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
cipher_cfg |= keylen_cfg;
} else {
- crypto_des_verify_key(tfm, key);
+ err = crypto_des_verify_key(tfm, key);
+ if (err)
+ return err;
}
/* write cfg word to cryptinfo */
*(u32*)cinfo = cpu_to_be32(cipher_cfg);
@@ -819,7 +820,6 @@ static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
- u32 *flags = &tfm->base.crt_flags;
int ret;
init_completion(&ctx->completion);
@@ -835,16 +835,6 @@ static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (ret)
goto out;
ret = setup_cipher(&tfm->base, 1, key, key_len);
- if (ret)
- goto out;
-
- if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
- if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
- ret = -EINVAL;
- } else {
- *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
- }
- }
out:
if (!atomic_dec_and_test(&ctx->configuring))
wait_for_completion(&ctx->completion);
@@ -1096,7 +1086,6 @@ free_buf_src:
static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
- u32 *flags = &tfm->base.crt_flags;
unsigned digest_len = crypto_aead_maxauthsize(tfm);
int ret;
@@ -1120,17 +1109,6 @@ static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
goto out;
ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
ctx->authkey_len, digest_len);
- if (ret)
- goto out;
-
- if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
- if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
- ret = -EINVAL;
- goto out;
- } else {
- *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
- }
- }
out:
if (!atomic_dec_and_test(&ctx->configuring))
wait_for_completion(&ctx->completion);
@@ -1169,7 +1147,6 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
memzero_explicit(&keys, sizeof(keys));
return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index d8e8c857770c..c24f34a48cef 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -255,10 +255,8 @@ static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
int i;
ret = aes_expandkey(&ctx->aes, key, len);
- if (ret) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
remaining = (ctx->aes.key_length - 16) / 4;
offset = ctx->aes.key_length + 24 - remaining;
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
index 90880a81c534..78d660d963e2 100644
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ b/drivers/crypto/mediatek/mtk-aes.c
@@ -652,7 +652,6 @@ static int mtk_aes_setkey(struct crypto_skcipher *tfm,
break;
default:
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -1022,7 +1021,6 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
break;
default:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -1033,8 +1031,6 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
err = crypto_skcipher_setkey(ctr, key, keylen);
- crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
- CRYPTO_TFM_RES_MASK);
if (err)
return err;
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index f438b425c655..435ac1c83df9 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -492,7 +492,6 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
- unsigned int ret;
/*
* AES 128 is supposed by the hardware, store key into temporary
@@ -513,16 +512,7 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(actx->fallback,
tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
-
- ret = crypto_sync_skcipher_setkey(actx->fallback, key, len);
- if (!ret)
- return 0;
-
- tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(actx->fallback) &
- CRYPTO_TFM_RES_MASK;
-
- return ret;
+ return crypto_sync_skcipher_setkey(actx->fallback, key, len);
}
static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 63bd565048f4..f5c468f2cc82 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -746,7 +746,6 @@ static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->enc_type |= ENC_TYPE_ALG_AES256;
break;
default:
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index 9bbedbccfadf..32dc00dc570b 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -13,6 +13,7 @@
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
@@ -29,11 +30,13 @@ static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
{
struct aead_request *req = dd->aead_req;
- dd->flags &= ~FLAGS_BUSY;
dd->in_sg = NULL;
dd->out_sg = NULL;
- req->base.complete(&req->base, ret);
+ crypto_finalize_aead_request(dd->engine, req, ret);
+
+ pm_runtime_mark_last_busy(dd->dev);
+ pm_runtime_put_autosuspend(dd->dev);
}
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
@@ -81,7 +84,6 @@ static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
}
omap_aes_gcm_finish_req(dd, ret);
- omap_aes_gcm_handle_queue(dd, NULL);
}
static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
@@ -120,11 +122,16 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
FLAGS_ASSOC_DATA_ST_SHIFT,
&dd->flags);
+ if (ret)
+ return ret;
}
if (cryptlen) {
tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);
+ if (nsg)
+ sg_unmark_end(dd->in_sgl);
+
ret = omap_crypto_align_sg(&tmp, cryptlen,
AES_BLOCK_SIZE, &dd->in_sgl[nsg],
OMAP_CRYPTO_COPY_DATA |
@@ -132,6 +139,8 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
FLAGS_IN_DATA_ST_SHIFT,
&dd->flags);
+ if (ret)
+ return ret;
}
dd->in_sg = dd->in_sgl;
@@ -142,18 +151,20 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
dd->out_sg = req->dst;
dd->orig_out = req->dst;
- dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen);
+ dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, req->assoclen);
flags = 0;
if (req->src == req->dst || dd->out_sg == sg_arr)
flags |= OMAP_CRYPTO_FORCE_COPY;
- ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
- AES_BLOCK_SIZE, &dd->out_sgl,
- flags,
- FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
- if (ret)
- return ret;
+ if (cryptlen) {
+ ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
+ AES_BLOCK_SIZE, &dd->out_sgl,
+ flags,
+ FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
+ if (ret)
+ return ret;
+ }
dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen);
dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen);
@@ -161,62 +172,12 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
return 0;
}
-static void omap_aes_gcm_complete(struct crypto_async_request *req, int err)
-{
- struct omap_aes_gcm_result *res = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- res->err = err;
- complete(&res->completion);
-}
-
static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
{
- struct scatterlist iv_sg, tag_sg;
- struct skcipher_request *sk_req;
- struct omap_aes_gcm_result result;
- struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- int ret = 0;
-
- sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
- if (!sk_req) {
- pr_err("skcipher: Failed to allocate request\n");
- return -ENOMEM;
- }
-
- init_completion(&result.completion);
-
- sg_init_one(&iv_sg, iv, AES_BLOCK_SIZE);
- sg_init_one(&tag_sg, tag, AES_BLOCK_SIZE);
- skcipher_request_set_callback(sk_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- omap_aes_gcm_complete, &result);
- ret = crypto_skcipher_setkey(ctx->ctr, (u8 *)ctx->key, ctx->keylen);
- skcipher_request_set_crypt(sk_req, &iv_sg, &tag_sg, AES_BLOCK_SIZE,
- NULL);
- ret = crypto_skcipher_encrypt(sk_req);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- ret = wait_for_completion_interruptible(&result.completion);
- if (!ret) {
- ret = result.err;
- if (!ret) {
- reinit_completion(&result.completion);
- break;
- }
- }
- /* fall through */
- default:
- pr_err("Encryption of IV failed for GCM mode\n");
- break;
- }
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- skcipher_request_free(sk_req);
- return ret;
+ aes_encrypt(&ctx->actx, (u8 *)tag, (u8 *)iv);
+ return 0;
}
void omap_aes_gcm_dma_out_callback(void *data)
@@ -246,37 +207,21 @@ void omap_aes_gcm_dma_out_callback(void *data)
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
struct aead_request *req)
{
- struct omap_aes_ctx *ctx;
- struct aead_request *backlog;
- struct omap_aes_reqctx *rctx;
- unsigned long flags;
- int err, ret = 0;
-
- spin_lock_irqsave(&dd->lock, flags);
if (req)
- ret = aead_enqueue_request(&dd->aead_queue, req);
- if (dd->flags & FLAGS_BUSY) {
- spin_unlock_irqrestore(&dd->lock, flags);
- return ret;
- }
-
- backlog = aead_get_backlog(&dd->aead_queue);
- req = aead_dequeue_request(&dd->aead_queue);
- if (req)
- dd->flags |= FLAGS_BUSY;
- spin_unlock_irqrestore(&dd->lock, flags);
-
- if (!req)
- return ret;
+ return crypto_transfer_aead_request_to_engine(dd->engine, req);
- if (backlog)
- backlog->base.complete(&backlog->base, -EINPROGRESS);
+ return 0;
+}
- ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- rctx = aead_request_ctx(req);
+static int omap_aes_gcm_prepare_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+ struct omap_aes_dev *dd = rctx->dd;
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ int err;
- dd->ctx = ctx;
- rctx->dd = dd;
dd->aead_req = req;
rctx->mode &= FLAGS_MODE_MASK;
@@ -286,16 +231,9 @@ static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
if (err)
return err;
- err = omap_aes_write_ctrl(dd);
- if (!err)
- err = omap_aes_crypt_dma_start(dd);
+ dd->ctx = &ctx->octx;
- if (err) {
- omap_aes_gcm_finish_req(dd, err);
- omap_aes_gcm_handle_queue(dd, NULL);
- }
-
- return ret;
+ return omap_aes_write_ctrl(dd);
}
static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
@@ -350,36 +288,39 @@ int omap_aes_gcm_decrypt(struct aead_request *req)
int omap_aes_4106gcm_encrypt(struct aead_request *req)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
- memcpy(rctx->iv, ctx->nonce, 4);
+ memcpy(rctx->iv, ctx->octx.nonce, 4);
memcpy(rctx->iv + 4, req->iv, 8);
- return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
+ return crypto_ipsec_check_assoclen(req->assoclen) ?:
+ omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
FLAGS_RFC4106_GCM);
}
int omap_aes_4106gcm_decrypt(struct aead_request *req)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
- memcpy(rctx->iv, ctx->nonce, 4);
+ memcpy(rctx->iv, ctx->octx.nonce, 4);
memcpy(rctx->iv + 4, req->iv, 8);
- return omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
+ return crypto_ipsec_check_assoclen(req->assoclen) ?:
+ omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
}
int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
- if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256)
- return -EINVAL;
+ ret = aes_expandkey(&ctx->actx, key, keylen);
+ if (ret)
+ return ret;
- memcpy(ctx->key, key, keylen);
- ctx->keylen = keylen;
+ memcpy(ctx->octx.key, key, keylen);
+ ctx->octx.keylen = keylen;
return 0;
}
@@ -387,19 +328,63 @@ int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
if (keylen < 4)
return -EINVAL;
-
keylen -= 4;
- if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256)
- return -EINVAL;
- memcpy(ctx->key, key, keylen);
- memcpy(ctx->nonce, key + keylen, 4);
- ctx->keylen = keylen;
+ ret = aes_expandkey(&ctx->actx, key, keylen);
+ if (ret)
+ return ret;
+
+ memcpy(ctx->octx.key, key, keylen);
+ memcpy(ctx->octx.nonce, key + keylen, 4);
+ ctx->octx.keylen = keylen;
+
+ return 0;
+}
+
+int omap_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ return crypto_gcm_check_authsize(authsize);
+}
+
+int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent,
+ unsigned int authsize)
+{
+ return crypto_rfc4106_check_authsize(authsize);
+}
+
+static int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+ struct omap_aes_dev *dd = rctx->dd;
+ int ret = 0;
+
+ if (!dd)
+ return -ENODEV;
+
+ if (dd->in_sg_len)
+ ret = omap_aes_crypt_dma_start(dd);
+ else
+ omap_aes_gcm_dma_out_callback(dd);
+
+ return ret;
+}
+
+int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
+{
+ struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+
+ ctx->enginectx.op.prepare_request = omap_aes_gcm_prepare_req;
+ ctx->enginectx.op.unprepare_request = NULL;
+ ctx->enginectx.op.do_one_request = omap_aes_gcm_crypt_req;
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));
return 0;
}
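
Note on the omap-aes-gcm.c conversion above: the driver drops its own FLAGS_BUSY queue and hands requests to the generic crypto engine, which then calls back into a prepare step and a do-one-request step. The following is only a hedged sketch of that wiring for a hypothetical AEAD driver; the my_* names are illustrative, and only the crypto_engine/AEAD helpers used here are assumed to exist with these signatures.

#include <crypto/engine.h>
#include <crypto/internal/aead.h>

struct my_aead_reqctx {
	int mode;
};

struct my_aead_ctx {
	struct crypto_engine_ctx enginectx;	/* must stay first: the engine fetches the ops from here */
	/* ... device pointer, key material ... */
};

static int my_prepare_req(struct crypto_engine *engine, void *areq)
{
	struct aead_request *req = container_of(areq, struct aead_request, base);

	/* program keys/IV and build scatterlists for 'req'; return 0 or -errno */
	return 0;
}

static int my_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct aead_request *req = container_of(areq, struct aead_request, base);

	/* start DMA for 'req'; the completion path later calls
	 * crypto_finalize_aead_request(engine, req, err) */
	return 0;
}

static int my_aead_init(struct crypto_aead *tfm)
{
	struct my_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->enginectx.op.prepare_request = my_prepare_req;
	ctx->enginectx.op.unprepare_request = NULL;
	ctx->enginectx.op.do_one_request = my_do_one_req;

	crypto_aead_set_reqsize(tfm, sizeof(struct my_aead_reqctx));
	return 0;
}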
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index a1fc03ed01f3..824ddf2a66ff 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -269,13 +269,14 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
struct scatterlist *out_sg,
int in_sg_len, int out_sg_len)
{
- struct dma_async_tx_descriptor *tx_in, *tx_out;
+ struct dma_async_tx_descriptor *tx_in, *tx_out = NULL, *cb_desc;
struct dma_slave_config cfg;
int ret;
if (dd->pio_only) {
scatterwalk_start(&dd->in_walk, dd->in_sg);
- scatterwalk_start(&dd->out_walk, dd->out_sg);
+ if (out_sg_len)
+ scatterwalk_start(&dd->out_walk, dd->out_sg);
/* Enable DATAIN interrupt and let it take
care of the rest */
@@ -312,34 +313,45 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
/* No callback necessary */
tx_in->callback_param = dd;
+ tx_in->callback = NULL;
/* OUT */
- ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
- if (ret) {
- dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
- ret);
- return ret;
- }
+ if (out_sg_len) {
+ ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
+ if (ret) {
+ dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
+ ret);
+ return ret;
+ }
- tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
- DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!tx_out) {
- dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
- return -EINVAL;
+ tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg,
+ out_sg_len,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx_out) {
+ dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
+ return -EINVAL;
+ }
+
+ cb_desc = tx_out;
+ } else {
+ cb_desc = tx_in;
}
if (dd->flags & FLAGS_GCM)
- tx_out->callback = omap_aes_gcm_dma_out_callback;
+ cb_desc->callback = omap_aes_gcm_dma_out_callback;
else
- tx_out->callback = omap_aes_dma_out_callback;
- tx_out->callback_param = dd;
+ cb_desc->callback = omap_aes_dma_out_callback;
+ cb_desc->callback_param = dd;
+
dmaengine_submit(tx_in);
- dmaengine_submit(tx_out);
+ if (tx_out)
+ dmaengine_submit(tx_out);
dma_async_issue_pending(dd->dma_lch_in);
- dma_async_issue_pending(dd->dma_lch_out);
+ if (out_sg_len)
+ dma_async_issue_pending(dd->dma_lch_out);
/* start DMA */
dd->pdata->trigger(dd, dd->total);
@@ -361,11 +373,13 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
return -EINVAL;
}
- err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
- DMA_FROM_DEVICE);
- if (!err) {
- dev_err(dd->dev, "dma_map_sg() error\n");
- return -EINVAL;
+ if (dd->out_sg_len) {
+ err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+ DMA_FROM_DEVICE);
+ if (!err) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ return -EINVAL;
+ }
}
}
@@ -373,8 +387,9 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
dd->out_sg_len);
if (err && !dd->pio_only) {
dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
- dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
- DMA_FROM_DEVICE);
+ if (dd->out_sg_len)
+ dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+ DMA_FROM_DEVICE);
}
return err;
@@ -479,6 +494,14 @@ static int omap_aes_crypt_req(struct crypto_engine *engine,
return omap_aes_crypt_dma_start(dd);
}
+static void omap_aes_copy_ivout(struct omap_aes_dev *dd, u8 *ivbuf)
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ ((u32 *)ivbuf)[i] = omap_aes_read(dd, AES_REG_IV(dd, i));
+}
+
static void omap_aes_done_task(unsigned long data)
{
struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
@@ -494,12 +517,16 @@ static void omap_aes_done_task(unsigned long data)
omap_aes_crypt_dma_stop(dd);
}
- omap_crypto_cleanup(dd->in_sgl, NULL, 0, dd->total_save,
+ omap_crypto_cleanup(dd->in_sg, NULL, 0, dd->total_save,
FLAGS_IN_DATA_ST_SHIFT, dd->flags);
- omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save,
+ omap_crypto_cleanup(dd->out_sg, dd->orig_out, 0, dd->total_save,
FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
+ /* Update IV output */
+ if (dd->flags & (FLAGS_CBC | FLAGS_CTR))
+ omap_aes_copy_ivout(dd, dd->req->iv);
+
omap_aes_finish_req(dd, 0);
pr_debug("exit\n");
@@ -513,6 +540,9 @@ static int omap_aes_crypt(struct skcipher_request *req, unsigned long mode)
struct omap_aes_dev *dd;
int ret;
+ if ((req->cryptlen % AES_BLOCK_SIZE) && !(mode & FLAGS_CTR))
+ return -EINVAL;
+
pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->cryptlen,
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
@@ -627,36 +657,6 @@ static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
return 0;
}
-static int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
-{
- struct omap_aes_dev *dd = NULL;
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
- int err;
-
- /* Find AES device, currently picks the first device */
- spin_lock_bh(&list_lock);
- list_for_each_entry(dd, &dev_list, list) {
- break;
- }
- spin_unlock_bh(&list_lock);
-
- err = pm_runtime_get_sync(dd->dev);
- if (err < 0) {
- dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
- __func__, err);
- return err;
- }
-
- tfm->reqsize = sizeof(struct omap_aes_reqctx);
- ctx->ctr = crypto_alloc_skcipher("ecb(aes)", 0, 0);
- if (IS_ERR(ctx->ctr)) {
- pr_warn("could not load aes driver for encrypting IV\n");
- return PTR_ERR(ctx->ctr);
- }
-
- return 0;
-}
-
static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
{
struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -667,19 +667,6 @@ static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
ctx->fallback = NULL;
}
-static void omap_aes_gcm_cra_exit(struct crypto_aead *tfm)
-{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
-
- if (ctx->fallback)
- crypto_free_sync_skcipher(ctx->fallback);
-
- ctx->fallback = NULL;
-
- if (ctx->ctr)
- crypto_free_skcipher(ctx->ctr);
-}
-
/* ********************** ALGS ************************************ */
static struct skcipher_alg algs_ecb_cbc[] = {
@@ -732,7 +719,7 @@ static struct skcipher_alg algs_ctr[] = {
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct omap_aes_ctx),
.base.cra_module = THIS_MODULE,
@@ -763,15 +750,15 @@ static struct aead_alg algs_aead_gcm[] = {
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx),
.cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
},
.init = omap_aes_gcm_cra_init,
- .exit = omap_aes_gcm_cra_exit,
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = omap_aes_gcm_setkey,
+ .setauthsize = omap_aes_gcm_setauthsize,
.encrypt = omap_aes_gcm_encrypt,
.decrypt = omap_aes_gcm_decrypt,
},
@@ -783,15 +770,15 @@ static struct aead_alg algs_aead_gcm[] = {
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx),
.cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
},
.init = omap_aes_gcm_cra_init,
- .exit = omap_aes_gcm_cra_exit,
.maxauthsize = AES_BLOCK_SIZE,
.ivsize = GCM_RFC4106_IV_SIZE,
.setkey = omap_aes_4106gcm_setkey,
+ .setauthsize = omap_aes_4106gcm_setauthsize,
.encrypt = omap_aes_4106gcm_encrypt,
.decrypt = omap_aes_4106gcm_decrypt,
},
@@ -1296,7 +1283,8 @@ static int omap_aes_remove(struct platform_device *pdev)
tasklet_kill(&dd->done_task);
omap_aes_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
- dd = NULL;
+
+ sysfs_remove_group(&dd->dev->kobj, &omap_aes_attr_group);
return 0;
}
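
The cb_desc indirection in the omap-aes.c hunk above exists because a request may have no output DMA at all (for example GCM processing only associated data); in that case completion has to be signalled from the IN descriptor. A hedged, illustrative sketch of that selection, not taken verbatim from the driver:

#include <linux/dmaengine.h>

/* Illustrative only: attach the completion callback to whichever descriptor
 * finishes the transfer; tx_out may legitimately be NULL. */
static void arm_completion(struct dma_async_tx_descriptor *tx_in,
			   struct dma_async_tx_descriptor *tx_out,
			   dma_async_tx_callback done, void *param)
{
	struct dma_async_tx_descriptor *cb = tx_out ? tx_out : tx_in;

	cb->callback = done;
	cb->callback_param = param;

	dmaengine_submit(tx_in);
	if (tx_out)
		dmaengine_submit(tx_out);
}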
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 2d3575231e31..2d111bf906e1 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -9,6 +9,7 @@
#ifndef __OMAP_AES_H__
#define __OMAP_AES_H__
+#include <crypto/aes.h>
#include <crypto/engine.h>
#define DST_MAXBURST 4
@@ -79,7 +80,6 @@
#define FLAGS_INIT BIT(5)
#define FLAGS_FAST BIT(6)
-#define FLAGS_BUSY BIT(7)
#define FLAGS_IN_DATA_ST_SHIFT 8
#define FLAGS_OUT_DATA_ST_SHIFT 10
@@ -98,7 +98,11 @@ struct omap_aes_ctx {
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
u8 nonce[4];
struct crypto_sync_skcipher *fallback;
- struct crypto_skcipher *ctr;
+};
+
+struct omap_aes_gcm_ctx {
+ struct omap_aes_ctx octx;
+ struct crypto_aes_ctx actx;
};
struct omap_aes_reqctx {
@@ -202,8 +206,12 @@ int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen);
int omap_aes_gcm_encrypt(struct aead_request *req);
int omap_aes_gcm_decrypt(struct aead_request *req);
+int omap_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
int omap_aes_4106gcm_encrypt(struct aead_request *req);
int omap_aes_4106gcm_decrypt(struct aead_request *req);
+int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent,
+ unsigned int authsize);
+int omap_aes_gcm_cra_init(struct crypto_aead *tfm);
int omap_aes_write_ctrl(struct omap_aes_dev *dd);
int omap_aes_crypt_dma_start(struct omap_aes_dev *dd);
int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd);
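
The header change above splits the GCM transform context into the existing driver context (octx) plus a software struct crypto_aes_ctx (actx); that is what lets the setkey paths call aes_expandkey() instead of open-coding a key-length switch. A short sketch of the idiom, with hypothetical names:

#include <crypto/aes.h>
#include <linux/string.h>

struct my_gcm_ctx {
	struct crypto_aes_ctx actx;	/* software key schedule, also validates keylen */
	u8 hwkey[AES_MAX_KEY_SIZE];
	unsigned int keylen;
};

static int my_gcm_setkey(struct my_gcm_ctx *ctx, const u8 *key,
			 unsigned int keylen)
{
	int ret;

	/* aes_expandkey() returns -EINVAL unless keylen is 16, 24 or 32 bytes */
	ret = aes_expandkey(&ctx->actx, key, keylen);
	if (ret)
		return ret;

	memcpy(ctx->hwkey, key, keylen);
	ctx->keylen = keylen;
	return 0;
}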
diff --git a/drivers/crypto/omap-crypto.c b/drivers/crypto/omap-crypto.c
index 7d592d93bb1c..cc88b7362bc2 100644
--- a/drivers/crypto/omap-crypto.c
+++ b/drivers/crypto/omap-crypto.c
@@ -154,6 +154,41 @@ int omap_crypto_align_sg(struct scatterlist **sg, int total, int bs,
}
EXPORT_SYMBOL_GPL(omap_crypto_align_sg);
+static void omap_crypto_copy_data(struct scatterlist *src,
+ struct scatterlist *dst,
+ int offset, int len)
+{
+ int amt;
+ void *srcb, *dstb;
+ int srco = 0, dsto = offset;
+
+ while (src && dst && len) {
+ if (srco >= src->length) {
+ srco -= src->length;
+ src = sg_next(src);
+ continue;
+ }
+
+ if (dsto >= dst->length) {
+ dsto -= dst->length;
+ dst = sg_next(dst);
+ continue;
+ }
+
+ amt = min(src->length - srco, dst->length - dsto);
+ amt = min(len, amt);
+
+ srcb = sg_virt(src) + srco;
+ dstb = sg_virt(dst) + dsto;
+
+ memcpy(dstb, srcb, amt);
+
+ srco += amt;
+ dsto += amt;
+ len -= amt;
+ }
+}
+
void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
int offset, int len, u8 flags_shift,
unsigned long flags)
@@ -171,7 +206,7 @@ void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
pages = get_order(len);
if (orig && (flags & OMAP_CRYPTO_COPY_MASK))
- scatterwalk_map_and_copy(buf, orig, offset, len, 1);
+ omap_crypto_copy_data(sg, orig, offset, len);
if (flags & OMAP_CRYPTO_DATA_COPIED)
free_pages((unsigned long)buf, pages);
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 4c4dbc2b377e..8eda43319204 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -597,6 +597,7 @@ static int omap_des_crypt_req(struct crypto_engine *engine,
static void omap_des_done_task(unsigned long data)
{
struct omap_des_dev *dd = (struct omap_des_dev *)data;
+ int i;
pr_debug("enter done_task\n");
@@ -615,6 +616,11 @@ static void omap_des_done_task(unsigned long data)
omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save,
FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
+ if ((dd->flags & FLAGS_CBC) && dd->req->iv)
+ for (i = 0; i < 2; i++)
+ ((u32 *)dd->req->iv)[i] =
+ omap_des_read(dd, DES_REG_IV(dd, i));
+
omap_des_finish_req(dd, 0);
pr_debug("exit\n");
@@ -631,10 +637,11 @@ static int omap_des_crypt(struct skcipher_request *req, unsigned long mode)
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
- if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
- pr_err("request size is not exact amount of DES blocks\n");
+ if (!req->cryptlen)
+ return 0;
+
+ if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE))
return -EINVAL;
- }
dd = omap_des_find_dev(ctx);
if (!dd)
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index ac80bc6af093..4f915a4ef5b0 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -112,6 +112,8 @@
#define FLAGS_BE32_SHA1 8
#define FLAGS_SGS_COPIED 9
#define FLAGS_SGS_ALLOCED 10
+#define FLAGS_HUGE 11
+
/* context flags */
#define FLAGS_FINUP 16
@@ -136,6 +138,8 @@
#define BUFLEN SHA512_BLOCK_SIZE
#define OMAP_SHA_DMA_THRESHOLD 256
+#define OMAP_SHA_MAX_DMA_LEN (1024 * 2048)
+
struct omap_sham_dev;
struct omap_sham_reqctx {
@@ -644,6 +648,8 @@ static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
struct scatterlist *tmp;
int offset = ctx->offset;
+ ctx->total = new_len;
+
if (ctx->bufcnt)
n++;
@@ -661,15 +667,16 @@ static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
tmp = sg_next(tmp);
ctx->sg_len++;
+ new_len -= ctx->bufcnt;
}
while (sg && new_len) {
int len = sg->length - offset;
- if (offset) {
+ if (len <= 0) {
offset -= sg->length;
- if (offset < 0)
- offset = 0;
+ sg = sg_next(sg);
+ continue;
}
if (new_len < len)
@@ -677,33 +684,37 @@ static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
if (len > 0) {
new_len -= len;
- sg_set_page(tmp, sg_page(sg), len, sg->offset);
+ sg_set_page(tmp, sg_page(sg), len, sg->offset + offset);
+ offset = 0;
+ ctx->offset = 0;
+ ctx->sg_len++;
if (new_len <= 0)
- sg_mark_end(tmp);
+ break;
tmp = sg_next(tmp);
- ctx->sg_len++;
}
sg = sg_next(sg);
}
+ if (tmp)
+ sg_mark_end(tmp);
+
set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);
+ ctx->offset += new_len - ctx->bufcnt;
ctx->bufcnt = 0;
return 0;
}
static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
- struct scatterlist *sg, int bs, int new_len)
+ struct scatterlist *sg, int bs,
+ unsigned int new_len)
{
int pages;
void *buf;
- int len;
-
- len = new_len + ctx->bufcnt;
- pages = get_order(ctx->total);
+ pages = get_order(new_len);
buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
if (!buf) {
@@ -715,14 +726,15 @@ static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
- ctx->total - ctx->bufcnt, 0);
+ min(new_len, ctx->total) - ctx->bufcnt, 0);
sg_init_table(ctx->sgl, 1);
- sg_set_buf(ctx->sgl, buf, len);
+ sg_set_buf(ctx->sgl, buf, new_len);
ctx->sg = ctx->sgl;
set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);
ctx->sg_len = 1;
+ ctx->offset += new_len - ctx->bufcnt;
ctx->bufcnt = 0;
- ctx->offset = 0;
+ ctx->total = new_len;
return 0;
}
@@ -737,6 +749,7 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
struct scatterlist *sg_tmp = sg;
int new_len;
int offset = rctx->offset;
+ int bufcnt = rctx->bufcnt;
if (!sg || !sg->length || !nbytes)
return 0;
@@ -751,12 +764,28 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
else
new_len = (new_len - 1) / bs * bs;
+ if (!new_len)
+ return 0;
+
if (nbytes != new_len)
list_ok = false;
while (nbytes > 0 && sg_tmp) {
n++;
+ if (bufcnt) {
+ if (!IS_ALIGNED(bufcnt, bs)) {
+ aligned = false;
+ break;
+ }
+ nbytes -= bufcnt;
+ bufcnt = 0;
+ if (!nbytes)
+ list_ok = false;
+
+ continue;
+ }
+
#ifdef CONFIG_ZONE_DMA
if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
aligned = false;
@@ -794,13 +823,27 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
}
}
+ if (new_len > OMAP_SHA_MAX_DMA_LEN) {
+ new_len = OMAP_SHA_MAX_DMA_LEN;
+ aligned = false;
+ }
+
if (!aligned)
return omap_sham_copy_sgs(rctx, sg, bs, new_len);
else if (!list_ok)
return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);
+ rctx->total = new_len;
+ rctx->offset += new_len;
rctx->sg_len = n;
- rctx->sg = sg;
+ if (rctx->bufcnt) {
+ sg_init_table(rctx->sgl, 2);
+ sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
+ sg_chain(rctx->sgl, 2, sg);
+ rctx->sg = rctx->sgl;
+ } else {
+ rctx->sg = sg;
+ }
return 0;
}
@@ -810,31 +853,35 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
int bs;
int ret;
- int nbytes;
+ unsigned int nbytes;
bool final = rctx->flags & BIT(FLAGS_FINUP);
- int xmit_len, hash_later;
+ int hash_later;
bs = get_block_size(rctx);
+ nbytes = rctx->bufcnt;
+
if (update)
- nbytes = req->nbytes;
- else
- nbytes = 0;
+ nbytes += req->nbytes - rctx->offset;
- rctx->total = nbytes + rctx->bufcnt;
+ dev_dbg(rctx->dd->dev,
+ "%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n",
+ __func__, nbytes, bs, rctx->total, rctx->offset,
+ rctx->bufcnt);
- if (!rctx->total)
+ if (!nbytes)
return 0;
- if (nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
+ rctx->total = nbytes;
+
+ if (update && req->nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
int len = bs - rctx->bufcnt % bs;
- if (len > nbytes)
- len = nbytes;
+ if (len > req->nbytes)
+ len = req->nbytes;
scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
0, len, 0);
rctx->bufcnt += len;
- nbytes -= len;
rctx->offset = len;
}
@@ -845,64 +892,25 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
if (ret)
return ret;
- xmit_len = rctx->total;
-
- if (!IS_ALIGNED(xmit_len, bs)) {
- if (final)
- xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs;
- else
- xmit_len = xmit_len / bs * bs;
- } else if (!final) {
- xmit_len -= bs;
- }
-
- hash_later = rctx->total - xmit_len;
+ hash_later = nbytes - rctx->total;
if (hash_later < 0)
hash_later = 0;
- if (rctx->bufcnt && nbytes) {
- /* have data from previous operation and current */
- sg_init_table(rctx->sgl, 2);
- sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
-
- sg_chain(rctx->sgl, 2, req->src);
-
- rctx->sg = rctx->sgl;
-
- rctx->sg_len++;
- } else if (rctx->bufcnt) {
- /* have buffered data only */
- sg_init_table(rctx->sgl, 1);
- sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, xmit_len);
-
- rctx->sg = rctx->sgl;
-
- rctx->sg_len = 1;
- }
-
if (hash_later) {
- int offset = 0;
-
- if (hash_later > req->nbytes) {
- memcpy(rctx->buffer, rctx->buffer + xmit_len,
- hash_later - req->nbytes);
- offset = hash_later - req->nbytes;
- }
-
- if (req->nbytes) {
- scatterwalk_map_and_copy(rctx->buffer + offset,
- req->src,
- offset + req->nbytes -
- hash_later, hash_later, 0);
- }
+ scatterwalk_map_and_copy(rctx->buffer,
+ req->src,
+ req->nbytes - hash_later,
+ hash_later, 0);
rctx->bufcnt = hash_later;
} else {
rctx->bufcnt = 0;
}
- if (!final)
- rctx->total = xmit_len;
+ if (hash_later > rctx->buflen)
+ set_bit(FLAGS_HUGE, &rctx->dd->flags);
+
+ rctx->total = min(nbytes, rctx->total);
return 0;
}
@@ -998,10 +1006,11 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
struct ahash_request *req = dd->req;
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
int err;
- bool final = ctx->flags & BIT(FLAGS_FINUP);
+ bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
+ !(dd->flags & BIT(FLAGS_HUGE));
- dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
- ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
+ dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, final: %d",
+ ctx->total, ctx->digcnt, final);
if (ctx->total < get_block_size(ctx) ||
ctx->total < dd->fallback_sz)
@@ -1024,6 +1033,9 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
int err = 0, use_dma = 1;
+ if (dd->flags & BIT(FLAGS_HUGE))
+ return 0;
+
if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
/*
* faster to handle last block with cpu or
@@ -1083,7 +1095,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
free_pages((unsigned long)sg_virt(ctx->sg),
- get_order(ctx->sg->length + ctx->bufcnt));
+ get_order(ctx->sg->length));
if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
kfree(ctx->sg);
@@ -1092,6 +1104,21 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED));
+ if (dd->flags & BIT(FLAGS_HUGE)) {
+ dd->flags &= ~(BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
+ BIT(FLAGS_OUTPUT_READY) | BIT(FLAGS_HUGE));
+ omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
+ if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
+ err = omap_sham_update_req(dd);
+ if (err != -EINPROGRESS &&
+ (ctx->flags & BIT(FLAGS_FINUP)))
+ err = omap_sham_final_req(dd);
+ } else if (ctx->op == OP_FINAL) {
+ omap_sham_final_req(dd);
+ }
+ return;
+ }
+
if (!err) {
dd->pdata->copy_hash(req, 1);
if (test_bit(FLAGS_FINAL, &dd->flags))
@@ -1107,6 +1134,8 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
+ ctx->offset = 0;
+
if (req->base.complete)
req->base.complete(&req->base, err);
}
@@ -1158,7 +1187,7 @@ retry:
/* request has changed - restore hash */
dd->pdata->copy_hash(req, 0);
- if (ctx->op == OP_UPDATE) {
+ if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
err = omap_sham_update_req(dd);
if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
/* no final() after finup() */
@@ -1730,6 +1759,8 @@ static void omap_sham_done_task(unsigned long data)
struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
int err = 0;
+ dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);
+
if (!test_bit(FLAGS_BUSY, &dd->flags)) {
omap_sham_handle_queue(dd, NULL);
return;
@@ -2223,6 +2254,8 @@ static int omap_sham_remove(struct platform_device *pdev)
if (!dd->polling_mode)
dma_release_channel(dd->dma_lch);
+ sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group);
+
return 0;
}
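
In the omap-sham.c rework above, bytes left over from a previous update are now prepended by chaining a two-entry scatterlist in front of the caller's list instead of copying the request data. A minimal sketch of that chaining, with assumed names:

#include <linux/scatterlist.h>

/* Sketch: present 'buffered' bytes followed by the request's own sg list as
 * one logical scatterlist, without copying the request data. */
static struct scatterlist *prepend_buffer(struct scatterlist head[2],
					  void *buffered, unsigned int buflen,
					  struct scatterlist *req_src)
{
	sg_init_table(head, 2);
	sg_set_buf(&head[0], buffered, buflen);
	sg_chain(head, 2, req_src);	/* entry 1 becomes a chain link, not data */

	return head;
}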
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index c5b60f50e1b5..594d6b1695d5 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -108,14 +108,11 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
{
struct aes_ctx *ctx = aes_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
- u32 *flags = &tfm->crt_flags;
struct crypto_aes_ctx gen_aes;
int cpu;
- if (key_len % 8) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ if (key_len % 8)
return -EINVAL;
- }
/*
* If the hardware is capable of generating the extended key
@@ -146,10 +143,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
ctx->cword.encrypt.keygen = 1;
ctx->cword.decrypt.keygen = 1;
- if (aes_expandkey(&gen_aes, in_key, key_len)) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ if (aes_expandkey(&gen_aes, in_key, key_len))
return -EINVAL;
- }
memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);
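
The padlock-aes.c hunk above is one instance of a change repeated throughout this series: setkey() no longer raises CRYPTO_TFM_RES_BAD_KEY_LEN (the reporting flags are going away), it simply returns -EINVAL. A hedged sketch of the resulting convention, using a hypothetical driver setkey:

#include <crypto/aes.h>
#include <crypto/skcipher.h>

static int example_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int keylen)
{
	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;	/* no crypto_skcipher_set_flags() needed any more */

	/* ...store or program the key here... */
	return 0;
}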
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index ddf1b549fdca..c826abe79e79 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -190,13 +190,11 @@ static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
return padlock_sha256_finup(desc, buf, 0, out);
}
-static int padlock_cra_init(struct crypto_tfm *tfm)
+static int padlock_init_tfm(struct crypto_shash *hash)
{
- struct crypto_shash *hash = __crypto_shash_cast(tfm);
- const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
- struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+ const char *fallback_driver_name = crypto_shash_alg_name(hash);
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash);
struct crypto_shash *fallback_tfm;
- int err = -ENOMEM;
/* Allocate a fallback and abort if it failed. */
fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
@@ -204,21 +202,17 @@ static int padlock_cra_init(struct crypto_tfm *tfm)
if (IS_ERR(fallback_tfm)) {
printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
fallback_driver_name);
- err = PTR_ERR(fallback_tfm);
- goto out;
+ return PTR_ERR(fallback_tfm);
}
ctx->fallback = fallback_tfm;
hash->descsize += crypto_shash_descsize(fallback_tfm);
return 0;
-
-out:
- return err;
}
-static void padlock_cra_exit(struct crypto_tfm *tfm)
+static void padlock_exit_tfm(struct crypto_shash *hash)
{
- struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash);
crypto_free_shash(ctx->fallback);
}
@@ -231,6 +225,8 @@ static struct shash_alg sha1_alg = {
.final = padlock_sha1_final,
.export = padlock_sha_export,
.import = padlock_sha_import,
+ .init_tfm = padlock_init_tfm,
+ .exit_tfm = padlock_exit_tfm,
.descsize = sizeof(struct padlock_sha_desc),
.statesize = sizeof(struct sha1_state),
.base = {
@@ -241,8 +237,6 @@ static struct shash_alg sha1_alg = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
- .cra_init = padlock_cra_init,
- .cra_exit = padlock_cra_exit,
}
};
@@ -254,6 +248,8 @@ static struct shash_alg sha256_alg = {
.final = padlock_sha256_final,
.export = padlock_sha_export,
.import = padlock_sha_import,
+ .init_tfm = padlock_init_tfm,
+ .exit_tfm = padlock_exit_tfm,
.descsize = sizeof(struct padlock_sha_desc),
.statesize = sizeof(struct sha256_state),
.base = {
@@ -264,8 +260,6 @@ static struct shash_alg sha256_alg = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
- .cra_init = padlock_cra_init,
- .cra_exit = padlock_cra_exit,
}
};
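
padlock-sha moves its fallback allocation from cra_init/cra_exit to the shash-specific init_tfm/exit_tfm hooks. Once allocated, a fallback shash is typically driven through an on-stack descriptor; the following is only a sketch of that usage, not the driver's own code:

#include <crypto/hash.h>

/* Sketch: hash 'len' bytes of 'data' with a previously allocated fallback. */
static int fallback_digest(struct crypto_shash *fallback, const u8 *data,
			   unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, fallback);

	desc->tfm = fallback;
	return crypto_shash_digest(desc, data, len, out);
}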
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 29da449b3e9e..7384e91c8b32 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -465,9 +465,6 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_aead_setkey(ctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(tfm, crypto_aead_get_flags(ctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
if (err)
return err;
@@ -490,7 +487,6 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
return 0;
badkey:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -780,10 +776,8 @@ static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
int err = 0;
- if (len > AES_MAX_KEY_SIZE) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (len > AES_MAX_KEY_SIZE)
return -EINVAL;
- }
/*
* IPSec engine only supports 128 and 256 bit AES keys. If we get a
@@ -805,12 +799,6 @@ static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
CRYPTO_TFM_REQ_MASK);
err = crypto_sync_skcipher_setkey(ctx->sw_cipher, key, len);
-
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- crypto_sync_skcipher_get_flags(ctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK;
-
if (err)
goto sw_setkey_failed;
}
@@ -830,7 +818,6 @@ static int spacc_kasumi_f8_setkey(struct crypto_skcipher *cipher,
int err = 0;
if (len > AES_MAX_KEY_SIZE) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
err = -EINVAL;
goto out;
}
@@ -1595,6 +1582,11 @@ static const struct of_device_id spacc_of_id_table[] = {
MODULE_DEVICE_TABLE(of, spacc_of_id_table);
#endif /* CONFIG_OF */
+static void spacc_tasklet_kill(void *data)
+{
+ tasklet_kill(data);
+}
+
static int spacc_probe(struct platform_device *pdev)
{
int i, err, ret;
@@ -1637,6 +1629,14 @@ static int spacc_probe(struct platform_device *pdev)
return -ENXIO;
}
+ tasklet_init(&engine->complete, spacc_spacc_complete,
+ (unsigned long)engine);
+
+ ret = devm_add_action(&pdev->dev, spacc_tasklet_kill,
+ &engine->complete);
+ if (ret)
+ return ret;
+
if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
engine->name, engine)) {
dev_err(engine->dev, "failed to request IRQ\n");
@@ -1694,8 +1694,6 @@ static int spacc_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&engine->completed);
INIT_LIST_HEAD(&engine->in_progress);
engine->in_flight = 0;
- tasklet_init(&engine->complete, spacc_spacc_complete,
- (unsigned long)engine);
platform_set_drvdata(pdev, engine);
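
The picoxcell change above initializes the tasklet before devm_request_irq() and registers a devm action to kill it. Because devres releases run in reverse order of registration, the IRQ is freed before the tasklet is killed, so the handler can no longer schedule it afterwards. A hedged sketch of that probe-ordering pattern; my_tasklet_fn and my_irq_handler are assumed helpers, not from this patch:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static void kill_tasklet_action(void *data)
{
	tasklet_kill(data);
}

/* Sketch: 't' lives in driver private data; 'priv' is passed to both handlers. */
static int example_probe_irq(struct platform_device *pdev,
			     struct tasklet_struct *t, int irq, void *priv)
{
	int ret;

	tasklet_init(t, my_tasklet_fn, (unsigned long)priv);

	/* Registered before the IRQ, so it runs after free_irq() on unbind. */
	ret = devm_add_action(&pdev->dev, kill_tasklet_action, t);
	if (ret)
		return ret;

	return devm_request_irq(&pdev->dev, irq, my_irq_handler, 0,
				dev_name(&pdev->dev), priv);
}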
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 35bca76b640f..833bb1d3a11b 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -570,7 +570,6 @@ static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
memzero_explicit(&keys, sizeof(keys));
return 0;
bad_key:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
error:
@@ -586,14 +585,11 @@ static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
int alg;
if (qat_alg_validate_key(keylen, &alg, mode))
- goto bad_key;
+ return -EINVAL;
qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
return 0;
-bad_key:
- crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key,
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
index 8caa04e1ec43..14ade8a7d664 100644
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o
qcrypto-objs := core.o \
common.o \
- dma.o \
- sha.o \
- skcipher.o
+ dma.o
+
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SHA) += sha.o
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) += skcipher.o
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index da1188abc9ba..629e7f34dc09 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -45,52 +45,56 @@ qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
qce_write(qce, offset + i * sizeof(u32), 0);
}
-static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+static u32 qce_config_reg(struct qce_device *qce, int little)
{
- u32 cfg = 0;
+ u32 beats = (qce->burst_size >> 3) - 1;
+ u32 pipe_pair = qce->pipe_pair_id;
+ u32 config;
- if (IS_AES(flags)) {
- if (aes_key_size == AES_KEYSIZE_128)
- cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
- else if (aes_key_size == AES_KEYSIZE_256)
- cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
- }
+ config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
+ config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
+ BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
+ config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
+ config &= ~HIGH_SPD_EN_N_SHIFT;
- if (IS_AES(flags))
- cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
- else if (IS_DES(flags) || IS_3DES(flags))
- cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+ if (little)
+ config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
- if (IS_DES(flags))
- cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+ return config;
+}
- if (IS_3DES(flags))
- cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
+{
+ __be32 *d = dst;
+ const u8 *s = src;
+ unsigned int n;
- switch (flags & QCE_MODE_MASK) {
- case QCE_MODE_ECB:
- cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CBC:
- cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CTR:
- cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_XTS:
- cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CCM:
- cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
- cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
- break;
- default:
- return ~0;
+ n = len / sizeof(u32);
+ for (; n > 0; n--) {
+ *d = cpu_to_be32p((const __u32 *) s);
+ s += sizeof(__u32);
+ d++;
}
+}
- return cfg;
+static void qce_setup_config(struct qce_device *qce)
+{
+ u32 config;
+
+ /* get big endianness */
+ config = qce_config_reg(qce, 0);
+
+ /* clear status */
+ qce_write(qce, REG_STATUS, 0);
+ qce_write(qce, REG_CONFIG, config);
+}
+
+static inline void qce_crypto_go(struct qce_device *qce)
+{
+ qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
}
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
{
u32 cfg = 0;
@@ -137,88 +141,6 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
return cfg;
}
-static u32 qce_config_reg(struct qce_device *qce, int little)
-{
- u32 beats = (qce->burst_size >> 3) - 1;
- u32 pipe_pair = qce->pipe_pair_id;
- u32 config;
-
- config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
- config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
- BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
- config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
- config &= ~HIGH_SPD_EN_N_SHIFT;
-
- if (little)
- config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
-
- return config;
-}
-
-void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
-{
- __be32 *d = dst;
- const u8 *s = src;
- unsigned int n;
-
- n = len / sizeof(u32);
- for (; n > 0; n--) {
- *d = cpu_to_be32p((const __u32 *) s);
- s += sizeof(__u32);
- d++;
- }
-}
-
-static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
-{
- u8 swap[QCE_AES_IV_LENGTH];
- u32 i, j;
-
- if (ivsize > QCE_AES_IV_LENGTH)
- return;
-
- memset(swap, 0, QCE_AES_IV_LENGTH);
-
- for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
- i < QCE_AES_IV_LENGTH; i++, j--)
- swap[i] = src[j];
-
- qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
-}
-
-static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
- unsigned int enckeylen, unsigned int cryptlen)
-{
- u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
- unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
- unsigned int xtsdusize;
-
- qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
- enckeylen / 2);
- qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
-
- /* xts du size 512B */
- xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
- qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
-}
-
-static void qce_setup_config(struct qce_device *qce)
-{
- u32 config;
-
- /* get big endianness */
- config = qce_config_reg(qce, 0);
-
- /* clear status */
- qce_write(qce, REG_STATUS, 0);
- qce_write(qce, REG_CONFIG, config);
-}
-
-static inline void qce_crypto_go(struct qce_device *qce)
-{
- qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
-}
-
static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
{
@@ -303,6 +225,87 @@ go_proc:
return 0;
}
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
+static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+{
+ u32 cfg = 0;
+
+ if (IS_AES(flags)) {
+ if (aes_key_size == AES_KEYSIZE_128)
+ cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
+ else if (aes_key_size == AES_KEYSIZE_256)
+ cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
+ }
+
+ if (IS_AES(flags))
+ cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
+ else if (IS_DES(flags) || IS_3DES(flags))
+ cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+
+ if (IS_DES(flags))
+ cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+
+ if (IS_3DES(flags))
+ cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+
+ switch (flags & QCE_MODE_MASK) {
+ case QCE_MODE_ECB:
+ cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CBC:
+ cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CTR:
+ cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_XTS:
+ cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CCM:
+ cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
+ cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
+ break;
+ default:
+ return ~0;
+ }
+
+ return cfg;
+}
+
+static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
+{
+ u8 swap[QCE_AES_IV_LENGTH];
+ u32 i, j;
+
+ if (ivsize > QCE_AES_IV_LENGTH)
+ return;
+
+ memset(swap, 0, QCE_AES_IV_LENGTH);
+
+ for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
+ i < QCE_AES_IV_LENGTH; i++, j--)
+ swap[i] = src[j];
+
+ qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
+}
+
+static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
+ unsigned int enckeylen, unsigned int cryptlen)
+{
+ u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
+ unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
+ unsigned int xtsdusize;
+
+ qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
+ enckeylen / 2);
+ qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
+
+ /* xts du size 512B */
+ xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
+ qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+}
static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
@@ -384,15 +387,20 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
return 0;
}
+#endif
int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
u32 offset)
{
switch (type) {
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
case CRYPTO_ALG_TYPE_SKCIPHER:
return qce_setup_regs_skcipher(async_req, totallen, offset);
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
case CRYPTO_ALG_TYPE_AHASH:
return qce_setup_regs_ahash(async_req, totallen, offset);
+#endif
default:
return -EINVAL;
}
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index 0a44a6eeacf5..cb6d61eb7302 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -22,8 +22,12 @@
#define QCE_QUEUE_LENGTH 1
static const struct qce_algo_ops *qce_ops[] = {
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
&skcipher_ops,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
&ahash_ops,
+#endif
};
static void qce_unregister_algs(struct qce_device *qce)
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 40a59214d2e1..7da893dc00e7 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -47,7 +47,8 @@ void qce_dma_release(struct qce_dma_data *dma)
}
struct scatterlist *
-qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
+ int max_ents)
{
struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
@@ -60,12 +61,13 @@ qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
if (!sg)
return ERR_PTR(-EINVAL);
- while (new_sgl && sg) {
+ while (new_sgl && sg && max_ents) {
sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
new_sgl->offset);
sg_last = sg;
sg = sg_next(sg);
new_sgl = sg_next(new_sgl);
+ max_ents--;
}
return sg_last;
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index 1e25a9e0e6f8..ed25a0d9829e 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -42,6 +42,7 @@ int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
void qce_dma_issue_pending(struct qce_dma_data *dma);
int qce_dma_terminate_all(struct qce_dma_data *dma);
struct scatterlist *
-qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add,
+ int max_ents);
#endif /* _DMA_H_ */
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 95ab16fc8fd6..1ab62e7d5f3c 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -396,8 +396,6 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);
ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
- if (ret)
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
kfree(buf);
err_free_req:
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index fee07323f8f9..4217b745f124 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -21,6 +21,7 @@ static void qce_skcipher_done(void *data)
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
+ struct qce_result_dump *result_buf = qce->dma.result_buf;
enum dma_data_direction dir_src, dir_dst;
u32 status;
int error;
@@ -45,6 +46,7 @@ static void qce_skcipher_done(void *data)
if (error < 0)
dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);
+ memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
qce->async_req_done(tmpl->qce, error);
}
@@ -95,13 +97,13 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
- sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
+ sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, rctx->dst_nents - 1);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto error_free;
}
- sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
+ sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, 1);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto error_free;
@@ -154,12 +156,13 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
int ret;
if (!key || !keylen)
return -EINVAL;
- switch (keylen) {
+ switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
case AES_KEYSIZE_128:
case AES_KEYSIZE_256:
break;
@@ -213,13 +216,15 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+ int keylen;
int ret;
rctx->flags = tmpl->alg_flags;
rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
+ keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;
- if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
- ctx->enc_keylen != AES_KEYSIZE_256) {
+ if (IS_AES(rctx->flags) && keylen != AES_KEYSIZE_128 &&
+ keylen != AES_KEYSIZE_256) {
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
skcipher_request_set_sync_tfm(subreq, ctx->fallback);
@@ -252,7 +257,14 @@ static int qce_skcipher_init(struct crypto_skcipher *tfm)
memset(ctx, 0, sizeof(*ctx));
crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx));
+ return 0;
+}
+
+static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
+{
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ qce_skcipher_init(tfm);
ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
0, CRYPTO_ALG_NEED_FALLBACK);
return PTR_ERR_OR_ZERO(ctx->fallback);
@@ -270,6 +282,7 @@ struct qce_skcipher_def {
const char *name;
const char *drv_name;
unsigned int blocksize;
+ unsigned int chunksize;
unsigned int ivsize;
unsigned int min_keysize;
unsigned int max_keysize;
@@ -298,7 +311,8 @@ static const struct qce_skcipher_def skcipher_def[] = {
.flags = QCE_ALG_AES | QCE_MODE_CTR,
.name = "ctr(aes)",
.drv_name = "ctr-aes-qce",
- .blocksize = AES_BLOCK_SIZE,
+ .blocksize = 1,
+ .chunksize = AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -309,8 +323,8 @@ static const struct qce_skcipher_def skcipher_def[] = {
.drv_name = "xts-aes-qce",
.blocksize = AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
},
{
.flags = QCE_ALG_DES | QCE_MODE_ECB,
@@ -368,6 +382,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
def->drv_name);
alg->base.cra_blocksize = def->blocksize;
+ alg->chunksize = def->chunksize;
alg->ivsize = def->ivsize;
alg->min_keysize = def->min_keysize;
alg->max_keysize = def->max_keysize;
@@ -379,14 +394,18 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
alg->base.cra_priority = 300;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
alg->base.cra_alignmask = 0;
alg->base.cra_module = THIS_MODULE;
- alg->init = qce_skcipher_init;
- alg->exit = qce_skcipher_exit;
+ if (IS_AES(def->flags)) {
+ alg->base.cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
+ alg->init = qce_skcipher_init_fallback;
+ alg->exit = qce_skcipher_exit;
+ } else {
+ alg->init = qce_skcipher_init;
+ }
INIT_LIST_HEAD(&tmpl->entry);
tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
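
The qce skcipher changes above account for XTS supplying two concatenated AES keys (a data key plus a tweak key): min/max keysize double and the length checks operate on keylen/2. A small illustrative helper, assuming only the standard AES key size constants and the hardware's 128/256-bit support shown in this driver:

#include <crypto/aes.h>
#include <linux/types.h>

/* Sketch: an XTS key is two AES keys back to back, so validate half of it. */
static bool xts_aes_keylen_ok(unsigned int keylen)
{
	unsigned int single = keylen / 2;

	if (keylen & 1)
		return false;

	return single == AES_KEYSIZE_128 || single == AES_KEYSIZE_256;
}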
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index ca4de4ddfe1f..4a75c8e1fa6c 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -34,10 +34,8 @@ static int rk_aes_setkey(struct crypto_skcipher *cipher,
struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ keylen != AES_KEYSIZE_256)
return -EINVAL;
- }
ctx->keylen = keylen;
memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
return 0;
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index d4ea2f11ca68..466e30bd529c 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -601,7 +601,6 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
- int ret;
ctx->keylen = keylen;
@@ -621,13 +620,7 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
-
- ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
-
- tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(ctx->fallback) &
- CRYPTO_TFM_RES_MASK;
- return ret;
+ return crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
}
static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 1aba9372cd23..4ef3eb11361c 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -4,7 +4,7 @@ config CRYPTO_DEV_STM32_CRC
depends on ARCH_STM32
select CRYPTO_HASH
help
- This enables support for the CRC32 hw accelerator which can be found
+ This enables support for the CRC32 hw accelerator which can be found
on STMicroelectronics STM32 SOC.
config CRYPTO_DEV_STM32_HASH
@@ -17,7 +17,7 @@ config CRYPTO_DEV_STM32_HASH
select CRYPTO_SHA256
select CRYPTO_ENGINE
help
- This enables support for the HASH hw accelerator which can be found
+ This enables support for the HASH hw accelerator which can be found
on STMicroelectronics STM32 SOC.
config CRYPTO_DEV_STM32_CRYP
@@ -27,5 +27,5 @@ config CRYPTO_DEV_STM32_CRYP
select CRYPTO_ENGINE
select CRYPTO_LIB_DES
help
- This enables support for the CRYP (AES/DES/TDES) hw accelerator which
+ This enables support for the CRYP (AES/DES/TDES) hw accelerator which
can be found on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index 9e11c3480353..8e92e4ac79f1 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -85,10 +85,8 @@ static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
{
struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm);
- if (keylen != sizeof(u32)) {
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != sizeof(u32))
return -EINVAL;
- }
mctx->key = get_unaligned_le32(key);
return 0;
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index cfc8e0e37bee..167b80eec437 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -518,10 +518,10 @@ static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
dma_conf.dst_maxburst = hdev->dma_maxburst;
dma_conf.device_fc = false;
- hdev->dma_lch = dma_request_slave_channel(hdev->dev, "in");
- if (!hdev->dma_lch) {
+ hdev->dma_lch = dma_request_chan(hdev->dev, "in");
+ if (IS_ERR(hdev->dma_lch)) {
dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
- return -EBUSY;
+ return PTR_ERR(hdev->dma_lch);
}
err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index d71d65846e47..9c6db7f698c4 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -914,7 +914,6 @@ static int aead_setkey(struct crypto_aead *authenc,
return 0;
badkey:
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -929,11 +928,11 @@ static int aead_des3_setkey(struct crypto_aead *authenc,
err = crypto_authenc_extractkeys(&keys, key, keylen);
if (unlikely(err))
- goto badkey;
+ goto out;
err = -EINVAL;
if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
- goto badkey;
+ goto out;
err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen);
if (err)
@@ -954,10 +953,6 @@ static int aead_des3_setkey(struct crypto_aead *authenc,
out:
memzero_explicit(&keys, sizeof(keys));
return err;
-
-badkey:
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
- goto out;
}
static void talitos_sg_unmap(struct device *dev,
@@ -1528,8 +1523,6 @@ static int skcipher_aes_setkey(struct crypto_skcipher *cipher,
keylen == AES_KEYSIZE_256)
return skcipher_setkey(cipher, key, keylen);
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
return -EINVAL;
}
@@ -2234,10 +2227,8 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
/* Must get the hash of the long key */
ret = keyhash(tfm, key, keylen, hash);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return -EINVAL;
- }
keysize = digestsize;
memcpy(ctx->key, hash, digestsize);
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig
index b731895aa241..f56d65c56ccf 100644
--- a/drivers/crypto/ux500/Kconfig
+++ b/drivers/crypto/ux500/Kconfig
@@ -11,18 +11,18 @@ config CRYPTO_DEV_UX500_CRYP
select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
help
- This selects the crypto driver for the UX500_CRYP hardware. It supports
- AES-ECB, CBC and CTR with keys sizes of 128, 192 and 256 bit sizes.
+ This selects the crypto driver for the UX500_CRYP hardware. It supports
+	  AES-ECB, CBC and CTR with key sizes of 128, 192 and 256 bits.
config CRYPTO_DEV_UX500_HASH
- tristate "UX500 crypto driver for HASH block"
- depends on CRYPTO_DEV_UX500
- select CRYPTO_HASH
+ tristate "UX500 crypto driver for HASH block"
+ depends on CRYPTO_DEV_UX500
+ select CRYPTO_HASH
select CRYPTO_SHA1
select CRYPTO_SHA256
- help
- This selects the hash driver for the UX500_HASH hardware.
- Depends on UX500/STM DMA if running in DMA mode.
+ help
+ This selects the hash driver for the UX500_HASH hardware.
+ Depends on UX500/STM DMA if running in DMA mode.
config CRYPTO_DEV_UX500_DEBUG
bool "Activate ux500 platform debug-mode for crypto and hash block"
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 95fb694a2667..800dfc4d16c4 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -951,7 +951,6 @@ static int aes_skcipher_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
- u32 *flags = &cipher->base.crt_flags;
pr_debug(DEV_DBG_NAME " [%s]", __func__);
@@ -970,7 +969,6 @@ static int aes_skcipher_setkey(struct crypto_skcipher *cipher,
default:
pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__);
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index 4b71e80951b7..fd045e64972a 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -272,11 +272,11 @@ static int virtio_crypto_alg_skcipher_init_sessions(
if (keylen > vcrypto->max_cipher_key_len) {
pr_err("virtio_crypto: the key is too long\n");
- goto bad_key;
+ return -EINVAL;
}
if (virtio_crypto_alg_validate_key(keylen, &alg))
- goto bad_key;
+ return -EINVAL;
/* Create encryption session */
ret = virtio_crypto_alg_skcipher_init_session(ctx,
@@ -291,10 +291,6 @@ static int virtio_crypto_alg_skcipher_init_sessions(
return ret;
}
return 0;
-
-bad_key:
- crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
/* Note: kernel crypto API realization */
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index d59e736882f6..9fee1b1532a4 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -84,6 +84,9 @@ static int p8_aes_xts_crypt(struct skcipher_request *req, int enc)
u8 tweak[AES_BLOCK_SIZE];
int ret;
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+
if (!crypto_simd_usable() || (req->cryptlen % XTS_BLOCK_SIZE) != 0) {
struct skcipher_request *subreq = skcipher_request_ctx(req);
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig
index 676ffcb64985..8da63f38e6bd 100644
--- a/drivers/tee/Kconfig
+++ b/drivers/tee/Kconfig
@@ -2,7 +2,7 @@
# Generic Trusted Execution Environment Configuration
config TEE
tristate "Trusted Execution Environment support"
- depends on HAVE_ARM_SMCCC || COMPILE_TEST
+ depends on HAVE_ARM_SMCCC || COMPILE_TEST || CPU_SUP_AMD
select DMA_SHARED_BUFFER
select GENERIC_ALLOCATOR
help
@@ -14,7 +14,7 @@ if TEE
menu "TEE drivers"
source "drivers/tee/optee/Kconfig"
-
+source "drivers/tee/amdtee/Kconfig"
endmenu
endif
diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile
index 21f51fd88b07..68da044afbfa 100644
--- a/drivers/tee/Makefile
+++ b/drivers/tee/Makefile
@@ -4,3 +4,4 @@ tee-objs += tee_core.o
tee-objs += tee_shm.o
tee-objs += tee_shm_pool.o
obj-$(CONFIG_OPTEE) += optee/
+obj-$(CONFIG_AMDTEE) += amdtee/
diff --git a/drivers/tee/amdtee/Kconfig b/drivers/tee/amdtee/Kconfig
new file mode 100644
index 000000000000..4e32b6413b41
--- /dev/null
+++ b/drivers/tee/amdtee/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: MIT
+# AMD-TEE Trusted Execution Environment Configuration
+config AMDTEE
+ tristate "AMD-TEE"
+ default m
+ depends on CRYPTO_DEV_SP_PSP
+ help
+ This implements AMD's Trusted Execution Environment (TEE) driver.
diff --git a/drivers/tee/amdtee/Makefile b/drivers/tee/amdtee/Makefile
new file mode 100644
index 000000000000..ff1485266117
--- /dev/null
+++ b/drivers/tee/amdtee/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: MIT
+obj-$(CONFIG_AMDTEE) += amdtee.o
+amdtee-objs += core.o
+amdtee-objs += call.o
+amdtee-objs += shm_pool.o
diff --git a/drivers/tee/amdtee/amdtee_if.h b/drivers/tee/amdtee/amdtee_if.h
new file mode 100644
index 000000000000..ff48c3e47375
--- /dev/null
+++ b/drivers/tee/amdtee/amdtee_if.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+/*
+ * This file has definitions related to Host and AMD-TEE Trusted OS interface.
+ * These definitions must match the definitions on the TEE side.
+ */
+
+#ifndef AMDTEE_IF_H
+#define AMDTEE_IF_H
+
+#include <linux/types.h>
+
+/*****************************************************************************
+ ** TEE Param
+ ******************************************************************************/
+#define TEE_MAX_PARAMS 4
+
+/**
+ * struct memref - memory reference structure
+ * @buf_id: buffer ID of the buffer mapped by TEE_CMD_ID_MAP_SHARED_MEM
+ * @offset: offset in bytes from beginning of the buffer
+ * @size: data size in bytes
+ */
+struct memref {
+ u32 buf_id;
+ u32 offset;
+ u32 size;
+};
+
+struct value {
+ u32 a;
+ u32 b;
+};
+
+/*
+ * Parameters passed to open_session or invoke_command
+ */
+union tee_op_param {
+ struct memref mref;
+ struct value val;
+};
+
+struct tee_operation {
+ u32 param_types;
+ union tee_op_param params[TEE_MAX_PARAMS];
+};
+
+/* Must be same as in GP TEE specification */
+#define TEE_OP_PARAM_TYPE_NONE 0
+#define TEE_OP_PARAM_TYPE_VALUE_INPUT 1
+#define TEE_OP_PARAM_TYPE_VALUE_OUTPUT 2
+#define TEE_OP_PARAM_TYPE_VALUE_INOUT 3
+#define TEE_OP_PARAM_TYPE_INVALID 4
+#define TEE_OP_PARAM_TYPE_MEMREF_INPUT 5
+#define TEE_OP_PARAM_TYPE_MEMREF_OUTPUT 6
+#define TEE_OP_PARAM_TYPE_MEMREF_INOUT 7
+
+#define TEE_PARAM_TYPE_GET(t, i) (((t) >> ((i) * 4)) & 0xF)
+#define TEE_PARAM_TYPES(t0, t1, t2, t3) \
+ ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))
+
+/*****************************************************************************
+ ** TEE Commands
+ *****************************************************************************/
+
+/*
+ * The shared memory between the rich world and the secure world may be
+ * physically non-contiguous. The structures below describe a shared memory
+ * region via a scatter/gather (sg) list.
+ */
+
+/**
+ * struct tee_sg_desc - sg descriptor for a physically contiguous buffer
+ * @low_addr: [in] bits[31:0] of buffer's physical address. Must be 4KB aligned
+ * @hi_addr: [in] bits[63:32] of the buffer's physical address
+ * @size: [in] size in bytes (must be multiple of 4KB)
+ */
+struct tee_sg_desc {
+ u32 low_addr;
+ u32 hi_addr;
+ u32 size;
+};
+
+/**
+ * struct tee_sg_list - structure describing a scatter/gather list
+ * @count: [in] number of sg descriptors
+ * @size: [in] total size of all buffers in the list. Must be multiple of 4KB
+ * @buf: [in] list of sg buffer descriptors
+ */
+#define TEE_MAX_SG_DESC 64
+struct tee_sg_list {
+ u32 count;
+ u32 size;
+ struct tee_sg_desc buf[TEE_MAX_SG_DESC];
+};
+
+/**
+ * struct tee_cmd_map_shared_mem - command to map shared memory
+ * @buf_id: [out] return buffer ID value
+ * @sg_list: [in] list describing memory to be mapped
+ */
+struct tee_cmd_map_shared_mem {
+ u32 buf_id;
+ struct tee_sg_list sg_list;
+};
+
+/**
+ * struct tee_cmd_unmap_shared_mem - command to unmap shared memory
+ * @buf_id: [in] buffer ID of memory to be unmapped
+ */
+struct tee_cmd_unmap_shared_mem {
+ u32 buf_id;
+};
+
+/**
+ * struct tee_cmd_load_ta - load Trusted Application (TA) binary into TEE
+ * @low_addr: [in] bits [31:0] of the physical address of the TA binary
+ * @hi_addr: [in] bits [63:32] of the physical address of the TA binary
+ * @size: [in] size of TA binary in bytes
+ * @ta_handle: [out] return handle of the loaded TA
+ */
+struct tee_cmd_load_ta {
+ u32 low_addr;
+ u32 hi_addr;
+ u32 size;
+ u32 ta_handle;
+};
+
+/**
+ * struct tee_cmd_unload_ta - command to unload TA binary from TEE environment
+ * @ta_handle: [in] handle of the loaded TA to be unloaded
+ */
+struct tee_cmd_unload_ta {
+ u32 ta_handle;
+};
+
+/**
+ * struct tee_cmd_open_session - command to call TA_OpenSessionEntryPoint in TA
+ * @ta_handle: [in] handle of the loaded TA
+ * @session_info: [out] pointer to TA allocated session data
+ * @op: [in/out] operation parameters
+ * @return_origin: [out] origin of return code after TEE processing
+ */
+struct tee_cmd_open_session {
+ u32 ta_handle;
+ u32 session_info;
+ struct tee_operation op;
+ u32 return_origin;
+};
+
+/**
+ * struct tee_cmd_close_session - command to call TA_CloseSessionEntryPoint()
+ * in TA
+ * @ta_handle: [in] handle of the loaded TA
+ * @session_info: [in] pointer to TA allocated session data
+ */
+struct tee_cmd_close_session {
+ u32 ta_handle;
+ u32 session_info;
+};
+
+/**
+ * struct tee_cmd_invoke_cmd - command to call TA_InvokeCommandEntryPoint() in
+ * TA
+ * @ta_handle: [in] handle of the loaded TA
+ * @cmd_id: [in] TA command ID
+ * @session_info: [in] pointer to TA allocated session data
+ * @op: [in/out] operation parameters
+ * @return_origin: [out] origin of return code after TEE processing
+ */
+struct tee_cmd_invoke_cmd {
+ u32 ta_handle;
+ u32 cmd_id;
+ u32 session_info;
+ struct tee_operation op;
+ u32 return_origin;
+};
+
+#endif /*AMDTEE_IF_H*/
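Note on the parameter packing defined above: param_types holds one 4-bit GlobalPlatform parameter type per slot, built with TEE_PARAM_TYPES() and read back with TEE_PARAM_TYPE_GET(). The standalone sketch below (illustration only, not part of the patch) mirrors those two macros and a few of the type constants from amdtee_if.h to show the packing in isolation.

/* Standalone illustration of the param_types packing in amdtee_if.h. */
#include <stdint.h>
#include <stdio.h>

#define TEE_OP_PARAM_TYPE_NONE          0
#define TEE_OP_PARAM_TYPE_VALUE_INPUT   1
#define TEE_OP_PARAM_TYPE_MEMREF_INOUT  7

#define TEE_PARAM_TYPE_GET(t, i)  (((t) >> ((i) * 4)) & 0xF)
#define TEE_PARAM_TYPES(t0, t1, t2, t3) \
	((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))

int main(void)
{
	/* Slot 0: value input, slot 1: memref in/out, slots 2-3: unused */
	uint32_t types = TEE_PARAM_TYPES(TEE_OP_PARAM_TYPE_VALUE_INPUT,
					 TEE_OP_PARAM_TYPE_MEMREF_INOUT,
					 TEE_OP_PARAM_TYPE_NONE,
					 TEE_OP_PARAM_TYPE_NONE);
	int i;

	printf("param_types = 0x%04x\n", types);	/* prints 0x0071 */
	for (i = 0; i < 4; i++)
		printf("slot %d -> type %u\n", i, TEE_PARAM_TYPE_GET(types, i));
	return 0;
}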
diff --git a/drivers/tee/amdtee/amdtee_private.h b/drivers/tee/amdtee/amdtee_private.h
new file mode 100644
index 000000000000..d7f798c3394b
--- /dev/null
+++ b/drivers/tee/amdtee/amdtee_private.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#ifndef AMDTEE_PRIVATE_H
+#define AMDTEE_PRIVATE_H
+
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/tee_drv.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+#include "amdtee_if.h"
+
+#define DRIVER_NAME "amdtee"
+#define DRIVER_AUTHOR "AMD-TEE Linux driver team"
+
+/* Some GlobalPlatform error codes used in this driver */
+#define TEEC_SUCCESS 0x00000000
+#define TEEC_ERROR_GENERIC 0xFFFF0000
+#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
+#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
+
+#define TEEC_ORIGIN_COMMS 0x00000002
+
+/* Maximum number of sessions which can be opened with a Trusted Application */
+#define TEE_NUM_SESSIONS 32
+
+#define TA_LOAD_PATH "/amdtee"
+#define TA_PATH_MAX 60
+
+/**
+ * struct amdtee - main service struct
+ * @teedev: client device
+ * @pool: shared memory pool
+ */
+struct amdtee {
+ struct tee_device *teedev;
+ struct tee_shm_pool *pool;
+};
+
+/**
+ * struct amdtee_session - Trusted Application (TA) session-related information.
+ * @ta_handle: handle to Trusted Application (TA) loaded in TEE environment
+ * @refcount: counter to keep track of sessions opened for the TA instance
+ * @session_info: an array pointing to TA allocated session data.
+ * @sess_mask: session usage bit-mask. If a particular bit is set, then the
+ * corresponding @session_info entry is in use or valid.
+ *
+ * Session structure is updated on open_session and this information is used for
+ * subsequent operations with the Trusted Application.
+ */
+struct amdtee_session {
+ struct list_head list_node;
+ u32 ta_handle;
+ struct kref refcount;
+ u32 session_info[TEE_NUM_SESSIONS];
+ DECLARE_BITMAP(sess_mask, TEE_NUM_SESSIONS);
+ spinlock_t lock; /* synchronizes access to @sess_mask */
+};
+
+/**
+ * struct amdtee_context_data - AMD-TEE driver context data
+ * @sess_list: Keeps track of sessions opened in current TEE context
+ */
+struct amdtee_context_data {
+ struct list_head sess_list;
+};
+
+struct amdtee_driver_data {
+ struct amdtee *amdtee;
+};
+
+struct shmem_desc {
+ void *kaddr;
+ u64 size;
+};
+
+/**
+ * struct amdtee_shm_data - Shared memory data
+ * @kaddr: Kernel virtual address of shared memory
+ * @buf_id: Buffer id of memory mapped by TEE_CMD_ID_MAP_SHARED_MEM
+ */
+struct amdtee_shm_data {
+ struct list_head shm_node;
+ void *kaddr;
+ u32 buf_id;
+};
+
+struct amdtee_shm_context {
+ struct list_head shmdata_list;
+};
+
+#define LOWER_TWO_BYTE_MASK 0x0000FFFF
+
+/**
+ * set_session_id() - Sets the session identifier.
+ * @ta_handle: [in] handle of the loaded Trusted Application (TA)
+ * @session_index: [in] Session index. Range: 0 to (TEE_NUM_SESSIONS - 1).
+ * @session: [out] Pointer to session id
+ *
+ * The lower two bytes of the session identifier hold the TA handle and the
+ * upper two bytes hold the session index.
+ */
+static inline void set_session_id(u32 ta_handle, u32 session_index,
+ u32 *session)
+{
+ *session = (session_index << 16) | (LOWER_TWO_BYTE_MASK & ta_handle);
+}
+
+static inline u32 get_ta_handle(u32 session)
+{
+ return session & LOWER_TWO_BYTE_MASK;
+}
+
+static inline u32 get_session_index(u32 session)
+{
+ return (session >> 16) & LOWER_TWO_BYTE_MASK;
+}
+
+int amdtee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param);
+
+int amdtee_close_session(struct tee_context *ctx, u32 session);
+
+int amdtee_invoke_func(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param);
+
+int amdtee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
+
+int amdtee_map_shmem(struct tee_shm *shm);
+
+void amdtee_unmap_shmem(struct tee_shm *shm);
+
+int handle_load_ta(void *data, u32 size,
+ struct tee_ioctl_open_session_arg *arg);
+
+int handle_unload_ta(u32 ta_handle);
+
+int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
+ struct tee_param *p);
+
+int handle_close_session(u32 ta_handle, u32 info);
+
+int handle_map_shmem(u32 count, struct shmem_desc *start, u32 *buf_id);
+
+void handle_unmap_shmem(u32 buf_id);
+
+int handle_invoke_cmd(struct tee_ioctl_invoke_arg *arg, u32 sinfo,
+ struct tee_param *p);
+
+struct tee_shm_pool *amdtee_config_shm(void);
+
+u32 get_buffer_id(struct tee_shm *shm);
+#endif /*AMDTEE_PRIVATE_H*/
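The inline helpers above encode a session identifier with the TA handle in the low 16 bits and the session index in the high 16 bits. The standalone sketch below (illustration only, not part of the patch) reproduces set_session_id() and the two extraction expressions to show the round trip.

/* Standalone illustration of the session-id packing in amdtee_private.h. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define LOWER_TWO_BYTE_MASK 0x0000FFFF

static void set_session_id(uint32_t ta_handle, uint32_t session_index,
			   uint32_t *session)
{
	*session = (session_index << 16) | (LOWER_TWO_BYTE_MASK & ta_handle);
}

int main(void)
{
	uint32_t session;

	set_session_id(0x1234, 5, &session);
	printf("session = 0x%08x\n", session);		/* prints 0x00051234 */

	/* get_ta_handle() and get_session_index() recover the two fields */
	assert((session & LOWER_TWO_BYTE_MASK) == 0x1234);
	assert(((session >> 16) & LOWER_TWO_BYTE_MASK) == 5);
	return 0;
}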
diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c
new file mode 100644
index 000000000000..096dd4d92d39
--- /dev/null
+++ b/drivers/tee/amdtee/call.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/tee.h>
+#include <linux/tee_drv.h>
+#include <linux/psp-tee.h>
+#include <linux/slab.h>
+#include <linux/psp-sev.h>
+#include "amdtee_if.h"
+#include "amdtee_private.h"
+
+static int tee_params_to_amd_params(struct tee_param *tee, u32 count,
+ struct tee_operation *amd)
+{
+ int i, ret = 0;
+ u32 type;
+
+ if (!count)
+ return 0;
+
+ if (!tee || !amd || count > TEE_MAX_PARAMS)
+ return -EINVAL;
+
+ amd->param_types = 0;
+ for (i = 0; i < count; i++) {
+ /* AMD TEE does not support meta parameter */
+ if (tee[i].attr > TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT)
+ return -EINVAL;
+
+ amd->param_types |= ((tee[i].attr & 0xF) << i * 4);
+ }
+
+ for (i = 0; i < count; i++) {
+ type = TEE_PARAM_TYPE_GET(amd->param_types, i);
+ pr_debug("%s: type[%d] = 0x%x\n", __func__, i, type);
+
+ if (type == TEE_OP_PARAM_TYPE_INVALID)
+ return -EINVAL;
+
+ if (type == TEE_OP_PARAM_TYPE_NONE)
+ continue;
+
+ /* It is assumed that all values are within 2^32-1 */
+ if (type > TEE_OP_PARAM_TYPE_VALUE_INOUT) {
+ u32 buf_id = get_buffer_id(tee[i].u.memref.shm);
+
+ amd->params[i].mref.buf_id = buf_id;
+ amd->params[i].mref.offset = tee[i].u.memref.shm_offs;
+ amd->params[i].mref.size = tee[i].u.memref.size;
+ pr_debug("%s: bufid[%d] = 0x%x, offset[%d] = 0x%x, size[%d] = 0x%x\n",
+ __func__,
+ i, amd->params[i].mref.buf_id,
+ i, amd->params[i].mref.offset,
+ i, amd->params[i].mref.size);
+ } else {
+ if (tee[i].u.value.c)
+ pr_warn("%s: Discarding value c\n", __func__);
+
+ amd->params[i].val.a = tee[i].u.value.a;
+ amd->params[i].val.b = tee[i].u.value.b;
+ pr_debug("%s: a[%d] = 0x%x, b[%d] = 0x%x\n", __func__,
+ i, amd->params[i].val.a,
+ i, amd->params[i].val.b);
+ }
+ }
+ return ret;
+}
+
+static int amd_params_to_tee_params(struct tee_param *tee, u32 count,
+ struct tee_operation *amd)
+{
+ int i, ret = 0;
+ u32 type;
+
+ if (!count)
+ return 0;
+
+ if (!tee || !amd || count > TEE_MAX_PARAMS)
+ return -EINVAL;
+
+ /* Assumes amd->param_types is valid */
+ for (i = 0; i < count; i++) {
+ type = TEE_PARAM_TYPE_GET(amd->param_types, i);
+ pr_debug("%s: type[%d] = 0x%x\n", __func__, i, type);
+
+ if (type == TEE_OP_PARAM_TYPE_INVALID ||
+ type > TEE_OP_PARAM_TYPE_MEMREF_INOUT)
+ return -EINVAL;
+
+ if (type == TEE_OP_PARAM_TYPE_NONE ||
+ type == TEE_OP_PARAM_TYPE_VALUE_INPUT ||
+ type == TEE_OP_PARAM_TYPE_MEMREF_INPUT)
+ continue;
+
+ /*
+ * It is assumed that buf_id remains unchanged for
+ * both open_session and invoke_cmd call
+ */
+ if (type > TEE_OP_PARAM_TYPE_MEMREF_INPUT) {
+ tee[i].u.memref.shm_offs = amd->params[i].mref.offset;
+ tee[i].u.memref.size = amd->params[i].mref.size;
+ pr_debug("%s: bufid[%d] = 0x%x, offset[%d] = 0x%x, size[%d] = 0x%x\n",
+ __func__,
+ i, amd->params[i].mref.buf_id,
+ i, amd->params[i].mref.offset,
+ i, amd->params[i].mref.size);
+ } else {
+ /* field 'c' not supported by AMD TEE */
+ tee[i].u.value.a = amd->params[i].val.a;
+ tee[i].u.value.b = amd->params[i].val.b;
+ tee[i].u.value.c = 0;
+ pr_debug("%s: a[%d] = 0x%x, b[%d] = 0x%x\n",
+ __func__,
+ i, amd->params[i].val.a,
+ i, amd->params[i].val.b);
+ }
+ }
+ return ret;
+}
+
+int handle_unload_ta(u32 ta_handle)
+{
+ struct tee_cmd_unload_ta cmd = {0};
+ u32 status;
+ int ret;
+
+ if (!ta_handle)
+ return -EINVAL;
+
+ cmd.ta_handle = ta_handle;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, (void *)&cmd,
+ sizeof(cmd), &status);
+ if (!ret && status != 0) {
+ pr_err("unload ta: status = 0x%x\n", status);
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+int handle_close_session(u32 ta_handle, u32 info)
+{
+ struct tee_cmd_close_session cmd = {0};
+ u32 status;
+ int ret;
+
+ if (ta_handle == 0)
+ return -EINVAL;
+
+ cmd.ta_handle = ta_handle;
+ cmd.session_info = info;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_CLOSE_SESSION, (void *)&cmd,
+ sizeof(cmd), &status);
+ if (!ret && status != 0) {
+ pr_err("close session: status = 0x%x\n", status);
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+void handle_unmap_shmem(u32 buf_id)
+{
+ struct tee_cmd_unmap_shared_mem cmd = {0};
+ u32 status;
+ int ret;
+
+ cmd.buf_id = buf_id;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_UNMAP_SHARED_MEM, (void *)&cmd,
+ sizeof(cmd), &status);
+ if (!ret)
+ pr_debug("unmap shared memory: buf_id %u status = 0x%x\n",
+ buf_id, status);
+}
+
+int handle_invoke_cmd(struct tee_ioctl_invoke_arg *arg, u32 sinfo,
+ struct tee_param *p)
+{
+ struct tee_cmd_invoke_cmd cmd = {0};
+ int ret;
+
+ if (!arg || (!p && arg->num_params))
+ return -EINVAL;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (arg->session == 0) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return -EINVAL;
+ }
+
+ ret = tee_params_to_amd_params(p, arg->num_params, &cmd.op);
+ if (ret) {
+ pr_err("invalid Params. Abort invoke command\n");
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return ret;
+ }
+
+ cmd.ta_handle = get_ta_handle(arg->session);
+ cmd.cmd_id = arg->func;
+ cmd.session_info = sinfo;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_INVOKE_CMD, (void *)&cmd,
+ sizeof(cmd), &arg->ret);
+ if (ret) {
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+ ret = amd_params_to_tee_params(p, arg->num_params, &cmd.op);
+ if (unlikely(ret)) {
+ pr_err("invoke command: failed to copy output\n");
+ arg->ret = TEEC_ERROR_GENERIC;
+ return ret;
+ }
+ arg->ret_origin = cmd.return_origin;
+ pr_debug("invoke command: RO = 0x%x ret = 0x%x\n",
+ arg->ret_origin, arg->ret);
+ }
+
+ return ret;
+}
+
+int handle_map_shmem(u32 count, struct shmem_desc *start, u32 *buf_id)
+{
+ struct tee_cmd_map_shared_mem *cmd;
+ phys_addr_t paddr;
+ int ret, i;
+ u32 status;
+
+ if (!count || !start || !buf_id)
+ return -EINVAL;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ /* Size must be page aligned */
+ for (i = 0; i < count; i++) {
+ if (!start[i].kaddr || (start[i].size & (PAGE_SIZE - 1))) {
+ ret = -EINVAL;
+ goto free_cmd;
+ }
+
+ if ((u64)start[i].kaddr & (PAGE_SIZE - 1)) {
+ pr_err("map shared memory: page unaligned. addr 0x%llx",
+ (u64)start[i].kaddr);
+ ret = -EINVAL;
+ goto free_cmd;
+ }
+ }
+
+ cmd->sg_list.count = count;
+
+ /* Create buffer list */
+ for (i = 0; i < count; i++) {
+ paddr = __psp_pa(start[i].kaddr);
+ cmd->sg_list.buf[i].hi_addr = upper_32_bits(paddr);
+ cmd->sg_list.buf[i].low_addr = lower_32_bits(paddr);
+ cmd->sg_list.buf[i].size = start[i].size;
+ cmd->sg_list.size += cmd->sg_list.buf[i].size;
+
+ pr_debug("buf[%d]:hi addr = 0x%x\n", i,
+ cmd->sg_list.buf[i].hi_addr);
+ pr_debug("buf[%d]:low addr = 0x%x\n", i,
+ cmd->sg_list.buf[i].low_addr);
+ pr_debug("buf[%d]:size = 0x%x\n", i, cmd->sg_list.buf[i].size);
+ pr_debug("list size = 0x%x\n", cmd->sg_list.size);
+ }
+
+ *buf_id = 0;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_MAP_SHARED_MEM, (void *)cmd,
+ sizeof(*cmd), &status);
+ if (!ret && !status) {
+ *buf_id = cmd->buf_id;
+ pr_debug("mapped buffer ID = 0x%x\n", *buf_id);
+ } else {
+ pr_err("map shared memory: status = 0x%x\n", status);
+ ret = -ENOMEM;
+ }
+
+free_cmd:
+ kfree(cmd);
+
+ return ret;
+}
+
+int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
+ struct tee_param *p)
+{
+ struct tee_cmd_open_session cmd = {0};
+ int ret;
+
+ if (!arg || !info || (!p && arg->num_params))
+ return -EINVAL;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (arg->session == 0) {
+ arg->ret = TEEC_ERROR_GENERIC;
+ return -EINVAL;
+ }
+
+ ret = tee_params_to_amd_params(p, arg->num_params, &cmd.op);
+ if (ret) {
+ pr_err("invalid Params. Abort open session\n");
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return ret;
+ }
+
+ cmd.ta_handle = get_ta_handle(arg->session);
+ *info = 0;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_OPEN_SESSION, (void *)&cmd,
+ sizeof(cmd), &arg->ret);
+ if (ret) {
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+ ret = amd_params_to_tee_params(p, arg->num_params, &cmd.op);
+ if (unlikely(ret)) {
+ pr_err("open session: failed to copy output\n");
+ arg->ret = TEEC_ERROR_GENERIC;
+ return ret;
+ }
+ arg->ret_origin = cmd.return_origin;
+ *info = cmd.session_info;
+ pr_debug("open session: session info = 0x%x\n", *info);
+ }
+
+ pr_debug("open session: ret = 0x%x RO = 0x%x\n", arg->ret,
+ arg->ret_origin);
+
+ return ret;
+}
+
+int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
+{
+ struct tee_cmd_load_ta cmd = {0};
+ phys_addr_t blob;
+ int ret;
+
+ if (size == 0 || !data || !arg)
+ return -EINVAL;
+
+ blob = __psp_pa(data);
+ if (blob & (PAGE_SIZE - 1)) {
+ pr_err("load TA: page unaligned. blob 0x%llx", blob);
+ return -EINVAL;
+ }
+
+ cmd.hi_addr = upper_32_bits(blob);
+ cmd.low_addr = lower_32_bits(blob);
+ cmd.size = size;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&cmd,
+ sizeof(cmd), &arg->ret);
+ if (ret) {
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+ set_session_id(cmd.ta_handle, 0, &arg->session);
+ }
+
+ pr_debug("load TA: TA handle = 0x%x, RO = 0x%x, ret = 0x%x\n",
+ cmd.ta_handle, arg->ret_origin, arg->ret);
+
+ return 0;
+}
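handle_map_shmem() only accepts buffers that start on a page boundary and whose size is a multiple of PAGE_SIZE. The kernel-style fragment below is a hypothetical caller (illustration only; the function name example_map_one_page is invented) showing how such a buffer could be obtained and mapped. In the driver itself this path is reached through pool_op_alloc() and amdtee_map_shmem().

/*
 * Hypothetical caller sketch: map one zeroed page into the TEE.
 * Not part of the patch; shows the alignment rules handle_map_shmem()
 * enforces.
 */
static int example_map_one_page(u32 *buf_id)
{
	struct shmem_desc desc;
	unsigned long va;
	int ret;

	/* __get_free_pages() returns a page-aligned buffer */
	va = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
	if (!va)
		return -ENOMEM;

	desc.kaddr = (void *)va;
	desc.size = PAGE_SIZE;		/* must be a multiple of PAGE_SIZE */

	ret = handle_map_shmem(1, &desc, buf_id);
	if (ret)
		free_pages(va, 0);

	return ret;
}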
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
new file mode 100644
index 000000000000..6370bb55f512
--- /dev/null
+++ b/drivers/tee/amdtee/core.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include "amdtee_private.h"
+#include "../tee_private.h"
+#include <linux/psp-tee.h>
+
+static struct amdtee_driver_data *drv_data;
+static DEFINE_MUTEX(session_list_mutex);
+static struct amdtee_shm_context shmctx;
+
+static void amdtee_get_version(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers)
+{
+ struct tee_ioctl_version_data v = {
+ .impl_id = TEE_IMPL_ID_AMDTEE,
+ .impl_caps = 0,
+ .gen_caps = TEE_GEN_CAP_GP,
+ };
+ *vers = v;
+}
+
+static int amdtee_open(struct tee_context *ctx)
+{
+ struct amdtee_context_data *ctxdata;
+
+ ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
+ if (!ctxdata)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ctxdata->sess_list);
+ INIT_LIST_HEAD(&shmctx.shmdata_list);
+
+ ctx->data = ctxdata;
+ return 0;
+}
+
+static void release_session(struct amdtee_session *sess)
+{
+ int i;
+
+ /* Close any open session */
+ for (i = 0; i < TEE_NUM_SESSIONS; ++i) {
+ /* Check if session entry 'i' is valid */
+ if (!test_bit(i, sess->sess_mask))
+ continue;
+
+ handle_close_session(sess->ta_handle, sess->session_info[i]);
+ }
+
+ /* Unload Trusted Application once all sessions are closed */
+ handle_unload_ta(sess->ta_handle);
+ kfree(sess);
+}
+
+static void amdtee_release(struct tee_context *ctx)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+
+ if (!ctxdata)
+ return;
+
+ while (true) {
+ struct amdtee_session *sess;
+
+ sess = list_first_entry_or_null(&ctxdata->sess_list,
+ struct amdtee_session,
+ list_node);
+
+ if (!sess)
+ break;
+
+ list_del(&sess->list_node);
+ release_session(sess);
+ }
+ kfree(ctxdata);
+
+ ctx->data = NULL;
+}
+
+/**
+ * alloc_session() - Allocate a session structure
+ * @ctxdata: TEE Context data structure
+ * @session: Session ID for which 'struct amdtee_session' structure is to be
+ * allocated.
+ *
+ * Scans the TEE context's session list to check whether the TA is already
+ * loaded into the TEE. If so, returns the existing 'session' structure for
+ * that TA. Otherwise allocates and initializes a new 'session' structure and
+ * adds it to the context's session list.
+ *
+ * The caller must hold session_list_mutex.
+ *
+ * Returns:
+ * 'struct amdtee_session *' on success and NULL on failure.
+ */
+static struct amdtee_session *alloc_session(struct amdtee_context_data *ctxdata,
+ u32 session)
+{
+ struct amdtee_session *sess;
+ u32 ta_handle = get_ta_handle(session);
+
+ /* Scan the session list to check whether the TA is already loaded into the TEE */
+ list_for_each_entry(sess, &ctxdata->sess_list, list_node)
+ if (sess->ta_handle == ta_handle) {
+ kref_get(&sess->refcount);
+ return sess;
+ }
+
+ /* Allocate a new session and add to list */
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (sess) {
+ sess->ta_handle = ta_handle;
+ kref_init(&sess->refcount);
+ spin_lock_init(&sess->lock);
+ list_add(&sess->list_node, &ctxdata->sess_list);
+ }
+
+ return sess;
+}
+
+/* Requires mutex to be held */
+static struct amdtee_session *find_session(struct amdtee_context_data *ctxdata,
+ u32 session)
+{
+ u32 ta_handle = get_ta_handle(session);
+ u32 index = get_session_index(session);
+ struct amdtee_session *sess;
+
+ list_for_each_entry(sess, &ctxdata->sess_list, list_node)
+ if (ta_handle == sess->ta_handle &&
+ test_bit(index, sess->sess_mask))
+ return sess;
+
+ return NULL;
+}
+
+u32 get_buffer_id(struct tee_shm *shm)
+{
+ u32 buf_id = 0;
+ struct amdtee_shm_data *shmdata;
+
+ list_for_each_entry(shmdata, &shmctx.shmdata_list, shm_node)
+ if (shmdata->kaddr == shm->kaddr) {
+ buf_id = shmdata->buf_id;
+ break;
+ }
+
+ return buf_id;
+}
+
+static DEFINE_MUTEX(drv_mutex);
+static int copy_ta_binary(struct tee_context *ctx, void *ptr, void **ta,
+ size_t *ta_size)
+{
+ const struct firmware *fw;
+ char fw_name[TA_PATH_MAX];
+ struct {
+ u32 lo;
+ u16 mid;
+ u16 hi_ver;
+ u8 seq_n[8];
+ } *uuid = ptr;
+ int n, rc = 0;
+
+ n = snprintf(fw_name, TA_PATH_MAX,
+ "%s/%08x-%04x-%04x-%02x%02x%02x%02x%02x%02x%02x%02x.bin",
+ TA_LOAD_PATH, uuid->lo, uuid->mid, uuid->hi_ver,
+ uuid->seq_n[0], uuid->seq_n[1],
+ uuid->seq_n[2], uuid->seq_n[3],
+ uuid->seq_n[4], uuid->seq_n[5],
+ uuid->seq_n[6], uuid->seq_n[7]);
+ if (n < 0 || n >= TA_PATH_MAX) {
+ pr_err("failed to get firmware name\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&drv_mutex);
+ n = request_firmware(&fw, fw_name, &ctx->teedev->dev);
+ if (n) {
+ pr_err("failed to load firmware %s\n", fw_name);
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ *ta_size = roundup(fw->size, PAGE_SIZE);
+ *ta = (void *)__get_free_pages(GFP_KERNEL, get_order(*ta_size));
+ if (!*ta) {
+ pr_err("%s: get_free_pages failed\n", __func__);
+ rc = -ENOMEM;
+ goto rel_fw;
+ }
+
+ memcpy(*ta, fw->data, fw->size);
+rel_fw:
+ release_firmware(fw);
+unlock:
+ mutex_unlock(&drv_mutex);
+ return rc;
+}
+
+int amdtee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+ struct amdtee_session *sess = NULL;
+ u32 session_info;
+ size_t ta_size;
+ int rc, i;
+ void *ta;
+
+ if (arg->clnt_login != TEE_IOCTL_LOGIN_PUBLIC) {
+ pr_err("unsupported client login method\n");
+ return -EINVAL;
+ }
+
+ rc = copy_ta_binary(ctx, &arg->uuid[0], &ta, &ta_size);
+ if (rc) {
+ pr_err("failed to copy TA binary\n");
+ return rc;
+ }
+
+ /* Load the TA binary into TEE environment */
+ handle_load_ta(ta, ta_size, arg);
+ if (arg->ret == TEEC_SUCCESS) {
+ mutex_lock(&session_list_mutex);
+ sess = alloc_session(ctxdata, arg->session);
+ mutex_unlock(&session_list_mutex);
+ }
+
+ if (arg->ret != TEEC_SUCCESS)
+ goto out;
+
+ if (!sess) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Find an empty session index for the given TA */
+ spin_lock(&sess->lock);
+ i = find_first_zero_bit(sess->sess_mask, TEE_NUM_SESSIONS);
+ if (i < TEE_NUM_SESSIONS)
+ set_bit(i, sess->sess_mask);
+ spin_unlock(&sess->lock);
+
+ if (i >= TEE_NUM_SESSIONS) {
+ pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Open session with loaded TA */
+ handle_open_session(arg, &session_info, param);
+
+ if (arg->ret == TEEC_SUCCESS) {
+ sess->session_info[i] = session_info;
+ set_session_id(sess->ta_handle, i, &arg->session);
+ } else {
+ pr_err("open_session failed %d\n", arg->ret);
+ spin_lock(&sess->lock);
+ clear_bit(i, sess->sess_mask);
+ spin_unlock(&sess->lock);
+ }
+out:
+ free_pages((u64)ta, get_order(ta_size));
+ return rc;
+}
+
+static void destroy_session(struct kref *ref)
+{
+ struct amdtee_session *sess = container_of(ref, struct amdtee_session,
+ refcount);
+
+ /* Unload the TA from TEE */
+ handle_unload_ta(sess->ta_handle);
+ mutex_lock(&session_list_mutex);
+ list_del(&sess->list_node);
+ mutex_unlock(&session_list_mutex);
+ kfree(sess);
+}
+
+int amdtee_close_session(struct tee_context *ctx, u32 session)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+ u32 i, ta_handle, session_info;
+ struct amdtee_session *sess;
+
+ pr_debug("%s: sid = 0x%x\n", __func__, session);
+
+ /*
+ * Check that the session is valid and clear the session
+ * usage bit
+ */
+ mutex_lock(&session_list_mutex);
+ sess = find_session(ctxdata, session);
+ if (sess) {
+ ta_handle = get_ta_handle(session);
+ i = get_session_index(session);
+ session_info = sess->session_info[i];
+ spin_lock(&sess->lock);
+ clear_bit(i, sess->sess_mask);
+ spin_unlock(&sess->lock);
+ }
+ mutex_unlock(&session_list_mutex);
+
+ if (!sess)
+ return -EINVAL;
+
+ /* Close the session */
+ handle_close_session(ta_handle, session_info);
+
+ kref_put(&sess->refcount, destroy_session);
+
+ return 0;
+}
+
+int amdtee_map_shmem(struct tee_shm *shm)
+{
+ struct shmem_desc shmem;
+ struct amdtee_shm_data *shmnode;
+ int rc, count;
+ u32 buf_id;
+
+ if (!shm)
+ return -EINVAL;
+
+ shmnode = kmalloc(sizeof(*shmnode), GFP_KERNEL);
+ if (!shmnode)
+ return -ENOMEM;
+
+ count = 1;
+ shmem.kaddr = shm->kaddr;
+ shmem.size = shm->size;
+
+ /*
+ * Send a MAP command to the TEE and get the corresponding
+ * buffer ID
+ */
+ rc = handle_map_shmem(count, &shmem, &buf_id);
+ if (rc) {
+ pr_err("map_shmem failed: ret = %d\n", rc);
+ kfree(shmnode);
+ return rc;
+ }
+
+ shmnode->kaddr = shm->kaddr;
+ shmnode->buf_id = buf_id;
+ list_add(&shmnode->shm_node, &shmctx.shmdata_list);
+
+ pr_debug("buf_id :[%x] kaddr[%p]\n", shmnode->buf_id, shmnode->kaddr);
+
+ return 0;
+}
+
+void amdtee_unmap_shmem(struct tee_shm *shm)
+{
+ struct amdtee_shm_data *shmnode;
+ u32 buf_id;
+
+ if (!shm)
+ return;
+
+ buf_id = get_buffer_id(shm);
+ /* Unmap the shared memory from TEE */
+ handle_unmap_shmem(buf_id);
+
+ list_for_each_entry(shmnode, &shmctx.shmdata_list, shm_node)
+ if (buf_id == shmnode->buf_id) {
+ list_del(&shmnode->shm_node);
+ kfree(shmnode);
+ break;
+ }
+}
+
+int amdtee_invoke_func(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+ struct amdtee_session *sess;
+ u32 i, session_info;
+
+ /* Check that the session is valid */
+ mutex_lock(&session_list_mutex);
+ sess = find_session(ctxdata, arg->session);
+ if (sess) {
+ i = get_session_index(arg->session);
+ session_info = sess->session_info[i];
+ }
+ mutex_unlock(&session_list_mutex);
+
+ if (!sess)
+ return -EINVAL;
+
+ handle_invoke_cmd(arg, session_info, param);
+
+ return 0;
+}
+
+int amdtee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
+{
+ return -EINVAL;
+}
+
+static const struct tee_driver_ops amdtee_ops = {
+ .get_version = amdtee_get_version,
+ .open = amdtee_open,
+ .release = amdtee_release,
+ .open_session = amdtee_open_session,
+ .close_session = amdtee_close_session,
+ .invoke_func = amdtee_invoke_func,
+ .cancel_req = amdtee_cancel_req,
+};
+
+static const struct tee_desc amdtee_desc = {
+ .name = DRIVER_NAME "-clnt",
+ .ops = &amdtee_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init amdtee_driver_init(void)
+{
+ struct tee_device *teedev;
+ struct tee_shm_pool *pool;
+ struct amdtee *amdtee;
+ int rc;
+
+ rc = psp_check_tee_status();
+ if (rc) {
+ pr_err("amd-tee driver: tee not present\n");
+ return rc;
+ }
+
+ drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data)
+ return -ENOMEM;
+
+ amdtee = kzalloc(sizeof(*amdtee), GFP_KERNEL);
+ if (!amdtee) {
+ rc = -ENOMEM;
+ goto err_kfree_drv_data;
+ }
+
+ pool = amdtee_config_shm();
+ if (IS_ERR(pool)) {
+ pr_err("shared pool configuration error\n");
+ rc = PTR_ERR(pool);
+ goto err_kfree_amdtee;
+ }
+
+ teedev = tee_device_alloc(&amdtee_desc, NULL, pool, amdtee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err_free_pool;
+ }
+ amdtee->teedev = teedev;
+
+ rc = tee_device_register(amdtee->teedev);
+ if (rc)
+ goto err_device_unregister;
+
+ amdtee->pool = pool;
+
+ drv_data->amdtee = amdtee;
+
+ pr_info("amd-tee driver initialization successful\n");
+ return 0;
+
+err_device_unregister:
+ tee_device_unregister(amdtee->teedev);
+
+err_free_pool:
+ tee_shm_pool_free(pool);
+
+err_kfree_amdtee:
+ kfree(amdtee);
+
+err_kfree_drv_data:
+ kfree(drv_data);
+ drv_data = NULL;
+
+ pr_err("amd-tee driver initialization failed\n");
+ return rc;
+}
+module_init(amdtee_driver_init);
+
+static void __exit amdtee_driver_exit(void)
+{
+ struct amdtee *amdtee;
+
+ if (!drv_data || !drv_data->amdtee)
+ return;
+
+ amdtee = drv_data->amdtee;
+
+ tee_device_unregister(amdtee->teedev);
+ tee_shm_pool_free(amdtee->pool);
+}
+module_exit(amdtee_driver_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION("AMD-TEE driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("Dual MIT/GPL");
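core.c registers a generic TEE client device, so user space reaches the driver through the standard /dev/teeN interface and amdtee_get_version() reports TEE_IMPL_ID_AMDTEE with TEE_GEN_CAP_GP. The minimal user-space sketch below queries the version ioctl (illustration only, not part of the patch; /dev/tee0 is assumed to be the node assigned at registration).

/* Standalone user-space sketch: query the TEE implementation ID. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/tee.h>

int main(void)
{
	struct tee_ioctl_version_data vers;
	int fd = open("/dev/tee0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/tee0");
		return 1;
	}

	if (ioctl(fd, TEE_IOC_VERSION, &vers) == 0)
		printf("impl_id=%u gen_caps=0x%x\n", vers.impl_id, vers.gen_caps);
	else
		perror("TEE_IOC_VERSION");

	close(fd);
	return 0;
}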
diff --git a/drivers/tee/amdtee/shm_pool.c b/drivers/tee/amdtee/shm_pool.c
new file mode 100644
index 000000000000..065854e2db18
--- /dev/null
+++ b/drivers/tee/amdtee/shm_pool.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/psp-sev.h>
+#include "amdtee_private.h"
+
+static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm,
+ size_t size)
+{
+ unsigned int order = get_order(size);
+ unsigned long va;
+ int rc;
+
+ va = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!va)
+ return -ENOMEM;
+
+ shm->kaddr = (void *)va;
+ shm->paddr = __psp_pa((void *)va);
+ shm->size = PAGE_SIZE << order;
+
+ /* Map the allocated memory into the TEE */
+ rc = amdtee_map_shmem(shm);
+ if (rc) {
+ free_pages(va, order);
+ shm->kaddr = NULL;
+ return rc;
+ }
+
+ return 0;
+}
+
+static void pool_op_free(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm)
+{
+ /* Unmap the shared memory from TEE */
+ amdtee_unmap_shmem(shm);
+ free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+ shm->kaddr = NULL;
+}
+
+static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+ kfree(poolm);
+}
+
+static const struct tee_shm_pool_mgr_ops pool_ops = {
+ .alloc = pool_op_alloc,
+ .free = pool_op_free,
+ .destroy_poolmgr = pool_op_destroy_poolmgr,
+};
+
+static struct tee_shm_pool_mgr *pool_mem_mgr_alloc(void)
+{
+ struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+
+ if (!mgr)
+ return ERR_PTR(-ENOMEM);
+
+ mgr->ops = &pool_ops;
+
+ return mgr;
+}
+
+struct tee_shm_pool *amdtee_config_shm(void)
+{
+ struct tee_shm_pool_mgr *priv_mgr;
+ struct tee_shm_pool_mgr *dmabuf_mgr;
+ void *rc;
+
+ rc = pool_mem_mgr_alloc();
+ if (IS_ERR(rc))
+ return rc;
+ priv_mgr = rc;
+
+ rc = pool_mem_mgr_alloc();
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ return rc;
+ }
+ dmabuf_mgr = rc;
+
+ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ tee_shm_pool_mgr_destroy(dmabuf_mgr);
+ }
+
+ return rc;
+}
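pool_op_alloc() rounds every request up to a power-of-two number of pages (shm->size = PAGE_SIZE << order), which also yields the page-aligned, page-multiple buffers that handle_map_shmem() requires. The standalone sketch below shows the rounding (illustration only, not part of the patch; PAGE_SIZE and the get_order() re-implementation are assumptions for a 4 KiB-page system, the kernel provides the real macros).

/* Standalone illustration of the size rounding done by pool_op_alloc(). */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	/* Smallest order such that PAGE_SIZE << order covers the request */
	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long sizes[] = { 100, 4096, 5000, 16384 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned int order = get_order(sizes[i]);

		printf("request %5lu -> order %u -> %lu bytes\n",
		       sizes[i], order, PAGE_SIZE << order);
	}
	return 0;
}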