Diffstat (limited to 'drivers/crypto/marvell')
-rw-r--r--  drivers/crypto/marvell/Kconfig                                4
-rw-r--r--  drivers/crypto/marvell/cesa/cesa.c                           54
-rw-r--r--  drivers/crypto/marvell/cesa/cipher.c                         24
-rw-r--r--  drivers/crypto/marvell/cesa/hash.c                           12
-rw-r--r--  drivers/crypto/marvell/octeontx/otx_cptvf_algs.c            273
-rw-r--r--  drivers/crypto/marvell/octeontx/otx_cptvf_algs.h              1
-rw-r--r--  drivers/crypto/marvell/octeontx2/cn10k_cpt.c                 18
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c           9
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c      20
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptlf.c                20
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c           16
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c          264
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c           15
13 files changed, 276 insertions(+), 454 deletions(-)
diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig
index a48591af12d0..4c25a78ab3ed 100644
--- a/drivers/crypto/marvell/Kconfig
+++ b/drivers/crypto/marvell/Kconfig
@@ -7,7 +7,7 @@ config CRYPTO_DEV_MARVELL
config CRYPTO_DEV_MARVELL_CESA
tristate "Marvell's Cryptographic Engine driver"
- depends on PLAT_ORION || ARCH_MVEBU
+ depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST
select CRYPTO_LIB_AES
select CRYPTO_LIB_DES
select CRYPTO_SKCIPHER
@@ -28,6 +28,7 @@ config CRYPTO_DEV_OCTEONTX_CPT
select CRYPTO_SKCIPHER
select CRYPTO_HASH
select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
select CRYPTO_DEV_MARVELL
help
This driver allows you to utilize the Marvell Cryptographic
@@ -47,6 +48,7 @@ config CRYPTO_DEV_OCTEONTX2_CPT
select CRYPTO_SKCIPHER
select CRYPTO_HASH
select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
select NET_DEVLINK
help
This driver allows you to utilize the Marvell Cryptographic
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 5fd31ba715c2..fa08f10e6f3f 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -375,7 +375,6 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
struct mv_cesa_engine *engine = &cesa->engines[idx];
- const char *res_name = "sram";
struct resource *res;
engine->pool = of_gen_pool_get(cesa->dev->of_node,
@@ -391,19 +390,7 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
return -ENOMEM;
}
- if (cesa->caps->nengines > 1) {
- if (!idx)
- res_name = "sram0";
- else
- res_name = "sram1";
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- res_name);
- if (!res || resource_size(res) < cesa->sram_size)
- return -EINVAL;
-
- engine->sram = devm_ioremap_resource(cesa->dev, res);
+ engine->sram = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
if (IS_ERR(engine->sram))
return PTR_ERR(engine->sram);
@@ -510,25 +497,21 @@ static int mv_cesa_probe(struct platform_device *pdev)
* if the clock does not exist.
*/
snprintf(res_name, sizeof(res_name), "cesa%u", i);
- engine->clk = devm_clk_get(dev, res_name);
+ engine->clk = devm_clk_get_optional_enabled(dev, res_name);
if (IS_ERR(engine->clk)) {
- engine->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(engine->clk))
- engine->clk = NULL;
+ engine->clk = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(engine->clk)) {
+ ret = PTR_ERR(engine->clk);
+ goto err_cleanup;
+ }
}
snprintf(res_name, sizeof(res_name), "cesaz%u", i);
- engine->zclk = devm_clk_get(dev, res_name);
- if (IS_ERR(engine->zclk))
- engine->zclk = NULL;
-
- ret = clk_prepare_enable(engine->clk);
- if (ret)
- goto err_cleanup;
-
- ret = clk_prepare_enable(engine->zclk);
- if (ret)
+ engine->zclk = devm_clk_get_optional_enabled(dev, res_name);
+ if (IS_ERR(engine->zclk)) {
+ ret = PTR_ERR(engine->zclk);
goto err_cleanup;
+ }
engine->regs = cesa->regs + CESA_ENGINE_OFF(i);
@@ -570,13 +553,8 @@ static int mv_cesa_probe(struct platform_device *pdev)
return 0;
err_cleanup:
- for (i = 0; i < caps->nengines; i++) {
- clk_disable_unprepare(cesa->engines[i].zclk);
- clk_disable_unprepare(cesa->engines[i].clk);
+ for (i = 0; i < caps->nengines; i++)
mv_cesa_put_sram(pdev, i);
- if (cesa->engines[i].irq > 0)
- irq_set_affinity_hint(cesa->engines[i].irq, NULL);
- }
return ret;
}
@@ -588,12 +566,8 @@ static void mv_cesa_remove(struct platform_device *pdev)
mv_cesa_remove_algs(cesa);
- for (i = 0; i < cesa->caps->nengines; i++) {
- clk_disable_unprepare(cesa->engines[i].zclk);
- clk_disable_unprepare(cesa->engines[i].clk);
+ for (i = 0; i < cesa->caps->nengines; i++)
mv_cesa_put_sram(pdev, i);
- irq_set_affinity_hint(cesa->engines[i].irq, NULL);
- }
}
static const struct platform_device_id mv_cesa_plat_id_table[] = {
@@ -604,7 +578,7 @@ MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);
static struct platform_driver marvell_cesa = {
.probe = mv_cesa_probe,
- .remove_new = mv_cesa_remove,
+ .remove = mv_cesa_remove,
.id_table = mv_cesa_plat_id_table,
.driver = {
.name = "marvell-cesa",
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index 0f37dfd42d85..cf62db50f958 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -489,7 +489,7 @@ static int mv_cesa_des_op(struct skcipher_request *req,
static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -500,7 +500,7 @@ static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -543,7 +543,7 @@ static int mv_cesa_cbc_des_op(struct skcipher_request *req,
static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);
@@ -552,7 +552,7 @@ static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);
@@ -596,7 +596,7 @@ static int mv_cesa_des3_op(struct skcipher_request *req,
static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -608,7 +608,7 @@ static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -649,7 +649,7 @@ static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_CBC |
@@ -661,7 +661,7 @@ static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_CBC |
@@ -725,7 +725,7 @@ static int mv_cesa_aes_op(struct skcipher_request *req,
static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -736,7 +736,7 @@ static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -778,7 +778,7 @@ static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);
@@ -787,7 +787,7 @@ static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index 8d84ad45571c..f150861ceaf6 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -947,7 +947,7 @@ struct ahash_alg mv_md5_alg = {
.base = {
.cra_name = "md5",
.cra_driver_name = "mv-md5",
- .cra_priority = 300,
+ .cra_priority = 0,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
@@ -1018,7 +1018,7 @@ struct ahash_alg mv_sha1_alg = {
.base = {
.cra_name = "sha1",
.cra_driver_name = "mv-sha1",
- .cra_priority = 300,
+ .cra_priority = 0,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
@@ -1092,7 +1092,7 @@ struct ahash_alg mv_sha256_alg = {
.base = {
.cra_name = "sha256",
.cra_driver_name = "mv-sha256",
- .cra_priority = 300,
+ .cra_priority = 0,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
@@ -1302,7 +1302,7 @@ struct ahash_alg mv_ahmac_md5_alg = {
.base = {
.cra_name = "hmac(md5)",
.cra_driver_name = "mv-hmac-md5",
- .cra_priority = 300,
+ .cra_priority = 0,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
@@ -1373,7 +1373,7 @@ struct ahash_alg mv_ahmac_sha1_alg = {
.base = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "mv-hmac-sha1",
- .cra_priority = 300,
+ .cra_priority = 0,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
@@ -1444,7 +1444,7 @@ struct ahash_alg mv_ahmac_sha256_alg = {
.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "mv-hmac-sha256",
- .cra_priority = 300,
+ .cra_priority = 0,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
index 3c5d577d8f0d..096be42e9d03 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
@@ -17,7 +17,6 @@
#include <crypto/sha2.h>
#include <crypto/xts.h>
#include <crypto/scatterwalk.h>
-#include <linux/rtnetlink.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx_cptvf.h"
@@ -66,6 +65,8 @@ static struct cpt_device_table ae_devices = {
.count = ATOMIC_INIT(0)
};
+static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg);
+
static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
int count, ret = 0;
@@ -509,44 +510,61 @@ static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
ctx->cipher_type = cipher_type;
ctx->mac_type = mac_type;
+ switch (ctx->mac_type) {
+ case OTX_CPT_SHA1:
+ ctx->hashalg = crypto_alloc_shash("sha1", 0, 0);
+ break;
+
+ case OTX_CPT_SHA256:
+ ctx->hashalg = crypto_alloc_shash("sha256", 0, 0);
+ break;
+
+ case OTX_CPT_SHA384:
+ ctx->hashalg = crypto_alloc_shash("sha384", 0, 0);
+ break;
+
+ case OTX_CPT_SHA512:
+ ctx->hashalg = crypto_alloc_shash("sha512", 0, 0);
+ break;
+ }
+
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+
+ crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx));
+
+ if (!ctx->hashalg)
+ return 0;
+
/*
* When selected cipher is NULL we use HMAC opcode instead of
* FLEXICRYPTO opcode therefore we don't need to use HASH algorithms
* for calculating ipad and opad
*/
if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) {
- switch (ctx->mac_type) {
- case OTX_CPT_SHA1:
- ctx->hashalg = crypto_alloc_shash("sha1", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(ctx->hashalg))
- return PTR_ERR(ctx->hashalg);
- break;
+ int ss = crypto_shash_statesize(ctx->hashalg);
- case OTX_CPT_SHA256:
- ctx->hashalg = crypto_alloc_shash("sha256", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(ctx->hashalg))
- return PTR_ERR(ctx->hashalg);
- break;
-
- case OTX_CPT_SHA384:
- ctx->hashalg = crypto_alloc_shash("sha384", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(ctx->hashalg))
- return PTR_ERR(ctx->hashalg);
- break;
+ ctx->ipad = kzalloc(ss, GFP_KERNEL);
+ if (!ctx->ipad) {
+ crypto_free_shash(ctx->hashalg);
+ return -ENOMEM;
+ }
- case OTX_CPT_SHA512:
- ctx->hashalg = crypto_alloc_shash("sha512", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(ctx->hashalg))
- return PTR_ERR(ctx->hashalg);
- break;
+ ctx->opad = kzalloc(ss, GFP_KERNEL);
+ if (!ctx->opad) {
+ kfree(ctx->ipad);
+ crypto_free_shash(ctx->hashalg);
+ return -ENOMEM;
}
}
- crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx));
+ ctx->sdesc = alloc_sdesc(ctx->hashalg);
+ if (!ctx->sdesc) {
+ kfree(ctx->opad);
+ kfree(ctx->ipad);
+ crypto_free_shash(ctx->hashalg);
+ return -ENOMEM;
+ }
return 0;
}
@@ -602,8 +620,7 @@ static void otx_cpt_aead_exit(struct crypto_aead *tfm)
kfree(ctx->ipad);
kfree(ctx->opad);
- if (ctx->hashalg)
- crypto_free_shash(ctx->hashalg);
+ crypto_free_shash(ctx->hashalg);
kfree(ctx->sdesc);
}
@@ -699,7 +716,7 @@ static inline void swap_data64(void *buf, u32 len)
*dst = cpu_to_be64p(src);
}
-static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
+static int swap_pad(u8 mac_type, u8 *pad)
{
struct sha512_state *sha512;
struct sha256_state *sha256;
@@ -707,22 +724,19 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
switch (mac_type) {
case OTX_CPT_SHA1:
- sha1 = (struct sha1_state *) in_pad;
+ sha1 = (struct sha1_state *)pad;
swap_data32(sha1->state, SHA1_DIGEST_SIZE);
- memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
break;
case OTX_CPT_SHA256:
- sha256 = (struct sha256_state *) in_pad;
+ sha256 = (struct sha256_state *)pad;
swap_data32(sha256->state, SHA256_DIGEST_SIZE);
- memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
break;
case OTX_CPT_SHA384:
case OTX_CPT_SHA512:
- sha512 = (struct sha512_state *) in_pad;
+ sha512 = (struct sha512_state *)pad;
swap_data64(sha512->state, SHA512_DIGEST_SIZE);
- memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
break;
default:
@@ -732,55 +746,53 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
return 0;
}
-static int aead_hmac_init(struct crypto_aead *cipher)
+static int aead_hmac_init(struct crypto_aead *cipher,
+ struct crypto_authenc_keys *keys)
{
struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
- int state_size = crypto_shash_statesize(ctx->hashalg);
int ds = crypto_shash_digestsize(ctx->hashalg);
int bs = crypto_shash_blocksize(ctx->hashalg);
- int authkeylen = ctx->auth_key_len;
+ int authkeylen = keys->authkeylen;
u8 *ipad = NULL, *opad = NULL;
- int ret = 0, icount = 0;
+ int icount = 0;
+ int ret;
- ctx->sdesc = alloc_sdesc(ctx->hashalg);
- if (!ctx->sdesc)
- return -ENOMEM;
+ if (authkeylen > bs) {
+ ret = crypto_shash_digest(&ctx->sdesc->shash, keys->authkey,
+ authkeylen, ctx->key);
+ if (ret)
+ return ret;
+ authkeylen = ds;
+ } else
+ memcpy(ctx->key, keys->authkey, authkeylen);
- ctx->ipad = kzalloc(bs, GFP_KERNEL);
- if (!ctx->ipad) {
- ret = -ENOMEM;
- goto calc_fail;
- }
+ ctx->enc_key_len = keys->enckeylen;
+ ctx->auth_key_len = authkeylen;
- ctx->opad = kzalloc(bs, GFP_KERNEL);
- if (!ctx->opad) {
- ret = -ENOMEM;
- goto calc_fail;
- }
+ if (ctx->cipher_type == OTX_CPT_CIPHER_NULL)
+ return keys->enckeylen ? -EINVAL : 0;
- ipad = kzalloc(state_size, GFP_KERNEL);
- if (!ipad) {
- ret = -ENOMEM;
- goto calc_fail;
+ switch (keys->enckeylen) {
+ case AES_KEYSIZE_128:
+ ctx->key_type = OTX_CPT_AES_128_BIT;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->key_type = OTX_CPT_AES_192_BIT;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->key_type = OTX_CPT_AES_256_BIT;
+ break;
+ default:
+ /* Invalid key length */
+ return -EINVAL;
}
- opad = kzalloc(state_size, GFP_KERNEL);
- if (!opad) {
- ret = -ENOMEM;
- goto calc_fail;
- }
+ memcpy(ctx->key + authkeylen, keys->enckey, keys->enckeylen);
- if (authkeylen > bs) {
- ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
- authkeylen, ipad);
- if (ret)
- goto calc_fail;
-
- authkeylen = ds;
- } else {
- memcpy(ipad, ctx->key, authkeylen);
- }
+ ipad = ctx->ipad;
+ opad = ctx->opad;
+ memcpy(ipad, ctx->key, authkeylen);
memset(ipad + authkeylen, 0, bs - authkeylen);
memcpy(opad, ipad, bs);
@@ -798,7 +810,7 @@ static int aead_hmac_init(struct crypto_aead *cipher)
crypto_shash_init(&ctx->sdesc->shash);
crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
crypto_shash_export(&ctx->sdesc->shash, ipad);
- ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
+ ret = swap_pad(ctx->mac_type, ipad);
if (ret)
goto calc_fail;
@@ -806,25 +818,9 @@ static int aead_hmac_init(struct crypto_aead *cipher)
crypto_shash_init(&ctx->sdesc->shash);
crypto_shash_update(&ctx->sdesc->shash, opad, bs);
crypto_shash_export(&ctx->sdesc->shash, opad);
- ret = copy_pad(ctx->mac_type, ctx->opad, opad);
- if (ret)
- goto calc_fail;
-
- kfree(ipad);
- kfree(opad);
-
- return 0;
+ ret = swap_pad(ctx->mac_type, opad);
calc_fail:
- kfree(ctx->ipad);
- ctx->ipad = NULL;
- kfree(ctx->opad);
- ctx->opad = NULL;
- kfree(ipad);
- kfree(opad);
- kfree(ctx->sdesc);
- ctx->sdesc = NULL;
-
return ret;
}
@@ -832,57 +828,15 @@ static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
- struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
- struct crypto_authenc_key_param *param;
- int enckeylen = 0, authkeylen = 0;
- struct rtattr *rta = (void *)key;
- int status = -EINVAL;
-
- if (!RTA_OK(rta, keylen))
- goto badkey;
-
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
-
- if (RTA_PAYLOAD(rta) < sizeof(*param))
- goto badkey;
-
- param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
- if (keylen < enckeylen)
- goto badkey;
-
- if (keylen > OTX_CPT_MAX_KEY_SIZE)
- goto badkey;
-
- authkeylen = keylen - enckeylen;
- memcpy(ctx->key, key, keylen);
-
- switch (enckeylen) {
- case AES_KEYSIZE_128:
- ctx->key_type = OTX_CPT_AES_128_BIT;
- break;
- case AES_KEYSIZE_192:
- ctx->key_type = OTX_CPT_AES_192_BIT;
- break;
- case AES_KEYSIZE_256:
- ctx->key_type = OTX_CPT_AES_256_BIT;
- break;
- default:
- /* Invalid key length */
- goto badkey;
- }
-
- ctx->enc_key_len = enckeylen;
- ctx->auth_key_len = authkeylen;
+ struct crypto_authenc_keys authenc_keys;
+ int status;
- status = aead_hmac_init(cipher);
+ status = crypto_authenc_extractkeys(&authenc_keys, key, keylen);
if (status)
goto badkey;
- return 0;
+ status = aead_hmac_init(cipher, &authenc_keys);
+
badkey:
return status;
}
@@ -891,36 +845,7 @@ static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
- struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
- struct crypto_authenc_key_param *param;
- struct rtattr *rta = (void *)key;
- int enckeylen = 0;
-
- if (!RTA_OK(rta, keylen))
- goto badkey;
-
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
-
- if (RTA_PAYLOAD(rta) < sizeof(*param))
- goto badkey;
-
- param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
- if (enckeylen != 0)
- goto badkey;
-
- if (keylen > OTX_CPT_MAX_KEY_SIZE)
- goto badkey;
-
- memcpy(ctx->key, key, keylen);
- ctx->enc_key_len = enckeylen;
- ctx->auth_key_len = keylen;
- return 0;
-badkey:
- return -EINVAL;
+ return otx_cpt_aead_cbc_aes_sha_setkey(cipher, key, keylen);
}
static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
@@ -1613,14 +1538,6 @@ static int compare_func(const void *lptr, const void *rptr)
return 0;
}
-static void swap_func(void *lptr, void *rptr, int size)
-{
- struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
- struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
-
- swap(*ldesc, *rdesc);
-}
-
int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
enum otx_cptpf_type pf_type,
enum otx_cptvf_type engine_type,
@@ -1655,7 +1572,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
is_crypto_registered = true;
}
sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
- compare_func, swap_func);
+ compare_func, NULL);
break;
case OTX_CPT_AE_TYPES:
@@ -1670,7 +1587,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
ae_devices.desc[count++].dev = pdev;
atomic_inc(&ae_devices.count);
sort(ae_devices.desc, count, sizeof(struct cpt_device_desc),
- compare_func, swap_func);
+ compare_func, NULL);
break;
default:
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h
index 4181b5c5c356..a50b5e2f8d00 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h
@@ -185,6 +185,5 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
int num_queues, int num_devices);
void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
enum otx_cptvf_type engine_type);
-void otx_cpt_callback(int status, void *arg, void *req);
#endif /* __OTX_CPT_ALGS_H */
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
index 79b4e74804f6..5cae8fafa151 100644
--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
@@ -73,7 +73,7 @@ int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cn10k_cptpf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(cn10k_cptpf_lmtst_init, "CRYPTO_DEV_OCTEONTX2_CPT");
int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
{
@@ -94,7 +94,7 @@ int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cn10k_cptvf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(cn10k_cptvf_lmtst_init, "CRYPTO_DEV_OCTEONTX2_CPT");
void cn10k_cpt_hw_ctx_clear(struct pci_dev *pdev,
struct cn10k_cpt_errata_ctx *er_ctx)
@@ -110,7 +110,7 @@ void cn10k_cpt_hw_ctx_clear(struct pci_dev *pdev,
DMA_BIDIRECTIONAL);
kfree(er_ctx->hw_ctx);
}
-EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_clear, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_clear, "CRYPTO_DEV_OCTEONTX2_CPT");
void cn10k_cpt_hw_ctx_set(union cn10k_cpt_hw_ctx *hctx, u16 ctx_sz)
{
@@ -119,7 +119,7 @@ void cn10k_cpt_hw_ctx_set(union cn10k_cpt_hw_ctx *hctx, u16 ctx_sz)
hctx->w0.ctx_sz = ctx_sz;
hctx->w0.ctx_push_sz = 1;
}
-EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_set, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_set, "CRYPTO_DEV_OCTEONTX2_CPT");
int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
struct cn10k_cpt_errata_ctx *er_ctx)
@@ -138,6 +138,10 @@ int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
return -ENOMEM;
cptr_dma = dma_map_single(&pdev->dev, hctx, CN10K_CPT_HW_CTX_SIZE,
DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&pdev->dev, cptr_dma)) {
+ kfree(hctx);
+ return -ENOMEM;
+ }
cn10k_cpt_hw_ctx_set(hctx, 1);
er_ctx->hw_ctx = hctx;
@@ -145,7 +149,7 @@ int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_init, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_init, "CRYPTO_DEV_OCTEONTX2_CPT");
void cn10k_cpt_ctx_flush(struct pci_dev *pdev, u64 cptr, bool inval)
{
@@ -164,7 +168,7 @@ void cn10k_cpt_ctx_flush(struct pci_dev *pdev, u64 cptr, bool inval)
otx2_cpt_read64(lfs->reg_base, lfs->blkaddr, lfs->lf[0].slot,
OTX2_CPT_LF_CTX_ERR);
}
-EXPORT_SYMBOL_NS_GPL(cn10k_cpt_ctx_flush, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_ctx_flush, "CRYPTO_DEV_OCTEONTX2_CPT");
void cptvf_hw_ops_get(struct otx2_cptvf_dev *cptvf)
{
@@ -173,4 +177,4 @@ void cptvf_hw_ops_get(struct otx2_cptvf_dev *cptvf)
else
cptvf->lfs.ops = &otx2_hw_ops;
}
-EXPORT_SYMBOL_NS_GPL(cptvf_hw_ops_get, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(cptvf_hw_ops_get, "CRYPTO_DEV_OCTEONTX2_CPT");
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
index d2b8d26db968..215a1a8ba7e9 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
@@ -4,7 +4,8 @@
#include "otx2_cpt_devlink.h"
static int otx2_cpt_dl_egrp_create(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
@@ -13,7 +14,8 @@ static int otx2_cpt_dl_egrp_create(struct devlink *dl, u32 id,
}
static int otx2_cpt_dl_egrp_delete(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
@@ -45,7 +47,8 @@ static int otx2_cpt_dl_t106_mode_get(struct devlink *dl, u32 id,
}
static int otx2_cpt_dl_t106_mode_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
index 5be0103c1fb8..b8b7c8a3c0ca 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -19,7 +19,7 @@ int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
}
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_mbox_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_mbox_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
{
@@ -37,13 +37,13 @@ int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_ready_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_ready_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev)
{
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_af_reg_requests, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_af_reg_requests, "CRYPTO_DEV_OCTEONTX2_CPT");
static int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox,
struct pci_dev *pdev, u64 reg,
@@ -95,7 +95,7 @@ int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_add_write_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_add_write_af_reg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 *val, int blkaddr)
@@ -108,7 +108,7 @@ int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_read_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_read_af_reg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 val, int blkaddr)
@@ -121,7 +121,7 @@ int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_write_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_write_af_reg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs)
{
@@ -180,7 +180,7 @@ int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs)
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_detach_rsrcs_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_detach_rsrcs_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
{
@@ -213,7 +213,7 @@ int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
}
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_msix_offset_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_msix_offset_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox)
{
@@ -228,7 +228,7 @@ int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox)
return otx2_mbox_check_rsp_msgs(mbox, 0);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_sync_mbox_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_sync_mbox_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot)
{
@@ -254,4 +254,4 @@ int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot)
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_lf_reset_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_lf_reset_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
index b52728e3c0d1..b5d66afcc030 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -288,8 +288,7 @@ void otx2_cptlf_unregister_misc_interrupts(struct otx2_cptlfs_info *lfs)
cptlf_set_misc_intrs(lfs, false);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_misc_interrupts,
- CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_misc_interrupts, "CRYPTO_DEV_OCTEONTX2_CPT");
void otx2_cptlf_unregister_done_interrupts(struct otx2_cptlfs_info *lfs)
{
@@ -308,8 +307,7 @@ void otx2_cptlf_unregister_done_interrupts(struct otx2_cptlfs_info *lfs)
cptlf_set_done_intrs(lfs, false);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_done_interrupts,
- CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_done_interrupts, "CRYPTO_DEV_OCTEONTX2_CPT");
static int cptlf_do_register_interrrupts(struct otx2_cptlfs_info *lfs,
int lf_num, int irq_offset,
@@ -351,8 +349,7 @@ free_irq:
otx2_cptlf_unregister_misc_interrupts(lfs);
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_misc_interrupts,
- CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_misc_interrupts, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cptlf_register_done_interrupts(struct otx2_cptlfs_info *lfs)
{
@@ -375,8 +372,7 @@ free_irq:
otx2_cptlf_unregister_done_interrupts(lfs);
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_done_interrupts,
- CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_done_interrupts, "CRYPTO_DEV_OCTEONTX2_CPT");
void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
@@ -390,7 +386,7 @@ void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
free_cpumask_var(lfs->lf[slot].affinity_mask);
}
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_free_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_free_irqs_affinity, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
@@ -423,7 +419,7 @@ free_affinity_mask:
otx2_cptlf_free_irqs_affinity(lfs);
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_set_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_set_irqs_affinity, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
int lfs_num)
@@ -486,7 +482,7 @@ clear_lfs_num:
lfs->lfs_num = 0;
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_init, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_init, "CRYPTO_DEV_OCTEONTX2_CPT");
void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
{
@@ -498,7 +494,7 @@ void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
otx2_cpt_detach_rsrcs_msg(lfs);
lfs->lfs_num = 0;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_shutdown, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_shutdown, "CRYPTO_DEV_OCTEONTX2_CPT");
MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell RVU CPT Common module");
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index 400e36d9908f..12971300296d 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -739,18 +739,22 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
dev_err(dev, "Unable to get usable DMA configuration\n");
goto clear_drvdata;
}
- /* Map PF's configuration registers */
- err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
- OTX2_CPT_DRV_NAME);
+ err = pcim_request_all_regions(pdev, OTX2_CPT_DRV_NAME);
if (err) {
- dev_err(dev, "Couldn't get PCI resources 0x%x\n", err);
+ dev_err(dev, "Couldn't request PCI resources 0x%x\n", err);
goto clear_drvdata;
}
pci_set_master(pdev);
pci_set_drvdata(pdev, cptpf);
cptpf->pdev = pdev;
- cptpf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];
+ /* Map PF's configuration registers */
+ cptpf->reg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
+ if (!cptpf->reg_base) {
+ err = -ENOMEM;
+ dev_err(dev, "Couldn't ioremap PCI resource 0x%x\n", err);
+ goto clear_drvdata;
+ }
/* Check if AF driver is up, otherwise defer probe */
err = cpt_is_pf_usable(cptpf);
@@ -864,7 +868,7 @@ static struct pci_driver otx2_cpt_pci_driver = {
module_pci_driver(otx2_cpt_pci_driver);
-MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);
+MODULE_IMPORT_NS("CRYPTO_DEV_OCTEONTX2_CPT");
MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
index 1604fc58dc13..7eb0bc13994d 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -11,7 +11,6 @@
#include <crypto/xts.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
-#include <linux/rtnetlink.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx2_cptvf.h"
@@ -55,6 +54,8 @@ static struct cpt_device_table se_devices = {
.count = ATOMIC_INIT(0)
};
+static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg);
+
static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
int count;
@@ -598,40 +599,56 @@ static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
ctx->cipher_type = cipher_type;
ctx->mac_type = mac_type;
+ switch (ctx->mac_type) {
+ case OTX2_CPT_SHA1:
+ ctx->hashalg = crypto_alloc_shash("sha1", 0, 0);
+ break;
+
+ case OTX2_CPT_SHA256:
+ ctx->hashalg = crypto_alloc_shash("sha256", 0, 0);
+ break;
+
+ case OTX2_CPT_SHA384:
+ ctx->hashalg = crypto_alloc_shash("sha384", 0, 0);
+ break;
+
+ case OTX2_CPT_SHA512:
+ ctx->hashalg = crypto_alloc_shash("sha512", 0, 0);
+ break;
+ }
+
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+
+ if (ctx->hashalg) {
+ ctx->sdesc = alloc_sdesc(ctx->hashalg);
+ if (!ctx->sdesc) {
+ crypto_free_shash(ctx->hashalg);
+ return -ENOMEM;
+ }
+ }
+
/*
* When selected cipher is NULL we use HMAC opcode instead of
* FLEXICRYPTO opcode therefore we don't need to use HASH algorithms
* for calculating ipad and opad
*/
- if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL) {
- switch (ctx->mac_type) {
- case OTX2_CPT_SHA1:
- ctx->hashalg = crypto_alloc_shash("sha1", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(ctx->hashalg))
- return PTR_ERR(ctx->hashalg);
- break;
-
- case OTX2_CPT_SHA256:
- ctx->hashalg = crypto_alloc_shash("sha256", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(ctx->hashalg))
- return PTR_ERR(ctx->hashalg);
- break;
+ if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL && ctx->hashalg) {
+ int ss = crypto_shash_statesize(ctx->hashalg);
- case OTX2_CPT_SHA384:
- ctx->hashalg = crypto_alloc_shash("sha384", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(ctx->hashalg))
- return PTR_ERR(ctx->hashalg);
- break;
+ ctx->ipad = kzalloc(ss, GFP_KERNEL);
+ if (!ctx->ipad) {
+ kfree(ctx->sdesc);
+ crypto_free_shash(ctx->hashalg);
+ return -ENOMEM;
+ }
- case OTX2_CPT_SHA512:
- ctx->hashalg = crypto_alloc_shash("sha512", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(ctx->hashalg))
- return PTR_ERR(ctx->hashalg);
- break;
+ ctx->opad = kzalloc(ss, GFP_KERNEL);
+ if (!ctx->opad) {
+ kfree(ctx->ipad);
+ kfree(ctx->sdesc);
+ crypto_free_shash(ctx->hashalg);
+ return -ENOMEM;
}
}
switch (ctx->cipher_type) {
@@ -713,8 +730,7 @@ static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
kfree(ctx->ipad);
kfree(ctx->opad);
- if (ctx->hashalg)
- crypto_free_shash(ctx->hashalg);
+ crypto_free_shash(ctx->hashalg);
kfree(ctx->sdesc);
if (ctx->fbk_cipher) {
@@ -788,7 +804,7 @@ static inline void swap_data64(void *buf, u32 len)
cpu_to_be64s(src);
}
-static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
+static int swap_pad(u8 mac_type, u8 *pad)
{
struct sha512_state *sha512;
struct sha256_state *sha256;
@@ -796,22 +812,19 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
switch (mac_type) {
case OTX2_CPT_SHA1:
- sha1 = (struct sha1_state *) in_pad;
+ sha1 = (struct sha1_state *)pad;
swap_data32(sha1->state, SHA1_DIGEST_SIZE);
- memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
break;
case OTX2_CPT_SHA256:
- sha256 = (struct sha256_state *) in_pad;
+ sha256 = (struct sha256_state *)pad;
swap_data32(sha256->state, SHA256_DIGEST_SIZE);
- memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
break;
case OTX2_CPT_SHA384:
case OTX2_CPT_SHA512:
- sha512 = (struct sha512_state *) in_pad;
+ sha512 = (struct sha512_state *)pad;
swap_data64(sha512->state, SHA512_DIGEST_SIZE);
- memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
break;
default:
@@ -821,55 +834,54 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
return 0;
}
-static int aead_hmac_init(struct crypto_aead *cipher)
+static int aead_hmac_init(struct crypto_aead *cipher,
+ struct crypto_authenc_keys *keys)
{
struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
- int state_size = crypto_shash_statesize(ctx->hashalg);
int ds = crypto_shash_digestsize(ctx->hashalg);
int bs = crypto_shash_blocksize(ctx->hashalg);
- int authkeylen = ctx->auth_key_len;
+ int authkeylen = keys->authkeylen;
u8 *ipad = NULL, *opad = NULL;
- int ret = 0, icount = 0;
+ int icount = 0;
+ int ret;
- ctx->sdesc = alloc_sdesc(ctx->hashalg);
- if (!ctx->sdesc)
- return -ENOMEM;
+ if (authkeylen > bs) {
+ ret = crypto_shash_digest(&ctx->sdesc->shash, keys->authkey,
+ authkeylen, ctx->key);
+ if (ret)
+ goto calc_fail;
- ctx->ipad = kzalloc(bs, GFP_KERNEL);
- if (!ctx->ipad) {
- ret = -ENOMEM;
- goto calc_fail;
- }
+ authkeylen = ds;
+ } else
+ memcpy(ctx->key, keys->authkey, authkeylen);
- ctx->opad = kzalloc(bs, GFP_KERNEL);
- if (!ctx->opad) {
- ret = -ENOMEM;
- goto calc_fail;
- }
+ ctx->enc_key_len = keys->enckeylen;
+ ctx->auth_key_len = authkeylen;
- ipad = kzalloc(state_size, GFP_KERNEL);
- if (!ipad) {
- ret = -ENOMEM;
- goto calc_fail;
- }
+ if (ctx->cipher_type == OTX2_CPT_CIPHER_NULL)
+ return keys->enckeylen ? -EINVAL : 0;
- opad = kzalloc(state_size, GFP_KERNEL);
- if (!opad) {
- ret = -ENOMEM;
- goto calc_fail;
+ switch (keys->enckeylen) {
+ case AES_KEYSIZE_128:
+ ctx->key_type = OTX2_CPT_AES_128_BIT;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->key_type = OTX2_CPT_AES_192_BIT;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->key_type = OTX2_CPT_AES_256_BIT;
+ break;
+ default:
+ /* Invalid key length */
+ return -EINVAL;
}
- if (authkeylen > bs) {
- ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
- authkeylen, ipad);
- if (ret)
- goto calc_fail;
+ memcpy(ctx->key + authkeylen, keys->enckey, keys->enckeylen);
- authkeylen = ds;
- } else {
- memcpy(ipad, ctx->key, authkeylen);
- }
+ ipad = ctx->ipad;
+ opad = ctx->opad;
+ memcpy(ipad, ctx->key, authkeylen);
memset(ipad + authkeylen, 0, bs - authkeylen);
memcpy(opad, ipad, bs);
@@ -887,7 +899,7 @@ static int aead_hmac_init(struct crypto_aead *cipher)
crypto_shash_init(&ctx->sdesc->shash);
crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
crypto_shash_export(&ctx->sdesc->shash, ipad);
- ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
+ ret = swap_pad(ctx->mac_type, ipad);
if (ret)
goto calc_fail;
@@ -895,25 +907,9 @@ static int aead_hmac_init(struct crypto_aead *cipher)
crypto_shash_init(&ctx->sdesc->shash);
crypto_shash_update(&ctx->sdesc->shash, opad, bs);
crypto_shash_export(&ctx->sdesc->shash, opad);
- ret = copy_pad(ctx->mac_type, ctx->opad, opad);
- if (ret)
- goto calc_fail;
-
- kfree(ipad);
- kfree(opad);
-
- return 0;
+ ret = swap_pad(ctx->mac_type, opad);
calc_fail:
- kfree(ctx->ipad);
- ctx->ipad = NULL;
- kfree(ctx->opad);
- ctx->opad = NULL;
- kfree(ipad);
- kfree(opad);
- kfree(ctx->sdesc);
- ctx->sdesc = NULL;
-
return ret;
}
@@ -921,87 +917,17 @@ static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
- struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
- struct crypto_authenc_key_param *param;
- int enckeylen = 0, authkeylen = 0;
- struct rtattr *rta = (void *)key;
-
- if (!RTA_OK(rta, keylen))
- return -EINVAL;
+ struct crypto_authenc_keys authenc_keys;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- return -EINVAL;
-
- if (RTA_PAYLOAD(rta) < sizeof(*param))
- return -EINVAL;
-
- param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
- if (keylen < enckeylen)
- return -EINVAL;
-
- if (keylen > OTX2_CPT_MAX_KEY_SIZE)
- return -EINVAL;
-
- authkeylen = keylen - enckeylen;
- memcpy(ctx->key, key, keylen);
-
- switch (enckeylen) {
- case AES_KEYSIZE_128:
- ctx->key_type = OTX2_CPT_AES_128_BIT;
- break;
- case AES_KEYSIZE_192:
- ctx->key_type = OTX2_CPT_AES_192_BIT;
- break;
- case AES_KEYSIZE_256:
- ctx->key_type = OTX2_CPT_AES_256_BIT;
- break;
- default:
- /* Invalid key length */
- return -EINVAL;
- }
-
- ctx->enc_key_len = enckeylen;
- ctx->auth_key_len = authkeylen;
-
- return aead_hmac_init(cipher);
+ return crypto_authenc_extractkeys(&authenc_keys, key, keylen) ?:
+ aead_hmac_init(cipher, &authenc_keys);
}
static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
- struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
- struct crypto_authenc_key_param *param;
- struct rtattr *rta = (void *)key;
- int enckeylen = 0;
-
- if (!RTA_OK(rta, keylen))
- return -EINVAL;
-
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- return -EINVAL;
-
- if (RTA_PAYLOAD(rta) < sizeof(*param))
- return -EINVAL;
-
- param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
- if (enckeylen != 0)
- return -EINVAL;
-
- if (keylen > OTX2_CPT_MAX_KEY_SIZE)
- return -EINVAL;
-
- memcpy(ctx->key, key, keylen);
- ctx->enc_key_len = enckeylen;
- ctx->auth_key_len = keylen;
-
- return 0;
+ return otx2_cpt_aead_cbc_aes_sha_setkey(cipher, key, keylen);
}
static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
@@ -1702,14 +1628,6 @@ static int compare_func(const void *lptr, const void *rptr)
return 0;
}
-static void swap_func(void *lptr, void *rptr, int size)
-{
- struct cpt_device_desc *ldesc = lptr;
- struct cpt_device_desc *rdesc = rptr;
-
- swap(*ldesc, *rdesc);
-}
-
int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
int num_queues, int num_devices)
{
@@ -1739,7 +1657,7 @@ int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
is_crypto_registered = true;
}
sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
- compare_func, swap_func);
+ compare_func, NULL);
unlock:
mutex_unlock(&mutex);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
index 527d34cc258b..d84eebdf2fa8 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -358,9 +358,8 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
dev_err(dev, "Unable to get usable DMA configuration\n");
goto clear_drvdata;
}
- /* Map VF's configuration registers */
- ret = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
- OTX2_CPTVF_DRV_NAME);
+
+ ret = pcim_request_all_regions(pdev, OTX2_CPTVF_DRV_NAME);
if (ret) {
dev_err(dev, "Couldn't get PCI resources 0x%x\n", ret);
goto clear_drvdata;
@@ -369,7 +368,13 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, cptvf);
cptvf->pdev = pdev;
- cptvf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];
+ /* Map VF's configuration registers */
+ cptvf->reg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
+ if (!cptvf->reg_base) {
+ ret = -ENOMEM;
+ dev_err(dev, "Couldn't ioremap PCI resource 0x%x\n", ret);
+ goto clear_drvdata;
+ }
otx2_cpt_set_hw_caps(pdev, &cptvf->cap_flag);
@@ -448,7 +453,7 @@ static struct pci_driver otx2_cptvf_pci_driver = {
module_pci_driver(otx2_cptvf_pci_driver);
-MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);
+MODULE_IMPORT_NS("CRYPTO_DEV_OCTEONTX2_CPT");
MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell RVU CPT Virtual Function Driver");