Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig | 1
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 87
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.h | 4
-rw-r--r--  drivers/crypto/amcc/crypto4xx_reg_def.h | 1
-rw-r--r--  drivers/crypto/amcc/crypto4xx_trng.h | 4
-rw-r--r--  drivers/crypto/atmel-tdes.c | 2
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c | 326
-rw-r--r--  drivers/crypto/bcm/Makefile | 2
-rw-r--r--  drivers/crypto/bcm/cipher.c | 54
-rw-r--r--  drivers/crypto/bcm/cipher.h | 4
-rw-r--r--  drivers/crypto/bcm/util.c | 40
-rw-r--r--  drivers/crypto/bcm/util.h | 6
-rw-r--r--  drivers/crypto/caam/Kconfig | 1
-rw-r--r--  drivers/crypto/caam/caamalg.c | 240
-rw-r--r--  drivers/crypto/caam/caamalg_desc.c | 18
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 23
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c | 29
-rw-r--r--  drivers/crypto/caam/caamhash.c | 393
-rw-r--r--  drivers/crypto/caam/caamhash_desc.c | 68
-rw-r--r--  drivers/crypto/caam/caamhash_desc.h | 8
-rw-r--r--  drivers/crypto/caam/compat.h | 1
-rw-r--r--  drivers/crypto/caam/ctrl.c | 25
-rw-r--r--  drivers/crypto/caam/desc.h | 1
-rw-r--r--  drivers/crypto/caam/error.c | 6
-rw-r--r--  drivers/crypto/caam/error.h | 9
-rw-r--r--  drivers/crypto/caam/intern.h | 1
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_debugfs.c | 27
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_debugfs.h | 5
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_main.c | 4
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | 2
-rw-r--r--  drivers/crypto/cavium/zip/zip_main.c | 52
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-des3.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-debugfs.c | 36
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c | 2
-rw-r--r--  drivers/crypto/ccp/psp-dev.c | 18
-rw-r--r--  drivers/crypto/ccp/psp-dev.h | 2
-rw-r--r--  drivers/crypto/ccp/sp-dev.c | 2
-rw-r--r--  drivers/crypto/ccp/sp-dev.h | 2
-rw-r--r--  drivers/crypto/ccp/sp-pci.c | 2
-rw-r--r--  drivers/crypto/ccp/sp-platform.c | 2
-rw-r--r--  drivers/crypto/ccree/cc_aead.c | 40
-rw-r--r--  drivers/crypto/ccree/cc_buffer_mgr.c | 87
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c | 9
-rw-r--r--  drivers/crypto/ccree/cc_debugfs.c | 22
-rw-r--r--  drivers/crypto/ccree/cc_debugfs.h | 8
-rw-r--r--  drivers/crypto/ccree/cc_driver.c | 13
-rw-r--r--  drivers/crypto/ccree/cc_driver.h | 2
-rw-r--r--  drivers/crypto/chelsio/Makefile | 2
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 12
-rw-r--r--  drivers/crypto/chelsio/chcr_core.h | 2
-rw-r--r--  drivers/crypto/chelsio/chcr_ipsec.c | 13
-rw-r--r--  drivers/crypto/chelsio/chtls/Makefile | 3
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_io.c | 12
-rw-r--r--  drivers/crypto/hifn_795x.c | 3
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 2
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 4
-rw-r--r--  drivers/crypto/marvell/cipher.c | 2
-rw-r--r--  drivers/crypto/n2_core.c | 2
-rw-r--r--  drivers/crypto/omap-des.c | 2
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 3
-rw-r--r--  drivers/crypto/qat/qat_c3xxx/Makefile | 2
-rw-r--r--  drivers/crypto/qat/qat_c3xxx/adf_drv.c | 5
-rw-r--r--  drivers/crypto/qat/qat_c3xxxvf/Makefile | 2
-rw-r--r--  drivers/crypto/qat/qat_c3xxxvf/adf_drv.c | 5
-rw-r--r--  drivers/crypto/qat/qat_c62x/Makefile | 2
-rw-r--r--  drivers/crypto/qat/qat_c62x/adf_drv.c | 5
-rw-r--r--  drivers/crypto/qat/qat_c62xvf/Makefile | 2
-rw-r--r--  drivers/crypto/qat/qat_c62xvf/adf_drv.c | 5
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg.c | 7
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport.c | 7
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport_debug.c | 15
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/Makefile | 2
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 5
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/Makefile | 2
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_drv.c | 5
-rw-r--r--  drivers/crypto/qce/ablkcipher.c | 4
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c | 2
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-cipher.c | 2
-rw-r--r--  drivers/crypto/talitos.c | 28
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c | 26
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_algs.c | 2
83 files changed, 1007 insertions(+), 887 deletions(-)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 5a90075f719d..0be55fcc19ba 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -692,6 +692,7 @@ config CRYPTO_DEV_BCM_SPU
depends on ARCH_BCM_IPROC
depends on MAILBOX
default m
+ select CRYPTO_AUTHENC
select CRYPTO_DES
select CRYPTO_MD5
select CRYPTO_SHA1
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 63cb6956c948..f869e8cc4e0b 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -40,9 +40,11 @@
#include <crypto/ctr.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
+#include <crypto/rng.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
@@ -1035,6 +1037,10 @@ static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
rc = crypto_register_ahash(&alg->alg.u.hash);
break;
+ case CRYPTO_ALG_TYPE_RNG:
+ rc = crypto_register_rng(&alg->alg.u.rng);
+ break;
+
default:
rc = crypto_register_skcipher(&alg->alg.u.cipher);
break;
@@ -1064,6 +1070,10 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
crypto_unregister_aead(&alg->alg.u.aead);
break;
+ case CRYPTO_ALG_TYPE_RNG:
+ crypto_unregister_rng(&alg->alg.u.rng);
+ break;
+
default:
crypto_unregister_skcipher(&alg->alg.u.cipher);
}
@@ -1122,6 +1132,69 @@ static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
PPC4XX_TMO_ERR_INT);
}
+static int ppc4xx_prng_data_read(struct crypto4xx_device *dev,
+ u8 *data, unsigned int max)
+{
+ unsigned int i, curr = 0;
+ u32 val[2];
+
+ do {
+ /* trigger PRN generation */
+ writel(PPC4XX_PRNG_CTRL_AUTO_EN,
+ dev->ce_base + CRYPTO4XX_PRNG_CTRL);
+
+ for (i = 0; i < 1024; i++) {
+ /* usually 19 iterations are enough */
+ if ((readl(dev->ce_base + CRYPTO4XX_PRNG_STAT) &
+ CRYPTO4XX_PRNG_STAT_BUSY))
+ continue;
+
+ val[0] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_0);
+ val[1] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_1);
+ break;
+ }
+ if (i == 1024)
+ return -ETIMEDOUT;
+
+ if ((max - curr) >= 8) {
+ memcpy(data, &val, 8);
+ data += 8;
+ curr += 8;
+ } else {
+ /* copy only remaining bytes */
+ memcpy(data, &val, max - curr);
+ break;
+ }
+ } while (curr < max);
+
+ return curr;
+}
+
+static int crypto4xx_prng_generate(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dstn, unsigned int dlen)
+{
+ struct rng_alg *alg = crypto_rng_alg(tfm);
+ struct crypto4xx_alg *amcc_alg;
+ struct crypto4xx_device *dev;
+ int ret;
+
+ amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.rng);
+ dev = amcc_alg->dev;
+
+ mutex_lock(&dev->core_dev->rng_lock);
+ ret = ppc4xx_prng_data_read(dev, dstn, dlen);
+ mutex_unlock(&dev->core_dev->rng_lock);
+ return ret;
+}
+
+
+static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen)
+{
+ return 0;
+}
+
/**
* Supported Crypto Algorithms
*/
@@ -1291,6 +1364,18 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_module = THIS_MODULE,
},
} },
+ { .type = CRYPTO_ALG_TYPE_RNG, .u.rng = {
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "crypto4xx_rng",
+ .cra_priority = 300,
+ .cra_ctxsize = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .generate = crypto4xx_prng_generate,
+ .seed = crypto4xx_prng_seed,
+ .seedsize = 0,
+ } },
};
/**
@@ -1360,6 +1445,7 @@ static int crypto4xx_probe(struct platform_device *ofdev)
core_dev->dev->core_dev = core_dev;
core_dev->dev->is_revb = is_revb;
core_dev->device = dev;
+ mutex_init(&core_dev->rng_lock);
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
ratelimit_default_init(&core_dev->dev->aead_ratelimit);
@@ -1439,6 +1525,7 @@ static int crypto4xx_remove(struct platform_device *ofdev)
tasklet_kill(&core_dev->tasklet);
/* Un-register with Linux CryptoAPI */
crypto4xx_unregister_alg(core_dev->dev);
+ mutex_destroy(&core_dev->rng_lock);
/* Free all allocated memory */
crypto4xx_stop_all(core_dev);
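
For context, a minimal sketch of how a kernel consumer would exercise the "stdrng" instance registered above, via the generic crypto_rng API (the function name below is illustrative, not part of the driver):

#include <crypto/rng.h>

static int example_get_random(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* crypto4xx_prng_seed() above is a no-op (seedsize = 0) */
	ret = crypto_rng_reset(rng, NULL, 0);
	if (!ret)
		ret = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return ret;
}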
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index e2ca56722f07..18df695ca6b1 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -23,8 +23,10 @@
#define __CRYPTO4XX_CORE_H__
#include <linux/ratelimit.h>
+#include <linux/mutex.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_sa.h"
@@ -119,6 +121,7 @@ struct crypto4xx_core_device {
u32 irq;
struct tasklet_struct tasklet;
spinlock_t lock;
+ struct mutex rng_lock;
};
struct crypto4xx_ctx {
@@ -143,6 +146,7 @@ struct crypto4xx_alg_common {
struct skcipher_alg cipher;
struct ahash_alg hash;
struct aead_alg aead;
+ struct rng_alg rng;
} u;
};
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index 472331787e04..80c67490bbf6 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -100,6 +100,7 @@
#define CRYPTO4XX_ENDIAN_CFG 0x000600d8
#define CRYPTO4XX_PRNG_STAT 0x00070000
+#define CRYPTO4XX_PRNG_STAT_BUSY 0x1
#define CRYPTO4XX_PRNG_CTRL 0x00070004
#define CRYPTO4XX_PRNG_SEED_L 0x00070008
#define CRYPTO4XX_PRNG_SEED_H 0x0007000c
diff --git a/drivers/crypto/amcc/crypto4xx_trng.h b/drivers/crypto/amcc/crypto4xx_trng.h
index 931d22531f51..7bbda51b7337 100644
--- a/drivers/crypto/amcc/crypto4xx_trng.h
+++ b/drivers/crypto/amcc/crypto4xx_trng.h
@@ -26,9 +26,9 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev);
void ppc4xx_trng_remove(struct crypto4xx_core_device *core_dev);
#else
static inline void ppc4xx_trng_probe(
- struct crypto4xx_device *dev __maybe_unused) { }
+ struct crypto4xx_core_device *dev __maybe_unused) { }
static inline void ppc4xx_trng_remove(
- struct crypto4xx_device *dev __maybe_unused) { }
+ struct crypto4xx_core_device *dev __maybe_unused) { }
#endif
#endif
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 438e1ffb2ec0..65bf1a299562 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -785,7 +785,7 @@ static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
}
err = des_ekey(tmp, key);
- if (err == 0 && (ctfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ if (err == 0 && (ctfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
ctfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
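
CRYPTO_TFM_REQ_WEAK_KEY was renamed tree-wide to CRYPTO_TFM_REQ_FORBID_WEAK_KEYS, which better describes its meaning. A minimal sketch of the check pattern, assuming a single-DES setkey path like the one above (the helper name is illustrative):

#include <linux/crypto.h>
#include <crypto/des.h>

static int example_des_check_key(struct crypto_tfm *tfm, const u8 *key)
{
	u32 tmp[DES_EXPKEY_WORDS];

	/* des_ekey() returns 0 for one of the known weak DES keys */
	if (!des_ekey(tmp, key) &&
	    (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}
	return 0;
}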
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index f3442c2bdbdc..57e5dca3253f 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -135,8 +135,6 @@
#define regk_crypto_ext 0x00000001
#define regk_crypto_hmac_sha1 0x00000007
#define regk_crypto_hmac_sha256 0x00000009
-#define regk_crypto_hmac_sha384 0x0000000b
-#define regk_crypto_hmac_sha512 0x0000000d
#define regk_crypto_init 0x00000000
#define regk_crypto_key_128 0x00000000
#define regk_crypto_key_192 0x00000001
@@ -144,8 +142,6 @@
#define regk_crypto_null 0x00000000
#define regk_crypto_sha1 0x00000006
#define regk_crypto_sha256 0x00000008
-#define regk_crypto_sha384 0x0000000a
-#define regk_crypto_sha512 0x0000000c
/* DMA descriptor structures */
struct pdma_descr_ctrl {
@@ -190,8 +186,6 @@ struct pdma_stat_descr {
/* Hash modes (including HMAC variants) */
#define ARTPEC6_CRYPTO_HASH_SHA1 1
#define ARTPEC6_CRYPTO_HASH_SHA256 2
-#define ARTPEC6_CRYPTO_HASH_SHA384 3
-#define ARTPEC6_CRYPTO_HASH_SHA512 4
/* Crypto modes */
#define ARTPEC6_CRYPTO_CIPHER_AES_ECB 1
@@ -284,6 +278,7 @@ enum artpec6_crypto_hash_flags {
struct artpec6_crypto_req_common {
struct list_head list;
+ struct list_head complete_in_progress;
struct artpec6_crypto_dma_descriptors *dma;
struct crypto_async_request *req;
void (*complete)(struct crypto_async_request *req);
@@ -291,11 +286,11 @@ struct artpec6_crypto_req_common {
};
struct artpec6_hash_request_context {
- char partial_buffer[SHA512_BLOCK_SIZE];
- char partial_buffer_out[SHA512_BLOCK_SIZE];
- char key_buffer[SHA512_BLOCK_SIZE];
- char pad_buffer[SHA512_BLOCK_SIZE + 32];
- unsigned char digeststate[SHA512_DIGEST_SIZE];
+ char partial_buffer[SHA256_BLOCK_SIZE];
+ char partial_buffer_out[SHA256_BLOCK_SIZE];
+ char key_buffer[SHA256_BLOCK_SIZE];
+ char pad_buffer[SHA256_BLOCK_SIZE + 32];
+ unsigned char digeststate[SHA256_DIGEST_SIZE];
size_t partial_bytes;
u64 digcnt;
u32 key_md;
@@ -305,8 +300,8 @@ struct artpec6_hash_request_context {
};
struct artpec6_hash_export_state {
- char partial_buffer[SHA512_BLOCK_SIZE];
- unsigned char digeststate[SHA512_DIGEST_SIZE];
+ char partial_buffer[SHA256_BLOCK_SIZE];
+ unsigned char digeststate[SHA256_DIGEST_SIZE];
size_t partial_bytes;
u64 digcnt;
int oper;
@@ -314,7 +309,7 @@ struct artpec6_hash_export_state {
};
struct artpec6_hashalg_context {
- char hmac_key[SHA512_BLOCK_SIZE];
+ char hmac_key[SHA256_BLOCK_SIZE];
size_t hmac_key_length;
struct crypto_shash *child_hash;
};
@@ -670,8 +665,8 @@ artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
* to be written.
*/
return artpec6_crypto_dma_map_single(common,
- dma->stat + dma->in_cnt - 1,
- sizeof(dma->stat[0]),
+ dma->stat,
+ sizeof(dma->stat[0]) * dma->in_cnt,
DMA_BIDIRECTIONAL,
&dma->stat_dma_addr);
}
@@ -1315,8 +1310,7 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
- size_t contextsize = digestsize == SHA384_DIGEST_SIZE ?
- SHA512_DIGEST_SIZE : digestsize;
+ size_t contextsize = digestsize;
size_t blocksize = crypto_tfm_alg_blocksize(
crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
struct artpec6_crypto_req_common *common = &req_ctx->common;
@@ -1456,7 +1450,6 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
/* Finalize */
if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
- bool needtrim = contextsize != digestsize;
size_t hash_pad_len;
u64 digest_bits;
u32 oper;
@@ -1502,19 +1495,10 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
/* Descriptor for the final result */
error = artpec6_crypto_setup_in_descr(common, areq->result,
digestsize,
- !needtrim);
+ true);
if (error)
return error;
- if (needtrim) {
- /* Discard the extra context bytes for SHA-384 */
- error = artpec6_crypto_setup_in_descr(common,
- req_ctx->partial_buffer,
- digestsize - contextsize, true);
- if (error)
- return error;
- }
-
} else { /* This is not the final operation for this request */
if (!run_hw)
return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;
@@ -1923,7 +1907,7 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
/* For the decryption, cryptlen includes the tag. */
input_length = areq->cryptlen;
if (req_ctx->decrypt)
- input_length -= AES_BLOCK_SIZE;
+ input_length -= crypto_aead_authsize(cipher);
/* Prepare the context buffer */
req_ctx->hw_ctx.aad_length_bits =
@@ -1988,7 +1972,7 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
size_t output_len = areq->cryptlen;
if (req_ctx->decrypt)
- output_len -= AES_BLOCK_SIZE;
+ output_len -= crypto_aead_authsize(cipher);
artpec6_crypto_walk_init(&walk, areq->dst);
@@ -2017,19 +2001,32 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
* the output ciphertext. For decryption it is put in a context
* buffer for later compare against the input tag.
*/
- count = AES_BLOCK_SIZE;
if (req_ctx->decrypt) {
ret = artpec6_crypto_setup_in_descr(common,
- req_ctx->decryption_tag, count, false);
+ req_ctx->decryption_tag, AES_BLOCK_SIZE, false);
if (ret)
return ret;
} else {
+ /* For encryption the requested tag size may be smaller
+ * than the hardware's generated tag.
+ */
+ size_t authsize = crypto_aead_authsize(cipher);
+
ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
- count);
+ authsize);
if (ret)
return ret;
+
+ if (authsize < AES_BLOCK_SIZE) {
+ count = AES_BLOCK_SIZE - authsize;
+ ret = artpec6_crypto_setup_in_descr(common,
+ ac->pad_buffer,
+ count, false);
+ if (ret)
+ return ret;
+ }
}
}
@@ -2045,7 +2042,8 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
return artpec6_crypto_dma_map_descs(common);
}
-static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
+static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
+ struct list_head *completions)
{
struct artpec6_crypto_req_common *req;
@@ -2056,7 +2054,7 @@ static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
list_move_tail(&req->list, &ac->pending);
artpec6_crypto_start_dma(req);
- req->req->complete(req->req, -EINPROGRESS);
+ list_add_tail(&req->complete_in_progress, completions);
}
/*
@@ -2086,6 +2084,11 @@ static void artpec6_crypto_task(unsigned long data)
struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
struct artpec6_crypto_req_common *req;
struct artpec6_crypto_req_common *n;
+ struct list_head complete_done;
+ struct list_head complete_in_progress;
+
+ INIT_LIST_HEAD(&complete_done);
+ INIT_LIST_HEAD(&complete_in_progress);
if (list_empty(&ac->pending)) {
pr_debug("Spurious IRQ\n");
@@ -2097,9 +2100,12 @@ static void artpec6_crypto_task(unsigned long data)
list_for_each_entry_safe(req, n, &ac->pending, list) {
struct artpec6_crypto_dma_descriptors *dma = req->dma;
u32 stat;
+ dma_addr_t stataddr;
- dma_sync_single_for_cpu(artpec6_crypto_dev, dma->stat_dma_addr,
- sizeof(dma->stat[0]),
+ stataddr = dma->stat_dma_addr + 4 * (req->dma->in_cnt - 1);
+ dma_sync_single_for_cpu(artpec6_crypto_dev,
+ stataddr,
+ 4,
DMA_BIDIRECTIONAL);
stat = req->dma->stat[req->dma->in_cnt-1];
@@ -2119,19 +2125,30 @@ static void artpec6_crypto_task(unsigned long data)
pr_debug("Completing request %p\n", req);
- list_del(&req->list);
+ list_move_tail(&req->list, &complete_done);
+ ac->pending_count--;
+ }
+
+ artpec6_crypto_process_queue(ac, &complete_in_progress);
+
+ spin_unlock_bh(&ac->queue_lock);
+
+ /* Perform the completion callbacks without holding the queue lock
+ * to allow new request submissions from the callbacks.
+ */
+ list_for_each_entry_safe(req, n, &complete_done, list) {
artpec6_crypto_dma_unmap_all(req);
artpec6_crypto_copy_bounce_buffers(req);
-
- ac->pending_count--;
artpec6_crypto_common_destroy(req);
+
req->complete(req->req);
}
- artpec6_crypto_process_queue(ac);
-
- spin_unlock_bh(&ac->queue_lock);
+ list_for_each_entry_safe(req, n, &complete_in_progress,
+ complete_in_progress) {
+ req->req->complete(req->req, -EINPROGRESS);
+ }
}
static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
@@ -2170,27 +2187,29 @@ static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
/* Verify GCM hashtag. */
struct aead_request *areq = container_of(req,
struct aead_request, base);
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
if (req_ctx->decrypt) {
u8 input_tag[AES_BLOCK_SIZE];
+ unsigned int authsize = crypto_aead_authsize(aead);
sg_pcopy_to_buffer(areq->src,
sg_nents(areq->src),
input_tag,
- AES_BLOCK_SIZE,
+ authsize,
areq->assoclen + areq->cryptlen -
- AES_BLOCK_SIZE);
+ authsize);
- if (memcmp(req_ctx->decryption_tag,
- input_tag,
- AES_BLOCK_SIZE)) {
+ if (crypto_memneq(req_ctx->decryption_tag,
+ input_tag,
+ authsize)) {
pr_debug("***EBADMSG:\n");
print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
- input_tag, AES_BLOCK_SIZE, true);
+ input_tag, authsize, true);
print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
req_ctx->decryption_tag,
- AES_BLOCK_SIZE, true);
+ authsize, true);
result = -EBADMSG;
}
@@ -2266,13 +2285,6 @@ artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
case ARTPEC6_CRYPTO_HASH_SHA256:
oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
break;
- case ARTPEC6_CRYPTO_HASH_SHA384:
- oper = hmac ? regk_crypto_hmac_sha384 : regk_crypto_sha384;
- break;
- case ARTPEC6_CRYPTO_HASH_SHA512:
- oper = hmac ? regk_crypto_hmac_sha512 : regk_crypto_sha512;
- break;
-
default:
pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
return -EINVAL;
@@ -2368,53 +2380,11 @@ static int artpec6_crypto_sha256_digest(struct ahash_request *req)
return artpec6_crypto_prepare_submit_hash(req);
}
-static int __maybe_unused artpec6_crypto_sha384_init(struct ahash_request *req)
-{
- return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
-}
-
-static int __maybe_unused
-artpec6_crypto_sha384_digest(struct ahash_request *req)
-{
- struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
-
- artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
- req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
-
- return artpec6_crypto_prepare_submit_hash(req);
-}
-
-static int artpec6_crypto_sha512_init(struct ahash_request *req)
-{
- return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
-}
-
-static int artpec6_crypto_sha512_digest(struct ahash_request *req)
-{
- struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
-
- artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
- req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
-
- return artpec6_crypto_prepare_submit_hash(req);
-}
-
static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
{
return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
}
-static int __maybe_unused
-artpec6_crypto_hmac_sha384_init(struct ahash_request *req)
-{
- return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
-}
-
-static int artpec6_crypto_hmac_sha512_init(struct ahash_request *req)
-{
- return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
-}
-
static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
{
struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
@@ -2425,27 +2395,6 @@ static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
return artpec6_crypto_prepare_submit_hash(req);
}
-static int __maybe_unused
-artpec6_crypto_hmac_sha384_digest(struct ahash_request *req)
-{
- struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
-
- artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
- req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
-
- return artpec6_crypto_prepare_submit_hash(req);
-}
-
-static int artpec6_crypto_hmac_sha512_digest(struct ahash_request *req)
-{
- struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
-
- artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
- req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
-
- return artpec6_crypto_prepare_submit_hash(req);
-}
-
static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
const char *base_hash_name)
{
@@ -2480,17 +2429,6 @@ static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
return artpec6_crypto_ahash_init_common(tfm, "sha256");
}
-static int __maybe_unused
-artpec6_crypto_ahash_init_hmac_sha384(struct crypto_tfm *tfm)
-{
- return artpec6_crypto_ahash_init_common(tfm, "sha384");
-}
-
-static int artpec6_crypto_ahash_init_hmac_sha512(struct crypto_tfm *tfm)
-{
- return artpec6_crypto_ahash_init_common(tfm, "sha512");
-}
-
static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
{
struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
@@ -2761,103 +2699,6 @@ static struct ahash_alg hash_algos[] = {
},
};
-static struct ahash_alg artpec7_hash_algos[] = {
- /* SHA-384 */
- {
- .init = artpec6_crypto_sha384_init,
- .update = artpec6_crypto_hash_update,
- .final = artpec6_crypto_hash_final,
- .digest = artpec6_crypto_sha384_digest,
- .import = artpec6_crypto_hash_import,
- .export = artpec6_crypto_hash_export,
- .halg.digestsize = SHA384_DIGEST_SIZE,
- .halg.statesize = sizeof(struct artpec6_hash_export_state),
- .halg.base = {
- .cra_name = "sha384",
- .cra_driver_name = "artpec-sha384",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
- .cra_alignmask = 3,
- .cra_module = THIS_MODULE,
- .cra_init = artpec6_crypto_ahash_init,
- .cra_exit = artpec6_crypto_ahash_exit,
- }
- },
- /* HMAC SHA-384 */
- {
- .init = artpec6_crypto_hmac_sha384_init,
- .update = artpec6_crypto_hash_update,
- .final = artpec6_crypto_hash_final,
- .digest = artpec6_crypto_hmac_sha384_digest,
- .import = artpec6_crypto_hash_import,
- .export = artpec6_crypto_hash_export,
- .setkey = artpec6_crypto_hash_set_key,
- .halg.digestsize = SHA384_DIGEST_SIZE,
- .halg.statesize = sizeof(struct artpec6_hash_export_state),
- .halg.base = {
- .cra_name = "hmac(sha384)",
- .cra_driver_name = "artpec-hmac-sha384",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
- .cra_alignmask = 3,
- .cra_module = THIS_MODULE,
- .cra_init = artpec6_crypto_ahash_init_hmac_sha384,
- .cra_exit = artpec6_crypto_ahash_exit,
- }
- },
- /* SHA-512 */
- {
- .init = artpec6_crypto_sha512_init,
- .update = artpec6_crypto_hash_update,
- .final = artpec6_crypto_hash_final,
- .digest = artpec6_crypto_sha512_digest,
- .import = artpec6_crypto_hash_import,
- .export = artpec6_crypto_hash_export,
- .halg.digestsize = SHA512_DIGEST_SIZE,
- .halg.statesize = sizeof(struct artpec6_hash_export_state),
- .halg.base = {
- .cra_name = "sha512",
- .cra_driver_name = "artpec-sha512",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
- .cra_alignmask = 3,
- .cra_module = THIS_MODULE,
- .cra_init = artpec6_crypto_ahash_init,
- .cra_exit = artpec6_crypto_ahash_exit,
- }
- },
- /* HMAC SHA-512 */
- {
- .init = artpec6_crypto_hmac_sha512_init,
- .update = artpec6_crypto_hash_update,
- .final = artpec6_crypto_hash_final,
- .digest = artpec6_crypto_hmac_sha512_digest,
- .import = artpec6_crypto_hash_import,
- .export = artpec6_crypto_hash_export,
- .setkey = artpec6_crypto_hash_set_key,
- .halg.digestsize = SHA512_DIGEST_SIZE,
- .halg.statesize = sizeof(struct artpec6_hash_export_state),
- .halg.base = {
- .cra_name = "hmac(sha512)",
- .cra_driver_name = "artpec-hmac-sha512",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
- .cra_alignmask = 3,
- .cra_module = THIS_MODULE,
- .cra_init = artpec6_crypto_ahash_init_hmac_sha512,
- .cra_exit = artpec6_crypto_ahash_exit,
- }
- },
-};
-
/* Crypto */
static struct skcipher_alg crypto_algos[] = {
/* AES - ECB */
@@ -2984,12 +2825,6 @@ static void artpec6_crypto_init_debugfs(void)
{
dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
- if (!dbgfs_root || IS_ERR(dbgfs_root)) {
- dbgfs_root = NULL;
- pr_err("%s: Could not initialise debugfs!\n", MODULE_NAME);
- return;
- }
-
#ifdef CONFIG_FAULT_INJECTION
fault_create_debugfs_attr("fail_status_read", dbgfs_root,
&artpec6_crypto_fail_status_read);
@@ -3001,9 +2836,6 @@ static void artpec6_crypto_init_debugfs(void)
static void artpec6_crypto_free_debugfs(void)
{
- if (!dbgfs_root)
- return;
-
debugfs_remove_recursive(dbgfs_root);
dbgfs_root = NULL;
}
@@ -3104,19 +2936,10 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
goto disable_hw;
}
- if (variant != ARTPEC6_CRYPTO) {
- err = crypto_register_ahashes(artpec7_hash_algos,
- ARRAY_SIZE(artpec7_hash_algos));
- if (err) {
- dev_err(dev, "Failed to register ahashes\n");
- goto unregister_ahashes;
- }
- }
-
err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
if (err) {
dev_err(dev, "Failed to register ciphers\n");
- goto unregister_a7_ahashes;
+ goto unregister_ahashes;
}
err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
@@ -3129,10 +2952,6 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
unregister_algs:
crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
-unregister_a7_ahashes:
- if (variant != ARTPEC6_CRYPTO)
- crypto_unregister_ahashes(artpec7_hash_algos,
- ARRAY_SIZE(artpec7_hash_algos));
unregister_ahashes:
crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
disable_hw:
@@ -3148,9 +2967,6 @@ static int artpec6_crypto_remove(struct platform_device *pdev)
int irq = platform_get_irq(pdev, 0);
crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
- if (ac->variant != ARTPEC6_CRYPTO)
- crypto_unregister_ahashes(artpec7_hash_algos,
- ARRAY_SIZE(artpec7_hash_algos));
crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));
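
Several themes run through the artpec6 changes: SHA-384/512 support is removed, AEAD tag handling switches from a fixed AES_BLOCK_SIZE to crypto_aead_authsize() with a constant-time crypto_memneq() compare, and completion callbacks are moved outside the queue lock. A minimal sketch of that last pattern, with struct my_req and drain_completions() as illustrative stand-ins rather than driver API:

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_req {
	struct list_head list;
	void (*complete)(struct my_req *req);
};

static void drain_completions(spinlock_t *lock, struct list_head *pending)
{
	struct my_req *req, *n;
	LIST_HEAD(done);

	/* collect finished requests on a private list under the lock */
	spin_lock_bh(lock);
	list_for_each_entry_safe(req, n, pending, list)
		list_move_tail(&req->list, &done);
	spin_unlock_bh(lock);

	/* run callbacks unlocked, so they may submit new requests */
	list_for_each_entry_safe(req, n, &done, list) {
		list_del(&req->list);
		req->complete(req);
	}
}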
diff --git a/drivers/crypto/bcm/Makefile b/drivers/crypto/bcm/Makefile
index 13cb80eb2665..7469e19afe85 100644
--- a/drivers/crypto/bcm/Makefile
+++ b/drivers/crypto/bcm/Makefile
@@ -11,5 +11,3 @@
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) := bcm_crypto_spu.o
bcm_crypto_spu-objs := util.o spu.o spu2.o cipher.o
-
-ccflags-y += -I. -DBCMDRIVER
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index c9393ffb70ed..28f592f7e1b7 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -717,7 +717,7 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
*/
unsigned int new_data_len;
- unsigned int chunk_start = 0;
+ unsigned int __maybe_unused chunk_start = 0;
u32 db_size; /* Length of data field, incl gcm and hash padding */
int pad_len = 0; /* total pad len, including gcm, hash, stat padding */
u32 data_pad_len = 0; /* length of GCM/CCM padding */
@@ -1675,8 +1675,6 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg)
struct spu_hw *spu = &iproc_priv.spu;
struct brcm_message *mssg = msg;
struct iproc_reqctx_s *rctx;
- struct iproc_ctx_s *ctx;
- struct crypto_async_request *areq;
int err = 0;
rctx = mssg->ctx;
@@ -1686,8 +1684,6 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg)
err = -EFAULT;
goto cb_finish;
}
- areq = rctx->parent;
- ctx = rctx->ctx;
/* process the SPU status */
err = spu->spu_status_process(rctx->msg_buf.rx_stat);
@@ -1822,7 +1818,7 @@ static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
if (keylen == DES_KEY_SIZE) {
if (des_ekey(tmp, key) == 0) {
if (crypto_ablkcipher_get_flags(cipher) &
- CRYPTO_TFM_REQ_WEAK_KEY) {
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
crypto_ablkcipher_set_flags(cipher, flags);
@@ -2845,44 +2841,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
struct spu_hw *spu = &iproc_priv.spu;
struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
- struct rtattr *rta = (void *)key;
- struct crypto_authenc_key_param *param;
- const u8 *origkey = key;
- const unsigned int origkeylen = keylen;
-
- int ret = 0;
+ struct crypto_authenc_keys keys;
+ int ret;
flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
keylen);
flow_dump(" key: ", key, keylen);
- if (!RTA_OK(rta, keylen))
- goto badkey;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
+ ret = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (ret)
goto badkey;
- param = RTA_DATA(rta);
- ctx->enckeylen = be32_to_cpu(param->enckeylen);
-
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
-
- if (keylen < ctx->enckeylen)
- goto badkey;
- if (ctx->enckeylen > MAX_KEY_SIZE)
+ if (keys.enckeylen > MAX_KEY_SIZE ||
+ keys.authkeylen > MAX_KEY_SIZE)
goto badkey;
- ctx->authkeylen = keylen - ctx->enckeylen;
-
- if (ctx->authkeylen > MAX_KEY_SIZE)
- goto badkey;
+ ctx->enckeylen = keys.enckeylen;
+ ctx->authkeylen = keys.authkeylen;
- memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
+ memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
/* May end up padding auth key. So make sure it's zeroed. */
memset(ctx->authkey, 0, sizeof(ctx->authkey));
- memcpy(ctx->authkey, key, ctx->authkeylen);
+ memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
switch (ctx->alg->cipher_info.alg) {
case CIPHER_ALG_DES:
@@ -2890,9 +2870,9 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
u32 tmp[DES_EXPKEY_WORDS];
u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
- if (des_ekey(tmp, key) == 0) {
+ if (des_ekey(tmp, keys.enckey) == 0) {
if (crypto_aead_get_flags(cipher) &
- CRYPTO_TFM_REQ_WEAK_KEY) {
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
crypto_aead_set_flags(cipher, flags);
return -EINVAL;
}
@@ -2905,7 +2885,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
break;
case CIPHER_ALG_3DES:
if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
- const u32 *K = (const u32 *)key;
+ const u32 *K = (const u32 *)keys.enckey;
u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
@@ -2956,9 +2936,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
ctx->fallback_cipher->base.crt_flags |=
tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
- ret =
- crypto_aead_setkey(ctx->fallback_cipher, origkey,
- origkeylen);
+ ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
if (ret) {
flow_log(" fallback setkey() returned:%d\n", ret);
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
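
The setkey conversion above replaces hand-rolled rtattr parsing with the common helper from <crypto/authenc.h>. A minimal sketch of the same approach (example_authenc_setkey is an illustrative name):

#include <crypto/aead.h>
#include <crypto/authenc.h>

static int example_authenc_setkey(struct crypto_aead *aead,
				  const u8 *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;

	/* validates the rtattr layout and splits the combined key */
	if (crypto_authenc_extractkeys(&keys, key, keylen))
		return -EINVAL;

	/*
	 * keys.authkey/keys.authkeylen and keys.enckey/keys.enckeylen
	 * now point into the caller's blob, ready for driver use.
	 */
	return 0;
}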
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index 763c425c41ca..f6da49758954 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -23,6 +23,7 @@
#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/aead.h>
+#include <crypto/arc4.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/sha3.h>
@@ -34,9 +35,6 @@
/* Driver supports up to MAX_SPUS SPU blocks */
#define MAX_SPUS 16
-#define ARC4_MIN_KEY_SIZE 1
-#define ARC4_MAX_KEY_SIZE 256
-#define ARC4_BLOCK_SIZE 1
#define ARC4_STATE_SIZE 4
#define CCM_AES_IV_SIZE 16
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index a912c6ad3e85..d8cda5fb75ad 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -201,46 +201,6 @@ struct sdesc {
char ctx[];
};
-/* do a synchronous decrypt operation */
-int do_decrypt(char *alg_name,
- void *key_ptr, unsigned int key_len,
- void *iv_ptr, void *src_ptr, void *dst_ptr,
- unsigned int block_len)
-{
- struct scatterlist sg_in[1], sg_out[1];
- struct crypto_blkcipher *tfm =
- crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
- struct blkcipher_desc desc = {.tfm = tfm, .flags = 0 };
- int ret = 0;
- void *iv;
- int ivsize;
-
- flow_log("%s() name:%s block_len:%u\n", __func__, alg_name, block_len);
-
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- crypto_blkcipher_setkey((void *)tfm, key_ptr, key_len);
-
- sg_init_table(sg_in, 1);
- sg_set_buf(sg_in, src_ptr, block_len);
-
- sg_init_table(sg_out, 1);
- sg_set_buf(sg_out, dst_ptr, block_len);
-
- iv = crypto_blkcipher_crt(tfm)->iv;
- ivsize = crypto_blkcipher_ivsize(tfm);
- memcpy(iv, iv_ptr, ivsize);
-
- ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, block_len);
- crypto_free_blkcipher(tfm);
-
- if (ret < 0)
- pr_err("aes_decrypt failed %d\n", ret);
-
- return ret;
-}
-
/**
* do_shash() - Do a synchronous hash operation in software
* @name: The name of the hash algorithm
diff --git a/drivers/crypto/bcm/util.h b/drivers/crypto/bcm/util.h
index 712e029795f8..15c60356518a 100644
--- a/drivers/crypto/bcm/util.h
+++ b/drivers/crypto/bcm/util.h
@@ -95,12 +95,6 @@ u32 spu_msg_sg_add(struct scatterlist **to_sg,
void add_to_ctr(u8 *ctr_pos, unsigned int increment);
-/* do a synchronous decrypt operation */
-int do_decrypt(char *alg_name,
- void *key_ptr, unsigned int key_len,
- void *iv_ptr, void *src_ptr, void *dst_ptr,
- unsigned int block_len);
-
/* produce a message digest from data of length n bytes */
int do_shash(unsigned char *name, unsigned char *result,
const u8 *data1, unsigned int data1_len,
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index c4b1cade55c1..577c9844b322 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -91,6 +91,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
select CRYPTO_AEAD
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
+ select CRYPTO_DES
help
Selecting this will offload crypto for users of the
scatterlist crypto API (such as the linux native IPSec
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 92e593e2069a..9eac5099098e 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2016-2018 NXP
+ * Copyright 2016-2019 NXP
*
* Based on talitos crypto API driver.
*
@@ -766,6 +766,27 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
return 0;
}
+static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 tmp[DES3_EDE_EXPKEY_WORDS];
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+
+ if (keylen == DES3_EDE_KEY_SIZE &&
+ __des3_ede_setkey(tmp, &tfm->crt_flags, key, DES3_EDE_KEY_SIZE)) {
+ return -EINVAL;
+ }
+
+ if (!des_ekey(tmp, key) && (crypto_skcipher_get_flags(skcipher) &
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_WEAK_KEY);
+ return -EINVAL;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen);
+}
+
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
@@ -802,6 +823,8 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
* aead_edesc - s/w-extended aead descriptor
* @src_nents: number of segments in input s/w scatterlist
* @dst_nents: number of segments in output s/w scatterlist
+ * @mapped_src_nents: number of segments in input h/w link table
+ * @mapped_dst_nents: number of segments in output h/w link table
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
* @sec4_sg: pointer to h/w link table
@@ -810,6 +833,8 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
struct aead_edesc {
int src_nents;
int dst_nents;
+ int mapped_src_nents;
+ int mapped_dst_nents;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
@@ -820,6 +845,8 @@ struct aead_edesc {
* skcipher_edesc - s/w-extended skcipher descriptor
* @src_nents: number of segments in input s/w scatterlist
* @dst_nents: number of segments in output s/w scatterlist
+ * @mapped_src_nents: number of segments in input h/w link table
+ * @mapped_dst_nents: number of segments in output h/w link table
* @iv_dma: dma address of iv for checking continuity and link table
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
@@ -830,6 +857,8 @@ struct aead_edesc {
struct skcipher_edesc {
int src_nents;
int dst_nents;
+ int mapped_src_nents;
+ int mapped_dst_nents;
dma_addr_t iv_dma;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
@@ -846,7 +875,8 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
if (dst != src) {
if (src_nents)
dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
- dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ if (dst_nents)
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
} else {
dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}
@@ -961,8 +991,9 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
* The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block. This is used e.g. by the CTS mode.
*/
- scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
- ivsize, 0);
+ if (ivsize)
+ scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
+ ivsize, ivsize, 0);
kfree(edesc);
@@ -1023,11 +1054,12 @@ static void init_aead_job(struct aead_request *req,
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
if (all_contig) {
- src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
+ src_dma = edesc->mapped_src_nents ? sg_dma_address(req->src) :
+ 0;
in_options = 0;
} else {
src_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents;
+ sec4_sg_index += edesc->mapped_src_nents;
in_options = LDST_SGF;
}
@@ -1038,8 +1070,11 @@ static void init_aead_job(struct aead_request *req,
out_options = in_options;
if (unlikely(req->src != req->dst)) {
- if (edesc->dst_nents == 1) {
+ if (!edesc->mapped_dst_nents) {
+ dst_dma = 0;
+ } else if (edesc->mapped_dst_nents == 1) {
dst_dma = sg_dma_address(req->dst);
+ out_options = 0;
} else {
dst_dma = edesc->sec4_sg_dma +
sec4_sg_index *
@@ -1183,9 +1218,9 @@ static void init_skcipher_job(struct skcipher_request *req,
int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc = edesc->hw_desc;
u32 *sh_desc;
- u32 out_options = 0;
- dma_addr_t dst_dma, ptr;
- int len;
+ u32 in_options = 0, out_options = 0;
+ dma_addr_t src_dma, dst_dma, ptr;
+ int len, sec4_sg_index = 0;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
@@ -1203,21 +1238,27 @@ static void init_skcipher_job(struct skcipher_request *req,
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
- append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->cryptlen + ivsize,
- LDST_SGF);
+ if (ivsize || edesc->mapped_src_nents > 1) {
+ src_dma = edesc->sec4_sg_dma;
+ sec4_sg_index = edesc->mapped_src_nents + !!ivsize;
+ in_options = LDST_SGF;
+ } else {
+ src_dma = sg_dma_address(req->src);
+ }
+
+ append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);
if (likely(req->src == req->dst)) {
- dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
- out_options = LDST_SGF;
+ dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
+ out_options = in_options;
+ } else if (edesc->mapped_dst_nents == 1) {
+ dst_dma = sg_dma_address(req->dst);
} else {
- if (edesc->dst_nents == 1) {
- dst_dma = sg_dma_address(req->dst);
- } else {
- dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
- sizeof(struct sec4_sg_entry);
- out_options = LDST_SGF;
- }
+ dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
+ sizeof(struct sec4_sg_entry);
+ out_options = LDST_SGF;
}
+
append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
}
@@ -1289,12 +1330,19 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
mapped_src_nents = 0;
}
- mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(jrdev, "unable to map destination\n");
- dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
- return ERR_PTR(-ENOMEM);
+ /* Cover also the case of null (zero length) output data */
+ if (dst_nents) {
+ mapped_dst_nents = dma_map_sg(jrdev, req->dst,
+ dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(jrdev, "unable to map destination\n");
+ dma_unmap_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_dst_nents = 0;
}
}
@@ -1313,6 +1361,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
+ edesc->mapped_src_nents = mapped_src_nents;
+ edesc->mapped_dst_nents = mapped_dst_nents;
edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
desc_bytes;
*all_contig_ptr = !(mapped_src_nents > 1);
@@ -1586,7 +1636,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
struct skcipher_edesc *edesc;
- dma_addr_t iv_dma;
+ dma_addr_t iv_dma = 0;
u8 *iv;
int ivsize = crypto_skcipher_ivsize(skcipher);
int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
@@ -1621,7 +1671,6 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
dev_err(jrdev, "unable to map source\n");
return ERR_PTR(-ENOMEM);
}
-
mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
DMA_FROM_DEVICE);
if (unlikely(!mapped_dst_nents)) {
@@ -1631,7 +1680,10 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
}
}
- sec4_sg_ents = 1 + mapped_src_nents;
+ if (!ivsize && mapped_src_nents == 1)
+ sec4_sg_ents = 0; // no need for an input hw s/g table
+ else
+ sec4_sg_ents = mapped_src_nents + !!ivsize;
dst_sg_idx = sec4_sg_ents;
sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
@@ -1650,39 +1702,48 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
+ edesc->mapped_src_nents = mapped_src_nents;
+ edesc->mapped_dst_nents = mapped_dst_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
desc_bytes);
/* Make sure IV is located in a DMAable area */
- iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
- memcpy(iv, req->iv, ivsize);
+ if (ivsize) {
+ iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+ memcpy(iv, req->iv, ivsize);
+
+ iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents,
+ dst_nents, 0, 0, 0, 0);
+ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
- iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, iv_dma)) {
- dev_err(jrdev, "unable to map IV\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
- kfree(edesc);
- return ERR_PTR(-ENOMEM);
+ dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
}
-
- dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
- sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);
+ if (dst_sg_idx)
+ sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg +
+ !!ivsize, 0);
if (mapped_dst_nents > 1) {
sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
edesc->sec4_sg + dst_sg_idx, 0);
}
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
- dev_err(jrdev, "unable to map S/G table\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
- kfree(edesc);
- return ERR_PTR(-ENOMEM);
+ if (sec4_sg_bytes) {
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents,
+ dst_nents, iv_dma, ivsize, 0, 0);
+ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
}
edesc->iv_dma = iv_dma;
@@ -1749,8 +1810,9 @@ static int skcipher_decrypt(struct skcipher_request *req)
* The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block.
*/
- scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
- ivsize, 0);
+ if (ivsize)
+ scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
+ ivsize, ivsize, 0);
/* Create and submit job descriptor*/
init_skcipher_job(req, edesc, false);
@@ -1796,7 +1858,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-3des-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = des_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
@@ -1812,7 +1874,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-des-caam",
.cra_blocksize = DES_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = des_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = DES_KEY_SIZE,
@@ -1878,6 +1940,66 @@ static struct caam_skcipher_alg driver_algs[] = {
},
.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
},
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "ecb-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = des_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = des_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "ecb(arc4)",
+ .cra_driver_name = "ecb-arc4-caam",
+ .cra_blocksize = ARC4_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = ARC4_MIN_KEY_SIZE,
+ .max_keysize = ARC4_MAX_KEY_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB,
+ },
};
static struct caam_aead_alg driver_aeads[] = {
@@ -3337,6 +3459,7 @@ static int __init caam_algapi_init(void)
struct caam_drv_private *priv;
int i = 0, err = 0;
u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
+ u32 arc4_inst;
unsigned int md_limit = SHA512_DIGEST_SIZE;
bool registered = false;
@@ -3381,6 +3504,8 @@ static int __init caam_algapi_init(void)
CHA_ID_LS_DES_SHIFT;
aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+ arc4_inst = (cha_inst & CHA_ID_LS_ARC4_MASK) >>
+ CHA_ID_LS_ARC4_SHIFT;
ccha_inst = 0;
ptha_inst = 0;
} else {
@@ -3397,6 +3522,7 @@ static int __init caam_algapi_init(void)
md_inst = mdha & CHA_VER_NUM_MASK;
ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
+ arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK;
}
/* If MD is present, limit digest size based on LP256 */
@@ -3417,6 +3543,10 @@ static int __init caam_algapi_init(void)
if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
continue;
+ /* Skip ARC4 algorithms if not supported by device */
+ if (!arc4_inst && alg_sel == OP_ALG_ALGSEL_ARC4)
+ continue;
+
/*
* Check support for AES modes not available
* on LP devices.
@@ -3476,7 +3606,7 @@ static int __init caam_algapi_init(void)
* Skip algorithms requiring message digests
* if MD or MD size is not supported by device.
*/
- if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
+ if (is_mdha(c2_alg_sel) &&
(!md_inst || t_alg->aead.maxauthsize > md_limit))
continue;
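
The new "ecb(*)" entries have a zero-length IV, which is why the descriptor and DMA paths above grow ivsize checks. A minimal sketch of driving such a mode through the generic skcipher API (buffer and key names are illustrative; error unwinding is trimmed for brevity):

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>

static int example_ecb_encrypt(u8 *buf, const u8 *key)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_skcipher(tfm);
		return -ENOMEM;
	}

	ret = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (!ret) {
		sg_init_one(&sg, buf, AES_BLOCK_SIZE);
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					      crypto_req_done, &wait);
		/* crypto_skcipher_ivsize(tfm) == 0 for ECB: pass a NULL IV */
		skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, NULL);
		ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	}

	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return ret;
}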
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 7db1640d3577..1e1a376edc2f 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -2,7 +2,7 @@
/*
* Shared descriptors for aead, skcipher algorithms
*
- * Copyright 2016-2018 NXP
+ * Copyright 2016-2019 NXP
*/
#include "compat.h"
@@ -1396,9 +1396,11 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
set_jump_tgt_here(desc, key_jump_cmd);
- /* Load iv */
- append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+ /* Load IV, if there is one */
+ if (ivsize)
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off <<
+ LDST_OFFSET_SHIFT));
/* Load counter into CONTEXT1 reg */
if (is_rfc3686)
@@ -1462,9 +1464,11 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
set_jump_tgt_here(desc, key_jump_cmd);
- /* load IV */
- append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+ /* Load IV, if there is one */
+ if (ivsize)
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off <<
+ LDST_OFFSET_SHIFT));
/* Load counter into CONTEXT1 reg */
if (is_rfc3686)
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index c0d55310aade..7bce97884663 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -802,7 +802,8 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
if (dst != src) {
if (src_nents)
dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
- dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ if (dst_nents)
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
} else {
dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}
@@ -955,13 +956,19 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
mapped_src_nents = 0;
}
- mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(qidev, "unable to map destination\n");
- dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
+ if (dst_nents) {
+ mapped_dst_nents = dma_map_sg(qidev, req->dst,
+ dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(qidev, "unable to map destination\n");
+ dma_unmap_sg(qidev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_dst_nents = 0;
}
}
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 77f4c0045de2..185365a35772 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -25,13 +25,6 @@
#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
SHA512_DIGEST_SIZE * 2)
-#if !IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM)
-bool caam_little_end;
-EXPORT_SYMBOL(caam_little_end);
-bool caam_imx;
-EXPORT_SYMBOL(caam_imx);
-#endif
-
/*
 * This is a cache of buffers, from which the users of CAAM QI driver
* can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
@@ -151,7 +144,8 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
if (dst != src) {
if (src_nents)
dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
- dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ if (dst_nents)
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
} else {
dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}
@@ -392,13 +386,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
mapped_src_nents = 0;
}
- mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(dev, "unable to map destination\n");
- dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
+ if (dst_nents) {
+ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(dev, "unable to map destination\n");
+ dma_unmap_sg(dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_dst_nents = 0;
}
} else {
src_nents = sg_nents_for_len(req->src, req->assoclen +
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 81712aa5d0f2..3bba3cb92f03 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for ahash functions of crypto API
*
* Copyright 2011 Freescale Semiconductor, Inc.
- * Copyright 2018 NXP
+ * Copyright 2018-2019 NXP
*
* Based on caamalg.c crypto API driver.
*
@@ -98,13 +98,14 @@ struct caam_hash_ctx {
u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+ u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
dma_addr_t sh_desc_update_dma ____cacheline_aligned;
dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
+ dma_addr_t key_dma;
enum dma_data_direction dir;
struct device *jrdev;
- u8 key[CAAM_MAX_HASH_KEY_SIZE];
int ctx_len;
struct alginfo adata;
};
@@ -158,6 +159,11 @@ static inline int *alt_buflen(struct caam_hash_state *state)
return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}
+static inline bool is_cmac_aes(u32 algtype)
+{
+ return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
+ (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
+}
/* Common job descriptor seq in/out ptr routines */
/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
@@ -178,18 +184,6 @@ static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
return 0;
}
-/* Map req->result, and append seq_out_ptr command that points to it */
-static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
- u8 *result, int digestsize)
-{
- dma_addr_t dst_dma;
-
- dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
- append_seq_out_ptr(desc, dst_dma, digestsize, 0);
-
- return dst_dma;
-}
-
/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
struct sec4_sg_entry *sec4_sg,
@@ -292,6 +286,111 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
return 0;
}
+static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct device *jrdev = ctx->jrdev;
+ u32 *desc;
+
+ /* key is loaded from memory for UPDATE and FINALIZE states */
+ ctx->adata.key_dma = ctx->key_dma;
+
+ /* shared descriptor for ahash_update */
+ desc = ctx->sh_desc_update;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
+ ctx->ctx_len, ctx->ctx_len, 0);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* shared descriptor for ahash_{final,finup} */
+ desc = ctx->sh_desc_fin;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
+ digestsize, ctx->ctx_len, 0);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* key is immediate data for INIT and INITFINAL states */
+ ctx->adata.key_virt = ctx->key;
+
+ /* shared descriptor for first invocation of ahash_update */
+ desc = ctx->sh_desc_update_first;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+ ctx->ctx_len, ctx->key_dma);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* shared descriptor for ahash_digest */
+ desc = ctx->sh_desc_digest;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
+ digestsize, ctx->ctx_len, 0);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+ return 0;
+}
+
+static int acmac_set_sh_desc(struct crypto_ahash *ahash)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct device *jrdev = ctx->jrdev;
+ u32 *desc;
+
+ /* shared descriptor for ahash_update */
+ desc = ctx->sh_desc_update;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
+ ctx->ctx_len, ctx->ctx_len, 0);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+
+ /* shared descriptor for ahash_{final,finup} */
+ desc = ctx->sh_desc_fin;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
+ digestsize, ctx->ctx_len, 0);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+
+ /* shared descriptor for first invocation of ahash_update */
+ desc = ctx->sh_desc_update_first;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+ ctx->ctx_len, 0);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+
+ /* shared descriptor for ahash_digest */
+ desc = ctx->sh_desc_digest;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
+ digestsize, ctx->ctx_len, 0);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+
+ return 0;
+}
+
/* Digest the key if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 *keylen, u8 *key_out, u32 digestsize)
@@ -424,9 +523,39 @@ static int ahash_setkey(struct crypto_ahash *ahash,
return -EINVAL;
}
+static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *jrdev = ctx->jrdev;
+
+ memcpy(ctx->key, key, keylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ ctx->adata.keylen = keylen;
+
+ print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);
+
+ return axcbc_set_sh_desc(ahash);
+}
+
+static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+
+ /* key is immediate data for all cmac shared descriptors */
+ ctx->adata.key_virt = key;
+ ctx->adata.keylen = keylen;
+
+ print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ return acmac_set_sh_desc(ahash);
+}
+
/*
* ahash_edesc - s/w-extended ahash descriptor
- * @dst_dma: physical mapped address of req->result
* @sec4_sg_dma: physical mapped address of h/w link table
* @src_nents: number of segments in input scatterlist
* @sec4_sg_bytes: length of dma mapped sec4_sg space
@@ -434,7 +563,6 @@ static int ahash_setkey(struct crypto_ahash *ahash,
* @sec4_sg: h/w link table
*/
struct ahash_edesc {
- dma_addr_t dst_dma;
dma_addr_t sec4_sg_dma;
int src_nents;
int sec4_sg_bytes;
@@ -450,8 +578,6 @@ static inline void ahash_unmap(struct device *dev,
if (edesc->src_nents)
dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
- if (edesc->dst_dma)
- dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
if (edesc->sec4_sg_bytes)
dma_unmap_single(dev, edesc->sec4_sg_dma,
@@ -486,9 +612,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
int digestsize = crypto_ahash_digestsize(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
@@ -497,17 +623,14 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
if (err)
caam_jr_strstatus(jrdev, err);
- ahash_unmap(jrdev, edesc, req, digestsize);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ memcpy(req->result, state->caam_ctx, digestsize);
kfree(edesc);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- if (req->result)
- print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
#endif
req->base.complete(&req->base, err);
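
The reworked completion path above also explains why map_seq_out_ptr_result() is removed later in this patch: req->result may point at memory that is unsafe to DMA-map (for example the caller's stack), so the engine now writes the digest into the DMA-safe state->caam_ctx and the handler copies it out. In sketch form, with the names from the hunk above:

	/* digest was produced by the engine into state->caam_ctx */
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	memcpy(req->result, state->caam_ctx, digestsize);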
@@ -555,9 +678,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
int digestsize = crypto_ahash_digestsize(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
@@ -566,17 +689,14 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
if (err)
caam_jr_strstatus(jrdev, err);
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
+ memcpy(req->result, state->caam_ctx, digestsize);
kfree(edesc);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- if (req->result)
- print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
#endif
req->base.complete(&req->base, err);
@@ -688,6 +808,7 @@ static int ahash_update_ctx(struct ahash_request *req)
u8 *buf = current_buf(state);
int *buflen = current_buflen(state);
u8 *next_buf = alt_buf(state);
+ int blocksize = crypto_ahash_blocksize(ahash);
int *next_buflen = alt_buflen(state), last_buflen;
int in_len = *buflen + req->nbytes, to_hash;
u32 *desc;
@@ -696,9 +817,20 @@ static int ahash_update_ctx(struct ahash_request *req)
int ret = 0;
last_buflen = *next_buflen;
- *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ *next_buflen = in_len & (blocksize - 1);
to_hash = in_len - *next_buflen;
+ /*
+ * For XCBC and CMAC, if to_hash is a multiple of the block size,
+ * keep the last block in the internal buffer
+ */
+ if ((is_xcbc_aes(ctx->adata.algtype) ||
+ is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
+ (*next_buflen == 0)) {
+ *next_buflen = blocksize;
+ to_hash -= blocksize;
+ }
+
if (to_hash) {
src_nents = sg_nents_for_len(req->src,
req->nbytes - (*next_buflen));
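
A worked example of the XCBC/CMAC adjustment above, assuming the AES block size of 16 and input that is an exact multiple (*buflen == 0, req->nbytes == 48):

	/* in_len = 48, blocksize = 16:
	 *   next_buflen = 48 & (16 - 1) = 0;  to_hash = 48
	 * adjusted for XCBC/CMAC:
	 *   next_buflen = 16;                 to_hash = 32
	 * i.e. one full block is always retained, so the later FINALIZE
	 * operation has data left to run the MAC finalization over.
	 */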
@@ -801,7 +933,7 @@ static int ahash_update_ctx(struct ahash_request *req)
#endif
return ret;
- unmap_ctx:
+unmap_ctx:
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
kfree(edesc);
return ret;
@@ -837,7 +969,7 @@ static int ahash_final_ctx(struct ahash_request *req)
edesc->sec4_sg_bytes = sec4_sg_bytes;
ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
- edesc->sec4_sg, DMA_TO_DEVICE);
+ edesc->sec4_sg, DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
@@ -857,14 +989,7 @@ static int ahash_final_ctx(struct ahash_request *req)
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
LDST_SGF);
-
- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
- digestsize);
- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
- dev_err(jrdev, "unable to map dst\n");
- ret = -ENOMEM;
- goto unmap_ctx;
- }
+ append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -877,7 +1002,7 @@ static int ahash_final_ctx(struct ahash_request *req)
return -EINPROGRESS;
unmap_ctx:
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
kfree(edesc);
return ret;
}
@@ -931,7 +1056,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
edesc->src_nents = src_nents;
ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
- edesc->sec4_sg, DMA_TO_DEVICE);
+ edesc->sec4_sg, DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
@@ -945,13 +1070,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
- digestsize);
- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
- dev_err(jrdev, "unable to map dst\n");
- ret = -ENOMEM;
- goto unmap_ctx;
- }
+ append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -964,7 +1083,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
return -EINPROGRESS;
unmap_ctx:
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
kfree(edesc);
return ret;
}
@@ -1023,10 +1142,8 @@ static int ahash_digest(struct ahash_request *req)
desc = edesc->hw_desc;
- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
- digestsize);
- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
- dev_err(jrdev, "unable to map dst\n");
+ ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
+ if (ret) {
ahash_unmap(jrdev, edesc, req, digestsize);
kfree(edesc);
return -ENOMEM;
@@ -1041,7 +1158,7 @@ static int ahash_digest(struct ahash_request *req)
if (!ret) {
ret = -EINPROGRESS;
} else {
- ahash_unmap(jrdev, edesc, req, digestsize);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
kfree(edesc);
}
@@ -1072,20 +1189,20 @@ static int ahash_final_no_ctx(struct ahash_request *req)
desc = edesc->hw_desc;
- state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, state->buf_dma)) {
- dev_err(jrdev, "unable to map src\n");
- goto unmap;
- }
+ if (buflen) {
+ state->buf_dma = dma_map_single(jrdev, buf, buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, state->buf_dma)) {
+ dev_err(jrdev, "unable to map src\n");
+ goto unmap;
+ }
- append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+ append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+ }
- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
- digestsize);
- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
- dev_err(jrdev, "unable to map dst\n");
+ ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
+ if (ret)
goto unmap;
- }
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1096,7 +1213,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
if (!ret) {
ret = -EINPROGRESS;
} else {
- ahash_unmap(jrdev, edesc, req, digestsize);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
kfree(edesc);
}
@@ -1119,6 +1236,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
GFP_KERNEL : GFP_ATOMIC;
u8 *buf = current_buf(state);
int *buflen = current_buflen(state);
+ int blocksize = crypto_ahash_blocksize(ahash);
u8 *next_buf = alt_buf(state);
int *next_buflen = alt_buflen(state);
int in_len = *buflen + req->nbytes, to_hash;
@@ -1127,9 +1245,20 @@ static int ahash_update_no_ctx(struct ahash_request *req)
u32 *desc;
int ret = 0;
- *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ *next_buflen = in_len & (blocksize - 1);
to_hash = in_len - *next_buflen;
+ /*
+ * For XCBC and CMAC, if to_hash is a multiple of the block size,
+ * keep the last block in the internal buffer
+ */
+ if ((is_xcbc_aes(ctx->adata.algtype) ||
+ is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
+ (*next_buflen == 0)) {
+ *next_buflen = blocksize;
+ to_hash -= blocksize;
+ }
+
if (to_hash) {
src_nents = sg_nents_for_len(req->src,
req->nbytes - *next_buflen);
@@ -1295,12 +1424,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
goto unmap;
}
- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
- digestsize);
- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
- dev_err(jrdev, "unable to map dst\n");
+ ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
+ if (ret)
goto unmap;
- }
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1311,7 +1437,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
if (!ret) {
ret = -EINPROGRESS;
} else {
- ahash_unmap(jrdev, edesc, req, digestsize);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
kfree(edesc);
}
@@ -1335,15 +1461,26 @@ static int ahash_update_first(struct ahash_request *req)
u8 *next_buf = alt_buf(state);
int *next_buflen = alt_buflen(state);
int to_hash;
+ int blocksize = crypto_ahash_blocksize(ahash);
u32 *desc;
int src_nents, mapped_nents;
struct ahash_edesc *edesc;
int ret = 0;
- *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
- 1);
+ *next_buflen = req->nbytes & (blocksize - 1);
to_hash = req->nbytes - *next_buflen;
+ /*
+ * For XCBC and CMAC, if to_hash is a multiple of the block size,
+ * keep the last block in the internal buffer
+ */
+ if ((is_xcbc_aes(ctx->adata.algtype) ||
+ is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
+ (*next_buflen == 0)) {
+ *next_buflen = blocksize;
+ to_hash -= blocksize;
+ }
+
if (to_hash) {
src_nents = sg_nents_for_len(req->src,
req->nbytes - *next_buflen);
@@ -1651,6 +1788,44 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_MD5,
+ }, {
+ .hmac_name = "xcbc(aes)",
+ .hmac_driver_name = "xcbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = axcbc_setkey,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
+ }, {
+ .hmac_name = "cmac(aes)",
+ .hmac_driver_name = "cmac-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = acmac_setkey,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
},
};
@@ -1692,7 +1867,32 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
}
priv = dev_get_drvdata(ctx->jrdev->parent);
- ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+ if (is_xcbc_aes(caam_hash->alg_type)) {
+ ctx->dir = DMA_TO_DEVICE;
+ ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
+ ctx->ctx_len = 48;
+
+ ctx->key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
+ ARRAY_SIZE(ctx->key),
+ DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
+ dev_err(ctx->jrdev, "unable to map key\n");
+ caam_jr_free(ctx->jrdev);
+ return -ENOMEM;
+ }
+ } else if (is_cmac_aes(caam_hash->alg_type)) {
+ ctx->dir = DMA_TO_DEVICE;
+ ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
+ ctx->ctx_len = 32;
+ } else {
+ ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
+ ctx->ctx_len = runninglen[(ctx->adata.algtype &
+ OP_ALG_ALGSEL_SUBMASK) >>
+ OP_ALG_ALGSEL_SHIFT];
+ }
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
offsetof(struct caam_hash_ctx,
@@ -1700,6 +1900,13 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map shared descriptors\n");
+
+ if (is_xcbc_aes(caam_hash->alg_type))
+ dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
+ ARRAY_SIZE(ctx->key),
+ DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
+
caam_jr_free(ctx->jrdev);
return -ENOMEM;
}
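
The ctx_len values selected above follow from what the shared descriptor saves as context (see the "Save context" comment in caamhash_desc.c further down), with AES_BLOCK_SIZE == 16:

	/* xcbc: partial hash + K2 + K3      -> 3 * 16 = 48 bytes
	 * cmac: partial hash + L = E(K, 0)  -> 2 * 16 = 32 bytes
	 */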
@@ -1713,16 +1920,14 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
sh_desc_digest);
- /* copy descriptor header template value */
- ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
-
- ctx->ctx_len = runninglen[(ctx->adata.algtype &
- OP_ALG_ALGSEL_SUBMASK) >>
- OP_ALG_ALGSEL_SHIFT];
-
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct caam_hash_state));
- return ahash_set_sh_desc(ahash);
+
+ /*
+ * For keyed hash algorithms, shared descriptors
+ * will be created later in the setkey() callback
+ */
+ return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
@@ -1730,9 +1935,12 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
- offsetof(struct caam_hash_ctx,
- sh_desc_update_dma),
+ offsetof(struct caam_hash_ctx, key),
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (is_xcbc_aes(ctx->adata.algtype))
+ dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
+ ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
@@ -1863,14 +2071,16 @@ static int __init caam_algapi_hash_init(void)
struct caam_hash_template *alg = driver_hash + i;
/* If MD size is not supported by device, skip registration */
- if (alg->template_ahash.halg.digestsize > md_limit)
+ if (is_mdha(alg->alg_type) &&
+ alg->template_ahash.halg.digestsize > md_limit)
continue;
/* register hmac version */
t_alg = caam_hash_alloc(alg, true);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- pr_warn("%s alg allocation failed\n", alg->driver_name);
+ pr_warn("%s alg allocation failed\n",
+ alg->hmac_driver_name);
continue;
}
@@ -1883,6 +2093,9 @@ static int __init caam_algapi_hash_init(void)
} else
list_add_tail(&t_alg->entry, &hash_list);
+ if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
+ continue;
+
/* register unkeyed version */
t_alg = caam_hash_alloc(alg, false);
if (IS_ERR(t_alg)) {
diff --git a/drivers/crypto/caam/caamhash_desc.c b/drivers/crypto/caam/caamhash_desc.c
index a12f7959a2c3..71d018343ee4 100644
--- a/drivers/crypto/caam/caamhash_desc.c
+++ b/drivers/crypto/caam/caamhash_desc.c
@@ -2,7 +2,7 @@
/*
* Shared descriptors for ahash algorithms
*
- * Copyright 2017 NXP
+ * Copyright 2017-2019 NXP
*/
#include "compat.h"
@@ -75,6 +75,72 @@ void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
}
EXPORT_SYMBOL(cnstr_shdsc_ahash);
+/**
+ * cnstr_shdsc_sk_hash - shared descriptor for symmetric key cipher-based
+ * hash algorithms
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions.
+ * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINAL, UPDATE}
+ * @digestsize: algorithm's digest size
+ * @ctx_len: size of Context Register
+ * @key_dma: I/O Virtual Address of the key
+ */
+void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state,
+ int digestsize, int ctx_len, dma_addr_t key_dma)
+{
+ u32 *skip_key_load;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+
+ /* Skip loading of key, context if already shared */
+ skip_key_load = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+
+ if (state == OP_ALG_AS_INIT || state == OP_ALG_AS_INITFINAL) {
+ append_key_as_imm(desc, adata->key_virt, adata->keylen,
+ adata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ } else { /* UPDATE, FINALIZE */
+ if (is_xcbc_aes(adata->algtype))
+ /* Load K1 */
+ append_key(desc, adata->key_dma, adata->keylen,
+ CLASS_1 | KEY_DEST_CLASS_REG | KEY_ENC);
+ else /* CMAC */
+ append_key_as_imm(desc, adata->key_virt, adata->keylen,
+ adata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ /* Restore context */
+ append_seq_load(desc, ctx_len, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+ }
+
+ set_jump_tgt_here(desc, skip_key_load);
+
+ /* Class 1 operation */
+ append_operation(desc, adata->algtype | state | OP_ALG_ENCRYPT);
+
+ /*
+ * Load from buf and/or src and write to req->result or state->context
+ * Calculate remaining bytes to read
+ */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Read remaining bytes */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_LAST1 |
+ FIFOLD_TYPE_MSG | FIFOLDST_VLF);
+
+ /*
+ * Save context:
+ * - xcbc: partial hash, keys K2 and K3
+ * - cmac: partial hash, constant L = E(K,0)
+ */
+ append_seq_store(desc, digestsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+ if (is_xcbc_aes(adata->algtype) && state == OP_ALG_AS_INIT)
+ /* Save K1 */
+ append_fifo_store(desc, key_dma, adata->keylen,
+ LDST_CLASS_1_CCB | FIFOST_TYPE_KEY_KEK);
+}
+EXPORT_SYMBOL(cnstr_shdsc_sk_hash);
+
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
MODULE_AUTHOR("NXP Semiconductors");
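
A condensed usage sketch of the new helper, mirroring axcbc_set_sh_desc() in caamhash.c above (not a complete driver sequence):

	/* INIT/INITFINAL: key is immediate data; for XCBC, key_dma is where
	 * the descriptor stores the derived K1 for later reloading */
	ctx->adata.key_virt = ctx->key;
	cnstr_shdsc_sk_hash(ctx->sh_desc_update_first, &ctx->adata,
			    OP_ALG_AS_INIT, ctx->ctx_len, ctx->ctx_len,
			    ctx->key_dma);

	/* UPDATE/FINALIZE: XCBC reloads K1 from memory via adata.key_dma */
	ctx->adata.key_dma = ctx->key_dma;
	cnstr_shdsc_sk_hash(ctx->sh_desc_update, &ctx->adata,
			    OP_ALG_AS_UPDATE, ctx->ctx_len, ctx->ctx_len, 0);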
diff --git a/drivers/crypto/caam/caamhash_desc.h b/drivers/crypto/caam/caamhash_desc.h
index 631fc1ac312c..6947ee1f200c 100644
--- a/drivers/crypto/caam/caamhash_desc.h
+++ b/drivers/crypto/caam/caamhash_desc.h
@@ -15,7 +15,15 @@
#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+static inline bool is_xcbc_aes(u32 algtype)
+{
+ return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
+ (OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC);
+}
+
void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
int digestsize, int ctx_len, bool import_ctx, int era);
+void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state,
+ int digestsize, int ctx_len, dma_addr_t key_dma);
#endif /* _CAAMHASH_DESC_H_ */
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 87d9efe4c7aa..8639b2df0371 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -43,6 +43,7 @@
#include <crypto/akcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
+#include <crypto/arc4.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/rsa.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 16bbc72f041a..858bdc9ab4a3 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -18,12 +18,8 @@
#include "desc_constr.h"
#include "ctrl.h"
-bool caam_little_end;
-EXPORT_SYMBOL(caam_little_end);
bool caam_dpaa2;
EXPORT_SYMBOL(caam_dpaa2);
-bool caam_imx;
-EXPORT_SYMBOL(caam_imx);
#ifdef CONFIG_CAAM_QI
#include "qi.h"
@@ -863,27 +859,18 @@ static int caam_probe(struct platform_device *pdev)
/* Internal covering keys (useful in non-secure mode only) */
ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
- ctrlpriv->ctl_kek = debugfs_create_blob("kek",
- S_IRUSR |
- S_IRGRP | S_IROTH,
- ctrlpriv->ctl,
- &ctrlpriv->ctl_kek_wrap);
+ debugfs_create_blob("kek", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
+ &ctrlpriv->ctl_kek_wrap);
ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
- ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
- S_IRUSR |
- S_IRGRP | S_IROTH,
- ctrlpriv->ctl,
- &ctrlpriv->ctl_tkek_wrap);
+ debugfs_create_blob("tkek", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
+ &ctrlpriv->ctl_tkek_wrap);
ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
- ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
- S_IRUSR |
- S_IRGRP | S_IROTH,
- ctrlpriv->ctl,
- &ctrlpriv->ctl_tdsk_wrap);
+ debugfs_create_blob("tdsk", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
+ &ctrlpriv->ctl_tdsk_wrap);
#endif
return 0;
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index ec10230178c5..4b6854bf896a 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -1155,6 +1155,7 @@
#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_CHA_MDHA (0x40 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT)
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 7e8d690f2827..21a70fd32f5d 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -50,6 +50,12 @@ void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
#endif /* DEBUG */
EXPORT_SYMBOL(caam_dump_sg);
+bool caam_little_end;
+EXPORT_SYMBOL(caam_little_end);
+
+bool caam_imx;
+EXPORT_SYMBOL(caam_imx);
+
static const struct {
u8 value;
const char *error_text;
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index 67ea94079837..8c6b83e02a70 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -7,6 +7,9 @@
#ifndef CAAM_ERROR_H
#define CAAM_ERROR_H
+
+#include "desc.h"
+
#define CAAM_ERROR_STR_MAX 302
void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
@@ -17,4 +20,10 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
int rowsize, int groupsize, struct scatterlist *sg,
size_t tlen, bool ascii);
+
+static inline bool is_mdha(u32 algtype)
+{
+ return (algtype & OP_ALG_ALGSEL_MASK & ~OP_ALG_ALGSEL_SUBMASK) ==
+ OP_ALG_CHA_MDHA;
+}
#endif /* CAAM_ERROR_H */
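
A standalone illustration of the is_mdha() test (selector values taken from desc.h: MD5 is 0x40, SHA-1 0x41, SHA-224 0x42, AES 0x10; the masks are assumed to be 0xff and 0x0f shifted by OP_ALG_ALGSEL_SHIFT). Clearing the ALGSEL sub-field maps every MDHA digest onto OP_ALG_CHA_MDHA, while the AES-based MACs fail the test, which is why the registration loop above applies the md_limit check only to MDHA algorithms:

	#include <stdio.h>

	#define OP_ALG_ALGSEL_SHIFT	16
	#define OP_ALG_ALGSEL_MASK	(0xff << OP_ALG_ALGSEL_SHIFT)
	#define OP_ALG_ALGSEL_SUBMASK	(0x0f << OP_ALG_ALGSEL_SHIFT)
	#define OP_ALG_CHA_MDHA		(0x40 << OP_ALG_ALGSEL_SHIFT)
	#define OP_ALG_ALGSEL_AES	(0x10 << OP_ALG_ALGSEL_SHIFT)
	#define OP_ALG_ALGSEL_SHA224	(0x42 << OP_ALG_ALGSEL_SHIFT)

	static int is_mdha(unsigned int algtype)
	{
		return (algtype & OP_ALG_ALGSEL_MASK &
			~OP_ALG_ALGSEL_SUBMASK) == OP_ALG_CHA_MDHA;
	}

	int main(void)
	{
		printf("sha224: %d\n", is_mdha(OP_ALG_ALGSEL_SHA224)); /* 1 */
		printf("aes:    %d\n", is_mdha(OP_ALG_ALGSEL_AES));    /* 0 */
		return 0;
	}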
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index babc78abd155..5869ad58d497 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -106,7 +106,6 @@ struct caam_drv_private {
struct dentry *dfs_root;
struct dentry *ctl; /* controller dir */
struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
- struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
#endif
};
diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
index 0196b992280f..848ec93d4333 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
@@ -55,31 +55,14 @@ void nitrox_debugfs_exit(struct nitrox_device *ndev)
ndev->debugfs_dir = NULL;
}
-int nitrox_debugfs_init(struct nitrox_device *ndev)
+void nitrox_debugfs_init(struct nitrox_device *ndev)
{
- struct dentry *dir, *f;
+ struct dentry *dir;
dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!dir)
- return -ENOMEM;
ndev->debugfs_dir = dir;
- f = debugfs_create_file("firmware", 0400, dir, ndev,
- &firmware_fops);
- if (!f)
- goto err;
- f = debugfs_create_file("device", 0400, dir, ndev,
- &device_fops);
- if (!f)
- goto err;
- f = debugfs_create_file("stats", 0400, dir, ndev,
- &stats_fops);
- if (!f)
- goto err;
-
- return 0;
-
-err:
- nitrox_debugfs_exit(ndev);
- return -ENODEV;
+ debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops);
+ debugfs_create_file("device", 0400, dir, ndev, &device_fops);
+ debugfs_create_file("stats", 0400, dir, ndev, &stats_fops);
}
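
This simplification (repeated in the zip, ccp and ccree hunks below) relies on the debugfs convention that creation failures are non-fatal and should not be checked, so init paths can return void. A generic sketch, with the foo_* names purely hypothetical:

	#include <linux/debugfs.h>

	void foo_debugfs_init(struct foo_device *fdev)
	{
		/* no error checks: a debugfs failure must not fail the driver */
		fdev->dir = debugfs_create_dir("foo", NULL);
		debugfs_create_file("stats", 0400, fdev->dir, fdev,
				    &foo_stats_fops);
	}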
diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.h b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
index a8d85ffa619c..f177b79bbab0 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
@@ -5,12 +5,11 @@
#include "nitrox_dev.h"
#ifdef CONFIG_DEBUG_FS
-int nitrox_debugfs_init(struct nitrox_device *ndev);
+void nitrox_debugfs_init(struct nitrox_device *ndev);
void nitrox_debugfs_exit(struct nitrox_device *ndev);
#else
-static inline int nitrox_debugfs_init(struct nitrox_device *ndev)
+static inline void nitrox_debugfs_init(struct nitrox_device *ndev)
{
- return 0;
}
static inline void nitrox_debugfs_exit(struct nitrox_device *ndev)
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index 014e9863c20e..faa78f651238 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -404,9 +404,7 @@ static int nitrox_probe(struct pci_dev *pdev,
if (err)
goto pf_hw_fail;
- err = nitrox_debugfs_init(ndev);
- if (err)
- goto pf_hw_fail;
+ nitrox_debugfs_init(ndev);
/* clear the statistics */
atomic64_set(&ndev->stats.posted, 0);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index e34e4df8fd24..fe070d75c842 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -567,10 +567,10 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
/* ORH error code */
err = READ_ONCE(*sr->resp.orh) & 0xff;
- softreq_destroy(sr);
if (sr->callback)
sr->callback(sr->cb_arg, err);
+ softreq_destroy(sr);
req_completed++;
}
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
index be055b9547f6..e6b09e784e66 100644
--- a/drivers/crypto/cavium/zip/zip_main.c
+++ b/drivers/crypto/cavium/zip/zip_main.c
@@ -618,41 +618,23 @@ static const struct file_operations zip_regs_fops = {
/* Root directory for thunderx_zip debugfs entry */
static struct dentry *zip_debugfs_root;
-static int __init zip_debugfs_init(void)
+static void __init zip_debugfs_init(void)
{
- struct dentry *zip_stats, *zip_clear, *zip_regs;
-
if (!debugfs_initialized())
- return -ENODEV;
+ return;
zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL);
- if (!zip_debugfs_root)
- return -ENOMEM;
/* Creating files for entries inside thunderx_zip directory */
- zip_stats = debugfs_create_file("zip_stats", 0444,
- zip_debugfs_root,
- NULL, &zip_stats_fops);
- if (!zip_stats)
- goto failed_to_create;
-
- zip_clear = debugfs_create_file("zip_clear", 0444,
- zip_debugfs_root,
- NULL, &zip_clear_fops);
- if (!zip_clear)
- goto failed_to_create;
-
- zip_regs = debugfs_create_file("zip_regs", 0444,
- zip_debugfs_root,
- NULL, &zip_regs_fops);
- if (!zip_regs)
- goto failed_to_create;
+ debugfs_create_file("zip_stats", 0444, zip_debugfs_root, NULL,
+ &zip_stats_fops);
- return 0;
+ debugfs_create_file("zip_clear", 0444, zip_debugfs_root, NULL,
+ &zip_clear_fops);
+
+ debugfs_create_file("zip_regs", 0444, zip_debugfs_root, NULL,
+ &zip_regs_fops);
-failed_to_create:
- debugfs_remove_recursive(zip_debugfs_root);
- return -ENOENT;
}
static void __exit zip_debugfs_exit(void)
@@ -661,13 +643,8 @@ static void __exit zip_debugfs_exit(void)
}
#else
-static int __init zip_debugfs_init(void)
-{
- return 0;
-}
-
+static void __init zip_debugfs_init(void) { }
static void __exit zip_debugfs_exit(void) { }
-
#endif
/* debugfs - end */
@@ -691,17 +668,10 @@ static int __init zip_init_module(void)
}
/* comp-decomp statistics are handled with debugfs interface */
- ret = zip_debugfs_init();
- if (ret < 0) {
- zip_err("ZIP: debugfs initialization failed\n");
- goto err_crypto_unregister;
- }
+ zip_debugfs_init();
return ret;
-err_crypto_unregister:
- zip_unregister_compression_device();
-
err_pci_unregister:
pci_unregister_driver(&zip_driver);
return ret;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 9108015e56cc..f6e252c1d6fb 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) AES CMAC crypto API support
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index ae87b741f9d5..c2ff551d215b 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -57,7 +57,7 @@ static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
!((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
- (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 2ca64bb57d2e..10a61cd54fce 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) SHA crypto API support
*
- * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
index 1a734bd2070a..4bd26af7098d 100644
--- a/drivers/crypto/ccp/ccp-debugfs.c
+++ b/drivers/crypto/ccp/ccp-debugfs.c
@@ -286,10 +286,7 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
{
struct ccp_cmd_queue *cmd_q;
char name[MAX_NAME_LEN + 1];
- struct dentry *debugfs_info;
- struct dentry *debugfs_stats;
struct dentry *debugfs_q_instance;
- struct dentry *debugfs_q_stats;
int i;
if (!debugfs_initialized())
@@ -299,24 +296,14 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
if (!ccp_debugfs_dir)
ccp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
mutex_unlock(&ccp_debugfs_lock);
- if (!ccp_debugfs_dir)
- return;
ccp->debugfs_instance = debugfs_create_dir(ccp->name, ccp_debugfs_dir);
- if (!ccp->debugfs_instance)
- goto err;
- debugfs_info = debugfs_create_file("info", 0400,
- ccp->debugfs_instance, ccp,
- &ccp_debugfs_info_ops);
- if (!debugfs_info)
- goto err;
+ debugfs_create_file("info", 0400, ccp->debugfs_instance, ccp,
+ &ccp_debugfs_info_ops);
- debugfs_stats = debugfs_create_file("stats", 0600,
- ccp->debugfs_instance, ccp,
- &ccp_debugfs_stats_ops);
- if (!debugfs_stats)
- goto err;
+ debugfs_create_file("stats", 0600, ccp->debugfs_instance, ccp,
+ &ccp_debugfs_stats_ops);
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
@@ -325,21 +312,12 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
debugfs_q_instance =
debugfs_create_dir(name, ccp->debugfs_instance);
- if (!debugfs_q_instance)
- goto err;
-
- debugfs_q_stats =
- debugfs_create_file("stats", 0600,
- debugfs_q_instance, cmd_q,
- &ccp_debugfs_queue_ops);
- if (!debugfs_q_stats)
- goto err;
+
+ debugfs_create_file("stats", 0600, debugfs_q_instance, cmd_q,
+ &ccp_debugfs_queue_ops);
}
return;
-
-err:
- debugfs_remove_recursive(ccp->debugfs_instance);
}
void ccp5_debugfs_destroy(void)
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 0ea43cdeb05f..267a367bd076 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index b16be8a11d92..638f138debd7 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -1,7 +1,7 @@
/*
* AMD Platform Security Processor (PSP) interface
*
- * Copyright (C) 2016-2017 Advanced Micro Devices, Inc.
+ * Copyright (C) 2016,2018 Advanced Micro Devices, Inc.
*
* Author: Brijesh Singh <brijesh.singh@amd.com>
*
@@ -437,6 +437,7 @@ static int sev_get_api_version(void)
psp_master->api_major = status->api_major;
psp_master->api_minor = status->api_minor;
psp_master->build = status->build;
+ psp_master->sev_state = status->state;
return 0;
}
@@ -964,6 +965,21 @@ void psp_pci_init(void)
if (sev_get_api_version())
goto err;
+ /*
+ * If the platform is not in the UNINIT state, then a firmware upgrade
+ * and/or the platform INIT command will fail. These commands require the
+ * UNINIT state.
+ *
+ * In a normal boot we should never run into a case where the firmware
+ * is not in the UNINIT state on boot. But in case of a kexec boot, a reboot
+ * may not go through a typical shutdown sequence and may leave the
+ * firmware in INIT or WORKING state.
+ */
+
+ if (psp_master->sev_state != SEV_STATE_UNINIT) {
+ sev_platform_shutdown(NULL);
+ psp_master->sev_state = SEV_STATE_UNINIT;
+ }
+
if (SEV_VERSION_GREATER_OR_EQUAL(0, 15) &&
sev_update_firmware(psp_master->dev) == 0)
sev_get_api_version();
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index 8b53a9674ecb..f5afeccf42a1 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -1,7 +1,7 @@
/*
* AMD Platform Security Processor (PSP) interface driver
*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ * Copyright (C) 2017-2018 Advanced Micro Devices, Inc.
*
* Author: Brijesh Singh <brijesh.singh@amd.com>
*
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
index e0459002eb71..b2879767fc98 100644
--- a/drivers/crypto/ccp/sp-dev.c
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -1,7 +1,7 @@
/*
* AMD Secure Processor driver
*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ * Copyright (C) 2017-2018 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 14398cad1625..5b0790025db3 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -1,7 +1,7 @@
/*
* AMD Secure Processor driver
*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ * Copyright (C) 2017-2018 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 7da93e9bebed..6d730d3e3f6f 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -1,7 +1,7 @@
/*
* AMD Secure Processor device driver
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index b75dc7db2d4a..d24228efbaaa 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -1,7 +1,7 @@
/*
* AMD Secure Processor device driver
*
- * Copyright (C) 2014,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2014,2018 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index f2643cda45db..a3527c00b29a 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -549,13 +549,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct rtattr *rta = (struct rtattr *)key;
struct cc_crypto_req cc_req = {};
- struct crypto_authenc_key_param *param;
struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
- int rc = -EINVAL;
unsigned int seq_len = 0;
struct device *dev = drvdata_to_dev(ctx->drvdata);
+ const u8 *enckey, *authkey;
+ int rc;
dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
@@ -563,35 +562,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
/* STAT_PHASE_0: Init and sanity checks */
if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
- if (!RTA_OK(rta, keylen))
- goto badkey;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
- goto badkey;
- param = RTA_DATA(rta);
- ctx->enc_keylen = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
- if (keylen < ctx->enc_keylen)
+ struct crypto_authenc_keys keys;
+
+ rc = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (rc)
goto badkey;
- ctx->auth_keylen = keylen - ctx->enc_keylen;
+ enckey = keys.enckey;
+ authkey = keys.authkey;
+ ctx->enc_keylen = keys.enckeylen;
+ ctx->auth_keylen = keys.authkeylen;
if (ctx->cipher_mode == DRV_CIPHER_CTR) {
/* the nonce is stored in the bytes at the end of the key */
+ rc = -EINVAL;
if (ctx->enc_keylen <
(AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
goto badkey;
/* Copy nonce from last 4 bytes in CTR key to
* first 4 bytes in CTR IV
*/
- memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
- ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
- CTR_RFC3686_NONCE_SIZE);
+ memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
+ CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
/* Set CTR key size */
ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
}
} else { /* non-authenc - has just one key */
+ enckey = key;
+ authkey = NULL;
ctx->enc_keylen = keylen;
ctx->auth_keylen = 0;
}
@@ -603,13 +600,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
/* STAT_PHASE_1: Copy key to ctx */
/* Get key material */
- memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
+ memcpy(ctx->enckey, enckey, ctx->enc_keylen);
if (ctx->enc_keylen == 24)
memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
- memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
+ memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
+ ctx->auth_keylen);
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
- rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
+ rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
if (rc)
goto badkey;
}
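
For reference, the in-tree helper now used above (declared in <crypto/authenc.h>) replaces the open-coded rtattr walk; this is a sketch of the call, not the full setkey path:

	struct crypto_authenc_keys keys;
	int rc;

	rc = crypto_authenc_extractkeys(&keys, key, keylen);
	if (rc)
		goto badkey;
	/* keys.authkey/keys.authkeylen and keys.enckey/keys.enckeylen are
	 * validated views into the original key blob; nothing is copied */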
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index dd948e1df9e5..0ee1c52da0a4 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -156,8 +156,11 @@ static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
/* Verify there is no memory overflow */
new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
- if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
+ if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
+ dev_err(dev, "Too many mlli entries. current %d max %d\n",
+ new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
return -ENOMEM;
+ }
/* handle a buffer longer than 64 kbytes */
while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
@@ -511,10 +514,8 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
/* Map the src SGL */
rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
- if (rc) {
- rc = -ENOMEM;
+ if (rc)
goto cipher_exit;
- }
if (mapped_nents > 1)
req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
@@ -528,12 +529,11 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
}
} else {
/* Map the dst sg */
- if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
- &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dummy, &mapped_nents)) {
- rc = -ENOMEM;
+ rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+ &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents);
+ if (rc)
goto cipher_exit;
- }
if (mapped_nents > 1)
req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
@@ -614,10 +614,10 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
hw_iv_size, DMA_BIDIRECTIONAL);
}
- /*In case a pool was set, a table was
- *allocated and should be released
- */
- if (areq_ctx->mlli_params.curr_pool) {
+ /* Release pool */
+ if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+ areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
+ (areq_ctx->mlli_params.mlli_virt_addr)) {
dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
&areq_ctx->mlli_params.mlli_dma_addr,
areq_ctx->mlli_params.mlli_virt_addr);
@@ -1078,10 +1078,8 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
&areq_ctx->dst.nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
&dst_mapped_nents);
- if (rc) {
- rc = -ENOMEM;
+ if (rc)
goto chain_data_exit;
- }
}
dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
@@ -1235,11 +1233,10 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
}
areq_ctx->ccm_iv0_dma_addr = dma_addr;
- if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
- &sg_data, req->assoclen)) {
- rc = -ENOMEM;
+ rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
+ &sg_data, req->assoclen);
+ if (rc)
goto aead_map_failure;
- }
}
if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
@@ -1299,10 +1296,8 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
(LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
LLI_MAX_NUM_OF_DATA_ENTRIES),
&dummy, &mapped_nents);
- if (rc) {
- rc = -ENOMEM;
+ if (rc)
goto aead_map_failure;
- }
if (areq_ctx->is_single_pass) {
/*
@@ -1386,6 +1381,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
struct buffer_array sg_data;
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+ int rc = 0;
u32 dummy = 0;
u32 mapped_nents = 0;
@@ -1405,18 +1401,18 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
/* TODO: copy data in case the buffer is large enough for the operation */
/* map the previous buffer */
if (*curr_buff_cnt) {
- if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
- &sg_data)) {
- return -ENOMEM;
- }
+ rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+ &sg_data);
+ if (rc)
+ return rc;
}
if (src && nbytes > 0 && do_update) {
- if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
- &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dummy, &mapped_nents)) {
+ rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+ &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents);
+ if (rc)
goto unmap_curr_buff;
- }
if (src && mapped_nents == 1 &&
areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
memcpy(areq_ctx->buff_sg, src,
@@ -1435,7 +1431,8 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
/* add the src data to the sg_data */
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
0, true, &areq_ctx->mlli_nents);
- if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+ if (rc)
goto fail_unmap_din;
}
/* change the buffer index for the unmap function */
@@ -1451,7 +1448,7 @@ unmap_curr_buff:
if (*curr_buff_cnt)
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
- return -ENOMEM;
+ return rc;
}
int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
@@ -1470,6 +1467,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
struct buffer_array sg_data;
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
unsigned int swap_index = 0;
+ int rc = 0;
u32 dummy = 0;
u32 mapped_nents = 0;
@@ -1514,21 +1512,21 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
}
if (*curr_buff_cnt) {
- if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
- &sg_data)) {
- return -ENOMEM;
- }
+ rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+ &sg_data);
+ if (rc)
+ return rc;
/* change the buffer index for next operation */
swap_index = 1;
}
if (update_data_len > *curr_buff_cnt) {
- if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
- DMA_TO_DEVICE, &areq_ctx->in_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
- &mapped_nents)) {
+ rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
+ DMA_TO_DEVICE, &areq_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+ &mapped_nents);
+ if (rc)
goto unmap_curr_buff;
- }
if (mapped_nents == 1 &&
areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
/* only one entry in the SG and no previous data */
@@ -1548,7 +1546,8 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
(update_data_len - *curr_buff_cnt), 0, true,
&areq_ctx->mlli_nents);
- if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+ if (rc)
goto fail_unmap_din;
}
areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
@@ -1562,7 +1561,7 @@ unmap_curr_buff:
if (*curr_buff_cnt)
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
- return -ENOMEM;
+ return rc;
}
void cc_unmap_hash_request(struct device *dev, void *ctx,
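
The recurring pattern in this file: the mapping helpers already return a meaningful error code, so callers now propagate it instead of collapsing every failure into -ENOMEM. In sketch form:

	rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE, &areq_ctx->in_nents,
		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
	if (rc)
		goto unmap_curr_buff;	/* rc, not a hard-coded -ENOMEM */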
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index cc92b031fad1..5e3361a363b5 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -352,7 +352,8 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
dev_dbg(dev, "weak 3DES key");
return -EINVAL;
} else if (!des_ekey(tmp, key) &&
- (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ (crypto_tfm_get_flags(tfm) &
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
dev_dbg(dev, "weak DES key");
return -EINVAL;
@@ -652,6 +653,8 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
unsigned int len;
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+
switch (ctx_p->cipher_mode) {
case DRV_CIPHER_CBC:
/*
@@ -681,7 +684,6 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
break;
}
- cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
kzfree(req_ctx->iv);
skcipher_request_complete(req, err);
@@ -799,7 +801,8 @@ static int cc_cipher_decrypt(struct skcipher_request *req)
memset(req_ctx, 0, sizeof(*req_ctx));
- if (ctx_p->cipher_mode == DRV_CIPHER_CBC) {
+ if ((ctx_p->cipher_mode == DRV_CIPHER_CBC) &&
+ (req->cryptlen >= ivsize)) {
/* Allocate and save the last IV sized bytes of the source,
* which will be lost in case of in-place decryption.
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
index 5ca184e42483..5fa05a7bcf36 100644
--- a/drivers/crypto/ccree/cc_debugfs.c
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -39,11 +39,9 @@ static struct debugfs_reg32 debug_regs[] = {
CC_DEBUG_REG(AXIM_MON_COMP),
};
-int __init cc_debugfs_global_init(void)
+void __init cc_debugfs_global_init(void)
{
cc_debugfs_dir = debugfs_create_dir("ccree", NULL);
-
- return !cc_debugfs_dir;
}
void __exit cc_debugfs_global_fini(void)
@@ -56,7 +54,6 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
struct device *dev = drvdata_to_dev(drvdata);
struct cc_debugfs_ctx *ctx;
struct debugfs_regset32 *regset;
- struct dentry *file;
debug_regs[0].offset = drvdata->sig_offset;
debug_regs[1].offset = drvdata->ver_offset;
@@ -74,22 +71,9 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
regset->base = drvdata->cc_base;
ctx->dir = debugfs_create_dir(drvdata->plat_dev->name, cc_debugfs_dir);
- if (!ctx->dir)
- return -ENFILE;
-
- file = debugfs_create_regset32("regs", 0400, ctx->dir, regset);
- if (!file) {
- debugfs_remove(ctx->dir);
- return -ENFILE;
- }
- file = debugfs_create_bool("coherent", 0400, ctx->dir,
- &drvdata->coherent);
-
- if (!file) {
- debugfs_remove_recursive(ctx->dir);
- return -ENFILE;
- }
+ debugfs_create_regset32("regs", 0400, ctx->dir, regset);
+ debugfs_create_bool("coherent", 0400, ctx->dir, &drvdata->coherent);
drvdata->debugfs = ctx;
diff --git a/drivers/crypto/ccree/cc_debugfs.h b/drivers/crypto/ccree/cc_debugfs.h
index 5b5320eca7d2..01cbd9a95659 100644
--- a/drivers/crypto/ccree/cc_debugfs.h
+++ b/drivers/crypto/ccree/cc_debugfs.h
@@ -5,7 +5,7 @@
#define __CC_DEBUGFS_H__
#ifdef CONFIG_DEBUG_FS
-int cc_debugfs_global_init(void);
+void cc_debugfs_global_init(void);
void cc_debugfs_global_fini(void);
int cc_debugfs_init(struct cc_drvdata *drvdata);
@@ -13,11 +13,7 @@ void cc_debugfs_fini(struct cc_drvdata *drvdata);
#else
-static inline int cc_debugfs_global_init(void)
-{
- return 0;
-}
-
+static inline void cc_debugfs_global_init(void) {}
static inline void cc_debugfs_global_fini(void) {}
static inline int cc_debugfs_init(struct cc_drvdata *drvdata)
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 8ada308d72ee..210cc86605e9 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -103,10 +103,10 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
/* read the interrupt status */
irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
dev_dbg(dev, "Got IRR=0x%08X\n", irr);
- if (irr == 0) { /* Probably shared interrupt line */
- dev_err(dev, "Got interrupt with empty IRR\n");
+
+ if (irr == 0) /* Probably shared interrupt line */
return IRQ_NONE;
- }
+
imr = cc_ioread(drvdata, CC_REG(HOST_IMR));
/* clear interrupt - must be before processing events */
@@ -538,13 +538,8 @@ static struct platform_driver ccree_driver = {
static int __init ccree_init(void)
{
- int ret;
-
cc_hash_global_init();
-
- ret = cc_debugfs_global_init();
- if (ret)
- return ret;
+ cc_debugfs_global_init();
return platform_driver_register(&ccree_driver);
}
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index 5be7fd431b05..33dbf3e6d15d 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -111,13 +111,11 @@ struct cc_crypto_req {
* @cc_base: virt address of the CC registers
* @irq: device IRQ number
* @irq_mask: Interrupt mask shadow (1 for masked interrupts)
- * @fw_ver: SeP loaded firmware version
*/
struct cc_drvdata {
void __iomem *cc_base;
int irq;
u32 irq_mask;
- u32 fw_ver;
struct completion hw_queue_avail; /* wait for HW queue availability */
struct platform_device *plat_dev;
cc_sram_addr_t mlli_sram_addr;
diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile
index 639e5718dff4..b7bd980a27d8 100644
--- a/drivers/crypto/chelsio/Makefile
+++ b/drivers/crypto/chelsio/Makefile
@@ -1,4 +1,4 @@
-ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
+ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o
chcr-objs := chcr_core.o chcr_algo.o
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index bcef76508dfa..8d8cf80b9294 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -1368,7 +1368,6 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
static int chcr_device_init(struct chcr_context *ctx)
{
struct uld_ctx *u_ctx = NULL;
- struct adapter *adap;
unsigned int id;
int txq_perchan, txq_idx, ntxq;
int err = 0, rxq_perchan, rxq_idx;
@@ -1382,7 +1381,6 @@ static int chcr_device_init(struct chcr_context *ctx)
goto out;
}
ctx->dev = &u_ctx->dev;
- adap = padap(ctx->dev);
ntxq = u_ctx->lldi.ntxq;
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
txq_perchan = ntxq / u_ctx->lldi.nchan;
@@ -2762,7 +2760,7 @@ static int set_msg_len(u8 *block, unsigned int msglen, int csize)
return 0;
}
-static void generate_b0(struct aead_request *req, u8 *ivptr,
+static int generate_b0(struct aead_request *req, u8 *ivptr,
unsigned short op_type)
{
unsigned int l, lp, m;
@@ -2787,6 +2785,8 @@ static void generate_b0(struct aead_request *req, u8 *ivptr,
rc = set_msg_len(b0 + 16 - l,
(op_type == CHCR_DECRYPT_OP) ?
req->cryptlen - m : req->cryptlen, l);
+
+ return rc;
}
static inline int crypto_ccm_check_iv(const u8 *iv)
@@ -2821,7 +2821,7 @@ static int ccm_format_packet(struct aead_request *req,
*((unsigned short *)(reqctx->scratch_pad + 16)) =
htons(assoclen);
- generate_b0(req, ivptr, op_type);
+ rc = generate_b0(req, ivptr, op_type);
/* zero the ctr value */
memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
return rc;
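
These two hunks stop discarding the overflow result of set_msg_len(): generate_b0() now returns it, and ccm_format_packet() passes it on, so an oversized length rejects the request instead of being silently truncated. A condensed, hypothetical sketch of the propagation chain (set_len() stands in for set_msg_len()):

    #include <linux/types.h>
    #include <linux/errno.h>

    /* Hypothetical stand-in: fails when msglen needs more than csize bytes. */
    static int set_len(u8 *block, unsigned int msglen, int csize)
    {
        int i;

        for (i = csize - 1; i >= 0; i--, msglen >>= 8)
            block[i] = msglen & 0xff;
        return msglen ? -EOVERFLOW : 0;    /* leftover bits: did not fit */
    }

    static int build_b0(u8 *b0, unsigned int cryptlen, int l)
    {
        /* rc was previously discarded; it now reaches the caller */
        return set_len(b0 + 16 - l, cryptlen, l);
    }
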
@@ -3676,9 +3676,9 @@ static int chcr_aead_op(struct aead_request *req,
/* Form a WR from req */
skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
- if (IS_ERR(skb) || !skb) {
+ if (IS_ERR_OR_NULL(skb)) {
chcr_dec_wrcount(cdev);
- return PTR_ERR(skb);
+ return PTR_ERR_OR_ZERO(skb);
}
skb->dev = u_ctx->lldi.ports[0];
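
For reference, IS_ERR_OR_NULL() and PTR_ERR_OR_ZERO() both come from <linux/err.h>; the pair replaces the open-coded test and avoids returning the bogus PTR_ERR() of a plain NULL pointer. The idiom in isolation, as a hypothetical helper:

    #include <linux/err.h>
    #include <linux/skbuff.h>

    /* Illustrative only; not driver code. */
    static int submit_wr(struct sk_buff *skb)
    {
        if (IS_ERR_OR_NULL(skb))
            /* the encoded error for an ERR_PTR, 0 for a plain NULL */
            return PTR_ERR_OR_ZERO(skb);

        /* ... hand skb to the hardware queue ... */
        return -EINPROGRESS;
    }
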
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 1159dee964ed..ad874d548aa5 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -183,7 +183,7 @@ struct chcr_ipsec_aadiv {
struct ipsec_sa_entry {
int hmac_ctrl;
u16 esn;
- u16 imm;
+ u16 resv;
unsigned int enckey_len;
unsigned int kctx_len;
unsigned int authsize;
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 2fb48cce4462..0c826d0e1bfc 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -303,6 +303,9 @@ static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
return false;
}
+ /* Inline single pdu */
+ if (skb_shinfo(skb)->gso_size)
+ return false;
return true;
}
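
The added test makes this xdo_dev_offload_ok()-style callback decline GSO packets, so presumably the stack falls back to software IPsec for anything spanning more than a single PDU. A minimal hypothetical callback of the same shape:

    #include <linux/skbuff.h>
    #include <net/xfrm.h>

    /* Hypothetical offload_ok implementation; not driver code. */
    static bool example_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
    {
        if (skb_shinfo(skb)->gso_size)  /* GSO: would span several PDUs */
            return false;               /* decline; software xfrm handles it */
        return true;
    }
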
@@ -415,12 +418,12 @@ inline void *copy_esn_pktxt(struct sk_buff *skb,
iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
memcpy(aadiv->iv, iv, 8);
- if (sa_entry->imm) {
+ if (is_eth_imm(skb, sa_entry)) {
sc_imm = (struct ulptx_idata *)(pos +
(DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
sizeof(__be64)) << 3));
- sc_imm->cmd_more = FILL_CMD_MORE(!sa_entry->imm);
- sc_imm->len = cpu_to_be32(sa_entry->imm);
+ sc_imm->cmd_more = FILL_CMD_MORE(0);
+ sc_imm->len = cpu_to_be32(skb->len);
}
pos += len;
return pos;
@@ -548,10 +551,8 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
if (sa_entry->esn)
ivdrop = 1;
- if (is_eth_imm(skb, sa_entry)) {
+ if (is_eth_imm(skb, sa_entry))
immdatalen = skb->len;
- sa_entry->imm = immdatalen;
- }
if (sa_entry->esn)
esnlen = sizeof(struct chcr_ipsec_aadiv);
diff --git a/drivers/crypto/chelsio/chtls/Makefile b/drivers/crypto/chelsio/chtls/Makefile
index df1379570a8e..b958f1b8ec39 100644
--- a/drivers/crypto/chelsio/chtls/Makefile
+++ b/drivers/crypto/chelsio/chtls/Makefile
@@ -1,4 +1,5 @@
-ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4 -Idrivers/crypto/chelsio/
+ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4 \
+ -I $(srctree)/drivers/crypto/chelsio
obj-$(CONFIG_CRYPTO_DEV_CHELSIO_TLS) += chtls.o
chtls-objs := chtls_main.o chtls_cm.o chtls_io.o chtls_hw.o
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 18f553fcc167..1285a1bceda7 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -922,14 +922,13 @@ static int csk_wait_memory(struct chtls_dev *cdev,
struct sock *sk, long *timeo_p)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
- int sndbuf, err = 0;
+ int err = 0;
long current_timeo;
long vm_wait = 0;
bool noblock;
current_timeo = *timeo_p;
noblock = (*timeo_p ? false : true);
- sndbuf = cdev->max_host_sndbuf;
if (csk_mem_free(cdev, sk)) {
current_timeo = (prandom_u32() % (HZ / 5)) + 2;
vm_wait = (prandom_u32() % (HZ / 5)) + 2;
@@ -1401,23 +1400,18 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
- struct net_device *dev = csk->egress_dev;
struct chtls_hws *hws = &csk->tlshws;
struct tcp_sock *tp = tcp_sk(sk);
- struct adapter *adap;
unsigned long avail;
int buffers_freed;
int copied = 0;
- int request;
int target;
long timeo;
- adap = netdev2adap(dev);
buffers_freed = 0;
timeo = sock_rcvtimeo(sk, nonblock);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
- request = len;
if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
chtls_cleanup_rbuf(sk, copied);
@@ -1694,11 +1688,9 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
{
struct tcp_sock *tp = tcp_sk(sk);
struct chtls_sock *csk;
- struct chtls_hws *hws;
unsigned long avail; /* amount of available data in current skb */
int buffers_freed;
int copied = 0;
- int request;
long timeo;
int target; /* Read at least this many bytes */
@@ -1718,7 +1710,6 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
lock_sock(sk);
csk = rcu_dereference_sk_user_data(sk);
- hws = &csk->tlshws;
if (is_tls_rx(csk))
return chtls_pt_recvmsg(sk, msg, len, nonblock,
@@ -1726,7 +1717,6 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
timeo = sock_rcvtimeo(sk, nonblock);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
- request = len;
if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
chtls_cleanup_rbuf(sk, copied);
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index a5a36fe7bf2c..dad212cabe63 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -1961,7 +1961,8 @@ static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
u32 tmp[DES_EXPKEY_WORDS];
int ret = des_ekey(tmp, key);
- if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ if (unlikely(ret == 0) &&
+ (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
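
This is the first of many identical conversions below: CRYPTO_TFM_REQ_WEAK_KEY was renamed to CRYPTO_TFM_REQ_FORBID_WEAK_KEYS with unchanged semantics. The shared DES-setkey pattern, as a standalone sketch (illustrative, not any one driver's code):

    #include <linux/crypto.h>
    #include <crypto/des.h>

    static int des_setkey_checked(struct crypto_tfm *tfm, const u8 *key)
    {
        u32 tmp[DES_EXPKEY_WORDS];

        /* des_ekey() returns 0 for a weak key; reject it only when the
         * user asked for weak keys to be forbidden. */
        if (!des_ekey(tmp, key) &&
            (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
            tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
            return -EINVAL;
        }
        return 0;
    }
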
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index d531c14020dc..7ef30a98cb24 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -940,7 +940,7 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
}
ret = des_ekey(tmp, key);
- if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 19fba998b86b..95c1af227bd5 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -847,7 +847,7 @@ static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
goto out;
if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
- if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
+ if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
ret = -EINVAL;
} else {
*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
@@ -1125,7 +1125,7 @@ static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
goto out;
if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
- if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
+ if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
ret = -EINVAL;
goto out;
} else {
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 0ae84ec9e21c..066830dcc53e 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -286,7 +286,7 @@ static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
}
ret = des_ekey(tmp, key);
- if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 55f34cfc43ff..9450c41211b2 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -772,7 +772,7 @@ static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
}
err = des_ekey(tmp, key);
- if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 6369019219d4..1ba2633e90d6 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -662,7 +662,7 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
pr_debug("enter, keylen: %d\n", keylen);
/* Do we need to test against a weak key? */
- if (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY) {
+ if (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
u32 tmp[DES_EXPKEY_WORDS];
int ret = des_ekey(tmp, key);
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 17068b55fea5..1b3acdeffede 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -759,7 +759,8 @@ static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
}
if (unlikely(!des_ekey(tmp, key)) &&
- (crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ (crypto_ablkcipher_get_flags(cipher) &
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
diff --git a/drivers/crypto/qat/qat_c3xxx/Makefile b/drivers/crypto/qat/qat_c3xxx/Makefile
index 8f5fd4838a96..822b5de58ec6 100644
--- a/drivers/crypto/qat/qat_c3xxx/Makefile
+++ b/drivers/crypto/qat/qat_c3xxx/Makefile
@@ -1,3 +1,3 @@
-ccflags-y := -I$(src)/../qat_common
+ccflags-y := -I $(srctree)/$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o
qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index 763c2166ee0e..d937cc7248a5 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -193,11 +193,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
PCI_FUNC(pdev->devfn));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
- if (!accel_dev->debugfs_dir) {
- dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
- ret = -EINVAL;
- goto out_err;
- }
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
diff --git a/drivers/crypto/qat/qat_c3xxxvf/Makefile b/drivers/crypto/qat/qat_c3xxxvf/Makefile
index 16d178e2eaa2..8f56d27c7479 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/Makefile
+++ b/drivers/crypto/qat/qat_c3xxxvf/Makefile
@@ -1,3 +1,3 @@
-ccflags-y := -I$(src)/../qat_common
+ccflags-y := -I $(srctree)/$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o
qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 613c7d5644ce..1dc5ac859f7b 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -177,11 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
PCI_FUNC(pdev->devfn));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
- if (!accel_dev->debugfs_dir) {
- dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
- ret = -EINVAL;
- goto out_err;
- }
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
diff --git a/drivers/crypto/qat/qat_c62x/Makefile b/drivers/crypto/qat/qat_c62x/Makefile
index bd75ace59b76..6dcd404578fc 100644
--- a/drivers/crypto/qat/qat_c62x/Makefile
+++ b/drivers/crypto/qat/qat_c62x/Makefile
@@ -1,3 +1,3 @@
-ccflags-y := -I$(src)/../qat_common
+ccflags-y := -I $(srctree)/$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o
qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 9cb832963357..2bc06c89d2fe 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -193,11 +193,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
PCI_FUNC(pdev->devfn));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
- if (!accel_dev->debugfs_dir) {
- dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
- ret = -EINVAL;
- goto out_err;
- }
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
diff --git a/drivers/crypto/qat/qat_c62xvf/Makefile b/drivers/crypto/qat/qat_c62xvf/Makefile
index ecd708c213b2..1e5d51de778f 100644
--- a/drivers/crypto/qat/qat_c62xvf/Makefile
+++ b/drivers/crypto/qat/qat_c62xvf/Makefile
@@ -1,3 +1,3 @@
-ccflags-y := -I$(src)/../qat_common
+ccflags-y := -I $(srctree)/$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o
qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index 278452b8ef81..a68358b31292 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -177,11 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
PCI_FUNC(pdev->devfn));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
- if (!accel_dev->debugfs_dir) {
- dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
- ret = -EINVAL;
- goto out_err;
- }
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index d0879790561f..5c7fdb0fc53d 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -141,13 +141,6 @@ int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
accel_dev->debugfs_dir,
dev_cfg_data,
&qat_dev_cfg_fops);
- if (!dev_cfg_data->debug) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to create qat cfg debugfs entry.\n");
- kfree(dev_cfg_data);
- accel_dev->cfg = NULL;
- return -EFAULT;
- }
return 0;
}
EXPORT_SYMBOL_GPL(adf_cfg_dev_add);
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 57d2622728a5..2136cbe4bf6c 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -486,12 +486,6 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
/* accel_dev->debugfs_dir should always be non-NULL here */
etr_data->debug = debugfs_create_dir("transport",
accel_dev->debugfs_dir);
- if (!etr_data->debug) {
- dev_err(&GET_DEV(accel_dev),
- "Unable to create transport debugfs entry\n");
- ret = -ENOENT;
- goto err_bank_debug;
- }
for (i = 0; i < num_banks; i++) {
ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
@@ -504,7 +498,6 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
err_bank_all:
debugfs_remove(etr_data->debug);
-err_bank_debug:
kfree(etr_data->banks);
err_bank:
kfree(etr_data);
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
index 52340b9bb387..e794e9d97b2c 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -163,11 +163,6 @@ int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR,
ring->bank->bank_debug_dir,
ring, &adf_ring_debug_fops);
- if (!ring_debug->debug) {
- pr_err("QAT: Failed to create ring debug entry.\n");
- kfree(ring_debug);
- return -EFAULT;
- }
ring->ring_debug = ring_debug;
return 0;
}
@@ -271,19 +266,9 @@ int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
bank->bank_debug_dir = debugfs_create_dir(name, parent);
- if (!bank->bank_debug_dir) {
- pr_err("QAT: Failed to create bank debug dir.\n");
- return -EFAULT;
- }
-
bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR,
bank->bank_debug_dir, bank,
&adf_bank_debug_fops);
- if (!bank->bank_debug_cfg) {
- pr_err("QAT: Failed to create bank debug entry.\n");
- debugfs_remove(bank->bank_debug_dir);
- return -EFAULT;
- }
return 0;
}
diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile
index 180a00ed7f89..0fc06b1e1632 100644
--- a/drivers/crypto/qat/qat_dh895xcc/Makefile
+++ b/drivers/crypto/qat/qat_dh895xcc/Makefile
@@ -1,3 +1,3 @@
-ccflags-y := -I$(src)/../qat_common
+ccflags-y := -I $(srctree)/$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 3a9708ef4ce2..b11bf8c0e683 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -193,11 +193,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
PCI_FUNC(pdev->devfn));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
- if (!accel_dev->debugfs_dir) {
- dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
- ret = -EINVAL;
- goto out_err;
- }
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
diff --git a/drivers/crypto/qat/qat_dh895xccvf/Makefile b/drivers/crypto/qat/qat_dh895xccvf/Makefile
index 5c3ccf8267eb..9ce906af6034 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/Makefile
+++ b/drivers/crypto/qat/qat_dh895xccvf/Makefile
@@ -1,3 +1,3 @@
-ccflags-y := -I$(src)/../qat_common
+ccflags-y := -I $(srctree)/$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index 3da0f951cb59..1b762eefc6c1 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -177,11 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
PCI_FUNC(pdev->devfn));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
- if (!accel_dev->debugfs_dir) {
- dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
- ret = -EINVAL;
- goto out_err;
- }
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index 25c13e26d012..154b6baa124e 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -180,8 +180,8 @@ static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
u32 tmp[DES_EXPKEY_WORDS];
ret = des_ekey(tmp, key);
- if (!ret && crypto_ablkcipher_get_flags(ablk) &
- CRYPTO_TFM_REQ_WEAK_KEY)
+ if (!ret && (crypto_ablkcipher_get_flags(ablk) &
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS))
goto weakkey;
}
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
index 639c15c5364b..87dd571466c1 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
@@ -60,7 +60,7 @@ static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
if (keylen == DES_KEY_SIZE) {
if (!des_ekey(tmp, key) &&
- (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index 5cf64746731a..54fd714d53ca 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -517,7 +517,7 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
flags = crypto_skcipher_get_flags(tfm);
ret = des_ekey(tmp, key);
- if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
dev_dbg(ss->dev, "Weak key %u\n", keylen);
return -EINVAL;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 45e20707cef8..de78b54bcfb1 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
- void *err;
if (cryptlen + authsize > max_len) {
dev_err(dev, "length exceeds h/w max limit\n");
return ERR_PTR(-EINVAL);
}
- if (ivsize)
- iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
-
if (!dst || dst == src) {
src_len = assoclen + cryptlen + authsize;
src_nents = sg_nents_for_len(src, src_len);
if (src_nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
- err = ERR_PTR(-EINVAL);
- goto error_sg;
+ return ERR_PTR(-EINVAL);
}
src_nents = (src_nents == 1) ? 0 : src_nents;
dst_nents = dst ? src_nents : 0;
@@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
src_nents = sg_nents_for_len(src, src_len);
if (src_nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
- err = ERR_PTR(-EINVAL);
- goto error_sg;
+ return ERR_PTR(-EINVAL);
}
src_nents = (src_nents == 1) ? 0 : src_nents;
dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
dst_nents = sg_nents_for_len(dst, dst_len);
if (dst_nents < 0) {
dev_err(dev, "Invalid number of dst SG.\n");
- err = ERR_PTR(-EINVAL);
- goto error_sg;
+ return ERR_PTR(-EINVAL);
}
dst_nents = (dst_nents == 1) ? 0 : dst_nents;
}
@@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
/* if it's an ahash, add space for a second desc next to the first one */
if (is_sec1 && !dst)
alloc_len += sizeof(struct talitos_desc);
+ alloc_len += ivsize;
edesc = kmalloc(alloc_len, GFP_DMA | flags);
- if (!edesc) {
- err = ERR_PTR(-ENOMEM);
- goto error_sg;
+ if (!edesc)
+ return ERR_PTR(-ENOMEM);
+ if (ivsize) {
+ iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
}
memset(&edesc->desc, 0, sizeof(edesc->desc));
@@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
DMA_BIDIRECTIONAL);
}
return edesc;
-error_sg:
- if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
- return err;
}
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
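
Net effect of the talitos hunks: the IV is no longer DMA-mapped up front from caller-owned memory; instead the edesc allocation is extended by ivsize bytes, the IV is copied into that tail, and only the copy is mapped, which lets every earlier failure path return directly and removes the error_sg unwind. Condensed sketch with hypothetical names:

    #include <linux/slab.h>
    #include <linux/dma-mapping.h>

    /* Hypothetical condensed allocator mirroring the new layout. */
    static void *alloc_desc_with_iv(struct device *dev, size_t desc_len,
                                    const u8 *iv, unsigned int ivsize,
                                    dma_addr_t *iv_dma)
    {
        size_t alloc_len = desc_len + ivsize;   /* IV lives at the tail */
        u8 *buf = kmalloc(alloc_len, GFP_KERNEL | GFP_DMA);

        if (!buf)
            return NULL;

        if (ivsize) {
            u8 *iv_copy = memcpy(buf + desc_len, iv, ivsize);

            /* map the kmalloc'd copy, never the caller's buffer */
            *iv_dma = dma_map_single(dev, iv_copy, ivsize, DMA_TO_DEVICE);
        }
        return buf;
    }
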
@@ -1543,7 +1535,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
}
if (unlikely(crypto_ablkcipher_get_flags(cipher) &
- CRYPTO_TFM_REQ_WEAK_KEY) &&
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
!des_ekey(tmp, key)) {
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
return -EINVAL;
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index a92a66b1ff46..3235611928f2 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -595,6 +595,12 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
}
cookie = dmaengine_submit(desc);
+ if (dma_submit_error(cookie)) {
+ dev_dbg(ctx->device->dev, "[%s]: DMA submission failed\n",
+ __func__);
+ return cookie;
+ }
+
dma_async_issue_pending(channel);
return 0;
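
dmaengine_submit() only queues the descriptor and hands back a dma_cookie_t; the new dma_submit_error() check catches a failed submission before dma_async_issue_pending() would start the transfer. The idiom in isolation, as a hypothetical wrapper:

    #include <linux/dmaengine.h>

    /* Hypothetical wrapper showing the submit-then-issue idiom. */
    static int start_transfer(struct dma_chan *chan,
                              struct dma_async_tx_descriptor *desc)
    {
        dma_cookie_t cookie = dmaengine_submit(desc);

        if (dma_submit_error(cookie))
            return cookie;              /* negative errno-style cookie */

        dma_async_issue_pending(chan);  /* actually kick off the DMA */
        return 0;
    }
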
@@ -994,10 +1000,11 @@ static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
}
ret = des_ekey(tmp, key);
- if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ if (unlikely(ret == 0) &&
+ (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
- pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
- __func__);
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
+ __func__);
return -EINVAL;
}
@@ -1028,18 +1035,19 @@ static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
/* Checking key interdependency for weak key detection. */
if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
!((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
- (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
- pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
- __func__);
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
+ __func__);
return -EINVAL;
}
for (i = 0; i < 3; i++) {
ret = des_ekey(tmp, key + i*DES_KEY_SIZE);
- if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ if (unlikely(ret == 0) &&
+ (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
- pr_debug(DEV_DBG_NAME " [%s]: "
- "CRYPTO_TFM_REQ_WEAK_KEY", __func__);
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
+ __func__);
return -EINVAL;
}
}
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index 2c573d1aaa64..0704833ece92 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -406,7 +406,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
} else {
req_data->header.session_id =
cpu_to_le64(ctx->dec_sess_info.session_id);
- req_data->header.opcode =
+ req_data->header.opcode =
cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
}
req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);