Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig | 14
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 15
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c | 6
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h | 2
-rw-r--r--  drivers/crypto/aspeed/aspeed-hace-hash.c | 800
-rw-r--r--  drivers/crypto/aspeed/aspeed-hace.h | 28
-rw-r--r--  drivers/crypto/atmel-aes.c | 1
-rw-r--r--  drivers/crypto/atmel-sha.c | 1
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c | 2
-rw-r--r--  drivers/crypto/caam/Makefile | 4
-rw-r--r--  drivers/crypto/caam/ctrl.c | 13
-rw-r--r--  drivers/crypto/caam/debugfs.c | 2
-rw-r--r--  drivers/crypto/caam/debugfs.h | 2
-rw-r--r--  drivers/crypto/caam/intern.h | 5
-rw-r--r--  drivers/crypto/caam/jr.c | 3
-rw-r--r--  drivers/crypto/caam/qi.c | 5
-rw-r--r--  drivers/crypto/ccp/ccp-debugfs.c | 3
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c | 163
-rw-r--r--  drivers/crypto/ccp/sev-dev.c | 26
-rw-r--r--  drivers/crypto/ccp/sp-pci.c | 1
-rw-r--r--  drivers/crypto/ccree/cc_buffer_mgr.c | 54
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c | 4
-rw-r--r--  drivers/crypto/ccree/cc_hash.c | 30
-rw-r--r--  drivers/crypto/ccree/cc_pm.c | 1
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 10
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_crypto.c | 8
-rw-r--r--  drivers/crypto/hisilicon/qm.c | 1
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec.h | 63
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c | 587
-rw-r--r--  drivers/crypto/hisilicon/sgl.c | 15
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_crypto.c | 13
-rw-r--r--  drivers/crypto/img-hash.c | 8
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 1
-rw-r--r--  drivers/crypto/inside-secure/safexcel.h | 1
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c | 100
-rw-r--r--  drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c | 8
-rw-r--r--  drivers/crypto/intel/keembay/ocs-aes.c | 4
-rw-r--r--  drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c | 18
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 14
-rw-r--r--  drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c | 129
-rw-r--r--  drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h | 22
-rw-r--r--  drivers/crypto/intel/qat/qat_common/Makefile | 4
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_accel_devices.h | 40
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_aer.c | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_bank_state.c | 238
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_bank_state.h | 49
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg_common.h | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg_services.c | 45
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg_services.h | 13
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_common_drv.h | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c | 229
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h | 10
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c | 105
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c | 7
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h | 24
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c | 124
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c | 7
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c | 146
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen6_tl.h | 198
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_init.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_isr.c | 5
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c | 52
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.h | 36
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_rl.c | 86
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_rl.h | 11
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_rl_admin.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sriov.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sysfs.c | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c | 21
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c | 3
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_transport_debug.c | 21
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_algs.c | 26
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_bl.c | 6
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_compression.c | 8
-rw-r--r--  drivers/crypto/marvell/cesa/cipher.c | 4
-rw-r--r--  drivers/crypto/marvell/cesa/hash.c | 10
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_common.h | 5
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h | 128
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptlf.h | 3
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c | 13
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 55
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c | 6
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c | 26
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c | 13
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c | 14
-rw-r--r--  drivers/crypto/omap-aes-gcm.c | 1
-rw-r--r--  drivers/crypto/omap-aes.c | 1
-rw-r--r--  drivers/crypto/omap-des.c | 1
-rw-r--r--  drivers/crypto/omap-sham.c | 1
-rw-r--r--  drivers/crypto/starfive/jh7110-hash.c | 16
-rw-r--r--  drivers/crypto/stm32/Kconfig | 9
-rw-r--r--  drivers/crypto/stm32/Makefile | 1
-rw-r--r--  drivers/crypto/stm32/stm32-crc32.c | 480
-rw-r--r--  drivers/crypto/stm32/stm32-cryp.c | 1
-rw-r--r--  drivers/crypto/stm32/stm32-hash.c | 1
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_common.h | 2
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_core.c | 2
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_mgr.c | 36
100 files changed, 2378 insertions(+), 2165 deletions(-)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 5686369779be..04b4c43b6bae 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -180,6 +180,7 @@ config CRYPTO_PAES_S390
depends on PKEY
select CRYPTO_ALGAPI
select CRYPTO_SKCIPHER
+ select CRYPTO_ENGINE
help
This is the s390 hardware accelerated implementation of the
AES cipher algorithms for use with protected key.
@@ -187,6 +188,19 @@ config CRYPTO_PAES_S390
Select this option if you want to use the paes cipher
for example to use protected key encrypted devices.
+config CRYPTO_PHMAC_S390
+ tristate "PHMAC cipher algorithms"
+ depends on S390
+ depends on PKEY
+ select CRYPTO_HASH
+ select CRYPTO_ENGINE
+ help
+ This is the s390 hardware accelerated implementation of the
+ protected key HMAC support for SHA224, SHA256, SHA384 and SHA512.
+
+ Select this option if you want to use the phmac digests
+ for example to use dm-integrity with secure/protected keys.
+
config S390_PRNG
tristate "Pseudo random number generator device driver"
depends on S390
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index f9cf00d690e2..5663df49dd81 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -206,15 +206,14 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
cet->t_key = desc_addr_val_le32(ce, rctx->addr_key);
ivsize = crypto_skcipher_ivsize(tfm);
- if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
- rctx->ivlen = ivsize;
+ if (areq->iv && ivsize > 0) {
if (rctx->op_dir & CE_DECRYPTION) {
offset = areq->cryptlen - ivsize;
scatterwalk_map_and_copy(chan->backup_iv, areq->src,
offset, ivsize, 0);
}
memcpy(chan->bounce_iv, areq->iv, ivsize);
- rctx->addr_iv = dma_map_single(ce->dev, chan->bounce_iv, rctx->ivlen,
+ rctx->addr_iv = dma_map_single(ce->dev, chan->bounce_iv, ivsize,
DMA_TO_DEVICE);
if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
dev_err(ce->dev, "Cannot DMA MAP IV\n");
@@ -278,8 +277,8 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
}
chan->timeout = areq->cryptlen;
- rctx->nr_sgs = nr_sgs;
- rctx->nr_sgd = nr_sgd;
+ rctx->nr_sgs = ns;
+ rctx->nr_sgd = nd;
return 0;
theend_sgs:
@@ -296,7 +295,8 @@ theend_sgs:
theend_iv:
if (areq->iv && ivsize > 0) {
if (!dma_mapping_error(ce->dev, rctx->addr_iv))
- dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
+ dma_unmap_single(ce->dev, rctx->addr_iv, ivsize,
+ DMA_TO_DEVICE);
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
@@ -345,7 +345,8 @@ static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
if (areq->iv && ivsize > 0) {
if (cet->t_iv)
- dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
+ dma_unmap_single(ce->dev, rctx->addr_iv, ivsize,
+ DMA_TO_DEVICE);
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
memcpy(areq->iv, chan->backup_iv, ivsize);
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index bef44f350167..13bdfb8a2c62 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -342,8 +342,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
ce = algt->ce;
- bs = algt->alg.hash.base.halg.base.cra_blocksize;
- digestsize = algt->alg.hash.base.halg.digestsize;
+ bs = crypto_ahash_blocksize(tfm);
+ digestsize = crypto_ahash_digestsize(tfm);
if (digestsize == SHA224_DIGEST_SIZE)
digestsize = SHA256_DIGEST_SIZE;
if (digestsize == SHA384_DIGEST_SIZE)
@@ -455,7 +455,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
err_unmap_result:
dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
if (!err)
- memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
+ memcpy(areq->result, result, crypto_ahash_digestsize(tfm));
err_unmap_src:
dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 83df4d719053..0f9a89067016 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -260,7 +260,6 @@ static inline __le32 desc_addr_val_le32(struct sun8i_ce_dev *dev,
* struct sun8i_cipher_req_ctx - context for a skcipher request
* @op_dir: direction (encrypt vs decrypt) for this request
* @flow: the flow to use for this request
- * @ivlen: size of bounce_iv
* @nr_sgs: The number of source SG (as given by dma_map_sg())
* @nr_sgd: The number of destination SG (as given by dma_map_sg())
* @addr_iv: The IV addr returned by dma_map_single, need to unmap later
@@ -270,7 +269,6 @@ static inline __le32 desc_addr_val_le32(struct sun8i_ce_dev *dev,
struct sun8i_cipher_req_ctx {
u32 op_dir;
int flow;
- unsigned int ivlen;
int nr_sgs;
int nr_sgd;
dma_addr_t addr_iv;
diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
index 0b6e49c06eff..f8f37c9d5f3c 100644
--- a/drivers/crypto/aspeed/aspeed-hace-hash.c
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -5,7 +5,6 @@
#include "aspeed-hace.h"
#include <crypto/engine.h>
-#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
@@ -14,6 +13,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/scatterlist.h>
#include <linux/string.h>
#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
@@ -59,6 +59,46 @@ static const __be64 sha512_iv[8] = {
cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7)
};
+static int aspeed_sham_init(struct ahash_request *req);
+static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev);
+
+static int aspeed_sham_export(struct ahash_request *req, void *out)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ union {
+ u8 *u8;
+ u64 *u64;
+ } p = { .u8 = out };
+
+ memcpy(out, rctx->digest, rctx->ivsize);
+ p.u8 += rctx->ivsize;
+ put_unaligned(rctx->digcnt[0], p.u64++);
+ if (rctx->ivsize == 64)
+ put_unaligned(rctx->digcnt[1], p.u64);
+ return 0;
+}
+
+static int aspeed_sham_import(struct ahash_request *req, const void *in)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ union {
+ const u8 *u8;
+ const u64 *u64;
+ } p = { .u8 = in };
+ int err;
+
+ err = aspeed_sham_init(req);
+ if (err)
+ return err;
+
+ memcpy(rctx->digest, in, rctx->ivsize);
+ p.u8 += rctx->ivsize;
+ rctx->digcnt[0] = get_unaligned(p.u64++);
+ if (rctx->ivsize == 64)
+ rctx->digcnt[1] = get_unaligned(p.u64);
+ return 0;
+}
+
/* The purpose of this padding is to ensure that the padded message is a
* multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
* The bit "1" is appended at the end of the message followed by
@@ -74,10 +114,10 @@ static const __be64 sha512_iv[8] = {
* - if message length < 112 bytes then padlen = 112 - message length
* - else padlen = 128 + 112 - message length
*/
-static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
- struct aspeed_sham_reqctx *rctx)
+static int aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
+ struct aspeed_sham_reqctx *rctx, u8 *buf)
{
- unsigned int index, padlen;
+ unsigned int index, padlen, bitslen;
__be64 bits[2];
AHASH_DBG(hace_dev, "rctx flags:0x%x\n", (u32)rctx->flags);
@@ -87,25 +127,32 @@ static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
case SHA_FLAGS_SHA224:
case SHA_FLAGS_SHA256:
bits[0] = cpu_to_be64(rctx->digcnt[0] << 3);
- index = rctx->bufcnt & 0x3f;
+ index = rctx->digcnt[0] & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
- *(rctx->buffer + rctx->bufcnt) = 0x80;
- memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
- memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 8);
- rctx->bufcnt += padlen + 8;
+ bitslen = 8;
break;
default:
bits[1] = cpu_to_be64(rctx->digcnt[0] << 3);
bits[0] = cpu_to_be64(rctx->digcnt[1] << 3 |
rctx->digcnt[0] >> 61);
- index = rctx->bufcnt & 0x7f;
+ index = rctx->digcnt[0] & 0x7f;
padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
- *(rctx->buffer + rctx->bufcnt) = 0x80;
- memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
- memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 16);
- rctx->bufcnt += padlen + 16;
+ bitslen = 16;
break;
}
+ buf[0] = 0x80;
+ memset(buf + 1, 0, padlen - 1);
+ memcpy(buf + padlen, bits, bitslen);
+ return padlen + bitslen;
+}
+
+static void aspeed_ahash_update_counter(struct aspeed_sham_reqctx *rctx,
+ unsigned int len)
+{
+ rctx->offset += len;
+ rctx->digcnt[0] += len;
+ if (rctx->digcnt[0] < len)
+ rctx->digcnt[1]++;
}
/*
@@ -117,31 +164,31 @@ static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
struct ahash_request *req = hash_engine->req;
struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
- int length, remain;
+ unsigned int length, remain;
+ bool final = false;
- length = rctx->total + rctx->bufcnt;
- remain = length % rctx->block_size;
+ length = rctx->total - rctx->offset;
+ remain = length - round_down(length, rctx->block_size);
AHASH_DBG(hace_dev, "length:0x%x, remain:0x%x\n", length, remain);
- if (rctx->bufcnt)
- memcpy(hash_engine->ahash_src_addr, rctx->buffer, rctx->bufcnt);
-
- if (rctx->total + rctx->bufcnt < ASPEED_CRYPTO_SRC_DMA_BUF_LEN) {
- scatterwalk_map_and_copy(hash_engine->ahash_src_addr +
- rctx->bufcnt, rctx->src_sg,
- rctx->offset, rctx->total - remain, 0);
- rctx->offset += rctx->total - remain;
-
- } else {
- dev_warn(hace_dev->dev, "Hash data length is too large\n");
- return -EINVAL;
- }
-
- scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg,
- rctx->offset, remain, 0);
+ if (length > ASPEED_HASH_SRC_DMA_BUF_LEN)
+ length = ASPEED_HASH_SRC_DMA_BUF_LEN;
+ else if (rctx->flags & SHA_FLAGS_FINUP) {
+ if (round_up(length, rctx->block_size) + rctx->block_size >
+ ASPEED_CRYPTO_SRC_DMA_BUF_LEN)
+ length = round_down(length - 1, rctx->block_size);
+ else
+ final = true;
+ } else
+ length -= remain;
+ scatterwalk_map_and_copy(hash_engine->ahash_src_addr, rctx->src_sg,
+ rctx->offset, length, 0);
+ aspeed_ahash_update_counter(rctx, length);
+ if (final)
+ length += aspeed_ahash_fill_padding(
+ hace_dev, rctx, hash_engine->ahash_src_addr + length);
- rctx->bufcnt = remain;
rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
SHA512_DIGEST_SIZE,
DMA_BIDIRECTIONAL);
@@ -150,7 +197,7 @@ static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
return -ENOMEM;
}
- hash_engine->src_length = length - remain;
+ hash_engine->src_length = length;
hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
hash_engine->digest_dma = rctx->digest_dma_addr;
@@ -166,16 +213,20 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
struct ahash_request *req = hash_engine->req;
struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ bool final = rctx->flags & SHA_FLAGS_FINUP;
+ int remain, sg_len, i, max_sg_nents;
+ unsigned int length, offset, total;
struct aspeed_sg_list *src_list;
struct scatterlist *s;
- int length, remain, sg_len, i;
int rc = 0;
- remain = (rctx->total + rctx->bufcnt) % rctx->block_size;
- length = rctx->total + rctx->bufcnt - remain;
+ offset = rctx->offset;
+ length = rctx->total - offset;
+ remain = final ? 0 : length - round_down(length, rctx->block_size);
+ length -= remain;
- AHASH_DBG(hace_dev, "%s:0x%x, %s:%zu, %s:0x%x, %s:0x%x\n",
- "rctx total", rctx->total, "bufcnt", rctx->bufcnt,
+ AHASH_DBG(hace_dev, "%s:0x%x, %s:0x%x, %s:0x%x\n",
+ "rctx total", rctx->total,
"length", length, "remain", remain);
sg_len = dma_map_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
@@ -186,6 +237,8 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
goto end;
}
+ max_sg_nents = ASPEED_HASH_SRC_DMA_BUF_LEN / sizeof(*src_list) - final;
+ sg_len = min(sg_len, max_sg_nents);
src_list = (struct aspeed_sg_list *)hash_engine->ahash_src_addr;
rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
SHA512_DIGEST_SIZE,
@@ -196,68 +249,66 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
goto free_src_sg;
}
- if (rctx->bufcnt != 0) {
- u32 phy_addr;
- u32 len;
+ total = 0;
+ for_each_sg(rctx->src_sg, s, sg_len, i) {
+ u32 phy_addr = sg_dma_address(s);
+ u32 len = sg_dma_len(s);
- rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
- rctx->buffer,
- rctx->block_size * 2,
- DMA_TO_DEVICE);
- if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
- dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
- rc = -ENOMEM;
- goto free_rctx_digest;
+ if (len <= offset) {
+ offset -= len;
+ continue;
}
- phy_addr = rctx->buffer_dma_addr;
- len = rctx->bufcnt;
- length -= len;
+ len -= offset;
+ phy_addr += offset;
+ offset = 0;
- /* Last sg list */
- if (length == 0)
- len |= HASH_SG_LAST_LIST;
+ if (length > len)
+ length -= len;
+ else {
+ /* Last sg list */
+ len = length;
+ length = 0;
+ }
- src_list[0].phy_addr = cpu_to_le32(phy_addr);
- src_list[0].len = cpu_to_le32(len);
- src_list++;
+ total += len;
+ src_list[i].phy_addr = cpu_to_le32(phy_addr);
+ src_list[i].len = cpu_to_le32(len);
}
if (length != 0) {
- for_each_sg(rctx->src_sg, s, sg_len, i) {
- u32 phy_addr = sg_dma_address(s);
- u32 len = sg_dma_len(s);
-
- if (length > len)
- length -= len;
- else {
- /* Last sg list */
- len = length;
- len |= HASH_SG_LAST_LIST;
- length = 0;
- }
+ total = round_down(total, rctx->block_size);
+ final = false;
+ }
+
+ aspeed_ahash_update_counter(rctx, total);
+ if (final) {
+ int len = aspeed_ahash_fill_padding(hace_dev, rctx,
+ rctx->buffer);
- src_list[i].phy_addr = cpu_to_le32(phy_addr);
- src_list[i].len = cpu_to_le32(len);
+ total += len;
+ rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
+ rctx->buffer,
+ sizeof(rctx->buffer),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+ rc = -ENOMEM;
+ goto free_rctx_digest;
}
- }
- if (length != 0) {
- rc = -EINVAL;
- goto free_rctx_buffer;
+ src_list[i].phy_addr = cpu_to_le32(rctx->buffer_dma_addr);
+ src_list[i].len = cpu_to_le32(len);
+ i++;
}
+ src_list[i - 1].len |= cpu_to_le32(HASH_SG_LAST_LIST);
- rctx->offset = rctx->total - remain;
- hash_engine->src_length = rctx->total + rctx->bufcnt - remain;
+ hash_engine->src_length = total;
hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
hash_engine->digest_dma = rctx->digest_dma_addr;
return 0;
-free_rctx_buffer:
- if (rctx->bufcnt != 0)
- dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
- rctx->block_size * 2, DMA_TO_DEVICE);
free_rctx_digest:
dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
@@ -272,24 +323,6 @@ static int aspeed_ahash_complete(struct aspeed_hace_dev *hace_dev)
{
struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
struct ahash_request *req = hash_engine->req;
-
- AHASH_DBG(hace_dev, "\n");
-
- hash_engine->flags &= ~CRYPTO_FLAGS_BUSY;
-
- crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req, 0);
-
- return 0;
-}
-
-/*
- * Copy digest to the corresponding request result.
- * This function will be called at final() stage.
- */
-static int aspeed_ahash_transfer(struct aspeed_hace_dev *hace_dev)
-{
- struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
- struct ahash_request *req = hash_engine->req;
struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
AHASH_DBG(hace_dev, "\n");
@@ -297,12 +330,19 @@ static int aspeed_ahash_transfer(struct aspeed_hace_dev *hace_dev)
dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
- dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
- rctx->block_size * 2, DMA_TO_DEVICE);
+ if (rctx->total - rctx->offset >= rctx->block_size ||
+ (rctx->total != rctx->offset && rctx->flags & SHA_FLAGS_FINUP))
+ return aspeed_ahash_req_update(hace_dev);
- memcpy(req->result, rctx->digest, rctx->digsize);
+ hash_engine->flags &= ~CRYPTO_FLAGS_BUSY;
- return aspeed_ahash_complete(hace_dev);
+ if (rctx->flags & SHA_FLAGS_FINUP)
+ memcpy(req->result, rctx->digest, rctx->digsize);
+
+ crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req,
+ rctx->total - rctx->offset);
+
+ return 0;
}
/*
@@ -338,118 +378,6 @@ static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev,
return -EINPROGRESS;
}
-/*
- * HMAC resume aims to do the second pass produces
- * the final HMAC code derived from the inner hash
- * result and the outer key.
- */
-static int aspeed_ahash_hmac_resume(struct aspeed_hace_dev *hace_dev)
-{
- struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
- struct ahash_request *req = hash_engine->req;
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
- int rc = 0;
-
- AHASH_DBG(hace_dev, "\n");
-
- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-
- dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
- rctx->block_size * 2, DMA_TO_DEVICE);
-
- /* o key pad + hash sum 1 */
- memcpy(rctx->buffer, bctx->opad, rctx->block_size);
- memcpy(rctx->buffer + rctx->block_size, rctx->digest, rctx->digsize);
-
- rctx->bufcnt = rctx->block_size + rctx->digsize;
- rctx->digcnt[0] = rctx->block_size + rctx->digsize;
-
- aspeed_ahash_fill_padding(hace_dev, rctx);
- memcpy(rctx->digest, rctx->sha_iv, rctx->ivsize);
-
- rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
- SHA512_DIGEST_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
- dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
- rc = -ENOMEM;
- goto end;
- }
-
- rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer,
- rctx->block_size * 2,
- DMA_TO_DEVICE);
- if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
- dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
- rc = -ENOMEM;
- goto free_rctx_digest;
- }
-
- hash_engine->src_dma = rctx->buffer_dma_addr;
- hash_engine->src_length = rctx->bufcnt;
- hash_engine->digest_dma = rctx->digest_dma_addr;
-
- return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
-
-free_rctx_digest:
- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-end:
- return rc;
-}
-
-static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev)
-{
- struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
- struct ahash_request *req = hash_engine->req;
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
- int rc = 0;
-
- AHASH_DBG(hace_dev, "\n");
-
- aspeed_ahash_fill_padding(hace_dev, rctx);
-
- rctx->digest_dma_addr = dma_map_single(hace_dev->dev,
- rctx->digest,
- SHA512_DIGEST_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
- dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
- rc = -ENOMEM;
- goto end;
- }
-
- rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
- rctx->buffer,
- rctx->block_size * 2,
- DMA_TO_DEVICE);
- if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
- dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
- rc = -ENOMEM;
- goto free_rctx_digest;
- }
-
- hash_engine->src_dma = rctx->buffer_dma_addr;
- hash_engine->src_length = rctx->bufcnt;
- hash_engine->digest_dma = rctx->digest_dma_addr;
-
- if (rctx->flags & SHA_FLAGS_HMAC)
- return aspeed_hace_ahash_trigger(hace_dev,
- aspeed_ahash_hmac_resume);
-
- return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
-
-free_rctx_digest:
- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-end:
- return rc;
-}
-
static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
{
struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
@@ -461,40 +389,12 @@ static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
DMA_TO_DEVICE);
- if (rctx->bufcnt != 0)
+ if (rctx->flags & SHA_FLAGS_FINUP && rctx->total == rctx->offset)
dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
- rctx->block_size * 2,
- DMA_TO_DEVICE);
+ sizeof(rctx->buffer), DMA_TO_DEVICE);
- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-
- scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, rctx->offset,
- rctx->total - rctx->offset, 0);
-
- rctx->bufcnt = rctx->total - rctx->offset;
rctx->cmd &= ~HASH_CMD_HASH_SRC_SG_CTRL;
- if (rctx->flags & SHA_FLAGS_FINUP)
- return aspeed_ahash_req_final(hace_dev);
-
- return aspeed_ahash_complete(hace_dev);
-}
-
-static int aspeed_ahash_update_resume(struct aspeed_hace_dev *hace_dev)
-{
- struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
- struct ahash_request *req = hash_engine->req;
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-
- AHASH_DBG(hace_dev, "\n");
-
- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-
- if (rctx->flags & SHA_FLAGS_FINUP)
- return aspeed_ahash_req_final(hace_dev);
-
return aspeed_ahash_complete(hace_dev);
}
@@ -513,7 +413,7 @@ static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev)
resume = aspeed_ahash_update_resume_sg;
} else {
- resume = aspeed_ahash_update_resume;
+ resume = aspeed_ahash_complete;
}
ret = hash_engine->dma_prepare(hace_dev);
@@ -530,26 +430,47 @@ static int aspeed_hace_hash_handle_queue(struct aspeed_hace_dev *hace_dev,
hace_dev->crypt_engine_hash, req);
}
+static noinline int aspeed_ahash_fallback(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ HASH_FBREQ_ON_STACK(fbreq, req);
+ u8 *state = rctx->buffer;
+ struct scatterlist sg[2];
+ struct scatterlist *ssg;
+ int ret;
+
+ ssg = scatterwalk_ffwd(sg, req->src, rctx->offset);
+ ahash_request_set_crypt(fbreq, ssg, req->result,
+ rctx->total - rctx->offset);
+
+ ret = aspeed_sham_export(req, state) ?:
+ crypto_ahash_import_core(fbreq, state);
+
+ if (rctx->flags & SHA_FLAGS_FINUP)
+ ret = ret ?: crypto_ahash_finup(fbreq);
+ else
+ ret = ret ?: crypto_ahash_update(fbreq) ?:
+ crypto_ahash_export_core(fbreq, state) ?:
+ aspeed_sham_import(req, state);
+ HASH_REQUEST_ZERO(fbreq);
+ return ret;
+}
+
static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq)
{
struct ahash_request *req = ahash_request_cast(areq);
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
struct aspeed_engine_hash *hash_engine;
- int ret = 0;
+ int ret;
hash_engine = &hace_dev->hash_engine;
hash_engine->flags |= CRYPTO_FLAGS_BUSY;
- if (rctx->op == SHA_OP_UPDATE)
- ret = aspeed_ahash_req_update(hace_dev);
- else if (rctx->op == SHA_OP_FINAL)
- ret = aspeed_ahash_req_final(hace_dev);
-
+ ret = aspeed_ahash_req_update(hace_dev);
if (ret != -EINPROGRESS)
- return ret;
+ return aspeed_ahash_fallback(req);
return 0;
}
@@ -590,45 +511,7 @@ static int aspeed_sham_update(struct ahash_request *req)
rctx->total = req->nbytes;
rctx->src_sg = req->src;
rctx->offset = 0;
- rctx->src_nents = sg_nents(req->src);
- rctx->op = SHA_OP_UPDATE;
-
- rctx->digcnt[0] += rctx->total;
- if (rctx->digcnt[0] < rctx->total)
- rctx->digcnt[1]++;
-
- if (rctx->bufcnt + rctx->total < rctx->block_size) {
- scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt,
- rctx->src_sg, rctx->offset,
- rctx->total, 0);
- rctx->bufcnt += rctx->total;
-
- return 0;
- }
-
- return aspeed_hace_hash_handle_queue(hace_dev, req);
-}
-
-static int aspeed_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
- const u8 *data, unsigned int len, u8 *out)
-{
- SHASH_DESC_ON_STACK(shash, tfm);
-
- shash->tfm = tfm;
-
- return crypto_shash_digest(shash, data, len, out);
-}
-
-static int aspeed_sham_final(struct ahash_request *req)
-{
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
- struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
-
- AHASH_DBG(hace_dev, "req->nbytes:%d, rctx->total:%d\n",
- req->nbytes, rctx->total);
- rctx->op = SHA_OP_FINAL;
+ rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
return aspeed_hace_hash_handle_queue(hace_dev, req);
}
@@ -639,23 +522,12 @@ static int aspeed_sham_finup(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
- int rc1, rc2;
AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
rctx->flags |= SHA_FLAGS_FINUP;
- rc1 = aspeed_sham_update(req);
- if (rc1 == -EINPROGRESS || rc1 == -EBUSY)
- return rc1;
-
- /*
- * final() has to be always called to cleanup resources
- * even if update() failed, except EINPROGRESS
- */
- rc2 = aspeed_sham_final(req);
-
- return rc1 ? : rc2;
+ return aspeed_sham_update(req);
}
static int aspeed_sham_init(struct ahash_request *req)
@@ -664,7 +536,6 @@ static int aspeed_sham_init(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
AHASH_DBG(hace_dev, "%s: digest size:%d\n",
crypto_tfm_alg_name(&tfm->base),
@@ -679,7 +550,6 @@ static int aspeed_sham_init(struct ahash_request *req)
rctx->flags |= SHA_FLAGS_SHA1;
rctx->digsize = SHA1_DIGEST_SIZE;
rctx->block_size = SHA1_BLOCK_SIZE;
- rctx->sha_iv = sha1_iv;
rctx->ivsize = 32;
memcpy(rctx->digest, sha1_iv, rctx->ivsize);
break;
@@ -688,7 +558,6 @@ static int aspeed_sham_init(struct ahash_request *req)
rctx->flags |= SHA_FLAGS_SHA224;
rctx->digsize = SHA224_DIGEST_SIZE;
rctx->block_size = SHA224_BLOCK_SIZE;
- rctx->sha_iv = sha224_iv;
rctx->ivsize = 32;
memcpy(rctx->digest, sha224_iv, rctx->ivsize);
break;
@@ -697,7 +566,6 @@ static int aspeed_sham_init(struct ahash_request *req)
rctx->flags |= SHA_FLAGS_SHA256;
rctx->digsize = SHA256_DIGEST_SIZE;
rctx->block_size = SHA256_BLOCK_SIZE;
- rctx->sha_iv = sha256_iv;
rctx->ivsize = 32;
memcpy(rctx->digest, sha256_iv, rctx->ivsize);
break;
@@ -707,7 +575,6 @@ static int aspeed_sham_init(struct ahash_request *req)
rctx->flags |= SHA_FLAGS_SHA384;
rctx->digsize = SHA384_DIGEST_SIZE;
rctx->block_size = SHA384_BLOCK_SIZE;
- rctx->sha_iv = (const __be32 *)sha384_iv;
rctx->ivsize = 64;
memcpy(rctx->digest, sha384_iv, rctx->ivsize);
break;
@@ -717,7 +584,6 @@ static int aspeed_sham_init(struct ahash_request *req)
rctx->flags |= SHA_FLAGS_SHA512;
rctx->digsize = SHA512_DIGEST_SIZE;
rctx->block_size = SHA512_BLOCK_SIZE;
- rctx->sha_iv = (const __be32 *)sha512_iv;
rctx->ivsize = 64;
memcpy(rctx->digest, sha512_iv, rctx->ivsize);
break;
@@ -727,19 +593,10 @@ static int aspeed_sham_init(struct ahash_request *req)
return -EINVAL;
}
- rctx->bufcnt = 0;
rctx->total = 0;
rctx->digcnt[0] = 0;
rctx->digcnt[1] = 0;
- /* HMAC init */
- if (tctx->flags & SHA_FLAGS_HMAC) {
- rctx->digcnt[0] = rctx->block_size;
- rctx->bufcnt = rctx->block_size;
- memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
- rctx->flags |= SHA_FLAGS_HMAC;
- }
-
return 0;
}
@@ -748,102 +605,14 @@ static int aspeed_sham_digest(struct ahash_request *req)
return aspeed_sham_init(req) ? : aspeed_sham_finup(req);
}
-static int aspeed_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
+static int aspeed_sham_cra_init(struct crypto_ahash *tfm)
{
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
- struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
- int ds = crypto_shash_digestsize(bctx->shash);
- int bs = crypto_shash_blocksize(bctx->shash);
- int err = 0;
- int i;
-
- AHASH_DBG(hace_dev, "%s: keylen:%d\n", crypto_tfm_alg_name(&tfm->base),
- keylen);
-
- if (keylen > bs) {
- err = aspeed_sham_shash_digest(bctx->shash,
- crypto_shash_get_flags(bctx->shash),
- key, keylen, bctx->ipad);
- if (err)
- return err;
- keylen = ds;
-
- } else {
- memcpy(bctx->ipad, key, keylen);
- }
-
- memset(bctx->ipad + keylen, 0, bs - keylen);
- memcpy(bctx->opad, bctx->ipad, bs);
-
- for (i = 0; i < bs; i++) {
- bctx->ipad[i] ^= HMAC_IPAD_VALUE;
- bctx->opad[i] ^= HMAC_OPAD_VALUE;
- }
-
- return err;
-}
-
-static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
-{
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
- struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
struct aspeed_hace_alg *ast_alg;
ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash.base);
tctx->hace_dev = ast_alg->hace_dev;
- tctx->flags = 0;
-
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct aspeed_sham_reqctx));
-
- if (ast_alg->alg_base) {
- /* hmac related */
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
-
- tctx->flags |= SHA_FLAGS_HMAC;
- bctx->shash = crypto_alloc_shash(ast_alg->alg_base, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(bctx->shash)) {
- dev_warn(ast_alg->hace_dev->dev,
- "base driver '%s' could not be loaded.\n",
- ast_alg->alg_base);
- return PTR_ERR(bctx->shash);
- }
- }
-
- return 0;
-}
-
-static void aspeed_sham_cra_exit(struct crypto_tfm *tfm)
-{
- struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
- struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
-
- AHASH_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(tfm));
-
- if (tctx->flags & SHA_FLAGS_HMAC) {
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
-
- crypto_free_shash(bctx->shash);
- }
-}
-
-static int aspeed_sham_export(struct ahash_request *req, void *out)
-{
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-
- memcpy(out, rctx, sizeof(*rctx));
-
- return 0;
-}
-
-static int aspeed_sham_import(struct ahash_request *req, const void *in)
-{
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-
- memcpy(rctx, in, sizeof(*rctx));
return 0;
}
@@ -853,11 +622,11 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
- .final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
+ .init_tfm = aspeed_sham_cra_init,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -867,13 +636,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
}
}
},
@@ -885,11 +654,11 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
- .final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
+ .init_tfm = aspeed_sham_cra_init,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -899,13 +668,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
}
}
},
@@ -917,11 +686,11 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
- .final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
+ .init_tfm = aspeed_sham_cra_init,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -931,118 +700,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- .alg.ahash.op = {
- .do_one_request = aspeed_ahash_do_one,
- },
- },
- {
- .alg_base = "sha1",
- .alg.ahash.base = {
- .init = aspeed_sham_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha1)",
- .cra_driver_name = "aspeed-hmac-sha1",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- .alg.ahash.op = {
- .do_one_request = aspeed_ahash_do_one,
- },
- },
- {
- .alg_base = "sha224",
- .alg.ahash.base = {
- .init = aspeed_sham_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha224)",
- .cra_driver_name = "aspeed-hmac-sha224",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- .alg.ahash.op = {
- .do_one_request = aspeed_ahash_do_one,
- },
- },
- {
- .alg_base = "sha256",
- .alg.ahash.base = {
- .init = aspeed_sham_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha256)",
- .cra_driver_name = "aspeed-hmac-sha256",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
}
}
},
@@ -1057,11 +721,11 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
- .final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
+ .init_tfm = aspeed_sham_cra_init,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -1071,13 +735,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
}
}
},
@@ -1089,11 +753,11 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
- .final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
+ .init_tfm = aspeed_sham_cra_init,
.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -1103,83 +767,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- .alg.ahash.op = {
- .do_one_request = aspeed_ahash_do_one,
- },
- },
- {
- .alg_base = "sha384",
- .alg.ahash.base = {
- .init = aspeed_sham_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA384_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha384)",
- .cra_driver_name = "aspeed-hmac-sha384",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- .alg.ahash.op = {
- .do_one_request = aspeed_ahash_do_one,
- },
- },
- {
- .alg_base = "sha512",
- .alg.ahash.base = {
- .init = aspeed_sham_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha512)",
- .cra_driver_name = "aspeed-hmac-sha512",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
}
}
},
diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h
index 68f70e01fccb..b1d07730d543 100644
--- a/drivers/crypto/aspeed/aspeed-hace.h
+++ b/drivers/crypto/aspeed/aspeed-hace.h
@@ -119,7 +119,6 @@
#define SHA_FLAGS_SHA512 BIT(4)
#define SHA_FLAGS_SHA512_224 BIT(5)
#define SHA_FLAGS_SHA512_256 BIT(6)
-#define SHA_FLAGS_HMAC BIT(8)
#define SHA_FLAGS_FINUP BIT(9)
#define SHA_FLAGS_MASK (0xff)
@@ -161,22 +160,18 @@ struct aspeed_engine_hash {
aspeed_hace_fn_t dma_prepare;
};
-struct aspeed_sha_hmac_ctx {
- struct crypto_shash *shash;
- u8 ipad[SHA512_BLOCK_SIZE];
- u8 opad[SHA512_BLOCK_SIZE];
-};
-
struct aspeed_sham_ctx {
struct aspeed_hace_dev *hace_dev;
- unsigned long flags; /* hmac flag */
-
- struct aspeed_sha_hmac_ctx base[];
};
struct aspeed_sham_reqctx {
+ /* DMA buffer written by hardware */
+ u8 digest[SHA512_DIGEST_SIZE] __aligned(64);
+
+ /* Software state sorted by size. */
+ u64 digcnt[2];
+
unsigned long flags; /* final update flag should no use*/
- unsigned long op; /* final or update */
u32 cmd; /* trigger cmd */
/* walk state */
@@ -188,17 +183,12 @@ struct aspeed_sham_reqctx {
size_t digsize;
size_t block_size;
size_t ivsize;
- const __be32 *sha_iv;
- /* remain data buffer */
- u8 buffer[SHA512_BLOCK_SIZE * 2];
dma_addr_t buffer_dma_addr;
- size_t bufcnt; /* buffer counter */
-
- /* output buffer */
- u8 digest[SHA512_DIGEST_SIZE] __aligned(64);
dma_addr_t digest_dma_addr;
- u64 digcnt[2];
+
+ /* This is DMA too but read-only for hardware. */
+ u8 buffer[SHA512_BLOCK_SIZE + 16];
};
struct aspeed_engine_crypto {
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 27c5d000b4b2..3a2684208dda 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -2297,6 +2297,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
+ case 0x800:
case 0x700:
case 0x600:
case 0x500:
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 2cc36da163e8..3d7573c7bd1c 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2534,6 +2534,7 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
+ case 0x800:
case 0x700:
case 0x600:
case 0x510:
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index f8d50bd227a6..75ee065da1ec 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2072,7 +2072,7 @@ static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
static void artpec6_crypto_timeout(struct timer_list *t)
{
- struct artpec6_crypto *ac = from_timer(ac, t, timer);
+ struct artpec6_crypto *ac = timer_container_of(ac, t, timer);
dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index acf1b197eb84..d2eaf5205b1c 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -25,10 +25,6 @@ caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_BLOB_GEN) += blob_gen.o
caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
-ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
- ccflags-y += -DCONFIG_CAAM_QI
-endif
-
caam-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 38ff931059b4..a93be395c878 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -24,7 +24,7 @@
bool caam_dpaa2;
EXPORT_SYMBOL(caam_dpaa2);
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
#include "qi.h"
#endif
@@ -573,7 +573,7 @@ static const struct soc_device_attribute caam_imx_soc_table[] = {
{ .soc_id = "i.MX7*", .data = &caam_imx7_data },
{ .soc_id = "i.MX8M*", .data = &caam_imx7_data },
{ .soc_id = "i.MX8ULP", .data = &caam_imx8ulp_data },
- { .soc_id = "i.MX8QM", .data = &caam_imx8ulp_data },
+ { .soc_id = "i.MX8Q*", .data = &caam_imx8ulp_data },
{ .soc_id = "VF*", .data = &caam_vf610_data },
{ .family = "Freescale i.MX" },
{ /* sentinel */ }
@@ -831,7 +831,7 @@ static int caam_ctrl_suspend(struct device *dev)
{
const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
- if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en)
+ if (ctrlpriv->caam_off_during_pm && !ctrlpriv->no_page0)
caam_state_save(dev);
return 0;
@@ -842,7 +842,7 @@ static int caam_ctrl_resume(struct device *dev)
struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
int ret = 0;
- if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en) {
+ if (ctrlpriv->caam_off_during_pm && !ctrlpriv->no_page0) {
caam_state_restore(dev);
/* HW and rng will be reset so deinstantiation can be removed */
@@ -908,6 +908,7 @@ static int caam_probe(struct platform_device *pdev)
imx_soc_data = imx_soc_match->data;
reg_access = reg_access && imx_soc_data->page0_access;
+ ctrlpriv->no_page0 = !reg_access;
/*
* CAAM clocks cannot be controlled from kernel.
*/
@@ -967,7 +968,7 @@ iomap_ctrl:
caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
/* If (DPAA 1.x) QI present, check whether dependencies are available */
if (ctrlpriv->qi_present && !caam_dpaa2) {
ret = qman_is_probed();
@@ -1098,7 +1099,7 @@ set_dma_mask:
wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
/* If QMAN driver is present, init CAAM-QI backend */
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
ret = caam_qi_init(pdev);
if (ret)
dev_err(dev, "caam qi i/f init failed: %d\n", ret);
diff --git a/drivers/crypto/caam/debugfs.c b/drivers/crypto/caam/debugfs.c
index 6358d3cabf57..718352b7afb5 100644
--- a/drivers/crypto/caam/debugfs.c
+++ b/drivers/crypto/caam/debugfs.c
@@ -22,7 +22,7 @@ static int caam_debugfs_u32_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
/*
* This is a counter for the number of times the congestion group (where all
* the request and response queueus are) reached congestion. Incremented
diff --git a/drivers/crypto/caam/debugfs.h b/drivers/crypto/caam/debugfs.h
index 8b5d1acd21a7..ef238c71f92a 100644
--- a/drivers/crypto/caam/debugfs.h
+++ b/drivers/crypto/caam/debugfs.h
@@ -18,7 +18,7 @@ static inline void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
{}
#endif
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_CAAM_QI)
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
void caam_debugfs_qi_congested(void);
void caam_debugfs_qi_init(struct caam_drv_private *ctrlpriv);
#else
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index e51320150872..a88da0d31b23 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -115,6 +115,7 @@ struct caam_drv_private {
u8 blob_present; /* Nonzero if BLOB support present in device */
u8 mc_en; /* Nonzero if MC f/w is active */
u8 optee_en; /* Nonzero if OP-TEE f/w is active */
+ u8 no_page0; /* Nonzero if register page 0 is not controlled by Linux */
bool pr_support; /* RNG prediction resistance available */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
@@ -226,7 +227,7 @@ static inline int caam_prng_register(struct device *dev)
static inline void caam_prng_unregister(void *data) {}
#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PRNG_API */
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
int caam_qi_algapi_init(struct device *dev);
void caam_qi_algapi_exit(void);
@@ -242,7 +243,7 @@ static inline void caam_qi_algapi_exit(void)
{
}
-#endif /* CONFIG_CAAM_QI */
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI */
static inline u64 caam_get_dma_mask(struct device *dev)
{
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 9fcdb64084ac..0ef00df9730e 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -629,8 +629,7 @@ static int caam_jr_probe(struct platform_device *pdev)
}
/* Initialize crypto engine */
- jrpriv->engine = crypto_engine_alloc_init_and_set(jrdev, true, NULL,
- false,
+ jrpriv->engine = crypto_engine_alloc_init_and_set(jrdev, true, false,
CRYPTO_ENGINE_MAX_QLEN);
if (!jrpriv->engine) {
dev_err(jrdev, "Could not init crypto-engine\n");
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index b6e7c0b29d4e..1e731ed8702b 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -442,11 +442,8 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
if (!cpumask_test_cpu(*cpu, cpus)) {
int *pcpu = &get_cpu_var(last_cpu);
- *pcpu = cpumask_next(*pcpu, cpus);
- if (*pcpu >= nr_cpu_ids)
- *pcpu = cpumask_first(cpus);
+ *pcpu = cpumask_next_wrap(*pcpu, cpus);
*cpu = *pcpu;
-
put_cpu_var(last_cpu);
}
drv_ctx->cpu = *cpu;
diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
index a1055554b47a..dc26bc22c91d 100644
--- a/drivers/crypto/ccp/ccp-debugfs.c
+++ b/drivers/crypto/ccp/ccp-debugfs.c
@@ -319,5 +319,8 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
void ccp5_debugfs_destroy(void)
{
+ mutex_lock(&ccp_debugfs_lock);
debugfs_remove_recursive(ccp_debugfs_dir);
+ ccp_debugfs_dir = NULL;
+ mutex_unlock(&ccp_debugfs_lock);
}
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 109b5aef4034..d78865d9d5f0 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -633,10 +633,16 @@ static noinline_for_stack int
ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
struct ccp_aes_engine *aes = &cmd->u.aes;
- struct ccp_dm_workarea key, ctx, final_wa, tag;
- struct ccp_data src, dst;
- struct ccp_data aad;
- struct ccp_op op;
+ struct {
+ struct ccp_dm_workarea key;
+ struct ccp_dm_workarea ctx;
+ struct ccp_dm_workarea final;
+ struct ccp_dm_workarea tag;
+ struct ccp_data src;
+ struct ccp_data dst;
+ struct ccp_data aad;
+ struct ccp_op op;
+ } *wa __cleanup(kfree) = kzalloc(sizeof *wa, GFP_KERNEL);
unsigned int dm_offset;
unsigned int authsize;
unsigned int jobid;
@@ -650,6 +656,9 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
struct scatterlist *p_outp, sg_outp[2];
struct scatterlist *p_aad;
+ if (!wa)
+ return -ENOMEM;
+
if (!aes->iv)
return -EINVAL;
@@ -696,26 +705,26 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
jobid = CCP_NEW_JOBID(cmd_q->ccp);
- memset(&op, 0, sizeof(op));
- op.cmd_q = cmd_q;
- op.jobid = jobid;
- op.sb_key = cmd_q->sb_key; /* Pre-allocated */
- op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
- op.init = 1;
- op.u.aes.type = aes->type;
+ memset(&wa->op, 0, sizeof(wa->op));
+ wa->op.cmd_q = cmd_q;
+ wa->op.jobid = jobid;
+ wa->op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+ wa->op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+ wa->op.init = 1;
+ wa->op.u.aes.type = aes->type;
/* Copy the key to the LSB */
- ret = ccp_init_dm_workarea(&key, cmd_q,
+ ret = ccp_init_dm_workarea(&wa->key, cmd_q,
CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
DMA_TO_DEVICE);
if (ret)
return ret;
dm_offset = CCP_SB_BYTES - aes->key_len;
- ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ ret = ccp_set_dm_area(&wa->key, dm_offset, aes->key, 0, aes->key_len);
if (ret)
goto e_key;
- ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ ret = ccp_copy_to_sb(cmd_q, &wa->key, wa->op.jobid, wa->op.sb_key,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
@@ -726,58 +735,58 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
* There is an assumption here that the IV is 96 bits in length, plus
* a nonce of 32 bits. If no IV is present, use a zeroed buffer.
*/
- ret = ccp_init_dm_workarea(&ctx, cmd_q,
+ ret = ccp_init_dm_workarea(&wa->ctx, cmd_q,
CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
DMA_BIDIRECTIONAL);
if (ret)
goto e_key;
dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
- ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_set_dm_area(&wa->ctx, dm_offset, aes->iv, 0, aes->iv_len);
if (ret)
goto e_ctx;
- ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ ret = ccp_copy_to_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_ctx;
}
- op.init = 1;
+ wa->op.init = 1;
if (aes->aad_len > 0) {
/* Step 1: Run a GHASH over the Additional Authenticated Data */
- ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
+ ret = ccp_init_data(&wa->aad, cmd_q, p_aad, aes->aad_len,
AES_BLOCK_SIZE,
DMA_TO_DEVICE);
if (ret)
goto e_ctx;
- op.u.aes.mode = CCP_AES_MODE_GHASH;
- op.u.aes.action = CCP_AES_GHASHAAD;
+ wa->op.u.aes.mode = CCP_AES_MODE_GHASH;
+ wa->op.u.aes.action = CCP_AES_GHASHAAD;
- while (aad.sg_wa.bytes_left) {
- ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);
+ while (wa->aad.sg_wa.bytes_left) {
+ ccp_prepare_data(&wa->aad, NULL, &wa->op, AES_BLOCK_SIZE, true);
- ret = cmd_q->ccp->vdata->perform->aes(&op);
+ ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_aad;
}
- ccp_process_data(&aad, NULL, &op);
- op.init = 0;
+ ccp_process_data(&wa->aad, NULL, &wa->op);
+ wa->op.init = 0;
}
}
- op.u.aes.mode = CCP_AES_MODE_GCTR;
- op.u.aes.action = aes->action;
+ wa->op.u.aes.mode = CCP_AES_MODE_GCTR;
+ wa->op.u.aes.action = aes->action;
if (ilen > 0) {
/* Step 2: Run a GCTR over the plaintext */
in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;
- ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
+ ret = ccp_init_data(&wa->src, cmd_q, p_inp, ilen,
AES_BLOCK_SIZE,
in_place ? DMA_BIDIRECTIONAL
: DMA_TO_DEVICE);
@@ -785,52 +794,52 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_aad;
if (in_place) {
- dst = src;
+ wa->dst = wa->src;
} else {
- ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
+ ret = ccp_init_data(&wa->dst, cmd_q, p_outp, ilen,
AES_BLOCK_SIZE, DMA_FROM_DEVICE);
if (ret)
goto e_src;
}
- op.soc = 0;
- op.eom = 0;
- op.init = 1;
- while (src.sg_wa.bytes_left) {
- ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
- if (!src.sg_wa.bytes_left) {
+ wa->op.soc = 0;
+ wa->op.eom = 0;
+ wa->op.init = 1;
+ while (wa->src.sg_wa.bytes_left) {
+ ccp_prepare_data(&wa->src, &wa->dst, &wa->op, AES_BLOCK_SIZE, true);
+ if (!wa->src.sg_wa.bytes_left) {
unsigned int nbytes = ilen % AES_BLOCK_SIZE;
if (nbytes) {
- op.eom = 1;
- op.u.aes.size = (nbytes * 8) - 1;
+ wa->op.eom = 1;
+ wa->op.u.aes.size = (nbytes * 8) - 1;
}
}
- ret = cmd_q->ccp->vdata->perform->aes(&op);
+ ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
}
- ccp_process_data(&src, &dst, &op);
- op.init = 0;
+ ccp_process_data(&wa->src, &wa->dst, &wa->op);
+ wa->op.init = 0;
}
}
/* Step 3: Update the IV portion of the context with the original IV */
- ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ ret = ccp_copy_from_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
}
- ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_set_dm_area(&wa->ctx, dm_offset, aes->iv, 0, aes->iv_len);
if (ret)
goto e_dst;
- ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ ret = ccp_copy_to_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
@@ -840,75 +849,75 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
/* Step 4: Concatenate the lengths of the AAD and source, and
* hash that 16 byte buffer.
*/
- ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
+ ret = ccp_init_dm_workarea(&wa->final, cmd_q, AES_BLOCK_SIZE,
DMA_BIDIRECTIONAL);
if (ret)
goto e_dst;
- final = (__be64 *)final_wa.address;
+ final = (__be64 *)wa->final.address;
final[0] = cpu_to_be64(aes->aad_len * 8);
final[1] = cpu_to_be64(ilen * 8);
- memset(&op, 0, sizeof(op));
- op.cmd_q = cmd_q;
- op.jobid = jobid;
- op.sb_key = cmd_q->sb_key; /* Pre-allocated */
- op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
- op.init = 1;
- op.u.aes.type = aes->type;
- op.u.aes.mode = CCP_AES_MODE_GHASH;
- op.u.aes.action = CCP_AES_GHASHFINAL;
- op.src.type = CCP_MEMTYPE_SYSTEM;
- op.src.u.dma.address = final_wa.dma.address;
- op.src.u.dma.length = AES_BLOCK_SIZE;
- op.dst.type = CCP_MEMTYPE_SYSTEM;
- op.dst.u.dma.address = final_wa.dma.address;
- op.dst.u.dma.length = AES_BLOCK_SIZE;
- op.eom = 1;
- op.u.aes.size = 0;
- ret = cmd_q->ccp->vdata->perform->aes(&op);
+ memset(&wa->op, 0, sizeof(wa->op));
+ wa->op.cmd_q = cmd_q;
+ wa->op.jobid = jobid;
+ wa->op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+ wa->op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+ wa->op.init = 1;
+ wa->op.u.aes.type = aes->type;
+ wa->op.u.aes.mode = CCP_AES_MODE_GHASH;
+ wa->op.u.aes.action = CCP_AES_GHASHFINAL;
+ wa->op.src.type = CCP_MEMTYPE_SYSTEM;
+ wa->op.src.u.dma.address = wa->final.dma.address;
+ wa->op.src.u.dma.length = AES_BLOCK_SIZE;
+ wa->op.dst.type = CCP_MEMTYPE_SYSTEM;
+ wa->op.dst.u.dma.address = wa->final.dma.address;
+ wa->op.dst.u.dma.length = AES_BLOCK_SIZE;
+ wa->op.eom = 1;
+ wa->op.u.aes.size = 0;
+ ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
if (ret)
goto e_final_wa;
if (aes->action == CCP_AES_ACTION_ENCRYPT) {
/* Put the ciphered tag after the ciphertext. */
- ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
+ ccp_get_dm_area(&wa->final, 0, p_tag, 0, authsize);
} else {
/* Does this ciphered tag match the input? */
- ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
+ ret = ccp_init_dm_workarea(&wa->tag, cmd_q, authsize,
DMA_BIDIRECTIONAL);
if (ret)
goto e_final_wa;
- ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
+ ret = ccp_set_dm_area(&wa->tag, 0, p_tag, 0, authsize);
if (ret) {
- ccp_dm_free(&tag);
+ ccp_dm_free(&wa->tag);
goto e_final_wa;
}
- ret = crypto_memneq(tag.address, final_wa.address,
+ ret = crypto_memneq(wa->tag.address, wa->final.address,
authsize) ? -EBADMSG : 0;
- ccp_dm_free(&tag);
+ ccp_dm_free(&wa->tag);
}
e_final_wa:
- ccp_dm_free(&final_wa);
+ ccp_dm_free(&wa->final);
e_dst:
if (ilen > 0 && !in_place)
- ccp_free_data(&dst, cmd_q);
+ ccp_free_data(&wa->dst, cmd_q);
e_src:
if (ilen > 0)
- ccp_free_data(&src, cmd_q);
+ ccp_free_data(&wa->src, cmd_q);
e_aad:
if (aes->aad_len)
- ccp_free_data(&aad, cmd_q);
+ ccp_free_data(&wa->aad, cmd_q);
e_ctx:
- ccp_dm_free(&ctx);
+ ccp_dm_free(&wa->ctx);
e_key:
- ccp_dm_free(&key);
+ ccp_dm_free(&wa->key);
return ret;
}
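
The rewrite above replaces several large on-stack work areas with one heap allocation that is released automatically on every return path. A minimal sketch of that scoped-allocation idiom using the __free(kfree) helper from <linux/cleanup.h>/<linux/slab.h>; the structure and function names here are illustrative, not the driver's:

#include <linux/cleanup.h>
#include <linux/slab.h>

struct big_workarea {
        u8 key[64];
        u8 ctx[64];
        u8 tag[16];
};

static int do_one_request(void)
{
        struct big_workarea *wa __free(kfree) = kzalloc(sizeof(*wa), GFP_KERNEL);

        if (!wa)
                return -ENOMEM;

        /*
         * Use wa->key, wa->ctx, wa->tag as the request state. kfree(wa)
         * runs automatically when wa goes out of scope, so error unwinding
         * needs no explicit free.
         */
        return 0;
}
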
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 3451bada884e..e058ba027792 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -434,7 +434,7 @@ cleanup:
return rc;
}
-static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order)
+static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order, bool locked)
{
unsigned long npages = 1ul << order, paddr;
struct sev_device *sev;
@@ -453,7 +453,7 @@ static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order)
return page;
paddr = __pa((unsigned long)page_address(page));
- if (rmp_mark_pages_firmware(paddr, npages, false))
+ if (rmp_mark_pages_firmware(paddr, npages, locked))
return NULL;
return page;
@@ -463,7 +463,7 @@ void *snp_alloc_firmware_page(gfp_t gfp_mask)
{
struct page *page;
- page = __snp_alloc_firmware_pages(gfp_mask, 0);
+ page = __snp_alloc_firmware_pages(gfp_mask, 0, false);
return page ? page_address(page) : NULL;
}
@@ -498,7 +498,7 @@ static void *sev_fw_alloc(unsigned long len)
{
struct page *page;
- page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len));
+ page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len), true);
if (!page)
return NULL;
@@ -1276,9 +1276,11 @@ static int __sev_platform_init_handle_init_ex_path(struct sev_device *sev)
static int __sev_platform_init_locked(int *error)
{
- int rc, psp_ret = SEV_RET_NO_FW_CALL;
+ int rc, psp_ret, dfflush_error;
struct sev_device *sev;
+ psp_ret = dfflush_error = SEV_RET_NO_FW_CALL;
+
if (!psp_master || !psp_master->sev_data)
return -ENODEV;
@@ -1320,10 +1322,10 @@ static int __sev_platform_init_locked(int *error)
/* Prepare for first SEV guest launch after INIT */
wbinvd_on_all_cpus();
- rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
+ rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, &dfflush_error);
if (rc) {
dev_err(sev->dev, "SEV: DF_FLUSH failed %#x, rc %d\n",
- *error, rc);
+ dfflush_error, rc);
return rc;
}
@@ -1785,8 +1787,14 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
sev->snp_initialized = false;
dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n");
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &snp_panic_notifier);
+ /*
+ * __sev_snp_shutdown_locked() deadlocks when it tries to unregister
+ * itself during panic as the panic notifier is called with RCU read
+ * lock held and notifier unregistration does RCU synchronization.
+ */
+ if (!panic)
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &snp_panic_notifier);
/* Reset TMR size back to default */
sev_es_tmr_size = SEV_TMR_SIZE;
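
The new comment captures the constraint: panic notifiers run under rcu_read_lock(), while atomic_notifier_chain_unregister() waits for an RCU grace period, so unregistering from the panic path would deadlock. A rough sketch of how a panic notifier typically passes the panic flag down to this helper; only __sev_snp_shutdown_locked() and snp_panic_notifier appear in the hunk, the rest is assumed for illustration:

#include <linux/notifier.h>

static int snp_shutdown_on_panic_sketch(struct notifier_block *nb,
                                        unsigned long action, void *data)
{
        int error;

        /* panic == true: shut the firmware down but skip the
         * RCU-synchronizing notifier unregistration shown above.
         */
        __sev_snp_shutdown_locked(&error, true);

        return NOTIFY_DONE;
}
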
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index e1be2072d680..e7bb803912a6 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -453,6 +453,7 @@ static const struct psp_vdata pspv6 = {
.cmdresp_reg = 0x10944, /* C2PMSG_17 */
.cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */
.cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10510, /* P2CMSG_INTEN */
.intsts_reg = 0x10514, /* P2CMSG_INTSTS */
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index bcca55bff910..3963bb91321f 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -224,7 +224,7 @@ static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
/* Set MLLI size for the bypass operation */
mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
- dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
+ dev_dbg(dev, "MLLI params: virt_addr=%p dma_addr=%pad mlli_len=0x%X\n",
mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
mlli_params->mlli_len);
@@ -239,7 +239,7 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
{
unsigned int index = sgl_data->num_of_buffers;
- dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
+ dev_dbg(dev, "index=%u nents=%u sgl=%p data_len=0x%08X is_last=%d\n",
index, nents, sgl, data_len, is_last_table);
sgl_data->nents[index] = nents;
sgl_data->entry[index].sgl = sgl;
@@ -298,7 +298,7 @@ cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
dev_err(dev, "dma_map_sg() config buffer failed\n");
return -ENOMEM;
}
- dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%p offset=%u length=%u\n",
&sg_dma_address(&areq_ctx->ccm_adata_sg),
sg_page(&areq_ctx->ccm_adata_sg),
sg_virt(&areq_ctx->ccm_adata_sg),
@@ -323,7 +323,7 @@ static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
dev_err(dev, "dma_map_sg() src buffer failed\n");
return -ENOMEM;
}
- dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%p offset=%u length=%u\n",
&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
areq_ctx->buff_sg->length);
@@ -359,11 +359,11 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
if (src != dst) {
dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
- dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
- dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+ dev_dbg(dev, "Unmapped req->dst=%p\n", sg_virt(dst));
+ dev_dbg(dev, "Unmapped req->src=%p\n", sg_virt(src));
} else {
dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
- dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+ dev_dbg(dev, "Unmapped req->src=%p\n", sg_virt(src));
}
}
@@ -391,11 +391,11 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
req_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
- dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping iv %u B at va=%p for DMA failed\n",
ivsize, info);
return -ENOMEM;
}
- dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped iv %u B at va=%p to dma=%pad\n",
ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
} else {
req_ctx->gen_ctx.iv_dma_addr = 0;
@@ -506,7 +506,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
(areq_ctx->mlli_params.mlli_virt_addr)) {
- dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+ dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%p\n",
&areq_ctx->mlli_params.mlli_dma_addr,
areq_ctx->mlli_params.mlli_virt_addr);
dma_pool_free(areq_ctx->mlli_params.curr_pool,
@@ -514,13 +514,13 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
areq_ctx->mlli_params.mlli_dma_addr);
}
- dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
+ dev_dbg(dev, "Unmapping src sgl: req->src=%p areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
areq_ctx->assoclen, req->cryptlen);
dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
if (req->src != req->dst) {
- dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
+ dev_dbg(dev, "Unmapping dst sgl: req->dst=%p\n",
sg_virt(req->dst));
dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE);
}
@@ -566,7 +566,7 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
- dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping iv %u B at va=%p for DMA failed\n",
hw_iv_size, req->iv);
kfree_sensitive(areq_ctx->gen_ctx.iv);
areq_ctx->gen_ctx.iv = NULL;
@@ -574,7 +574,7 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
goto chain_iv_exit;
}
- dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped iv %u B at va=%p to dma=%pad\n",
hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
chain_iv_exit:
@@ -977,7 +977,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping mac_buf %u B at va=%p for DMA failed\n",
MAX_MAC_SIZE, areq_ctx->mac_buf);
rc = -ENOMEM;
goto aead_map_failure;
@@ -991,7 +991,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping mac_buf %u B at va=%p for DMA failed\n",
AES_BLOCK_SIZE, addr);
areq_ctx->ccm_iv0_dma_addr = 0;
rc = -ENOMEM;
@@ -1009,7 +1009,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping hkey %u B at va=%p for DMA failed\n",
AES_BLOCK_SIZE, areq_ctx->hkey);
rc = -ENOMEM;
goto aead_map_failure;
@@ -1019,7 +1019,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping gcm_len_block %u B at va=%p for DMA failed\n",
AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
rc = -ENOMEM;
goto aead_map_failure;
@@ -1030,7 +1030,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
AES_BLOCK_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%p for DMA failed\n",
AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
areq_ctx->gcm_iv_inc1_dma_addr = 0;
rc = -ENOMEM;
@@ -1042,7 +1042,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
AES_BLOCK_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%p for DMA failed\n",
AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
areq_ctx->gcm_iv_inc2_dma_addr = 0;
rc = -ENOMEM;
@@ -1152,7 +1152,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
u32 dummy = 0;
u32 mapped_nents = 0;
- dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
+ dev_dbg(dev, "final params : curr_buff=%p curr_buff_cnt=0x%X nbytes = 0x%X src=%p curr_index=%u\n",
curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
/* Init the type of the dma buffer */
areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
@@ -1236,7 +1236,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
u32 dummy = 0;
u32 mapped_nents = 0;
- dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
+ dev_dbg(dev, " update params : curr_buff=%p curr_buff_cnt=0x%X nbytes=0x%X src=%p curr_index=%u\n",
curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
/* Init the type of the dma buffer */
areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
@@ -1246,7 +1246,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
areq_ctx->in_nents = 0;
if (total_in_len < block_size) {
- dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
+ dev_dbg(dev, " less than one block: curr_buff=%p *curr_buff_cnt=0x%X copy_to=%p\n",
curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
sg_copy_to_buffer(src, areq_ctx->in_nents,
@@ -1265,7 +1265,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
/* Copy the new residue to next buffer */
if (*next_buff_cnt) {
- dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
+ dev_dbg(dev, " handle residue: next buff %p skip data %u residue %u\n",
next_buff, (update_data_len - *curr_buff_cnt),
*next_buff_cnt);
cc_copy_sg_portion(dev, next_buff, src,
@@ -1338,7 +1338,7 @@ void cc_unmap_hash_request(struct device *dev, void *ctx,
*allocated and should be released
*/
if (areq_ctx->mlli_params.curr_pool) {
- dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+ dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%p\n",
&areq_ctx->mlli_params.mlli_dma_addr,
areq_ctx->mlli_params.mlli_virt_addr);
dma_pool_free(areq_ctx->mlli_params.curr_pool,
@@ -1347,14 +1347,14 @@ void cc_unmap_hash_request(struct device *dev, void *ctx,
}
if (src && areq_ctx->in_nents) {
- dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
+ dev_dbg(dev, "Unmapped sg src: virt=%p dma=%pad len=0x%X\n",
sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
dma_unmap_sg(dev, src,
areq_ctx->in_nents, DMA_TO_DEVICE);
}
if (*prev_len) {
- dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
+ dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%p dma=%pad len 0x%X\n",
sg_virt(areq_ctx->buff_sg),
&sg_dma_address(areq_ctx->buff_sg),
sg_dma_len(areq_ctx->buff_sg));
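
Every hunk in this file (and the cc_cipher.c and cc_hash.c hunks that follow) only swaps the %pK printk specifier for %p. On current kernels %p output is hashed by default, so the debug messages still avoid exposing raw kernel addresses while dropping the kptr_restrict-dependent %pK behaviour; for example, at any of the call sites above:

dev_dbg(dev, "buf=%p\n", buf);          /* hashed unless pointer hashing is disabled */
dev_dbg(dev, "buf=%px\n", buf);         /* raw address; generally avoided in logs */
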
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index d39c067672fd..e2cbfdf7a0e4 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -211,11 +211,11 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
max_key_buf_size,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
- dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping Key %u B at va=%p for DMA failed\n",
max_key_buf_size, ctx_p->user.key);
goto free_key;
}
- dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped key %u B at va=%p to dma=%pad\n",
max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
return 0;
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index d0612bec4d58..c6d085c8ff79 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -125,7 +125,7 @@ static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
digestsize);
return -ENOMEM;
}
- dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped digest result buffer %u B at va=%p to dma=%pad\n",
digestsize, state->digest_result_buff,
&state->digest_result_dma_addr);
@@ -184,11 +184,11 @@ static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
dma_map_single(dev, state->digest_buff,
ctx->inter_digestsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
- dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping digest len %d B at va=%p for DMA failed\n",
ctx->inter_digestsize, state->digest_buff);
return -EINVAL;
}
- dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped digest %d B at va=%p to dma=%pad\n",
ctx->inter_digestsize, state->digest_buff,
&state->digest_buff_dma_addr);
@@ -197,11 +197,11 @@ static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
dma_map_single(dev, state->digest_bytes_len,
HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
- dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping digest len %u B at va=%p for DMA failed\n",
HASH_MAX_LEN_SIZE, state->digest_bytes_len);
goto unmap_digest_buf;
}
- dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped digest len %u B at va=%p to dma=%pad\n",
HASH_MAX_LEN_SIZE, state->digest_bytes_len,
&state->digest_bytes_len_dma_addr);
}
@@ -212,12 +212,12 @@ static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
ctx->inter_digestsize,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
- dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping opad digest %d B at va=%p for DMA failed\n",
ctx->inter_digestsize,
state->opad_digest_buff);
goto unmap_digest_len;
}
- dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped opad digest %d B at va=%p to dma=%pad\n",
ctx->inter_digestsize, state->opad_digest_buff,
&state->opad_digest_dma_addr);
}
@@ -272,7 +272,7 @@ static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
if (state->digest_result_dma_addr) {
dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
DMA_BIDIRECTIONAL);
- dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n",
+ dev_dbg(dev, "unmpa digest result buffer va (%p) pa (%pad) len %u\n",
state->digest_result_buff,
&state->digest_result_dma_addr, digestsize);
memcpy(result, state->digest_result_buff, digestsize);
@@ -287,7 +287,7 @@ static void cc_update_complete(struct device *dev, void *cc_req, int err)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
- dev_dbg(dev, "req=%pK\n", req);
+ dev_dbg(dev, "req=%p\n", req);
if (err != -EINPROGRESS) {
/* Not a BACKLOG notification */
@@ -306,7 +306,7 @@ static void cc_digest_complete(struct device *dev, void *cc_req, int err)
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
- dev_dbg(dev, "req=%pK\n", req);
+ dev_dbg(dev, "req=%p\n", req);
if (err != -EINPROGRESS) {
/* Not a BACKLOG notification */
@@ -326,7 +326,7 @@ static void cc_hash_complete(struct device *dev, void *cc_req, int err)
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
- dev_dbg(dev, "req=%pK\n", req);
+ dev_dbg(dev, "req=%p\n", req);
if (err != -EINPROGRESS) {
/* Not a BACKLOG notification */
@@ -1077,11 +1077,11 @@ static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
- dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping digest len %zu B at va=%p for DMA failed\n",
sizeof(ctx->digest_buff), ctx->digest_buff);
goto fail;
}
- dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped digest %zu B at va=%p to dma=%pad\n",
sizeof(ctx->digest_buff), ctx->digest_buff,
&ctx->digest_buff_dma_addr);
@@ -1090,12 +1090,12 @@ static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
sizeof(ctx->opad_tmp_keys_buff),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
- dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping opad digest %zu B at va=%p for DMA failed\n",
sizeof(ctx->opad_tmp_keys_buff),
ctx->opad_tmp_keys_buff);
goto fail;
}
- dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%p to dma=%pad\n",
sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
&ctx->opad_tmp_keys_dma_addr);
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index 6124fbbbed94..bbd118f8de0e 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -77,6 +77,5 @@ int cc_pm_get(struct device *dev)
void cc_pm_put_suspend(struct device *dev)
{
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
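
This removal (and the identical one in hisilicon/qm.c below) tracks the runtime-PM change where pm_runtime_put_autosuspend() updates the last-busy timestamp itself, making the explicit pm_runtime_mark_last_busy() call redundant. A minimal sketch of the resulting put path, under that assumption:

#include <linux/pm_runtime.h>

static void put_device_autosuspend(struct device *dev)
{
        /* The autosuspend helper marks last busy before dropping the usage count. */
        pm_runtime_put_autosuspend(dev);
}
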
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index af37477ffd8d..be21e4e2016c 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -314,30 +314,30 @@ static int chcr_compute_partial_hash(struct shash_desc *desc,
if (digest_size == SHA1_DIGEST_SIZE) {
error = crypto_shash_init(desc) ?:
crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha1_st);
+ crypto_shash_export_core(desc, &sha1_st);
memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
} else if (digest_size == SHA224_DIGEST_SIZE) {
error = crypto_shash_init(desc) ?:
crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha256_st);
+ crypto_shash_export_core(desc, &sha256_st);
memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
} else if (digest_size == SHA256_DIGEST_SIZE) {
error = crypto_shash_init(desc) ?:
crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha256_st);
+ crypto_shash_export_core(desc, &sha256_st);
memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
} else if (digest_size == SHA384_DIGEST_SIZE) {
error = crypto_shash_init(desc) ?:
crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha512_st);
+ crypto_shash_export_core(desc, &sha512_st);
memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
} else if (digest_size == SHA512_DIGEST_SIZE) {
error = crypto_shash_init(desc) ?:
crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha512_st);
+ crypto_shash_export_core(desc, &sha512_st);
memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
} else {
error = -EINVAL;
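
crypto_shash_export_core() exports the low-level core state of the hash, which is all this partial-hash helper needs in order to read the intermediate digest words out of sha*_st.state. A hedged sketch of the same HMAC-style precomputation for the SHA-256 case only; error handling and the other digest sizes follow the driver code above:

#include <crypto/hash.h>
#include <crypto/sha2.h>
#include <linux/string.h>

static int partial_sha256(struct shash_desc *desc, const u8 *ipad, u8 *out)
{
        struct sha256_state st;
        int error;

        error = crypto_shash_init(desc) ?:
                crypto_shash_update(desc, ipad, SHA256_BLOCK_SIZE) ?:
                crypto_shash_export_core(desc, &st);
        if (error)
                return error;

        /* st.state holds the intermediate SHA-256 words after one block. */
        memcpy(out, st.state, SHA256_DIGEST_SIZE);
        return 0;
}
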
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 61b5e1c5d019..1550c3818383 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -1491,11 +1491,13 @@ static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+ /* Do unmap before data processing */
+ hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+
p = sg_virt(areq->dst);
memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
- hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
kpp_request_complete(areq, ret);
atomic64_inc(&dfx[HPRE_RECV_CNT].value);
@@ -1808,9 +1810,11 @@ static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+ /* Do unmap before data processing */
+ hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+
hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
- hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
kpp_request_complete(areq, ret);
atomic64_inc(&dfx[HPRE_RECV_CNT].value);
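
Both callbacks now unmap the DMA buffers before the CPU touches the result through sg_virt(): until dma_unmap_sg() (or a dma_sync_*_for_cpu() call) hands the buffer back to the CPU, reads on a non-coherent system may see stale data. The general shape of the rule, with generic names:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static void finish_result(struct device *dev, struct scatterlist *sgl,
                          int nents, size_t src_off, size_t len)
{
        u8 *p;

        /* Hand the buffer back to the CPU before reading it. */
        dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);

        p = sg_virt(sgl);
        memmove(p, p + src_off, len);
}
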
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 7c41f9593d03..2e4ee7ecfdfb 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -912,7 +912,6 @@ static void qm_pm_put_sync(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
return;
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 703920b49c7c..81d0beda93b2 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -7,6 +7,12 @@
#include <linux/hisi_acc_qm.h>
#include "sec_crypto.h"
+#define SEC_PBUF_SZ 512
+#define SEC_MAX_MAC_LEN 64
+#define SEC_IV_SIZE 24
+#define SEC_SGE_NR_NUM 4
+#define SEC_SGL_ALIGN_SIZE 64
+
/* Algorithm resource per hardware SEC queue */
struct sec_alg_res {
u8 *pbuf;
@@ -20,6 +26,40 @@ struct sec_alg_res {
u16 depth;
};
+struct sec_hw_sge {
+ dma_addr_t buf;
+ void *page_ctrl;
+ __le32 len;
+ __le32 pad;
+ __le32 pad0;
+ __le32 pad1;
+};
+
+struct sec_hw_sgl {
+ dma_addr_t next_dma;
+ __le16 entry_sum_in_chain;
+ __le16 entry_sum_in_sgl;
+ __le16 entry_length_in_sgl;
+ __le16 pad0;
+ __le64 pad1[5];
+ struct sec_hw_sgl *next;
+ struct sec_hw_sge sge_entries[SEC_SGE_NR_NUM];
+} __aligned(SEC_SGL_ALIGN_SIZE);
+
+struct sec_src_dst_buf {
+ struct sec_hw_sgl in;
+ struct sec_hw_sgl out;
+};
+
+struct sec_request_buf {
+ union {
+ struct sec_src_dst_buf data_buf;
+ __u8 pbuf[SEC_PBUF_SZ];
+ };
+ dma_addr_t in_dma;
+ dma_addr_t out_dma;
+};
+
/* Cipher request of SEC private */
struct sec_cipher_req {
struct hisi_acc_hw_sgl *c_out;
@@ -29,6 +69,7 @@ struct sec_cipher_req {
struct skcipher_request *sk_req;
u32 c_len;
bool encrypt;
+ __u8 c_ivin_buf[SEC_IV_SIZE];
};
struct sec_aead_req {
@@ -37,6 +78,13 @@ struct sec_aead_req {
u8 *a_ivin;
dma_addr_t a_ivin_dma;
struct aead_request *aead_req;
+ __u8 a_ivin_buf[SEC_IV_SIZE];
+ __u8 out_mac_buf[SEC_MAX_MAC_LEN];
+};
+
+struct sec_instance_backlog {
+ struct list_head list;
+ spinlock_t lock;
};
/* SEC request of Crypto */
@@ -55,15 +103,17 @@ struct sec_req {
dma_addr_t in_dma;
struct sec_cipher_req c_req;
struct sec_aead_req aead_req;
- struct list_head backlog_head;
+ struct crypto_async_request *base;
int err_type;
int req_id;
u32 flag;
- /* Status of the SEC request */
- bool fake_busy;
bool use_pbuf;
+
+ struct list_head list;
+ struct sec_instance_backlog *backlog;
+ struct sec_request_buf buf;
};
/**
@@ -119,9 +169,11 @@ struct sec_qp_ctx {
struct sec_alg_res *res;
struct sec_ctx *ctx;
spinlock_t req_lock;
- struct list_head backlog;
+ spinlock_t id_lock;
struct hisi_acc_sgl_pool *c_in_pool;
struct hisi_acc_sgl_pool *c_out_pool;
+ struct sec_instance_backlog backlog;
+ u16 send_head;
};
enum sec_alg_type {
@@ -139,9 +191,6 @@ struct sec_ctx {
/* Half queues for encipher, and half for decipher */
u32 hlf_q_num;
- /* Threshold for fake busy, trigger to return -EBUSY to user */
- u32 fake_req_limit;
-
/* Current cyclic index to select a queue for encipher */
atomic_t enc_qcyclic;
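
The new sec_request_buf union gives each request its own pbuf or a pair of fixed four-entry hardware SGLs for the case where no ring slot (req_id) is available, and sec_instance_backlog replaces the fake-busy threshold with a plain list and lock per queue pair. The sec_crypto.c hunks below consume these; a condensed sketch of the try-then-backlog send they implement (simplified, reusing qp_send_message() and the sec_req fields introduced below, error paths omitted):

static int send_or_backlog_sketch(struct sec_req *req)
{
        int ret = qp_send_message(req);         /* -EINPROGRESS, -EBUSY or error */

        if (ret != -EBUSY)
                return ret;

        spin_lock_bh(&req->backlog->lock);
        ret = qp_send_message(req);             /* re-check under the backlog lock */
        if (ret == -EBUSY)
                list_add_tail(&req->list, &req->backlog->list);
        spin_unlock_bh(&req->backlog->lock);

        return ret;
}
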
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 8ea5305bc320..d044ded0f290 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -67,7 +67,6 @@
#define SEC_MAX_CCM_AAD_LEN 65279
#define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth))
-#define SEC_PBUF_SZ 512
#define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET (SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \
@@ -102,6 +101,8 @@
#define IV_LAST_BYTE_MASK 0xFF
#define IV_CTR_INIT 0x1
#define IV_BYTE_OFFSET 0x8
+#define SEC_GCM_MIN_AUTH_SZ 0x8
+#define SEC_RETRY_MAX_CNT 5U
static DEFINE_MUTEX(sec_algs_lock);
static unsigned int sec_available_devs;
@@ -116,40 +117,19 @@ struct sec_aead {
struct aead_alg alg;
};
-/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
-static inline u32 sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
-{
- if (req->c_req.encrypt)
- return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
- ctx->hlf_q_num;
-
- return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
- ctx->hlf_q_num;
-}
-
-static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
-{
- if (req->c_req.encrypt)
- atomic_dec(&ctx->enc_qcyclic);
- else
- atomic_dec(&ctx->dec_qcyclic);
-}
+static int sec_aead_soft_crypto(struct sec_ctx *ctx,
+ struct aead_request *aead_req,
+ bool encrypt);
+static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
+ struct skcipher_request *sreq, bool encrypt);
static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
int req_id;
- spin_lock_bh(&qp_ctx->req_lock);
+ spin_lock_bh(&qp_ctx->id_lock);
req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC);
- spin_unlock_bh(&qp_ctx->req_lock);
- if (unlikely(req_id < 0)) {
- dev_err(req->ctx->dev, "alloc req id fail!\n");
- return req_id;
- }
-
- req->qp_ctx = qp_ctx;
- qp_ctx->req_list[req_id] = req;
-
+ spin_unlock_bh(&qp_ctx->id_lock);
return req_id;
}
@@ -163,12 +143,9 @@ static void sec_free_req_id(struct sec_req *req)
return;
}
- qp_ctx->req_list[req_id] = NULL;
- req->qp_ctx = NULL;
-
- spin_lock_bh(&qp_ctx->req_lock);
+ spin_lock_bh(&qp_ctx->id_lock);
idr_remove(&qp_ctx->req_idr, req_id);
- spin_unlock_bh(&qp_ctx->req_lock);
+ spin_unlock_bh(&qp_ctx->id_lock);
}
static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
@@ -229,6 +206,90 @@ static int sec_cb_status_check(struct sec_req *req,
return 0;
}
+static int qp_send_message(struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int ret;
+
+ if (atomic_read(&qp_ctx->qp->qp_status.used) == qp_ctx->qp->sq_depth - 1)
+ return -EBUSY;
+
+ spin_lock_bh(&qp_ctx->req_lock);
+ if (atomic_read(&qp_ctx->qp->qp_status.used) == qp_ctx->qp->sq_depth - 1) {
+ spin_unlock_bh(&qp_ctx->req_lock);
+ return -EBUSY;
+ }
+
+ if (qp_ctx->ctx->type_supported == SEC_BD_TYPE2) {
+ req->sec_sqe.type2.tag = cpu_to_le16((u16)qp_ctx->send_head);
+ qp_ctx->req_list[qp_ctx->send_head] = req;
+ }
+
+ ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
+ if (ret) {
+ spin_unlock_bh(&qp_ctx->req_lock);
+ return ret;
+ }
+ if (qp_ctx->ctx->type_supported == SEC_BD_TYPE2)
+ qp_ctx->send_head = (qp_ctx->send_head + 1) % qp_ctx->qp->sq_depth;
+
+ spin_unlock_bh(&qp_ctx->req_lock);
+
+ atomic64_inc(&req->ctx->sec->debug.dfx.send_cnt);
+ return -EINPROGRESS;
+}
+
+static void sec_alg_send_backlog_soft(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
+{
+ struct sec_req *req, *tmp;
+ int ret;
+
+ list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) {
+ list_del(&req->list);
+ ctx->req_op->buf_unmap(ctx, req);
+ if (req->req_id >= 0)
+ sec_free_req_id(req);
+
+ if (ctx->alg_type == SEC_AEAD)
+ ret = sec_aead_soft_crypto(ctx, req->aead_req.aead_req,
+ req->c_req.encrypt);
+ else
+ ret = sec_skcipher_soft_crypto(ctx, req->c_req.sk_req,
+ req->c_req.encrypt);
+
+ /* Wake up the busy thread first, then return the errno. */
+ crypto_request_complete(req->base, -EINPROGRESS);
+ crypto_request_complete(req->base, ret);
+ }
+}
+
+static void sec_alg_send_backlog(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
+{
+ struct sec_req *req, *tmp;
+ int ret;
+
+ spin_lock_bh(&qp_ctx->backlog.lock);
+ list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) {
+ ret = qp_send_message(req);
+ switch (ret) {
+ case -EINPROGRESS:
+ list_del(&req->list);
+ crypto_request_complete(req->base, -EINPROGRESS);
+ break;
+ case -EBUSY:
+ /* Device is busy; stop sending further requests. */
+ goto unlock;
+ default:
+ /* Release memory resources and send all requests through software. */
+ sec_alg_send_backlog_soft(ctx, qp_ctx);
+ goto unlock;
+ }
+ }
+
+unlock:
+ spin_unlock_bh(&qp_ctx->backlog.lock);
+}
+
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
@@ -273,40 +334,54 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
ctx->req_op->callback(ctx, req, err);
}
-static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
+static int sec_alg_send_message_retry(struct sec_req *req)
{
- struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int ctr = 0;
int ret;
- if (ctx->fake_req_limit <=
- atomic_read(&qp_ctx->qp->qp_status.used) &&
- !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -EBUSY;
+ do {
+ ret = qp_send_message(req);
+ } while (ret == -EBUSY && ctr++ < SEC_RETRY_MAX_CNT);
- spin_lock_bh(&qp_ctx->req_lock);
- ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
- if (ctx->fake_req_limit <=
- atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
- list_add_tail(&req->backlog_head, &qp_ctx->backlog);
- atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
- atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
- spin_unlock_bh(&qp_ctx->req_lock);
+ return ret;
+}
+
+static int sec_alg_try_enqueue(struct sec_req *req)
+{
+ /* Check if any request is already backlogged */
+ if (!list_empty(&req->backlog->list))
return -EBUSY;
- }
- spin_unlock_bh(&qp_ctx->req_lock);
- if (unlikely(ret == -EBUSY))
- return -ENOBUFS;
+ /* Try to enqueue to HW ring */
+ return qp_send_message(req);
+}
- if (likely(!ret)) {
- ret = -EINPROGRESS;
- atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
- }
+
+static int sec_alg_send_message_maybacklog(struct sec_req *req)
+{
+ int ret;
+
+ ret = sec_alg_try_enqueue(req);
+ if (ret != -EBUSY)
+ return ret;
+
+ spin_lock_bh(&req->backlog->lock);
+ ret = sec_alg_try_enqueue(req);
+ if (ret == -EBUSY)
+ list_add_tail(&req->list, &req->backlog->list);
+ spin_unlock_bh(&req->backlog->lock);
return ret;
}
-/* Get DMA memory resources */
+static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
+{
+ if (req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ return sec_alg_send_message_maybacklog(req);
+
+ return sec_alg_send_message_retry(req);
+}
+
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
u16 q_depth = res->depth;
@@ -558,7 +633,10 @@ static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id)
spin_lock_init(&qp_ctx->req_lock);
idr_init(&qp_ctx->req_idr);
- INIT_LIST_HEAD(&qp_ctx->backlog);
+ spin_lock_init(&qp_ctx->backlog.lock);
+ spin_lock_init(&qp_ctx->id_lock);
+ INIT_LIST_HEAD(&qp_ctx->backlog.list);
+ qp_ctx->send_head = 0;
ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx);
if (ret)
@@ -602,9 +680,6 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
ctx->hlf_q_num = sec->ctx_q_num >> 1;
ctx->pbuf_supported = ctx->sec->iommu_used;
-
- /* Half of queue depth is taken as fake requests limit in the queue. */
- ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1;
ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
GFP_KERNEL);
if (!ctx->qp_ctx) {
@@ -706,7 +781,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
int ret;
ctx->alg_type = SEC_SKCIPHER;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
+ crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct sec_req));
ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
pr_err("get error skcipher iv size!\n");
@@ -883,24 +958,25 @@ GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)
static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
struct scatterlist *src)
{
- struct sec_aead_req *a_req = &req->aead_req;
- struct aead_request *aead_req = a_req->aead_req;
+ struct aead_request *aead_req = req->aead_req.aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct sec_request_buf *buf = &req->buf;
struct device *dev = ctx->dev;
int copy_size, pbuf_length;
int req_id = req->req_id;
struct crypto_aead *tfm;
+ u8 *mac_offset, *pbuf;
size_t authsize;
- u8 *mac_offset;
if (ctx->alg_type == SEC_AEAD)
copy_size = aead_req->cryptlen + aead_req->assoclen;
else
copy_size = c_req->c_len;
- pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
- qp_ctx->res[req_id].pbuf, copy_size);
+
+ pbuf = req->req_id < 0 ? buf->pbuf : qp_ctx->res[req_id].pbuf;
+ pbuf_length = sg_copy_to_buffer(src, sg_nents(src), pbuf, copy_size);
if (unlikely(pbuf_length != copy_size)) {
dev_err(dev, "copy src data to pbuf error!\n");
return -EINVAL;
@@ -908,8 +984,17 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
tfm = crypto_aead_reqtfm(aead_req);
authsize = crypto_aead_authsize(tfm);
- mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize;
- memcpy(a_req->out_mac, mac_offset, authsize);
+ mac_offset = pbuf + copy_size - authsize;
+ memcpy(req->aead_req.out_mac, mac_offset, authsize);
+ }
+
+ if (req->req_id < 0) {
+ buf->in_dma = dma_map_single(dev, buf->pbuf, SEC_PBUF_SZ, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, buf->in_dma)))
+ return -ENOMEM;
+
+ buf->out_dma = buf->in_dma;
+ return 0;
}
req->in_dma = qp_ctx->res[req_id].pbuf_dma;
@@ -924,6 +1009,7 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
struct aead_request *aead_req = req->aead_req.aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct sec_request_buf *buf = &req->buf;
int copy_size, pbuf_length;
int req_id = req->req_id;
@@ -932,10 +1018,16 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
else
copy_size = c_req->c_len;
- pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
- qp_ctx->res[req_id].pbuf, copy_size);
+ if (req->req_id < 0)
+ pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), buf->pbuf, copy_size);
+ else
+ pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), qp_ctx->res[req_id].pbuf,
+ copy_size);
if (unlikely(pbuf_length != copy_size))
dev_err(ctx->dev, "copy pbuf data to dst error!\n");
+
+ if (req->req_id < 0)
+ dma_unmap_single(ctx->dev, buf->in_dma, SEC_PBUF_SZ, DMA_BIDIRECTIONAL);
}
static int sec_aead_mac_init(struct sec_aead_req *req)
@@ -957,14 +1049,95 @@ static int sec_aead_mac_init(struct sec_aead_req *req)
return 0;
}
-static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
- struct scatterlist *src, struct scatterlist *dst)
+static void fill_sg_to_hw_sge(struct scatterlist *sgl, struct sec_hw_sge *hw_sge)
+{
+ hw_sge->buf = sg_dma_address(sgl);
+ hw_sge->len = cpu_to_le32(sg_dma_len(sgl));
+ hw_sge->page_ctrl = sg_virt(sgl);
+}
+
+static int sec_cipher_to_hw_sgl(struct device *dev, struct scatterlist *src,
+ struct sec_hw_sgl *src_in, dma_addr_t *hw_sgl_dma,
+ int dma_dir)
+{
+ struct sec_hw_sge *curr_hw_sge = src_in->sge_entries;
+ u32 i, sg_n, sg_n_mapped;
+ struct scatterlist *sg;
+ u32 sge_var = 0;
+
+ sg_n = sg_nents(src);
+ sg_n_mapped = dma_map_sg(dev, src, sg_n, dma_dir);
+ if (unlikely(!sg_n_mapped)) {
+ dev_err(dev, "dma mapping for SG error!\n");
+ return -EINVAL;
+ } else if (unlikely(sg_n_mapped > SEC_SGE_NR_NUM)) {
+ dev_err(dev, "the number of entries in input scatterlist error!\n");
+ dma_unmap_sg(dev, src, sg_n, dma_dir);
+ return -EINVAL;
+ }
+
+ for_each_sg(src, sg, sg_n_mapped, i) {
+ fill_sg_to_hw_sge(sg, curr_hw_sge);
+ curr_hw_sge++;
+ sge_var++;
+ }
+
+ src_in->entry_sum_in_sgl = cpu_to_le16(sge_var);
+ src_in->entry_sum_in_chain = cpu_to_le16(SEC_SGE_NR_NUM);
+ src_in->entry_length_in_sgl = cpu_to_le16(SEC_SGE_NR_NUM);
+ *hw_sgl_dma = dma_map_single(dev, src_in, sizeof(struct sec_hw_sgl), dma_dir);
+ if (unlikely(dma_mapping_error(dev, *hw_sgl_dma))) {
+ dma_unmap_sg(dev, src, sg_n, dma_dir);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void sec_cipher_put_hw_sgl(struct device *dev, struct scatterlist *src,
+ dma_addr_t src_in, int dma_dir)
+{
+ dma_unmap_single(dev, src_in, sizeof(struct sec_hw_sgl), dma_dir);
+ dma_unmap_sg(dev, src, sg_nents(src), dma_dir);
+}
+
+static int sec_cipher_map_sgl(struct device *dev, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
+{
+ struct sec_hw_sgl *src_in = &req->buf.data_buf.in;
+ struct sec_hw_sgl *dst_out = &req->buf.data_buf.out;
+ int ret;
+
+ if (dst == src) {
+ ret = sec_cipher_to_hw_sgl(dev, src, src_in, &req->buf.in_dma,
+ DMA_BIDIRECTIONAL);
+ req->buf.out_dma = req->buf.in_dma;
+ return ret;
+ }
+
+ ret = sec_cipher_to_hw_sgl(dev, src, src_in, &req->buf.in_dma, DMA_TO_DEVICE);
+ if (unlikely(ret))
+ return ret;
+
+ ret = sec_cipher_to_hw_sgl(dev, dst, dst_out, &req->buf.out_dma,
+ DMA_FROM_DEVICE);
+ if (unlikely(ret)) {
+ sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_TO_DEVICE);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sec_cipher_map_inner(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
{
struct sec_cipher_req *c_req = &req->c_req;
struct sec_aead_req *a_req = &req->aead_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
struct sec_alg_res *res = &qp_ctx->res[req->req_id];
struct device *dev = ctx->dev;
+ enum dma_data_direction src_direction;
int ret;
if (req->use_pbuf) {
@@ -977,10 +1150,9 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
a_req->out_mac_dma = res->pbuf_dma +
SEC_PBUF_MAC_OFFSET;
}
- ret = sec_cipher_pbuf_map(ctx, req, src);
-
- return ret;
+ return sec_cipher_pbuf_map(ctx, req, src);
}
+
c_req->c_ivin = res->c_ivin;
c_req->c_ivin_dma = res->c_ivin_dma;
if (ctx->alg_type == SEC_AEAD) {
@@ -990,10 +1162,11 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
a_req->out_mac_dma = res->out_mac_dma;
}
+ src_direction = dst == src ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
qp_ctx->c_in_pool,
req->req_id,
- &req->in_dma);
+ &req->in_dma, src_direction);
if (IS_ERR(req->in)) {
dev_err(dev, "fail to dma map input sgl buffers!\n");
return PTR_ERR(req->in);
@@ -1003,7 +1176,7 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
ret = sec_aead_mac_init(a_req);
if (unlikely(ret)) {
dev_err(dev, "fail to init mac data for ICV!\n");
- hisi_acc_sg_buf_unmap(dev, src, req->in);
+ hisi_acc_sg_buf_unmap(dev, src, req->in, src_direction);
return ret;
}
}
@@ -1015,11 +1188,12 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
qp_ctx->c_out_pool,
req->req_id,
- &c_req->c_out_dma);
+ &c_req->c_out_dma,
+ DMA_FROM_DEVICE);
if (IS_ERR(c_req->c_out)) {
dev_err(dev, "fail to dma map output sgl buffers!\n");
- hisi_acc_sg_buf_unmap(dev, src, req->in);
+ hisi_acc_sg_buf_unmap(dev, src, req->in, src_direction);
return PTR_ERR(c_req->c_out);
}
}
@@ -1027,19 +1201,108 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
return 0;
}
+static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
+{
+ struct sec_aead_req *a_req = &req->aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+ bool is_aead = (ctx->alg_type == SEC_AEAD);
+ struct device *dev = ctx->dev;
+ int ret = -ENOMEM;
+
+ if (req->req_id >= 0)
+ return sec_cipher_map_inner(ctx, req, src, dst);
+
+ c_req->c_ivin = c_req->c_ivin_buf;
+ c_req->c_ivin_dma = dma_map_single(dev, c_req->c_ivin,
+ SEC_IV_SIZE, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, c_req->c_ivin_dma)))
+ return -ENOMEM;
+
+ if (is_aead) {
+ a_req->a_ivin = a_req->a_ivin_buf;
+ a_req->out_mac = a_req->out_mac_buf;
+ a_req->a_ivin_dma = dma_map_single(dev, a_req->a_ivin,
+ SEC_IV_SIZE, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, a_req->a_ivin_dma)))
+ goto free_c_ivin_dma;
+
+ a_req->out_mac_dma = dma_map_single(dev, a_req->out_mac,
+ SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, a_req->out_mac_dma)))
+ goto free_a_ivin_dma;
+ }
+ if (req->use_pbuf) {
+ ret = sec_cipher_pbuf_map(ctx, req, src);
+ if (unlikely(ret))
+ goto free_out_mac_dma;
+
+ return 0;
+ }
+
+ if (!c_req->encrypt && is_aead) {
+ ret = sec_aead_mac_init(a_req);
+ if (unlikely(ret)) {
+ dev_err(dev, "fail to init mac data for ICV!\n");
+ goto free_out_mac_dma;
+ }
+ }
+
+ ret = sec_cipher_map_sgl(dev, req, src, dst);
+ if (unlikely(ret)) {
+ dev_err(dev, "fail to dma map input sgl buffers!\n");
+ goto free_out_mac_dma;
+ }
+
+ return 0;
+
+free_out_mac_dma:
+ if (is_aead)
+ dma_unmap_single(dev, a_req->out_mac_dma, SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL);
+free_a_ivin_dma:
+ if (is_aead)
+ dma_unmap_single(dev, a_req->a_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
+free_c_ivin_dma:
+ dma_unmap_single(dev, c_req->c_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
+ return ret;
+}
+
static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
struct scatterlist *src, struct scatterlist *dst)
{
+ struct sec_aead_req *a_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct device *dev = ctx->dev;
+ if (req->req_id >= 0) {
+ if (req->use_pbuf) {
+ sec_cipher_pbuf_unmap(ctx, req, dst);
+ } else {
+ if (dst != src) {
+ hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out, DMA_FROM_DEVICE);
+ hisi_acc_sg_buf_unmap(dev, src, req->in, DMA_TO_DEVICE);
+ } else {
+ hisi_acc_sg_buf_unmap(dev, src, req->in, DMA_BIDIRECTIONAL);
+ }
+ }
+ return;
+ }
+
if (req->use_pbuf) {
sec_cipher_pbuf_unmap(ctx, req, dst);
} else {
- if (dst != src)
- hisi_acc_sg_buf_unmap(dev, src, req->in);
+ if (dst != src) {
+ sec_cipher_put_hw_sgl(dev, dst, req->buf.out_dma, DMA_FROM_DEVICE);
+ sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_TO_DEVICE);
+ } else {
+ sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_BIDIRECTIONAL);
+ }
+ }
- hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
+ dma_unmap_single(dev, c_req->c_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
+ if (ctx->alg_type == SEC_AEAD) {
+ dma_unmap_single(dev, a_req->a_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
+ dma_unmap_single(dev, a_req->out_mac_dma, SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL);
}
}
@@ -1257,8 +1520,15 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
- sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
- sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+ if (req->req_id < 0) {
+ sec_sqe->type2.data_src_addr = cpu_to_le64(req->buf.in_dma);
+ sec_sqe->type2.data_dst_addr = cpu_to_le64(req->buf.out_dma);
+ } else {
+ sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
+ sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+ }
+ if (sec_sqe->type2.data_src_addr != sec_sqe->type2.data_dst_addr)
+ de = 0x1 << SEC_DE_OFFSET;
sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
SEC_CMODE_OFFSET);
@@ -1284,13 +1554,10 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
sec_sqe->sdm_addr_type |= da_type;
scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
- if (req->in_dma != c_req->c_out_dma)
- de = 0x1 << SEC_DE_OFFSET;
sec_sqe->sds_sa_type = (de | scene | sa_type);
sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
- sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);
return 0;
}
@@ -1307,8 +1574,15 @@ static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
- sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
- sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+ if (req->req_id < 0) {
+ sec_sqe3->data_src_addr = cpu_to_le64(req->buf.in_dma);
+ sec_sqe3->data_dst_addr = cpu_to_le64(req->buf.out_dma);
+ } else {
+ sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
+ sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+ }
+ if (sec_sqe3->data_src_addr != sec_sqe3->data_dst_addr)
+ bd_param |= 0x1 << SEC_DE_OFFSET_V3;
sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) |
c_ctx->c_mode;
@@ -1334,8 +1608,6 @@ static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
}
bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3;
- if (req->in_dma != c_req->c_out_dma)
- bd_param |= 0x1 << SEC_DE_OFFSET_V3;
bd_param |= SEC_BD_TYPE3;
sec_sqe3->bd_param = cpu_to_le32(bd_param);
@@ -1367,15 +1639,12 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
size_t sz;
u8 *iv;
- if (req->c_req.encrypt)
- sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
- else
- sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;
-
if (alg_type == SEC_SKCIPHER) {
+ sgl = req->c_req.encrypt ? sk_req->dst : sk_req->src;
iv = sk_req->iv;
cryptlen = sk_req->cryptlen;
} else {
+ sgl = req->c_req.encrypt ? aead_req->dst : aead_req->src;
iv = aead_req->iv;
cryptlen = aead_req->cryptlen;
}
@@ -1386,57 +1655,26 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
if (unlikely(sz != iv_size))
dev_err(req->ctx->dev, "copy output iv error!\n");
} else {
- sz = cryptlen / iv_size;
- if (cryptlen % iv_size)
- sz += 1;
+ sz = (cryptlen + iv_size - 1) / iv_size;
ctr_iv_inc(iv, iv_size, sz);
}
}
-static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
- struct sec_qp_ctx *qp_ctx)
-{
- struct sec_req *backlog_req = NULL;
-
- spin_lock_bh(&qp_ctx->req_lock);
- if (ctx->fake_req_limit >=
- atomic_read(&qp_ctx->qp->qp_status.used) &&
- !list_empty(&qp_ctx->backlog)) {
- backlog_req = list_first_entry(&qp_ctx->backlog,
- typeof(*backlog_req), backlog_head);
- list_del(&backlog_req->backlog_head);
- }
- spin_unlock_bh(&qp_ctx->req_lock);
-
- return backlog_req;
-}
-
static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
int err)
{
- struct skcipher_request *sk_req = req->c_req.sk_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct skcipher_request *backlog_sk_req;
- struct sec_req *backlog_req;
- sec_free_req_id(req);
+ if (req->req_id >= 0)
+ sec_free_req_id(req);
/* IV output at encrypto of CBC/CTR mode */
if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
sec_update_iv(req, SEC_SKCIPHER);
- while (1) {
- backlog_req = sec_back_req_clear(ctx, qp_ctx);
- if (!backlog_req)
- break;
-
- backlog_sk_req = backlog_req->c_req.sk_req;
- skcipher_request_complete(backlog_sk_req, -EINPROGRESS);
- atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
- }
-
- skcipher_request_complete(sk_req, err);
+ crypto_request_complete(req->base, err);
+ sec_alg_send_backlog(ctx, qp_ctx);
}
static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
@@ -1675,21 +1913,14 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
struct aead_request *a_req = req->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
size_t authsize = crypto_aead_authsize(tfm);
- struct sec_aead_req *aead_req = &req->aead_req;
- struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct aead_request *backlog_aead_req;
- struct sec_req *backlog_req;
size_t sz;
- if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
- sec_update_iv(req, SEC_AEAD);
-
- /* Copy output mac */
- if (!err && c_req->encrypt) {
- struct scatterlist *sgl = a_req->dst;
+ if (!err && req->c_req.encrypt) {
+ if (c->c_ctx.c_mode == SEC_CMODE_CBC)
+ sec_update_iv(req, SEC_AEAD);
- sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac,
+ sz = sg_pcopy_from_buffer(a_req->dst, sg_nents(a_req->dst), req->aead_req.out_mac,
authsize, a_req->cryptlen + a_req->assoclen);
if (unlikely(sz != authsize)) {
dev_err(c->dev, "copy out mac err!\n");
@@ -1697,48 +1928,39 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
}
}
- sec_free_req_id(req);
+ if (req->req_id >= 0)
+ sec_free_req_id(req);
- while (1) {
- backlog_req = sec_back_req_clear(c, qp_ctx);
- if (!backlog_req)
- break;
-
- backlog_aead_req = backlog_req->aead_req.aead_req;
- aead_request_complete(backlog_aead_req, -EINPROGRESS);
- atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
- }
-
- aead_request_complete(a_req, err);
+ crypto_request_complete(req->base, err);
+ sec_alg_send_backlog(c, qp_ctx);
}
-static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
+static void sec_request_uninit(struct sec_req *req)
{
- sec_free_req_id(req);
- sec_free_queue_id(ctx, req);
+ if (req->req_id >= 0)
+ sec_free_req_id(req);
}
static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
struct sec_qp_ctx *qp_ctx;
- int queue_id;
-
- /* To load balance */
- queue_id = sec_alloc_queue_id(ctx, req);
- qp_ctx = &ctx->qp_ctx[queue_id];
+ int i;
- req->req_id = sec_alloc_req_id(req, qp_ctx);
- if (unlikely(req->req_id < 0)) {
- sec_free_queue_id(ctx, req);
- return req->req_id;
+ for (i = 0; i < ctx->sec->ctx_q_num; i++) {
+ qp_ctx = &ctx->qp_ctx[i];
+ req->req_id = sec_alloc_req_id(req, qp_ctx);
+ if (req->req_id >= 0)
+ break;
}
+ req->qp_ctx = qp_ctx;
+ req->backlog = &qp_ctx->backlog;
+
return 0;
}
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
- struct sec_cipher_req *c_req = &req->c_req;
int ret;
ret = sec_request_init(ctx, req);
@@ -1755,8 +1977,7 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
sec_update_iv(req, ctx->alg_type);
ret = ctx->req_op->bd_send(ctx, req);
- if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
- (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
+	if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
goto err_send_req;
}
@@ -1767,16 +1988,23 @@ err_send_req:
	/* On failure, restore the IV from the user */
if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
if (ctx->alg_type == SEC_SKCIPHER)
- memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
+ memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin,
ctx->c_ctx.ivsize);
else
- memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
+ memcpy(req->aead_req.aead_req->iv, req->c_req.c_ivin,
ctx->c_ctx.ivsize);
}
sec_request_untransfer(ctx, req);
+
err_uninit_req:
- sec_request_uninit(ctx, req);
+ sec_request_uninit(req);
+ if (ctx->alg_type == SEC_AEAD)
+ ret = sec_aead_soft_crypto(ctx, req->aead_req.aead_req,
+ req->c_req.encrypt);
+ else
+ ret = sec_skcipher_soft_crypto(ctx, req->c_req.sk_req,
+ req->c_req.encrypt);
return ret;
}
@@ -1850,7 +2078,7 @@ static int sec_aead_init(struct crypto_aead *tfm)
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
int ret;
- crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
+ crypto_aead_set_reqsize_dma(tfm, sizeof(struct sec_req));
ctx->alg_type = SEC_AEAD;
ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
@@ -2087,7 +2315,7 @@ static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
- struct sec_req *req = skcipher_request_ctx(sk_req);
+ struct sec_req *req = skcipher_request_ctx_dma(sk_req);
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
bool need_fallback = false;
int ret;
@@ -2102,6 +2330,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
req->c_req.sk_req = sk_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
+ req->base = &sk_req->base;
ret = sec_skcipher_param_check(ctx, req, &need_fallback);
if (unlikely(ret))
@@ -2236,6 +2465,9 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
return -EINVAL;
if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK))
return -EINVAL;
+ } else if (c_mode == SEC_CMODE_GCM) {
+ if (unlikely(sz < SEC_GCM_MIN_AUTH_SZ))
+ return -EINVAL;
}
return 0;
@@ -2309,7 +2541,7 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
- struct sec_req *req = aead_request_ctx(a_req);
+ struct sec_req *req = aead_request_ctx_dma(a_req);
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
size_t sz = crypto_aead_authsize(tfm);
bool need_fallback = false;
@@ -2319,6 +2551,7 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
req->aead_req.aead_req = a_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
+ req->base = &a_req->base;
req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz);
ret = sec_aead_param_check(ctx, req, &need_fallback);
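
Storing the common request base (req->base = &sk_req->base or &a_req->base) is what lets both completion paths above finish a request with a single crypto_request_complete() call instead of the type-specific helpers. A minimal sketch of the pattern, using a hypothetical driver context name:

	/* Sketch only: 'my_req' is an illustrative per-request context. */
	struct my_req {
		struct crypto_async_request *base;	/* set at request entry */
	};

	static void my_complete(struct my_req *req, int err)
	{
		/* Works for skcipher and AEAD requests alike. */
		crypto_request_complete(req->base, err);
	}
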
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index c974f95cd126..7a9ef2a9972a 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -210,15 +210,15 @@ static void clear_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
* @pool: Pool which hw sgl memory will be allocated in.
* @index: Index of hisi_acc_hw_sgl in pool.
* @hw_sgl_dma: The dma address of allocated hw sgl.
+ * @dir: DMA direction.
*
 * This function builds a hw sgl according to the input sgl; the user can use
 * hw_sgl_dma as src/dst in its BD. Only a single hw sgl is currently supported.
*/
struct hisi_acc_hw_sgl *
-hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
- struct scatterlist *sgl,
- struct hisi_acc_sgl_pool *pool,
- u32 index, dma_addr_t *hw_sgl_dma)
+hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, struct scatterlist *sgl,
+ struct hisi_acc_sgl_pool *pool, u32 index,
+ dma_addr_t *hw_sgl_dma, enum dma_data_direction dir)
{
struct hisi_acc_hw_sgl *curr_hw_sgl;
unsigned int i, sg_n_mapped;
@@ -232,7 +232,7 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
sg_n = sg_nents(sgl);
- sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
+ sg_n_mapped = dma_map_sg(dev, sgl, sg_n, dir);
if (!sg_n_mapped) {
dev_err(dev, "DMA mapping for SG error!\n");
return ERR_PTR(-EINVAL);
@@ -276,16 +276,17 @@ EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl);
* @dev: The device which hw sgl belongs to.
* @sgl: Related scatterlist.
* @hw_sgl: Virtual address of hw sgl.
+ * @dir: DMA direction.
*
* This function unmaps allocated hw sgl.
*/
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
- struct hisi_acc_hw_sgl *hw_sgl)
+ struct hisi_acc_hw_sgl *hw_sgl, enum dma_data_direction dir)
{
if (!dev || !sgl || !hw_sgl)
return;
- dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, sgl, sg_nents(sgl), dir);
clear_hw_sgl_sge(hw_sgl);
hw_sgl->entry_sum_in_chain = 0;
hw_sgl->entry_sum_in_sgl = 0;
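
With the new direction argument, callers map and unmap with a matching, specific DMA direction instead of DMA_BIDIRECTIONAL. A hedged usage sketch for a source buffer ('pool' and 'idx' are illustrative and come from the caller's queue setup):

	dma_addr_t hw_sgl_dma;
	struct hisi_acc_hw_sgl *hw_sgl;

	hw_sgl = hisi_acc_sg_buf_map_to_hw_sgl(dev, sgl, pool, idx,
					       &hw_sgl_dma, DMA_TO_DEVICE);
	if (IS_ERR(hw_sgl))
		return PTR_ERR(hw_sgl);
	/* ... use hw_sgl_dma as the BD source address ... */
	hisi_acc_sg_buf_unmap(dev, sgl, hw_sgl, DMA_TO_DEVICE);
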
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 7327f8f29b01..b97513981a3b 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -224,7 +224,8 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
return -EINVAL;
req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
- req->req_id << 1, &req->dma_src);
+ req->req_id << 1, &req->dma_src,
+ DMA_TO_DEVICE);
if (IS_ERR(req->hw_src)) {
dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
PTR_ERR(req->hw_src));
@@ -233,7 +234,7 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
(req->req_id << 1) + 1,
- &req->dma_dst);
+ &req->dma_dst, DMA_FROM_DEVICE);
if (IS_ERR(req->hw_dst)) {
ret = PTR_ERR(req->hw_dst);
		dev_err(dev, "failed to map the dst buffer to hw sgl (%d)!\n",
@@ -258,9 +259,9 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
return -EINPROGRESS;
err_unmap_output:
- hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
+ hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst, DMA_FROM_DEVICE);
err_unmap_input:
- hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
+ hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src, DMA_TO_DEVICE);
return ret;
}
@@ -303,8 +304,8 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
err = -EIO;
}
- hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
- hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);
+ hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst, DMA_FROM_DEVICE);
+ hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src, DMA_TO_DEVICE);
acomp_req->dlen = ops->get_dstlen(sqe);
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index e050f5ff5efb..76b7ecb5624b 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -436,7 +436,7 @@ static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
if (ctx->flags & DRIVER_FLAGS_SG)
- dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);
+ dma_unmap_sg(hdev->dev, ctx->sg, 1, DMA_TO_DEVICE);
return 0;
}
@@ -705,17 +705,17 @@ static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
- return img_hash_cra_init(tfm, "sha1-generic");
+ return img_hash_cra_init(tfm, "sha1-lib");
}
static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
- return img_hash_cra_init(tfm, "sha224-generic");
+ return img_hash_cra_init(tfm, "sha224-lib");
}
static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
- return img_hash_cra_init(tfm, "sha256-generic");
+ return img_hash_cra_init(tfm, "sha256-lib");
}
static void img_hash_cra_exit(struct crypto_tfm *tfm)
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 9ca80d082c4f..c3b2b22934b7 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1218,7 +1218,6 @@ static struct safexcel_alg_template *safexcel_algs[] = {
&safexcel_alg_xts_aes,
&safexcel_alg_gcm,
&safexcel_alg_ccm,
- &safexcel_alg_crc32,
&safexcel_alg_cbcmac,
&safexcel_alg_xcbcmac,
&safexcel_alg_cmac,
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 0c79ad78d1c0..0f27367a85fa 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -959,7 +959,6 @@ extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes;
extern struct safexcel_alg_template safexcel_alg_xts_aes;
extern struct safexcel_alg_template safexcel_alg_gcm;
extern struct safexcel_alg_template safexcel_alg_ccm;
-extern struct safexcel_alg_template safexcel_alg_crc32;
extern struct safexcel_alg_template safexcel_alg_cbcmac;
extern struct safexcel_alg_template safexcel_alg_xcbcmac;
extern struct safexcel_alg_template safexcel_alg_cmac;
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index d2b632193beb..ef0ba4832928 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -249,7 +249,9 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
safexcel_complete(priv, ring);
if (sreq->nents) {
- dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, areq->src,
+ sg_nents_for_len(areq->src, areq->nbytes),
+ DMA_TO_DEVICE);
sreq->nents = 0;
}
@@ -289,14 +291,8 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
return 1;
}
- if (unlikely(sreq->digest == CONTEXT_CONTROL_DIGEST_XCM &&
- ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_CRC32)) {
- /* Undo final XOR with 0xffffffff ...*/
- *(__le32 *)areq->result = ~sreq->state[0];
- } else {
- memcpy(areq->result, sreq->state,
- crypto_ahash_digestsize(ahash));
- }
+ memcpy(areq->result, sreq->state,
+ crypto_ahash_digestsize(ahash));
}
cache_len = safexcel_queued_len(sreq);
@@ -497,7 +493,9 @@ unmap_result:
DMA_FROM_DEVICE);
unmap_sg:
if (req->nents) {
- dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, areq->src,
+ sg_nents_for_len(areq->src, areq->nbytes),
+ DMA_TO_DEVICE);
req->nents = 0;
}
cdesc_rollback:
@@ -1881,88 +1879,6 @@ struct safexcel_alg_template safexcel_alg_hmac_md5 = {
},
};
-static int safexcel_crc32_cra_init(struct crypto_tfm *tfm)
-{
- struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
- int ret = safexcel_ahash_cra_init(tfm);
-
- /* Default 'key' is all zeroes */
- memset(&ctx->base.ipad, 0, sizeof(u32));
- return ret;
-}
-
-static int safexcel_crc32_init(struct ahash_request *areq)
-{
- struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
-
- memset(req, 0, sizeof(*req));
-
- /* Start from loaded key */
- req->state[0] = cpu_to_le32(~ctx->base.ipad.word[0]);
- /* Set processed to non-zero to enable invalidation detection */
- req->len = sizeof(u32);
- req->processed = sizeof(u32);
-
- ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_CRC32;
- req->digest = CONTEXT_CONTROL_DIGEST_XCM;
- req->state_sz = sizeof(u32);
- req->digest_sz = sizeof(u32);
- req->block_sz = sizeof(u32);
-
- return 0;
-}
-
-static int safexcel_crc32_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
-
- if (keylen != sizeof(u32))
- return -EINVAL;
-
- memcpy(&ctx->base.ipad, key, sizeof(u32));
- return 0;
-}
-
-static int safexcel_crc32_digest(struct ahash_request *areq)
-{
- return safexcel_crc32_init(areq) ?: safexcel_ahash_finup(areq);
-}
-
-struct safexcel_alg_template safexcel_alg_crc32 = {
- .type = SAFEXCEL_ALG_TYPE_AHASH,
- .algo_mask = 0,
- .alg.ahash = {
- .init = safexcel_crc32_init,
- .update = safexcel_ahash_update,
- .final = safexcel_ahash_final,
- .finup = safexcel_ahash_finup,
- .digest = safexcel_crc32_digest,
- .setkey = safexcel_crc32_setkey,
- .export = safexcel_ahash_export,
- .import = safexcel_ahash_import,
- .halg = {
- .digestsize = sizeof(u32),
- .statesize = sizeof(struct safexcel_ahash_export_state),
- .base = {
- .cra_name = "crc32",
- .cra_driver_name = "safexcel-crc32",
- .cra_priority = SAFEXCEL_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
- .cra_init = safexcel_crc32_cra_init,
- .cra_exit = safexcel_ahash_cra_exit,
- .cra_module = THIS_MODULE,
- },
- },
- },
-};
-
static int safexcel_cbcmac_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
index 95dc8979918d..8f9e21ced0fe 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
@@ -68,6 +68,7 @@ struct ocs_hcu_ctx {
* @sg_data_total: Total data in the SG list at any time.
* @sg_data_offset: Offset into the data of the current individual SG node.
* @sg_dma_nents: Number of sg entries mapped in dma_list.
+ * @nents: Number of entries in the scatterlist.
*/
struct ocs_hcu_rctx {
struct ocs_hcu_dev *hcu_dev;
@@ -91,6 +92,7 @@ struct ocs_hcu_rctx {
unsigned int sg_data_total;
unsigned int sg_data_offset;
unsigned int sg_dma_nents;
+ unsigned int nents;
};
/**
@@ -199,7 +201,7 @@ static void kmb_ocs_hcu_dma_cleanup(struct ahash_request *req,
/* Unmap req->src (if mapped). */
if (rctx->sg_dma_nents) {
- dma_unmap_sg(dev, req->src, rctx->sg_dma_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->src, rctx->nents, DMA_TO_DEVICE);
rctx->sg_dma_nents = 0;
}
@@ -260,6 +262,10 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req)
rc = -ENOMEM;
goto cleanup;
}
+
+ /* Save the value of nents to pass to dma_unmap_sg. */
+ rctx->nents = nents;
+
/*
* The value returned by dma_map_sg() can be < nents; so update
* nents accordingly.
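
The rule this change enforces is a general DMA API requirement: dma_unmap_sg() must be passed the same nents that was given to dma_map_sg(), not the (possibly smaller) mapped count dma_map_sg() returned. A minimal sketch of the pattern:

	int nents = sg_nents(req->src);		/* entries in the scatterlist */
	int mapped = dma_map_sg(dev, req->src, nents, DMA_TO_DEVICE);

	if (!mapped)
		return -ENOMEM;
	/* ... program 'mapped' hardware descriptors ... */
	dma_unmap_sg(dev, req->src, nents, DMA_TO_DEVICE);
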
diff --git a/drivers/crypto/intel/keembay/ocs-aes.c b/drivers/crypto/intel/keembay/ocs-aes.c
index be9f32fc8f42..bb6f33f6b4d3 100644
--- a/drivers/crypto/intel/keembay/ocs-aes.c
+++ b/drivers/crypto/intel/keembay/ocs-aes.c
@@ -7,6 +7,7 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/swab.h>
@@ -1473,8 +1474,7 @@ int ocs_create_linked_list_from_sg(const struct ocs_aes_dev *aes_dev,
ll = dll_desc->vaddr;
for (i = 0; i < dma_nents; i++, sg = sg_next(sg)) {
ll[i].src_addr = sg_dma_address(sg) + data_offset;
- ll[i].src_len = (sg_dma_len(sg) - data_offset) < data_size ?
- (sg_dma_len(sg) - data_offset) : data_size;
+ ll[i].src_len = min(sg_dma_len(sg) - data_offset, data_size);
data_offset = 0;
data_size -= ll[i].src_len;
/* Current element points to the DMA address of the next one. */
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index 7c3c0f561c95..53fa91d577ed 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -191,7 +191,6 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
ICP_ACCEL_CAPABILITIES_SM4 |
ICP_ACCEL_CAPABILITIES_AES_V2 |
ICP_ACCEL_CAPABILITIES_ZUC |
- ICP_ACCEL_CAPABILITIES_ZUC_256 |
ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT |
ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN;
@@ -223,17 +222,11 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
if (fusectl1 & ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE) {
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
- capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT;
}
- if (fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE)
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
- capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
- }
-
- if (fusectl1 & ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE)
- capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
ICP_ACCEL_CAPABILITIES_SM2 |
@@ -303,11 +296,13 @@ static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
rl_data->pcie_scale_div = ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV;
rl_data->pcie_scale_mul = ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL;
rl_data->dcpr_correction = ADF_420XX_RL_DCPR_CORRECTION;
- rl_data->max_tp[ADF_SVC_ASYM] = ADF_420XX_RL_MAX_TP_ASYM;
- rl_data->max_tp[ADF_SVC_SYM] = ADF_420XX_RL_MAX_TP_SYM;
- rl_data->max_tp[ADF_SVC_DC] = ADF_420XX_RL_MAX_TP_DC;
+ rl_data->max_tp[SVC_ASYM] = ADF_420XX_RL_MAX_TP_ASYM;
+ rl_data->max_tp[SVC_SYM] = ADF_420XX_RL_MAX_TP_SYM;
+ rl_data->max_tp[SVC_DC] = ADF_420XX_RL_MAX_TP_DC;
rl_data->scan_interval = ADF_420XX_RL_SCANS_PER_SEC;
rl_data->scale_ref = ADF_420XX_RL_SLICE_REF;
+
+ adf_gen4_init_num_svc_aes(rl_data);
}
static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask)
@@ -473,6 +468,7 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->clock_frequency = ADF_420XX_AE_FREQ;
hw_data->services_supported = adf_gen4_services_supported;
+ hw_data->get_svc_slice_cnt = adf_gen4_get_svc_slice_cnt;
adf_gen4_set_err_mask(&hw_data->dev_err_mask);
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index bd0b1b1015c0..740f68a36ac5 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -3,6 +3,7 @@
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
#include <adf_admin.h>
+#include <adf_bank_state.h>
#include <adf_cfg.h>
#include <adf_cfg_services.h>
#include <adf_clock.h>
@@ -221,11 +222,13 @@ static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
rl_data->pcie_scale_div = ADF_4XXX_RL_PCIE_SCALE_FACTOR_DIV;
rl_data->pcie_scale_mul = ADF_4XXX_RL_PCIE_SCALE_FACTOR_MUL;
rl_data->dcpr_correction = ADF_4XXX_RL_DCPR_CORRECTION;
- rl_data->max_tp[ADF_SVC_ASYM] = ADF_4XXX_RL_MAX_TP_ASYM;
- rl_data->max_tp[ADF_SVC_SYM] = ADF_4XXX_RL_MAX_TP_SYM;
- rl_data->max_tp[ADF_SVC_DC] = ADF_4XXX_RL_MAX_TP_DC;
+ rl_data->max_tp[SVC_ASYM] = ADF_4XXX_RL_MAX_TP_ASYM;
+ rl_data->max_tp[SVC_SYM] = ADF_4XXX_RL_MAX_TP_SYM;
+ rl_data->max_tp[SVC_DC] = ADF_4XXX_RL_MAX_TP_DC;
rl_data->scan_interval = ADF_4XXX_RL_SCANS_PER_SEC;
rl_data->scale_ref = ADF_4XXX_RL_SLICE_REF;
+
+ adf_gen4_init_num_svc_aes(rl_data);
}
static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
@@ -448,8 +451,8 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map;
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
- hw_data->bank_state_save = adf_gen4_bank_state_save;
- hw_data->bank_state_restore = adf_gen4_bank_state_restore;
+ hw_data->bank_state_save = adf_bank_state_save;
+ hw_data->bank_state_restore = adf_bank_state_restore;
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
hw_data->dev_config = adf_gen4_dev_config;
@@ -459,6 +462,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
hw_data->services_supported = adf_gen4_services_supported;
+ hw_data->get_svc_slice_cnt = adf_gen4_get_svc_slice_cnt;
adf_gen4_set_err_mask(&hw_data->dev_err_mask);
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
index 359a6447ccb8..bed88d3ce8ca 100644
--- a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
@@ -10,6 +10,7 @@
#include <adf_accel_devices.h>
#include <adf_admin.h>
+#include <adf_bank_state.h>
#include <adf_cfg.h>
#include <adf_cfg_services.h>
#include <adf_clock.h>
@@ -18,6 +19,7 @@
#include <adf_gen6_pm.h>
#include <adf_gen6_ras.h>
#include <adf_gen6_shared.h>
+#include <adf_gen6_tl.h>
#include <adf_timer.h>
#include "adf_6xxx_hw_data.h"
#include "icp_qat_fw_comp.h"
@@ -76,6 +78,10 @@ static const unsigned long thrd_mask_dcc[ADF_6XXX_MAX_ACCELENGINES] = {
0x00, 0x00, 0x00, 0x00, 0x07, 0x07, 0x03, 0x03, 0x00
};
+static const unsigned long thrd_mask_dcpr[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x00
+};
+
static const char *const adf_6xxx_fw_objs[] = {
[ADF_FW_CY_OBJ] = ADF_6XXX_CY_OBJ,
[ADF_FW_DC_OBJ] = ADF_6XXX_DC_OBJ,
@@ -97,7 +103,7 @@ static bool services_supported(unsigned long mask)
{
int num_svc;
- if (mask >= BIT(SVC_BASE_COUNT))
+ if (mask >= BIT(SVC_COUNT))
return false;
num_svc = hweight_long(mask);
@@ -126,10 +132,13 @@ static int get_service(unsigned long *mask)
if (test_and_clear_bit(SVC_DCC, mask))
return SVC_DCC;
+ if (test_and_clear_bit(SVC_DECOMP, mask))
+ return SVC_DECOMP;
+
return -EINVAL;
}
-static enum adf_cfg_service_type get_ring_type(enum adf_services service)
+static enum adf_cfg_service_type get_ring_type(unsigned int service)
{
switch (service) {
case SVC_SYM:
@@ -139,12 +148,14 @@ static enum adf_cfg_service_type get_ring_type(enum adf_services service)
case SVC_DC:
case SVC_DCC:
return COMP;
+ case SVC_DECOMP:
+ return DECOMP;
default:
return UNUSED;
}
}
-static const unsigned long *get_thrd_mask(enum adf_services service)
+static const unsigned long *get_thrd_mask(unsigned int service)
{
switch (service) {
case SVC_SYM:
@@ -155,6 +166,8 @@ static const unsigned long *get_thrd_mask(enum adf_services service)
return thrd_mask_cpr;
case SVC_DCC:
return thrd_mask_dcc;
+ case SVC_DECOMP:
+ return thrd_mask_dcpr;
default:
return NULL;
}
@@ -511,6 +524,55 @@ static int adf_gen6_init_thd2arb_map(struct adf_accel_dev *accel_dev)
return 0;
}
+static void init_num_svc_aes(struct adf_rl_hw_data *device_data)
+{
+ enum adf_fw_objs obj_type, obj_iter;
+ unsigned int svc, i, num_grp;
+ u32 ae_mask;
+
+ for (svc = 0; svc < SVC_BASE_COUNT; svc++) {
+ switch (svc) {
+ case SVC_SYM:
+ case SVC_ASYM:
+ obj_type = ADF_FW_CY_OBJ;
+ break;
+ case SVC_DC:
+ case SVC_DECOMP:
+ obj_type = ADF_FW_DC_OBJ;
+ break;
+ }
+
+ num_grp = ARRAY_SIZE(adf_default_fw_config);
+ for (i = 0; i < num_grp; i++) {
+ obj_iter = adf_default_fw_config[i].obj;
+ if (obj_iter == obj_type) {
+ ae_mask = adf_default_fw_config[i].ae_mask;
+ device_data->svc_ae_mask[svc] = hweight32(ae_mask);
+ break;
+ }
+ }
+ }
+}
+
+static u32 adf_gen6_get_svc_slice_cnt(struct adf_accel_dev *accel_dev,
+ enum adf_base_services svc)
+{
+ struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
+
+ switch (svc) {
+ case SVC_SYM:
+ return device_data->slices.cph_cnt;
+ case SVC_ASYM:
+ return device_data->slices.pke_cnt;
+ case SVC_DC:
+ return device_data->slices.cpr_cnt + device_data->slices.dcpr_cnt;
+ case SVC_DECOMP:
+ return device_data->slices.dcpr_cnt;
+ default:
+ return 0;
+ }
+}
+
static void set_vc_csr_for_bank(void __iomem *csr, u32 bank_number)
{
u32 value;
@@ -520,8 +582,8 @@ static void set_vc_csr_for_bank(void __iomem *csr, u32 bank_number)
* driver must program the ringmodectl CSRs.
*/
value = ADF_CSR_RD(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number));
- value |= FIELD_PREP(ADF_GEN6_RINGMODECTL_TC_MASK, ADF_GEN6_RINGMODECTL_TC_DEFAULT);
- value |= FIELD_PREP(ADF_GEN6_RINGMODECTL_TC_EN_MASK, ADF_GEN6_RINGMODECTL_TC_EN_OP1);
+ FIELD_MODIFY(ADF_GEN6_RINGMODECTL_TC_MASK, &value, ADF_GEN6_RINGMODECTL_TC_DEFAULT);
+ FIELD_MODIFY(ADF_GEN6_RINGMODECTL_TC_EN_MASK, &value, ADF_GEN6_RINGMODECTL_TC_EN_OP1);
ADF_CSR_WR(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number), value);
}
@@ -537,7 +599,7 @@ static int set_vc_config(struct adf_accel_dev *accel_dev)
* Read PVC0CTL then write the masked values.
*/
pci_read_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, &value);
- value |= FIELD_PREP(ADF_GEN6_PVC0CTL_TCVCMAP_MASK, ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT);
+ FIELD_MODIFY(ADF_GEN6_PVC0CTL_TCVCMAP_MASK, &value, ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT);
err = pci_write_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, value);
if (err) {
dev_err(&GET_DEV(accel_dev), "pci write to PVC0CTL failed\n");
@@ -546,8 +608,8 @@ static int set_vc_config(struct adf_accel_dev *accel_dev)
/* Read PVC1CTL then write masked values */
pci_read_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, &value);
- value |= FIELD_PREP(ADF_GEN6_PVC1CTL_TCVCMAP_MASK, ADF_GEN6_PVC1CTL_TCVCMAP_DEFAULT);
- value |= FIELD_PREP(ADF_GEN6_PVC1CTL_VCEN_MASK, ADF_GEN6_PVC1CTL_VCEN_ON);
+ FIELD_MODIFY(ADF_GEN6_PVC1CTL_TCVCMAP_MASK, &value, ADF_GEN6_PVC1CTL_TCVCMAP_DEFAULT);
+ FIELD_MODIFY(ADF_GEN6_PVC1CTL_VCEN_MASK, &value, ADF_GEN6_PVC1CTL_VCEN_ON);
err = pci_write_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, value);
if (err)
dev_err(&GET_DEV(accel_dev), "pci write to PVC1CTL failed\n");
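
FIELD_MODIFY() differs from OR-ing in a FIELD_PREP() value: it clears the target field before writing, so stale bits read back from the register cannot leak into the new setting. A short sketch of the two forms (the register offset 'reg' is illustrative):

	u32 value = ADF_CSR_RD(csr, reg);

	/* Old form: only sets bits, so stale field bits survive.
	 * value |= FIELD_PREP(ADF_GEN6_RINGMODECTL_TC_MASK, ADF_GEN6_RINGMODECTL_TC_DEFAULT);
	 */

	/* New form: clear the field, then write the new value. */
	FIELD_MODIFY(ADF_GEN6_RINGMODECTL_TC_MASK, &value, ADF_GEN6_RINGMODECTL_TC_DEFAULT);
	ADF_CSR_WR(csr, reg, value);
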
@@ -618,7 +680,6 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
- capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
if (fusectl1 & ICP_ACCEL_GEN6_MASK_AUTH_SLICE) {
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
@@ -627,7 +688,15 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
- capabilities_asym = 0;
+ capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_SM2 |
+ ICP_ACCEL_CAPABILITIES_ECEDMONT;
+
+ if (fusectl1 & ICP_ACCEL_GEN6_MASK_PKE_SLICE) {
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
+ }
capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
@@ -648,7 +717,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
caps |= capabilities_asym;
if (test_bit(SVC_SYM, &mask))
caps |= capabilities_sym;
- if (test_bit(SVC_DC, &mask))
+ if (test_bit(SVC_DC, &mask) || test_bit(SVC_DECOMP, &mask))
caps |= capabilities_dc;
if (test_bit(SVC_DCC, &mask)) {
/*
@@ -744,7 +813,16 @@ static int adf_init_device(struct adf_accel_dev *accel_dev)
static int enable_pm(struct adf_accel_dev *accel_dev)
{
- return adf_init_admin_pm(accel_dev, ADF_GEN6_PM_DEFAULT_IDLE_FILTER);
+ int ret;
+
+ ret = adf_init_admin_pm(accel_dev, ADF_GEN6_PM_DEFAULT_IDLE_FILTER);
+ if (ret)
+ return ret;
+
+ /* Initialize PM internal data */
+ adf_gen6_init_dev_pm_data(accel_dev);
+
+ return 0;
}
static int dev_config(struct adf_accel_dev *accel_dev)
@@ -776,6 +854,25 @@ static int dev_config(struct adf_accel_dev *accel_dev)
return ret;
}
+static void adf_gen6_init_rl_data(struct adf_rl_hw_data *rl_data)
+{
+ rl_data->pciout_tb_offset = ADF_GEN6_RL_TOKEN_PCIEOUT_BUCKET_OFFSET;
+ rl_data->pciin_tb_offset = ADF_GEN6_RL_TOKEN_PCIEIN_BUCKET_OFFSET;
+ rl_data->r2l_offset = ADF_GEN6_RL_R2L_OFFSET;
+ rl_data->l2c_offset = ADF_GEN6_RL_L2C_OFFSET;
+ rl_data->c2s_offset = ADF_GEN6_RL_C2S_OFFSET;
+ rl_data->pcie_scale_div = ADF_6XXX_RL_PCIE_SCALE_FACTOR_DIV;
+ rl_data->pcie_scale_mul = ADF_6XXX_RL_PCIE_SCALE_FACTOR_MUL;
+ rl_data->max_tp[SVC_ASYM] = ADF_6XXX_RL_MAX_TP_ASYM;
+ rl_data->max_tp[SVC_SYM] = ADF_6XXX_RL_MAX_TP_SYM;
+ rl_data->max_tp[SVC_DC] = ADF_6XXX_RL_MAX_TP_DC;
+ rl_data->max_tp[SVC_DECOMP] = ADF_6XXX_RL_MAX_TP_DECOMP;
+ rl_data->scan_interval = ADF_6XXX_RL_SCANS_PER_SEC;
+ rl_data->scale_ref = ADF_6XXX_RL_SLICE_REF;
+
+ init_num_svc_aes(rl_data);
+}
+
void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &adf_6xxx_class;
@@ -824,6 +921,8 @@ void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = ring_pair_reset;
hw_data->dev_config = dev_config;
+ hw_data->bank_state_save = adf_bank_state_save;
+ hw_data->bank_state_restore = adf_bank_state_restore;
hw_data->get_hb_clock = get_heartbeat_clock;
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->start_timer = adf_timer_start;
@@ -831,11 +930,17 @@ void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
hw_data->init_device = adf_init_device;
hw_data->enable_pm = enable_pm;
hw_data->services_supported = services_supported;
+ hw_data->num_rps = ADF_GEN6_ETR_MAX_BANKS;
+ hw_data->clock_frequency = ADF_6XXX_AE_FREQ;
+ hw_data->get_svc_slice_cnt = adf_gen6_get_svc_slice_cnt;
adf_gen6_init_hw_csr_ops(&hw_data->csr_ops);
adf_gen6_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen6_init_dc_ops(&hw_data->dc_ops);
+ adf_gen6_init_vf_mig_ops(&hw_data->vfmig_ops);
adf_gen6_init_ras_ops(&hw_data->ras_ops);
+ adf_gen6_init_tl_data(&hw_data->tl_data);
+ adf_gen6_init_rl_data(&hw_data->rl_data);
}
void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data)
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
index 78e2e2c5816e..d822911fe68c 100644
--- a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
@@ -99,7 +99,7 @@
#define ADF_GEN6_PVC0CTL_OFFSET 0x204
#define ADF_GEN6_PVC0CTL_TCVCMAP_OFFSET 1
#define ADF_GEN6_PVC0CTL_TCVCMAP_MASK GENMASK(7, 1)
-#define ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT 0x7F
+#define ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT 0x3F
/* VC1 Resource Control Register */
#define ADF_GEN6_PVC1CTL_OFFSET 0x210
@@ -122,6 +122,13 @@
/* Number of heartbeat counter pairs */
#define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE
+/* Rate Limiting */
+#define ADF_GEN6_RL_R2L_OFFSET 0x508000
+#define ADF_GEN6_RL_L2C_OFFSET 0x509000
+#define ADF_GEN6_RL_C2S_OFFSET 0x508818
+#define ADF_GEN6_RL_TOKEN_PCIEIN_BUCKET_OFFSET 0x508800
+#define ADF_GEN6_RL_TOKEN_PCIEOUT_BUCKET_OFFSET 0x508804
+
/* Physical function fuses */
#define ADF_6XXX_ACCELENGINES_MASK GENMASK(8, 0)
#define ADF_6XXX_ADMIN_AE_MASK GENMASK(8, 8)
@@ -133,6 +140,19 @@
#define ADF_6XXX_DC_OBJ "qat_6xxx_dc.bin"
#define ADF_6XXX_ADMIN_OBJ "qat_6xxx_admin.bin"
+/* RL constants */
+#define ADF_6XXX_RL_PCIE_SCALE_FACTOR_DIV 100
+#define ADF_6XXX_RL_PCIE_SCALE_FACTOR_MUL 102
+#define ADF_6XXX_RL_SCANS_PER_SEC 954
+#define ADF_6XXX_RL_MAX_TP_ASYM 173750UL
+#define ADF_6XXX_RL_MAX_TP_SYM 95000UL
+#define ADF_6XXX_RL_MAX_TP_DC 40000UL
+#define ADF_6XXX_RL_MAX_TP_DECOMP 40000UL
+#define ADF_6XXX_RL_SLICE_REF 1000UL
+
+/* Clock frequency */
+#define ADF_6XXX_AE_FREQ (1000 * HZ_PER_MHZ)
+
enum icp_qat_gen6_slice_mask {
ICP_ACCEL_GEN6_MASK_UCS_SLICE = BIT(0),
ICP_ACCEL_GEN6_MASK_AUTH_SLICE = BIT(1),
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 66bb295ace28..89845754841b 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -4,6 +4,7 @@ ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CRYPTO_QAT"'
intel_qat-y := adf_accel_engine.o \
adf_admin.o \
adf_aer.o \
+ adf_bank_state.o \
adf_cfg.o \
adf_cfg_services.o \
adf_clock.o \
@@ -48,9 +49,12 @@ intel_qat-$(CONFIG_DEBUG_FS) += adf_cnv_dbgfs.o \
adf_fw_counters.o \
adf_gen4_pm_debugfs.o \
adf_gen4_tl.o \
+ adf_gen6_pm_dbgfs.o \
+ adf_gen6_tl.o \
adf_heartbeat_dbgfs.o \
adf_heartbeat.o \
adf_pm_dbgfs.o \
+ adf_pm_dbgfs_utils.o \
adf_telemetry.o \
adf_tl_debugfs.o \
adf_transport_debug.o
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index 2ee526063213..9fe3239f0114 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -157,39 +157,7 @@ struct admin_info {
u32 mailbox_offset;
};
-struct ring_config {
- u64 base;
- u32 config;
- u32 head;
- u32 tail;
- u32 reserved0;
-};
-
-struct bank_state {
- u32 ringstat0;
- u32 ringstat1;
- u32 ringuostat;
- u32 ringestat;
- u32 ringnestat;
- u32 ringnfstat;
- u32 ringfstat;
- u32 ringcstat0;
- u32 ringcstat1;
- u32 ringcstat2;
- u32 ringcstat3;
- u32 iaintflagen;
- u32 iaintflagreg;
- u32 iaintflagsrcsel0;
- u32 iaintflagsrcsel1;
- u32 iaintcolen;
- u32 iaintcolctl;
- u32 iaintflagandcolen;
- u32 ringexpstat;
- u32 ringexpintenable;
- u32 ringsrvarben;
- u32 reserved0;
- struct ring_config rings[ADF_ETR_MAX_RINGS_PER_BANK];
-};
+struct adf_bank_state;
struct adf_hw_csr_ops {
u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
@@ -338,9 +306,9 @@ struct adf_hw_device_data {
void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
int (*bank_state_save)(struct adf_accel_dev *accel_dev, u32 bank_number,
- struct bank_state *state);
+ struct adf_bank_state *state);
int (*bank_state_restore)(struct adf_accel_dev *accel_dev,
- u32 bank_number, struct bank_state *state);
+ u32 bank_number, struct adf_bank_state *state);
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
@@ -351,6 +319,8 @@ struct adf_hw_device_data {
u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
int (*dev_config)(struct adf_accel_dev *accel_dev);
bool (*services_supported)(unsigned long mask);
+ u32 (*get_svc_slice_cnt)(struct adf_accel_dev *accel_dev,
+ enum adf_base_services svc);
struct adf_pfvf_ops pfvf_ops;
struct adf_hw_csr_ops csr_ops;
struct adf_dc_ops dc_ops;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index 4cb8bd83f570..35679b21ff63 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -229,7 +229,7 @@ const struct pci_error_handlers adf_err_handler = {
};
EXPORT_SYMBOL_GPL(adf_err_handler);
-int adf_dev_autoreset(struct adf_accel_dev *accel_dev)
+static int adf_dev_autoreset(struct adf_accel_dev *accel_dev)
{
if (accel_dev->autoreset_on_error)
return adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_ASYNC);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_bank_state.c b/drivers/crypto/intel/qat/qat_common/adf_bank_state.c
new file mode 100644
index 000000000000..225d55d56a4b
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_bank_state.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+
+#define pr_fmt(fmt) "QAT: " fmt
+
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/printk.h>
+#include "adf_accel_devices.h"
+#include "adf_bank_state.h"
+#include "adf_common_drv.h"
+
+/* Ring interrupt masks */
+#define ADF_RP_INT_SRC_SEL_F_RISE_MASK GENMASK(1, 0)
+#define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0)
+#define ADF_RP_INT_SRC_SEL_RANGE_WIDTH 4
+
+static inline int check_stat(u32 (*op)(void __iomem *, u32), u32 expect_val,
+ const char *name, void __iomem *base, u32 bank)
+{
+ u32 actual_val = op(base, bank);
+
+ if (expect_val == actual_val)
+ return 0;
+
+	pr_err("Failed to restore %s register. Expected %#x, actual %#x\n",
+ name, expect_val, actual_val);
+
+ return -EINVAL;
+}
+
+static void bank_state_save(struct adf_hw_csr_ops *ops, void __iomem *base,
+ u32 bank, struct adf_bank_state *state, u32 num_rings)
+{
+ u32 i;
+
+ state->ringstat0 = ops->read_csr_stat(base, bank);
+ state->ringuostat = ops->read_csr_uo_stat(base, bank);
+ state->ringestat = ops->read_csr_e_stat(base, bank);
+ state->ringnestat = ops->read_csr_ne_stat(base, bank);
+ state->ringnfstat = ops->read_csr_nf_stat(base, bank);
+ state->ringfstat = ops->read_csr_f_stat(base, bank);
+ state->ringcstat0 = ops->read_csr_c_stat(base, bank);
+ state->iaintflagen = ops->read_csr_int_en(base, bank);
+ state->iaintflagreg = ops->read_csr_int_flag(base, bank);
+ state->iaintflagsrcsel0 = ops->read_csr_int_srcsel(base, bank);
+ state->iaintcolen = ops->read_csr_int_col_en(base, bank);
+ state->iaintcolctl = ops->read_csr_int_col_ctl(base, bank);
+ state->iaintflagandcolen = ops->read_csr_int_flag_and_col(base, bank);
+ state->ringexpstat = ops->read_csr_exp_stat(base, bank);
+ state->ringexpintenable = ops->read_csr_exp_int_en(base, bank);
+ state->ringsrvarben = ops->read_csr_ring_srv_arb_en(base, bank);
+
+ for (i = 0; i < num_rings; i++) {
+ state->rings[i].head = ops->read_csr_ring_head(base, bank, i);
+ state->rings[i].tail = ops->read_csr_ring_tail(base, bank, i);
+ state->rings[i].config = ops->read_csr_ring_config(base, bank, i);
+ state->rings[i].base = ops->read_csr_ring_base(base, bank, i);
+ }
+}
+
+static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base,
+ u32 bank, struct adf_bank_state *state, u32 num_rings,
+ int tx_rx_gap)
+{
+ u32 val, tmp_val, i;
+ int ret;
+
+ for (i = 0; i < num_rings; i++)
+ ops->write_csr_ring_base(base, bank, i, state->rings[i].base);
+
+ for (i = 0; i < num_rings; i++)
+ ops->write_csr_ring_config(base, bank, i, state->rings[i].config);
+
+ for (i = 0; i < num_rings / 2; i++) {
+ int tx = i * (tx_rx_gap + 1);
+ int rx = tx + tx_rx_gap;
+
+ ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
+ ops->write_csr_ring_tail(base, bank, tx, state->rings[tx].tail);
+
+ /*
+ * The TX ring head needs to be updated again to make sure that
+ * the HW will not consider the ring as full when it is empty
+ * and the correct state flags are set to match the recovered state.
+ */
+ if (state->ringestat & BIT(tx)) {
+ val = ops->read_csr_int_srcsel(base, bank);
+ val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK;
+ ops->write_csr_int_srcsel_w_val(base, bank, val);
+ ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
+ }
+
+ ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
+ val = ops->read_csr_int_srcsel(base, bank);
+ val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
+ ops->write_csr_int_srcsel_w_val(base, bank, val);
+
+ ops->write_csr_ring_head(base, bank, rx, state->rings[rx].head);
+ val = ops->read_csr_int_srcsel(base, bank);
+ val |= ADF_RP_INT_SRC_SEL_F_FALL_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
+ ops->write_csr_int_srcsel_w_val(base, bank, val);
+
+ /*
+ * The RX ring tail needs to be updated again to make sure that
+ * the HW will not consider the ring as empty when it is full
+ * and the correct state flags are set to match the recovered state.
+ */
+ if (state->ringfstat & BIT(rx))
+ ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
+ }
+
+ ops->write_csr_int_flag_and_col(base, bank, state->iaintflagandcolen);
+ ops->write_csr_int_en(base, bank, state->iaintflagen);
+ ops->write_csr_int_col_en(base, bank, state->iaintcolen);
+ ops->write_csr_int_srcsel_w_val(base, bank, state->iaintflagsrcsel0);
+ ops->write_csr_exp_int_en(base, bank, state->ringexpintenable);
+ ops->write_csr_int_col_ctl(base, bank, state->iaintcolctl);
+
+ /*
+ * Verify whether any exceptions were raised during the bank save process.
+ * If exceptions occurred, the status and exception registers cannot
+ * be directly restored. Consequently, further restoration is not
+ * feasible, and the current state of the ring should be maintained.
+ */
+ val = state->ringexpstat;
+ if (val) {
+ pr_info("Bank %u state not fully restored due to exception in saved state (%#x)\n",
+ bank, val);
+ return 0;
+ }
+
+ /* Ensure that the restoration process completed without exceptions */
+ tmp_val = ops->read_csr_exp_stat(base, bank);
+ if (tmp_val) {
+ pr_err("Bank %u restored with exception: %#x\n", bank, tmp_val);
+ return -EFAULT;
+ }
+
+ ops->write_csr_ring_srv_arb_en(base, bank, state->ringsrvarben);
+
+ /* Check that all ring statuses match the saved state. */
+ ret = check_stat(ops->read_csr_stat, state->ringstat0, "ringstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = check_stat(ops->read_csr_e_stat, state->ringestat, "ringestat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = check_stat(ops->read_csr_ne_stat, state->ringnestat, "ringnestat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = check_stat(ops->read_csr_nf_stat, state->ringnfstat, "ringnfstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = check_stat(ops->read_csr_f_stat, state->ringfstat, "ringfstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = check_stat(ops->read_csr_c_stat, state->ringcstat0, "ringcstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * adf_bank_state_save() - save state of bank-related registers
+ * @accel_dev: Pointer to the device structure
+ * @bank_number: Bank number
+ * @state: Pointer to bank state structure
+ *
+ * This function saves the state of a bank by reading the bank CSRs and
+ * writing them in the @state structure.
+ *
+ * Returns 0 on success, error code otherwise
+ */
+int adf_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct adf_bank_state *state)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ void __iomem *csr_base = adf_get_etr_base(accel_dev);
+
+ if (bank_number >= hw_data->num_banks || !state)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev), "Saving state of bank %d\n", bank_number);
+
+ bank_state_save(csr_ops, csr_base, bank_number, state,
+ hw_data->num_rings_per_bank);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_bank_state_save);
+
+/**
+ * adf_bank_state_restore() - restore state of bank-related registers
+ * @accel_dev: Pointer to the device structure
+ * @bank_number: Bank number
+ * @state: Pointer to bank state structure
+ *
+ * This function attempts to restore the state of a bank by writing the
+ * values in the @state structure back to the bank CSRs.
+ *
+ * Returns 0 on success, error code otherwise
+ */
+int adf_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct adf_bank_state *state)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ void __iomem *csr_base = adf_get_etr_base(accel_dev);
+ int ret;
+
+ if (bank_number >= hw_data->num_banks || !state)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev), "Restoring state of bank %d\n", bank_number);
+
+ ret = bank_state_restore(csr_ops, csr_base, bank_number, state,
+ hw_data->num_rings_per_bank, hw_data->tx_rx_gap);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev),
+ "Unable to restore state of bank %d\n", bank_number);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_bank_state_restore);
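
A hedged usage sketch of the save/restore pair (the bank number, allocation context and quiesce step are illustrative); this mirrors how a caller such as the ring-pair reset or live-migration path is expected to use the API:

	struct adf_bank_state *state;
	int ret;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	ret = adf_bank_state_save(accel_dev, bank_nr, state);
	if (!ret) {
		/* ... drain, reset or migrate the bank here ... */
		ret = adf_bank_state_restore(accel_dev, bank_nr, state);
	}
	kfree(state);
	return ret;
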
diff --git a/drivers/crypto/intel/qat/qat_common/adf_bank_state.h b/drivers/crypto/intel/qat/qat_common/adf_bank_state.h
new file mode 100644
index 000000000000..48b573d692dd
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_bank_state.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_BANK_STATE_H_
+#define ADF_BANK_STATE_H_
+
+#include <linux/types.h>
+
+struct adf_accel_dev;
+
+struct ring_config {
+ u64 base;
+ u32 config;
+ u32 head;
+ u32 tail;
+ u32 reserved0;
+};
+
+struct adf_bank_state {
+ u32 ringstat0;
+ u32 ringstat1;
+ u32 ringuostat;
+ u32 ringestat;
+ u32 ringnestat;
+ u32 ringnfstat;
+ u32 ringfstat;
+ u32 ringcstat0;
+ u32 ringcstat1;
+ u32 ringcstat2;
+ u32 ringcstat3;
+ u32 iaintflagen;
+ u32 iaintflagreg;
+ u32 iaintflagsrcsel0;
+ u32 iaintflagsrcsel1;
+ u32 iaintcolen;
+ u32 iaintcolctl;
+ u32 iaintflagandcolen;
+ u32 ringexpstat;
+ u32 ringexpintenable;
+ u32 ringsrvarben;
+ u32 reserved0;
+ struct ring_config rings[ADF_ETR_MAX_RINGS_PER_BANK];
+};
+
+int adf_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct adf_bank_state *state);
+int adf_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct adf_bank_state *state);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
index 15fdf9854b81..81e9e9d7eccd 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
@@ -29,6 +29,7 @@ enum adf_cfg_service_type {
COMP,
SYM,
ASYM,
+ DECOMP,
USED
};
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
index c39871291da7..7d00bcb41ce7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
@@ -7,6 +7,7 @@
#include <linux/pci.h>
#include <linux/string.h>
#include "adf_cfg.h"
+#include "adf_cfg_common.h"
#include "adf_cfg_services.h"
#include "adf_cfg_strings.h"
@@ -15,13 +16,14 @@ static const char *const adf_cfg_services[] = {
[SVC_SYM] = ADF_CFG_SYM,
[SVC_DC] = ADF_CFG_DC,
[SVC_DCC] = ADF_CFG_DCC,
+ [SVC_DECOMP] = ADF_CFG_DECOMP,
};
/*
* Ensure that the size of the array matches the number of services,
- * SVC_BASE_COUNT, that is used to size the bitmap.
+ * SVC_COUNT, that is used to size the bitmap.
*/
-static_assert(ARRAY_SIZE(adf_cfg_services) == SVC_BASE_COUNT);
+static_assert(ARRAY_SIZE(adf_cfg_services) == SVC_COUNT);
/*
* Ensure that the maximum number of concurrent services that can be
@@ -34,7 +36,7 @@ static_assert(ARRAY_SIZE(adf_cfg_services) >= MAX_NUM_CONCURR_SVC);
* Ensure that the number of services fit a single unsigned long, as each
* service is represented by a bit in the mask.
*/
-static_assert(BITS_PER_LONG >= SVC_BASE_COUNT);
+static_assert(BITS_PER_LONG >= SVC_COUNT);
/*
* Ensure that size of the concatenation of all service strings is smaller
@@ -43,6 +45,7 @@ static_assert(BITS_PER_LONG >= SVC_BASE_COUNT);
static_assert(sizeof(ADF_CFG_SYM ADF_SERVICES_DELIMITER
ADF_CFG_ASYM ADF_SERVICES_DELIMITER
ADF_CFG_DC ADF_SERVICES_DELIMITER
+ ADF_CFG_DECOMP ADF_SERVICES_DELIMITER
ADF_CFG_DCC) < ADF_CFG_MAX_VAL_LEN_IN_BYTES);
static int adf_service_string_to_mask(struct adf_accel_dev *accel_dev, const char *buf,
@@ -88,7 +91,7 @@ static int adf_service_mask_to_string(unsigned long mask, char *buf, size_t len)
if (len < ADF_CFG_MAX_VAL_LEN_IN_BYTES)
return -ENOSPC;
- for_each_set_bit(bit, &mask, SVC_BASE_COUNT) {
+ for_each_set_bit(bit, &mask, SVC_COUNT) {
if (offset)
offset += scnprintf(buf + offset, len - offset,
ADF_SERVICES_DELIMITER);
@@ -167,9 +170,43 @@ int adf_get_service_enabled(struct adf_accel_dev *accel_dev)
if (test_bit(SVC_DC, &mask))
return SVC_DC;
+ if (test_bit(SVC_DECOMP, &mask))
+ return SVC_DECOMP;
+
if (test_bit(SVC_DCC, &mask))
return SVC_DCC;
return -EINVAL;
}
EXPORT_SYMBOL_GPL(adf_get_service_enabled);
+
+enum adf_cfg_service_type adf_srv_to_cfg_svc_type(enum adf_base_services svc)
+{
+ switch (svc) {
+ case SVC_ASYM:
+ return ASYM;
+ case SVC_SYM:
+ return SYM;
+ case SVC_DC:
+ return COMP;
+ case SVC_DECOMP:
+ return DECOMP;
+ default:
+ return UNUSED;
+ }
+}
+
+bool adf_is_service_enabled(struct adf_accel_dev *accel_dev, enum adf_base_services svc)
+{
+ enum adf_cfg_service_type arb_srv = adf_srv_to_cfg_svc_type(svc);
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ u8 rps_per_bundle = hw_data->num_banks_per_vf;
+ int i;
+
+ for (i = 0; i < rps_per_bundle; i++) {
+ if (GET_SRV_TYPE(accel_dev, i) == arb_srv)
+ return true;
+ }
+
+ return false;
+}
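
A hypothetical caller sketch for the new helper, gating registration on the decompression service actually being routed to a ring pair (the registration function below is illustrative, not part of this patch):

	if (adf_is_service_enabled(accel_dev, SVC_DECOMP))
		ret = qat_register_decomp_algs(accel_dev);	/* illustrative */
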
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
index 3742c450878f..913d717280af 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
@@ -7,16 +7,21 @@
struct adf_accel_dev;
-enum adf_services {
+enum adf_base_services {
SVC_ASYM = 0,
SVC_SYM,
SVC_DC,
- SVC_DCC,
+ SVC_DECOMP,
SVC_BASE_COUNT
};
+enum adf_extended_services {
+ SVC_DCC = SVC_BASE_COUNT,
+ SVC_COUNT
+};
+
enum adf_composed_services {
- SVC_SYM_ASYM = SVC_BASE_COUNT,
+ SVC_SYM_ASYM = SVC_COUNT,
SVC_SYM_DC,
SVC_ASYM_DC,
};
@@ -33,5 +38,7 @@ int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in,
size_t in_len, char *out, size_t out_len);
int adf_get_service_enabled(struct adf_accel_dev *accel_dev);
int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask);
+enum adf_cfg_service_type adf_srv_to_cfg_svc_type(enum adf_base_services svc);
+bool adf_is_service_enabled(struct adf_accel_dev *accel_dev, enum adf_base_services svc);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
index b79982c4a856..30107a02ee7f 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
@@ -24,6 +24,7 @@
#define ADF_CY "Cy"
#define ADF_DC "Dc"
#define ADF_CFG_DC "dc"
+#define ADF_CFG_DECOMP "decomp"
#define ADF_CFG_CY "sym;asym"
#define ADF_CFG_SYM "sym"
#define ADF_CFG_ASYM "asym"
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index eaa6388a6678..6cf3a95489e8 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -86,7 +86,6 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev);
extern const struct pci_error_handlers adf_err_handler;
void adf_reset_sbr(struct adf_accel_dev *accel_dev);
void adf_reset_flr(struct adf_accel_dev *accel_dev);
-int adf_dev_autoreset(struct adf_accel_dev *accel_dev);
void adf_dev_restore(struct adf_accel_dev *accel_dev);
int adf_init_aer(void);
void adf_exit_aer(void);
@@ -189,6 +188,7 @@ void adf_exit_misc_wq(void);
bool adf_misc_wq_queue_work(struct work_struct *work);
bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
unsigned long delay);
+void adf_misc_wq_flush(void);
#if defined(CONFIG_PCI_IOV)
int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index 0406cb09c5bb..349fdb323763 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
+
+#define pr_fmt(fmt) "QAT: " fmt
+
#include <linux/bitops.h>
#include <linux/iopoll.h>
#include <asm/div64.h>
@@ -259,7 +262,10 @@ bool adf_gen4_services_supported(unsigned long mask)
{
unsigned long num_svc = hweight_long(mask);
- if (mask >= BIT(SVC_BASE_COUNT))
+ if (mask >= BIT(SVC_COUNT))
+ return false;
+
+ if (test_bit(SVC_DECOMP, &mask))
return false;
switch (num_svc) {
@@ -485,187 +491,6 @@ int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev,
return ret;
}
-static void bank_state_save(struct adf_hw_csr_ops *ops, void __iomem *base,
- u32 bank, struct bank_state *state, u32 num_rings)
-{
- u32 i;
-
- state->ringstat0 = ops->read_csr_stat(base, bank);
- state->ringuostat = ops->read_csr_uo_stat(base, bank);
- state->ringestat = ops->read_csr_e_stat(base, bank);
- state->ringnestat = ops->read_csr_ne_stat(base, bank);
- state->ringnfstat = ops->read_csr_nf_stat(base, bank);
- state->ringfstat = ops->read_csr_f_stat(base, bank);
- state->ringcstat0 = ops->read_csr_c_stat(base, bank);
- state->iaintflagen = ops->read_csr_int_en(base, bank);
- state->iaintflagreg = ops->read_csr_int_flag(base, bank);
- state->iaintflagsrcsel0 = ops->read_csr_int_srcsel(base, bank);
- state->iaintcolen = ops->read_csr_int_col_en(base, bank);
- state->iaintcolctl = ops->read_csr_int_col_ctl(base, bank);
- state->iaintflagandcolen = ops->read_csr_int_flag_and_col(base, bank);
- state->ringexpstat = ops->read_csr_exp_stat(base, bank);
- state->ringexpintenable = ops->read_csr_exp_int_en(base, bank);
- state->ringsrvarben = ops->read_csr_ring_srv_arb_en(base, bank);
-
- for (i = 0; i < num_rings; i++) {
- state->rings[i].head = ops->read_csr_ring_head(base, bank, i);
- state->rings[i].tail = ops->read_csr_ring_tail(base, bank, i);
- state->rings[i].config = ops->read_csr_ring_config(base, bank, i);
- state->rings[i].base = ops->read_csr_ring_base(base, bank, i);
- }
-}
-
-#define CHECK_STAT(op, expect_val, name, args...) \
-({ \
- u32 __expect_val = (expect_val); \
- u32 actual_val = op(args); \
- (__expect_val == actual_val) ? 0 : \
- (pr_err("QAT: Fail to restore %s register. Expected 0x%x, actual 0x%x\n", \
- name, __expect_val, actual_val), -EINVAL); \
-})
-
-static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base,
- u32 bank, struct bank_state *state, u32 num_rings,
- int tx_rx_gap)
-{
- u32 val, tmp_val, i;
- int ret;
-
- for (i = 0; i < num_rings; i++)
- ops->write_csr_ring_base(base, bank, i, state->rings[i].base);
-
- for (i = 0; i < num_rings; i++)
- ops->write_csr_ring_config(base, bank, i, state->rings[i].config);
-
- for (i = 0; i < num_rings / 2; i++) {
- int tx = i * (tx_rx_gap + 1);
- int rx = tx + tx_rx_gap;
-
- ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
- ops->write_csr_ring_tail(base, bank, tx, state->rings[tx].tail);
-
- /*
- * The TX ring head needs to be updated again to make sure that
- * the HW will not consider the ring as full when it is empty
- * and the correct state flags are set to match the recovered state.
- */
- if (state->ringestat & BIT(tx)) {
- val = ops->read_csr_int_srcsel(base, bank);
- val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK;
- ops->write_csr_int_srcsel_w_val(base, bank, val);
- ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
- }
-
- ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
- val = ops->read_csr_int_srcsel(base, bank);
- val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
- ops->write_csr_int_srcsel_w_val(base, bank, val);
-
- ops->write_csr_ring_head(base, bank, rx, state->rings[rx].head);
- val = ops->read_csr_int_srcsel(base, bank);
- val |= ADF_RP_INT_SRC_SEL_F_FALL_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
- ops->write_csr_int_srcsel_w_val(base, bank, val);
-
- /*
- * The RX ring tail needs to be updated again to make sure that
- * the HW will not consider the ring as empty when it is full
- * and the correct state flags are set to match the recovered state.
- */
- if (state->ringfstat & BIT(rx))
- ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
- }
-
- ops->write_csr_int_flag_and_col(base, bank, state->iaintflagandcolen);
- ops->write_csr_int_en(base, bank, state->iaintflagen);
- ops->write_csr_int_col_en(base, bank, state->iaintcolen);
- ops->write_csr_int_srcsel_w_val(base, bank, state->iaintflagsrcsel0);
- ops->write_csr_exp_int_en(base, bank, state->ringexpintenable);
- ops->write_csr_int_col_ctl(base, bank, state->iaintcolctl);
- ops->write_csr_ring_srv_arb_en(base, bank, state->ringsrvarben);
-
- /* Check that all ring statuses match the saved state. */
- ret = CHECK_STAT(ops->read_csr_stat, state->ringstat0, "ringstat",
- base, bank);
- if (ret)
- return ret;
-
- ret = CHECK_STAT(ops->read_csr_e_stat, state->ringestat, "ringestat",
- base, bank);
- if (ret)
- return ret;
-
- ret = CHECK_STAT(ops->read_csr_ne_stat, state->ringnestat, "ringnestat",
- base, bank);
- if (ret)
- return ret;
-
- ret = CHECK_STAT(ops->read_csr_nf_stat, state->ringnfstat, "ringnfstat",
- base, bank);
- if (ret)
- return ret;
-
- ret = CHECK_STAT(ops->read_csr_f_stat, state->ringfstat, "ringfstat",
- base, bank);
- if (ret)
- return ret;
-
- ret = CHECK_STAT(ops->read_csr_c_stat, state->ringcstat0, "ringcstat",
- base, bank);
- if (ret)
- return ret;
-
- tmp_val = ops->read_csr_exp_stat(base, bank);
- val = state->ringexpstat;
- if (tmp_val && !val) {
- pr_err("QAT: Bank was restored with exception: 0x%x\n", val);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
- struct bank_state *state)
-{
- struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
- struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
- void __iomem *csr_base = adf_get_etr_base(accel_dev);
-
- if (bank_number >= hw_data->num_banks || !state)
- return -EINVAL;
-
- dev_dbg(&GET_DEV(accel_dev), "Saving state of bank %d\n", bank_number);
-
- bank_state_save(csr_ops, csr_base, bank_number, state,
- hw_data->num_rings_per_bank);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_bank_state_save);
-
-int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number,
- struct bank_state *state)
-{
- struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
- struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
- void __iomem *csr_base = adf_get_etr_base(accel_dev);
- int ret;
-
- if (bank_number >= hw_data->num_banks || !state)
- return -EINVAL;
-
- dev_dbg(&GET_DEV(accel_dev), "Restoring state of bank %d\n", bank_number);
-
- ret = bank_state_restore(csr_ops, csr_base, bank_number, state,
- hw_data->num_rings_per_bank, hw_data->tx_rx_gap);
- if (ret)
- dev_err(&GET_DEV(accel_dev),
- "Unable to restore state of bank %d\n", bank_number);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_bank_state_restore);
-
static int adf_gen4_build_comp_block(void *ctx, enum adf_dc_algo algo)
{
struct icp_qat_fw_comp_req *req_tmpl = ctx;
@@ -733,3 +558,43 @@ void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops)
dc_ops->build_decomp_block = adf_gen4_build_decomp_block;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops);
+
+void adf_gen4_init_num_svc_aes(struct adf_rl_hw_data *device_data)
+{
+ struct adf_hw_device_data *hw_data;
+ unsigned int i;
+ u32 ae_cnt;
+
+ hw_data = container_of(device_data, struct adf_hw_device_data, rl_data);
+ ae_cnt = hweight32(hw_data->get_ae_mask(hw_data));
+ if (!ae_cnt)
+ return;
+
+ for (i = 0; i < SVC_BASE_COUNT; i++)
+ device_data->svc_ae_mask[i] = ae_cnt - 1;
+
+ /*
+ * The decompression service is not supported on QAT GEN4 devices.
+ * Therefore, set svc_ae_mask to 0.
+ */
+ device_data->svc_ae_mask[SVC_DECOMP] = 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_num_svc_aes);
+
+u32 adf_gen4_get_svc_slice_cnt(struct adf_accel_dev *accel_dev,
+ enum adf_base_services svc)
+{
+ struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
+
+ switch (svc) {
+ case SVC_SYM:
+ return device_data->slices.cph_cnt;
+ case SVC_ASYM:
+ return device_data->slices.pke_cnt;
+ case SVC_DC:
+ return device_data->slices.dcpr_cnt;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_svc_slice_cnt);
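For orientation, the new get_svc_slice_cnt() hook feeds the rate-limiting token math that this same series reworks in adf_rl_calculate_slice_tokens(). A minimal sketch of that consumer follows; the example_ name and the elided scan-interval division are assumptions, not code from this patch.

#include "adf_accel_devices.h"
#include "adf_cfg_services.h"

/* Sketch: scale the device clock by the slices able to serve this service. */
static u64 example_avail_slice_cycles(struct adf_accel_dev *accel_dev,
				      enum adf_base_services svc)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	u64 cycles = hw_data->clock_frequency;

	cycles *= hw_data->get_svc_slice_cnt(accel_dev, svc);

	return cycles; /* the real code then divides by the scan interval */
}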
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index e4f4d5fa616d..cd26b6724c43 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -84,9 +84,6 @@
#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
/* Ring interrupt */
-#define ADF_RP_INT_SRC_SEL_F_RISE_MASK GENMASK(1, 0)
-#define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0)
-#define ADF_RP_INT_SRC_SEL_RANGE_WIDTH 4
#define ADF_COALESCED_POLL_TIMEOUT_US (1 * USEC_PER_SEC)
#define ADF_COALESCED_POLL_DELAY_US 1000
#define ADF_WQM_CSR_RPINTSOU(bank) (0x200000 + ((bank) << 12))
@@ -176,11 +173,10 @@ int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev,
u32 bank_number, int timeout_us);
void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev,
u32 bank_number);
-int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
- struct bank_state *state);
-int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev,
- u32 bank_number, struct bank_state *state);
bool adf_gen4_services_supported(unsigned long service_mask);
void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops);
+void adf_gen4_init_num_svc_aes(struct adf_rl_hw_data *device_data);
+u32 adf_gen4_get_svc_slice_cnt(struct adf_accel_dev *accel_dev,
+ enum adf_base_services svc);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
index 2e4095c4c12c..b7e38842a46d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
@@ -1,47 +1,18 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */
#include <linux/dma-mapping.h>
-#include <linux/kernel.h>
#include <linux/string_helpers.h>
-#include <linux/stringify.h>
#include "adf_accel_devices.h"
#include "adf_admin.h"
#include "adf_common_drv.h"
#include "adf_gen4_pm.h"
+#include "adf_pm_dbgfs_utils.h"
#include "icp_qat_fw_init_admin.h"
-/*
- * This is needed because a variable is used to index the mask at
- * pm_scnprint_table(), making it not compile time constant, so the compile
- * asserts from FIELD_GET() or u32_get_bits() won't be fulfilled.
- */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-
-#define PM_INFO_MEMBER_OFF(member) \
- (offsetof(struct icp_qat_fw_init_admin_pm_info, member) / sizeof(u32))
-
-#define PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, _mask_) \
-{ \
- .reg_offset = PM_INFO_MEMBER_OFF(_reg_), \
- .key = __stringify(_field_), \
- .field_mask = _mask_, \
-}
-
-#define PM_INFO_REGSET_ENTRY32(_reg_, _field_) \
- PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, GENMASK(31, 0))
-
#define PM_INFO_REGSET_ENTRY(_reg_, _field_) \
PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, ADF_GEN4_PM_##_field_##_MASK)
-#define PM_INFO_MAX_KEY_LEN 21
-
-struct pm_status_row {
- int reg_offset;
- u32 field_mask;
- const char *key;
-};
-
static const struct pm_status_row pm_fuse_rows[] = {
PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM),
PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM_IDLE),
@@ -109,44 +80,6 @@ static const struct pm_status_row pm_csrs_rows[] = {
PM_INFO_REGSET_ENTRY32(pm.pwrreq, CPM_PM_PWRREQ),
};
-static int pm_scnprint_table(char *buff, const struct pm_status_row *table,
- u32 *pm_info_regs, size_t buff_size, int table_len,
- bool lowercase)
-{
- char key[PM_INFO_MAX_KEY_LEN];
- int wr = 0;
- int i;
-
- for (i = 0; i < table_len; i++) {
- if (lowercase)
- string_lower(key, table[i].key);
- else
- string_upper(key, table[i].key);
-
- wr += scnprintf(&buff[wr], buff_size - wr, "%s: %#x\n", key,
- field_get(table[i].field_mask,
- pm_info_regs[table[i].reg_offset]));
- }
-
- return wr;
-}
-
-static int pm_scnprint_table_upper_keys(char *buff, const struct pm_status_row *table,
- u32 *pm_info_regs, size_t buff_size,
- int table_len)
-{
- return pm_scnprint_table(buff, table, pm_info_regs, buff_size,
- table_len, false);
-}
-
-static int pm_scnprint_table_lower_keys(char *buff, const struct pm_status_row *table,
- u32 *pm_info_regs, size_t buff_size,
- int table_len)
-{
- return pm_scnprint_table(buff, table, pm_info_regs, buff_size,
- table_len, true);
-}
-
static_assert(sizeof(struct icp_qat_fw_init_admin_pm_info) < PAGE_SIZE);
static ssize_t adf_gen4_print_pm_status(struct adf_accel_dev *accel_dev,
@@ -191,9 +124,9 @@ static ssize_t adf_gen4_print_pm_status(struct adf_accel_dev *accel_dev,
/* Fusectl related */
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
"----------- PM Fuse info ---------\n");
- len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_fuse_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_fuse_rows));
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_fuse_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_fuse_rows));
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "max_pwrreq: %#x\n",
pm_info->max_pwrreq);
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "min_pwrreq: %#x\n",
@@ -204,28 +137,28 @@ static ssize_t adf_gen4_print_pm_status(struct adf_accel_dev *accel_dev,
"------------ PM Info ------------\n");
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "power_level: %s\n",
pm_info->pwr_state == PM_SET_MIN ? "min" : "max");
- len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_info_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_info_rows));
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_info_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_info_rows));
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "pm_mode: STATIC\n");
/* SSM related */
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
"----------- SSM_PM Info ----------\n");
- len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_ssm_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_ssm_rows));
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_ssm_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_ssm_rows));
/* Log related */
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
"------------- PM Log -------------\n");
- len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_log_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_log_rows));
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_log_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_log_rows));
- len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_event_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_event_rows));
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_event_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_event_rows));
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "idle_irq_count: %#x\n",
pm->idle_irq_counters);
@@ -241,9 +174,9 @@ static ssize_t adf_gen4_print_pm_status(struct adf_accel_dev *accel_dev,
/* CSRs content */
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
"----------- HW PM CSRs -----------\n");
- len += pm_scnprint_table_upper_keys(&pm_kv[len], pm_csrs_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_csrs_rows));
+ len += adf_pm_scnprint_table_upper_keys(&pm_kv[len], pm_csrs_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_csrs_rows));
val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG);
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
index a62eb5e8dbe6..adb21656a3ba 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
@@ -9,6 +9,7 @@
#include <asm/errno.h>
#include "adf_accel_devices.h"
+#include "adf_bank_state.h"
#include "adf_common_drv.h"
#include "adf_gen4_hw_data.h"
#include "adf_gen4_pfvf.h"
@@ -358,7 +359,7 @@ static int adf_gen4_vfmig_load_etr_regs(struct adf_mstate_mgr *sub_mgr,
pf_bank_nr = vf_bank_info->bank_nr + vf_bank_info->vf_nr * hw_data->num_banks_per_vf;
ret = hw_data->bank_state_restore(accel_dev, pf_bank_nr,
- (struct bank_state *)state);
+ (struct adf_bank_state *)state);
if (ret) {
dev_err(&GET_DEV(accel_dev),
"Failed to load regs for vf%d bank%d\n",
@@ -585,7 +586,7 @@ static int adf_gen4_vfmig_save_etr_regs(struct adf_mstate_mgr *subs, u8 *state,
pf_bank_nr += vf_bank_info->vf_nr * hw_data->num_banks_per_vf;
ret = hw_data->bank_state_save(accel_dev, pf_bank_nr,
- (struct bank_state *)state);
+ (struct adf_bank_state *)state);
if (ret) {
dev_err(&GET_DEV(accel_dev),
"Failed to save regs for vf%d bank%d\n",
@@ -593,7 +594,7 @@ static int adf_gen4_vfmig_save_etr_regs(struct adf_mstate_mgr *subs, u8 *state,
return ret;
}
- return sizeof(struct bank_state);
+ return sizeof(struct adf_bank_state);
}
static int adf_gen4_vfmig_save_etr_bank(struct adf_accel_dev *accel_dev,
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h
index 9a5b995f7ada..4c0d576e8c21 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h
@@ -24,5 +24,29 @@ struct adf_accel_dev;
/* cpm_pm_status bitfields */
#define ADF_GEN6_PM_INIT_STATE BIT(21)
+#define ADF_GEN6_PM_CPM_PM_STATE_MASK GENMASK(22, 20)
+
+/* fusectl0 bitfields */
+#define ADF_GEN6_PM_ENABLE_PM_MASK BIT(21)
+#define ADF_GEN6_PM_ENABLE_PM_IDLE_MASK BIT(22)
+#define ADF_GEN6_PM_ENABLE_DEEP_PM_IDLE_MASK BIT(23)
+
+/* cpm_pm_fw_init bitfields */
+#define ADF_GEN6_PM_IDLE_FILTER_MASK GENMASK(5, 3)
+#define ADF_GEN6_PM_IDLE_ENABLE_MASK BIT(2)
+
+/* ssm_pm_enable bitfield */
+#define ADF_GEN6_PM_SSM_PM_ENABLE_MASK BIT(0)
+
+/* ssm_pm_domain_status bitfield */
+#define ADF_GEN6_PM_DOMAIN_POWERED_UP_MASK BIT(0)
+
+#ifdef CONFIG_DEBUG_FS
+void adf_gen6_init_dev_pm_data(struct adf_accel_dev *accel_dev);
+#else
+static inline void adf_gen6_init_dev_pm_data(struct adf_accel_dev *accel_dev)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
#endif /* ADF_GEN6_PM_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
new file mode 100644
index 000000000000..603aefba0fdb
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/string_helpers.h>
+
+#include "adf_admin.h"
+#include "adf_common_drv.h"
+#include "adf_gen6_pm.h"
+#include "adf_pm_dbgfs_utils.h"
+#include "icp_qat_fw_init_admin.h"
+
+#define PM_INFO_REGSET_ENTRY(_reg_, _field_) \
+ PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, ADF_GEN6_PM_##_field_##_MASK)
+
+static struct pm_status_row pm_fuse_rows[] = {
+ PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM),
+ PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM_IDLE),
+ PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_DEEP_PM_IDLE),
+};
+
+static struct pm_status_row pm_info_rows[] = {
+ PM_INFO_REGSET_ENTRY(pm.status, CPM_PM_STATE),
+ PM_INFO_REGSET_ENTRY(pm.fw_init, IDLE_ENABLE),
+ PM_INFO_REGSET_ENTRY(pm.fw_init, IDLE_FILTER),
+};
+
+static struct pm_status_row pm_ssm_rows[] = {
+ PM_INFO_REGSET_ENTRY(ssm.pm_enable, SSM_PM_ENABLE),
+ PM_INFO_REGSET_ENTRY(ssm.pm_domain_status, DOMAIN_POWERED_UP),
+};
+
+static struct pm_status_row pm_csrs_rows[] = {
+ PM_INFO_REGSET_ENTRY32(pm.fw_init, CPM_PM_FW_INIT),
+ PM_INFO_REGSET_ENTRY32(pm.status, CPM_PM_STATUS),
+};
+
+static_assert(sizeof(struct icp_qat_fw_init_admin_pm_info) < PAGE_SIZE);
+
+static ssize_t adf_gen6_print_pm_status(struct adf_accel_dev *accel_dev,
+ char __user *buf, size_t count,
+ loff_t *pos)
+{
+ void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ struct icp_qat_fw_init_admin_pm_info *pm_info;
+ dma_addr_t p_state_addr;
+ u32 *pm_info_regs;
+ size_t len = 0;
+ char *pm_kv;
+ u32 val;
+ int ret;
+
+ pm_info = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!pm_info)
+ return -ENOMEM;
+
+ pm_kv = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!pm_kv) {
+ kfree(pm_info);
+ return -ENOMEM;
+ }
+
+ p_state_addr = dma_map_single(&GET_DEV(accel_dev), pm_info, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ ret = dma_mapping_error(&GET_DEV(accel_dev), p_state_addr);
+ if (ret)
+ goto out_free;
+
+ /* Query power management information from QAT FW */
+ ret = adf_get_pm_info(accel_dev, p_state_addr, PAGE_SIZE);
+ dma_unmap_single(&GET_DEV(accel_dev), p_state_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (ret)
+ goto out_free;
+
+ pm_info_regs = (u32 *)pm_info;
+
+ /* Fuse control register */
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "----------- PM Fuse info ---------\n");
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_fuse_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_fuse_rows));
+
+ /* Power management */
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "----------- PM Info --------------\n");
+
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_info_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_info_rows));
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "pm_mode: ACTIVE\n");
+
+ /* Shared Slice Module */
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "----------- SSM_PM Info ----------\n");
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_ssm_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_ssm_rows));
+
+ /* Control status register content */
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "----------- HW PM CSRs -----------\n");
+ len += adf_pm_scnprint_table_upper_keys(&pm_kv[len], pm_csrs_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_csrs_rows));
+
+ val = ADF_CSR_RD(pmisc, ADF_GEN6_PM_INTERRUPT);
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "CPM_PM_INTERRUPT: %#x\n", val);
+ ret = simple_read_from_buffer(buf, count, pos, pm_kv, len);
+
+out_free:
+ kfree(pm_info);
+ kfree(pm_kv);
+
+ return ret;
+}
+
+void adf_gen6_init_dev_pm_data(struct adf_accel_dev *accel_dev)
+{
+ accel_dev->power_management.print_pm_status = adf_gen6_print_pm_status;
+ accel_dev->power_management.present = true;
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_dev_pm_data);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
index 58a072e2f936..c9b151006dca 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
@@ -5,6 +5,7 @@
#include "adf_gen4_config.h"
#include "adf_gen4_hw_csr_data.h"
#include "adf_gen4_pfvf.h"
+#include "adf_gen4_vf_mig.h"
#include "adf_gen6_shared.h"
struct adf_accel_dev;
@@ -47,3 +48,9 @@ int adf_gen6_no_dev_config(struct adf_accel_dev *accel_dev)
return adf_no_dev_config(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_gen6_no_dev_config);
+
+void adf_gen6_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops)
+{
+ adf_gen4_init_vf_mig_ops(vfmig_ops);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_vf_mig_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
index bc8e71e984fc..fc6fad029a70 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
@@ -4,6 +4,7 @@
#define ADF_GEN6_SHARED_H_
struct adf_hw_csr_ops;
+struct qat_migdev_ops;
struct adf_accel_dev;
struct adf_pfvf_ops;
@@ -12,4 +13,5 @@ void adf_gen6_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev);
int adf_gen6_comp_dev_config(struct adf_accel_dev *accel_dev);
int adf_gen6_no_dev_config(struct adf_accel_dev *accel_dev);
+void adf_gen6_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops);
#endif/* ADF_GEN6_SHARED_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c
new file mode 100644
index 000000000000..cf804f95838a
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2025 Intel Corporation. */
+#include <linux/export.h>
+
+#include "adf_gen6_tl.h"
+#include "adf_telemetry.h"
+#include "adf_tl_debugfs.h"
+#include "icp_qat_fw_init_admin.h"
+
+#define ADF_GEN6_TL_DEV_REG_OFF(reg) ADF_TL_DEV_REG_OFF(reg, gen6)
+
+#define ADF_GEN6_TL_RP_REG_OFF(reg) ADF_TL_RP_REG_OFF(reg, gen6)
+
+#define ADF_GEN6_TL_SL_UTIL_COUNTER(_name) \
+ ADF_TL_COUNTER("util_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_util, gen6))
+
+#define ADF_GEN6_TL_SL_EXEC_COUNTER(_name) \
+ ADF_TL_COUNTER("exec_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_exec_cnt, gen6))
+
+#define SLICE_IDX(sl) offsetof(struct icp_qat_fw_init_admin_slice_cnt, sl##_cnt)
+
+/* Device level counters. */
+static const struct adf_tl_dbg_counter dev_counters[] = {
+ /* PCIe partial transactions. */
+ ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_prt_trans_cnt)),
+ /* Max read latency[ns]. */
+ ADF_TL_COUNTER(MAX_RD_LAT_NAME, ADF_TL_COUNTER_NS,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_rd_lat_max)),
+ /* Read latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(RD_LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_rd_lat_acc),
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_rd_cmpl_cnt)),
+ /* Max "get to put" latency[ns]. */
+ ADF_TL_COUNTER(MAX_LAT_NAME, ADF_TL_COUNTER_NS,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_gp_lat_max)),
+ /* "Get to put" latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_gp_lat_acc),
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_ae_put_cnt)),
+ /* PCIe write bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_bw_in)),
+ /* PCIe read bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_bw_out)),
+ /* Page request latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(PAGE_REQ_LAT_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_page_req_lat_acc),
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_page_req_cnt)),
+ /* Page translation latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(AT_TRANS_LAT_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_trans_lat_acc),
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_trans_lat_cnt)),
+ /* Maximum uTLB used. */
+ ADF_TL_COUNTER(AT_MAX_UTLB_USED_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_max_utlb_used)),
+};
+
+/* Accelerator utilization counters */
+static const struct adf_tl_dbg_counter sl_util_counters[ADF_TL_SL_CNT_COUNT] = {
+ /* Compression accelerator utilization. */
+ [SLICE_IDX(cpr)] = ADF_GEN6_TL_SL_UTIL_COUNTER(cnv),
+ /* Decompression accelerator utilization. */
+ [SLICE_IDX(dcpr)] = ADF_GEN6_TL_SL_UTIL_COUNTER(dcprz),
+ /* PKE accelerator utilization. */
+ [SLICE_IDX(pke)] = ADF_GEN6_TL_SL_UTIL_COUNTER(pke),
+ /* Wireless Authentication accelerator utilization. */
+ [SLICE_IDX(wat)] = ADF_GEN6_TL_SL_UTIL_COUNTER(wat),
+ /* Wireless Cipher accelerator utilization. */
+ [SLICE_IDX(wcp)] = ADF_GEN6_TL_SL_UTIL_COUNTER(wcp),
+ /* UCS accelerator utilization. */
+ [SLICE_IDX(ucs)] = ADF_GEN6_TL_SL_UTIL_COUNTER(ucs),
+ /* Authentication accelerator utilization. */
+ [SLICE_IDX(ath)] = ADF_GEN6_TL_SL_UTIL_COUNTER(ath),
+};
+
+/* Accelerator execution counters */
+static const struct adf_tl_dbg_counter sl_exec_counters[ADF_TL_SL_CNT_COUNT] = {
+ /* Compression accelerator execution count. */
+ [SLICE_IDX(cpr)] = ADF_GEN6_TL_SL_EXEC_COUNTER(cnv),
+ /* Decompression accelerator execution count. */
+ [SLICE_IDX(dcpr)] = ADF_GEN6_TL_SL_EXEC_COUNTER(dcprz),
+ /* PKE execution count. */
+ [SLICE_IDX(pke)] = ADF_GEN6_TL_SL_EXEC_COUNTER(pke),
+ /* Wireless Authentication accelerator execution count. */
+ [SLICE_IDX(wat)] = ADF_GEN6_TL_SL_EXEC_COUNTER(wat),
+ /* Wireless Cipher accelerator execution count. */
+ [SLICE_IDX(wcp)] = ADF_GEN6_TL_SL_EXEC_COUNTER(wcp),
+ /* UCS accelerator execution count. */
+ [SLICE_IDX(ucs)] = ADF_GEN6_TL_SL_EXEC_COUNTER(ucs),
+ /* Authentication accelerator execution count. */
+ [SLICE_IDX(ath)] = ADF_GEN6_TL_SL_EXEC_COUNTER(ath),
+};
+
+/* Ring pair counters. */
+static const struct adf_tl_dbg_counter rp_counters[] = {
+ /* PCIe partial transactions. */
+ ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_prt_trans_cnt)),
+ /* "Get to put" latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_gp_lat_acc),
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_ae_put_cnt)),
+ /* PCIe write bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_bw_in)),
+ /* PCIe read bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_bw_out)),
+ /* Message descriptor DevTLB hit rate. */
+ ADF_TL_COUNTER(AT_GLOB_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_hit)),
+ /* Message descriptor DevTLB miss rate. */
+ ADF_TL_COUNTER(AT_GLOB_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_miss)),
+ /* Payload DevTLB hit rate. */
+ ADF_TL_COUNTER(AT_PAYLD_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_hit)),
+ /* Payload DevTLB miss rate. */
+ ADF_TL_COUNTER(AT_PAYLD_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_miss)),
+};
+
+void adf_gen6_init_tl_data(struct adf_tl_hw_data *tl_data)
+{
+ tl_data->layout_sz = ADF_GEN6_TL_LAYOUT_SZ;
+ tl_data->slice_reg_sz = ADF_GEN6_TL_SLICE_REG_SZ;
+ tl_data->rp_reg_sz = ADF_GEN6_TL_RP_REG_SZ;
+ tl_data->num_hbuff = ADF_GEN6_TL_NUM_HIST_BUFFS;
+ tl_data->max_rp = ADF_GEN6_TL_MAX_RP_NUM;
+ tl_data->msg_cnt_off = ADF_GEN6_TL_MSG_CNT_OFF;
+ tl_data->cpp_ns_per_cycle = ADF_GEN6_CPP_NS_PER_CYCLE;
+ tl_data->bw_units_to_bytes = ADF_GEN6_TL_BW_HW_UNITS_TO_BYTES;
+
+ tl_data->dev_counters = dev_counters;
+ tl_data->num_dev_counters = ARRAY_SIZE(dev_counters);
+ tl_data->sl_util_counters = sl_util_counters;
+ tl_data->sl_exec_counters = sl_exec_counters;
+ tl_data->rp_counters = rp_counters;
+ tl_data->num_rp_counters = ARRAY_SIZE(rp_counters);
+ tl_data->max_sl_cnt = ADF_GEN6_TL_MAX_SLICES_PER_TYPE;
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_tl_data);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.h
new file mode 100644
index 000000000000..49db660b8eb9
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2025 Intel Corporation. */
+#ifndef ADF_GEN6_TL_H
+#define ADF_GEN6_TL_H
+
+#include <linux/types.h>
+
+struct adf_tl_hw_data;
+
+/* Computation constants. */
+#define ADF_GEN6_CPP_NS_PER_CYCLE 2
+#define ADF_GEN6_TL_BW_HW_UNITS_TO_BYTES 64
+
+/* Maximum aggregation time. Value is in milliseconds. */
+#define ADF_GEN6_TL_MAX_AGGR_TIME_MS 4000
+/* Number of buffers to store historic values. */
+#define ADF_GEN6_TL_NUM_HIST_BUFFS \
+ (ADF_GEN6_TL_MAX_AGGR_TIME_MS / ADF_TL_DATA_WR_INTERVAL_MS)
+
+/* Max number of HW resources of one type */
+#define ADF_GEN6_TL_MAX_SLICES_PER_TYPE 32
+#define MAX_ATH_SL_COUNT 7
+#define MAX_CNV_SL_COUNT 2
+#define MAX_DCPRZ_SL_COUNT 2
+#define MAX_PKE_SL_COUNT 32
+#define MAX_UCS_SL_COUNT 4
+#define MAX_WAT_SL_COUNT 5
+#define MAX_WCP_SL_COUNT 5
+
+#define MAX_ATH_CMDQ_COUNT 14
+#define MAX_CNV_CMDQ_COUNT 6
+#define MAX_DCPRZ_CMDQ_COUNT 6
+#define MAX_PKE_CMDQ_COUNT 32
+#define MAX_UCS_CMDQ_COUNT 12
+#define MAX_WAT_CMDQ_COUNT 35
+#define MAX_WCP_CMDQ_COUNT 35
+
+/* Max number of simultaneously monitored ring pairs. */
+#define ADF_GEN6_TL_MAX_RP_NUM 4
+
+/**
+ * struct adf_gen6_tl_slice_data_regs - HW slice data as populated by FW.
+ * @reg_tm_slice_exec_cnt: Slice execution count.
+ * @reg_tm_slice_util: Slice utilization.
+ */
+struct adf_gen6_tl_slice_data_regs {
+ __u32 reg_tm_slice_exec_cnt;
+ __u32 reg_tm_slice_util;
+};
+
+#define ADF_GEN6_TL_SLICE_REG_SZ sizeof(struct adf_gen6_tl_slice_data_regs)
+
+/**
+ * struct adf_gen6_tl_cmdq_data_regs - HW CMDQ data as populated by FW.
+ * @reg_tm_cmdq_wait_cnt: CMDQ wait count.
+ * @reg_tm_cmdq_exec_cnt: CMDQ execution count.
+ * @reg_tm_cmdq_drain_cnt: CMDQ drain count.
+ * @reserved: reserved
+ */
+struct adf_gen6_tl_cmdq_data_regs {
+ __u32 reg_tm_cmdq_wait_cnt;
+ __u32 reg_tm_cmdq_exec_cnt;
+ __u32 reg_tm_cmdq_drain_cnt;
+ __u32 reserved;
+};
+
+#define ADF_GEN6_TL_CMDQ_REG_SZ sizeof(struct adf_gen6_tl_cmdq_data_regs)
+
+/**
+ * struct adf_gen6_tl_device_data_regs - This structure stores device telemetry
+ * counter values as they are being populated periodically by device.
+ * @reg_tl_rd_lat_acc: read latency accumulator
+ * @reg_tl_gp_lat_acc: "get to put" latency accumulator
+ * @reg_tl_at_page_req_lat_acc: AT/DevTLB page request latency accumulator
+ * @reg_tl_at_trans_lat_acc: DevTLB transaction latency accumulator
+ * @reg_tl_re_acc: accumulated ring empty time
+ * @reg_tl_prt_trans_cnt: PCIe partial transactions
+ * @reg_tl_rd_lat_max: maximum logged read latency
+ * @reg_tl_rd_cmpl_cnt: read requests completed count
+ * @reg_tl_gp_lat_max: maximum logged get to put latency
+ * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings
+ * @reg_tl_bw_in: PCIe write bandwidth
+ * @reg_tl_bw_out: PCIe read bandwidth
+ * @reg_tl_at_page_req_cnt: DevTLB page requests count
+ * @reg_tl_at_trans_lat_cnt: DevTLB transaction latency samples count
+ * @reg_tl_at_max_utlb_used: maximum uTLB used
+ * @reg_tl_re_cnt: ring empty time samples count
+ * @reserved: reserved
+ * @ath_slices: array of Authentication slices utilization registers
+ * @cnv_slices: array of Compression slices utilization registers
+ * @dcprz_slices: array of Decompression slices utilization registers
+ * @pke_slices: array of PKE slices utilization registers
+ * @ucs_slices: array of UCS slices utilization registers
+ * @wat_slices: array of Wireless Authentication slices utilization registers
+ * @wcp_slices: array of Wireless Cipher slices utilization registers
+ * @ath_cmdq: array of Authentication cmdq telemetry registers
+ * @cnv_cmdq: array of Compression cmdq telemetry registers
+ * @dcprz_cmdq: array of Decompression cmdq telemetry registers
+ * @pke_cmdq: array of PKE cmdq telemetry registers
+ * @ucs_cmdq: array of UCS cmdq telemetry registers
+ * @wat_cmdq: array of Wireless Authentication cmdq telemetry registers
+ * @wcp_cmdq: array of Wireless Cipher cmdq telemetry registers
+ */
+struct adf_gen6_tl_device_data_regs {
+ __u64 reg_tl_rd_lat_acc;
+ __u64 reg_tl_gp_lat_acc;
+ __u64 reg_tl_at_page_req_lat_acc;
+ __u64 reg_tl_at_trans_lat_acc;
+ __u64 reg_tl_re_acc;
+ __u32 reg_tl_prt_trans_cnt;
+ __u32 reg_tl_rd_lat_max;
+ __u32 reg_tl_rd_cmpl_cnt;
+ __u32 reg_tl_gp_lat_max;
+ __u32 reg_tl_ae_put_cnt;
+ __u32 reg_tl_bw_in;
+ __u32 reg_tl_bw_out;
+ __u32 reg_tl_at_page_req_cnt;
+ __u32 reg_tl_at_trans_lat_cnt;
+ __u32 reg_tl_at_max_utlb_used;
+ __u32 reg_tl_re_cnt;
+ __u32 reserved;
+ struct adf_gen6_tl_slice_data_regs ath_slices[MAX_ATH_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs cnv_slices[MAX_CNV_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs dcprz_slices[MAX_DCPRZ_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs pke_slices[MAX_PKE_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs ucs_slices[MAX_UCS_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs wat_slices[MAX_WAT_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs wcp_slices[MAX_WCP_SL_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs ath_cmdq[MAX_ATH_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs cnv_cmdq[MAX_CNV_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs dcprz_cmdq[MAX_DCPRZ_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs pke_cmdq[MAX_PKE_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs ucs_cmdq[MAX_UCS_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs wat_cmdq[MAX_WAT_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs wcp_cmdq[MAX_WCP_CMDQ_COUNT];
+};
+
+/**
+ * struct adf_gen6_tl_ring_pair_data_regs - This structure stores ring pair
+ * telemetry counter values as they are being populated periodically by device.
+ * @reg_tl_gp_lat_acc: get-put latency accumulator
+ * @reg_tl_re_acc: accumulated ring empty time
+ * @reg_tl_prt_trans_cnt: PCIe partial transactions
+ * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings
+ * @reg_tl_bw_in: PCIe write bandwidth
+ * @reg_tl_bw_out: PCIe read bandwidth
+ * @reg_tl_at_glob_devtlb_hit: Message descriptor DevTLB hit rate
+ * @reg_tl_at_glob_devtlb_miss: Message descriptor DevTLB miss rate
+ * @reg_tl_at_payld_devtlb_hit: Payload DevTLB hit rate
+ * @reg_tl_at_payld_devtlb_miss: Payload DevTLB miss rate
+ * @reg_tl_re_cnt: ring empty time samples count
+ * @reserved1: reserved
+ */
+struct adf_gen6_tl_ring_pair_data_regs {
+ __u64 reg_tl_gp_lat_acc;
+ __u64 reg_tl_re_acc;
+ __u32 reg_tl_prt_trans_cnt;
+ __u32 reg_tl_ae_put_cnt;
+ __u32 reg_tl_bw_in;
+ __u32 reg_tl_bw_out;
+ __u32 reg_tl_at_glob_devtlb_hit;
+ __u32 reg_tl_at_glob_devtlb_miss;
+ __u32 reg_tl_at_payld_devtlb_hit;
+ __u32 reg_tl_at_payld_devtlb_miss;
+ __u32 reg_tl_re_cnt;
+ __u32 reserved1;
+};
+
+#define ADF_GEN6_TL_RP_REG_SZ sizeof(struct adf_gen6_tl_ring_pair_data_regs)
+
+/**
+ * struct adf_gen6_tl_layout - This structure represents the entire telemetry
+ * counters data: Device + 4 Ring Pairs as they are being populated periodically
+ * by device.
+ * @tl_device_data_regs: structure of device telemetry registers
+ * @tl_ring_pairs_data_regs: array of ring pairs telemetry registers
+ * @reg_tl_msg_cnt: telemetry message counter
+ * @reserved: reserved
+ */
+struct adf_gen6_tl_layout {
+ struct adf_gen6_tl_device_data_regs tl_device_data_regs;
+ struct adf_gen6_tl_ring_pair_data_regs
+ tl_ring_pairs_data_regs[ADF_GEN6_TL_MAX_RP_NUM];
+ __u32 reg_tl_msg_cnt;
+ __u32 reserved;
+};
+
+#define ADF_GEN6_TL_LAYOUT_SZ sizeof(struct adf_gen6_tl_layout)
+#define ADF_GEN6_TL_MSG_CNT_OFF \
+ offsetof(struct adf_gen6_tl_layout, reg_tl_msg_cnt)
+
+#ifdef CONFIG_DEBUG_FS
+void adf_gen6_init_tl_data(struct adf_tl_hw_data *tl_data);
+#else
+static inline void adf_gen6_init_tl_data(struct adf_tl_hw_data *tl_data)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+#endif /* ADF_GEN6_TL_H */
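As a decoding aid only, not part of the patch: given a raw FW snapshot laid out as struct adf_gen6_tl_layout, a device counter can be read out directly. The example_ helper is hypothetical, and it assumes the bandwidth counter is reported in ADF_GEN6_TL_BW_HW_UNITS_TO_BYTES-sized units, which is how adf_gen6_init_tl_data() configures the debugfs layer to scale it.

#include "adf_gen6_tl.h"

/* Sketch: convert the reported PCIe write bandwidth into bytes. */
static u64 example_bw_in_bytes(const struct adf_gen6_tl_layout *snapshot)
{
	u64 units = snapshot->tl_device_data_regs.reg_tl_bw_in;

	return units * ADF_GEN6_TL_BW_HW_UNITS_TO_BYTES;
}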
diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
index f189cce7d153..46491048e0bb 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
@@ -404,6 +404,7 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
hw_data->exit_admin_comms(accel_dev);
adf_cleanup_etr_data(accel_dev);
+ adf_misc_wq_flush();
adf_dev_restore(accel_dev);
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c
index cae1aee5479a..12e565613661 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c
@@ -407,3 +407,8 @@ bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
{
return queue_delayed_work(adf_misc_wq, work, delay);
}
+
+void adf_misc_wq_flush(void)
+{
+ flush_workqueue(adf_misc_wq);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
new file mode 100644
index 000000000000..69295a9ddf0a
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/bitops.h>
+#include <linux/sprintf.h>
+#include <linux/string_helpers.h>
+
+#include "adf_pm_dbgfs_utils.h"
+
+/*
+ * This is needed because pm_scnprint_table() looks the mask up through a
+ * variable index, so it is not a compile-time constant and the compile-time
+ * asserts in FIELD_GET() or u32_get_bits() cannot be satisfied.
+ */
+#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
+
+#define PM_INFO_MAX_KEY_LEN 21
+
+static int pm_scnprint_table(char *buff, const struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size, int table_len,
+ bool lowercase)
+{
+ char key[PM_INFO_MAX_KEY_LEN];
+ int wr = 0;
+ int i;
+
+ for (i = 0; i < table_len; i++) {
+ if (lowercase)
+ string_lower(key, table[i].key);
+ else
+ string_upper(key, table[i].key);
+
+ wr += scnprintf(&buff[wr], buff_size - wr, "%s: %#x\n", key,
+ field_get(table[i].field_mask,
+ pm_info_regs[table[i].reg_offset]));
+ }
+
+ return wr;
+}
+
+int adf_pm_scnprint_table_upper_keys(char *buff, const struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size, int table_len)
+{
+ return pm_scnprint_table(buff, table, pm_info_regs, buff_size,
+ table_len, false);
+}
+
+int adf_pm_scnprint_table_lower_keys(char *buff, const struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size, int table_len)
+{
+ return pm_scnprint_table(buff, table, pm_info_regs, buff_size,
+ table_len, true);
+}
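Both the GEN4 and GEN6 debugfs code in this series consume these helpers the same way; a stripped-down sketch of the pattern follows, where EXAMPLE_FIELD, its BIT(0) mask and example_dump() are placeholders rather than definitions from this patch.

#include "adf_pm_dbgfs_utils.h"

/* Sketch: one pm_status_row per "key: value" output line (keys lowercased). */
static const struct pm_status_row example_rows[] = {
	PM_INFO_REGSET_ENTRY_MASK(fusectl0, EXAMPLE_FIELD, BIT(0)),
	PM_INFO_REGSET_ENTRY32(fusectl0, EXAMPLE_RAW_VALUE),
};

static int example_dump(char *buf, size_t size, u32 *pm_info_regs)
{
	return adf_pm_scnprint_table_lower_keys(buf, example_rows,
						pm_info_regs, size,
						ARRAY_SIZE(example_rows));
}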
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.h b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.h
new file mode 100644
index 000000000000..854f058b35ed
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_PM_DBGFS_UTILS_H_
+#define ADF_PM_DBGFS_UTILS_H_
+
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include "icp_qat_fw_init_admin.h"
+
+#define PM_INFO_MEMBER_OFF(member) \
+ (offsetof(struct icp_qat_fw_init_admin_pm_info, member) / sizeof(u32))
+
+#define PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, _mask_) \
+{ \
+ .reg_offset = PM_INFO_MEMBER_OFF(_reg_), \
+ .key = __stringify(_field_), \
+ .field_mask = _mask_, \
+}
+
+#define PM_INFO_REGSET_ENTRY32(_reg_, _field_) \
+ PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, GENMASK(31, 0))
+
+struct pm_status_row {
+ int reg_offset;
+ u32 field_mask;
+ const char *key;
+};
+
+int adf_pm_scnprint_table_upper_keys(char *buff, const struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size, int table_len);
+
+int adf_pm_scnprint_table_lower_keys(char *buff, const struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size, int table_len);
+
+#endif /* ADF_PM_DBGFS_UTILS_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c
index e782c23fc1bf..c6a54e465931 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c
@@ -13,6 +13,7 @@
#include <linux/units.h>
#include "adf_accel_devices.h"
+#include "adf_cfg_services.h"
#include "adf_common_drv.h"
#include "adf_rl_admin.h"
#include "adf_rl.h"
@@ -55,7 +56,7 @@ static int validate_user_input(struct adf_accel_dev *accel_dev,
}
}
- if (sla_in->srv >= ADF_SVC_NONE) {
+ if (sla_in->srv >= SVC_BASE_COUNT) {
dev_notice(&GET_DEV(accel_dev),
"Wrong service type\n");
return -EINVAL;
@@ -168,20 +169,6 @@ static struct rl_sla *find_parent(struct adf_rl *rl_data,
return NULL;
}
-static enum adf_cfg_service_type srv_to_cfg_svc_type(enum adf_base_services rl_srv)
-{
- switch (rl_srv) {
- case ADF_SVC_ASYM:
- return ASYM;
- case ADF_SVC_SYM:
- return SYM;
- case ADF_SVC_DC:
- return COMP;
- default:
- return UNUSED;
- }
-}
-
/**
* adf_rl_get_sla_arr_of_type() - Returns a pointer to SLA type specific array
* @rl_data: pointer to ratelimiting data
@@ -209,22 +196,6 @@ u32 adf_rl_get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type,
}
}
-static bool is_service_enabled(struct adf_accel_dev *accel_dev,
- enum adf_base_services rl_srv)
-{
- enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(rl_srv);
- struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
- u8 rps_per_bundle = hw_data->num_banks_per_vf;
- int i;
-
- for (i = 0; i < rps_per_bundle; i++) {
- if (GET_SRV_TYPE(accel_dev, i) == arb_srv)
- return true;
- }
-
- return false;
-}
-
/**
* prepare_rp_ids() - Creates an array of ring pair IDs from bitmask
* @accel_dev: pointer to acceleration device structure
@@ -243,7 +214,7 @@ static bool is_service_enabled(struct adf_accel_dev *accel_dev,
static int prepare_rp_ids(struct adf_accel_dev *accel_dev, struct rl_sla *sla,
const unsigned long rp_mask)
{
- enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(sla->srv);
+ enum adf_cfg_service_type arb_srv = adf_srv_to_cfg_svc_type(sla->srv);
u16 rps_per_bundle = GET_HW_DATA(accel_dev)->num_banks_per_vf;
bool *rp_in_use = accel_dev->rate_limiting->rp_in_use;
size_t rp_cnt_max = ARRAY_SIZE(sla->ring_pairs_ids);
@@ -558,21 +529,9 @@ u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val,
if (!sla_val)
return 0;
+ /* Handle generation specific slice count adjustment */
avail_slice_cycles = hw_data->clock_frequency;
-
- switch (svc_type) {
- case ADF_SVC_ASYM:
- avail_slice_cycles *= device_data->slices.pke_cnt;
- break;
- case ADF_SVC_SYM:
- avail_slice_cycles *= device_data->slices.cph_cnt;
- break;
- case ADF_SVC_DC:
- avail_slice_cycles *= device_data->slices.dcpr_cnt;
- break;
- default:
- break;
- }
+ avail_slice_cycles *= hw_data->get_svc_slice_cnt(accel_dev, svc_type);
do_div(avail_slice_cycles, device_data->scan_interval);
allocated_tokens = avail_slice_cycles * sla_val;
@@ -581,6 +540,17 @@ u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val,
return allocated_tokens;
}
+static u32 adf_rl_get_num_svc_aes(struct adf_accel_dev *accel_dev,
+ enum adf_base_services svc)
+{
+ struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
+
+ if (svc >= SVC_BASE_COUNT)
+ return 0;
+
+ return device_data->svc_ae_mask[svc];
+}
+
u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val,
enum adf_base_services svc_type)
{
@@ -592,7 +562,7 @@ u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val,
return 0;
avail_ae_cycles = hw_data->clock_frequency;
- avail_ae_cycles *= hw_data->get_num_aes(hw_data) - 1;
+ avail_ae_cycles *= adf_rl_get_num_svc_aes(accel_dev, svc_type);
do_div(avail_ae_cycles, device_data->scan_interval);
sla_val *= device_data->max_tp[svc_type];
@@ -617,9 +587,8 @@ u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val,
sla_to_bytes *= device_data->max_tp[svc_type];
do_div(sla_to_bytes, device_data->scale_ref);
- sla_to_bytes *= (svc_type == ADF_SVC_ASYM) ? RL_TOKEN_ASYM_SIZE :
- BYTES_PER_MBIT;
- if (svc_type == ADF_SVC_DC && is_bw_out)
+ sla_to_bytes *= (svc_type == SVC_ASYM) ? RL_TOKEN_ASYM_SIZE : BYTES_PER_MBIT;
+ if (svc_type == SVC_DC && is_bw_out)
sla_to_bytes *= device_data->slices.dcpr_cnt -
device_data->dcpr_correction;
@@ -660,7 +629,7 @@ static int add_new_sla_entry(struct adf_accel_dev *accel_dev,
}
*sla_out = sla;
- if (!is_service_enabled(accel_dev, sla_in->srv)) {
+ if (!adf_is_service_enabled(accel_dev, sla_in->srv)) {
dev_notice(&GET_DEV(accel_dev),
"Provided service is not enabled\n");
ret = -EINVAL;
@@ -730,8 +699,8 @@ static int initialize_default_nodes(struct adf_accel_dev *accel_dev)
sla_in.type = RL_ROOT;
sla_in.parent_id = RL_PARENT_DEFAULT_ID;
- for (i = 0; i < ADF_SVC_NONE; i++) {
- if (!is_service_enabled(accel_dev, i))
+ for (i = 0; i < SVC_BASE_COUNT; i++) {
+ if (!adf_is_service_enabled(accel_dev, i))
continue;
sla_in.cir = device_data->scale_ref;
@@ -745,10 +714,9 @@ static int initialize_default_nodes(struct adf_accel_dev *accel_dev)
/* Init default cluster for each root */
sla_in.type = RL_CLUSTER;
- for (i = 0; i < ADF_SVC_NONE; i++) {
+ for (i = 0; i < SVC_BASE_COUNT; i++) {
if (!rl_data->root[i])
continue;
-
sla_in.cir = rl_data->root[i]->cir;
sla_in.pir = sla_in.cir;
sla_in.srv = rl_data->root[i]->srv;
@@ -987,7 +955,7 @@ int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev,
struct rl_sla *sla = NULL;
int i;
- if (srv >= ADF_SVC_NONE)
+ if (srv >= SVC_BASE_COUNT)
return -EINVAL;
if (sla_id > RL_SLA_EMPTY_ID && !validate_sla_id(accel_dev, sla_id)) {
@@ -1086,9 +1054,9 @@ int adf_rl_init(struct adf_accel_dev *accel_dev)
int ret = 0;
/* Validate device parameters */
- if (RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_ASYM]) ||
- RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_SYM]) ||
- RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_DC]) ||
+ if (RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[SVC_ASYM]) ||
+ RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[SVC_SYM]) ||
+ RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[SVC_DC]) ||
RL_VALIDATE_NON_ZERO(rl_hw_data->scan_interval) ||
RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_div) ||
RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_mul) ||
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.h b/drivers/crypto/intel/qat/qat_common/adf_rl.h
index bfe750ea0e83..c1f3f9a51195 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.h
@@ -7,6 +7,8 @@
#include <linux/mutex.h>
#include <linux/types.h>
+#include "adf_cfg_services.h"
+
struct adf_accel_dev;
#define RL_ROOT_MAX 4
@@ -24,13 +26,6 @@ enum rl_node_type {
RL_LEAF,
};
-enum adf_base_services {
- ADF_SVC_ASYM = 0,
- ADF_SVC_SYM,
- ADF_SVC_DC,
- ADF_SVC_NONE,
-};
-
/**
* struct adf_rl_sla_input_data - ratelimiting user input data structure
* @rp_mask: 64 bit bitmask of ring pair IDs which will be assigned to SLA.
@@ -73,6 +68,7 @@ struct rl_slice_cnt {
u8 dcpr_cnt;
u8 pke_cnt;
u8 cph_cnt;
+ u8 cpr_cnt;
};
struct adf_rl_interface_data {
@@ -94,6 +90,7 @@ struct adf_rl_hw_data {
u32 pcie_scale_div;
u32 dcpr_correction;
u32 max_tp[RL_ROOT_MAX];
+ u32 svc_ae_mask[SVC_BASE_COUNT];
struct rl_slice_cnt slices;
};
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c
index 698a14f4ce66..4a3e0591fdba 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c
@@ -63,6 +63,7 @@ int adf_rl_send_admin_init_msg(struct adf_accel_dev *accel_dev,
slices_int->pke_cnt = slices_resp.pke_cnt;
/* For symmetric crypto, slice tokens are relative to the UCS slice */
slices_int->cph_cnt = slices_resp.ucs_cnt;
+ slices_int->cpr_cnt = slices_resp.cpr_cnt;
return 0;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
index c75d0b6cb0ad..31d1ef0cb1f5 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
@@ -155,7 +155,6 @@ static int adf_do_enable_sriov(struct adf_accel_dev *accel_dev)
if (!device_iommu_mapped(&GET_DEV(accel_dev))) {
dev_warn(&GET_DEV(accel_dev),
"IOMMU should be enabled for SR-IOV to work correctly\n");
- return -EINVAL;
}
if (adf_dev_started(accel_dev)) {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
index 6c39194647f0..79c63dfa8ff3 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
@@ -269,6 +269,8 @@ static ssize_t rp2srv_show(struct device *dev, struct device_attribute *attr,
return sysfs_emit(buf, "%s\n", ADF_CFG_SYM);
case ASYM:
return sysfs_emit(buf, "%s\n", ADF_CFG_ASYM);
+ case DECOMP:
+ return sysfs_emit(buf, "%s\n", ADF_CFG_DECOMP);
default:
break;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
index bedb514d4e30..f31556beed8b 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
@@ -32,9 +32,10 @@ enum rl_params {
};
static const char *const rl_services[] = {
- [ADF_SVC_ASYM] = "asym",
- [ADF_SVC_SYM] = "sym",
- [ADF_SVC_DC] = "dc",
+ [SVC_ASYM] = "asym",
+ [SVC_SYM] = "sym",
+ [SVC_DC] = "dc",
+ [SVC_DECOMP] = "decomp",
};
static const char *const rl_operations[] = {
@@ -282,7 +283,7 @@ static ssize_t srv_show(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- if (get == ADF_SVC_NONE)
+ if (get == SVC_BASE_COUNT)
return -EINVAL;
return sysfs_emit(buf, "%s\n", rl_services[get]);
@@ -291,14 +292,22 @@ static ssize_t srv_show(struct device *dev, struct device_attribute *attr,
static ssize_t srv_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct adf_accel_dev *accel_dev;
unsigned int val;
int ret;
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
ret = sysfs_match_string(rl_services, buf);
if (ret < 0)
return ret;
val = ret;
+ if (!adf_is_service_enabled(accel_dev, val))
+ return -EINVAL;
+
ret = set_param_u(dev, SRV, val);
if (ret)
return ret;
@@ -439,8 +448,8 @@ int adf_sysfs_rl_add(struct adf_accel_dev *accel_dev)
dev_err(&GET_DEV(accel_dev),
"Failed to create qat_rl attribute group\n");
- data->cap_rem_srv = ADF_SVC_NONE;
- data->input.srv = ADF_SVC_NONE;
+ data->cap_rem_srv = SVC_BASE_COUNT;
+ data->input.srv = SVC_BASE_COUNT;
data->sysfs_added = true;
return ret;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
index f20ae7e35a0d..a32db273842a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
@@ -538,6 +538,9 @@ static void tl_print_rp_srv(struct adf_accel_dev *accel_dev, struct seq_file *s,
case ASYM:
seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_ASYM);
break;
+ case DECOMP:
+ seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_DECOMP);
+ break;
default:
seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, TL_RP_SRV_UNKNOWN);
break;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
index e2dd568b87b5..6c22bc9b28e4 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
@@ -10,16 +10,21 @@
static DEFINE_MUTEX(ring_read_lock);
static DEFINE_MUTEX(bank_read_lock);
+#define ADF_RING_NUM_MSGS(ring) \
+ (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) / \
+ ADF_MSG_SIZE_TO_BYTES(ring->msg_size))
+
static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
{
struct adf_etr_ring_data *ring = sfile->private;
+ unsigned int num_msg = ADF_RING_NUM_MSGS(ring);
+ loff_t val = *pos;
mutex_lock(&ring_read_lock);
- if (*pos == 0)
+ if (val == 0)
return SEQ_START_TOKEN;
- if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
- ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+ if (val >= num_msg)
return NULL;
return ring->base_addr +
@@ -29,13 +34,15 @@ static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos)
{
struct adf_etr_ring_data *ring = sfile->private;
+ unsigned int num_msg = ADF_RING_NUM_MSGS(ring);
+ loff_t val = *pos;
+
+ (*pos)++;
- if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
- ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+ if (val >= num_msg)
return NULL;
- return ring->base_addr +
- (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+ return ring->base_addr + (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * val);
}
static int adf_ring_show(struct seq_file *sfile, void *v)
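The adf_ring_next() rework above follows the seq_file rule that ->next() must always advance *pos, even once it has run out of items, otherwise the core keeps retrying with a stale position. A generic sketch of that pattern; example_item_count() and example_item() are hypothetical helpers, not part of this driver.

#include <linux/seq_file.h>

/* Sketch: return the entry for the position we were called with and
 * unconditionally bump *pos; NULL signals the end of the sequence.
 */
static void *example_seq_next(struct seq_file *sfile, void *v, loff_t *pos)
{
	loff_t cur = (*pos)++;

	if (cur >= example_item_count(sfile->private))
		return NULL;

	return example_item(sfile->private, cur);
}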
diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c
index 3c4bba4a8779..43e6dd9b77b7 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_algs.c
@@ -5,11 +5,11 @@
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
+#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
-#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
@@ -154,19 +154,19 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
switch (ctx->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export(shash, &ctx->sha1))
+ if (crypto_shash_export_core(shash, &ctx->sha1))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export(shash, &ctx->sha256))
+ if (crypto_shash_export_core(shash, &ctx->sha256))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export(shash, &ctx->sha512))
+ if (crypto_shash_export_core(shash, &ctx->sha512))
return -EFAULT;
for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
@@ -190,19 +190,19 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
switch (ctx->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export(shash, &ctx->sha1))
+ if (crypto_shash_export_core(shash, &ctx->sha1))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export(shash, &ctx->sha256))
+ if (crypto_shash_export_core(shash, &ctx->sha256))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export(shash, &ctx->sha512))
+ if (crypto_shash_export_core(shash, &ctx->sha512))
return -EFAULT;
for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
@@ -1277,7 +1277,7 @@ static struct aead_alg qat_aeads[] = { {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha1",
- .cra_priority = 4001,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
@@ -1294,7 +1294,7 @@ static struct aead_alg qat_aeads[] = { {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha256",
- .cra_priority = 4001,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
@@ -1311,7 +1311,7 @@ static struct aead_alg qat_aeads[] = { {
.base = {
.cra_name = "authenc(hmac(sha512),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha512",
- .cra_priority = 4001,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
@@ -1329,7 +1329,7 @@ static struct aead_alg qat_aeads[] = { {
static struct skcipher_alg qat_skciphers[] = { {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "qat_aes_cbc",
- .base.cra_priority = 4001,
+ .base.cra_priority = 100,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
@@ -1347,7 +1347,7 @@ static struct skcipher_alg qat_skciphers[] = { {
}, {
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "qat_aes_ctr",
- .base.cra_priority = 4001,
+ .base.cra_priority = 100,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
@@ -1365,7 +1365,7 @@ static struct skcipher_alg qat_skciphers[] = { {
}, {
.base.cra_name = "xts(aes)",
.base.cra_driver_name = "qat_aes_xts",
- .base.cra_priority = 4001,
+ .base.cra_priority = 100,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c
index 5e4dad4693ca..9b2338f58d97 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_bl.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.c
@@ -38,7 +38,7 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
for (i = 0; i < blout->num_mapped_bufs; i++) {
dma_unmap_single(dev, blout->buffers[i].addr,
blout->buffers[i].len,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
}
dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
@@ -162,7 +162,7 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
}
buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
sg->length - left,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
goto err_out;
buffers[y].len = sg->length;
@@ -204,7 +204,7 @@ err_out:
if (!dma_mapping_error(dev, buflout->buffers[i].addr))
dma_unmap_single(dev, buflout->buffers[i].addr,
buflout->buffers[i].len,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
}
if (!buf->sgl_dst_valid)
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c
index c285b45b8679..53a4db5507ec 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_compression.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.c
@@ -196,7 +196,7 @@ static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
struct adf_dc_data *dc_data = NULL;
u8 *obuff = NULL;
- dc_data = devm_kzalloc(dev, sizeof(*dc_data), GFP_KERNEL);
+ dc_data = kzalloc_node(sizeof(*dc_data), GFP_KERNEL, dev_to_node(dev));
if (!dc_data)
goto err;
@@ -204,7 +204,7 @@ static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
if (!obuff)
goto err;
- obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_FROM_DEVICE);
+ obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, obuff_p)))
goto err;
@@ -232,9 +232,9 @@ static void qat_free_dc_data(struct adf_accel_dev *accel_dev)
return;
dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
kfree_sensitive(dc_data->ovf_buff);
- devm_kfree(dev, dc_data);
+ kfree(dc_data);
accel_dev->dc_data = NULL;
}
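
The allocation above moves from devm_kzalloc() to a node-local kzalloc_node() paired with an explicit kfree() in the teardown path. A minimal sketch of that pattern (hypothetical structure and helper name, used only for illustration):

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_dc_data {
	void *ovf_buff;
	dma_addr_t ovf_buff_p;
	size_t ovf_buff_sz;
};

/* Allocate on the NUMA node the device sits on; the caller owns the
 * memory and must release it with kfree() (no devres cleanup). */
static struct example_dc_data *example_alloc_dc_data(struct device *dev)
{
	return kzalloc_node(sizeof(struct example_dc_data), GFP_KERNEL,
			    dev_to_node(dev));
}
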
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index 48c5c8ea8c43..3fe0fd9226cf 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -75,9 +75,12 @@ mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+ struct mv_cesa_engine *engine = creq->base.engine;
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_skcipher_dma_cleanup(req);
+
+ atomic_sub(req->cryptlen, &engine->load);
}
static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
@@ -212,7 +215,6 @@ mv_cesa_skcipher_complete(struct crypto_async_request *req)
struct mv_cesa_engine *engine = creq->base.engine;
unsigned int ivsize;
- atomic_sub(skreq->cryptlen, &engine->load);
ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index 6815eddc9068..5103d36cdfdb 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -110,9 +110,12 @@ static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+ struct mv_cesa_engine *engine = creq->base.engine;
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_ahash_dma_cleanup(req);
+
+ atomic_sub(req->nbytes, &engine->load);
}
static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
@@ -362,16 +365,13 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
(creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
CESA_TDMA_RESULT) {
- __le32 *data = NULL;
+ const void *data;
/*
* Result is already in the correct endianness when the SA is
* used
*/
data = creq->base.chain.last->op->ctx.hash.hash;
- for (i = 0; i < digsize / 4; i++)
- creq->state[i] = le32_to_cpu(data[i]);
-
memcpy(ahashreq->result, data, digsize);
} else {
for (i = 0; i < digsize / 4; i++)
@@ -395,8 +395,6 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
}
}
}
-
- atomic_sub(ahashreq->nbytes, &engine->load);
}
static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index d529bcb03775..062def303dce 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -18,9 +18,8 @@
#define OTX2_CPT_MAX_VFS_NUM 128
#define OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs) \
(((blk) << 20) | ((slot) << 12) | (offs))
-#define OTX2_CPT_RVU_PFFUNC(pf, func) \
- ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
- (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+
+#define OTX2_CPT_RVU_PFFUNC(pdev, pf, func) rvu_make_pcifunc(pdev, pf, func)
#define OTX2_CPT_INVALID_CRYPTO_ENG_GRP 0xFF
#define OTX2_CPT_NAME_LENGTH 64
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
index e27e849b01df..e64ca30335de 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
@@ -34,6 +34,9 @@
#define SG_COMP_2 2
#define SG_COMP_1 1
+#define OTX2_CPT_DPTR_RPTR_ALIGN 8
+#define OTX2_CPT_RES_ADDR_ALIGN 32
+
union otx2_cpt_opcode {
u16 flags;
struct {
@@ -347,22 +350,48 @@ static inline struct otx2_cpt_inst_info *
cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
gfp_t gfp)
{
- u32 dlen = 0, g_len, sg_len, info_len;
- int align = OTX2_CPT_DMA_MINALIGN;
+ u32 dlen = 0, g_len, s_len, sg_len, info_len;
struct otx2_cpt_inst_info *info;
- u16 g_sz_bytes, s_sz_bytes;
u32 total_mem_len;
int i;
- g_sz_bytes = ((req->in_cnt + 2) / 3) *
- sizeof(struct cn10kb_cpt_sglist_component);
- s_sz_bytes = ((req->out_cnt + 2) / 3) *
- sizeof(struct cn10kb_cpt_sglist_component);
+ /* Allocate memory to meet below alignment requirement:
+ * ------------------------------------
+ * | struct otx2_cpt_inst_info        |
+ * | (No alignment required)          |
+ * |  --------------------------------|
+ * |  | padding for ARCH_DMA_MINALIGN |
+ * |  | alignment                     |
+ * |----------------------------------|
+ * | SG List Gather/Input memory      |
+ * | Length = multiple of 32Bytes     |
+ * | Alignment = 8Byte                |
+ * |----------------------------------|
+ * | SG List Scatter/Output memory    |
+ * | Length = multiple of 32Bytes     |
+ * | Alignment = 8Byte                |
+ * |   -------------------------------|
+ * |   | padding for 32B alignment    |
+ * |----------------------------------|
+ * | Result response memory           |
+ * | Alignment = 32Byte               |
+ * ------------------------------------
+ */
+
+ info_len = sizeof(*info);
+
+ g_len = ((req->in_cnt + 2) / 3) *
+ sizeof(struct cn10kb_cpt_sglist_component);
+ s_len = ((req->out_cnt + 2) / 3) *
+ sizeof(struct cn10kb_cpt_sglist_component);
+ sg_len = g_len + s_len;
- g_len = ALIGN(g_sz_bytes, align);
- sg_len = ALIGN(g_len + s_sz_bytes, align);
- info_len = ALIGN(sizeof(*info), align);
- total_mem_len = sg_len + info_len + sizeof(union otx2_cpt_res_s);
+ /* Allocate extra memory for SG and response address alignment */
+ total_mem_len = ALIGN(info_len, OTX2_CPT_DPTR_RPTR_ALIGN);
+ total_mem_len += (ARCH_DMA_MINALIGN - 1) &
+ ~(OTX2_CPT_DPTR_RPTR_ALIGN - 1);
+ total_mem_len += ALIGN(sg_len, OTX2_CPT_RES_ADDR_ALIGN);
+ total_mem_len += sizeof(union otx2_cpt_res_s);
info = kzalloc(total_mem_len, gfp);
if (unlikely(!info))
@@ -372,7 +401,8 @@ cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
dlen += req->in[i].size;
info->dlen = dlen;
- info->in_buffer = (u8 *)info + info_len;
+ info->in_buffer = PTR_ALIGN((u8 *)info + info_len, ARCH_DMA_MINALIGN);
+ info->out_buffer = info->in_buffer + g_len;
info->gthr_sz = req->in_cnt;
info->sctr_sz = req->out_cnt;
@@ -384,7 +414,7 @@ cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
}
if (sgv2io_components_setup(pdev, req->out, req->out_cnt,
- &info->in_buffer[g_len])) {
+ info->out_buffer)) {
dev_err(&pdev->dev, "Failed to setup scatter list\n");
goto destroy_info;
}
@@ -401,8 +431,10 @@ cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
* Get buffer for union otx2_cpt_res_s response
* structure and its physical address
*/
- info->completion_addr = info->in_buffer + sg_len;
- info->comp_baddr = info->dptr_baddr + sg_len;
+ info->completion_addr = PTR_ALIGN((info->in_buffer + sg_len),
+ OTX2_CPT_RES_ADDR_ALIGN);
+ info->comp_baddr = ALIGN((info->dptr_baddr + sg_len),
+ OTX2_CPT_RES_ADDR_ALIGN);
return info;
@@ -417,10 +449,9 @@ static inline struct otx2_cpt_inst_info *
otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
gfp_t gfp)
{
- int align = OTX2_CPT_DMA_MINALIGN;
struct otx2_cpt_inst_info *info;
- u32 dlen, align_dlen, info_len;
- u16 g_sz_bytes, s_sz_bytes;
+ u32 dlen, info_len;
+ u16 g_len, s_len;
u32 total_mem_len;
if (unlikely(req->in_cnt > OTX2_CPT_MAX_SG_IN_CNT ||
@@ -429,22 +460,54 @@ otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
return NULL;
}
- g_sz_bytes = ((req->in_cnt + 3) / 4) *
- sizeof(struct otx2_cpt_sglist_component);
- s_sz_bytes = ((req->out_cnt + 3) / 4) *
- sizeof(struct otx2_cpt_sglist_component);
+ /* Allocate memory to meet below alignment requirement:
+ * ------------------------------------
+ * | struct otx2_cpt_inst_info        |
+ * | (No alignment required)          |
+ * |  --------------------------------|
+ * |  | padding for ARCH_DMA_MINALIGN |
+ * |  | alignment                     |
+ * |----------------------------------|
+ * | SG List Header of 8 Byte         |
+ * |----------------------------------|
+ * | SG List Gather/Input memory      |
+ * | Length = multiple of 32Bytes     |
+ * | Alignment = 8Byte                |
+ * |----------------------------------|
+ * | SG List Scatter/Output memory    |
+ * | Length = multiple of 32Bytes     |
+ * | Alignment = 8Byte                |
+ * |   -------------------------------|
+ * |   | padding for 32B alignment    |
+ * |----------------------------------|
+ * | Result response memory           |
+ * | Alignment = 32Byte               |
+ * ------------------------------------
+ */
+
+ info_len = sizeof(*info);
+
+ g_len = ((req->in_cnt + 3) / 4) *
+ sizeof(struct otx2_cpt_sglist_component);
+ s_len = ((req->out_cnt + 3) / 4) *
+ sizeof(struct otx2_cpt_sglist_component);
+
+ dlen = g_len + s_len + SG_LIST_HDR_SIZE;
- dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
- align_dlen = ALIGN(dlen, align);
- info_len = ALIGN(sizeof(*info), align);
- total_mem_len = align_dlen + info_len + sizeof(union otx2_cpt_res_s);
+ /* Allocate extra memory for SG and response address alignment */
+ total_mem_len = ALIGN(info_len, OTX2_CPT_DPTR_RPTR_ALIGN);
+ total_mem_len += (ARCH_DMA_MINALIGN - 1) &
+ ~(OTX2_CPT_DPTR_RPTR_ALIGN - 1);
+ total_mem_len += ALIGN(dlen, OTX2_CPT_RES_ADDR_ALIGN);
+ total_mem_len += sizeof(union otx2_cpt_res_s);
info = kzalloc(total_mem_len, gfp);
if (unlikely(!info))
return NULL;
info->dlen = dlen;
- info->in_buffer = (u8 *)info + info_len;
+ info->in_buffer = PTR_ALIGN((u8 *)info + info_len, ARCH_DMA_MINALIGN);
+ info->out_buffer = info->in_buffer + SG_LIST_HDR_SIZE + g_len;
((u16 *)info->in_buffer)[0] = req->out_cnt;
((u16 *)info->in_buffer)[1] = req->in_cnt;
@@ -460,7 +523,7 @@ otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
}
if (setup_sgio_components(pdev, req->out, req->out_cnt,
- &info->in_buffer[8 + g_sz_bytes])) {
+ info->out_buffer)) {
dev_err(&pdev->dev, "Failed to setup scatter list\n");
goto destroy_info;
}
@@ -476,8 +539,10 @@ otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
* Get buffer for union otx2_cpt_res_s response
* structure and its physical address
*/
- info->completion_addr = info->in_buffer + align_dlen;
- info->comp_baddr = info->dptr_baddr + align_dlen;
+ info->completion_addr = PTR_ALIGN((info->in_buffer + dlen),
+ OTX2_CPT_RES_ADDR_ALIGN);
+ info->comp_baddr = ALIGN((info->dptr_baddr + dlen),
+ OTX2_CPT_RES_ADDR_ALIGN);
return info;
@@ -490,6 +555,7 @@ struct otx2_cptlf_wqe;
int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
int cpu_num);
void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe);
-int otx2_cpt_get_kcrypto_eng_grp_num(struct pci_dev *pdev);
+int otx2_cpt_get_eng_grp_num(struct pci_dev *pdev,
+ enum otx2_cpt_eng_type);
#endif /* __OTX2_CPT_REQMGR_H */
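
The layout comments and size math introduced above amount to one over-allocation that is later carved into aligned sub-regions with PTR_ALIGN(). A condensed sketch of that arithmetic (illustrative only; it reuses the constants this header defines):

#include <linux/align.h>
#include <linux/cache.h>
#include <linux/types.h>

/* Worst-case length so that an ARCH_DMA_MINALIGN-aligned SG area and a
 * 32-byte-aligned response slot both fit behind the info structure. */
static inline size_t example_total_len(size_t info_len, size_t sg_len,
				       size_t res_len)
{
	size_t len;

	len  = ALIGN(info_len, OTX2_CPT_DPTR_RPTR_ALIGN);
	len += (ARCH_DMA_MINALIGN - 1) & ~(OTX2_CPT_DPTR_RPTR_ALIGN - 1);
	len += ALIGN(sg_len, OTX2_CPT_RES_ADDR_ALIGN);
	len += res_len;
	return len;
}

/* After kzalloc(example_total_len(...), gfp), the aligned pointers follow as:
 *   in_buffer       = PTR_ALIGN(base + info_len, ARCH_DMA_MINALIGN);
 *   completion_addr = PTR_ALIGN(in_buffer + sg_len, OTX2_CPT_RES_ADDR_ALIGN);
 */
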
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
index 6e004a5568d8..1b9f75214d18 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -124,7 +124,8 @@ struct otx2_cptlfs_info {
struct cpt_hw_ops *ops;
u8 are_lfs_attached; /* Whether CPT LFs are attached */
u8 lfs_num; /* Number of CPT LFs */
- u8 kcrypto_eng_grp_num; /* Kernel crypto engine group number */
+ u8 kcrypto_se_eng_grp_num; /* Crypto symmetric engine group number */
+ u8 kcrypto_ae_eng_grp_num; /* Crypto asymmetric engine group number */
u8 kvf_limits; /* Kernel crypto limits */
atomic_t state; /* LF's state. started/reset */
int blkaddr; /* CPT blkaddr: BLKADDR_CPT0/BLKADDR_CPT1 */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
index 12c0e966fa65..b4b2d3d1cbc2 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
@@ -142,7 +142,7 @@ static int send_inline_ipsec_inbound_msg(struct otx2_cptpf_dev *cptpf,
memset(req, 0, sizeof(*req));
req->hdr.id = MBOX_MSG_CPT_INLINE_IPSEC_CFG;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
- req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
+ req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pdev, cptpf->pf_id, 0);
req->dir = CPT_INLINE_INBOUND;
req->slot = slot;
req->sso_pf_func_ovrd = cptpf->sso_pf_func_ovrd;
@@ -184,7 +184,8 @@ static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp,
nix_req->gen_cfg.opcode = cpt_inline_rx_opcode(pdev);
nix_req->gen_cfg.param1 = req->param1;
nix_req->gen_cfg.param2 = req->param2;
- nix_req->inst_qsel.cpt_pf_func = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
+ nix_req->inst_qsel.cpt_pf_func =
+ OTX2_CPT_RVU_PFFUNC(cptpf->pdev, cptpf->pf_id, 0);
nix_req->inst_qsel.cpt_slot = 0;
ret = otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
if (ret)
@@ -392,9 +393,8 @@ void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
/* Set which VF sent this message based on mbox IRQ */
- msg->pcifunc = ((u16)cptpf->pf_id << RVU_PFVF_PF_SHIFT) |
- ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
-
+ msg->pcifunc = rvu_make_pcifunc(cptpf->pdev, cptpf->pf_id,
+ (vf->vf_id + 1));
err = cptpf_handle_vf_req(cptpf, vf, msg,
msg->next_msgoff - offset);
/*
@@ -469,8 +469,7 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
switch (msg->id) {
case MBOX_MSG_READY:
- cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
- RVU_PFVF_PF_MASK;
+ cptpf->pf_id = rvu_get_pf(cptpf->pdev, msg->pcifunc);
break;
case MBOX_MSG_MSIX_OFFSET:
rsp_msix = (struct msix_offset_rsp *) msg;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 78367849c3d5..cc47e361089a 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -176,7 +176,9 @@ static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
/* Set PF number for microcode fetches */
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_PF_FUNC,
- cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr);
+ rvu_make_pcifunc(cptpf->pdev,
+ cptpf->pf_id, 0),
+ blkaddr);
if (ret)
return ret;
@@ -1491,11 +1493,13 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
union otx2_cpt_opcode opcode;
union otx2_cpt_res_s *result;
union otx2_cpt_inst_s inst;
+ dma_addr_t result_baddr;
dma_addr_t rptr_baddr;
struct pci_dev *pdev;
- u32 len, compl_rlen;
+ int timeout = 10000;
+ void *base, *rptr;
int ret, etype;
- void *rptr;
+ u32 len;
/*
* We don't get capabilities if it was already done
@@ -1518,22 +1522,28 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
if (ret)
goto delete_grps;
- compl_rlen = ALIGN(sizeof(union otx2_cpt_res_s), OTX2_CPT_DMA_MINALIGN);
- len = compl_rlen + LOADFVC_RLEN;
+ /* Allocate extra memory for "rptr" and "result" pointer alignment */
+ len = LOADFVC_RLEN + ARCH_DMA_MINALIGN +
+ sizeof(union otx2_cpt_res_s) + OTX2_CPT_RES_ADDR_ALIGN;
- result = kzalloc(len, GFP_KERNEL);
- if (!result) {
+ base = kzalloc(len, GFP_KERNEL);
+ if (!base) {
ret = -ENOMEM;
goto lf_cleanup;
}
- rptr_baddr = dma_map_single(&pdev->dev, (void *)result, len,
- DMA_BIDIRECTIONAL);
+
+ rptr = PTR_ALIGN(base, ARCH_DMA_MINALIGN);
+ rptr_baddr = dma_map_single(&pdev->dev, rptr, len, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
dev_err(&pdev->dev, "DMA mapping failed\n");
ret = -EFAULT;
- goto free_result;
+ goto free_rptr;
}
- rptr = (u8 *)result + compl_rlen;
+
+ result = (union otx2_cpt_res_s *)PTR_ALIGN(rptr + LOADFVC_RLEN,
+ OTX2_CPT_RES_ADDR_ALIGN);
+ result_baddr = ALIGN(rptr_baddr + LOADFVC_RLEN,
+ OTX2_CPT_RES_ADDR_ALIGN);
/* Fill in the command */
opcode.s.major = LOADFVC_MAJOR_OP;
@@ -1545,27 +1555,38 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
/* 64-bit swap for microcode data reads, not needed for addresses */
cpu_to_be64s(&iq_cmd.cmd.u);
iq_cmd.dptr = 0;
- iq_cmd.rptr = rptr_baddr + compl_rlen;
+ iq_cmd.rptr = rptr_baddr;
iq_cmd.cptr.u = 0;
for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
etype);
- otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
+ otx2_cpt_fill_inst(&inst, &iq_cmd, result_baddr);
lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);
+ timeout = 10000;
while (lfs->ops->cpt_get_compcode(result) ==
- OTX2_CPT_COMPLETION_CODE_INIT)
+ OTX2_CPT_COMPLETION_CODE_INIT) {
cpu_relax();
+ udelay(1);
+ timeout--;
+ if (!timeout) {
+ ret = -ENODEV;
+ cptpf->is_eng_caps_discovered = false;
+ dev_warn(&pdev->dev, "Timeout on CPT load_fvc completion poll\n");
+ goto error_no_response;
+ }
+ }
cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
}
- dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
cptpf->is_eng_caps_discovered = true;
-free_result:
- kfree(result);
+error_no_response:
+ dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
+free_rptr:
+ kfree(base);
lf_cleanup:
otx2_cptlf_shutdown(lfs);
delete_grps:
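
The capability-discovery loop above now bounds its busy-wait instead of spinning forever on a completion code that may never change. A minimal generic sketch of such a bounded poll (illustrative; the helper name and the error code are assumptions, not the driver's exact choices):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Poll a completion byte for up to ~10 ms (10000 x 1 us) before giving
 * up, so a hung engine cannot stall the caller indefinitely. */
static int example_poll_completion(const volatile u8 *compcode, u8 init_val)
{
	int timeout = 10000;

	while (*compcode == init_val) {
		udelay(1);
		if (!--timeout)
			return -ETIMEDOUT;
	}
	return 0;
}
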
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
index 7eb0bc13994d..8d9f394d6b50 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -384,7 +384,8 @@ static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
req_info->req_type = OTX2_CPT_ENC_DEC_REQ;
req_info->is_enc = enc;
req_info->is_trunc_hmac = false;
- req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
+ req_info->ctrl.s.grp = otx2_cpt_get_eng_grp_num(pdev,
+ OTX2_CPT_SE_TYPES);
req_info->req.cptr = ctx->er_ctx.hw_ctx;
req_info->req.cptr_dma = ctx->er_ctx.cptr_dma;
@@ -1288,7 +1289,8 @@ static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
if (status)
return status;
- req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
+ req_info->ctrl.s.grp = otx2_cpt_get_eng_grp_num(pdev,
+ OTX2_CPT_SE_TYPES);
/*
* We perform an asynchronous send and once
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
index 56904bdfd6e8..c1c44a7b89fa 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -265,17 +265,33 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
u8 eng_grp_msk;
/* Get engine group number for symmetric crypto */
- cptvf->lfs.kcrypto_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
+ cptvf->lfs.kcrypto_se_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_SE_TYPES);
if (ret)
return ret;
- if (cptvf->lfs.kcrypto_eng_grp_num == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
- dev_err(dev, "Engine group for kernel crypto not available\n");
- ret = -ENOENT;
+ if (cptvf->lfs.kcrypto_se_eng_grp_num ==
+ OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
+ dev_err(dev,
+ "Symmetric Engine group for crypto not available\n");
+ return -ENOENT;
+ }
+
+ /* Get engine group number for asymmetric crypto */
+ cptvf->lfs.kcrypto_ae_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
+ ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_AE_TYPES);
+ if (ret)
return ret;
+
+ if (cptvf->lfs.kcrypto_ae_eng_grp_num ==
+ OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
+ dev_err(dev,
+ "Asymmetric Engine group for crypto not available\n");
+ return -ENOENT;
}
- eng_grp_msk = 1 << cptvf->lfs.kcrypto_eng_grp_num;
+
+ eng_grp_msk = BIT(cptvf->lfs.kcrypto_se_eng_grp_num) |
+ BIT(cptvf->lfs.kcrypto_ae_eng_grp_num);
ret = otx2_cptvf_send_kvf_limits_msg(cptvf);
if (ret)
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
index 931b72580fd9..5277bcfa275e 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -75,6 +75,7 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
struct otx2_cpt_caps_rsp *eng_caps;
struct cpt_rd_wr_reg_msg *rsp_reg;
struct msix_offset_rsp *rsp_msix;
+ u8 grp_num;
int i;
if (msg->id >= MBOX_MSG_MAX) {
@@ -122,7 +123,11 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
break;
case MBOX_MSG_GET_ENG_GRP_NUM:
rsp_grp = (struct otx2_cpt_egrp_num_rsp *) msg;
- cptvf->lfs.kcrypto_eng_grp_num = rsp_grp->eng_grp_num;
+ grp_num = rsp_grp->eng_grp_num;
+ if (rsp_grp->eng_type == OTX2_CPT_SE_TYPES)
+ cptvf->lfs.kcrypto_se_eng_grp_num = grp_num;
+ else if (rsp_grp->eng_type == OTX2_CPT_AE_TYPES)
+ cptvf->lfs.kcrypto_ae_eng_grp_num = grp_num;
break;
case MBOX_MSG_GET_KVF_LIMITS:
rsp_limits = (struct otx2_cpt_kvf_limits_rsp *) msg;
@@ -189,7 +194,7 @@ int otx2_cptvf_send_eng_grp_num_msg(struct otx2_cptvf_dev *cptvf, int eng_type)
}
req->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
- req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+ req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->pdev, cptvf->vf_id, 0);
req->eng_type = eng_type;
return otx2_cpt_send_mbox_msg(mbox, pdev);
@@ -210,7 +215,7 @@ int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
}
req->id = MBOX_MSG_GET_KVF_LIMITS;
req->sig = OTX2_MBOX_REQ_SIG;
- req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+ req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->pdev, cptvf->vf_id, 0);
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
@@ -230,7 +235,7 @@ int otx2_cptvf_send_caps_msg(struct otx2_cptvf_dev *cptvf)
}
req->id = MBOX_MSG_GET_CAPS;
req->sig = OTX2_MBOX_REQ_SIG;
- req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+ req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->pdev, cptvf->vf_id, 0);
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
index 426244107037..e71494486c64 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
@@ -391,9 +391,19 @@ void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe)
&wqe->lfs->lf[wqe->lf_num].pqueue);
}
-int otx2_cpt_get_kcrypto_eng_grp_num(struct pci_dev *pdev)
+int otx2_cpt_get_eng_grp_num(struct pci_dev *pdev,
+ enum otx2_cpt_eng_type eng_type)
{
struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
- return cptvf->lfs.kcrypto_eng_grp_num;
+ switch (eng_type) {
+ case OTX2_CPT_SE_TYPES:
+ return cptvf->lfs.kcrypto_se_eng_grp_num;
+ case OTX2_CPT_AE_TYPES:
+ return cptvf->lfs.kcrypto_ae_eng_grp_num;
+ default:
+ dev_err(&cptvf->pdev->dev, "Unsupported engine type\n");
+ break;
+ }
+ return -ENXIO;
}
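
otx2_cpt_get_eng_grp_num() now returns either a group number or -ENXIO, so a caller selecting a group should treat the negative case as an error. A hypothetical caller sketch (not part of the patch; the algs code above relies on the groups having been validated at LF init time):

#include "otx2_cpt_reqmgr.h"

/* Pick the symmetric-engine (SE) group for a request, propagating the
 * error if the VF never learnt a valid group from the PF. */
static int example_set_se_group(struct pci_dev *pdev,
				struct otx2_cpt_req_info *req_info)
{
	int grp = otx2_cpt_get_eng_grp_num(pdev, OTX2_CPT_SE_TYPES);

	if (grp < 0)
		return grp;

	req_info->ctrl.s.grp = grp;
	return 0;
}
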
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index c498950402e8..1f4586509ca4 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -38,7 +38,6 @@ static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
crypto_finalize_aead_request(dd->engine, req, ret);
- pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
}
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 1ecf5f6ac04e..244e24e52987 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -400,7 +400,6 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
crypto_finalize_skcipher_request(dd->engine, req, err);
- pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
}
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index a099460d5f21..9c5538ae17db 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -489,7 +489,6 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err)
crypto_finalize_skcipher_request(dd->engine, req, err);
- pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
}
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 56f192cb976d..6328e8026b91 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1167,7 +1167,6 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
dd->flags &= ~(BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
- pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
ctx->offset = 0;
diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c
index 2c60a1047bc3..6cfe0238f615 100644
--- a/drivers/crypto/starfive/jh7110-hash.c
+++ b/drivers/crypto/starfive/jh7110-hash.c
@@ -493,25 +493,25 @@ static int starfive_hash_setkey(struct crypto_ahash *hash,
static int starfive_sha224_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "sha224-generic",
+ return starfive_hash_init_tfm(hash, "sha224-lib",
STARFIVE_HASH_SHA224, 0);
}
static int starfive_sha256_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "sha256-generic",
+ return starfive_hash_init_tfm(hash, "sha256-lib",
STARFIVE_HASH_SHA256, 0);
}
static int starfive_sha384_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "sha384-generic",
+ return starfive_hash_init_tfm(hash, "sha384-lib",
STARFIVE_HASH_SHA384, 0);
}
static int starfive_sha512_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "sha512-generic",
+ return starfive_hash_init_tfm(hash, "sha512-lib",
STARFIVE_HASH_SHA512, 0);
}
@@ -523,25 +523,25 @@ static int starfive_sm3_init_tfm(struct crypto_ahash *hash)
static int starfive_hmac_sha224_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "hmac(sha224-generic)",
+ return starfive_hash_init_tfm(hash, "hmac-sha224-lib",
STARFIVE_HASH_SHA224, 1);
}
static int starfive_hmac_sha256_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "hmac(sha256-generic)",
+ return starfive_hash_init_tfm(hash, "hmac-sha256-lib",
STARFIVE_HASH_SHA256, 1);
}
static int starfive_hmac_sha384_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "hmac(sha384-generic)",
+ return starfive_hash_init_tfm(hash, "hmac-sha384-lib",
STARFIVE_HASH_SHA384, 1);
}
static int starfive_hmac_sha512_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "hmac(sha512-generic)",
+ return starfive_hash_init_tfm(hash, "hmac-sha512-lib",
STARFIVE_HASH_SHA512, 1);
}
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 49dfd161e9b9..d6dc848c82ee 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -1,13 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-config CRYPTO_DEV_STM32_CRC
- tristate "Support for STM32 crc accelerators"
- depends on ARCH_STM32
- select CRYPTO_HASH
- select CRC32
- help
- This enables support for the CRC32 hw accelerator which can be found
- on STMicroelectronics STM32 SOC.
-
config CRYPTO_DEV_STM32_HASH
tristate "Support for STM32 hash accelerators"
depends on ARCH_STM32 || ARCH_U8500
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile
index 518e0e0b11a9..c63004026afb 100644
--- a/drivers/crypto/stm32/Makefile
+++ b/drivers/crypto/stm32/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32-crc32.o
obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o
obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
deleted file mode 100644
index fd29785a3ecf..000000000000
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ /dev/null
@@ -1,480 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) STMicroelectronics SA 2017
- * Author: Fabien Dessenne <fabien.dessenne@st.com>
- */
-
-#include <linux/bitrev.h>
-#include <linux/clk.h>
-#include <linux/crc32.h>
-#include <linux/crc32poly.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-
-#include <crypto/internal/hash.h>
-
-#include <linux/unaligned.h>
-
-#define DRIVER_NAME "stm32-crc32"
-#define CHKSUM_DIGEST_SIZE 4
-#define CHKSUM_BLOCK_SIZE 1
-
-/* Registers */
-#define CRC_DR 0x00000000
-#define CRC_CR 0x00000008
-#define CRC_INIT 0x00000010
-#define CRC_POL 0x00000014
-
-/* Registers values */
-#define CRC_CR_RESET BIT(0)
-#define CRC_CR_REV_IN_WORD (BIT(6) | BIT(5))
-#define CRC_CR_REV_IN_BYTE BIT(5)
-#define CRC_CR_REV_OUT BIT(7)
-#define CRC32C_INIT_DEFAULT 0xFFFFFFFF
-
-#define CRC_AUTOSUSPEND_DELAY 50
-
-static unsigned int burst_size;
-module_param(burst_size, uint, 0644);
-MODULE_PARM_DESC(burst_size, "Select burst byte size (0 unlimited)");
-
-struct stm32_crc {
- struct list_head list;
- struct device *dev;
- void __iomem *regs;
- struct clk *clk;
- spinlock_t lock;
-};
-
-struct stm32_crc_list {
- struct list_head dev_list;
- spinlock_t lock; /* protect dev_list */
-};
-
-static struct stm32_crc_list crc_list = {
- .dev_list = LIST_HEAD_INIT(crc_list.dev_list),
- .lock = __SPIN_LOCK_UNLOCKED(crc_list.lock),
-};
-
-struct stm32_crc_ctx {
- u32 key;
- u32 poly;
-};
-
-struct stm32_crc_desc_ctx {
- u32 partial; /* crc32c: partial in first 4 bytes of that struct */
-};
-
-static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
-{
- struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
-
- mctx->key = 0;
- mctx->poly = CRC32_POLY_LE;
- return 0;
-}
-
-static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
-{
- struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
-
- mctx->key = CRC32C_INIT_DEFAULT;
- mctx->poly = CRC32C_POLY_LE;
- return 0;
-}
-
-static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm);
-
- if (keylen != sizeof(u32))
- return -EINVAL;
-
- mctx->key = get_unaligned_le32(key);
- return 0;
-}
-
-static struct stm32_crc *stm32_crc_get_next_crc(void)
-{
- struct stm32_crc *crc;
-
- spin_lock_bh(&crc_list.lock);
- crc = list_first_entry_or_null(&crc_list.dev_list, struct stm32_crc, list);
- if (crc)
- list_move_tail(&crc->list, &crc_list.dev_list);
- spin_unlock_bh(&crc_list.lock);
-
- return crc;
-}
-
-static int stm32_crc_init(struct shash_desc *desc)
-{
- struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
- struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
- struct stm32_crc *crc;
- unsigned long flags;
-
- crc = stm32_crc_get_next_crc();
- if (!crc)
- return -ENODEV;
-
- pm_runtime_get_sync(crc->dev);
-
- spin_lock_irqsave(&crc->lock, flags);
-
- /* Reset, set key, poly and configure in bit reverse mode */
- writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT);
- writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
- writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
- crc->regs + CRC_CR);
-
- /* Store partial result */
- ctx->partial = readl_relaxed(crc->regs + CRC_DR);
-
- spin_unlock_irqrestore(&crc->lock, flags);
-
- pm_runtime_mark_last_busy(crc->dev);
- pm_runtime_put_autosuspend(crc->dev);
-
- return 0;
-}
-
-static int burst_update(struct shash_desc *desc, const u8 *d8,
- size_t length)
-{
- struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
- struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
- struct stm32_crc *crc;
-
- crc = stm32_crc_get_next_crc();
- if (!crc)
- return -ENODEV;
-
- pm_runtime_get_sync(crc->dev);
-
- if (!spin_trylock(&crc->lock)) {
- /* Hardware is busy, calculate crc32 by software */
- if (mctx->poly == CRC32_POLY_LE)
- ctx->partial = crc32_le(ctx->partial, d8, length);
- else
- ctx->partial = crc32c(ctx->partial, d8, length);
-
- goto pm_out;
- }
-
- /*
- * Restore previously calculated CRC for this context as init value
- * Restore polynomial configuration
- * Configure in register for word input data,
- * Configure out register in reversed bit mode data.
- */
- writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT);
- writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
- writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
- crc->regs + CRC_CR);
-
- if (d8 != PTR_ALIGN(d8, sizeof(u32))) {
- /* Configure for byte data */
- writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
- crc->regs + CRC_CR);
- while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) {
- writeb_relaxed(*d8++, crc->regs + CRC_DR);
- length--;
- }
- /* Configure for word data */
- writel_relaxed(CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
- crc->regs + CRC_CR);
- }
-
- for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32))
- writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR);
-
- if (length) {
- /* Configure for byte data */
- writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
- crc->regs + CRC_CR);
- while (length--)
- writeb_relaxed(*d8++, crc->regs + CRC_DR);
- }
-
- /* Store partial result */
- ctx->partial = readl_relaxed(crc->regs + CRC_DR);
-
- spin_unlock(&crc->lock);
-
-pm_out:
- pm_runtime_mark_last_busy(crc->dev);
- pm_runtime_put_autosuspend(crc->dev);
-
- return 0;
-}
-
-static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
- unsigned int length)
-{
- const unsigned int burst_sz = burst_size;
- unsigned int rem_sz;
- const u8 *cur;
- size_t size;
- int ret;
-
- if (!burst_sz)
- return burst_update(desc, d8, length);
-
- /* Digest first bytes not 32bit aligned at first pass in the loop */
- size = min_t(size_t, length, burst_sz + (size_t)d8 -
- ALIGN_DOWN((size_t)d8, sizeof(u32)));
- for (rem_sz = length, cur = d8; rem_sz;
- rem_sz -= size, cur += size, size = min(rem_sz, burst_sz)) {
- ret = burst_update(desc, cur, size);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int stm32_crc_final(struct shash_desc *desc, u8 *out)
-{
- struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
- struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
-
- /* Send computed CRC */
- put_unaligned_le32(mctx->poly == CRC32C_POLY_LE ?
- ~ctx->partial : ctx->partial, out);
-
- return 0;
-}
-
-static int stm32_crc_finup(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- return stm32_crc_update(desc, data, length) ?:
- stm32_crc_final(desc, out);
-}
-
-static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
-}
-
-static unsigned int refcnt;
-static DEFINE_MUTEX(refcnt_lock);
-static struct shash_alg algs[] = {
- /* CRC-32 */
- {
- .setkey = stm32_crc_setkey,
- .init = stm32_crc_init,
- .update = stm32_crc_update,
- .final = stm32_crc_final,
- .finup = stm32_crc_finup,
- .digest = stm32_crc_digest,
- .descsize = sizeof(struct stm32_crc_desc_ctx),
- .digestsize = CHKSUM_DIGEST_SIZE,
- .base = {
- .cra_name = "crc32",
- .cra_driver_name = "stm32-crc32-crc32",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_crc_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = stm32_crc32_cra_init,
- }
- },
- /* CRC-32Castagnoli */
- {
- .setkey = stm32_crc_setkey,
- .init = stm32_crc_init,
- .update = stm32_crc_update,
- .final = stm32_crc_final,
- .finup = stm32_crc_finup,
- .digest = stm32_crc_digest,
- .descsize = sizeof(struct stm32_crc_desc_ctx),
- .digestsize = CHKSUM_DIGEST_SIZE,
- .base = {
- .cra_name = "crc32c",
- .cra_driver_name = "stm32-crc32-crc32c",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_crc_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = stm32_crc32c_cra_init,
- }
- }
-};
-
-static int stm32_crc_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct stm32_crc *crc;
- int ret;
-
- crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
- if (!crc)
- return -ENOMEM;
-
- crc->dev = dev;
-
- crc->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(crc->regs)) {
- dev_err(dev, "Cannot map CRC IO\n");
- return PTR_ERR(crc->regs);
- }
-
- crc->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(crc->clk)) {
- dev_err(dev, "Could not get clock\n");
- return PTR_ERR(crc->clk);
- }
-
- ret = clk_prepare_enable(crc->clk);
- if (ret) {
- dev_err(crc->dev, "Failed to enable clock\n");
- return ret;
- }
-
- pm_runtime_set_autosuspend_delay(dev, CRC_AUTOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(dev);
-
- pm_runtime_get_noresume(dev);
- pm_runtime_set_active(dev);
- pm_runtime_irq_safe(dev);
- pm_runtime_enable(dev);
-
- spin_lock_init(&crc->lock);
-
- platform_set_drvdata(pdev, crc);
-
- spin_lock(&crc_list.lock);
- list_add(&crc->list, &crc_list.dev_list);
- spin_unlock(&crc_list.lock);
-
- mutex_lock(&refcnt_lock);
- if (!refcnt) {
- ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
- if (ret) {
- mutex_unlock(&refcnt_lock);
- dev_err(dev, "Failed to register\n");
- clk_disable_unprepare(crc->clk);
- return ret;
- }
- }
- refcnt++;
- mutex_unlock(&refcnt_lock);
-
- dev_info(dev, "Initialized\n");
-
- pm_runtime_put_sync(dev);
-
- return 0;
-}
-
-static void stm32_crc_remove(struct platform_device *pdev)
-{
- struct stm32_crc *crc = platform_get_drvdata(pdev);
- int ret = pm_runtime_get_sync(crc->dev);
-
- spin_lock(&crc_list.lock);
- list_del(&crc->list);
- spin_unlock(&crc_list.lock);
-
- mutex_lock(&refcnt_lock);
- if (!--refcnt)
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
- mutex_unlock(&refcnt_lock);
-
- pm_runtime_disable(crc->dev);
- pm_runtime_put_noidle(crc->dev);
-
- if (ret >= 0)
- clk_disable(crc->clk);
- clk_unprepare(crc->clk);
-}
-
-static int __maybe_unused stm32_crc_suspend(struct device *dev)
-{
- struct stm32_crc *crc = dev_get_drvdata(dev);
- int ret;
-
- ret = pm_runtime_force_suspend(dev);
- if (ret)
- return ret;
-
- clk_unprepare(crc->clk);
-
- return 0;
-}
-
-static int __maybe_unused stm32_crc_resume(struct device *dev)
-{
- struct stm32_crc *crc = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_prepare(crc->clk);
- if (ret) {
- dev_err(crc->dev, "Failed to prepare clock\n");
- return ret;
- }
-
- return pm_runtime_force_resume(dev);
-}
-
-static int __maybe_unused stm32_crc_runtime_suspend(struct device *dev)
-{
- struct stm32_crc *crc = dev_get_drvdata(dev);
-
- clk_disable(crc->clk);
-
- return 0;
-}
-
-static int __maybe_unused stm32_crc_runtime_resume(struct device *dev)
-{
- struct stm32_crc *crc = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_enable(crc->clk);
- if (ret) {
- dev_err(crc->dev, "Failed to enable clock\n");
- return ret;
- }
-
- return 0;
-}
-
-static const struct dev_pm_ops stm32_crc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(stm32_crc_suspend,
- stm32_crc_resume)
- SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend,
- stm32_crc_runtime_resume, NULL)
-};
-
-static const struct of_device_id stm32_dt_ids[] = {
- { .compatible = "st,stm32f7-crc", },
- {},
-};
-MODULE_DEVICE_TABLE(of, stm32_dt_ids);
-
-static struct platform_driver stm32_crc_driver = {
- .probe = stm32_crc_probe,
- .remove = stm32_crc_remove,
- .driver = {
- .name = DRIVER_NAME,
- .pm = &stm32_crc_pm_ops,
- .of_match_table = stm32_dt_ids,
- },
-};
-
-module_platform_driver(stm32_crc_driver);
-
-MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
-MODULE_DESCRIPTION("STMicrolectronics STM32 CRC32 hardware driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 5ce88e7a8f65..a89b4c5d62a0 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -851,7 +851,6 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
if (!err && (!(is_gcm(cryp) || is_ccm(cryp) || is_ecb(cryp))))
stm32_cryp_get_iv(cryp);
- pm_runtime_mark_last_busy(cryp->dev);
pm_runtime_put_autosuspend(cryp->dev);
if (is_gcm(cryp) || is_ccm(cryp))
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 768b27de4737..a4436728b0db 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -1373,7 +1373,6 @@ static void stm32_hash_unprepare_request(struct ahash_request *req)
*preg++ = stm32_hash_read(hdev, HASH_CSR(i));
pm_runtime:
- pm_runtime_mark_last_busy(hdev->dev);
pm_runtime_put_autosuspend(hdev->dev);
}
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index 7059bbe5a2eb..19c934af3df6 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -113,8 +113,6 @@ struct virtio_crypto_request {
int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
struct list_head *virtcrypto_devmgr_get_head(void);
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
-struct virtio_crypto *virtcrypto_devmgr_get_first(void);
-int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 0d522049f595..3d241446099c 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -139,7 +139,7 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
spin_lock_init(&vi->data_vq[i].lock);
vi->data_vq[i].vq = vqs[i];
/* Initialize crypto engine */
- vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, NULL, true,
+ vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, true,
virtqueue_get_vring_size(vqs[i]));
if (!vi->data_vq[i].engine) {
ret = -ENOMEM;
diff --git a/drivers/crypto/virtio/virtio_crypto_mgr.c b/drivers/crypto/virtio/virtio_crypto_mgr.c
index bddbd8ebfebe..06c74fa132cd 100644
--- a/drivers/crypto/virtio/virtio_crypto_mgr.c
+++ b/drivers/crypto/virtio/virtio_crypto_mgr.c
@@ -82,42 +82,6 @@ void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev)
}
/*
- * virtcrypto_devmgr_get_first()
- *
- * Function returns the first virtio crypto device from the acceleration
- * framework.
- *
- * To be used by virtio crypto device specific drivers.
- *
- * Return: pointer to vcrypto_dev or NULL if not found.
- */
-struct virtio_crypto *virtcrypto_devmgr_get_first(void)
-{
- struct virtio_crypto *dev = NULL;
-
- mutex_lock(&table_lock);
- if (!list_empty(&virtio_crypto_table))
- dev = list_first_entry(&virtio_crypto_table,
- struct virtio_crypto,
- list);
- mutex_unlock(&table_lock);
- return dev;
-}
-
-/*
- * virtcrypto_dev_in_use() - Check whether vcrypto_dev is currently in use
- * @vcrypto_dev: Pointer to virtio crypto device.
- *
- * To be used by virtio crypto device specific drivers.
- *
- * Return: 1 when device is in use, 0 otherwise.
- */
-int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev)
-{
- return atomic_read(&vcrypto_dev->ref_count) != 0;
-}
-
-/*
* virtcrypto_dev_get() - Increment vcrypto_dev reference count
* @vcrypto_dev: Pointer to virtio crypto device.
*