From 6c4fed5fee42f5785e881ef2c28359724b18b80e Mon Sep 17 00:00:00 2001 From: Harsh Jain Date: Mon, 15 Sep 2025 19:00:25 +0530 Subject: crypto: drbg - Export CTR DRBG DF functions Export drbg_ctr_df() derivative function to new module df_sp80090. Signed-off-by: Harsh Jain Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index a6688d54984c..8d3b5d2890f8 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -728,6 +728,7 @@ config CRYPTO_DEV_TEGRA config CRYPTO_DEV_XILINX_TRNG tristate "Support for Xilinx True Random Generator" depends on ZYNQMP_FIRMWARE || COMPILE_TEST + select CRYPTO_DF80090A select CRYPTO_RNG select HW_RANDOM help -- cgit From 1a098379f28b05996603888ff7dd508e5773d5d9 Mon Sep 17 00:00:00 2001 From: Harsh Jain Date: Mon, 15 Sep 2025 19:00:27 +0530 Subject: crypto: xilinx-trng - Add CTR_DRBG DF processing of seed Versal TRNG IP does not support Derivation Function (DF) of seed. Add DF processing for CTR_DRBG mode. Signed-off-by: Harsh Jain Signed-off-by: Herbert Xu --- drivers/crypto/xilinx/xilinx-trng.c | 37 +++++++++++++++++++++++++++++++------ 1 file changed, 31 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/xilinx/xilinx-trng.c b/drivers/crypto/xilinx/xilinx-trng.c index 4e4700d68127..b89a2f70bf82 100644 --- a/drivers/crypto/xilinx/xilinx-trng.c +++ b/drivers/crypto/xilinx/xilinx-trng.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include #include @@ -18,10 +17,11 @@ #include #include #include -#include +#include +#include +#include #include #include -#include /* TRNG Registers Offsets */ #define TRNG_STATUS_OFFSET 0x4U @@ -59,6 +59,8 @@ struct xilinx_rng { void __iomem *rng_base; struct device *dev; + unsigned char *scratchpadbuf; + struct crypto_aes_ctx *aesctx; struct mutex lock; /* Protect access to TRNG device */ struct hwrng trng; }; @@ -182,9 +184,13 @@ static void xtrng_enable_entropy(struct xilinx_rng *rng) static int xtrng_reseed_internal(struct xilinx_rng *rng) { u8 entropy[TRNG_ENTROPY_SEED_LEN_BYTES]; + struct drbg_string data; + LIST_HEAD(seedlist); u32 val; int ret; + drbg_string_fill(&data, entropy, TRNG_SEED_LEN_BYTES); + list_add_tail(&data.list, &seedlist); memset(entropy, 0, sizeof(entropy)); xtrng_enable_entropy(rng); @@ -192,9 +198,14 @@ static int xtrng_reseed_internal(struct xilinx_rng *rng) ret = xtrng_collect_random_data(rng, entropy, TRNG_SEED_LEN_BYTES, true); if (ret != TRNG_SEED_LEN_BYTES) return -EINVAL; + ret = crypto_drbg_ctr_df(rng->aesctx, rng->scratchpadbuf, + TRNG_SEED_LEN_BYTES, &seedlist, AES_BLOCK_SIZE, + TRNG_SEED_LEN_BYTES); + if (ret) + return ret; xtrng_write_multiple_registers(rng->rng_base + TRNG_EXT_SEED_OFFSET, - (u32 *)entropy, TRNG_NUM_INIT_REGS); + (u32 *)rng->scratchpadbuf, TRNG_NUM_INIT_REGS); /* select reseed operation */ iowrite32(TRNG_CTRL_PRNGXS_MASK, rng->rng_base + TRNG_CTRL_OFFSET); @@ -324,6 +335,7 @@ static void xtrng_hwrng_unregister(struct hwrng *trng) static int xtrng_probe(struct platform_device *pdev) { struct xilinx_rng *rng; + size_t sb_size; int ret; rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL); @@ -337,11 +349,22 @@ static int xtrng_probe(struct platform_device *pdev) return PTR_ERR(rng->rng_base); } + rng->aesctx = devm_kzalloc(&pdev->dev, sizeof(*rng->aesctx), GFP_KERNEL); + if (!rng->aesctx) + return -ENOMEM; + + sb_size = crypto_drbg_ctr_df_datalen(TRNG_SEED_LEN_BYTES, AES_BLOCK_SIZE); + 
rng->scratchpadbuf = devm_kzalloc(&pdev->dev, sb_size, GFP_KERNEL); + if (!rng->scratchpadbuf) { + ret = -ENOMEM; + goto end; + } + xtrng_trng_reset(rng->rng_base); ret = xtrng_reseed_internal(rng); if (ret) { dev_err(&pdev->dev, "TRNG Seed fail\n"); - return ret; + goto end; } xilinx_rng_dev = rng; @@ -349,8 +372,9 @@ static int xtrng_probe(struct platform_device *pdev) ret = crypto_register_rng(&xtrng_trng_alg); if (ret) { dev_err(&pdev->dev, "Crypto Random device registration failed: %d\n", ret); - return ret; + goto end; } + ret = xtrng_hwrng_register(&rng->trng); if (ret) { dev_err(&pdev->dev, "HWRNG device registration failed: %d\n", ret); @@ -363,6 +387,7 @@ static int xtrng_probe(struct platform_device *pdev) crypto_rng_free: crypto_unregister_rng(&xtrng_trng_alg); +end: return ret; } -- cgit From 6af9914f7bbfdf3e0a53626a5be49d6add8e9e15 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Tue, 23 Sep 2025 03:03:04 +0200 Subject: crypto: hifn_795x - replace simple_strtoul with kstrtouint Replace simple_strtoul() with the recommended kstrtouint() for parsing the 'hifn_pll_ref=' module parameter. Unlike simple_strtoul(), which returns an unsigned long, kstrtouint() converts the string directly to an unsigned integer and avoids implicit casting. Check the return value of kstrtouint() and fall back to 66 MHz if parsing fails. This adds error handling while preserving existing behavior for valid values, and removes use of the deprecated simple_strtoul() helper. Add a space in the log message to correctly format "66 MHz" while we're at it. Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/hifn_795x.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index 925991526745..edf36f6add52 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -913,11 +913,10 @@ static void hifn_init_pll(struct hifn_device *dev) else pllcfg |= HIFN_PLL_REF_CLK_HBI; - if (hifn_pll_ref[3] != '\0') - freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10); - else { + if (hifn_pll_ref[3] == '\0' || + kstrtouint(hifn_pll_ref + 3, 10, &freq)) { freq = 66; - dev_info(&dev->pdev->dev, "assuming %uMHz clock speed, override with hifn_pll_ref=%.3s\n", + dev_info(&dev->pdev->dev, "assuming %u MHz clock speed, override with hifn_pll_ref=%.3s\n", freq, hifn_pll_ref); } -- cgit From 7cf6e0b69b0d90ab042163e5bbddda0dfcf8b6a7 Mon Sep 17 00:00:00 2001 From: Guangshuo Li Date: Tue, 23 Sep 2025 20:44:18 +0800 Subject: crypto: caam - Add check for kcalloc() in test_len() As kcalloc() may fail, check its return value to avoid a NULL pointer dereference when passing the buffer to rng->read(). On allocation failure, log the error and return since test_len() returns void. 
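A minimal sketch of the pattern the fix applies, assuming a void test helper that owns a temporary buffer; the read loop is abbreviated and illustrative rather than the driver's exact code:

  static void test_len(struct hwrng *rng, size_t len, bool wait)
  {
          u8 *buf;
          int read_len;

          buf = kcalloc(CAAM_RNG_MAX_FIFO_STORE_SIZE, sizeof(u8), GFP_KERNEL);
          if (!buf)
                  return;         /* void helper: nothing to do without the buffer */

          while (len > 0) {
                  read_len = rng->read(rng, buf, len, wait);
                  if (read_len <= 0)
                          break;
                  len -= read_len;
          }

          kfree(buf);
  }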
Fixes: 2be0d806e25e ("crypto: caam - add a test for the RNG") Cc: stable@vger.kernel.org Signed-off-by: Guangshuo Li Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamrng.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index b3d14a7f4dd1..0eb43c862516 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c @@ -181,7 +181,9 @@ static inline void test_len(struct hwrng *rng, size_t len, bool wait) struct device *dev = ctx->ctrldev; buf = kcalloc(CAAM_RNG_MAX_FIFO_STORE_SIZE, sizeof(u8), GFP_KERNEL); - + if (!buf) { + return; + } while (len > 0) { read_len = rng->read(rng, buf, len, wait); -- cgit From e74b96d77da9eb5ee1b603c937c2adab5134a04b Mon Sep 17 00:00:00 2001 From: Jonathan McDowell Date: Tue, 23 Sep 2025 14:33:05 +0100 Subject: hwrng: core - Allow runtime disabling of the HW RNG The HW RNG core allows for manual selection of which RNG device to use, but does not allow for no device to be enabled. It may be desirable to do this on systems with only a single suitable hardware RNG, where we need exclusive access to other functionality on this device. In particular when performing TPM firmware upgrades this lets us ensure the kernel does not try to access the device. Before: root@debian-qemu-efi:~# grep "" /sys/devices/virtual/misc/hw_random/rng_* /sys/devices/virtual/misc/hw_random/rng_available:tpm-rng-0 /sys/devices/virtual/misc/hw_random/rng_current:tpm-rng-0 /sys/devices/virtual/misc/hw_random/rng_quality:1024 /sys/devices/virtual/misc/hw_random/rng_selected:0 After: root@debian-qemu-efi:~# grep "" /sys/devices/virtual/misc/hw_random/rng_* /sys/devices/virtual/misc/hw_random/rng_available:tpm-rng-0 none /sys/devices/virtual/misc/hw_random/rng_current:tpm-rng-0 /sys/devices/virtual/misc/hw_random/rng_quality:1024 /sys/devices/virtual/misc/hw_random/rng_selected:0 root@debian-qemu-efi:~# echo none > /sys/devices/virtual/misc/hw_random/rng_current root@debian-qemu-efi:~# grep "" /sys/devices/virtual/misc/hw_random/rng_* /sys/devices/virtual/misc/hw_random/rng_available:tpm-rng-0 none /sys/devices/virtual/misc/hw_random/rng_current:none grep: /sys/devices/virtual/misc/hw_random/rng_quality: No such device /sys/devices/virtual/misc/hw_random/rng_selected:1 (Observe using bpftrace no calls to TPM being made) root@debian-qemu-efi:~# echo "" > /sys/devices/virtual/misc/hw_random/rng_current root@debian-qemu-efi:~# grep "" /sys/devices/virtual/misc/hw_random/rng_* /sys/devices/virtual/misc/hw_random/rng_available:tpm-rng-0 none /sys/devices/virtual/misc/hw_random/rng_current:tpm-rng-0 /sys/devices/virtual/misc/hw_random/rng_quality:1024 /sys/devices/virtual/misc/hw_random/rng_selected:0 (Observe using bpftrace that calls to the TPM resume) Signed-off-by: Jonathan McDowell Signed-off-by: Herbert Xu --- drivers/char/hw_random/core.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 018316f54621..56d888bebe0c 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -341,6 +341,9 @@ static ssize_t rng_current_store(struct device *dev, if (sysfs_streq(buf, "")) { err = enable_best_rng(); + } else if (sysfs_streq(buf, "none")) { + cur_rng_set_by_user = 1; + drop_current_rng(); } else { list_for_each_entry(rng, &rng_list, list) { if (sysfs_streq(rng->name, buf)) { @@ -392,7 +395,7 @@ static ssize_t rng_available_show(struct 
device *dev, strlcat(buf, rng->name, PAGE_SIZE); strlcat(buf, " ", PAGE_SIZE); } - strlcat(buf, "\n", PAGE_SIZE); + strlcat(buf, "none\n", PAGE_SIZE); mutex_unlock(&rng_mutex); return strlen(buf); @@ -544,8 +547,8 @@ int hwrng_register(struct hwrng *rng) /* Adjust quality field to always have a proper value */ rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024); - if (!current_rng || - (!cur_rng_set_by_user && rng->quality > current_rng->quality)) { + if (!cur_rng_set_by_user && + (!current_rng || rng->quality > current_rng->quality)) { /* * Set new rng as current as the new rng source * provides better entropy quality and was not -- cgit From a703a4c2a3280835003d4d0eb8845bac0f1a6ef1 Mon Sep 17 00:00:00 2001 From: Meenakshi Aggarwal Date: Mon, 6 Oct 2025 09:17:52 +0200 Subject: KEYS: trusted: caam based protected key - CAAM supports two types of protected keys: -- Plain key encrypted with ECB -- Plain key encrypted with CCM Due to robustness, default encryption used for protected key is CCM. - Generate protected key blob and add it to trusted key payload. This is done as part of sealing operation, which is triggered when below two operations are requested: -- new key generation -- load key, Signed-off-by: Pankaj Gupta Signed-off-by: Meenakshi Aggarwal Signed-off-by: Herbert Xu --- drivers/crypto/caam/blob_gen.c | 86 ++++++++++++++++++++++++++++++++++-------- drivers/crypto/caam/desc.h | 9 ++++- 2 files changed, 78 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c index 079a22cc9f02..c18dbac56493 100644 --- a/drivers/crypto/caam/blob_gen.c +++ b/drivers/crypto/caam/blob_gen.c @@ -2,13 +2,14 @@ /* * Copyright (C) 2015 Pengutronix, Steffen Trumtrar * Copyright (C) 2021 Pengutronix, Ahmad Fatoum - * Copyright 2024 NXP + * Copyright 2024-2025 NXP */ #define pr_fmt(fmt) "caam blob_gen: " fmt #include #include +#include #include #include "compat.h" @@ -60,18 +61,27 @@ static void caam_blob_job_done(struct device *dev, u32 *desc, u32 err, void *con complete(&res->completion); } +static u32 check_caam_state(struct device *jrdev) +{ + const struct caam_drv_private *ctrlpriv; + + ctrlpriv = dev_get_drvdata(jrdev->parent); + return FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status)); +} + int caam_process_blob(struct caam_blob_priv *priv, struct caam_blob_info *info, bool encap) { - const struct caam_drv_private *ctrlpriv; struct caam_blob_job_result testres; struct device *jrdev = &priv->jrdev; dma_addr_t dma_in, dma_out; int op = OP_PCLID_BLOB; + int hwbk_caam_ovhd = 0; size_t output_len; u32 *desc; u32 moo; int ret; + int len; if (info->key_mod_len > CAAM_BLOB_KEYMOD_LENGTH) return -EINVAL; @@ -82,14 +92,29 @@ int caam_process_blob(struct caam_blob_priv *priv, } else { op |= OP_TYPE_DECAP_PROTOCOL; output_len = info->input_len - CAAM_BLOB_OVERHEAD; + info->output_len = output_len; + } + + if (encap && info->pkey_info.is_pkey) { + op |= OP_PCL_BLOB_BLACK; + if (info->pkey_info.key_enc_algo == CAAM_ENC_ALGO_CCM) { + op |= OP_PCL_BLOB_EKT; + hwbk_caam_ovhd = CAAM_CCM_OVERHEAD; + } + if ((info->input_len + hwbk_caam_ovhd) > MAX_KEY_SIZE) + return -EINVAL; + + len = info->input_len + hwbk_caam_ovhd; + } else { + len = info->input_len; } desc = kzalloc(CAAM_BLOB_DESC_BYTES_MAX, GFP_KERNEL); if (!desc) return -ENOMEM; - dma_in = dma_map_single(jrdev, info->input, info->input_len, - DMA_TO_DEVICE); + dma_in = dma_map_single(jrdev, info->input, len, + encap ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (dma_mapping_error(jrdev, dma_in)) { dev_err(jrdev, "unable to map input DMA buffer\n"); ret = -ENOMEM; @@ -104,8 +129,7 @@ int caam_process_blob(struct caam_blob_priv *priv, goto out_unmap_in; } - ctrlpriv = dev_get_drvdata(jrdev->parent); - moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status)); + moo = check_caam_state(jrdev); if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED) dev_warn(jrdev, "using insecure test key, enable HAB to use unique device key!\n"); @@ -117,18 +141,48 @@ int caam_process_blob(struct caam_blob_priv *priv, * Class 1 Context DWords 0+1+2+3. The random BK is stored in the * Class 1 Key Register. Operation Mode is set to AES-CCM. */ - init_job_desc(desc, 0); + + if (encap && info->pkey_info.is_pkey) { + /*!1. key command used to load class 1 key register + * from input plain key. + */ + append_key(desc, dma_in, info->input_len, + CLASS_1 | KEY_DEST_CLASS_REG); + /*!2. Fifostore to store protected key from class 1 key register. */ + if (info->pkey_info.key_enc_algo == CAAM_ENC_ALGO_CCM) { + append_fifo_store(desc, dma_in, info->input_len, + LDST_CLASS_1_CCB | + FIFOST_TYPE_KEY_CCM_JKEK); + } else { + append_fifo_store(desc, dma_in, info->input_len, + LDST_CLASS_1_CCB | + FIFOST_TYPE_KEY_KEK); + } + /* + * JUMP_OFFSET specifies the offset of the JUMP target from + * the JUMP command's address in the descriptor buffer. + */ + append_jump(desc, JUMP_COND_NOP | BIT(0) << JUMP_OFFSET_SHIFT); + } + + /*!3. Load class 2 key with key modifier. */ append_key_as_imm(desc, info->key_mod, info->key_mod_len, - info->key_mod_len, CLASS_2 | KEY_DEST_CLASS_REG); - append_seq_in_ptr_intlen(desc, dma_in, info->input_len, 0); - append_seq_out_ptr_intlen(desc, dma_out, output_len, 0); + info->key_mod_len, CLASS_2 | KEY_DEST_CLASS_REG); + + /*!4. SEQ IN PTR Command. */ + append_seq_in_ptr(desc, dma_in, info->input_len, 0); + + /*!5. SEQ OUT PTR Command. */ + append_seq_out_ptr(desc, dma_out, output_len, 0); + + /*!6. Blob encapsulation/decapsulation PROTOCOL Command. */ append_operation(desc, op); - print_hex_dump_debug("data@"__stringify(__LINE__)": ", + print_hex_dump_debug("data@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 1, info->input, - info->input_len, false); - print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", + len, false); + print_hex_dump_debug("jobdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 1, desc, desc_bytes(desc), false); @@ -139,7 +193,7 @@ int caam_process_blob(struct caam_blob_priv *priv, if (ret == -EINPROGRESS) { wait_for_completion(&testres.completion); ret = testres.err; - print_hex_dump_debug("output@"__stringify(__LINE__)": ", + print_hex_dump_debug("output@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 1, info->output, output_len, false); } @@ -149,10 +203,10 @@ int caam_process_blob(struct caam_blob_priv *priv, dma_unmap_single(jrdev, dma_out, output_len, DMA_FROM_DEVICE); out_unmap_in: - dma_unmap_single(jrdev, dma_in, info->input_len, DMA_TO_DEVICE); + dma_unmap_single(jrdev, dma_in, len, + encap ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); out_free: kfree(desc); - return ret; } EXPORT_SYMBOL(caam_process_blob); diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index e13470901586..c28e94fcb8c7 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h @@ -4,7 +4,7 @@ * Definitions to support CAAM descriptor instruction generation * * Copyright 2008-2011 Freescale Semiconductor, Inc. 
- * Copyright 2018 NXP + * Copyright 2018, 2025 NXP */ #ifndef DESC_H @@ -162,6 +162,7 @@ * Enhanced Encryption of Key */ #define KEY_EKT 0x00100000 +#define KEY_EKT_OFFSET 20 /* * Encrypted with Trusted Key @@ -403,6 +404,7 @@ #define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT) #define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT) #define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_KEY_CCM_JKEK (0x14 << FIFOST_TYPE_SHIFT) #define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT) #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT) #define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT) @@ -1001,6 +1003,11 @@ #define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63 #define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65 +/* Blob protocol protinfo bits */ + +#define OP_PCL_BLOB_BLACK 0x0004 +#define OP_PCL_BLOB_EKT 0x0100 + /* For DTLS - OP_PCLID_DTLS */ #define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f -- cgit From 66b9a095f7f9e1031e5333661be513e89e40ee08 Mon Sep 17 00:00:00 2001 From: Meenakshi Aggarwal Date: Mon, 6 Oct 2025 09:17:53 +0200 Subject: crypto: caam - Add support of paes algorithm PAES algorithm uses protected key for encryption/decryption operations. Signed-off-by: Gaurav Jain Signed-off-by: Meenakshi Aggarwal Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 128 +++++++++++++++++++++++++++++++++---- drivers/crypto/caam/caamalg_desc.c | 87 ++++++++++++++++++++++++- drivers/crypto/caam/caamalg_desc.h | 13 +++- drivers/crypto/caam/desc_constr.h | 8 ++- 4 files changed, 220 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 2cfb1b8d8c7c..32a6e6e15ee2 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -3,7 +3,7 @@ * caam - Freescale FSL CAAM support for crypto API * * Copyright 2008-2011 Freescale Semiconductor, Inc. - * Copyright 2016-2019, 2023 NXP + * Copyright 2016-2019, 2023, 2025 NXP * * Based on talitos crypto API driver. 
* @@ -61,13 +61,16 @@ #include #include #include +#include #include #include #include #include #include +#include #include #include +#include /* * crypto alg @@ -119,12 +122,15 @@ struct caam_ctx { dma_addr_t sh_desc_enc_dma; dma_addr_t sh_desc_dec_dma; dma_addr_t key_dma; + u8 protected_key[CAAM_MAX_KEY_SIZE]; + dma_addr_t protected_key_dma; enum dma_data_direction dir; struct device *jrdev; struct alginfo adata; struct alginfo cdata; unsigned int authsize; bool xts_key_fallback; + bool is_blob; struct crypto_skcipher *fallback; }; @@ -751,9 +757,14 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, print_hex_dump_debug("key in @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + /* Here keylen is actual key length */ ctx->cdata.keylen = keylen; ctx->cdata.key_virt = key; ctx->cdata.key_inline = true; + /* Here protected key len is plain key length */ + ctx->cdata.plain_keylen = keylen; + ctx->cdata.key_cmd_opt = 0; + /* skcipher_encrypt shared descriptor */ desc = ctx->sh_desc_enc; @@ -772,6 +783,62 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, return 0; } +static int paes_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, + unsigned int keylen) +{ + struct caam_pkey_info *pkey_info = (struct caam_pkey_info *)key; + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); + struct device *jrdev = ctx->jrdev; + int err; + + ctx->cdata.key_inline = false; + + keylen = keylen - CAAM_PKEY_HEADER; + + /* Retrieve the length of key */ + ctx->cdata.plain_keylen = pkey_info->plain_key_sz; + + /* Retrieve the length of blob*/ + ctx->cdata.keylen = keylen; + + /* Retrieve the address of the blob */ + ctx->cdata.key_virt = pkey_info->key_buf; + + /* Validate key length for AES algorithms */ + err = aes_check_keylen(ctx->cdata.plain_keylen); + if (err) { + dev_err(jrdev, "bad key length\n"); + return err; + } + + /* set command option */ + ctx->cdata.key_cmd_opt |= KEY_ENC; + + /* check if the Protected-Key is CCM key */ + if (pkey_info->key_enc_algo == CAAM_ENC_ALGO_CCM) + ctx->cdata.key_cmd_opt |= KEY_EKT; + + memcpy(ctx->key, ctx->cdata.key_virt, keylen); + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); + ctx->cdata.key_dma = ctx->key_dma; + + if (pkey_info->key_enc_algo == CAAM_ENC_ALGO_CCM) + ctx->protected_key_dma = dma_map_single(jrdev, ctx->protected_key, + ctx->cdata.plain_keylen + + CAAM_CCM_OVERHEAD, + DMA_FROM_DEVICE); + else + ctx->protected_key_dma = dma_map_single(jrdev, ctx->protected_key, + ctx->cdata.plain_keylen, + DMA_FROM_DEVICE); + + ctx->cdata.protected_key_dma = ctx->protected_key_dma; + ctx->is_blob = true; + + return 0; +} + static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { @@ -1254,7 +1321,9 @@ static void init_skcipher_job(struct skcipher_request *req, struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct device *jrdev = ctx->jrdev; int ivsize = crypto_skcipher_ivsize(skcipher); - u32 *desc = edesc->hw_desc; + u32 *desc = !ctx->is_blob ? edesc->hw_desc : + (u32 *)((u8 *)edesc->hw_desc + CAAM_DESC_BYTES_MAX); + dma_addr_t desc_dma; u32 *sh_desc; u32 in_options = 0, out_options = 0; dma_addr_t src_dma, dst_dma, ptr; @@ -1269,11 +1338,6 @@ static void init_skcipher_job(struct skcipher_request *req, DUMP_PREFIX_ADDRESS, 16, 4, req->src, edesc->src_nents > 1 ? 100 : req->cryptlen, 1); - sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec; - ptr = encrypt ? 
ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma; - - len = desc_len(sh_desc); - init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); if (ivsize || edesc->mapped_src_nents > 1) { src_dma = edesc->sec4_sg_dma; @@ -1283,8 +1347,6 @@ static void init_skcipher_job(struct skcipher_request *req, src_dma = sg_dma_address(req->src); } - append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options); - if (likely(req->src == req->dst)) { dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry); out_options = in_options; @@ -1296,7 +1358,25 @@ static void init_skcipher_job(struct skcipher_request *req, out_options = LDST_SGF; } - append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options); + if (ctx->is_blob) { + cnstr_desc_skcipher_enc_dec(desc, &ctx->cdata, + src_dma, dst_dma, req->cryptlen + ivsize, + in_options, out_options, + ivsize, encrypt); + + desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE); + + cnstr_desc_protected_blob_decap(edesc->hw_desc, &ctx->cdata, desc_dma); + } else { + sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec; + ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma; + + len = desc_len(sh_desc); + init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); + append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options); + + append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options); + } } /* @@ -1817,6 +1897,7 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); u32 *desc; int ret = 0; + int len; /* * XTS is expected to return an error even for input length = 0 @@ -1842,8 +1923,12 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) crypto_skcipher_decrypt(&rctx->fallback_req); } + len = DESC_JOB_IO_LEN * CAAM_CMD_SZ; + if (ctx->is_blob) + len += CAAM_DESC_BYTES_MAX; + /* allocate extended descriptor */ - edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); + edesc = skcipher_edesc_alloc(req, len); if (IS_ERR(edesc)) return PTR_ERR(edesc); @@ -1885,6 +1970,27 @@ static int skcipher_decrypt(struct skcipher_request *req) } static struct caam_skcipher_alg driver_algs[] = { + { + .skcipher.base = { + .base = { + .cra_name = "cbc(paes)", + .cra_driver_name = "cbc-paes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = paes_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE + CAAM_BLOB_OVERHEAD + + CAAM_PKEY_HEADER, + .max_keysize = AES_MAX_KEY_SIZE + CAAM_BLOB_OVERHEAD + + CAAM_PKEY_HEADER, + .ivsize = AES_BLOCK_SIZE, + }, + .skcipher.op = { + .do_one_request = skcipher_do_one_req, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + }, { .skcipher.base = { .base = { diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c index 7571e1ac913b..04c1105eb1f5 100644 --- a/drivers/crypto/caam/caamalg_desc.c +++ b/drivers/crypto/caam/caamalg_desc.c @@ -2,12 +2,13 @@ /* * Shared descriptors for aead, skcipher algorithms * - * Copyright 2016-2019 NXP + * Copyright 2016-2019, 2025 NXP */ #include "compat.h" #include "desc_constr.h" #include "caamalg_desc.h" +#include /* * For aead functions, read payload and write payload, @@ -1364,6 +1365,84 @@ static inline void skcipher_append_src_dst(u32 *desc) append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); } +void cnstr_desc_skcipher_enc_dec(u32 * const desc, struct alginfo *cdata, + 
dma_addr_t src, dma_addr_t dst, unsigned int data_sz, + unsigned int in_options, unsigned int out_options, + unsigned int ivsize, const bool encrypt) +{ + u32 options = cdata->algtype | OP_ALG_AS_INIT; + + if (encrypt) + options |= OP_ALG_ENCRYPT; + else + options |= OP_ALG_DECRYPT; + + init_job_desc(desc, 0); + + append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL | + JUMP_COND_NOP | JUMP_TEST_ALL | 1); + + append_key(desc, cdata->protected_key_dma, cdata->plain_keylen, + CLASS_1 | KEY_DEST_CLASS_REG | cdata->key_cmd_opt); + + append_seq_in_ptr(desc, src, data_sz, in_options); + + append_seq_out_ptr(desc, dst, data_sz, out_options); + + /* Load IV, if there is one */ + if (ivsize) + append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | + LDST_CLASS_1_CCB); + + append_operation(desc, options); + + skcipher_append_src_dst(desc); + + /* Store IV */ + if (ivsize) + append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | + LDST_CLASS_1_CCB); + + print_hex_dump_debug("skcipher_enc_dec job desc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); +} +EXPORT_SYMBOL(cnstr_desc_skcipher_enc_dec); + +void cnstr_desc_protected_blob_decap(u32 * const desc, struct alginfo *cdata, + dma_addr_t next_desc_addr) +{ + u32 protected_store; + + init_job_desc(desc, 0); + + /* Load key modifier */ + append_load_as_imm(desc, KEYMOD, sizeof(KEYMOD) - 1, + LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY); + + append_seq_in_ptr_intlen(desc, cdata->key_dma, + cdata->plain_keylen + CAAM_BLOB_OVERHEAD, 0); + + append_seq_out_ptr_intlen(desc, cdata->protected_key_dma, + cdata->plain_keylen, 0); + + protected_store = OP_PCLID_BLOB | OP_PCL_BLOB_BLACK; + if ((cdata->key_cmd_opt >> KEY_EKT_OFFSET) & 1) + protected_store |= OP_PCL_BLOB_EKT; + + append_operation(desc, OP_TYPE_DECAP_PROTOCOL | protected_store); + + if (next_desc_addr) { + append_jump(desc, JUMP_TYPE_NONLOCAL | JUMP_TEST_ALL); + append_ptr(desc, next_desc_addr); + } + + print_hex_dump_debug("protected blob decap job desc@" __stringify(__LINE__) ":", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +} +EXPORT_SYMBOL(cnstr_desc_protected_blob_decap); + /** * cnstr_shdsc_skcipher_encap - skcipher encapsulation shared descriptor * @desc: pointer to buffer used for descriptor construction @@ -1391,7 +1470,8 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata, /* Load class1 key only */ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, - cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); + cdata->plain_keylen, CLASS_1 | KEY_DEST_CLASS_REG + | cdata->key_cmd_opt); /* Load nonce into CONTEXT1 reg */ if (is_rfc3686) { @@ -1466,7 +1546,8 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata, /* Load class1 key only */ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, - cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); + cdata->plain_keylen, CLASS_1 | KEY_DEST_CLASS_REG + | cdata->key_cmd_opt); /* Load nonce into CONTEXT1 reg */ if (is_rfc3686) { diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h index f2893393ba5e..323490a4a756 100644 --- a/drivers/crypto/caam/caamalg_desc.h +++ b/drivers/crypto/caam/caamalg_desc.h @@ -2,7 +2,7 @@ /* * Shared descriptors for aead, skcipher algorithms * - * Copyright 2016 NXP + * Copyright 2016, 2025 NXP */ #ifndef _CAAMALG_DESC_H_ @@ -48,6 +48,9 @@ #define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \ 16 * CAAM_CMD_SZ) +/* Key modifier for CAAM Protected blobs */ +#define KEYMOD "SECURE_KEY" + void 
cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, unsigned int icvsize, int era); @@ -113,4 +116,12 @@ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata); void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata); +void cnstr_desc_protected_blob_decap(u32 * const desc, struct alginfo *cdata, + dma_addr_t next_desc); + +void cnstr_desc_skcipher_enc_dec(u32 * const desc, struct alginfo *cdata, + dma_addr_t src, dma_addr_t dst, unsigned int data_sz, + unsigned int in_options, unsigned int out_options, + unsigned int ivsize, const bool encrypt); + #endif /* _CAAMALG_DESC_H_ */ diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index 824c94d44f94..2a29dd2c9c8a 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h @@ -3,7 +3,7 @@ * caam descriptor construction helper functions * * Copyright 2008-2012 Freescale Semiconductor, Inc. - * Copyright 2019 NXP + * Copyright 2019, 2025 NXP */ #ifndef DESC_CONSTR_H @@ -498,17 +498,23 @@ do { \ * @keylen: length of the provided algorithm key, in bytes * @keylen_pad: padded length of the provided algorithm key, in bytes * @key_dma: dma (bus) address where algorithm key resides + * @protected_key_dma: dma (bus) address where protected key resides * @key_virt: virtual address where algorithm key resides * @key_inline: true - key can be inlined in the descriptor; false - key is * referenced by the descriptor + * @plain_keylen: size of the key to be loaded by the CAAM + * @key_cmd_opt: optional parameters for KEY command */ struct alginfo { u32 algtype; unsigned int keylen; unsigned int keylen_pad; dma_addr_t key_dma; + dma_addr_t protected_key_dma; const void *key_virt; bool key_inline; + u32 plain_keylen; + u32 key_cmd_opt; }; /** -- cgit From 7fc25ed2bc35c2abe00ebd26af902b5688ead5c4 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 17 Oct 2025 16:00:50 +0800 Subject: crypto: sun8i-ss - Move j init earlier in sun8i_ss_hash_run MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With gcc-14 I get ../drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c: In function ‘sun8i_ss_hash_run’: ../drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c:631:21: warning: ‘j’ may be used uninitialized [-Wmaybe-uninitialized] 631 | j = hash_pad(bf, 4096, j, byte_count, true, bs); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ../drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c:493:13: note: ‘j’ was declared here 493 | int j, i, k, todo; | ^ Fix this false positive by moving the initialisation of j earlier. 
Signed-off-by: Herbert Xu --- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index 8bc08089f044..36a1ebca2e70 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -502,6 +502,7 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base); ss = algt->ss; + j = 0; digestsize = crypto_ahash_digestsize(tfm); if (digestsize == SHA224_DIGEST_SIZE) @@ -536,7 +537,6 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) goto err_dma_result; } - j = 0; len = areq->nbytes; sg = areq->src; i = 0; -- cgit From e7066160f5b4187ad9869b712fa7a35d3d5be6b9 Mon Sep 17 00:00:00 2001 From: nieweiqiang Date: Sat, 18 Oct 2025 19:27:39 +0800 Subject: crypto: hisilicon/qm - restore original qos values When the new qos valus setting fails, restore to the original qos values. Fixes: 72b010dc33b9 ("crypto: hisilicon/qm - supports writing QoS int the host") Signed-off-by: nieweiqiang Signed-off-by: Chenghai Huang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index a5b96adf2d1e..30e44cfb57ee 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -3678,6 +3678,7 @@ static void qm_clear_vft_config(struct hisi_qm *qm) static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos) { struct device *dev = &qm->pdev->dev; + struct qm_shaper_factor t_factor; u32 ir = qos * QM_QOS_RATE; int ret, total_vfs, i; @@ -3685,6 +3686,7 @@ static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos) if (fun_index > total_vfs) return -EINVAL; + memcpy(&t_factor, &qm->factor[fun_index], sizeof(t_factor)); qm->factor[fun_index].func_qos = qos; ret = qm_get_shaper_para(ir, &qm->factor[fun_index]); @@ -3698,11 +3700,21 @@ static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos) ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1); if (ret) { dev_err(dev, "type: %d, failed to set shaper vft!\n", i); - return -EINVAL; + goto back_func_qos; } } return 0; + +back_func_qos: + memcpy(&qm->factor[fun_index], &t_factor, sizeof(t_factor)); + for (i--; i >= ALG_TYPE_0; i--) { + ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1); + if (ret) + dev_err(dev, "failed to restore shaper vft during rollback!\n"); + } + + return -EINVAL; } static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index) -- cgit From 4fbfd7b206b1aa3fedfe8e82a8d3b8daca007d57 Mon Sep 17 00:00:00 2001 From: T Pratham Date: Wed, 22 Oct 2025 23:15:39 +0530 Subject: crypto: ti - Add support for AES-XTS in DTHEv2 driver Add support for XTS mode of operation for AES algorithm in the AES Engine of the DTHEv2 hardware cryptographic engine. 
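XTS takes a double-length key: the first half is the data-encryption key and the second half the tweak key. A minimal sketch of the split done at setkey time, assuming keylen has already been validated as twice a legal AES key size (illustrative, not the driver's exact code):

  /* keylen covers both halves, e.g. 64 bytes for XTS-AES-256 */
  unsigned int half = keylen / 2;
  const u8 *key1 = key;           /* programmed into the KEY1_x registers */
  const u8 *key2 = key + half;    /* programmed into the KEY2_x registers */

Requests whose length is not a multiple of AES_BLOCK_SIZE are handed to the xts(aes) software fallback, since the engine does not implement ciphertext stealing.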
Signed-off-by: T Pratham Signed-off-by: Herbert Xu --- drivers/crypto/ti/Kconfig | 1 + drivers/crypto/ti/dthev2-aes.c | 137 ++++++++++++++++++++++++++++++++++++-- drivers/crypto/ti/dthev2-common.h | 10 ++- 3 files changed, 141 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ti/Kconfig b/drivers/crypto/ti/Kconfig index d4f91c1e0cb5..a3692ceec49b 100644 --- a/drivers/crypto/ti/Kconfig +++ b/drivers/crypto/ti/Kconfig @@ -6,6 +6,7 @@ config CRYPTO_DEV_TI_DTHEV2 select CRYPTO_SKCIPHER select CRYPTO_ECB select CRYPTO_CBC + select CRYPTO_XTS help This enables support for the TI DTHE V2 hw cryptography engine which can be found on TI K3 SOCs. Selecting this enables use diff --git a/drivers/crypto/ti/dthev2-aes.c b/drivers/crypto/ti/dthev2-aes.c index 3547c41fa4ed..156729ccc50e 100644 --- a/drivers/crypto/ti/dthev2-aes.c +++ b/drivers/crypto/ti/dthev2-aes.c @@ -25,6 +25,7 @@ // AES Engine #define DTHE_P_AES_BASE 0x7000 + #define DTHE_P_AES_KEY1_0 0x0038 #define DTHE_P_AES_KEY1_1 0x003C #define DTHE_P_AES_KEY1_2 0x0030 @@ -33,6 +34,16 @@ #define DTHE_P_AES_KEY1_5 0x002C #define DTHE_P_AES_KEY1_6 0x0020 #define DTHE_P_AES_KEY1_7 0x0024 + +#define DTHE_P_AES_KEY2_0 0x0018 +#define DTHE_P_AES_KEY2_1 0x001C +#define DTHE_P_AES_KEY2_2 0x0010 +#define DTHE_P_AES_KEY2_3 0x0014 +#define DTHE_P_AES_KEY2_4 0x0008 +#define DTHE_P_AES_KEY2_5 0x000C +#define DTHE_P_AES_KEY2_6 0x0000 +#define DTHE_P_AES_KEY2_7 0x0004 + #define DTHE_P_AES_IV_IN_0 0x0040 #define DTHE_P_AES_IV_IN_1 0x0044 #define DTHE_P_AES_IV_IN_2 0x0048 @@ -52,6 +63,7 @@ enum aes_ctrl_mode_masks { AES_CTRL_ECB_MASK = 0x00, AES_CTRL_CBC_MASK = BIT(5), + AES_CTRL_XTS_MASK = BIT(12) | BIT(11), }; #define DTHE_AES_CTRL_MODE_CLEAR_MASK ~GENMASK(28, 5) @@ -88,6 +100,31 @@ static int dthe_cipher_init_tfm(struct crypto_skcipher *tfm) return 0; } +static int dthe_cipher_xts_init_tfm(struct crypto_skcipher *tfm) +{ + struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct dthe_data *dev_data = dthe_get_dev(ctx); + + ctx->dev_data = dev_data; + ctx->keylen = 0; + + ctx->skcipher_fb = crypto_alloc_sync_skcipher("xts(aes)", 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->skcipher_fb)) { + dev_err(dev_data->dev, "fallback driver xts(aes) couldn't be loaded\n"); + return PTR_ERR(ctx->skcipher_fb); + } + + return 0; +} + +static void dthe_cipher_xts_exit_tfm(struct crypto_skcipher *tfm) +{ + struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + + crypto_free_sync_skcipher(ctx->skcipher_fb); +} + static int dthe_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); @@ -119,6 +156,27 @@ static int dthe_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key, unsig return dthe_aes_setkey(tfm, key, keylen); } +static int dthe_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) +{ + struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (keylen != 2 * AES_KEYSIZE_128 && + keylen != 2 * AES_KEYSIZE_192 && + keylen != 2 * AES_KEYSIZE_256) + return -EINVAL; + + ctx->aes_mode = DTHE_AES_XTS; + ctx->keylen = keylen / 2; + memcpy(ctx->key, key, keylen); + + crypto_sync_skcipher_clear_flags(ctx->skcipher_fb, CRYPTO_TFM_REQ_MASK); + crypto_sync_skcipher_set_flags(ctx->skcipher_fb, + crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + + return crypto_sync_skcipher_setkey(ctx->skcipher_fb, key, keylen); +} + static void dthe_aes_set_ctrl_key(struct dthe_tfm_ctx *ctx, struct dthe_aes_req_ctx *rctx, u32 *iv_in) @@ -141,6 
+199,24 @@ static void dthe_aes_set_ctrl_key(struct dthe_tfm_ctx *ctx, writel_relaxed(ctx->key[7], aes_base_reg + DTHE_P_AES_KEY1_7); } + if (ctx->aes_mode == DTHE_AES_XTS) { + size_t key2_offset = ctx->keylen / sizeof(u32); + + writel_relaxed(ctx->key[key2_offset + 0], aes_base_reg + DTHE_P_AES_KEY2_0); + writel_relaxed(ctx->key[key2_offset + 1], aes_base_reg + DTHE_P_AES_KEY2_1); + writel_relaxed(ctx->key[key2_offset + 2], aes_base_reg + DTHE_P_AES_KEY2_2); + writel_relaxed(ctx->key[key2_offset + 3], aes_base_reg + DTHE_P_AES_KEY2_3); + + if (ctx->keylen > AES_KEYSIZE_128) { + writel_relaxed(ctx->key[key2_offset + 4], aes_base_reg + DTHE_P_AES_KEY2_4); + writel_relaxed(ctx->key[key2_offset + 5], aes_base_reg + DTHE_P_AES_KEY2_5); + } + if (ctx->keylen == AES_KEYSIZE_256) { + writel_relaxed(ctx->key[key2_offset + 6], aes_base_reg + DTHE_P_AES_KEY2_6); + writel_relaxed(ctx->key[key2_offset + 7], aes_base_reg + DTHE_P_AES_KEY2_7); + } + } + if (rctx->enc) ctrl_val |= DTHE_AES_CTRL_DIR_ENC; @@ -160,6 +236,9 @@ static void dthe_aes_set_ctrl_key(struct dthe_tfm_ctx *ctx, case DTHE_AES_CBC: ctrl_val |= AES_CTRL_CBC_MASK; break; + case DTHE_AES_XTS: + ctrl_val |= AES_CTRL_XTS_MASK; + break; } if (iv_in) { @@ -315,24 +394,45 @@ aes_err: local_bh_disable(); crypto_finalize_skcipher_request(dev_data->engine, req, ret); local_bh_enable(); - return ret; + return 0; } static int dthe_aes_crypt(struct skcipher_request *req) { struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req); struct dthe_data *dev_data = dthe_get_dev(ctx); struct crypto_engine *engine; /* - * If data is not a multiple of AES_BLOCK_SIZE, need to return -EINVAL - * If data length input is zero, no need to do any operation. + * If data is not a multiple of AES_BLOCK_SIZE: + * - need to return -EINVAL for ECB, CBC as they are block ciphers + * - need to fallback to software as H/W doesn't support Ciphertext Stealing for XTS */ - if (req->cryptlen % AES_BLOCK_SIZE) + if (req->cryptlen % AES_BLOCK_SIZE) { + if (ctx->aes_mode == DTHE_AES_XTS) { + SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->skcipher_fb); + + skcipher_request_set_callback(subreq, skcipher_request_flags(req), + req->base.complete, req->base.data); + skcipher_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen, req->iv); + + return rctx->enc ? crypto_skcipher_encrypt(subreq) : + crypto_skcipher_decrypt(subreq); + } return -EINVAL; + } - if (req->cryptlen == 0) + /* + * If data length input is zero, no need to do any operation. + * Except for XTS mode, where data length should be non-zero. 
+ */ + if (req->cryptlen == 0) { + if (ctx->aes_mode == DTHE_AES_XTS) + return -EINVAL; return 0; + } engine = dev_data->engine; return crypto_transfer_skcipher_request_to_engine(engine, req); @@ -399,7 +499,32 @@ static struct skcipher_engine_alg cipher_algs[] = { .cra_module = THIS_MODULE, }, .op.do_one_request = dthe_aes_run, - } /* CBC AES */ + }, /* CBC AES */ + { + .base.init = dthe_cipher_xts_init_tfm, + .base.exit = dthe_cipher_xts_exit_tfm, + .base.setkey = dthe_aes_xts_setkey, + .base.encrypt = dthe_aes_encrypt, + .base.decrypt = dthe_aes_decrypt, + .base.min_keysize = AES_MIN_KEY_SIZE * 2, + .base.max_keysize = AES_MAX_KEY_SIZE * 2, + .base.ivsize = AES_IV_SIZE, + .base.base = { + .cra_name = "xts(aes)", + .cra_driver_name = "xts-aes-dthev2", + .cra_priority = 299, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_alignmask = AES_BLOCK_SIZE - 1, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct dthe_tfm_ctx), + .cra_reqsize = sizeof(struct dthe_aes_req_ctx), + .cra_module = THIS_MODULE, + }, + .op.do_one_request = dthe_aes_run, + }, /* XTS AES */ }; int dthe_register_aes_algs(void) diff --git a/drivers/crypto/ti/dthev2-common.h b/drivers/crypto/ti/dthev2-common.h index 68c94acda8aa..c7a06a4c353f 100644 --- a/drivers/crypto/ti/dthev2-common.h +++ b/drivers/crypto/ti/dthev2-common.h @@ -27,10 +27,16 @@ #define DTHE_REG_SIZE 4 #define DTHE_DMA_TIMEOUT_MS 2000 +/* + * Size of largest possible key (of all algorithms) to be stored in dthe_tfm_ctx + * This is currently the keysize of XTS-AES-256 which is 512 bits (64 bytes) + */ +#define DTHE_MAX_KEYSIZE (AES_MAX_KEY_SIZE * 2) enum dthe_aes_mode { DTHE_AES_ECB = 0, DTHE_AES_CBC, + DTHE_AES_XTS, }; /* Driver specific struct definitions */ @@ -73,12 +79,14 @@ struct dthe_list { * @keylen: AES key length * @key: AES key * @aes_mode: AES mode + * @skcipher_fb: Fallback crypto skcipher handle for AES-XTS mode */ struct dthe_tfm_ctx { struct dthe_data *dev_data; unsigned int keylen; - u32 key[AES_KEYSIZE_256 / sizeof(u32)]; + u32 key[DTHE_MAX_KEYSIZE / sizeof(u32)]; enum dthe_aes_mode aes_mode; + struct crypto_sync_skcipher *skcipher_fb; }; /** -- cgit From f5e297a112fa0dd31e09246709c859bb38dfba0b Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Fri, 24 Oct 2025 14:35:07 -0700 Subject: crypto: qce - Provide dev_err_probe() status on DMA failure On multiple occasions the qce device have shown up in devices_deferred, without the explanation that this came from the failure to acquire the DMA channels from the associated BAM. Use dev_err_probe() to associate this context with the failure to faster pinpoint the culprit when this happens in the future. 
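dev_err_probe() records the message as the deferral reason for -EPROBE_DEFER (so it shows up in /sys/kernel/debug/devices_deferred), prints it for real errors, and returns the error code unchanged, keeping each call site to a single statement. A minimal usage sketch (illustrative):

  chan = dma_request_chan(dev, "tx");
  if (IS_ERR(chan))
          return dev_err_probe(dev, PTR_ERR(chan),
                               "Failed to get TX DMA channel\n");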
Signed-off-by: Bjorn Andersson Reviewed-by: Abel Vesa Reviewed-by: David Heidelberg Reviewed-by: Konrad Dybcio Signed-off-by: Herbert Xu --- drivers/crypto/qce/dma.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c index 1dec7aea852d..68cafd4741ad 100644 --- a/drivers/crypto/qce/dma.c +++ b/drivers/crypto/qce/dma.c @@ -24,11 +24,13 @@ int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma) dma->txchan = dma_request_chan(dev, "tx"); if (IS_ERR(dma->txchan)) - return PTR_ERR(dma->txchan); + return dev_err_probe(dev, PTR_ERR(dma->txchan), + "Failed to get TX DMA channel\n"); dma->rxchan = dma_request_chan(dev, "rx"); if (IS_ERR(dma->rxchan)) { - ret = PTR_ERR(dma->rxchan); + ret = dev_err_probe(dev, PTR_ERR(dma->rxchan), + "Failed to get RX DMA channel\n"); goto error_rx; } -- cgit From f5a332980a6877c79863fbf5c16ae86555627ba6 Mon Sep 17 00:00:00 2001 From: nieweiqiang Date: Sat, 25 Oct 2025 18:12:55 +0800 Subject: crypto: hisilicon/qm - add the save operation of eqe and aeqe The eqe and aeqe are device updated values that include the valid bit and queue number. In the current process, there is no memory barrier added, so it cannot be guaranteed that the valid bit is read before other processes are executed. Since eqe and aeqe are only 4 bytes and the device writes them to memory in a single operation, saving the values of eqe and aeqe ensures that the valid bit and queue number read by the CPU were written by the device simultaneously. Signed-off-by: nieweiqiang Signed-off-by: Chenghai Huang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 30e44cfb57ee..8274da8b37ca 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -64,10 +64,10 @@ #define QM_EQE_AEQE_SIZE (2UL << 12) #define QM_EQC_PHASE_SHIFT 16 -#define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1) +#define QM_EQE_PHASE(dw0) (((dw0) >> 16) & 0x1) #define QM_EQE_CQN_MASK GENMASK(15, 0) -#define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1) +#define QM_AEQE_PHASE(dw0) (((dw0) >> 16) & 0x1) #define QM_AEQE_TYPE_SHIFT 17 #define QM_AEQE_TYPE_MASK 0xf #define QM_AEQE_CQN_MASK GENMASK(15, 0) @@ -976,23 +976,23 @@ static void qm_get_complete_eqe_num(struct hisi_qm *qm) { struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; struct hisi_qm_poll_data *poll_data = NULL; + u32 dw0 = le32_to_cpu(eqe->dw0); u16 eq_depth = qm->eq_depth; u16 cqn, eqe_num = 0; - if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) { + if (QM_EQE_PHASE(dw0) != qm->status.eqc_phase) { atomic64_inc(&qm->debug.dfx.err_irq_cnt); qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); return; } - cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; + cqn = dw0 & QM_EQE_CQN_MASK; if (unlikely(cqn >= qm->qp_num)) return; poll_data = &qm->poll_data[cqn]; - while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { - cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; - poll_data->qp_finish_id[eqe_num] = cqn; + while (QM_EQE_PHASE(dw0) != qm->status.eqc_phase) { + poll_data->qp_finish_id[eqe_num] = dw0 & QM_EQE_CQN_MASK; eqe_num++; if (qm->status.eq_head == eq_depth - 1) { @@ -1006,6 +1006,8 @@ static void qm_get_complete_eqe_num(struct hisi_qm *qm) if (eqe_num == (eq_depth >> 1) - 1) break; + + dw0 = le32_to_cpu(eqe->dw0); } poll_data->eqe_num 
= eqe_num; @@ -1098,15 +1100,15 @@ static irqreturn_t qm_aeq_thread(int irq, void *data) { struct hisi_qm *qm = data; struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head; + u32 dw0 = le32_to_cpu(aeqe->dw0); u16 aeq_depth = qm->aeq_depth; u32 type, qp_id; atomic64_inc(&qm->debug.dfx.aeq_irq_cnt); - while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) { - type = (le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT) & - QM_AEQE_TYPE_MASK; - qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK; + while (QM_AEQE_PHASE(dw0) == qm->status.aeqc_phase) { + type = (dw0 >> QM_AEQE_TYPE_SHIFT) & QM_AEQE_TYPE_MASK; + qp_id = dw0 & QM_AEQE_CQN_MASK; switch (type) { case QM_EQ_OVERFLOW: @@ -1134,6 +1136,7 @@ static irqreturn_t qm_aeq_thread(int irq, void *data) aeqe++; qm->status.aeq_head++; } + dw0 = le32_to_cpu(aeqe->dw0); } qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); -- cgit From 1b5645e454615f5bdae432d0d0d4f94ad0d5c09e Mon Sep 17 00:00:00 2001 From: nieweiqiang Date: Sat, 25 Oct 2025 18:12:56 +0800 Subject: crypto: hisilicon/qm - add concurrency protection for variable err_threshold The isolate_strategy_store function is not protected by a lock. If sysfs operations and functions that depend on the err_threshold variable,such as qm_hw_err_isolate(), execute concurrently, the outcome will be unpredictable. Therefore, concurrency protection should be added for the err_threshold variable. Signed-off-by: nieweiqiang Signed-off-by: Chenghai Huang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 8274da8b37ca..c58f67567c12 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -2655,10 +2655,10 @@ static int qm_hw_err_isolate(struct hisi_qm *qm) } } list_add(&hw_err->list, &isolate->qm_hw_errs); - mutex_unlock(&isolate->isolate_lock); if (count >= isolate->err_threshold) isolate->is_isolate = true; + mutex_unlock(&isolate->isolate_lock); return 0; } @@ -2667,12 +2667,10 @@ static void qm_hw_err_destroy(struct hisi_qm *qm) { struct qm_hw_err *err, *tmp; - mutex_lock(&qm->isolate_data.isolate_lock); list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) { list_del(&err->list); kfree(err); } - mutex_unlock(&qm->isolate_data.isolate_lock); } static enum uacce_dev_state hisi_qm_get_isolate_state(struct uacce_device *uacce) @@ -2700,10 +2698,12 @@ static int hisi_qm_isolate_threshold_write(struct uacce_device *uacce, u32 num) if (qm->isolate_data.is_isolate) return -EPERM; + mutex_lock(&qm->isolate_data.isolate_lock); qm->isolate_data.err_threshold = num; /* After the policy is updated, need to reset the hardware err list */ qm_hw_err_destroy(qm); + mutex_unlock(&qm->isolate_data.isolate_lock); return 0; } @@ -2740,7 +2740,10 @@ static void qm_remove_uacce(struct hisi_qm *qm) struct uacce_device *uacce = qm->uacce; if (qm->use_sva) { + mutex_lock(&qm->isolate_data.isolate_lock); qm_hw_err_destroy(qm); + mutex_unlock(&qm->isolate_data.isolate_lock); + uacce_remove(uacce); qm->uacce = NULL; } -- cgit From 51996331f713b2dfdbc47210c40872988a31f1dd Mon Sep 17 00:00:00 2001 From: nieweiqiang Date: Sat, 25 Oct 2025 18:12:57 +0800 Subject: crypto: hisilicon/sgl - remove unnecessary checks for curr_hw_sgl error Before calling acc_get_sgl(), the mem_block has already been created. acc_get_sgl() will not return NULL or any other error. so the return value check can be removed. 
Signed-off-by: nieweiqiang Signed-off-by: Chenghai Huang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sgl.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c index 7a9ef2a9972a..24c7b6ab285b 100644 --- a/drivers/crypto/hisilicon/sgl.c +++ b/drivers/crypto/hisilicon/sgl.c @@ -245,11 +245,6 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, struct scatterlist *sgl, } curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma); - if (IS_ERR(curr_hw_sgl)) { - dev_err(dev, "Get SGL error!\n"); - ret = -ENOMEM; - goto err_unmap; - } curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr); curr_hw_sge = curr_hw_sgl->sge_entries; -- cgit From d9124947478319cea3537ca033bae582e97e70d1 Mon Sep 17 00:00:00 2001 From: nieweiqiang Date: Sat, 25 Oct 2025 18:12:58 +0800 Subject: crypto: hisilicon/qm - add missing default in switch in qm_vft_data_cfg Add default case to avoid warnings and meet code style requirements. Signed-off-by: nieweiqiang Signed-off-by: Chenghai Huang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index c58f67567c12..8533384e3eaa 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -1286,6 +1286,13 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base, (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT); } break; + /* + * Note: The current logic only needs to handle the above three types + * If new types are added, they need to be supplemented here, + * otherwise undefined behavior may occur. + */ + default: + break; } } -- cgit From d5b59ec33c5b6f6adb016d6210f8f5123be35b36 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Sun, 26 Oct 2025 02:57:07 +0100 Subject: crypto: qat - use simple_strtoull to improve qat_uclo_parse_num Replace the manual string copying and parsing logic with a call to simple_strtoull() to simplify and improve qat_uclo_parse_num(). Ensure that the parsed number does not exceed UINT_MAX, and add an approximate upper-bound check (no more than 19 digits) to guard against overflow. 
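The 19-digit cap is what keeps the conversion itself from wrapping: ULLONG_MAX is a 20-digit number, so any accepted string of at most 19 decimal digits fits in an unsigned long long, and the comparison against UINT_MAX then rejects values too large for the caller. Illustrative behaviour of the reworked helper:

  qat_uclo_parse_num("4294967295", &num);   /* 0: fits in unsigned int     */
  qat_uclo_parse_num("4294967296", &num);   /* -EINVAL: exceeds UINT_MAX   */
  qat_uclo_parse_num("abc", &num);          /* -EINVAL: no digits consumed */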
Signed-off-by: Thorsten Blum Acked-by: Giovanni Cabiddu Reviewed-by: Andy Shevchenko Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/qat_uclo.c | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c index 18c3e4416dc5..06d49cb781ae 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c @@ -200,20 +200,12 @@ qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle, static int qat_uclo_parse_num(char *str, unsigned int *num) { - char buf[16] = {0}; - unsigned long ae = 0; - int i; - - strscpy(buf, str, sizeof(buf)); - for (i = 0; i < 16; i++) { - if (!isdigit(buf[i])) { - buf[i] = '\0'; - break; - } - } - if ((kstrtoul(buf, 10, &ae))) - return -EFAULT; + unsigned long long ae; + char *end; + ae = simple_strtoull(str, &end, 10); + if (ae > UINT_MAX || str == end || (end - str) > 19) + return -EINVAL; *num = (unsigned int)ae; return 0; } -- cgit From 426b1a1bdfcec75457104f50443e7d7dc024e5e8 Mon Sep 17 00:00:00 2001 From: Harsh Jain Date: Wed, 29 Oct 2025 12:38:38 +0530 Subject: crypto: xilinx - Use %pe to print PTR_ERR Fix cocci warnings to use %pe to print PTR_ERR(). Reported-by: kernel test robot Reported-by: Julia Lawall Closes: https://lore.kernel.org/r/202510231229.Z6TduqZy-lkp@intel.com/ Signed-off-by: Harsh Jain Signed-off-by: Herbert Xu --- drivers/crypto/xilinx/xilinx-trng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/xilinx/xilinx-trng.c b/drivers/crypto/xilinx/xilinx-trng.c index b89a2f70bf82..db0fbb28ff32 100644 --- a/drivers/crypto/xilinx/xilinx-trng.c +++ b/drivers/crypto/xilinx/xilinx-trng.c @@ -345,7 +345,7 @@ static int xtrng_probe(struct platform_device *pdev) rng->dev = &pdev->dev; rng->rng_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(rng->rng_base)) { - dev_err(&pdev->dev, "Failed to map resource %ld\n", PTR_ERR(rng->rng_base)); + dev_err(&pdev->dev, "Failed to map resource %pe\n", rng->rng_base); return PTR_ERR(rng->rng_base); } -- cgit From 457be301fc6f8731c825b19b9be55a8d72c624b8 Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Wed, 29 Oct 2025 01:25:31 -0700 Subject: crypto: qce - fix version check The previous version check made it difficult to support newer major versions (e.g., v6.0) without adding extra checks/macros. Update the logic to only reject v5.0 and allow future versions without additional changes. Signed-off-by: Gaurav Kashyap Signed-off-by: Jingyi Wang Reviewed-by: Bjorn Andersson Signed-off-by: Herbert Xu --- drivers/crypto/qce/core.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c index e95e84486d9a..b966f3365b7d 100644 --- a/drivers/crypto/qce/core.c +++ b/drivers/crypto/qce/core.c @@ -21,7 +21,6 @@ #include "sha.h" #include "aead.h" -#define QCE_MAJOR_VERSION5 0x05 #define QCE_QUEUE_LENGTH 1 #define QCE_DEFAULT_MEM_BANDWIDTH 393600 @@ -161,7 +160,7 @@ static int qce_check_version(struct qce_device *qce) * the driver does not support v5 with minor 0 because it has special * alignment requirements. 
*/ - if (major != QCE_MAJOR_VERSION5 || minor == 0) + if (major == 5 && minor == 0) return -ENODEV; qce->burst_size = QCE_BAM_BURST_SIZE; -- cgit From 9fc6290117259a8dbf8247cb54559df62fd1550f Mon Sep 17 00:00:00 2001 From: "Mario Limonciello (AMD)" Date: Wed, 29 Oct 2025 11:15:01 -0500 Subject: crypto: ccp - Add support for PCI device 0x115A PCI device 0x115A is similar to pspv5, except it doesn't have platform access mailbox support. Signed-off-by: Mario Limonciello (AMD) Acked-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/sp-pci.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index e7bb803912a6..8891ceee1d7d 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -459,6 +459,17 @@ static const struct psp_vdata pspv6 = { .intsts_reg = 0x10514, /* P2CMSG_INTSTS */ }; +static const struct psp_vdata pspv7 = { + .tee = &teev2, + .cmdresp_reg = 0x10944, /* C2PMSG_17 */ + .cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */ + .cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */ + .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */ + .feature_reg = 0x109fc, /* C2PMSG_63 */ + .inten_reg = 0x10510, /* P2CMSG_INTEN */ + .intsts_reg = 0x10514, /* P2CMSG_INTSTS */ +}; + #endif static const struct sp_dev_vdata dev_vdata[] = { @@ -525,6 +536,13 @@ static const struct sp_dev_vdata dev_vdata[] = { .psp_vdata = &pspv6, #endif }, + { /* 9 */ + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_PSP + .psp_vdata = &pspv7, +#endif + }, + }; static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] }, @@ -539,6 +557,7 @@ static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] }, { PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] }, { PCI_VDEVICE(AMD, 0x17D8), (kernel_ulong_t)&dev_vdata[8] }, + { PCI_VDEVICE(AMD, 0x115A), (kernel_ulong_t)&dev_vdata[9] }, /* Last entry must be zero */ { 0, } }; -- cgit From d633730bb3873578a00fde4b97f9ac62a1be8d34 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Sat, 1 Nov 2025 15:04:42 +0100 Subject: crypto: octeontx2 - Replace deprecated strcpy in cpt_ucode_load_fw strcpy() is deprecated; use the safer strscpy() instead. The destination buffer is only zero-initialized for the first iteration and since strscpy() guarantees its NUL termination anyway, remove zero-initializing 'eng_type'. 
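With the two-argument form, strscpy() takes the bound from sizeof() of the destination array and always NUL-terminates the result (truncating the source if needed), which is what makes the explicit zero-initialisation redundant. A minimal sketch (illustrative):

  char eng_type[8];

  /* bounded by sizeof(eng_type); result is always NUL-terminated */
  strscpy(eng_type, get_eng_type_str(e));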
Link: https://github.com/KSPP/linux/issues/88 Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c index ebdf4efa09d4..b5cc5401f704 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c @@ -3,6 +3,7 @@ #include #include +#include #include #include "otx2_cptpf_ucode.h" #include "otx2_cpt_common.h" @@ -458,13 +459,13 @@ static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info, u16 rid) { char filename[OTX2_CPT_NAME_LENGTH]; - char eng_type[8] = {0}; + char eng_type[8]; int ret, e, i; INIT_LIST_HEAD(&fw_info->ucodes); for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) { - strcpy(eng_type, get_eng_type_str(e)); + strscpy(eng_type, get_eng_type_str(e)); for (i = 0; i < strlen(eng_type); i++) eng_type[i] = tolower(eng_type[i]); -- cgit From d52e9b8843749f446dff2afcfc5da3d8bbe806cc Mon Sep 17 00:00:00 2001 From: Karina Yankevich Date: Wed, 5 Nov 2025 17:52:04 +0300 Subject: crypto: rockchip - drop redundant crypto_skcipher_ivsize() calls The function already initialized the ivsize variable at the point of declaration, let's use it instead of calling crypto_skcipher_ivsize() extra couple times. Found by Linux Verification Center (linuxtesting.org) with SVACE. Fixes: 57d67c6e8219 ("crypto: rockchip - rework by using crypto_engine") Signed-off-by: Karina Yankevich Reviewed-by: Sergey Shtylyov Signed-off-by: Herbert Xu --- drivers/crypto/rockchip/rk3288_crypto_skcipher.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c index 9393e10671c2..e80f9148c012 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c @@ -321,8 +321,7 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req) algt->stat_req++; rkc->nreq++; - ivsize = crypto_skcipher_ivsize(tfm); - if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { + if (areq->iv && ivsize > 0) { if (rctx->mode & RK_CRYPTO_DEC) { offset = areq->cryptlen - ivsize; scatterwalk_map_and_copy(rctx->backup_iv, areq->src, -- cgit From 5f8c6c931827cace83c876e1f13ee488fe8618b7 Mon Sep 17 00:00:00 2001 From: Marco Crivellari Date: Thu, 6 Nov 2025 17:37:58 +0100 Subject: crypto: atmel-i2c - add WQ_PERCPU to alloc_workqueue users MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently if a user enqueues a work item using schedule_delayed_work() the used wq is "system_wq" (per-cpu wq) while queue_delayed_work() use WORK_CPU_UNBOUND (used when a cpu is not specified). The same applies to schedule_work() that is using system_wq and queue_work(), that makes use again of WORK_CPU_UNBOUND. This lack of consistency cannot be addressed without refactoring the API. alloc_workqueue() treats all queues as per-CPU by default, while unbound workqueues must opt-in via WQ_UNBOUND. This default is suboptimal: most workloads benefit from unbound queues, allowing the scheduler to place worker threads where they’re needed and reducing noise when CPUs are isolated. 
This continues the effort to refactor workqueue APIs, which began with the introduction of new workqueues and a new alloc_workqueue flag in: commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq") commit 930c2ea566af ("workqueue: Add new WQ_PERCPU flag") This change adds a new WQ_PERCPU flag to explicitly request alloc_workqueue() to be per-cpu when WQ_UNBOUND has not been specified. With the introduction of the WQ_PERCPU flag (equivalent to !WQ_UNBOUND), any alloc_workqueue() caller that doesn’t explicitly specify WQ_UNBOUND must now use WQ_PERCPU. Once migration is complete, WQ_UNBOUND can be removed and unbound will become the implicit default. Suggested-by: Tejun Heo Signed-off-by: Marco Crivellari Signed-off-by: Herbert Xu --- drivers/crypto/atmel-i2c.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c index a895e4289efa..9688d116d07e 100644 --- a/drivers/crypto/atmel-i2c.c +++ b/drivers/crypto/atmel-i2c.c @@ -402,7 +402,7 @@ EXPORT_SYMBOL(atmel_i2c_probe); static int __init atmel_i2c_init(void) { - atmel_wq = alloc_workqueue("atmel_wq", 0, 0); + atmel_wq = alloc_workqueue("atmel_wq", WQ_PERCPU, 0); return atmel_wq ? 0 : -ENOMEM; } -- cgit From 7e8f232ae8d7ceffcb65b949600cb1529dd92663 Mon Sep 17 00:00:00 2001 From: Marco Crivellari Date: Thu, 6 Nov 2025 17:42:36 +0100 Subject: crypto: cavium/nitrox - add WQ_PERCPU to alloc_workqueue users MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently if a user enqueues a work item using schedule_delayed_work() the used wq is "system_wq" (per-cpu wq) while queue_delayed_work() use WORK_CPU_UNBOUND (used when a cpu is not specified). The same applies to schedule_work() that is using system_wq and queue_work(), that makes use again of WORK_CPU_UNBOUND. This lack of consistency cannot be addressed without refactoring the API. alloc_workqueue() treats all queues as per-CPU by default, while unbound workqueues must opt-in via WQ_UNBOUND. This default is suboptimal: most workloads benefit from unbound queues, allowing the scheduler to place worker threads where they’re needed and reducing noise when CPUs are isolated. This continues the effort to refactor workqueue APIs, which began with the introduction of new workqueues and a new alloc_workqueue flag in: commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq") commit 930c2ea566af ("workqueue: Add new WQ_PERCPU flag") This change adds a new WQ_PERCPU flag to explicitly request alloc_workqueue() to be per-cpu when WQ_UNBOUND has not been specified. With the introduction of the WQ_PERCPU flag (equivalent to !WQ_UNBOUND), any alloc_workqueue() caller that doesn’t explicitly specify WQ_UNBOUND must now use WQ_PERCPU. Once migration is complete, WQ_UNBOUND can be removed and unbound will become the implicit default. 
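The conversion itself is mechanical. Roughly, with an invented queue name not taken from any of the drivers touched here, a caller that used to rely on the implicit per-CPU default now states it explicitly:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	/* Before: alloc_workqueue("example_wq", 0, 0) was per-CPU only by
	 * default.  WQ_PERCPU spells that intent out; callers that want
	 * unbound behaviour pass WQ_UNBOUND instead, and flags such as
	 * WQ_MEM_RECLAIM are OR'ed in exactly as before. */
	example_wq = alloc_workqueue("example_wq", WQ_PERCPU, 0);
	return example_wq ? 0 : -ENOMEM;
}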
Suggested-by: Tejun Heo Signed-off-by: Marco Crivellari Signed-off-by: Herbert Xu --- drivers/crypto/cavium/nitrox/nitrox_mbx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c index d4e06999af9b..a6a76e50ba84 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c +++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c @@ -192,7 +192,7 @@ int nitrox_mbox_init(struct nitrox_device *ndev) } /* allocate pf2vf response workqueue */ - ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0); + ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", WQ_PERCPU, 0); if (!ndev->iov.pf2vf_wq) { kfree(ndev->iov.vfdev); ndev->iov.vfdev = NULL; -- cgit From cdd7bbce7b507bfa608e6c6abefdc43661c24035 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Fri, 7 Nov 2025 09:15:48 +0100 Subject: hwrng: bcm2835 - Move MODULE_DEVICE_TABLE() to table definition Convention is to place MODULE_DEVICE_TABLE() immediately after definition of the affected table, so one can easily spot missing such. There is on the other hand no benefits of putting MODULE_DEVICE_TABLE() far away. Reviewed-by: Florian Fainelli Signed-off-by: Krzysztof Kozlowski Signed-off-by: Herbert Xu --- drivers/char/hw_random/bcm2835-rng.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index aa2b135e3ee2..0b67cfd15b11 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -138,6 +138,7 @@ static const struct of_device_id bcm2835_rng_of_match[] = { { .compatible = "brcm,bcm6368-rng"}, {}, }; +MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match); static int bcm2835_rng_probe(struct platform_device *pdev) { @@ -191,8 +192,6 @@ static int bcm2835_rng_probe(struct platform_device *pdev) return err; } -MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match); - static const struct platform_device_id bcm2835_rng_devtype[] = { { .name = "bcm2835-rng" }, { .name = "bcm63xx-rng" }, -- cgit From 6b94eb68ad2147c7ed0e978b1924707f88922e4c Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Fri, 7 Nov 2025 09:15:49 +0100 Subject: hwrng: bcm2835 - Simplify with of_device_get_match_data() Driver's probe function matches against driver's of_device_id table, where each entry has non-NULL match data, so of_match_node() can be simplified with of_device_get_match_data(). 
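This and the following conversions all have the same shape. A rough sketch with made-up names, not the bcm2835 code:

#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Invented match data for illustration. */
struct example_of_data {
	bool mask_interrupts;
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_of_data *of_data;

	/* Looks the node up against the driver's own of_match_table, so
	 * the explicit of_match_node() call and the of_device_id pointer
	 * are no longer needed. */
	of_data = of_device_get_match_data(&pdev->dev);
	if (of_data && of_data->mask_interrupts)
		dev_dbg(&pdev->dev, "masking interrupts during reads\n");

	return 0;
}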
Reviewed-by: Florian Fainelli Signed-off-by: Krzysztof Kozlowski Signed-off-by: Herbert Xu --- drivers/char/hw_random/bcm2835-rng.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index 0b67cfd15b11..6d6ac409efcf 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -142,9 +142,7 @@ MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match); static int bcm2835_rng_probe(struct platform_device *pdev) { - const struct bcm2835_rng_of_data *of_data; struct device *dev = &pdev->dev; - const struct of_device_id *rng_id; struct bcm2835_rng_priv *priv; int err; @@ -172,12 +170,10 @@ static int bcm2835_rng_probe(struct platform_device *pdev) priv->rng.cleanup = bcm2835_rng_cleanup; if (dev_of_node(dev)) { - rng_id = of_match_node(bcm2835_rng_of_match, dev->of_node); - if (!rng_id) - return -EINVAL; + const struct bcm2835_rng_of_data *of_data; /* Check for rng init function, execute it */ - of_data = rng_id->data; + of_data = of_device_get_match_data(dev); if (of_data) priv->mask_interrupts = of_data->mask_interrupts; } -- cgit From c6c247ae336aa7a85e8400d60c1b2c4f2edf93d4 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Fri, 7 Nov 2025 09:15:50 +0100 Subject: crypto: artpec6 - Simplify with of_device_get_match_data() Driver's probe function matches against driver's of_device_id table, so of_match_node() can be simplified with of_device_get_match_data(). This requires changing the enum used in the driver match data entries to non-zero, to be able to recognize error case of of_device_get_match_data(). Signed-off-by: Krzysztof Kozlowski Acked-by: Jesper Nilsson Signed-off-by: Herbert Xu --- drivers/crypto/axis/artpec6_crypto.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index 75ee065da1ec..b04d6379244a 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -252,7 +252,7 @@ struct artpec6_crypto_dma_descriptors { }; enum artpec6_crypto_variant { - ARTPEC6_CRYPTO, + ARTPEC6_CRYPTO = 1, ARTPEC7_CRYPTO, }; @@ -2842,7 +2842,6 @@ MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match); static int artpec6_crypto_probe(struct platform_device *pdev) { - const struct of_device_id *match; enum artpec6_crypto_variant variant; struct artpec6_crypto *ac; struct device *dev = &pdev->dev; @@ -2853,12 +2852,10 @@ static int artpec6_crypto_probe(struct platform_device *pdev) if (artpec6_crypto_dev) return -ENODEV; - match = of_match_node(artpec6_crypto_of_match, dev->of_node); - if (!match) + variant = (enum artpec6_crypto_variant)of_device_get_match_data(dev); + if (!variant) return -EINVAL; - variant = (enum artpec6_crypto_variant)match->data; - base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); -- cgit From ec2054c1244c8aa192632a40a07a0d210d7116e1 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Fri, 7 Nov 2025 09:15:51 +0100 Subject: crypto: ccp - Constify 'dev_vdata' member sp_device->dev_vdata points to only const data (see 'static const struct sp_dev_vdata dev_vdata'), so can be made pointer to const for code safety. Update also sp_get_acpi_version() function which returns this pointer to 'pointer to const' for code readability, even though it is not needed. 
On the other hand, do not touch similar function sp_get_of_version() because it will be immediately removed in next patches. Acked-by: Tom Lendacky Signed-off-by: Krzysztof Kozlowski Signed-off-by: Herbert Xu --- drivers/crypto/ccp/sp-dev.h | 2 +- drivers/crypto/ccp/sp-platform.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h index 6f9d7063257d..1335a83fe052 100644 --- a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h @@ -95,7 +95,7 @@ struct sp_device { struct device *dev; - struct sp_dev_vdata *dev_vdata; + const struct sp_dev_vdata *dev_vdata; unsigned int ord; char name[SP_MAX_NAME_LEN]; diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c index 3933cac1694d..de8a8183efdb 100644 --- a/drivers/crypto/ccp/sp-platform.c +++ b/drivers/crypto/ccp/sp-platform.c @@ -63,13 +63,13 @@ static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev) return NULL; } -static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev) +static const struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev) { const struct acpi_device_id *match; match = acpi_match_device(sp_acpi_match, &pdev->dev); if (match && match->driver_data) - return (struct sp_dev_vdata *)match->driver_data; + return (const struct sp_dev_vdata *)match->driver_data; return NULL; } -- cgit From 4ae946a45dcdd0f99b3c233c6718e7ff0b183a2c Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Fri, 7 Nov 2025 09:15:52 +0100 Subject: crypto: ccp - Simplify with of_device_get_match_data() Driver's probe function matches against driver's of_device_id table, where each entry has non-NULL match data, so of_match_node() can be simplified with of_device_get_match_data(). Acked-by: Tom Lendacky Signed-off-by: Krzysztof Kozlowski Signed-off-by: Herbert Xu --- drivers/crypto/ccp/sp-platform.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c index de8a8183efdb..3f9843fa7782 100644 --- a/drivers/crypto/ccp/sp-platform.c +++ b/drivers/crypto/ccp/sp-platform.c @@ -52,17 +52,6 @@ static const struct of_device_id sp_of_match[] = { }; MODULE_DEVICE_TABLE(of, sp_of_match); -static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev) -{ - const struct of_device_id *match; - - match = of_match_node(sp_of_match, pdev->dev.of_node); - if (match && match->data) - return (struct sp_dev_vdata *)match->data; - - return NULL; -} - static const struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev) { const struct acpi_device_id *match; @@ -123,7 +112,7 @@ static int sp_platform_probe(struct platform_device *pdev) goto e_err; sp->dev_specific = sp_platform; - sp->dev_vdata = pdev->dev.of_node ? sp_get_of_version(pdev) + sp->dev_vdata = pdev->dev.of_node ? of_device_get_match_data(&pdev->dev) : sp_get_acpi_version(pdev); if (!sp->dev_vdata) { ret = -ENODEV; -- cgit From 054c7f7ad323fde38fd8d1a7858aae16330e4fc1 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Fri, 7 Nov 2025 09:15:53 +0100 Subject: crypto: cesa - Simplify with of_device_get_match_data() Driver's probe function matches against driver's of_device_id table, where each entry has non-NULL match data, so of_match_node() can be simplified with of_device_get_match_data(). 
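One wrinkle, visible in the artpec6 conversion above: when the match data is an enum value rather than a pointer to a structure, the NULL returned by of_device_get_match_data() on a failed lookup must stay distinguishable from a valid entry, which is why the first enumerator moves off zero. Roughly, with invented names:

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

enum example_variant {
	EXAMPLE_V1 = 1,		/* deliberately non-zero */
	EXAMPLE_V2,
};

static int example_variant_probe(struct platform_device *pdev)
{
	enum example_variant variant;

	variant = (unsigned long)of_device_get_match_data(&pdev->dev);
	if (!variant)		/* NULL match data: no or unknown variant */
		return -EINVAL;

	return 0;
}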
Signed-off-by: Krzysztof Kozlowski Signed-off-by: Herbert Xu --- drivers/crypto/marvell/cesa/cesa.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c index 9c21f5d835d2..301bdf239e7d 100644 --- a/drivers/crypto/marvell/cesa/cesa.c +++ b/drivers/crypto/marvell/cesa/cesa.c @@ -420,7 +420,6 @@ static int mv_cesa_probe(struct platform_device *pdev) { const struct mv_cesa_caps *caps = &orion_caps; const struct mbus_dram_target_info *dram; - const struct of_device_id *match; struct device *dev = &pdev->dev; struct mv_cesa_dev *cesa; struct mv_cesa_engine *engines; @@ -433,11 +432,9 @@ static int mv_cesa_probe(struct platform_device *pdev) } if (dev->of_node) { - match = of_match_node(mv_cesa_of_match_table, dev->of_node); - if (!match || !match->data) + caps = of_device_get_match_data(dev); + if (!caps) return -ENOTSUPP; - - caps = match->data; } cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL); -- cgit From 06c489ce5b5053d6f9eddac7ccc0d80839e11198 Mon Sep 17 00:00:00 2001 From: Marco Crivellari Date: Fri, 7 Nov 2025 12:23:54 +0100 Subject: crypto: qat - add WQ_PERCPU to alloc_workqueue users MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently if a user enqueues a work item using schedule_delayed_work() the used wq is "system_wq" (per-cpu wq) while queue_delayed_work() use WORK_CPU_UNBOUND (used when a cpu is not specified). The same applies to schedule_work() that is using system_wq and queue_work(), that makes use again of WORK_CPU_UNBOUND. This lack of consistency cannot be addressed without refactoring the API. alloc_workqueue() treats all queues as per-CPU by default, while unbound workqueues must opt-in via WQ_UNBOUND. This default is suboptimal: most workloads benefit from unbound queues, allowing the scheduler to place worker threads where they’re needed and reducing noise when CPUs are isolated. This continues the effort to refactor workqueue APIs, which began with the introduction of new workqueues and a new alloc_workqueue flag in: commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq") commit 930c2ea566af ("workqueue: Add new WQ_PERCPU flag") This change adds a new WQ_PERCPU flag to explicitly request alloc_workqueue() to be per-cpu when WQ_UNBOUND has not been specified. With the introduction of the WQ_PERCPU flag (equivalent to !WQ_UNBOUND), any alloc_workqueue() caller that doesn’t explicitly specify WQ_UNBOUND must now use WQ_PERCPU. Once migration is complete, WQ_UNBOUND can be removed and unbound will become the implicit default. 
Suggested-by: Tejun Heo Signed-off-by: Marco Crivellari Acked-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/adf_aer.c | 4 ++-- drivers/crypto/intel/qat/qat_common/adf_isr.c | 3 ++- drivers/crypto/intel/qat/qat_common/adf_sriov.c | 3 ++- drivers/crypto/intel/qat/qat_common/adf_vf_isr.c | 3 ++- 4 files changed, 8 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index 35679b21ff63..667d5e320f50 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -276,11 +276,11 @@ int adf_notify_fatal_error(struct adf_accel_dev *accel_dev) int adf_init_aer(void) { device_reset_wq = alloc_workqueue("qat_device_reset_wq", - WQ_MEM_RECLAIM, 0); + WQ_MEM_RECLAIM | WQ_PERCPU, 0); if (!device_reset_wq) return -EFAULT; - device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", 0, 0); + device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", WQ_PERCPU, 0); if (!device_sriov_wq) { destroy_workqueue(device_reset_wq); device_reset_wq = NULL; diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c index 12e565613661..4639d7fd93e6 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_isr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c @@ -384,7 +384,8 @@ EXPORT_SYMBOL_GPL(adf_isr_resource_alloc); */ int __init adf_init_misc_wq(void) { - adf_misc_wq = alloc_workqueue("qat_misc_wq", WQ_MEM_RECLAIM, 0); + adf_misc_wq = alloc_workqueue("qat_misc_wq", + WQ_MEM_RECLAIM | WQ_PERCPU, 0); return !adf_misc_wq ? -ENOMEM : 0; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c index 31d1ef0cb1f5..bb904ba4bf84 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c @@ -299,7 +299,8 @@ EXPORT_SYMBOL_GPL(adf_sriov_configure); int __init adf_init_pf_wq(void) { /* Workqueue for PF2VF responses */ - pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0); + pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", + WQ_MEM_RECLAIM | WQ_PERCPU, 0); return !pf2vf_resp_wq ? -ENOMEM : 0; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c index a4636ec9f9ca..d0fef20a3df4 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c @@ -299,7 +299,8 @@ EXPORT_SYMBOL_GPL(adf_flush_vf_wq); */ int __init adf_init_vf_wq(void) { - adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0); + adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", + WQ_MEM_RECLAIM | WQ_PERCPU, 0); return !adf_vf_stop_wq ? -EFAULT : 0; } -- cgit From 76ce17f6f7f78ab79b9741388bdb4dafa985b4e9 Mon Sep 17 00:00:00 2001 From: Zilin Guan Date: Sun, 9 Nov 2025 14:56:48 +0000 Subject: crypto: iaa - Fix incorrect return value in save_iaa_wq() The save_iaa_wq() function unconditionally returns 0, even when an error is encountered. This prevents the error code from being propagated to the caller. Fix this by returning the 'ret' variable, which holds the actual status of the operations within the function. 
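In the abstract, with hypothetical helpers rather than the iaa code, the bug and the fix look like this: every early exit funnels through the same label, so the label has to return the saved status rather than a constant.

/* Hypothetical helpers for illustration. */
static int example_step_a(void) { return 0; }
static int example_step_b(void) { return 0; }

static int example_setup(void)
{
	int ret;

	ret = example_step_a();
	if (ret)
		goto out;

	ret = example_step_b();
out:
	return ret;	/* previously "return 0", which hid the error */
}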
Fixes: ea7a5cbb43696 ("crypto: iaa - Add Intel IAA Compression Accelerator crypto driver core") Signed-off-by: Zilin Guan Signed-off-by: Herbert Xu --- drivers/crypto/intel/iaa/iaa_crypto_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index 23f585219fb4..d0058757b000 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -805,7 +805,7 @@ static int save_iaa_wq(struct idxd_wq *wq) if (!cpus_per_iaa) cpus_per_iaa = 1; out: - return 0; + return ret; } static void remove_iaa_wq(struct idxd_wq *wq) -- cgit From e9eb52037a529fbb307c290e9951a62dd728b03d Mon Sep 17 00:00:00 2001 From: Haotian Zhang Date: Mon, 10 Nov 2025 14:54:38 +0800 Subject: crypto: starfive - Correctly handle return of sg_nents_for_len The return value of sg_nents_for_len was assigned to an unsigned long in starfive_hash_digest, causing negative error codes to be converted to large positive integers. Add error checking for sg_nents_for_len and return immediately on failure to prevent potential buffer overflows. Fixes: 7883d1b28a2b ("crypto: starfive - Add hash and HMAC support") Signed-off-by: Haotian Zhang Signed-off-by: Herbert Xu --- drivers/crypto/starfive/jh7110-hash.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c index e6839c7bfb73..54b7af4a7aee 100644 --- a/drivers/crypto/starfive/jh7110-hash.c +++ b/drivers/crypto/starfive/jh7110-hash.c @@ -325,6 +325,7 @@ static int starfive_hash_digest(struct ahash_request *req) struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm); struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req); struct starfive_cryp_dev *cryp = ctx->cryp; + int sg_len; memset(rctx, 0, sizeof(struct starfive_cryp_request_ctx)); @@ -333,7 +334,10 @@ static int starfive_hash_digest(struct ahash_request *req) rctx->in_sg = req->src; rctx->blksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); rctx->digsize = crypto_ahash_digestsize(tfm); - rctx->in_sg_len = sg_nents_for_len(rctx->in_sg, rctx->total); + sg_len = sg_nents_for_len(rctx->in_sg, rctx->total); + if (sg_len < 0) + return sg_len; + rctx->in_sg_len = sg_len; ctx->rctx = rctx; return crypto_transfer_hash_request_to_engine(cryp->engine, req); -- cgit From 8700ce07c5c6bf27afa7b59a8d9cf58d783a7d5c Mon Sep 17 00:00:00 2001 From: Haotian Zhang Date: Mon, 10 Nov 2025 15:20:41 +0800 Subject: crypto: ccree - Correctly handle return of sg_nents_for_len Fix error handling in cc_map_hash_request_update where sg_nents_for_len return value was assigned to u32, converting negative errors to large positive values before passing to sg_copy_to_buffer. Check sg_nents_for_len return value and propagate errors before assigning to areq_ctx->in_nents. 
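The underlying rule in both this fix and the starfive one above: sg_nents_for_len() returns a negative errno, so its result has to be tested in a signed variable before it is stored in an unsigned count. A sketch with invented names:

#include <linux/scatterlist.h>

static int example_count_nents(struct scatterlist *src, unsigned int nbytes,
			       u32 *out_nents)
{
	int nents;

	nents = sg_nents_for_len(src, nbytes);
	if (nents < 0)
		return nents;	/* e.g. -EINVAL: list shorter than nbytes */

	*out_nents = nents;	/* safe, known to be non-negative */
	return 0;
}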
Fixes: b7ec8530687a ("crypto: ccree - use std api when possible") Signed-off-by: Haotian Zhang Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_buffer_mgr.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c index 3963bb91321f..dc7e0cd51c25 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.c +++ b/drivers/crypto/ccree/cc_buffer_mgr.c @@ -1235,6 +1235,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, int rc = 0; u32 dummy = 0; u32 mapped_nents = 0; + int sg_nents; dev_dbg(dev, " update params : curr_buff=%p curr_buff_cnt=0x%X nbytes=0x%X src=%p curr_index=%u\n", curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index); @@ -1248,7 +1249,10 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, if (total_in_len < block_size) { dev_dbg(dev, " less than one block: curr_buff=%p *curr_buff_cnt=0x%X copy_to=%p\n", curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]); - areq_ctx->in_nents = sg_nents_for_len(src, nbytes); + sg_nents = sg_nents_for_len(src, nbytes); + if (sg_nents < 0) + return sg_nents; + areq_ctx->in_nents = sg_nents; sg_copy_to_buffer(src, areq_ctx->in_nents, &curr_buff[*curr_buff_cnt], nbytes); *curr_buff_cnt += nbytes; -- cgit From 0f8ead58b6dce9520fc3f9ff7f943bb0627a7a19 Mon Sep 17 00:00:00 2001 From: David Laight Date: Wed, 19 Nov 2025 22:41:10 +0000 Subject: hwrng: core - use min3() instead of nested min_t() min_t(u16, a, b) is likely to discard significant bits. Replace: min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024); with: min3(default_quality, 1024, rng->quality ?: 1024); Signed-off-by: David Laight Signed-off-by: Herbert Xu --- drivers/char/hw_random/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 56d888bebe0c..96d7fe41b373 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -545,7 +545,7 @@ int hwrng_register(struct hwrng *rng) init_completion(&rng->dying); /* Adjust quality field to always have a proper value */ - rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024); + rng->quality = min3(default_quality, 1024, rng->quality ?: 1024); if (!cur_rng_set_by_user && (!current_rng || rng->quality > current_rng->quality)) { -- cgit From 14ca8ce1fcbbd4448c302e144052cfc2fe288232 Mon Sep 17 00:00:00 2001 From: David Laight Date: Wed, 19 Nov 2025 22:41:12 +0000 Subject: crypto: ccp - use min() instead of min_t() min_t(unsigned int, a, b) casts an 'unsigned long' to 'unsigned int'. Use min(a, b) instead as it promotes any 'unsigned int' to 'unsigned long' and so cannot discard significant bits. In this case the 'unsigned long' value is small enough that the result is ok. Detected by an extra check added to min_t(). 
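A concrete illustration of the truncation risk, with values picked for the example rather than taken from real hardware: given a 32-bit quality of 0x10001, min_t(u16, ...) first chops the operand to 0x0001 and only then compares, whereas min3() compares the full values and only the final, in-range result is narrowed on assignment.

#include <linux/minmax.h>

static u16 example_clamp_quality(u32 default_quality, u32 rng_quality)
{
	/* old: min_t(u16, min_t(u16, default_quality, 1024), rng_quality ?: 1024)
	 * would treat 0x10001 as 0x0001 before comparing. */
	return min3(default_quality, 1024U, rng_quality ?: 1024U);
}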
Signed-off-by: David Laight Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index c531d13d971f..246801912e1a 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -507,7 +507,7 @@ int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng); u32 trng_value; - int len = min_t(int, sizeof(trng_value), max); + int len = min(sizeof(trng_value), max); /* Locking is provided by the caller so we can update device * hwrng-related fields safely -- cgit