31 files changed, 326 insertions, 234 deletions
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c index 30e70f4fe2f7..6d3b85e53d0e 100644 --- a/arch/x86/crypto/sha512_ssse3_glue.c +++ b/arch/x86/crypto/sha512_ssse3_glue.c @@ -36,6 +36,7 @@ #include <linux/types.h> #include <crypto/sha2.h> #include <crypto/sha512_base.h> +#include <asm/cpu_device_id.h> #include <asm/simd.h> asmlinkage void sha512_transform_ssse3(struct sha512_state *state, @@ -284,6 +285,13 @@ static int register_sha512_avx2(void) ARRAY_SIZE(sha512_avx2_algs)); return 0; } +static const struct x86_cpu_id module_cpu_ids[] = { + X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL), + X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL), + X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids); static void unregister_sha512_avx2(void) { @@ -294,6 +302,8 @@ static void unregister_sha512_avx2(void) static int __init sha512_ssse3_mod_init(void) { + if (!x86_match_cpu(module_cpu_ids)) + return -ENODEV; if (register_sha512_ssse3()) goto fail; diff --git a/crypto/Kconfig b/crypto/Kconfig index bb427a835e44..b1ccf873779d 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -15,7 +15,7 @@ source "crypto/async_tx/Kconfig" # menuconfig CRYPTO tristate "Cryptographic API" - select LIB_MEMNEQ + select CRYPTO_LIB_UTILS help This option provides the core Cryptographic API. diff --git a/crypto/algapi.c b/crypto/algapi.c index d1c99288af3e..5c69ff8e8fa5 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -997,77 +997,6 @@ void crypto_inc(u8 *a, unsigned int size) } EXPORT_SYMBOL_GPL(crypto_inc); -void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len) -{ - int relalign = 0; - - if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { - int size = sizeof(unsigned long); - int d = (((unsigned long)dst ^ (unsigned long)src1) | - ((unsigned long)dst ^ (unsigned long)src2)) & - (size - 1); - - relalign = d ? 1 << __ffs(d) : size; - - /* - * If we care about alignment, process as many bytes as - * needed to advance dst and src to values whose alignments - * equal their relative alignment. This will allow us to - * process the remainder of the input using optimal strides. 
- */ - while (((unsigned long)dst & (relalign - 1)) && len > 0) { - *dst++ = *src1++ ^ *src2++; - len--; - } - } - - while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) { - if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { - u64 l = get_unaligned((u64 *)src1) ^ - get_unaligned((u64 *)src2); - put_unaligned(l, (u64 *)dst); - } else { - *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2; - } - dst += 8; - src1 += 8; - src2 += 8; - len -= 8; - } - - while (len >= 4 && !(relalign & 3)) { - if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { - u32 l = get_unaligned((u32 *)src1) ^ - get_unaligned((u32 *)src2); - put_unaligned(l, (u32 *)dst); - } else { - *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2; - } - dst += 4; - src1 += 4; - src2 += 4; - len -= 4; - } - - while (len >= 2 && !(relalign & 1)) { - if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { - u16 l = get_unaligned((u16 *)src1) ^ - get_unaligned((u16 *)src2); - put_unaligned(l, (u16 *)dst); - } else { - *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2; - } - dst += 2; - src1 += 2; - src2 += 2; - len -= 2; - } - - while (len--) - *dst++ = *src1++ ^ *src2++; -} -EXPORT_SYMBOL_GPL(__crypto_xor); - unsigned int crypto_alg_extsize(struct crypto_alg *alg) { return alg->cra_ctxsize + diff --git a/crypto/api.c b/crypto/api.c index 69508ae9345e..ab4b5e2b0756 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -321,7 +321,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask) /* * If the internal flag is set for a cipher, require a caller to - * to invoke the cipher with the internal flag to use that cipher. + * invoke the cipher with the internal flag to use that cipher. * Also, if a caller wants to allocate a cipher that may or may * not be an internal cipher, use type | CRYPTO_ALG_INTERNAL and * !(mask & CRYPTO_ALG_INTERNAL). 
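The sha512_ssse3_glue.c hunks above follow the usual x86 CPU-feature autoloading pattern: declare an x86_cpu_id table, expose it with MODULE_DEVICE_TABLE(x86cpu, ...), and return -ENODEV from module init when x86_match_cpu() finds no match. A minimal sketch of that pattern; the module name and the single feature listed here are illustrative, not part of the patch (the real glue lists AVX2, AVX and SSSE3):

#include <asm/cpu_device_id.h>
#include <linux/module.h>

/* Hypothetical feature list for illustration only. */
static const struct x86_cpu_id demo_cpu_ids[] = {
	X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, demo_cpu_ids);

static int __init demo_init(void)
{
	if (!x86_match_cpu(demo_cpu_ids))
		return -ENODEV;	/* none of the listed features present */
	/* ... register algorithms here ... */
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	/* ... unregister algorithms here ... */
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");

The device table lets udev load the module automatically on matching CPUs, while the init-time x86_match_cpu() check keeps it from registering algorithms when a manually loaded module finds none of the listed features.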
diff --git a/crypto/drbg.c b/crypto/drbg.c index 177983b6ae38..982d4ca4526d 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1703,7 +1703,7 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg) static int drbg_fini_hash_kernel(struct drbg_state *drbg) { - struct sdesc *sdesc = (struct sdesc *)drbg->priv_data; + struct sdesc *sdesc = drbg->priv_data; if (sdesc) { crypto_free_shash(sdesc->shash.tfm); kfree_sensitive(sdesc); @@ -1715,7 +1715,7 @@ static int drbg_fini_hash_kernel(struct drbg_state *drbg) static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg, const unsigned char *key) { - struct sdesc *sdesc = (struct sdesc *)drbg->priv_data; + struct sdesc *sdesc = drbg->priv_data; crypto_shash_setkey(sdesc->shash.tfm, key, drbg_statelen(drbg)); } @@ -1723,7 +1723,7 @@ static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg, static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval, const struct list_head *in) { - struct sdesc *sdesc = (struct sdesc *)drbg->priv_data; + struct sdesc *sdesc = drbg->priv_data; struct drbg_string *input = NULL; crypto_shash_init(&sdesc->shash); @@ -1818,8 +1818,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg) static void drbg_kcapi_symsetkey(struct drbg_state *drbg, const unsigned char *key) { - struct crypto_cipher *tfm = - (struct crypto_cipher *)drbg->priv_data; + struct crypto_cipher *tfm = drbg->priv_data; crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg))); } @@ -1827,8 +1826,7 @@ static void drbg_kcapi_symsetkey(struct drbg_state *drbg, static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval, const struct drbg_string *in) { - struct crypto_cipher *tfm = - (struct crypto_cipher *)drbg->priv_data; + struct crypto_cipher *tfm = drbg->priv_data; /* there is only component in *in */ BUG_ON(in->len < drbg_blocklen(drbg)); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 5349ffee6bbd..2ad4bcc58617 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -3417,6 +3417,21 @@ static int test_acomp(struct crypto_acomp *tfm, goto out; } +#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS + crypto_init_wait(&wait); + sg_init_one(&src, input_vec, ilen); + acomp_request_set_params(req, &src, NULL, ilen, 0); + + ret = crypto_wait_req(crypto_acomp_compress(req), &wait); + if (ret) { + pr_err("alg: acomp: compression failed on NULL dst buffer test %d for %s: ret=%d\n", + i + 1, algo, -ret); + kfree(input_vec); + acomp_request_free(req); + goto out; + } +#endif + kfree(input_vec); acomp_request_free(req); } @@ -3478,6 +3493,20 @@ static int test_acomp(struct crypto_acomp *tfm, goto out; } +#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS + crypto_init_wait(&wait); + acomp_request_set_params(req, &src, NULL, ilen, 0); + + ret = crypto_wait_req(crypto_acomp_decompress(req), &wait); + if (ret) { + pr_err("alg: acomp: decompression failed on NULL dst buffer test %d for %s: ret=%d\n", + i + 1, algo, -ret); + kfree(input_vec); + acomp_request_free(req); + goto out; + } +#endif + kfree(input_vec); acomp_request_free(req); } @@ -5801,8 +5830,11 @@ test_done: driver, alg, fips_enabled ? 
"fips" : "panic_on_fail"); } - WARN(1, "alg: self-tests for %s (%s) failed (rc=%d)", - driver, alg, rc); + pr_warn("alg: self-tests for %s using %s failed (rc=%d)", + alg, driver, rc); + WARN(rc != -ENOENT, + "alg: self-tests for %s using %s failed (rc=%d)", + alg, driver, rc); } else { if (fips_enabled) pr_info("alg: self-tests for %s (%s) passed\n", diff --git a/drivers/char/hw_random/arm_smccc_trng.c b/drivers/char/hw_random/arm_smccc_trng.c index b24ac39a903b..e34c3ea692b6 100644 --- a/drivers/char/hw_random/arm_smccc_trng.c +++ b/drivers/char/hw_random/arm_smccc_trng.c @@ -71,8 +71,6 @@ static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) MAX_BITS_PER_CALL); arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND, bits, &res); - if ((int)res.a0 < 0) - return (int)res.a0; switch ((int)res.a0) { case SMCCC_RET_SUCCESS: @@ -88,6 +86,8 @@ static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) return copied; cond_resched(); break; + default: + return -EIO; } } diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 16f227b995e8..d7045dfaf16c 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -507,16 +507,17 @@ static int hwrng_fillfn(void *unused) rng->quality = current_quality; /* obsolete */ quality = rng->quality; mutex_unlock(&reading_mutex); + + if (rc <= 0) + hwrng_msleep(rng, 10000); + put_rng(rng); if (!quality) break; - if (rc <= 0) { - pr_warn("hwrng: no data available\n"); - msleep_interruptible(10000); + if (rc <= 0) continue; - } /* If we cannot credit at least one bit of entropy, * keep track of the remainder for the next iteration @@ -570,6 +571,7 @@ int hwrng_register(struct hwrng *rng) init_completion(&rng->cleanup_done); complete(&rng->cleanup_done); + init_completion(&rng->dying); if (!current_rng || (!cur_rng_set_by_user && rng->quality > current_rng->quality)) { @@ -617,6 +619,7 @@ void hwrng_unregister(struct hwrng *rng) old_rng = current_rng; list_del(&rng->list); + complete_all(&rng->dying); if (current_rng == rng) { err = enable_best_rng(); if (err) { @@ -685,6 +688,14 @@ void devm_hwrng_unregister(struct device *dev, struct hwrng *rng) } EXPORT_SYMBOL_GPL(devm_hwrng_unregister); +long hwrng_msleep(struct hwrng *rng, unsigned int msecs) +{ + unsigned long timeout = msecs_to_jiffies(msecs) + 1; + + return wait_for_completion_interruptible_timeout(&rng->dying, timeout); +} +EXPORT_SYMBOL_GPL(hwrng_msleep); + static int __init hwrng_modinit(void) { int ret; diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c index b05d676ca814..78c10fa4c79e 100644 --- a/drivers/char/hw_random/imx-rngc.c +++ b/drivers/char/hw_random/imx-rngc.c @@ -355,7 +355,7 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids); static struct platform_driver imx_rngc_driver = { .driver = { - .name = "imx_rngc", + .name = KBUILD_MODNAME, .pm = &imx_rngc_pm_ops, .of_match_table = imx_rngc_dt_ids, }, diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c index 19cd2e52f89d..c4b0a8b58842 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c @@ -54,11 +54,9 @@ static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wa goto err_dst; } - err = pm_runtime_get_sync(ce->dev); - if (err < 0) { - pm_runtime_put_noidle(ce->dev); + err = pm_runtime_resume_and_get(ce->dev); + if (err < 0) goto err_pm; - } mutex_lock(&ce->rnglock); chan = &ce->chanlist[flow]; 
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index 9ad188cffd0d..b4820594ab80 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -2091,7 +2091,7 @@ static void artpec6_crypto_task(unsigned long data) return; } - spin_lock_bh(&ac->queue_lock); + spin_lock(&ac->queue_lock); list_for_each_entry_safe(req, n, &ac->pending, list) { struct artpec6_crypto_dma_descriptors *dma = req->dma; @@ -2128,7 +2128,7 @@ static void artpec6_crypto_task(unsigned long data) artpec6_crypto_process_queue(ac, &complete_in_progress); - spin_unlock_bh(&ac->queue_lock); + spin_unlock(&ac->queue_lock); /* Perform the completion callbacks without holding the queue lock * to allow new request submissions from the callbacks. diff --git a/drivers/crypto/cavium/cpt/cpt_hw_types.h b/drivers/crypto/cavium/cpt/cpt_hw_types.h index 8ec6edc69f3f..ae4791a8ec4a 100644 --- a/drivers/crypto/cavium/cpt/cpt_hw_types.h +++ b/drivers/crypto/cavium/cpt/cpt_hw_types.h @@ -396,7 +396,7 @@ union cptx_vqx_misc_ena_w1s { * Word0 * reserved_20_63:44 [63:20] Reserved. * dbell_cnt:20 [19:0](R/W/H) Number of instruction queue 64-bit words to add - * to the CPT instruction doorbell count. Readback value is the the + * to the CPT instruction doorbell count. Readback value is the * current number of pending doorbell requests. If counter overflows * CPT()_VQ()_MISC_INT[DBELL_DOVF] is set. To reset the count back to * zero, write one to clear CPT()_VQ()_MISC_INT_ENA_W1C[DBELL_DOVF], diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 9f588c9728f8..b292641c8a99 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -744,6 +744,11 @@ static int sev_update_firmware(struct device *dev) struct page *p; u64 data_size; + if (!sev_version_greater_or_equal(0, 15)) { + dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n"); + return -1; + } + if (sev_get_firmware(dev, &firmware) == -ENOENT) { dev_dbg(dev, "No SEV firmware file present\n"); return -1; @@ -776,6 +781,14 @@ static int sev_update_firmware(struct device *dev) data->len = firmware->size; ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); + + /* + * A quirk for fixing the committed TCB version, when upgrading from + * earlier firmware version than 1.50. 
+ */ + if (!ret && !sev_version_greater_or_equal(1, 50)) + ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); + if (ret) dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error); else @@ -1285,8 +1298,7 @@ void sev_pci_init(void) if (sev_get_api_version()) goto err; - if (sev_version_greater_or_equal(0, 15) && - sev_update_firmware(sev->dev) == 0) + if (sev_update_firmware(sev->dev) == 0) sev_get_api_version(); /* If an init_ex_path is provided rely on INIT_EX for PSP initialization diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 9d529df0eab9..8e0e87cede6b 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -708,7 +708,7 @@ static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file) return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT); } -static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val) +static void hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val) { struct hisi_qm *qm = hpre_file_to_qm(file); int cluster_index = file->index - HPRE_CLUSTER_CTRL; @@ -716,8 +716,6 @@ static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val) HPRE_CLSTR_ADDR_INTRVL; writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY); - - return 0; } static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf, @@ -792,9 +790,7 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf, goto err_input; break; case HPRE_CLUSTER_CTRL: - ret = hpre_cluster_inqry_write(file, val); - if (ret) - goto err_input; + hpre_cluster_inqry_write(file, val); break; default: ret = -EINVAL; diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index ad83c194d664..b2e7abff1b8a 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -1857,39 +1857,19 @@ static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size, kfree(ctx_addr); } -static int dump_show(struct hisi_qm *qm, void *info, +static void dump_show(struct hisi_qm *qm, void *info, unsigned int info_size, char *info_name) { struct device *dev = &qm->pdev->dev; - u8 *info_buf, *info_curr = info; + u8 *info_curr = info; u32 i; #define BYTE_PER_DW 4 - info_buf = kzalloc(info_size, GFP_KERNEL); - if (!info_buf) - return -ENOMEM; - - for (i = 0; i < info_size; i++, info_curr++) { - if (i % BYTE_PER_DW == 0) - info_buf[i + 3UL] = *info_curr; - else if (i % BYTE_PER_DW == 1) - info_buf[i + 1UL] = *info_curr; - else if (i % BYTE_PER_DW == 2) - info_buf[i - 1] = *info_curr; - else if (i % BYTE_PER_DW == 3) - info_buf[i - 3] = *info_curr; - } - dev_info(dev, "%s DUMP\n", info_name); - for (i = 0; i < info_size; i += BYTE_PER_DW) { + for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) { pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW, - info_buf[i], info_buf[i + 1UL], - info_buf[i + 2UL], info_buf[i + 3UL]); + *(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr)); } - - kfree(info_buf); - - return 0; } static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) @@ -1929,23 +1909,18 @@ static int qm_sqc_dump(struct hisi_qm *qm, const char *s) if (qm->sqc) { sqc_curr = qm->sqc + qp_id; - ret = dump_show(qm, sqc_curr, sizeof(*sqc), - "SOFT SQC"); - if (ret) - dev_info(dev, "Show soft sqc failed!\n"); + dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC"); } up_read(&qm->qps_lock); - goto err_free_ctx; + goto free_ctx; } - ret = dump_show(qm, sqc, sizeof(*sqc), 
"SQC"); - if (ret) - dev_info(dev, "Show hw sqc failed!\n"); + dump_show(qm, sqc, sizeof(*sqc), "SQC"); -err_free_ctx: +free_ctx: qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma); - return ret; + return 0; } static int qm_cqc_dump(struct hisi_qm *qm, const char *s) @@ -1975,23 +1950,18 @@ static int qm_cqc_dump(struct hisi_qm *qm, const char *s) if (qm->cqc) { cqc_curr = qm->cqc + qp_id; - ret = dump_show(qm, cqc_curr, sizeof(*cqc), - "SOFT CQC"); - if (ret) - dev_info(dev, "Show soft cqc failed!\n"); + dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC"); } up_read(&qm->qps_lock); - goto err_free_ctx; + goto free_ctx; } - ret = dump_show(qm, cqc, sizeof(*cqc), "CQC"); - if (ret) - dev_info(dev, "Show hw cqc failed!\n"); + dump_show(qm, cqc, sizeof(*cqc), "CQC"); -err_free_ctx: +free_ctx: qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma); - return ret; + return 0; } static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size, @@ -2015,9 +1985,7 @@ static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size, if (ret) goto err_free_ctx; - ret = dump_show(qm, xeqc, size, name); - if (ret) - dev_info(dev, "Show hw %s failed!\n", name); + dump_show(qm, xeqc, size, name); err_free_ctx: qm_ctx_free(qm, size, xeqc, &xeqc_dma); @@ -2066,7 +2034,6 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s, static int qm_sq_dump(struct hisi_qm *qm, char *s) { - struct device *dev = &qm->pdev->dev; void *sqe, *sqe_curr; struct hisi_qp *qp; u32 qp_id, sqe_id; @@ -2086,18 +2053,15 @@ static int qm_sq_dump(struct hisi_qm *qm, char *s) memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK, qm->debug.sqe_mask_len); - ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE"); - if (ret) - dev_info(dev, "Show sqe failed!\n"); + dump_show(qm, sqe_curr, qm->sqe_size, "SQE"); kfree(sqe); - return ret; + return 0; } static int qm_cq_dump(struct hisi_qm *qm, char *s) { - struct device *dev = &qm->pdev->dev; struct qm_cqe *cqe_curr; struct hisi_qp *qp; u32 qp_id, cqe_id; @@ -2109,11 +2073,9 @@ static int qm_cq_dump(struct hisi_qm *qm, char *s) qp = &qm->qp_array[qp_id]; cqe_curr = qp->cqe + cqe_id; - ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE"); - if (ret) - dev_info(dev, "Show cqe failed!\n"); + dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE"); - return ret; + return 0; } static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s, @@ -2150,9 +2112,7 @@ static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s, goto err_unlock; } - ret = dump_show(qm, xeqe, size, name); - if (ret) - dev_info(dev, "Show %s failed!\n", name); + dump_show(qm, xeqe, size, name); err_unlock: up_read(&qm->qps_lock); @@ -3286,7 +3246,6 @@ static void hisi_qm_uacce_put_queue(struct uacce_queue *q) { struct hisi_qp *qp = q->priv; - hisi_qm_cache_wb(qp->qm); hisi_qm_release_qp(qp); } @@ -5466,8 +5425,6 @@ pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev) if (pdev->is_virtfn) return PCI_ERS_RESULT_RECOVERED; - pci_aer_clear_nonfatal_status(pdev); - /* reset pcie device controller */ ret = qm_controller_reset(qm); if (ret) { @@ -6141,8 +6098,8 @@ static int hisi_qm_memory_init(struct hisi_qm *qm) GFP_ATOMIC); dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size); if (!qm->qdma.va) { - ret = -ENOMEM; - goto err_alloc_qdma; + ret = -ENOMEM; + goto err_destroy_idr; } QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH); @@ -6158,7 +6115,8 @@ static int hisi_qm_memory_init(struct hisi_qm *qm) err_alloc_qp_array: dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); -err_alloc_qdma: +err_destroy_idr: + 
idr_destroy(&qm->qp_idr); kfree(qm->factor); return ret; diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h index 3dfd3bac5a33..f289656e9ac0 100644 --- a/drivers/crypto/hisilicon/zip/zip.h +++ b/drivers/crypto/hisilicon/zip/zip.h @@ -81,7 +81,7 @@ struct hisi_zip_sqe { u32 rsvd1[4]; }; -int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node); +int zip_create_qps(struct hisi_qp **qps, int qp_num, int node); int hisi_zip_register_to_crypto(struct hisi_qm *qm); void hisi_zip_unregister_from_crypto(struct hisi_qm *qm); #endif diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index ad35434a3fdb..a6c914d527eb 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -123,19 +123,19 @@ static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp) if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX) return -EINVAL; - return param_set_int(val, kp); + return param_set_ushort(val, kp); } static const struct kernel_param_ops sgl_sge_nr_ops = { .set = sgl_sge_nr_set, - .get = param_get_int, + .get = param_get_ushort, }; static u16 sgl_sge_nr = HZIP_SGL_SGE_NR; module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444); MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)"); -static u16 get_extra_field_size(const u8 *start) +static u32 get_extra_field_size(const u8 *start) { return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN; } @@ -167,7 +167,7 @@ static u32 __get_gzip_head_size(const u8 *src) return size; } -static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl) +static u32 __maybe_unused get_gzip_head_size(struct scatterlist *sgl) { char buf[HZIP_GZIP_HEAD_BUF]; @@ -183,7 +183,7 @@ static int add_comp_head(struct scatterlist *dst, u8 req_type) int ret; ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size); - if (ret != head_size) { + if (unlikely(ret != head_size)) { pr_err("the head size of buffer is wrong (%d)!\n", ret); return -ENOMEM; } @@ -193,11 +193,11 @@ static int add_comp_head(struct scatterlist *dst, u8 req_type) static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type) { - if (!acomp_req->src || !acomp_req->slen) + if (unlikely(!acomp_req->src || !acomp_req->slen)) return -EINVAL; - if (req_type == HZIP_ALG_TYPE_GZIP && - acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT) + if (unlikely(req_type == HZIP_ALG_TYPE_GZIP && + acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT)) return -EINVAL; switch (req_type) { @@ -230,6 +230,8 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, } set_bit(req_id, req_q->req_bitmap); + write_unlock(&req_q->req_lock); + req_cache = q + req_id; req_cache->req_id = req_id; req_cache->req = req; @@ -242,8 +244,6 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, req_cache->dskip = 0; } - write_unlock(&req_q->req_lock); - return req_cache; } @@ -254,7 +254,6 @@ static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx, write_lock(&req_q->req_lock); clear_bit(req->req_id, req_q->req_bitmap); - memset(req, 0, sizeof(struct hisi_zip_req)); write_unlock(&req_q->req_lock); } @@ -339,7 +338,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req, struct hisi_zip_sqe zip_sqe; int ret; - if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen) + if (unlikely(!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)) return -EINVAL; req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool, @@ -365,7 +364,7 @@ static int 
hisi_zip_do_work(struct hisi_zip_req *req, /* send command to start a task */ atomic64_inc(&dfx->send_cnt); ret = hisi_qp_send(qp, &zip_sqe); - if (ret < 0) { + if (unlikely(ret < 0)) { atomic64_inc(&dfx->send_busy_cnt); ret = -EAGAIN; dev_dbg_ratelimited(dev, "failed to send request!\n"); @@ -417,7 +416,7 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data) atomic64_inc(&dfx->recv_cnt); status = ops->get_status(sqe); - if (status != 0 && status != HZIP_NC_ERR) { + if (unlikely(status != 0 && status != HZIP_NC_ERR)) { dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n", (qp->alg_type == 0) ? "" : "de", qp->qp_id, status, sqe->produced); @@ -450,7 +449,7 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req) /* let's output compression head now */ head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type); - if (head_size < 0) { + if (unlikely(head_size < 0)) { dev_err_ratelimited(dev, "failed to add comp head (%d)!\n", head_size); return head_size; @@ -461,7 +460,7 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req) return PTR_ERR(req); ret = hisi_zip_do_work(req, qp_ctx); - if (ret != -EINPROGRESS) { + if (unlikely(ret != -EINPROGRESS)) { dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret); hisi_zip_remove_req(qp_ctx, req); } @@ -478,7 +477,7 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req) int head_size, ret; head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type); - if (head_size < 0) { + if (unlikely(head_size < 0)) { dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n", head_size); return head_size; @@ -489,7 +488,7 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req) return PTR_ERR(req); ret = hisi_zip_do_work(req, qp_ctx); - if (ret != -EINPROGRESS) { + if (unlikely(ret != -EINPROGRESS)) { dev_info_ratelimited(dev, "failed to do decompress (%d)!\n", ret); hisi_zip_remove_req(qp_ctx, req); @@ -498,7 +497,7 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req) return ret; } -static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx, +static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *qp_ctx, int alg_type, int req_type) { struct device *dev = &qp->qm->pdev->dev; @@ -506,7 +505,7 @@ static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx, qp->req_type = req_type; qp->alg_type = alg_type; - qp->qp_ctx = ctx; + qp->qp_ctx = qp_ctx; ret = hisi_qm_start_qp(qp, 0); if (ret < 0) { @@ -514,15 +513,15 @@ static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx, return ret; } - ctx->qp = qp; + qp_ctx->qp = qp; return 0; } -static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx) +static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *qp_ctx) { - hisi_qm_stop_qp(ctx->qp); - hisi_qm_free_qps(&ctx->qp, 1); + hisi_qm_stop_qp(qp_ctx->qp); + hisi_qm_free_qps(&qp_ctx->qp, 1); } static const struct hisi_zip_sqe_ops hisi_zip_ops_v1 = { @@ -594,7 +593,7 @@ static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx) { int i; - for (i = 1; i >= 0; i--) + for (i = 0; i < HZIP_CTX_Q_NUM; i++) hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]); } @@ -613,7 +612,7 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx) if (i == 0) return ret; - goto err_free_loop0; + goto err_free_comp_q; } rwlock_init(&req_q->req_lock); @@ -622,19 +621,19 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx) if (!req_q->q) { ret = -ENOMEM; if (i == 0) - goto err_free_bitmap; + goto err_free_comp_bitmap; else - goto 
err_free_loop1; + goto err_free_decomp_bitmap; } } return 0; -err_free_loop1: +err_free_decomp_bitmap: bitmap_free(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap); -err_free_loop0: +err_free_comp_q: kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q); -err_free_bitmap: +err_free_comp_bitmap: bitmap_free(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap); return ret; } diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index c3303d99acac..04c8a4c65d77 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -586,8 +586,9 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp, return len; tbuf[len] = '\0'; - if (kstrtoul(tbuf, 0, &val)) - return -EFAULT; + ret = kstrtoul(tbuf, 0, &val); + if (ret) + return ret; ret = hisi_qm_get_dfx_access(qm); if (ret) @@ -976,7 +977,10 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) qm->err_ini = &hisi_zip_err_ini; qm->err_ini->err_info_init(qm); - hisi_zip_set_user_domain_and_cache(qm); + ret = hisi_zip_set_user_domain_and_cache(qm); + if (ret) + return ret; + hisi_zip_open_sva_prefetch(qm); hisi_qm_dev_err_init(qm); hisi_zip_debug_regs_clear(qm); diff --git a/drivers/crypto/keembay/Kconfig b/drivers/crypto/keembay/Kconfig index 7942b48dd55a..1cd62f9c3e3a 100644 --- a/drivers/crypto/keembay/Kconfig +++ b/drivers/crypto/keembay/Kconfig @@ -42,7 +42,7 @@ config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS config CRYPTO_DEV_KEEMBAY_OCS_ECC tristate "Support for Intel Keem Bay OCS ECC HW acceleration" depends on ARCH_KEEMBAY || COMPILE_TEST - depends on OF || COMPILE_TEST + depends on OF depends on HAS_IOMEM select CRYPTO_ECDH select CRYPTO_ENGINE @@ -64,7 +64,7 @@ config CRYPTO_DEV_KEEMBAY_OCS_HCU select CRYPTO_ENGINE depends on HAS_IOMEM depends on ARCH_KEEMBAY || COMPILE_TEST - depends on OF || COMPILE_TEST + depends on OF help Support for Intel Keem Bay Offload and Crypto Subsystem (OCS) Hash Control Unit (HCU) hardware acceleration for use with Crypto API. diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h index 4b36869bf460..69482abdb8b9 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h +++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h @@ -86,7 +86,8 @@ ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \ ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \ ICP_QAT_CSS_SIGNATURE_LEN(handle)) -#define ICP_QAT_CSS_MAX_IMAGE_LEN 0x40000 +#define ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN 0x40000 +#define ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN 0x30000 #define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf) #define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf) diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 0fe5a474aa45..b7f7869ef8b2 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -1367,6 +1367,48 @@ static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle, } } +static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle, + char *image, unsigned int size, + unsigned int fw_type) +{ + char *fw_type_name = fw_type ? 
"MMP" : "AE"; + unsigned int css_dword_size = sizeof(u32); + + if (handle->chip_info->fw_auth) { + struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image; + unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle); + + if ((css_hdr->header_len * css_dword_size) != header_len) + goto err; + if ((css_hdr->size * css_dword_size) != size) + goto err; + if (fw_type != css_hdr->fw_type) + goto err; + if (size <= header_len) + goto err; + size -= header_len; + } + + if (fw_type == CSS_AE_FIRMWARE) { + if (size < sizeof(struct icp_qat_simg_ae_mode *) + + ICP_QAT_SIMG_AE_INIT_SEQ_LEN) + goto err; + if (size > ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN) + goto err; + } else if (fw_type == CSS_MMP_FIRMWARE) { + if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN) + goto err; + } else { + pr_err("QAT: Unsupported firmware type\n"); + return -EINVAL; + } + return 0; + +err: + pr_err("QAT: Invalid %s firmware image\n", fw_type_name); + return -EINVAL; +} + static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, char *image, unsigned int size, struct icp_qat_fw_auth_desc **desc) @@ -1379,7 +1421,7 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, struct icp_qat_simg_ae_mode *simg_ae_mode; struct icp_firml_dram_desc img_desc; - if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_MAX_IMAGE_LEN)) { + if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)) { pr_err("QAT: error, input image size overflow %d\n", size); return -EINVAL; } @@ -1547,6 +1589,11 @@ int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, { struct icp_qat_fw_auth_desc *desc = NULL; int status = 0; + int ret; + + ret = qat_uclo_check_image(handle, addr_ptr, mem_size, CSS_MMP_FIRMWARE); + if (ret) + return ret; if (handle->chip_info->fw_auth) { status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc); @@ -2018,8 +2065,15 @@ static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle) struct icp_qat_fw_auth_desc *desc = NULL; struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle; struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr; + int ret; for (i = 0; i < sobj_handle->img_table.num_simgs; i++) { + ret = qat_uclo_check_image(handle, simg_hdr[i].simg_buf, + simg_hdr[i].simg_len, + CSS_AE_FIRMWARE); + if (ret) + return ret; + if (qat_uclo_map_auth_fw(handle, (char *)simg_hdr[i].simg_buf, (unsigned int) diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c index 031b5f701a0a..72dd1a4ebac4 100644 --- a/drivers/crypto/qcom-rng.c +++ b/drivers/crypto/qcom-rng.c @@ -9,6 +9,7 @@ #include <linux/crypto.h> #include <linux/io.h> #include <linux/iopoll.h> +#include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> @@ -201,15 +202,13 @@ static int qcom_rng_remove(struct platform_device *pdev) return 0; } -#if IS_ENABLED(CONFIG_ACPI) -static const struct acpi_device_id qcom_rng_acpi_match[] = { +static const struct acpi_device_id __maybe_unused qcom_rng_acpi_match[] = { { .id = "QCOM8160", .driver_data = 1 }, {} }; MODULE_DEVICE_TABLE(acpi, qcom_rng_acpi_match); -#endif -static const struct of_device_id qcom_rng_of_match[] = { +static const struct of_device_id __maybe_unused qcom_rng_of_match[] = { { .compatible = "qcom,prng", .data = (void *)0}, { .compatible = "qcom,prng-ee", .data = (void *)1}, {} diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 457084b344c1..b07ae4ba165e 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -26,10 
+26,10 @@ #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/module.h> -#include <linux/mutex.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/spinlock.h> #define SHA_BUFFER_LEN PAGE_SIZE #define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE @@ -196,7 +196,7 @@ struct sahara_dev { void __iomem *regs_base; struct clk *clk_ipg; struct clk *clk_ahb; - struct mutex queue_mutex; + spinlock_t queue_spinlock; struct task_struct *kthread; struct completion dma_completion; @@ -642,9 +642,9 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode) rctx->mode = mode; - mutex_lock(&dev->queue_mutex); + spin_lock_bh(&dev->queue_spinlock); err = crypto_enqueue_request(&dev->queue, &req->base); - mutex_unlock(&dev->queue_mutex); + spin_unlock_bh(&dev->queue_spinlock); wake_up_process(dev->kthread); @@ -1043,10 +1043,10 @@ static int sahara_queue_manage(void *data) do { __set_current_state(TASK_INTERRUPTIBLE); - mutex_lock(&dev->queue_mutex); + spin_lock_bh(&dev->queue_spinlock); backlog = crypto_get_backlog(&dev->queue); async_req = crypto_dequeue_request(&dev->queue); - mutex_unlock(&dev->queue_mutex); + spin_unlock_bh(&dev->queue_spinlock); if (backlog) backlog->complete(backlog, -EINPROGRESS); @@ -1092,9 +1092,9 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last) rctx->first = 1; } - mutex_lock(&dev->queue_mutex); + spin_lock_bh(&dev->queue_spinlock); ret = crypto_enqueue_request(&dev->queue, &req->base); - mutex_unlock(&dev->queue_mutex); + spin_unlock_bh(&dev->queue_spinlock); wake_up_process(dev->kthread); @@ -1449,7 +1449,7 @@ static int sahara_probe(struct platform_device *pdev) crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH); - mutex_init(&dev->queue_mutex); + spin_lock_init(&dev->queue_spinlock); dev_ptr = dev; diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c index cb5414265a9b..58c0ab01771b 100644 --- a/drivers/net/wireless/ath/ath9k/rng.c +++ b/drivers/net/wireless/ath/ath9k/rng.c @@ -83,7 +83,8 @@ static int ath9k_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) if (!wait || !max || likely(bytes_read) || fail_stats > 110) break; - msleep_interruptible(ath9k_rng_delay_get(++fail_stats)); + if (hwrng_msleep(rng, ath9k_rng_delay_get(++fail_stats))) + break; } if (wait && !bytes_read && max) diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index aa1d4da03538..77c2885c4c13 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -50,6 +50,7 @@ struct hwrng { struct list_head list; struct kref ref; struct completion cleanup_done; + struct completion dying; }; struct device; @@ -61,4 +62,6 @@ extern int devm_hwrng_register(struct device *dev, struct hwrng *rng); extern void hwrng_unregister(struct hwrng *rng); extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng); +extern long hwrng_msleep(struct hwrng *rng, unsigned int msecs); + #endif /* LINUX_HWRANDOM_H_ */ diff --git a/lib/Kconfig b/lib/Kconfig index dc1ab2ed1dc6..6f113ade49ca 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -127,9 +127,6 @@ config TRACE_MMIO_ACCESS source "lib/crypto/Kconfig" -config LIB_MEMNEQ - bool - config CRC_CCITT tristate "CRC-CCITT functions" help diff --git a/lib/Makefile b/lib/Makefile index c95212141928..0eebd502e09d 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -253,7 +253,6 @@ obj-$(CONFIG_DIMLIB) += dim/ obj-$(CONFIG_SIGNATURE) += digsig.o lib-$(CONFIG_CLZ_TAB) += clz_tab.o 
-lib-$(CONFIG_LIB_MEMNEQ) += memneq.o obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 47816af9a9d7..7e9683e9f5c6 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -2,6 +2,9 @@ menu "Crypto library routines" +config CRYPTO_LIB_UTILS + tristate + config CRYPTO_LIB_AES tristate @@ -33,6 +36,7 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA config CRYPTO_LIB_CHACHA_GENERIC tristate + select CRYPTO_LIB_UTILS help This symbol can be depended upon by arch implementations of the ChaCha library interface that require the generic code as a @@ -42,7 +46,6 @@ config CRYPTO_LIB_CHACHA_GENERIC config CRYPTO_LIB_CHACHA tristate "ChaCha library interface" - depends on CRYPTO depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n help @@ -70,7 +73,7 @@ config CRYPTO_LIB_CURVE25519 tristate "Curve25519 scalar multiplication library" depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519 select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n - select LIB_MEMNEQ + select CRYPTO_LIB_UTILS help Enable the Curve25519 library interface. This interface may be fulfilled by either the generic implementation or an arch-specific diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index 919cbb2c220d..c852f067ab06 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -1,5 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_CRYPTO_LIB_UTILS) += libcryptoutils.o +libcryptoutils-y := memneq.o utils.o + # chacha is used by the /dev/random driver which is always builtin obj-y += chacha.o obj-$(CONFIG_CRYPTO_LIB_CHACHA_GENERIC) += libchacha.o diff --git a/lib/memneq.c b/lib/crypto/memneq.c index fb11608b1ec1..243d8677cc51 100644 --- a/lib/memneq.c +++ b/lib/crypto/memneq.c @@ -59,10 +59,9 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include <crypto/algapi.h> #include <asm/unaligned.h> - -#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ +#include <crypto/algapi.h> +#include <linux/module.h> /* Generic path for arbitrary size */ static inline unsigned long @@ -172,5 +171,3 @@ noinline unsigned long __crypto_memneq(const void *a, const void *b, } } EXPORT_SYMBOL(__crypto_memneq); - -#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */ diff --git a/lib/crypto/utils.c b/lib/crypto/utils.c new file mode 100644 index 000000000000..53230ab1b195 --- /dev/null +++ b/lib/crypto/utils.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Crypto library utility functions + * + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#include <asm/unaligned.h> +#include <crypto/algapi.h> +#include <linux/module.h> + +/* + * XOR @len bytes from @src1 and @src2 together, writing the result to @dst + * (which may alias one of the sources). Don't call this directly; call + * crypto_xor() or crypto_xor_cpy() instead. + */ +void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len) +{ + int relalign = 0; + + if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { + int size = sizeof(unsigned long); + int d = (((unsigned long)dst ^ (unsigned long)src1) | + ((unsigned long)dst ^ (unsigned long)src2)) & + (size - 1); + + relalign = d ? 1 << __ffs(d) : size; + + /* + * If we care about alignment, process as many bytes as + * needed to advance dst and src to values whose alignments + * equal their relative alignment. 
This will allow us to + * process the remainder of the input using optimal strides. + */ + while (((unsigned long)dst & (relalign - 1)) && len > 0) { + *dst++ = *src1++ ^ *src2++; + len--; + } + } + + while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) { + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { + u64 l = get_unaligned((u64 *)src1) ^ + get_unaligned((u64 *)src2); + put_unaligned(l, (u64 *)dst); + } else { + *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2; + } + dst += 8; + src1 += 8; + src2 += 8; + len -= 8; + } + + while (len >= 4 && !(relalign & 3)) { + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { + u32 l = get_unaligned((u32 *)src1) ^ + get_unaligned((u32 *)src2); + put_unaligned(l, (u32 *)dst); + } else { + *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2; + } + dst += 4; + src1 += 4; + src2 += 4; + len -= 4; + } + + while (len >= 2 && !(relalign & 1)) { + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { + u16 l = get_unaligned((u16 *)src1) ^ + get_unaligned((u16 *)src2); + put_unaligned(l, (u16 *)dst); + } else { + *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2; + } + dst += 2; + src1 += 2; + src2 += 2; + len -= 2; + } + + while (len--) + *dst++ = *src1++ ^ *src2++; +} +EXPORT_SYMBOL_GPL(__crypto_xor); + +MODULE_LICENSE("GPL"); |
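As the new comment at the top of lib/crypto/utils.c says, __crypto_xor() is not meant to be called directly; callers keep using the crypto_xor() and crypto_xor_cpy() inline wrappers from <crypto/algapi.h>, whose out-of-line helper now lives in the object built under CRYPTO_LIB_UTILS. A small illustrative caller, with made-up function names:

#include <crypto/algapi.h>

/* XOR a keystream into a buffer in place, as a stream-cipher mode would. */
static void demo_apply_keystream(u8 *data, const u8 *keystream,
				 unsigned int len)
{
	crypto_xor(data, keystream, len);	/* dst may alias a source */
}

/* Same operation, but leaving both inputs untouched. */
static void demo_xor_cpy(u8 *out, const u8 *a, const u8 *b, unsigned int len)
{
	crypto_xor_cpy(out, a, b, len);
}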
