Diffstat (limited to 'drivers/crypto')
46 files changed, 2123 insertions, 166 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index a6688d54984c..8d3b5d2890f8 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -728,6 +728,7 @@ config CRYPTO_DEV_TEGRA config CRYPTO_DEV_XILINX_TRNG tristate "Support for Xilinx True Random Generator" depends on ZYNQMP_FIRMWARE || COMPILE_TEST + select CRYPTO_DF80090A select CRYPTO_RNG select HW_RANDOM help diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index 8bc08089f044..36a1ebca2e70 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -502,6 +502,7 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base); ss = algt->ss; + j = 0; digestsize = crypto_ahash_digestsize(tfm); if (digestsize == SHA224_DIGEST_SIZE) @@ -536,7 +537,6 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) goto err_dma_result; } - j = 0; len = areq->nbytes; sg = areq->src; i = 0; diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c index 8d1c79aaca07..5993bcba9716 100644 --- a/drivers/crypto/aspeed/aspeed-acry.c +++ b/drivers/crypto/aspeed/aspeed-acry.c @@ -787,7 +787,6 @@ static int aspeed_acry_probe(struct platform_device *pdev) err_engine_rsa_start: crypto_engine_exit(acry_dev->crypt_engine_rsa); clk_exit: - clk_disable_unprepare(acry_dev->clk); return rc; } @@ -799,7 +798,6 @@ static void aspeed_acry_remove(struct platform_device *pdev) aspeed_acry_unregister(acry_dev); crypto_engine_exit(acry_dev->crypt_engine_rsa); tasklet_kill(&acry_dev->done_task); - clk_disable_unprepare(acry_dev->clk); } MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches); diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c index a895e4289efa..9688d116d07e 100644 --- a/drivers/crypto/atmel-i2c.c +++ b/drivers/crypto/atmel-i2c.c @@ -402,7 +402,7 @@ EXPORT_SYMBOL(atmel_i2c_probe); static int __init atmel_i2c_init(void) { - atmel_wq = alloc_workqueue("atmel_wq", 0, 0); + atmel_wq = alloc_workqueue("atmel_wq", WQ_PERCPU, 0); return atmel_wq ? 
0 : -ENOMEM; } diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index 75ee065da1ec..b04d6379244a 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -252,7 +252,7 @@ struct artpec6_crypto_dma_descriptors { }; enum artpec6_crypto_variant { - ARTPEC6_CRYPTO, + ARTPEC6_CRYPTO = 1, ARTPEC7_CRYPTO, }; @@ -2842,7 +2842,6 @@ MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match); static int artpec6_crypto_probe(struct platform_device *pdev) { - const struct of_device_id *match; enum artpec6_crypto_variant variant; struct artpec6_crypto *ac; struct device *dev = &pdev->dev; @@ -2853,12 +2852,10 @@ static int artpec6_crypto_probe(struct platform_device *pdev) if (artpec6_crypto_dev) return -ENODEV; - match = of_match_node(artpec6_crypto_of_match, dev->of_node); - if (!match) + variant = (enum artpec6_crypto_variant)of_device_get_match_data(dev); + if (!variant) return -EINVAL; - variant = (enum artpec6_crypto_variant)match->data; - base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c index 079a22cc9f02..c18dbac56493 100644 --- a/drivers/crypto/caam/blob_gen.c +++ b/drivers/crypto/caam/blob_gen.c @@ -2,13 +2,14 @@ /* * Copyright (C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de> * Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de> - * Copyright 2024 NXP + * Copyright 2024-2025 NXP */ #define pr_fmt(fmt) "caam blob_gen: " fmt #include <linux/bitfield.h> #include <linux/device.h> +#include <keys/trusted-type.h> #include <soc/fsl/caam-blob.h> #include "compat.h" @@ -60,18 +61,27 @@ static void caam_blob_job_done(struct device *dev, u32 *desc, u32 err, void *con complete(&res->completion); } +static u32 check_caam_state(struct device *jrdev) +{ + const struct caam_drv_private *ctrlpriv; + + ctrlpriv = dev_get_drvdata(jrdev->parent); + return FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status)); +} + int caam_process_blob(struct caam_blob_priv *priv, struct caam_blob_info *info, bool encap) { - const struct caam_drv_private *ctrlpriv; struct caam_blob_job_result testres; struct device *jrdev = &priv->jrdev; dma_addr_t dma_in, dma_out; int op = OP_PCLID_BLOB; + int hwbk_caam_ovhd = 0; size_t output_len; u32 *desc; u32 moo; int ret; + int len; if (info->key_mod_len > CAAM_BLOB_KEYMOD_LENGTH) return -EINVAL; @@ -82,14 +92,29 @@ int caam_process_blob(struct caam_blob_priv *priv, } else { op |= OP_TYPE_DECAP_PROTOCOL; output_len = info->input_len - CAAM_BLOB_OVERHEAD; + info->output_len = output_len; + } + + if (encap && info->pkey_info.is_pkey) { + op |= OP_PCL_BLOB_BLACK; + if (info->pkey_info.key_enc_algo == CAAM_ENC_ALGO_CCM) { + op |= OP_PCL_BLOB_EKT; + hwbk_caam_ovhd = CAAM_CCM_OVERHEAD; + } + if ((info->input_len + hwbk_caam_ovhd) > MAX_KEY_SIZE) + return -EINVAL; + + len = info->input_len + hwbk_caam_ovhd; + } else { + len = info->input_len; } desc = kzalloc(CAAM_BLOB_DESC_BYTES_MAX, GFP_KERNEL); if (!desc) return -ENOMEM; - dma_in = dma_map_single(jrdev, info->input, info->input_len, - DMA_TO_DEVICE); + dma_in = dma_map_single(jrdev, info->input, len, + encap ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (dma_mapping_error(jrdev, dma_in)) { dev_err(jrdev, "unable to map input DMA buffer\n"); ret = -ENOMEM; @@ -104,8 +129,7 @@ int caam_process_blob(struct caam_blob_priv *priv, goto out_unmap_in; } - ctrlpriv = dev_get_drvdata(jrdev->parent); - moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status)); + moo = check_caam_state(jrdev); if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED) dev_warn(jrdev, "using insecure test key, enable HAB to use unique device key!\n"); @@ -117,18 +141,48 @@ int caam_process_blob(struct caam_blob_priv *priv, * Class 1 Context DWords 0+1+2+3. The random BK is stored in the * Class 1 Key Register. Operation Mode is set to AES-CCM. */ - init_job_desc(desc, 0); + + if (encap && info->pkey_info.is_pkey) { + /*!1. key command used to load class 1 key register + * from input plain key. + */ + append_key(desc, dma_in, info->input_len, + CLASS_1 | KEY_DEST_CLASS_REG); + /*!2. Fifostore to store protected key from class 1 key register. */ + if (info->pkey_info.key_enc_algo == CAAM_ENC_ALGO_CCM) { + append_fifo_store(desc, dma_in, info->input_len, + LDST_CLASS_1_CCB | + FIFOST_TYPE_KEY_CCM_JKEK); + } else { + append_fifo_store(desc, dma_in, info->input_len, + LDST_CLASS_1_CCB | + FIFOST_TYPE_KEY_KEK); + } + /* + * JUMP_OFFSET specifies the offset of the JUMP target from + * the JUMP command's address in the descriptor buffer. + */ + append_jump(desc, JUMP_COND_NOP | BIT(0) << JUMP_OFFSET_SHIFT); + } + + /*!3. Load class 2 key with key modifier. */ append_key_as_imm(desc, info->key_mod, info->key_mod_len, - info->key_mod_len, CLASS_2 | KEY_DEST_CLASS_REG); - append_seq_in_ptr_intlen(desc, dma_in, info->input_len, 0); - append_seq_out_ptr_intlen(desc, dma_out, output_len, 0); + info->key_mod_len, CLASS_2 | KEY_DEST_CLASS_REG); + + /*!4. SEQ IN PTR Command. */ + append_seq_in_ptr(desc, dma_in, info->input_len, 0); + + /*!5. SEQ OUT PTR Command. */ + append_seq_out_ptr(desc, dma_out, output_len, 0); + + /*!6. Blob encapsulation/decapsulation PROTOCOL Command. */ append_operation(desc, op); - print_hex_dump_debug("data@"__stringify(__LINE__)": ", + print_hex_dump_debug("data@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 1, info->input, - info->input_len, false); - print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", + len, false); + print_hex_dump_debug("jobdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 1, desc, desc_bytes(desc), false); @@ -139,7 +193,7 @@ int caam_process_blob(struct caam_blob_priv *priv, if (ret == -EINPROGRESS) { wait_for_completion(&testres.completion); ret = testres.err; - print_hex_dump_debug("output@"__stringify(__LINE__)": ", + print_hex_dump_debug("output@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 1, info->output, output_len, false); } @@ -149,10 +203,10 @@ int caam_process_blob(struct caam_blob_priv *priv, dma_unmap_single(jrdev, dma_out, output_len, DMA_FROM_DEVICE); out_unmap_in: - dma_unmap_single(jrdev, dma_in, info->input_len, DMA_TO_DEVICE); + dma_unmap_single(jrdev, dma_in, len, + encap ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); out_free: kfree(desc); - return ret; } EXPORT_SYMBOL(caam_process_blob); diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 2cfb1b8d8c7c..32a6e6e15ee2 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -3,7 +3,7 @@ * caam - Freescale FSL CAAM support for crypto API * * Copyright 2008-2011 Freescale Semiconductor, Inc. 
- * Copyright 2016-2019, 2023 NXP + * Copyright 2016-2019, 2023, 2025 NXP * * Based on talitos crypto API driver. * @@ -61,13 +61,16 @@ #include <crypto/internal/engine.h> #include <crypto/internal/skcipher.h> #include <crypto/xts.h> +#include <keys/trusted-type.h> #include <linux/dma-mapping.h> #include <linux/device.h> #include <linux/err.h> #include <linux/module.h> #include <linux/kernel.h> +#include <linux/key-type.h> #include <linux/slab.h> #include <linux/string.h> +#include <soc/fsl/caam-blob.h> /* * crypto alg @@ -119,12 +122,15 @@ struct caam_ctx { dma_addr_t sh_desc_enc_dma; dma_addr_t sh_desc_dec_dma; dma_addr_t key_dma; + u8 protected_key[CAAM_MAX_KEY_SIZE]; + dma_addr_t protected_key_dma; enum dma_data_direction dir; struct device *jrdev; struct alginfo adata; struct alginfo cdata; unsigned int authsize; bool xts_key_fallback; + bool is_blob; struct crypto_skcipher *fallback; }; @@ -751,9 +757,14 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, print_hex_dump_debug("key in @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + /* Here keylen is actual key length */ ctx->cdata.keylen = keylen; ctx->cdata.key_virt = key; ctx->cdata.key_inline = true; + /* Here protected key len is plain key length */ + ctx->cdata.plain_keylen = keylen; + ctx->cdata.key_cmd_opt = 0; + /* skcipher_encrypt shared descriptor */ desc = ctx->sh_desc_enc; @@ -772,6 +783,62 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, return 0; } +static int paes_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, + unsigned int keylen) +{ + struct caam_pkey_info *pkey_info = (struct caam_pkey_info *)key; + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); + struct device *jrdev = ctx->jrdev; + int err; + + ctx->cdata.key_inline = false; + + keylen = keylen - CAAM_PKEY_HEADER; + + /* Retrieve the length of key */ + ctx->cdata.plain_keylen = pkey_info->plain_key_sz; + + /* Retrieve the length of blob*/ + ctx->cdata.keylen = keylen; + + /* Retrieve the address of the blob */ + ctx->cdata.key_virt = pkey_info->key_buf; + + /* Validate key length for AES algorithms */ + err = aes_check_keylen(ctx->cdata.plain_keylen); + if (err) { + dev_err(jrdev, "bad key length\n"); + return err; + } + + /* set command option */ + ctx->cdata.key_cmd_opt |= KEY_ENC; + + /* check if the Protected-Key is CCM key */ + if (pkey_info->key_enc_algo == CAAM_ENC_ALGO_CCM) + ctx->cdata.key_cmd_opt |= KEY_EKT; + + memcpy(ctx->key, ctx->cdata.key_virt, keylen); + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); + ctx->cdata.key_dma = ctx->key_dma; + + if (pkey_info->key_enc_algo == CAAM_ENC_ALGO_CCM) + ctx->protected_key_dma = dma_map_single(jrdev, ctx->protected_key, + ctx->cdata.plain_keylen + + CAAM_CCM_OVERHEAD, + DMA_FROM_DEVICE); + else + ctx->protected_key_dma = dma_map_single(jrdev, ctx->protected_key, + ctx->cdata.plain_keylen, + DMA_FROM_DEVICE); + + ctx->cdata.protected_key_dma = ctx->protected_key_dma; + ctx->is_blob = true; + + return 0; +} + static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { @@ -1254,7 +1321,9 @@ static void init_skcipher_job(struct skcipher_request *req, struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct device *jrdev = ctx->jrdev; int ivsize = crypto_skcipher_ivsize(skcipher); - u32 *desc = edesc->hw_desc; + u32 *desc = !ctx->is_blob ? 
edesc->hw_desc : + (u32 *)((u8 *)edesc->hw_desc + CAAM_DESC_BYTES_MAX); + dma_addr_t desc_dma; u32 *sh_desc; u32 in_options = 0, out_options = 0; dma_addr_t src_dma, dst_dma, ptr; @@ -1269,11 +1338,6 @@ static void init_skcipher_job(struct skcipher_request *req, DUMP_PREFIX_ADDRESS, 16, 4, req->src, edesc->src_nents > 1 ? 100 : req->cryptlen, 1); - sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec; - ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma; - - len = desc_len(sh_desc); - init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); if (ivsize || edesc->mapped_src_nents > 1) { src_dma = edesc->sec4_sg_dma; @@ -1283,8 +1347,6 @@ static void init_skcipher_job(struct skcipher_request *req, src_dma = sg_dma_address(req->src); } - append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options); - if (likely(req->src == req->dst)) { dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry); out_options = in_options; @@ -1296,7 +1358,25 @@ static void init_skcipher_job(struct skcipher_request *req, out_options = LDST_SGF; } - append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options); + if (ctx->is_blob) { + cnstr_desc_skcipher_enc_dec(desc, &ctx->cdata, + src_dma, dst_dma, req->cryptlen + ivsize, + in_options, out_options, + ivsize, encrypt); + + desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE); + + cnstr_desc_protected_blob_decap(edesc->hw_desc, &ctx->cdata, desc_dma); + } else { + sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec; + ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma; + + len = desc_len(sh_desc); + init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); + append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options); + + append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options); + } } /* @@ -1817,6 +1897,7 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); u32 *desc; int ret = 0; + int len; /* * XTS is expected to return an error even for input length = 0 @@ -1842,8 +1923,12 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) crypto_skcipher_decrypt(&rctx->fallback_req); } + len = DESC_JOB_IO_LEN * CAAM_CMD_SZ; + if (ctx->is_blob) + len += CAAM_DESC_BYTES_MAX; + /* allocate extended descriptor */ - edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); + edesc = skcipher_edesc_alloc(req, len); if (IS_ERR(edesc)) return PTR_ERR(edesc); @@ -1888,6 +1973,27 @@ static struct caam_skcipher_alg driver_algs[] = { { .skcipher.base = { .base = { + .cra_name = "cbc(paes)", + .cra_driver_name = "cbc-paes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = paes_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE + CAAM_BLOB_OVERHEAD + + CAAM_PKEY_HEADER, + .max_keysize = AES_MAX_KEY_SIZE + CAAM_BLOB_OVERHEAD + + CAAM_PKEY_HEADER, + .ivsize = AES_BLOCK_SIZE, + }, + .skcipher.op = { + .do_one_request = skcipher_do_one_req, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + }, + { + .skcipher.base = { + .base = { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-caam", .cra_blocksize = AES_BLOCK_SIZE, diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c index 7571e1ac913b..04c1105eb1f5 100644 --- a/drivers/crypto/caam/caamalg_desc.c +++ b/drivers/crypto/caam/caamalg_desc.c @@ -2,12 +2,13 @@ /* * Shared descriptors for 
aead, skcipher algorithms * - * Copyright 2016-2019 NXP + * Copyright 2016-2019, 2025 NXP */ #include "compat.h" #include "desc_constr.h" #include "caamalg_desc.h" +#include <soc/fsl/caam-blob.h> /* * For aead functions, read payload and write payload, @@ -1364,6 +1365,84 @@ static inline void skcipher_append_src_dst(u32 *desc) append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); } +void cnstr_desc_skcipher_enc_dec(u32 * const desc, struct alginfo *cdata, + dma_addr_t src, dma_addr_t dst, unsigned int data_sz, + unsigned int in_options, unsigned int out_options, + unsigned int ivsize, const bool encrypt) +{ + u32 options = cdata->algtype | OP_ALG_AS_INIT; + + if (encrypt) + options |= OP_ALG_ENCRYPT; + else + options |= OP_ALG_DECRYPT; + + init_job_desc(desc, 0); + + append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL | + JUMP_COND_NOP | JUMP_TEST_ALL | 1); + + append_key(desc, cdata->protected_key_dma, cdata->plain_keylen, + CLASS_1 | KEY_DEST_CLASS_REG | cdata->key_cmd_opt); + + append_seq_in_ptr(desc, src, data_sz, in_options); + + append_seq_out_ptr(desc, dst, data_sz, out_options); + + /* Load IV, if there is one */ + if (ivsize) + append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | + LDST_CLASS_1_CCB); + + append_operation(desc, options); + + skcipher_append_src_dst(desc); + + /* Store IV */ + if (ivsize) + append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | + LDST_CLASS_1_CCB); + + print_hex_dump_debug("skcipher_enc_dec job desc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); +} +EXPORT_SYMBOL(cnstr_desc_skcipher_enc_dec); + +void cnstr_desc_protected_blob_decap(u32 * const desc, struct alginfo *cdata, + dma_addr_t next_desc_addr) +{ + u32 protected_store; + + init_job_desc(desc, 0); + + /* Load key modifier */ + append_load_as_imm(desc, KEYMOD, sizeof(KEYMOD) - 1, + LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY); + + append_seq_in_ptr_intlen(desc, cdata->key_dma, + cdata->plain_keylen + CAAM_BLOB_OVERHEAD, 0); + + append_seq_out_ptr_intlen(desc, cdata->protected_key_dma, + cdata->plain_keylen, 0); + + protected_store = OP_PCLID_BLOB | OP_PCL_BLOB_BLACK; + if ((cdata->key_cmd_opt >> KEY_EKT_OFFSET) & 1) + protected_store |= OP_PCL_BLOB_EKT; + + append_operation(desc, OP_TYPE_DECAP_PROTOCOL | protected_store); + + if (next_desc_addr) { + append_jump(desc, JUMP_TYPE_NONLOCAL | JUMP_TEST_ALL); + append_ptr(desc, next_desc_addr); + } + + print_hex_dump_debug("protected blob decap job desc@" __stringify(__LINE__) ":", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +} +EXPORT_SYMBOL(cnstr_desc_protected_blob_decap); + /** * cnstr_shdsc_skcipher_encap - skcipher encapsulation shared descriptor * @desc: pointer to buffer used for descriptor construction @@ -1391,7 +1470,8 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata, /* Load class1 key only */ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, - cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); + cdata->plain_keylen, CLASS_1 | KEY_DEST_CLASS_REG + | cdata->key_cmd_opt); /* Load nonce into CONTEXT1 reg */ if (is_rfc3686) { @@ -1466,7 +1546,8 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata, /* Load class1 key only */ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, - cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); + cdata->plain_keylen, CLASS_1 | KEY_DEST_CLASS_REG + | cdata->key_cmd_opt); /* Load nonce into CONTEXT1 reg */ if (is_rfc3686) { diff --git a/drivers/crypto/caam/caamalg_desc.h 
b/drivers/crypto/caam/caamalg_desc.h index f2893393ba5e..323490a4a756 100644 --- a/drivers/crypto/caam/caamalg_desc.h +++ b/drivers/crypto/caam/caamalg_desc.h @@ -2,7 +2,7 @@ /* * Shared descriptors for aead, skcipher algorithms * - * Copyright 2016 NXP + * Copyright 2016, 2025 NXP */ #ifndef _CAAMALG_DESC_H_ @@ -48,6 +48,9 @@ #define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \ 16 * CAAM_CMD_SZ) +/* Key modifier for CAAM Protected blobs */ +#define KEYMOD "SECURE_KEY" + void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, unsigned int icvsize, int era); @@ -113,4 +116,12 @@ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata); void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata); +void cnstr_desc_protected_blob_decap(u32 * const desc, struct alginfo *cdata, + dma_addr_t next_desc); + +void cnstr_desc_skcipher_enc_dec(u32 * const desc, struct alginfo *cdata, + dma_addr_t src, dma_addr_t dst, unsigned int data_sz, + unsigned int in_options, unsigned int out_options, + unsigned int ivsize, const bool encrypt); + #endif /* _CAAMALG_DESC_H_ */ diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index b3d14a7f4dd1..0eb43c862516 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c @@ -181,7 +181,9 @@ static inline void test_len(struct hwrng *rng, size_t len, bool wait) struct device *dev = ctx->ctrldev; buf = kcalloc(CAAM_RNG_MAX_FIFO_STORE_SIZE, sizeof(u8), GFP_KERNEL); - + if (!buf) { + return; + } while (len > 0) { read_len = rng->read(rng, buf, len, wait); diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index e13470901586..c28e94fcb8c7 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h @@ -4,7 +4,7 @@ * Definitions to support CAAM descriptor instruction generation * * Copyright 2008-2011 Freescale Semiconductor, Inc. - * Copyright 2018 NXP + * Copyright 2018, 2025 NXP */ #ifndef DESC_H @@ -162,6 +162,7 @@ * Enhanced Encryption of Key */ #define KEY_EKT 0x00100000 +#define KEY_EKT_OFFSET 20 /* * Encrypted with Trusted Key @@ -403,6 +404,7 @@ #define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT) #define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT) #define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_KEY_CCM_JKEK (0x14 << FIFOST_TYPE_SHIFT) #define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT) #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT) #define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT) @@ -1001,6 +1003,11 @@ #define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63 #define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65 +/* Blob protocol protinfo bits */ + +#define OP_PCL_BLOB_BLACK 0x0004 +#define OP_PCL_BLOB_EKT 0x0100 + /* For DTLS - OP_PCLID_DTLS */ #define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index 824c94d44f94..2a29dd2c9c8a 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h @@ -3,7 +3,7 @@ * caam descriptor construction helper functions * * Copyright 2008-2012 Freescale Semiconductor, Inc. 
- * Copyright 2019 NXP + * Copyright 2019, 2025 NXP */ #ifndef DESC_CONSTR_H @@ -498,17 +498,23 @@ do { \ * @keylen: length of the provided algorithm key, in bytes * @keylen_pad: padded length of the provided algorithm key, in bytes * @key_dma: dma (bus) address where algorithm key resides + * @protected_key_dma: dma (bus) address where protected key resides * @key_virt: virtual address where algorithm key resides * @key_inline: true - key can be inlined in the descriptor; false - key is * referenced by the descriptor + * @plain_keylen: size of the key to be loaded by the CAAM + * @key_cmd_opt: optional parameters for KEY command */ struct alginfo { u32 algtype; unsigned int keylen; unsigned int keylen_pad; dma_addr_t key_dma; + dma_addr_t protected_key_dma; const void *key_virt; bool key_inline; + u32 plain_keylen; + u32 key_cmd_opt; }; /** diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c index d4e06999af9b..a6a76e50ba84 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c +++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c @@ -192,7 +192,7 @@ int nitrox_mbox_init(struct nitrox_device *ndev) } /* allocate pf2vf response workqueue */ - ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0); + ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", WQ_PERCPU, 0); if (!ndev->iov.pf2vf_wq) { kfree(ndev->iov.vfdev); ndev->iov.vfdev = NULL; diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index f394e45e11ab..f16a0f611317 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -39,6 +39,7 @@ config CRYPTO_DEV_SP_PSP bool "Platform Security Processor (PSP) device" default y depends on CRYPTO_DEV_CCP_DD && X86_64 && AMD_IOMMU + select PCI_TSM if PCI help Provide support for the AMD Platform Security Processor (PSP). 
The PSP is a dedicated processor that provides support for key diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index a9626b30044a..0424e08561ef 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -16,6 +16,10 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ hsti.o \ sfs.o +ifeq ($(CONFIG_PCI_TSM),y) +ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += sev-dev-tsm.o sev-dev-tio.o +endif + obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-aes.o \ diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index c531d13d971f..246801912e1a 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -507,7 +507,7 @@ int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng); u32 trng_value; - int len = min_t(int, sizeof(trng_value), max); + int len = min(sizeof(trng_value), max); /* Locking is provided by the caller so we can update device * hwrng-related fields safely diff --git a/drivers/crypto/ccp/sev-dev-tio.c b/drivers/crypto/ccp/sev-dev-tio.c new file mode 100644 index 000000000000..9a98f98c20a7 --- /dev/null +++ b/drivers/crypto/ccp/sev-dev-tio.c @@ -0,0 +1,864 @@ +// SPDX-License-Identifier: GPL-2.0-only + +// Interface to PSP for CCP/SEV-TIO/SNP-VM + +#include <linux/pci.h> +#include <linux/tsm.h> +#include <linux/psp.h> +#include <linux/vmalloc.h> +#include <linux/bitfield.h> +#include <linux/pci-doe.h> +#include <asm/sev-common.h> +#include <asm/sev.h> +#include <asm/page.h> +#include "sev-dev.h" +#include "sev-dev-tio.h" + +#define to_tio_status(dev_data) \ + (container_of((dev_data), struct tio_dsm, data)->sev->tio_status) + +#define SLA_PAGE_TYPE_DATA 0 +#define SLA_PAGE_TYPE_SCATTER 1 +#define SLA_PAGE_SIZE_4K 0 +#define SLA_PAGE_SIZE_2M 1 +#define SLA_SZ(s) ((s).page_size == SLA_PAGE_SIZE_2M ? SZ_2M : SZ_4K) +#define SLA_SCATTER_LEN(s) (SLA_SZ(s) / sizeof(struct sla_addr_t)) +#define SLA_EOL ((struct sla_addr_t) { .pfn = ((1UL << 40) - 1) }) +#define SLA_NULL ((struct sla_addr_t) { 0 }) +#define IS_SLA_NULL(s) ((s).sla == SLA_NULL.sla) +#define IS_SLA_EOL(s) ((s).sla == SLA_EOL.sla) + +static phys_addr_t sla_to_pa(struct sla_addr_t sla) +{ + u64 pfn = sla.pfn; + u64 pa = pfn << PAGE_SHIFT; + + return pa; +} + +static void *sla_to_va(struct sla_addr_t sla) +{ + void *va = __va(__sme_clr(sla_to_pa(sla))); + + return va; +} + +#define sla_to_pfn(sla) (__pa(sla_to_va(sla)) >> PAGE_SHIFT) +#define sla_to_page(sla) virt_to_page(sla_to_va(sla)) + +static struct sla_addr_t make_sla(struct page *pg, bool stp) +{ + u64 pa = __sme_set(page_to_phys(pg)); + struct sla_addr_t ret = { + .pfn = pa >> PAGE_SHIFT, + .page_size = SLA_PAGE_SIZE_4K, /* Do not do SLA_PAGE_SIZE_2M ATM */ + .page_type = stp ? SLA_PAGE_TYPE_SCATTER : SLA_PAGE_TYPE_DATA + }; + + return ret; +} + +/* the BUFFER Structure */ +#define SLA_BUFFER_FLAG_ENCRYPTION BIT(0) + +/* + * struct sla_buffer_hdr - Scatter list address buffer header + * + * @capacity_sz: Total capacity of the buffer in bytes + * @payload_sz: Size of buffer payload in bytes, must be multiple of 32B + * @flags: Buffer flags (SLA_BUFFER_FLAG_ENCRYPTION: buffer is encrypted) + * @iv: Initialization vector used for encryption + * @authtag: Authentication tag for encrypted buffer + */ +struct sla_buffer_hdr { + u32 capacity_sz; + u32 payload_sz; /* The size of BUFFER_PAYLOAD in bytes. 
Must be multiple of 32B */ + u32 flags; + u8 reserved1[4]; + u8 iv[16]; /* IV used for the encryption of this buffer */ + u8 authtag[16]; /* Authentication tag for this buffer */ + u8 reserved2[16]; +} __packed; + +enum spdm_data_type_t { + DOBJ_DATA_TYPE_SPDM = 0x1, + DOBJ_DATA_TYPE_SECURE_SPDM = 0x2, +}; + +struct spdm_dobj_hdr_req { + struct spdm_dobj_hdr hdr; /* hdr.id == SPDM_DOBJ_ID_REQ */ + u8 data_type; /* spdm_data_type_t */ + u8 reserved2[5]; +} __packed; + +struct spdm_dobj_hdr_resp { + struct spdm_dobj_hdr hdr; /* hdr.id == SPDM_DOBJ_ID_RESP */ + u8 data_type; /* spdm_data_type_t */ + u8 reserved2[5]; +} __packed; + +/* Defined in sev-dev-tio.h so sev-dev-tsm.c can read types of blobs */ +struct spdm_dobj_hdr_cert; +struct spdm_dobj_hdr_meas; +struct spdm_dobj_hdr_report; + +/* Used in all SPDM-aware TIO commands */ +struct spdm_ctrl { + struct sla_addr_t req; + struct sla_addr_t resp; + struct sla_addr_t scratch; + struct sla_addr_t output; +} __packed; + +static size_t sla_dobj_id_to_size(u8 id) +{ + size_t n; + + BUILD_BUG_ON(sizeof(struct spdm_dobj_hdr_resp) != 0x10); + switch (id) { + case SPDM_DOBJ_ID_REQ: + n = sizeof(struct spdm_dobj_hdr_req); + break; + case SPDM_DOBJ_ID_RESP: + n = sizeof(struct spdm_dobj_hdr_resp); + break; + default: + WARN_ON(1); + n = 0; + break; + } + + return n; +} + +#define SPDM_DOBJ_HDR_SIZE(hdr) sla_dobj_id_to_size((hdr)->id) +#define SPDM_DOBJ_DATA(hdr) ((u8 *)(hdr) + SPDM_DOBJ_HDR_SIZE(hdr)) +#define SPDM_DOBJ_LEN(hdr) ((hdr)->length - SPDM_DOBJ_HDR_SIZE(hdr)) + +#define sla_to_dobj_resp_hdr(buf) ((struct spdm_dobj_hdr_resp *) \ + sla_to_dobj_hdr_check((buf), SPDM_DOBJ_ID_RESP)) +#define sla_to_dobj_req_hdr(buf) ((struct spdm_dobj_hdr_req *) \ + sla_to_dobj_hdr_check((buf), SPDM_DOBJ_ID_REQ)) + +static struct spdm_dobj_hdr *sla_to_dobj_hdr(struct sla_buffer_hdr *buf) +{ + if (!buf) + return NULL; + + return (struct spdm_dobj_hdr *) &buf[1]; +} + +static struct spdm_dobj_hdr *sla_to_dobj_hdr_check(struct sla_buffer_hdr *buf, u32 check_dobjid) +{ + struct spdm_dobj_hdr *hdr = sla_to_dobj_hdr(buf); + + if (WARN_ON_ONCE(!hdr)) + return NULL; + + if (hdr->id != check_dobjid) { + pr_err("! 
ERROR: expected %d, found %d\n", check_dobjid, hdr->id); + return NULL; + } + + return hdr; +} + +static void *sla_to_data(struct sla_buffer_hdr *buf, u32 dobjid) +{ + struct spdm_dobj_hdr *hdr = sla_to_dobj_hdr(buf); + + if (WARN_ON_ONCE(dobjid != SPDM_DOBJ_ID_REQ && dobjid != SPDM_DOBJ_ID_RESP)) + return NULL; + + if (!hdr) + return NULL; + + return (u8 *) hdr + sla_dobj_id_to_size(dobjid); +} + +/* + * struct sev_data_tio_status - SEV_CMD_TIO_STATUS command + * + * @length: Length of this command buffer in bytes + * @status_paddr: System physical address of the TIO_STATUS structure + */ +struct sev_data_tio_status { + u32 length; + u8 reserved[4]; + u64 status_paddr; +} __packed; + +/* TIO_INIT */ +struct sev_data_tio_init { + u32 length; + u8 reserved[12]; +} __packed; + +/* + * struct sev_data_tio_dev_create - TIO_DEV_CREATE command + * + * @length: Length in bytes of this command buffer + * @dev_ctx_sla: Scatter list address pointing to a buffer to be used as a device context buffer + * @device_id: PCIe Routing Identifier of the device to connect to + * @root_port_id: PCIe Routing Identifier of the root port of the device + * @segment_id: PCIe Segment Identifier of the device to connect to + */ +struct sev_data_tio_dev_create { + u32 length; + u8 reserved1[4]; + struct sla_addr_t dev_ctx_sla; + u16 device_id; + u16 root_port_id; + u8 segment_id; + u8 reserved2[11]; +} __packed; + +/* + * struct sev_data_tio_dev_connect - TIO_DEV_CONNECT command + * + * @length: Length in bytes of this command buffer + * @spdm_ctrl: SPDM control structure defined in Section 5.1 + * @dev_ctx_sla: Scatter list address of the device context buffer + * @tc_mask: Bitmask of the traffic classes to initialize for SEV-TIO usage. + * Setting the kth bit of the TC_MASK to 1 indicates that the traffic + * class k will be initialized + * @cert_slot: Slot number of the certificate requested for constructing the SPDM session + * @ide_stream_id: IDE stream IDs to be associated with this device. 
+ * Valid only if corresponding bit in TC_MASK is set + */ +struct sev_data_tio_dev_connect { + u32 length; + u8 reserved1[4]; + struct spdm_ctrl spdm_ctrl; + u8 reserved2[8]; + struct sla_addr_t dev_ctx_sla; + u8 tc_mask; + u8 cert_slot; + u8 reserved3[6]; + u8 ide_stream_id[8]; + u8 reserved4[8]; +} __packed; + +/* + * struct sev_data_tio_dev_disconnect - TIO_DEV_DISCONNECT command + * + * @length: Length in bytes of this command buffer + * @flags: Command flags (TIO_DEV_DISCONNECT_FLAG_FORCE: force disconnect) + * @spdm_ctrl: SPDM control structure defined in Section 5.1 + * @dev_ctx_sla: Scatter list address of the device context buffer + */ +#define TIO_DEV_DISCONNECT_FLAG_FORCE BIT(0) + +struct sev_data_tio_dev_disconnect { + u32 length; + u32 flags; + struct spdm_ctrl spdm_ctrl; + struct sla_addr_t dev_ctx_sla; +} __packed; + +/* + * struct sev_data_tio_dev_meas - TIO_DEV_MEASUREMENTS command + * + * @length: Length in bytes of this command buffer + * @flags: Command flags (TIO_DEV_MEAS_FLAG_RAW_BITSTREAM: request raw measurements) + * @spdm_ctrl: SPDM control structure defined in Section 5.1 + * @dev_ctx_sla: Scatter list address of the device context buffer + * @meas_nonce: Nonce for measurement freshness verification + */ +#define TIO_DEV_MEAS_FLAG_RAW_BITSTREAM BIT(0) + +struct sev_data_tio_dev_meas { + u32 length; + u32 flags; + struct spdm_ctrl spdm_ctrl; + struct sla_addr_t dev_ctx_sla; + u8 meas_nonce[32]; +} __packed; + +/* + * struct sev_data_tio_dev_certs - TIO_DEV_CERTIFICATES command + * + * @length: Length in bytes of this command buffer + * @spdm_ctrl: SPDM control structure defined in Section 5.1 + * @dev_ctx_sla: Scatter list address of the device context buffer + */ +struct sev_data_tio_dev_certs { + u32 length; + u8 reserved[4]; + struct spdm_ctrl spdm_ctrl; + struct sla_addr_t dev_ctx_sla; +} __packed; + +/* + * struct sev_data_tio_dev_reclaim - TIO_DEV_RECLAIM command + * + * @length: Length in bytes of this command buffer + * @dev_ctx_sla: Scatter list address of the device context buffer + * + * This command reclaims resources associated with a device context. 
+ */ +struct sev_data_tio_dev_reclaim { + u32 length; + u8 reserved[4]; + struct sla_addr_t dev_ctx_sla; +} __packed; + +static struct sla_buffer_hdr *sla_buffer_map(struct sla_addr_t sla) +{ + struct sla_buffer_hdr *buf; + + BUILD_BUG_ON(sizeof(struct sla_buffer_hdr) != 0x40); + if (IS_SLA_NULL(sla)) + return NULL; + + if (sla.page_type == SLA_PAGE_TYPE_SCATTER) { + struct sla_addr_t *scatter = sla_to_va(sla); + unsigned int i, npages = 0; + + for (i = 0; i < SLA_SCATTER_LEN(sla); ++i) { + if (WARN_ON_ONCE(SLA_SZ(scatter[i]) > SZ_4K)) + return NULL; + + if (WARN_ON_ONCE(scatter[i].page_type == SLA_PAGE_TYPE_SCATTER)) + return NULL; + + if (IS_SLA_EOL(scatter[i])) { + npages = i; + break; + } + } + if (WARN_ON_ONCE(!npages)) + return NULL; + + struct page **pp = kmalloc_array(npages, sizeof(pp[0]), GFP_KERNEL); + + if (!pp) + return NULL; + + for (i = 0; i < npages; ++i) + pp[i] = sla_to_page(scatter[i]); + + buf = vm_map_ram(pp, npages, 0); + kfree(pp); + } else { + struct page *pg = sla_to_page(sla); + + buf = vm_map_ram(&pg, 1, 0); + } + + return buf; +} + +static void sla_buffer_unmap(struct sla_addr_t sla, struct sla_buffer_hdr *buf) +{ + if (!buf) + return; + + if (sla.page_type == SLA_PAGE_TYPE_SCATTER) { + struct sla_addr_t *scatter = sla_to_va(sla); + unsigned int i, npages = 0; + + for (i = 0; i < SLA_SCATTER_LEN(sla); ++i) { + if (IS_SLA_EOL(scatter[i])) { + npages = i; + break; + } + } + if (!npages) + return; + + vm_unmap_ram(buf, npages); + } else { + vm_unmap_ram(buf, 1); + } +} + +static void dobj_response_init(struct sla_buffer_hdr *buf) +{ + struct spdm_dobj_hdr *dobj = sla_to_dobj_hdr(buf); + + dobj->id = SPDM_DOBJ_ID_RESP; + dobj->version.major = 0x1; + dobj->version.minor = 0; + dobj->length = 0; + buf->payload_sz = sla_dobj_id_to_size(dobj->id) + dobj->length; +} + +static void sla_free(struct sla_addr_t sla, size_t len, bool firmware_state) +{ + unsigned int npages = PAGE_ALIGN(len) >> PAGE_SHIFT; + struct sla_addr_t *scatter = NULL; + int ret = 0, i; + + if (IS_SLA_NULL(sla)) + return; + + if (firmware_state) { + if (sla.page_type == SLA_PAGE_TYPE_SCATTER) { + scatter = sla_to_va(sla); + + for (i = 0; i < npages; ++i) { + if (IS_SLA_EOL(scatter[i])) + break; + + ret = snp_reclaim_pages(sla_to_pa(scatter[i]), 1, false); + if (ret) + break; + } + } else { + ret = snp_reclaim_pages(sla_to_pa(sla), 1, false); + } + } + + if (WARN_ON(ret)) + return; + + if (scatter) { + for (i = 0; i < npages; ++i) { + if (IS_SLA_EOL(scatter[i])) + break; + free_page((unsigned long)sla_to_va(scatter[i])); + } + } + + free_page((unsigned long)sla_to_va(sla)); +} + +static struct sla_addr_t sla_alloc(size_t len, bool firmware_state) +{ + unsigned long i, npages = PAGE_ALIGN(len) >> PAGE_SHIFT; + struct sla_addr_t *scatter = NULL; + struct sla_addr_t ret = SLA_NULL; + struct sla_buffer_hdr *buf; + struct page *pg; + + if (npages == 0) + return ret; + + if (WARN_ON_ONCE(npages > ((PAGE_SIZE / sizeof(struct sla_addr_t)) + 1))) + return ret; + + BUILD_BUG_ON(PAGE_SIZE < SZ_4K); + + if (npages > 1) { + pg = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!pg) + return SLA_NULL; + + ret = make_sla(pg, true); + scatter = page_to_virt(pg); + for (i = 0; i < npages; ++i) { + pg = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!pg) + goto no_reclaim_exit; + + scatter[i] = make_sla(pg, false); + } + scatter[i] = SLA_EOL; + } else { + pg = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!pg) + return SLA_NULL; + + ret = make_sla(pg, false); + } + + buf = sla_buffer_map(ret); + if (!buf) + goto no_reclaim_exit; + + 
buf->capacity_sz = (npages << PAGE_SHIFT); + sla_buffer_unmap(ret, buf); + + if (firmware_state) { + if (scatter) { + for (i = 0; i < npages; ++i) { + if (rmp_make_private(sla_to_pfn(scatter[i]), 0, + PG_LEVEL_4K, 0, true)) + goto free_exit; + } + } else { + if (rmp_make_private(sla_to_pfn(ret), 0, PG_LEVEL_4K, 0, true)) + goto no_reclaim_exit; + } + } + + return ret; + +no_reclaim_exit: + firmware_state = false; +free_exit: + sla_free(ret, len, firmware_state); + return SLA_NULL; +} + +/* Expands a buffer, only firmware owned buffers allowed for now */ +static int sla_expand(struct sla_addr_t *sla, size_t *len) +{ + struct sla_buffer_hdr *oldbuf = sla_buffer_map(*sla), *newbuf; + struct sla_addr_t oldsla = *sla, newsla; + size_t oldlen = *len, newlen; + + if (!oldbuf) + return -EFAULT; + + newlen = oldbuf->capacity_sz; + if (oldbuf->capacity_sz == oldlen) { + /* This buffer does not require expansion, must be another buffer */ + sla_buffer_unmap(oldsla, oldbuf); + return 1; + } + + pr_notice("Expanding BUFFER from %ld to %ld bytes\n", oldlen, newlen); + + newsla = sla_alloc(newlen, true); + if (IS_SLA_NULL(newsla)) + return -ENOMEM; + + newbuf = sla_buffer_map(newsla); + if (!newbuf) { + sla_free(newsla, newlen, true); + return -EFAULT; + } + + memcpy(newbuf, oldbuf, oldlen); + + sla_buffer_unmap(newsla, newbuf); + sla_free(oldsla, oldlen, true); + *sla = newsla; + *len = newlen; + + return 0; +} + +static int sev_tio_do_cmd(int cmd, void *data, size_t data_len, int *psp_ret, + struct tsm_dsm_tio *dev_data) +{ + int rc; + + *psp_ret = 0; + rc = sev_do_cmd(cmd, data, psp_ret); + + if (WARN_ON(!rc && *psp_ret == SEV_RET_SPDM_REQUEST)) + return -EIO; + + if (rc == 0 && *psp_ret == SEV_RET_EXPAND_BUFFER_LENGTH_REQUEST) { + int rc1, rc2; + + rc1 = sla_expand(&dev_data->output, &dev_data->output_len); + if (rc1 < 0) + return rc1; + + rc2 = sla_expand(&dev_data->scratch, &dev_data->scratch_len); + if (rc2 < 0) + return rc2; + + if (!rc1 && !rc2) + /* Neither buffer requires expansion, this is wrong */ + return -EFAULT; + + *psp_ret = 0; + rc = sev_do_cmd(cmd, data, psp_ret); + } + + if ((rc == 0 || rc == -EIO) && *psp_ret == SEV_RET_SPDM_REQUEST) { + struct spdm_dobj_hdr_resp *resp_hdr; + struct spdm_dobj_hdr_req *req_hdr; + struct sev_tio_status *tio_status = to_tio_status(dev_data); + size_t resp_len = tio_status->spdm_req_size_max - + (sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) + sizeof(struct sla_buffer_hdr)); + + if (!dev_data->cmd) { + if (WARN_ON_ONCE(!data_len || (data_len != *(u32 *) data))) + return -EINVAL; + if (WARN_ON(data_len > sizeof(dev_data->cmd_data))) + return -EFAULT; + memcpy(dev_data->cmd_data, data, data_len); + memset(&dev_data->cmd_data[data_len], 0xFF, + sizeof(dev_data->cmd_data) - data_len); + dev_data->cmd = cmd; + } + + req_hdr = sla_to_dobj_req_hdr(dev_data->reqbuf); + resp_hdr = sla_to_dobj_resp_hdr(dev_data->respbuf); + switch (req_hdr->data_type) { + case DOBJ_DATA_TYPE_SPDM: + rc = PCI_DOE_FEATURE_CMA; + break; + case DOBJ_DATA_TYPE_SECURE_SPDM: + rc = PCI_DOE_FEATURE_SSESSION; + break; + default: + return -EINVAL; + } + resp_hdr->data_type = req_hdr->data_type; + dev_data->spdm.req_len = req_hdr->hdr.length - + sla_dobj_id_to_size(SPDM_DOBJ_ID_REQ); + dev_data->spdm.rsp_len = resp_len; + } else if (dev_data && dev_data->cmd) { + /* For either error or success just stop the bouncing */ + memset(dev_data->cmd_data, 0, sizeof(dev_data->cmd_data)); + dev_data->cmd = 0; + } + + return rc; +} + +int sev_tio_continue(struct tsm_dsm_tio *dev_data) +{ + struct 
spdm_dobj_hdr_resp *resp_hdr; + int ret; + + if (!dev_data || !dev_data->cmd) + return -EINVAL; + + resp_hdr = sla_to_dobj_resp_hdr(dev_data->respbuf); + resp_hdr->hdr.length = ALIGN(sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) + + dev_data->spdm.rsp_len, 32); + dev_data->respbuf->payload_sz = resp_hdr->hdr.length; + + ret = sev_tio_do_cmd(dev_data->cmd, dev_data->cmd_data, 0, + &dev_data->psp_ret, dev_data); + if (ret) + return ret; + + if (dev_data->psp_ret != SEV_RET_SUCCESS) + return -EINVAL; + + return 0; +} + +static void spdm_ctrl_init(struct spdm_ctrl *ctrl, struct tsm_dsm_tio *dev_data) +{ + ctrl->req = dev_data->req; + ctrl->resp = dev_data->resp; + ctrl->scratch = dev_data->scratch; + ctrl->output = dev_data->output; +} + +static void spdm_ctrl_free(struct tsm_dsm_tio *dev_data) +{ + struct sev_tio_status *tio_status = to_tio_status(dev_data); + size_t len = tio_status->spdm_req_size_max - + (sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) + + sizeof(struct sla_buffer_hdr)); + struct tsm_spdm *spdm = &dev_data->spdm; + + sla_buffer_unmap(dev_data->resp, dev_data->respbuf); + sla_buffer_unmap(dev_data->req, dev_data->reqbuf); + spdm->rsp = NULL; + spdm->req = NULL; + sla_free(dev_data->req, len, true); + sla_free(dev_data->resp, len, false); + sla_free(dev_data->scratch, tio_status->spdm_scratch_size_max, true); + + dev_data->req.sla = 0; + dev_data->resp.sla = 0; + dev_data->scratch.sla = 0; + dev_data->respbuf = NULL; + dev_data->reqbuf = NULL; + sla_free(dev_data->output, tio_status->spdm_out_size_max, true); +} + +static int spdm_ctrl_alloc(struct tsm_dsm_tio *dev_data) +{ + struct sev_tio_status *tio_status = to_tio_status(dev_data); + struct tsm_spdm *spdm = &dev_data->spdm; + int ret; + + dev_data->req = sla_alloc(tio_status->spdm_req_size_max, true); + dev_data->resp = sla_alloc(tio_status->spdm_req_size_max, false); + dev_data->scratch_len = tio_status->spdm_scratch_size_max; + dev_data->scratch = sla_alloc(dev_data->scratch_len, true); + dev_data->output_len = tio_status->spdm_out_size_max; + dev_data->output = sla_alloc(dev_data->output_len, true); + + if (IS_SLA_NULL(dev_data->req) || IS_SLA_NULL(dev_data->resp) || + IS_SLA_NULL(dev_data->scratch) || IS_SLA_NULL(dev_data->dev_ctx)) { + ret = -ENOMEM; + goto free_spdm_exit; + } + + dev_data->reqbuf = sla_buffer_map(dev_data->req); + dev_data->respbuf = sla_buffer_map(dev_data->resp); + if (!dev_data->reqbuf || !dev_data->respbuf) { + ret = -EFAULT; + goto free_spdm_exit; + } + + spdm->req = sla_to_data(dev_data->reqbuf, SPDM_DOBJ_ID_REQ); + spdm->rsp = sla_to_data(dev_data->respbuf, SPDM_DOBJ_ID_RESP); + if (!spdm->req || !spdm->rsp) { + ret = -EFAULT; + goto free_spdm_exit; + } + + dobj_response_init(dev_data->respbuf); + + return 0; + +free_spdm_exit: + spdm_ctrl_free(dev_data); + return ret; +} + +int sev_tio_init_locked(void *tio_status_page) +{ + struct sev_tio_status *tio_status = tio_status_page; + struct sev_data_tio_status data_status = { + .length = sizeof(data_status), + }; + int ret, psp_ret; + + data_status.status_paddr = __psp_pa(tio_status_page); + ret = __sev_do_cmd_locked(SEV_CMD_TIO_STATUS, &data_status, &psp_ret); + if (ret) + return ret; + + if (tio_status->length < offsetofend(struct sev_tio_status, tdictx_size) || + tio_status->reserved) + return -EFAULT; + + if (!tio_status->tio_en && !tio_status->tio_init_done) + return -ENOENT; + + if (tio_status->tio_init_done) + return -EBUSY; + + struct sev_data_tio_init ti = { .length = sizeof(ti) }; + + ret = __sev_do_cmd_locked(SEV_CMD_TIO_INIT, &ti, &psp_ret); + if 
(ret) + return ret; + + ret = __sev_do_cmd_locked(SEV_CMD_TIO_STATUS, &data_status, &psp_ret); + if (ret) + return ret; + + return 0; +} + +int sev_tio_dev_create(struct tsm_dsm_tio *dev_data, u16 device_id, + u16 root_port_id, u8 segment_id) +{ + struct sev_tio_status *tio_status = to_tio_status(dev_data); + struct sev_data_tio_dev_create create = { + .length = sizeof(create), + .device_id = device_id, + .root_port_id = root_port_id, + .segment_id = segment_id, + }; + void *data_pg; + int ret; + + dev_data->dev_ctx = sla_alloc(tio_status->devctx_size, true); + if (IS_SLA_NULL(dev_data->dev_ctx)) + return -ENOMEM; + + data_pg = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT); + if (!data_pg) { + ret = -ENOMEM; + goto free_ctx_exit; + } + + create.dev_ctx_sla = dev_data->dev_ctx; + ret = sev_do_cmd(SEV_CMD_TIO_DEV_CREATE, &create, &dev_data->psp_ret); + if (ret) + goto free_data_pg_exit; + + dev_data->data_pg = data_pg; + + return 0; + +free_data_pg_exit: + snp_free_firmware_page(data_pg); +free_ctx_exit: + sla_free(create.dev_ctx_sla, tio_status->devctx_size, true); + return ret; +} + +int sev_tio_dev_reclaim(struct tsm_dsm_tio *dev_data) +{ + struct sev_tio_status *tio_status = to_tio_status(dev_data); + struct sev_data_tio_dev_reclaim r = { + .length = sizeof(r), + .dev_ctx_sla = dev_data->dev_ctx, + }; + int ret; + + if (dev_data->data_pg) { + snp_free_firmware_page(dev_data->data_pg); + dev_data->data_pg = NULL; + } + + if (IS_SLA_NULL(dev_data->dev_ctx)) + return 0; + + ret = sev_do_cmd(SEV_CMD_TIO_DEV_RECLAIM, &r, &dev_data->psp_ret); + + sla_free(dev_data->dev_ctx, tio_status->devctx_size, true); + dev_data->dev_ctx = SLA_NULL; + + spdm_ctrl_free(dev_data); + + return ret; +} + +int sev_tio_dev_connect(struct tsm_dsm_tio *dev_data, u8 tc_mask, u8 ids[8], u8 cert_slot) +{ + struct sev_data_tio_dev_connect connect = { + .length = sizeof(connect), + .tc_mask = tc_mask, + .cert_slot = cert_slot, + .dev_ctx_sla = dev_data->dev_ctx, + .ide_stream_id = { + ids[0], ids[1], ids[2], ids[3], + ids[4], ids[5], ids[6], ids[7] + }, + }; + int ret; + + if (WARN_ON(IS_SLA_NULL(dev_data->dev_ctx))) + return -EFAULT; + if (!(tc_mask & 1)) + return -EINVAL; + + ret = spdm_ctrl_alloc(dev_data); + if (ret) + return ret; + + spdm_ctrl_init(&connect.spdm_ctrl, dev_data); + + return sev_tio_do_cmd(SEV_CMD_TIO_DEV_CONNECT, &connect, sizeof(connect), + &dev_data->psp_ret, dev_data); +} + +int sev_tio_dev_disconnect(struct tsm_dsm_tio *dev_data, bool force) +{ + struct sev_data_tio_dev_disconnect dc = { + .length = sizeof(dc), + .dev_ctx_sla = dev_data->dev_ctx, + .flags = force ? 
TIO_DEV_DISCONNECT_FLAG_FORCE : 0, + }; + + if (WARN_ON_ONCE(IS_SLA_NULL(dev_data->dev_ctx))) + return -EFAULT; + + spdm_ctrl_init(&dc.spdm_ctrl, dev_data); + + return sev_tio_do_cmd(SEV_CMD_TIO_DEV_DISCONNECT, &dc, sizeof(dc), + &dev_data->psp_ret, dev_data); +} + +int sev_tio_cmd_buffer_len(int cmd) +{ + switch (cmd) { + case SEV_CMD_TIO_STATUS: return sizeof(struct sev_data_tio_status); + case SEV_CMD_TIO_INIT: return sizeof(struct sev_data_tio_init); + case SEV_CMD_TIO_DEV_CREATE: return sizeof(struct sev_data_tio_dev_create); + case SEV_CMD_TIO_DEV_RECLAIM: return sizeof(struct sev_data_tio_dev_reclaim); + case SEV_CMD_TIO_DEV_CONNECT: return sizeof(struct sev_data_tio_dev_connect); + case SEV_CMD_TIO_DEV_DISCONNECT: return sizeof(struct sev_data_tio_dev_disconnect); + default: return 0; + } +} diff --git a/drivers/crypto/ccp/sev-dev-tio.h b/drivers/crypto/ccp/sev-dev-tio.h new file mode 100644 index 000000000000..67512b3dbc53 --- /dev/null +++ b/drivers/crypto/ccp/sev-dev-tio.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __PSP_SEV_TIO_H__ +#define __PSP_SEV_TIO_H__ + +#include <linux/pci-tsm.h> +#include <linux/pci-ide.h> +#include <linux/tsm.h> +#include <uapi/linux/psp-sev.h> + +struct sla_addr_t { + union { + u64 sla; + struct { + u64 page_type :1, + page_size :1, + reserved1 :10, + pfn :40, + reserved2 :12; + }; + }; +} __packed; + +#define SEV_TIO_MAX_COMMAND_LENGTH 128 + +/* SPDM control structure for DOE */ +struct tsm_spdm { + unsigned long req_len; + void *req; + unsigned long rsp_len; + void *rsp; +}; + +/* Describes TIO device */ +struct tsm_dsm_tio { + u8 cert_slot; + struct sla_addr_t dev_ctx; + struct sla_addr_t req; + struct sla_addr_t resp; + struct sla_addr_t scratch; + struct sla_addr_t output; + size_t output_len; + size_t scratch_len; + struct tsm_spdm spdm; + struct sla_buffer_hdr *reqbuf; /* vmap'ed @req for DOE */ + struct sla_buffer_hdr *respbuf; /* vmap'ed @resp for DOE */ + + int cmd; + int psp_ret; + u8 cmd_data[SEV_TIO_MAX_COMMAND_LENGTH]; + void *data_pg; /* Data page for DEV_STATUS/TDI_STATUS/TDI_INFO/ASID_FENCE */ + +#define TIO_IDE_MAX_TC 8 + struct pci_ide *ide[TIO_IDE_MAX_TC]; +}; + +/* Describes TSM structure for PF0 pointed by pci_dev->tsm */ +struct tio_dsm { + struct pci_tsm_pf0 tsm; + struct tsm_dsm_tio data; + struct sev_device *sev; +}; + +/* Data object IDs */ +#define SPDM_DOBJ_ID_NONE 0 +#define SPDM_DOBJ_ID_REQ 1 +#define SPDM_DOBJ_ID_RESP 2 + +struct spdm_dobj_hdr { + u32 id; /* Data object type identifier */ + u32 length; /* Length of the data object, INCLUDING THIS HEADER */ + struct { /* Version of the data object structure */ + u8 minor; + u8 major; + } version; +} __packed; + +/** + * struct sev_tio_status - TIO_STATUS command's info_paddr buffer + * + * @length: Length of this structure in bytes + * @tio_en: Indicates that SNP_INIT_EX initialized the RMP for SEV-TIO + * @tio_init_done: Indicates TIO_INIT has been invoked + * @spdm_req_size_min: Minimum SPDM request buffer size in bytes + * @spdm_req_size_max: Maximum SPDM request buffer size in bytes + * @spdm_scratch_size_min: Minimum SPDM scratch buffer size in bytes + * @spdm_scratch_size_max: Maximum SPDM scratch buffer size in bytes + * @spdm_out_size_min: Minimum SPDM output buffer size in bytes + * @spdm_out_size_max: Maximum for the SPDM output buffer size in bytes + * @spdm_rsp_size_min: Minimum SPDM response buffer size in bytes + * @spdm_rsp_size_max: Maximum SPDM response buffer size in bytes + * @devctx_size: Size of a device context buffer 
in bytes + * @tdictx_size: Size of a TDI context buffer in bytes + * @tio_crypto_alg: TIO crypto algorithms supported + */ +struct sev_tio_status { + u32 length; + u32 tio_en :1, + tio_init_done :1, + reserved :30; + u32 spdm_req_size_min; + u32 spdm_req_size_max; + u32 spdm_scratch_size_min; + u32 spdm_scratch_size_max; + u32 spdm_out_size_min; + u32 spdm_out_size_max; + u32 spdm_rsp_size_min; + u32 spdm_rsp_size_max; + u32 devctx_size; + u32 tdictx_size; + u32 tio_crypto_alg; + u8 reserved2[12]; +} __packed; + +int sev_tio_init_locked(void *tio_status_page); +int sev_tio_continue(struct tsm_dsm_tio *dev_data); + +int sev_tio_dev_create(struct tsm_dsm_tio *dev_data, u16 device_id, u16 root_port_id, + u8 segment_id); +int sev_tio_dev_connect(struct tsm_dsm_tio *dev_data, u8 tc_mask, u8 ids[8], u8 cert_slot); +int sev_tio_dev_disconnect(struct tsm_dsm_tio *dev_data, bool force); +int sev_tio_dev_reclaim(struct tsm_dsm_tio *dev_data); + +#endif /* __PSP_SEV_TIO_H__ */ diff --git a/drivers/crypto/ccp/sev-dev-tsm.c b/drivers/crypto/ccp/sev-dev-tsm.c new file mode 100644 index 000000000000..ea29cd5d0ff9 --- /dev/null +++ b/drivers/crypto/ccp/sev-dev-tsm.c @@ -0,0 +1,405 @@ +// SPDX-License-Identifier: GPL-2.0-only + +// Interface to CCP/SEV-TIO for generic PCIe TDISP module + +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/tsm.h> +#include <linux/iommu.h> +#include <linux/pci-doe.h> +#include <linux/bitfield.h> +#include <linux/module.h> + +#include <asm/sev-common.h> +#include <asm/sev.h> + +#include "psp-dev.h" +#include "sev-dev.h" +#include "sev-dev-tio.h" + +MODULE_IMPORT_NS("PCI_IDE"); + +#define TIO_DEFAULT_NR_IDE_STREAMS 1 + +static uint nr_ide_streams = TIO_DEFAULT_NR_IDE_STREAMS; +module_param_named(ide_nr, nr_ide_streams, uint, 0644); +MODULE_PARM_DESC(ide_nr, "Set the maximum number of IDE streams per PHB"); + +#define dev_to_sp(dev) ((struct sp_device *)dev_get_drvdata(dev)) +#define dev_to_psp(dev) ((struct psp_device *)(dev_to_sp(dev)->psp_data)) +#define dev_to_sev(dev) ((struct sev_device *)(dev_to_psp(dev)->sev_data)) +#define tsm_dev_to_sev(tsmdev) dev_to_sev((tsmdev)->dev.parent) + +#define pdev_to_tio_dsm(pdev) (container_of((pdev)->tsm, struct tio_dsm, tsm.base_tsm)) + +static int sev_tio_spdm_cmd(struct tio_dsm *dsm, int ret) +{ + struct tsm_dsm_tio *dev_data = &dsm->data; + struct tsm_spdm *spdm = &dev_data->spdm; + + /* Check the main command handler response before entering the loop */ + if (ret == 0 && dev_data->psp_ret != SEV_RET_SUCCESS) + return -EINVAL; + + if (ret <= 0) + return ret; + + /* ret > 0 means "SPDM requested" */ + while (ret == PCI_DOE_FEATURE_CMA || ret == PCI_DOE_FEATURE_SSESSION) { + ret = pci_doe(dsm->tsm.doe_mb, PCI_VENDOR_ID_PCI_SIG, ret, + spdm->req, spdm->req_len, spdm->rsp, spdm->rsp_len); + if (ret < 0) + break; + + WARN_ON_ONCE(ret == 0); /* The response should never be empty */ + spdm->rsp_len = ret; + ret = sev_tio_continue(dev_data); + } + + return ret; +} + +static int stream_enable(struct pci_ide *ide) +{ + struct pci_dev *rp = pcie_find_root_port(ide->pdev); + int ret; + + ret = pci_ide_stream_enable(rp, ide); + if (ret) + return ret; + + ret = pci_ide_stream_enable(ide->pdev, ide); + if (ret) + pci_ide_stream_disable(rp, ide); + + return ret; +} + +static int streams_enable(struct pci_ide **ide) +{ + int ret = 0; + + for (int i = 0; i < TIO_IDE_MAX_TC; ++i) { + if (ide[i]) { + ret = stream_enable(ide[i]); + if (ret) + break; + } + } + + return ret; +} + +static void stream_disable(struct pci_ide *ide) +{ + 
+	pci_ide_stream_disable(ide->pdev, ide);
+	pci_ide_stream_disable(pcie_find_root_port(ide->pdev), ide);
+}
+
+static void streams_disable(struct pci_ide **ide)
+{
+	for (int i = 0; i < TIO_IDE_MAX_TC; ++i)
+		if (ide[i])
+			stream_disable(ide[i]);
+}
+
+static void stream_setup(struct pci_ide *ide)
+{
+	struct pci_dev *rp = pcie_find_root_port(ide->pdev);
+
+	ide->partner[PCI_IDE_EP].rid_start = 0;
+	ide->partner[PCI_IDE_EP].rid_end = 0xffff;
+	ide->partner[PCI_IDE_RP].rid_start = 0;
+	ide->partner[PCI_IDE_RP].rid_end = 0xffff;
+
+	ide->pdev->ide_cfg = 0;
+	ide->pdev->ide_tee_limit = 1;
+	rp->ide_cfg = 1;
+	rp->ide_tee_limit = 0;
+
+	pci_warn(ide->pdev, "Forcing CFG/TEE for %s", pci_name(rp));
+	pci_ide_stream_setup(ide->pdev, ide);
+	pci_ide_stream_setup(rp, ide);
+}
+
+static u8 streams_setup(struct pci_ide **ide, u8 *ids)
+{
+	bool def = false;
+	u8 tc_mask = 0;
+	int i;
+
+	for (i = 0; i < TIO_IDE_MAX_TC; ++i) {
+		if (!ide[i]) {
+			ids[i] = 0xFF;
+			continue;
+		}
+
+		tc_mask |= BIT(i);
+		ids[i] = ide[i]->stream_id;
+
+		if (!def) {
+			struct pci_ide_partner *settings;
+
+			settings = pci_ide_to_settings(ide[i]->pdev, ide[i]);
+			settings->default_stream = 1;
+			def = true;
+		}
+
+		stream_setup(ide[i]);
+	}
+
+	return tc_mask;
+}
+
+static int streams_register(struct pci_ide **ide)
+{
+	int ret = 0, i;
+
+	for (i = 0; i < TIO_IDE_MAX_TC; ++i) {
+		if (ide[i]) {
+			ret = pci_ide_stream_register(ide[i]);
+			if (ret)
+				break;
+		}
+	}
+
+	return ret;
+}
+
+static void streams_unregister(struct pci_ide **ide)
+{
+	for (int i = 0; i < TIO_IDE_MAX_TC; ++i)
+		if (ide[i])
+			pci_ide_stream_unregister(ide[i]);
+}
+
+static void stream_teardown(struct pci_ide *ide)
+{
+	pci_ide_stream_teardown(ide->pdev, ide);
+	pci_ide_stream_teardown(pcie_find_root_port(ide->pdev), ide);
+}
+
+static void streams_teardown(struct pci_ide **ide)
+{
+	for (int i = 0; i < TIO_IDE_MAX_TC; ++i) {
+		if (ide[i]) {
+			stream_teardown(ide[i]);
+			pci_ide_stream_free(ide[i]);
+			ide[i] = NULL;
+		}
+	}
+}
+
+static int stream_alloc(struct pci_dev *pdev, struct pci_ide **ide,
+			unsigned int tc)
+{
+	struct pci_dev *rp = pcie_find_root_port(pdev);
+	struct pci_ide *ide1;
+
+	if (ide[tc]) {
+		pci_err(pdev, "Stream for class=%d already registered", tc);
+		return -EBUSY;
+	}
+
+	/* FIXME: find a better way */
+	if (nr_ide_streams != TIO_DEFAULT_NR_IDE_STREAMS)
+		pci_notice(pdev, "Enable non-default %d streams", nr_ide_streams);
+	pci_ide_set_nr_streams(to_pci_host_bridge(rp->bus->bridge), nr_ide_streams);
+
+	ide1 = pci_ide_stream_alloc(pdev);
+	if (!ide1)
+		return -EFAULT;
+
+	/* Blindly assign streamid=0 to TC=0, and so on */
+	ide1->stream_id = tc;
+
+	ide[tc] = ide1;
+
+	return 0;
+}
+
+static struct pci_tsm *tio_pf0_probe(struct pci_dev *pdev, struct sev_device *sev)
+{
+	struct tio_dsm *dsm __free(kfree) = kzalloc(sizeof(*dsm), GFP_KERNEL);
+	int rc;
+
+	if (!dsm)
+		return NULL;
+
+	rc = pci_tsm_pf0_constructor(pdev, &dsm->tsm, sev->tsmdev);
+	if (rc)
+		return NULL;
+
+	pci_dbg(pdev, "TSM enabled\n");
+	dsm->sev = sev;
+	return &no_free_ptr(dsm)->tsm.base_tsm;
+}
+
+static struct pci_tsm *dsm_probe(struct tsm_dev *tsmdev, struct pci_dev *pdev)
+{
+	struct sev_device *sev = tsm_dev_to_sev(tsmdev);
+
+	if (is_pci_tsm_pf0(pdev))
+		return tio_pf0_probe(pdev, sev);
+	return NULL;
+}
+
+static void dsm_remove(struct pci_tsm *tsm)
+{
+	struct pci_dev *pdev = tsm->pdev;
+
+	pci_dbg(pdev, "TSM disabled\n");
+
+	if (is_pci_tsm_pf0(pdev)) {
+		struct tio_dsm *dsm = container_of(tsm, struct tio_dsm, tsm.base_tsm);
+
+		pci_tsm_pf0_destructor(&dsm->tsm);
+		kfree(dsm);
+	}
+}
+
+static int dsm_create(struct tio_dsm *dsm)
+{
+	struct pci_dev *pdev = dsm->tsm.base_tsm.pdev;
+	u8 segment_id = pdev->bus ? pci_domain_nr(pdev->bus) : 0;
+	struct pci_dev *rootport = pcie_find_root_port(pdev);
+	u16 device_id = pci_dev_id(pdev);
+	u16 root_port_id;
+	u32 lnkcap = 0;
+
+	if (pci_read_config_dword(rootport, pci_pcie_cap(rootport) + PCI_EXP_LNKCAP,
+				  &lnkcap))
+		return -ENODEV;
+
+	root_port_id = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
+
+	return sev_tio_dev_create(&dsm->data, device_id, root_port_id, segment_id);
+}
+
+static int dsm_connect(struct pci_dev *pdev)
+{
+	struct tio_dsm *dsm = pdev_to_tio_dsm(pdev);
+	struct tsm_dsm_tio *dev_data = &dsm->data;
+	u8 ids[TIO_IDE_MAX_TC];
+	u8 tc_mask;
+	int ret;
+
+	if (pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_PCI_SIG,
+				 PCI_DOE_FEATURE_SSESSION) != dsm->tsm.doe_mb) {
+		pci_err(pdev, "CMA DOE MB must support SSESSION\n");
+		return -EFAULT;
+	}
+
+	ret = stream_alloc(pdev, dev_data->ide, 0);
+	if (ret)
+		return ret;
+
+	ret = dsm_create(dsm);
+	if (ret)
+		goto ide_free_exit;
+
+	tc_mask = streams_setup(dev_data->ide, ids);
+
+	ret = sev_tio_dev_connect(dev_data, tc_mask, ids, dev_data->cert_slot);
+	ret = sev_tio_spdm_cmd(dsm, ret);
+	if (ret)
+		goto free_exit;
+
+	streams_enable(dev_data->ide);
+
+	ret = streams_register(dev_data->ide);
+	if (ret)
+		goto free_exit;
+
+	return 0;
+
+free_exit:
+	sev_tio_dev_reclaim(dev_data);
+	streams_disable(dev_data->ide);
+ide_free_exit:
+	streams_teardown(dev_data->ide);
+
+	return ret;
+}
+
+static void dsm_disconnect(struct pci_dev *pdev)
+{
+	bool force = SYSTEM_HALT <= system_state && system_state <= SYSTEM_RESTART;
+	struct tio_dsm *dsm = pdev_to_tio_dsm(pdev);
+	struct tsm_dsm_tio *dev_data = &dsm->data;
+	int ret;
+
+	ret = sev_tio_dev_disconnect(dev_data, force);
+	ret = sev_tio_spdm_cmd(dsm, ret);
+	if (ret && !force) {
+		ret = sev_tio_dev_disconnect(dev_data, true);
+		sev_tio_spdm_cmd(dsm, ret);
+	}
+
+	sev_tio_dev_reclaim(dev_data);
+
+	streams_disable(dev_data->ide);
+	streams_unregister(dev_data->ide);
+	streams_teardown(dev_data->ide);
+}
+
+static struct pci_tsm_ops sev_tsm_ops = {
+	.probe = dsm_probe,
+	.remove = dsm_remove,
+	.connect = dsm_connect,
+	.disconnect = dsm_disconnect,
+};
+
+void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page)
+{
+	struct sev_tio_status *t = kzalloc(sizeof(*t), GFP_KERNEL);
+	struct tsm_dev *tsmdev;
+	int ret;
+
+	WARN_ON(sev->tio_status);
+
+	if (!t)
+		return;
+
+	ret = sev_tio_init_locked(tio_status_page);
+	if (ret) {
+		pr_warn("SEV-TIO STATUS failed with %d\n", ret);
+		goto error_exit;
+	}
+
+	tsmdev = tsm_register(sev->dev, &sev_tsm_ops);
+	if (IS_ERR(tsmdev))
+		goto error_exit;
+
+	memcpy(t, tio_status_page, sizeof(*t));
+
+	pr_notice("SEV-TIO status: EN=%d INIT_DONE=%d rq=%d..%d rs=%d..%d "
+		  "scr=%d..%d out=%d..%d dev=%d tdi=%d algos=%x\n",
+		  t->tio_en, t->tio_init_done,
+		  t->spdm_req_size_min, t->spdm_req_size_max,
+		  t->spdm_rsp_size_min, t->spdm_rsp_size_max,
+		  t->spdm_scratch_size_min, t->spdm_scratch_size_max,
+		  t->spdm_out_size_min, t->spdm_out_size_max,
+		  t->devctx_size, t->tdictx_size,
+		  t->tio_crypto_alg);
+
+	sev->tsmdev = tsmdev;
+	sev->tio_status = t;
+
+	return;
+
+error_exit:
+	pr_err("Failed to enable SEV-TIO: ret=%d en=%d initdone=%d SEV=%d\n",
+	       ret, t->tio_en, t->tio_init_done, boot_cpu_has(X86_FEATURE_SEV));
+	kfree(t);
+}
+
+void sev_tsm_uninit(struct sev_device *sev)
+{
+	if (sev->tsmdev)
+		tsm_unregister(sev->tsmdev);
+
+	sev->tsmdev = NULL;
+}
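[editor's note] tio_pf0_probe() above leans on the scope-based cleanup helpers from <linux/cleanup.h>: the __free(kfree) annotation ties the allocation's lifetime to the variable's scope, and no_free_ptr() transfers ownership out on success. A minimal self-contained sketch of the same pattern; struct thing and thing_create() are illustrative names, not part of this patch:

#include <linux/cleanup.h>
#include <linux/slab.h>

struct thing { int x; };

/* Allocate and initialize; on any early return the __free(kfree)
 * annotation frees the buffer automatically.  no_free_ptr() hands
 * ownership to the caller and suppresses the automatic kfree. */
static struct thing *thing_create(int x)
{
	struct thing *t __free(kfree) = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return NULL;
	if (x < 0)		/* hypothetical validation step */
		return NULL;	/* t is kfree()d here automatically */

	t->x = x;
	return no_free_ptr(t);	/* ownership transferred to caller */
}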
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 0d13d47c164b..956ea609d0cc 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -75,6 +75,14 @@ static bool psp_init_on_probe = true;
 module_param(psp_init_on_probe, bool, 0444);
 MODULE_PARM_DESC(psp_init_on_probe, " if true, the PSP will be initialized on module init. Else the PSP will be initialized on the first command requiring it");
 
+#if IS_ENABLED(CONFIG_PCI_TSM)
+static bool sev_tio_enabled = true;
+module_param_named(tio, sev_tio_enabled, bool, 0444);
+MODULE_PARM_DESC(tio, "Enables TIO in SNP_INIT_EX");
+#else
+static const bool sev_tio_enabled = false;
+#endif
+
 MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
 MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
 MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
@@ -251,7 +259,7 @@ static int sev_cmd_buffer_len(int cmd)
 	case SEV_CMD_SNP_COMMIT:		return sizeof(struct sev_data_snp_commit);
 	case SEV_CMD_SNP_FEATURE_INFO:		return sizeof(struct sev_data_snp_feature_info);
 	case SEV_CMD_SNP_VLEK_LOAD:		return sizeof(struct sev_user_data_snp_vlek_load);
-	default:				return 0;
+	default:				return sev_tio_cmd_buffer_len(cmd);
 	}
 
 	return 0;
@@ -259,27 +267,20 @@ static int sev_cmd_buffer_len(int cmd)
 
 static struct file *open_file_as_root(const char *filename, int flags, umode_t mode)
 {
-	struct file *fp;
-	struct path root;
-	struct cred *cred;
-	const struct cred *old_cred;
+	struct path root __free(path_put) = {};
 
 	task_lock(&init_task);
 	get_fs_root(init_task.fs, &root);
 	task_unlock(&init_task);
 
-	cred = prepare_creds();
+	CLASS(prepare_creds, cred)();
 	if (!cred)
 		return ERR_PTR(-ENOMEM);
 
-	cred->fsuid = GLOBAL_ROOT_UID;
-	old_cred = override_creds(cred);
-
-	fp = file_open_root(&root, filename, flags, mode);
-	path_put(&root);
-
-	put_cred(revert_creds(old_cred));
+	cred->fsuid = GLOBAL_ROOT_UID;
 
-	return fp;
+	scoped_with_creds(cred)
+		return file_open_root(&root, filename, flags, mode);
 }
 
 static int sev_read_init_ex_file(void)
@@ -387,13 +388,7 @@ static int sev_write_init_ex_file_if_required(int cmd_id)
 	return sev_write_init_ex_file();
 }
 
-/*
- * snp_reclaim_pages() needs __sev_do_cmd_locked(), and __sev_do_cmd_locked()
- * needs snp_reclaim_pages(), so a forward declaration is needed.
- */
-static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret);
-
-static int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked)
+int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked)
 {
 	int ret, err, i;
 
@@ -427,6 +422,7 @@ cleanup:
 		snp_leak_pages(__phys_to_pfn(paddr), npages - i);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(snp_reclaim_pages);
 
 static int rmp_mark_pages_firmware(unsigned long paddr, unsigned int npages, bool locked)
 {
@@ -857,7 +853,7 @@ static int snp_reclaim_cmd_buf(int cmd, void *cmd_buf)
 	return 0;
 }
 
-static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
+int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
 {
 	struct cmd_buf_desc desc_list[CMD_BUF_DESC_MAX] = {0};
 	struct psp_device *psp = psp_master;
@@ -1399,6 +1395,8 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
 	 *
 	 */
 	if (sev_version_greater_or_equal(SNP_MIN_API_MAJOR, 52)) {
+		bool tio_supp = !!(sev->snp_feat_info_0.ebx & SNP_SEV_TIO_SUPPORTED);
+
 		/*
 		 * Firmware checks that the pages containing the ranges enumerated
 		 * in the RANGES structure are either in the default page state or in the
@@ -1439,6 +1437,17 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
 		data.init_rmp = 1;
 		data.list_paddr_en = 1;
 		data.list_paddr = __psp_pa(snp_range_list);
+
+		data.tio_en = tio_supp && sev_tio_enabled && amd_iommu_sev_tio_supported();
+
+		/*
+		 * When psp_init_on_probe is disabled, the userspace calling
+		 * SEV ioctl can inadvertently shut down SNP and SEV-TIO causing
+		 * unexpected state loss.
+		 */
+		if (data.tio_en && !psp_init_on_probe)
+			dev_warn(sev->dev, "SEV-TIO is incompatible with psp_init_on_probe=0\n");
+
 		cmd = SEV_CMD_SNP_INIT_EX;
 	} else {
 		cmd = SEV_CMD_SNP_INIT;
@@ -1476,7 +1485,8 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
 	snp_hv_fixed_pages_state_update(sev, HV_FIXED);
 	sev->snp_initialized = true;
-	dev_dbg(sev->dev, "SEV-SNP firmware initialized\n");
+	dev_dbg(sev->dev, "SEV-SNP firmware initialized, SEV-TIO is %s\n",
+		data.tio_en ? "enabled" : "disabled");
 
 	dev_info(sev->dev, "SEV-SNP API:%d.%d build:%d\n", sev->api_major,
 		 sev->api_minor, sev->build);
@@ -1484,6 +1494,23 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
 	atomic_notifier_chain_register(&panic_notifier_list,
 				       &snp_panic_notifier);
 
+	if (data.tio_en) {
+		/*
+		 * This executes with the sev_cmd_mutex held so down the stack
+		 * snp_reclaim_pages(locked=false) might be needed (which is extremely
+		 * unlikely) but will cause a deadlock.
+		 * Instead of exporting __snp_alloc_firmware_pages(), allocate a page
+		 * for this one call here.
+		 */
+		struct page *tio_page = __snp_alloc_firmware_pages(
+				GFP_KERNEL_ACCOUNT | __GFP_ZERO, 0, true);
+
+		if (tio_page) {
+			sev_tsm_init_locked(sev, page_address(tio_page));
+			__snp_free_firmware_pages(tio_page, 0, true);
+		}
+	}
+
 	sev_es_tmr_size = SNP_TMR_SIZE;
 
 	return 0;
@@ -2763,8 +2790,20 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
 
 static void sev_firmware_shutdown(struct sev_device *sev)
 {
+	/*
+	 * Calling without sev_cmd_mutex held as TSM will likely try disconnecting
+	 * IDE and this ends up calling sev_do_cmd() which locks sev_cmd_mutex.
+	 */
+	if (sev->tio_status)
+		sev_tsm_uninit(sev);
+
 	mutex_lock(&sev_cmd_mutex);
+
 	__sev_firmware_shutdown(sev, false);
+
+	kfree(sev->tio_status);
+	sev->tio_status = NULL;
+
 	mutex_unlock(&sev_cmd_mutex);
 }
 
@@ -2777,6 +2816,43 @@ void sev_platform_shutdown(void)
 }
 EXPORT_SYMBOL_GPL(sev_platform_shutdown);
 
+u64 sev_get_snp_policy_bits(void)
+{
+	struct psp_device *psp = psp_master;
+	struct sev_device *sev;
+	u64 policy_bits;
+
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
+		return 0;
+
+	if (!psp || !psp->sev_data)
+		return 0;
+
+	sev = psp->sev_data;
+
+	policy_bits = SNP_POLICY_MASK_BASE;
+
+	if (sev->snp_plat_status.feature_info) {
+		if (sev->snp_feat_info_0.ecx & SNP_RAPL_DISABLE_SUPPORTED)
+			policy_bits |= SNP_POLICY_MASK_RAPL_DIS;
+
+		if (sev->snp_feat_info_0.ecx & SNP_CIPHER_TEXT_HIDING_SUPPORTED)
+			policy_bits |= SNP_POLICY_MASK_CIPHERTEXT_HIDING_DRAM;
+
+		if (sev->snp_feat_info_0.ecx & SNP_AES_256_XTS_POLICY_SUPPORTED)
+			policy_bits |= SNP_POLICY_MASK_MEM_AES_256_XTS;
+
+		if (sev->snp_feat_info_0.ecx & SNP_CXL_ALLOW_POLICY_SUPPORTED)
+			policy_bits |= SNP_POLICY_MASK_CXL_ALLOW;
+
+		if (sev_version_greater_or_equal(1, 58))
+			policy_bits |= SNP_POLICY_MASK_PAGE_SWAP_DISABLE;
+	}
+
+	return policy_bits;
+}
+EXPORT_SYMBOL_GPL(sev_get_snp_policy_bits);
+
 void sev_dev_destroy(struct psp_device *psp)
 {
 	struct sev_device *sev = psp->sev_data;
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
index ac03bd0848f7..b1cd556bbbf6 100644
--- a/drivers/crypto/ccp/sev-dev.h
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -34,6 +34,8 @@ struct sev_misc_dev {
 	struct miscdevice misc;
 };
 
+struct sev_tio_status;
+
 struct sev_device {
 	struct device *dev;
 	struct psp_device *psp;
@@ -61,15 +63,24 @@ struct sev_device {
 
 	struct sev_user_data_snp_status snp_plat_status;
 	struct snp_feature_info snp_feat_info_0;
+
+	struct tsm_dev *tsmdev;
+	struct sev_tio_status *tio_status;
 };
 
 int sev_dev_init(struct psp_device *psp);
 void sev_dev_destroy(struct psp_device *psp);
 
+int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret);
+
 void sev_pci_init(void);
 void sev_pci_exit(void);
 
 struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages);
 void snp_free_hv_fixed_pages(struct page *page);
 
+void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page);
+void sev_tsm_uninit(struct sev_device *sev);
+int sev_tio_cmd_buffer_len(int cmd);
+
 #endif /* __SEV_DEV_H */
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 6f9d7063257d..1335a83fe052 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -95,7 +95,7 @@ struct sp_device {
 	struct device *dev;
 
-	struct sp_dev_vdata *dev_vdata;
+	const struct sp_dev_vdata *dev_vdata;
 	unsigned int ord;
 	char name[SP_MAX_NAME_LEN];
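[editor's note] The sev_tio_enabled knob added above follows a common kernel pattern: a writable module parameter when the config option is enabled, and a compile-time-constant false otherwise, so every branch guarded by it folds away on kernels without CONFIG_PCI_TSM. A minimal sketch of the pattern; demo_init and feature_enabled are illustrative names, not part of the patch:

#include <linux/module.h>

#if IS_ENABLED(CONFIG_PCI_TSM)
static bool feature_enabled = true;	/* runtime-tunable via module param */
module_param(feature_enabled, bool, 0444);
#else
static const bool feature_enabled = false;	/* constant: dead code elided */
#endif

static int __init demo_init(void)
{
	/* With the option off, the compiler discards this branch. */
	if (feature_enabled)
		pr_info("feature on\n");
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");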
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index e7bb803912a6..8891ceee1d7d 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -459,6 +459,17 @@ static const struct psp_vdata pspv6 = {
 	.intsts_reg		= 0x10514,	/* P2CMSG_INTSTS */
 };
 
+static const struct psp_vdata pspv7 = {
+	.tee			= &teev2,
+	.cmdresp_reg		= 0x10944,	/* C2PMSG_17 */
+	.cmdbuff_addr_lo_reg	= 0x10948,	/* C2PMSG_18 */
+	.cmdbuff_addr_hi_reg	= 0x1094c,	/* C2PMSG_19 */
+	.bootloader_info_reg	= 0x109ec,	/* C2PMSG_59 */
+	.feature_reg		= 0x109fc,	/* C2PMSG_63 */
+	.inten_reg		= 0x10510,	/* P2CMSG_INTEN */
+	.intsts_reg		= 0x10514,	/* P2CMSG_INTSTS */
+};
+
 #endif
 
 static const struct sp_dev_vdata dev_vdata[] = {
@@ -525,6 +536,13 @@ static const struct sp_dev_vdata dev_vdata[] = {
 		.psp_vdata = &pspv6,
 #endif
 	},
+	{	/* 9 */
+		.bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+		.psp_vdata = &pspv7,
+#endif
+	},
+
 };
 
 static const struct pci_device_id sp_pci_table[] = {
@@ -539,6 +557,7 @@ static const struct pci_device_id sp_pci_table[] = {
 	{ PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
 	{ PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
 	{ PCI_VDEVICE(AMD, 0x17D8), (kernel_ulong_t)&dev_vdata[8] },
+	{ PCI_VDEVICE(AMD, 0x115A), (kernel_ulong_t)&dev_vdata[9] },
 	/* Last entry must be zero */
 	{ 0, }
 };
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index 3933cac1694d..3f9843fa7782 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -52,24 +52,13 @@ static const struct of_device_id sp_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, sp_of_match);
 
-static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
-{
-	const struct of_device_id *match;
-
-	match = of_match_node(sp_of_match, pdev->dev.of_node);
-	if (match && match->data)
-		return (struct sp_dev_vdata *)match->data;
-
-	return NULL;
-}
-
-static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev)
+static const struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev)
 {
 	const struct acpi_device_id *match;
 
 	match = acpi_match_device(sp_acpi_match, &pdev->dev);
 	if (match && match->driver_data)
-		return (struct sp_dev_vdata *)match->driver_data;
+		return (const struct sp_dev_vdata *)match->driver_data;
 
 	return NULL;
 }
@@ -123,7 +112,7 @@ static int sp_platform_probe(struct platform_device *pdev)
 		goto e_err;
 
 	sp->dev_specific = sp_platform;
-	sp->dev_vdata = pdev->dev.of_node ? sp_get_of_version(pdev)
+	sp->dev_vdata = pdev->dev.of_node ? of_device_get_match_data(&pdev->dev)
 					 : sp_get_acpi_version(pdev);
 	if (!sp->dev_vdata) {
 		ret = -ENODEV;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 3963bb91321f..dc7e0cd51c25 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -1235,6 +1235,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
 	int rc = 0;
 	u32 dummy = 0;
 	u32 mapped_nents = 0;
+	int sg_nents;
 
 	dev_dbg(dev, " update params : curr_buff=%p curr_buff_cnt=0x%X nbytes=0x%X src=%p curr_index=%u\n",
 		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
@@ -1248,7 +1249,10 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
 	if (total_in_len < block_size) {
 		dev_dbg(dev, " less than one block: curr_buff=%p *curr_buff_cnt=0x%X copy_to=%p\n",
 			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
-		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
+		sg_nents = sg_nents_for_len(src, nbytes);
+		if (sg_nents < 0)
+			return sg_nents;
+		areq_ctx->in_nents = sg_nents;
 		sg_copy_to_buffer(src, areq_ctx->in_nents,
 				  &curr_buff[*curr_buff_cnt], nbytes);
 		*curr_buff_cnt += nbytes;
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 925991526745..edf36f6add52 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -913,11 +913,10 @@ static void hifn_init_pll(struct hifn_device *dev)
 	else
 		pllcfg |= HIFN_PLL_REF_CLK_HBI;
 
-	if (hifn_pll_ref[3] != '\0')
-		freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
-	else {
+	if (hifn_pll_ref[3] == '\0' ||
+	    kstrtouint(hifn_pll_ref + 3, 10, &freq)) {
 		freq = 66;
-		dev_info(&dev->pdev->dev, "assuming %uMHz clock speed, override with hifn_pll_ref=%.3s<frequency>\n",
+		dev_info(&dev->pdev->dev, "assuming %u MHz clock speed, override with hifn_pll_ref=%.3s<frequency>\n",
 			 freq, hifn_pll_ref);
 	}
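[editor's note] Several conversions in this series (sp-platform.c above, and cesa.c further down) replace the of_match_node() plus manual ->data dereference with of_device_get_match_data(), which returns the matched entry's .data, or NULL when there is none, in one call. A self-contained sketch under assumed names (vendor,demo-v1 and struct demo_caps are illustrative):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_caps { unsigned int nengines; };	/* hypothetical per-SoC data */

static const struct demo_caps demo_v1_caps = { .nengines = 1 };

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-v1", .data = &demo_v1_caps },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, demo_of_match);

static int demo_probe(struct platform_device *pdev)
{
	/* One call replaces of_match_node() + match->data: */
	const struct demo_caps *caps = of_device_get_match_data(&pdev->dev);

	if (!caps)
		return -ENODEV;
	return 0;
}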
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index a5b96adf2d1e..f8bfff5dd0bd 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -64,10 +64,10 @@
 #define QM_EQE_AEQE_SIZE		(2UL << 12)
 #define QM_EQC_PHASE_SHIFT		16
 
-#define QM_EQE_PHASE(eqe)		((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
+#define QM_EQE_PHASE(dw0)		(((dw0) >> 16) & 0x1)
 #define QM_EQE_CQN_MASK			GENMASK(15, 0)
 
-#define QM_AEQE_PHASE(aeqe)		((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
+#define QM_AEQE_PHASE(dw0)		(((dw0) >> 16) & 0x1)
 #define QM_AEQE_TYPE_SHIFT		17
 #define QM_AEQE_TYPE_MASK		0xf
 #define QM_AEQE_CQN_MASK		GENMASK(15, 0)
@@ -976,23 +976,23 @@ static void qm_get_complete_eqe_num(struct hisi_qm *qm)
 {
 	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
 	struct hisi_qm_poll_data *poll_data = NULL;
+	u32 dw0 = le32_to_cpu(eqe->dw0);
 	u16 eq_depth = qm->eq_depth;
 	u16 cqn, eqe_num = 0;
 
-	if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) {
+	if (QM_EQE_PHASE(dw0) != qm->status.eqc_phase) {
 		atomic64_inc(&qm->debug.dfx.err_irq_cnt);
 		qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
 		return;
 	}
 
-	cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
+	cqn = dw0 & QM_EQE_CQN_MASK;
 	if (unlikely(cqn >= qm->qp_num))
 		return;
 	poll_data = &qm->poll_data[cqn];
 
-	while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
-		cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
-		poll_data->qp_finish_id[eqe_num] = cqn;
+	while (QM_EQE_PHASE(dw0) == qm->status.eqc_phase) {
+		poll_data->qp_finish_id[eqe_num] = dw0 & QM_EQE_CQN_MASK;
 		eqe_num++;
 
 		if (qm->status.eq_head == eq_depth - 1) {
@@ -1006,6 +1008,8 @@ static void qm_get_complete_eqe_num(struct hisi_qm *qm)
 
 		if (eqe_num == (eq_depth >> 1) - 1)
 			break;
+
+		dw0 = le32_to_cpu(eqe->dw0);
 	}
 
 	poll_data->eqe_num = eqe_num;
@@ -1098,15 +1100,15 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
 {
 	struct hisi_qm *qm = data;
 	struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
+	u32 dw0 = le32_to_cpu(aeqe->dw0);
 	u16 aeq_depth = qm->aeq_depth;
 	u32 type, qp_id;
 
 	atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
 
-	while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
-		type = (le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT) &
-		       QM_AEQE_TYPE_MASK;
-		qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK;
+	while (QM_AEQE_PHASE(dw0) == qm->status.aeqc_phase) {
+		type = (dw0 >> QM_AEQE_TYPE_SHIFT) & QM_AEQE_TYPE_MASK;
+		qp_id = dw0 & QM_AEQE_CQN_MASK;
 
 		switch (type) {
 		case QM_EQ_OVERFLOW:
@@ -1134,6 +1136,7 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
 			aeqe++;
 			qm->status.aeq_head++;
 		}
+		dw0 = le32_to_cpu(aeqe->dw0);
 	}
 
 	qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
@@ -1283,6 +1286,13 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
 				(factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
 		}
 		break;
+	/*
+	 * Note: The current logic only needs to handle the above three types.
+	 * If new types are added, they need to be supplemented here,
+	 * otherwise undefined behavior may occur.
+	 */
+	default:
+		break;
 	}
 }
@@ -2652,10 +2662,10 @@ static int qm_hw_err_isolate(struct hisi_qm *qm)
 		}
 	}
 	list_add(&hw_err->list, &isolate->qm_hw_errs);
-	mutex_unlock(&isolate->isolate_lock);
 
 	if (count >= isolate->err_threshold)
 		isolate->is_isolate = true;
+	mutex_unlock(&isolate->isolate_lock);
 
 	return 0;
 }
@@ -2664,12 +2674,10 @@ static void qm_hw_err_destroy(struct hisi_qm *qm)
 {
 	struct qm_hw_err *err, *tmp;
 
-	mutex_lock(&qm->isolate_data.isolate_lock);
 	list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) {
 		list_del(&err->list);
 		kfree(err);
 	}
-	mutex_unlock(&qm->isolate_data.isolate_lock);
 }
 
 static enum uacce_dev_state hisi_qm_get_isolate_state(struct uacce_device *uacce)
@@ -2697,10 +2705,12 @@ static int hisi_qm_isolate_threshold_write(struct uacce_device *uacce, u32 num)
 	if (qm->isolate_data.is_isolate)
 		return -EPERM;
 
+	mutex_lock(&qm->isolate_data.isolate_lock);
 	qm->isolate_data.err_threshold = num;
 
 	/* After the policy is updated, need to reset the hardware err list */
 	qm_hw_err_destroy(qm);
+	mutex_unlock(&qm->isolate_data.isolate_lock);
 
 	return 0;
 }
@@ -2737,7 +2747,10 @@ static void qm_remove_uacce(struct hisi_qm *qm)
 	struct uacce_device *uacce = qm->uacce;
 
 	if (qm->use_sva) {
+		mutex_lock(&qm->isolate_data.isolate_lock);
 		qm_hw_err_destroy(qm);
+		mutex_unlock(&qm->isolate_data.isolate_lock);
+
 		uacce_remove(uacce);
 		qm->uacce = NULL;
 	}
@@ -3019,11 +3032,36 @@ static void qm_put_pci_res(struct hisi_qm *qm)
 	pci_release_mem_regions(pdev);
 }
 
+static void hisi_mig_region_clear(struct hisi_qm *qm)
+{
+	u32 val;
+
+	/* Clear migration region set of PF */
+	if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V3) {
+		val = readl(qm->io_base + QM_MIG_REGION_SEL);
+		val &= ~QM_MIG_REGION_EN;
+		writel(val, qm->io_base + QM_MIG_REGION_SEL);
+	}
+}
+
+static void hisi_mig_region_enable(struct hisi_qm *qm)
+{
+	u32 val;
+
+	/* Select migration region of PF */
+	if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V3) {
+		val = readl(qm->io_base + QM_MIG_REGION_SEL);
+		val |= QM_MIG_REGION_EN;
+		writel(val, qm->io_base + QM_MIG_REGION_SEL);
+	}
+}
+
 static void hisi_qm_pci_uninit(struct hisi_qm *qm)
 {
 	struct pci_dev *pdev = qm->pdev;
 
 	pci_free_irq_vectors(pdev);
+	hisi_mig_region_clear(qm);
 	qm_put_pci_res(qm);
 	pci_disable_device(pdev);
 }
@@ -3678,6 +3716,7 @@ static void qm_clear_vft_config(struct hisi_qm *qm)
 static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
 {
 	struct device *dev = &qm->pdev->dev;
+	struct qm_shaper_factor t_factor;
 	u32 ir = qos * QM_QOS_RATE;
 	int ret, total_vfs, i;
 
@@ -3685,6 +3724,7 @@ static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
 	if (fun_index > total_vfs)
 		return -EINVAL;
 
+	memcpy(&t_factor, &qm->factor[fun_index], sizeof(t_factor));
 	qm->factor[fun_index].func_qos = qos;
 
 	ret = qm_get_shaper_para(ir, &qm->factor[fun_index]);
@@ -3698,11 +3738,21 @@ static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
 		ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
 		if (ret) {
 			dev_err(dev, "type: %d, failed to set shaper vft!\n", i);
-			return -EINVAL;
+			goto back_func_qos;
 		}
 	}
 
 	return 0;
+
+back_func_qos:
+	memcpy(&qm->factor[fun_index], &t_factor, sizeof(t_factor));
+	for (i--; i >= ALG_TYPE_0; i--) {
+		ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
+		if (ret)
+			dev_err(dev, "failed to restore shaper vft during rollback!\n");
+	}
+
+	return -EINVAL;
 }
 
 static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
@@ -3871,10 +3921,12 @@ static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
 	pdev = container_of(dev, struct pci_dev, dev);
 	if (pci_physfn(pdev) != qm->pdev) {
 		pci_err(qm->pdev, "the pdev input does not match the pf!\n");
+		put_device(dev);
 		return -EINVAL;
 	}
 
 	*fun_index = pdev->devfn;
+	put_device(dev);
 
 	return 0;
 }
@@ -5725,6 +5777,7 @@ int hisi_qm_init(struct hisi_qm *qm)
 		goto err_free_qm_memory;
 
 	qm_cmd_init(qm);
+	hisi_mig_region_enable(qm);
 
 	return 0;
 
@@ -5863,6 +5916,7 @@ static int qm_rebuild_for_resume(struct hisi_qm *qm)
 	}
 
 	qm_cmd_init(qm);
+	hisi_mig_region_enable(qm);
 	hisi_qm_dev_err_init(qm);
 
 	/* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. */
 	writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
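[editor's note] The qm.c rework above reads a completion entry's dw0 once per iteration instead of repeatedly dereferencing device-written memory through the old QM_EQE_PHASE(eqe) macro. The underlying scheme is a phase-tagged ring: the producer flips the phase bit each time it wraps, so the consumer drains entries while the phase matches and stops at the first stale one. A stand-alone userspace sketch of the idea (all names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define Q_DEPTH 4

struct eqe { uint32_t dw0; };	/* bit 16 carries the phase */

static int phase_of(uint32_t dw0) { return (dw0 >> 16) & 0x1; }

/* Drain entries whose phase matches the consumer's expected phase.
 * dw0 is read exactly once per entry; the producer toggles the phase
 * bit on every wrap, so a mismatching entry marks the end of new work. */
static void drain(const struct eqe *q, int *head, int *phase)
{
	uint32_t dw0 = q[*head].dw0;

	while (phase_of(dw0) == *phase) {
		printf("completion for queue %u\n", dw0 & 0xffff);
		if (*head == Q_DEPTH - 1) {
			*phase = !*phase;	/* expect flipped bit after wrap */
			*head = 0;
		} else {
			(*head)++;
		}
		dw0 = q[*head].dw0;
	}
}

int main(void)
{
	/* Two valid entries (phase 1), then a stale one (phase 0). */
	struct eqe q[Q_DEPTH] = {
		{ (1u << 16) | 5 }, { (1u << 16) | 7 }, { 0 }, { 0 },
	};
	int head = 0, phase = 1;

	drain(q, &head, &phase);
	return 0;
}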
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 7a9ef2a9972a..24c7b6ab285b 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -245,11 +245,6 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, struct scatterlist *sgl,
 	}
 
 	curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
-	if (IS_ERR(curr_hw_sgl)) {
-		dev_err(dev, "Get SGL error!\n");
-		ret = -ENOMEM;
-		goto err_unmap;
-	}
 	curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
 	curr_hw_sge = curr_hw_sgl->sge_entries;
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 23f585219fb4..d0058757b000 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -805,7 +805,7 @@ static int save_iaa_wq(struct idxd_wq *wq)
 	if (!cpus_per_iaa)
 		cpus_per_iaa = 1;
 out:
-	return 0;
+	return ret;
 }
 
 static void remove_iaa_wq(struct idxd_wq *wq)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index 35679b21ff63..11728cf32653 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -105,7 +105,6 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev)
 			accel_dev->accel_id);
 		hw_device->reset_device(accel_dev);
 		pci_restore_state(pdev);
-		pci_save_state(pdev);
 	}
 }
 
@@ -204,7 +203,6 @@ static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
 	if (!pdev->is_busmaster)
 		pci_set_master(pdev);
 	pci_restore_state(pdev);
-	pci_save_state(pdev);
 	res = adf_dev_up(accel_dev, false);
 	if (res && res != -EALREADY)
 		return PCI_ERS_RESULT_DISCONNECT;
@@ -276,11 +274,11 @@ int adf_notify_fatal_error(struct adf_accel_dev *accel_dev)
 int adf_init_aer(void)
 {
 	device_reset_wq = alloc_workqueue("qat_device_reset_wq",
-					  WQ_MEM_RECLAIM, 0);
+					  WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 	if (!device_reset_wq)
 		return -EFAULT;
 
-	device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", 0, 0);
+	device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", WQ_PERCPU, 0);
 	if (!device_sriov_wq) {
 		destroy_workqueue(device_reset_wq);
 		device_reset_wq = NULL;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c
index 12e565613661..4639d7fd93e6 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c
@@ -384,7 +384,8 @@ EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
  */
 int __init adf_init_misc_wq(void)
 {
-	adf_misc_wq = alloc_workqueue("qat_misc_wq", WQ_MEM_RECLAIM, 0);
+	adf_misc_wq = alloc_workqueue("qat_misc_wq",
+				      WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 
 	return !adf_misc_wq ? -ENOMEM : 0;
 }
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
index 69295a9ddf0a..4ccc94ed9493 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
@@ -1,18 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright(c) 2025 Intel Corporation */
+#include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/sprintf.h>
 #include <linux/string_helpers.h>
 
 #include "adf_pm_dbgfs_utils.h"
 
-/*
- * This is needed because a variable is used to index the mask at
- * pm_scnprint_table(), making it not compile time constant, so the compile
- * asserts from FIELD_GET() or u32_get_bits() won't be fulfilled.
- */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-
 #define PM_INFO_MAX_KEY_LEN	21
 
 static int pm_scnprint_table(char *buff, const struct pm_status_row *table,
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
index 31d1ef0cb1f5..bb904ba4bf84 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
@@ -299,7 +299,8 @@ EXPORT_SYMBOL_GPL(adf_sriov_configure);
 int __init adf_init_pf_wq(void)
 {
 	/* Workqueue for PF2VF responses */
-	pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);
+	pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq",
+					WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 
 	return !pf2vf_resp_wq ? -ENOMEM : 0;
 }
diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
index a4636ec9f9ca..d0fef20a3df4 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
@@ -299,7 +299,8 @@ EXPORT_SYMBOL_GPL(adf_flush_vf_wq);
 */
 int __init adf_init_vf_wq(void)
 {
-	adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);
+	adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq",
+					 WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 
 	return !adf_vf_stop_wq ? -EFAULT : 0;
 }
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index 18c3e4416dc5..06d49cb781ae 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -200,20 +200,12 @@ qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
 
 static int qat_uclo_parse_num(char *str, unsigned int *num)
 {
-	char buf[16] = {0};
-	unsigned long ae = 0;
-	int i;
-
-	strscpy(buf, str, sizeof(buf));
-	for (i = 0; i < 16; i++) {
-		if (!isdigit(buf[i])) {
-			buf[i] = '\0';
-			break;
-		}
-	}
-	if ((kstrtoul(buf, 10, &ae)))
-		return -EFAULT;
+	unsigned long long ae;
+	char *end;
 
+	ae = simple_strtoull(str, &end, 10);
+	if (ae > UINT_MAX || str == end || (end - str) > 19)
+		return -EINVAL;
 	*num = (unsigned int)ae;
 	return 0;
 }
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 9c21f5d835d2..301bdf239e7d 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -420,7 +420,6 @@ static int mv_cesa_probe(struct platform_device *pdev)
 {
 	const struct mv_cesa_caps *caps = &orion_caps;
 	const struct mbus_dram_target_info *dram;
-	const struct of_device_id *match;
 	struct device *dev = &pdev->dev;
 	struct mv_cesa_dev *cesa;
 	struct mv_cesa_engine *engines;
@@ -433,11 +432,9 @@ static int mv_cesa_probe(struct platform_device *pdev)
 	}
 
 	if (dev->of_node) {
-		match = of_match_node(mv_cesa_of_match_table, dev->of_node);
-		if (!match || !match->data)
+		caps = of_device_get_match_data(dev);
+		if (!caps)
 			return -ENOTSUPP;
-
-		caps = match->data;
 	}
 
 	cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
index 215a1a8ba7e9..07a74f702c3a 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
@@ -24,7 +24,8 @@ static int otx2_cpt_dl_egrp_delete(struct devlink *dl, u32 id,
 }
 
 static int otx2_cpt_dl_uc_info(struct devlink *dl, u32 id,
-			       struct devlink_param_gset_ctx *ctx)
+			       struct devlink_param_gset_ctx *ctx,
+			       struct netlink_ext_ack *extack)
 {
 	ctx->val.vstr[0] = '\0';
 
@@ -32,7 +33,8 @@ static int otx2_cpt_dl_uc_info(struct devlink *dl, u32 id,
 }
 
 static int otx2_cpt_dl_t106_mode_get(struct devlink *dl, u32 id,
-				     struct devlink_param_gset_ctx *ctx)
+				     struct devlink_param_gset_ctx *ctx,
+				     struct netlink_ext_ack *extack)
 {
 	struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
 	struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index ebdf4efa09d4..b5cc5401f704 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -3,6 +3,7 @@
 
 #include <linux/ctype.h>
 #include <linux/firmware.h>
+#include <linux/string.h>
 #include <linux/string_choices.h>
 #include "otx2_cptpf_ucode.h"
 #include "otx2_cpt_common.h"
@@ -458,13 +459,13 @@ static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info,
 			     u16 rid)
 {
 	char filename[OTX2_CPT_NAME_LENGTH];
-	char eng_type[8] = {0};
+	char eng_type[8];
 	int ret, e, i;
 
 	INIT_LIST_HEAD(&fw_info->ucodes);
 
 	for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {
-		strcpy(eng_type, get_eng_type_str(e));
+		strscpy(eng_type, get_eng_type_str(e));
 		for (i = 0; i < strlen(eng_type); i++)
 			eng_type[i] = tolower(eng_type[i]);
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index e95e84486d9a..b966f3365b7d 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -21,7 +21,6 @@
 #include "sha.h"
 #include "aead.h"
 
-#define QCE_MAJOR_VERSION5	0x05
 #define QCE_QUEUE_LENGTH	1
 
 #define QCE_DEFAULT_MEM_BANDWIDTH	393600
@@ -161,7 +160,7 @@ static int qce_check_version(struct qce_device *qce)
 	 * the driver does not support v5 with minor 0 because it has special
 	 * alignment requirements.
 	 */
-	if (major != QCE_MAJOR_VERSION5 || minor == 0)
+	if (major == 5 && minor == 0)
 		return -ENODEV;
 
 	qce->burst_size = QCE_BAM_BURST_SIZE;
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 1dec7aea852d..68cafd4741ad 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -24,11 +24,13 @@ int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma)
 
 	dma->txchan = dma_request_chan(dev, "tx");
 	if (IS_ERR(dma->txchan))
-		return PTR_ERR(dma->txchan);
+		return dev_err_probe(dev, PTR_ERR(dma->txchan),
+				     "Failed to get TX DMA channel\n");
 
 	dma->rxchan = dma_request_chan(dev, "rx");
 	if (IS_ERR(dma->rxchan)) {
-		ret = PTR_ERR(dma->rxchan);
+		ret = dev_err_probe(dev, PTR_ERR(dma->rxchan),
+				    "Failed to get RX DMA channel\n");
 		goto error_rx;
 	}
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index 9393e10671c2..e80f9148c012 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -321,8 +321,7 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
 	algt->stat_req++;
 	rkc->nreq++;
 
-	ivsize = crypto_skcipher_ivsize(tfm);
-	if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
+	if (areq->iv && ivsize > 0) {
 		if (rctx->mode & RK_CRYPTO_DEC) {
 			offset = areq->cryptlen - ivsize;
 			scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
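[editor's note] The qce/dma.c change above swaps a bare PTR_ERR return for dev_err_probe(), which logs and returns the error in one step: real errors are printed, while -EPROBE_DEFER is recorded quietly as the deferral reason (visible in debugfs devices_deferred) instead of spamming the log. A minimal sketch of the call shape; demo_request_dma is an illustrative name:

#include <linux/device.h>
#include <linux/dmaengine.h>

static int demo_request_dma(struct device *dev, struct dma_chan **chan)
{
	*chan = dma_request_chan(dev, "tx");
	if (IS_ERR(*chan))
		/* Logs (or records a deferral reason) and returns the error. */
		return dev_err_probe(dev, PTR_ERR(*chan),
				     "Failed to get TX DMA channel\n");

	return 0;
}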
diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c
index e6839c7bfb73..54b7af4a7aee 100644
--- a/drivers/crypto/starfive/jh7110-hash.c
+++ b/drivers/crypto/starfive/jh7110-hash.c
@@ -325,6 +325,7 @@ static int starfive_hash_digest(struct ahash_request *req)
 	struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
 	struct starfive_cryp_dev *cryp = ctx->cryp;
+	int sg_len;
 
 	memset(rctx, 0, sizeof(struct starfive_cryp_request_ctx));
 
@@ -333,7 +334,10 @@ static int starfive_hash_digest(struct ahash_request *req)
 	rctx->in_sg = req->src;
 	rctx->blksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
 	rctx->digsize = crypto_ahash_digestsize(tfm);
-	rctx->in_sg_len = sg_nents_for_len(rctx->in_sg, rctx->total);
+	sg_len = sg_nents_for_len(rctx->in_sg, rctx->total);
+	if (sg_len < 0)
+		return sg_len;
+	rctx->in_sg_len = sg_len;
 
 	ctx->rctx = rctx;
 
 	return crypto_transfer_hash_request_to_engine(cryp->engine, req);
diff --git a/drivers/crypto/ti/Kconfig b/drivers/crypto/ti/Kconfig
index d4f91c1e0cb5..a3692ceec49b 100644
--- a/drivers/crypto/ti/Kconfig
+++ b/drivers/crypto/ti/Kconfig
@@ -6,6 +6,7 @@ config CRYPTO_DEV_TI_DTHEV2
 	select CRYPTO_SKCIPHER
 	select CRYPTO_ECB
 	select CRYPTO_CBC
+	select CRYPTO_XTS
 	help
 	  This enables support for the TI DTHE V2 hw cryptography engine
 	  which can be found on TI K3 SOCs. Selecting this enables use
diff --git a/drivers/crypto/ti/dthev2-aes.c b/drivers/crypto/ti/dthev2-aes.c
index 3547c41fa4ed..156729ccc50e 100644
--- a/drivers/crypto/ti/dthev2-aes.c
+++ b/drivers/crypto/ti/dthev2-aes.c
@@ -25,6 +25,7 @@
 
 // AES Engine
 #define DTHE_P_AES_BASE		0x7000
+
 #define DTHE_P_AES_KEY1_0	0x0038
 #define DTHE_P_AES_KEY1_1	0x003C
 #define DTHE_P_AES_KEY1_2	0x0030
@@ -33,6 +34,16 @@
 #define DTHE_P_AES_KEY1_5	0x002C
 #define DTHE_P_AES_KEY1_6	0x0020
 #define DTHE_P_AES_KEY1_7	0x0024
+
+#define DTHE_P_AES_KEY2_0	0x0018
+#define DTHE_P_AES_KEY2_1	0x001C
+#define DTHE_P_AES_KEY2_2	0x0010
+#define DTHE_P_AES_KEY2_3	0x0014
+#define DTHE_P_AES_KEY2_4	0x0008
+#define DTHE_P_AES_KEY2_5	0x000C
+#define DTHE_P_AES_KEY2_6	0x0000
+#define DTHE_P_AES_KEY2_7	0x0004
+
 #define DTHE_P_AES_IV_IN_0	0x0040
 #define DTHE_P_AES_IV_IN_1	0x0044
 #define DTHE_P_AES_IV_IN_2	0x0048
@@ -52,6 +63,7 @@
 enum aes_ctrl_mode_masks {
 	AES_CTRL_ECB_MASK = 0x00,
 	AES_CTRL_CBC_MASK = BIT(5),
+	AES_CTRL_XTS_MASK = BIT(12) | BIT(11),
 };
 
 #define DTHE_AES_CTRL_MODE_CLEAR_MASK	~GENMASK(28, 5)
@@ -88,6 +100,31 @@ static int dthe_cipher_init_tfm(struct crypto_skcipher *tfm)
 	return 0;
 }
 
+static int dthe_cipher_xts_init_tfm(struct crypto_skcipher *tfm)
+{
+	struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct dthe_data *dev_data = dthe_get_dev(ctx);
+
+	ctx->dev_data = dev_data;
+	ctx->keylen = 0;
+
+	ctx->skcipher_fb = crypto_alloc_sync_skcipher("xts(aes)", 0,
+						      CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(ctx->skcipher_fb)) {
+		dev_err(dev_data->dev, "fallback driver xts(aes) couldn't be loaded\n");
+		return PTR_ERR(ctx->skcipher_fb);
+	}
+
+	return 0;
+}
+
+static void dthe_cipher_xts_exit_tfm(struct crypto_skcipher *tfm)
+{
+	struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	crypto_free_sync_skcipher(ctx->skcipher_fb);
+}
+
 static int dthe_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
 {
 	struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -119,6 +156,27 @@ static int dthe_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
 	return dthe_aes_setkey(tfm, key, keylen);
 }
 
+static int dthe_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
+{
+	struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	if (keylen != 2 * AES_KEYSIZE_128 &&
+	    keylen != 2 * AES_KEYSIZE_192 &&
+	    keylen != 2 * AES_KEYSIZE_256)
+		return -EINVAL;
+
+	ctx->aes_mode = DTHE_AES_XTS;
+	ctx->keylen = keylen / 2;
+	memcpy(ctx->key, key, keylen);
+
+	crypto_sync_skcipher_clear_flags(ctx->skcipher_fb, CRYPTO_TFM_REQ_MASK);
+	crypto_sync_skcipher_set_flags(ctx->skcipher_fb,
+				       crypto_skcipher_get_flags(tfm) &
+				       CRYPTO_TFM_REQ_MASK);
+
+	return crypto_sync_skcipher_setkey(ctx->skcipher_fb, key, keylen);
+}
+
 static void dthe_aes_set_ctrl_key(struct dthe_tfm_ctx *ctx,
 				  struct dthe_aes_req_ctx *rctx,
 				  u32 *iv_in)
@@ -141,6 +199,24 @@ static void dthe_aes_set_ctrl_key(struct dthe_tfm_ctx *ctx,
 		writel_relaxed(ctx->key[7], aes_base_reg + DTHE_P_AES_KEY1_7);
 	}
 
+	if (ctx->aes_mode == DTHE_AES_XTS) {
+		size_t key2_offset = ctx->keylen / sizeof(u32);
+
+		writel_relaxed(ctx->key[key2_offset + 0], aes_base_reg + DTHE_P_AES_KEY2_0);
+		writel_relaxed(ctx->key[key2_offset + 1], aes_base_reg + DTHE_P_AES_KEY2_1);
+		writel_relaxed(ctx->key[key2_offset + 2], aes_base_reg + DTHE_P_AES_KEY2_2);
+		writel_relaxed(ctx->key[key2_offset + 3], aes_base_reg + DTHE_P_AES_KEY2_3);
+
+		if (ctx->keylen > AES_KEYSIZE_128) {
+			writel_relaxed(ctx->key[key2_offset + 4], aes_base_reg + DTHE_P_AES_KEY2_4);
+			writel_relaxed(ctx->key[key2_offset + 5], aes_base_reg + DTHE_P_AES_KEY2_5);
+		}
+		if (ctx->keylen == AES_KEYSIZE_256) {
+			writel_relaxed(ctx->key[key2_offset + 6], aes_base_reg + DTHE_P_AES_KEY2_6);
+			writel_relaxed(ctx->key[key2_offset + 7], aes_base_reg + DTHE_P_AES_KEY2_7);
+		}
+	}
+
 	if (rctx->enc)
 		ctrl_val |= DTHE_AES_CTRL_DIR_ENC;
 
@@ -160,6 +236,9 @@ static void dthe_aes_set_ctrl_key(struct dthe_tfm_ctx *ctx,
 	case DTHE_AES_CBC:
 		ctrl_val |= AES_CTRL_CBC_MASK;
 		break;
+	case DTHE_AES_XTS:
+		ctrl_val |= AES_CTRL_XTS_MASK;
+		break;
 	}
 
 	if (iv_in) {
@@ -315,24 +394,45 @@ aes_err:
 	local_bh_disable();
 	crypto_finalize_skcipher_request(dev_data->engine, req, ret);
 	local_bh_enable();
-	return ret;
+	return 0;
 }
 
 static int dthe_aes_crypt(struct skcipher_request *req)
 {
 	struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+	struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
 	struct dthe_data *dev_data = dthe_get_dev(ctx);
 	struct crypto_engine *engine;
 
 	/*
-	 * If data is not a multiple of AES_BLOCK_SIZE, need to return -EINVAL
-	 * If data length input is zero, no need to do any operation.
+	 * If data is not a multiple of AES_BLOCK_SIZE:
+	 * - need to return -EINVAL for ECB, CBC as they are block ciphers
+	 * - need to fallback to software as H/W doesn't support Ciphertext Stealing for XTS
 	 */
-	if (req->cryptlen % AES_BLOCK_SIZE)
+	if (req->cryptlen % AES_BLOCK_SIZE) {
+		if (ctx->aes_mode == DTHE_AES_XTS) {
+			SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->skcipher_fb);
+
+			skcipher_request_set_callback(subreq, skcipher_request_flags(req),
+						      req->base.complete, req->base.data);
+			skcipher_request_set_crypt(subreq, req->src, req->dst,
+						   req->cryptlen, req->iv);
+
+			return rctx->enc ? crypto_skcipher_encrypt(subreq) :
+					   crypto_skcipher_decrypt(subreq);
+		}
 		return -EINVAL;
+	}
 
-	if (req->cryptlen == 0)
+	/*
+	 * If data length input is zero, no need to do any operation.
+	 * Except for XTS mode, where data length should be non-zero.
+	 */
+	if (req->cryptlen == 0) {
+		if (ctx->aes_mode == DTHE_AES_XTS)
+			return -EINVAL;
 		return 0;
+	}
 
 	engine = dev_data->engine;
 	return crypto_transfer_skcipher_request_to_engine(engine, req);
 }
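[editor's note] dthe_aes_crypt() above bounces XTS requests whose length is not a block multiple to a software tfm, since ciphertext stealing is not implemented by the DTHE engine. The same dispatch, reduced to a self-contained helper; demo_xts_fallback, fb, and enc are illustrative names (the driver derives them from its tfm and request contexts):

#include <crypto/skcipher.h>

/* Re-issue a request on a software sync tfm that was allocated with
 * CRYPTO_ALG_NEED_FALLBACK at init time.  The on-stack subrequest
 * borrows the original request's src/dst/iv and completion callback. */
static int demo_xts_fallback(struct skcipher_request *req,
			     struct crypto_sync_skcipher *fb, bool enc)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, fb);

	skcipher_request_set_callback(subreq, skcipher_request_flags(req),
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, req->iv);

	return enc ? crypto_skcipher_encrypt(subreq)
		   : crypto_skcipher_decrypt(subreq);
}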
@@ -399,7 +499,32 @@ static struct skcipher_engine_alg cipher_algs[] = {
 			.cra_module = THIS_MODULE,
 		},
 		.op.do_one_request = dthe_aes_run,
-	} /* CBC AES */
+	}, /* CBC AES */
+	{
+		.base.init = dthe_cipher_xts_init_tfm,
+		.base.exit = dthe_cipher_xts_exit_tfm,
+		.base.setkey = dthe_aes_xts_setkey,
+		.base.encrypt = dthe_aes_encrypt,
+		.base.decrypt = dthe_aes_decrypt,
+		.base.min_keysize = AES_MIN_KEY_SIZE * 2,
+		.base.max_keysize = AES_MAX_KEY_SIZE * 2,
+		.base.ivsize = AES_IV_SIZE,
+		.base.base = {
+			.cra_name = "xts(aes)",
+			.cra_driver_name = "xts-aes-dthev2",
+			.cra_priority = 299,
+			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY |
+				     CRYPTO_ALG_NEED_FALLBACK,
+			.cra_alignmask = AES_BLOCK_SIZE - 1,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct dthe_tfm_ctx),
+			.cra_reqsize = sizeof(struct dthe_aes_req_ctx),
+			.cra_module = THIS_MODULE,
+		},
+		.op.do_one_request = dthe_aes_run,
+	}, /* XTS AES */
 };
 
 int dthe_register_aes_algs(void)
diff --git a/drivers/crypto/ti/dthev2-common.h b/drivers/crypto/ti/dthev2-common.h
index 68c94acda8aa..c7a06a4c353f 100644
--- a/drivers/crypto/ti/dthev2-common.h
+++ b/drivers/crypto/ti/dthev2-common.h
@@ -27,10 +27,16 @@
 #define DTHE_REG_SIZE		4
 #define DTHE_DMA_TIMEOUT_MS	2000
 
+/*
+ * Size of largest possible key (of all algorithms) to be stored in dthe_tfm_ctx.
+ * This is currently the keysize of XTS-AES-256 which is 512 bits (64 bytes).
+ */
+#define DTHE_MAX_KEYSIZE	(AES_MAX_KEY_SIZE * 2)
+
 enum dthe_aes_mode {
 	DTHE_AES_ECB = 0,
 	DTHE_AES_CBC,
+	DTHE_AES_XTS,
 };
 
 /* Driver specific struct definitions */
@@ -73,12 +79,14 @@ struct dthe_list {
  * @keylen: AES key length
  * @key: AES key
  * @aes_mode: AES mode
+ * @skcipher_fb: Fallback crypto skcipher handle for AES-XTS mode
  */
 struct dthe_tfm_ctx {
 	struct dthe_data *dev_data;
 	unsigned int keylen;
-	u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+	u32 key[DTHE_MAX_KEYSIZE / sizeof(u32)];
 	enum dthe_aes_mode aes_mode;
+	struct crypto_sync_skcipher *skcipher_fb;
 };
 
 /**
diff --git a/drivers/crypto/xilinx/xilinx-trng.c b/drivers/crypto/xilinx/xilinx-trng.c
index 4e4700d68127..db0fbb28ff32 100644
--- a/drivers/crypto/xilinx/xilinx-trng.c
+++ b/drivers/crypto/xilinx/xilinx-trng.c
@@ -8,7 +8,6 @@
 #include <linux/clk.h>
 #include <linux/crypto.h>
 #include <linux/delay.h>
-#include <linux/errno.h>
 #include <linux/firmware/xlnx-zynqmp.h>
 #include <linux/hw_random.h>
 #include <linux/io.h>
@@ -18,10 +17,11 @@
 #include <linux/mutex.h>
 #include <linux/mod_devicetable.h>
 #include <linux/platform_device.h>
-#include <linux/string.h>
+#include <crypto/aes.h>
+#include <crypto/df_sp80090a.h>
+#include <crypto/internal/drbg.h>
 #include <crypto/internal/cipher.h>
 #include <crypto/internal/rng.h>
-#include <crypto/aes.h>
 
 /* TRNG Registers Offsets */
 #define TRNG_STATUS_OFFSET	0x4U
@@ -59,6 +59,8 @@ struct xilinx_rng {
 	void __iomem *rng_base;
 	struct device *dev;
+	unsigned char *scratchpadbuf;
+	struct crypto_aes_ctx *aesctx;
 	struct mutex lock;	/* Protect access to TRNG device */
 	struct hwrng trng;
 };
@@ -182,9 +184,13 @@ static void xtrng_enable_entropy(struct xilinx_rng *rng)
 static int xtrng_reseed_internal(struct xilinx_rng *rng)
 {
 	u8 entropy[TRNG_ENTROPY_SEED_LEN_BYTES];
+	struct drbg_string data;
+	LIST_HEAD(seedlist);
 	u32 val;
 	int ret;
 
+	drbg_string_fill(&data, entropy, TRNG_SEED_LEN_BYTES);
+	list_add_tail(&data.list, &seedlist);
 	memset(entropy, 0, sizeof(entropy));
 	xtrng_enable_entropy(rng);
 
@@ -192,9 +198,14 @@ static int xtrng_reseed_internal(struct xilinx_rng *rng)
 	ret = xtrng_collect_random_data(rng, entropy, TRNG_SEED_LEN_BYTES, true);
 	if (ret != TRNG_SEED_LEN_BYTES)
 		return -EINVAL;
+	ret = crypto_drbg_ctr_df(rng->aesctx, rng->scratchpadbuf,
+				 TRNG_SEED_LEN_BYTES, &seedlist, AES_BLOCK_SIZE,
+				 TRNG_SEED_LEN_BYTES);
+	if (ret)
+		return ret;
 
 	xtrng_write_multiple_registers(rng->rng_base + TRNG_EXT_SEED_OFFSET,
-				       (u32 *)entropy, TRNG_NUM_INIT_REGS);
+				       (u32 *)rng->scratchpadbuf, TRNG_NUM_INIT_REGS);
 	/* select reseed operation */
 	iowrite32(TRNG_CTRL_PRNGXS_MASK, rng->rng_base + TRNG_CTRL_OFFSET);
@@ -324,6 +335,7 @@ static void xtrng_hwrng_unregister(struct hwrng *trng)
 static int xtrng_probe(struct platform_device *pdev)
 {
 	struct xilinx_rng *rng;
+	size_t sb_size;
 	int ret;
 
 	rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
@@ -333,15 +345,26 @@ static int xtrng_probe(struct platform_device *pdev)
 	rng->dev = &pdev->dev;
 	rng->rng_base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(rng->rng_base)) {
-		dev_err(&pdev->dev, "Failed to map resource %ld\n", PTR_ERR(rng->rng_base));
+		dev_err(&pdev->dev, "Failed to map resource %pe\n", rng->rng_base);
 		return PTR_ERR(rng->rng_base);
 	}
 
+	rng->aesctx = devm_kzalloc(&pdev->dev, sizeof(*rng->aesctx), GFP_KERNEL);
+	if (!rng->aesctx)
+		return -ENOMEM;
+
+	sb_size = crypto_drbg_ctr_df_datalen(TRNG_SEED_LEN_BYTES, AES_BLOCK_SIZE);
+	rng->scratchpadbuf = devm_kzalloc(&pdev->dev, sb_size, GFP_KERNEL);
+	if (!rng->scratchpadbuf) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
 	xtrng_trng_reset(rng->rng_base);
 	ret = xtrng_reseed_internal(rng);
 	if (ret) {
 		dev_err(&pdev->dev, "TRNG Seed fail\n");
-		return ret;
+		goto end;
 	}
 
 	xilinx_rng_dev = rng;
@@ -349,8 +372,9 @@ static int xtrng_probe(struct platform_device *pdev)
 	ret = crypto_register_rng(&xtrng_trng_alg);
 	if (ret) {
 		dev_err(&pdev->dev, "Crypto Random device registration failed: %d\n", ret);
-		return ret;
+		goto end;
 	}
+
 	ret = xtrng_hwrng_register(&rng->trng);
 	if (ret) {
 		dev_err(&pdev->dev, "HWRNG device registration failed: %d\n", ret);
@@ -363,6 +387,7 @@ static int xtrng_probe(struct platform_device *pdev)
 
 crypto_rng_free:
 	crypto_unregister_rng(&xtrng_trng_alg);
+end:
 	return ret;
 }
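[editor's note] The reseed path above no longer writes raw noise into the seed registers: it conditions the collected entropy through the SP 800-90A CTR derivation function first, using the crypto_drbg_ctr_df() and crypto_drbg_ctr_df_datalen() helpers this series introduces. Reduced to a helper, the call sequence looks roughly like this (demo_condition and its parameters are illustrative, and the helper signatures are assumed to be exactly as used in the driver above):

#include <crypto/aes.h>
#include <crypto/df_sp80090a.h>
#include <crypto/internal/drbg.h>
#include <linux/list.h>
#include <linux/types.h>

/* Condition seedlen bytes of raw entropy with the block-cipher CTR
 * derivation function before loading it as a seed, mirroring
 * xtrng_reseed_internal() above.  aes and scratch are preallocated;
 * scratch must be crypto_drbg_ctr_df_datalen(seedlen, AES_BLOCK_SIZE)
 * bytes long. */
static int demo_condition(struct crypto_aes_ctx *aes, u8 *raw,
			  size_t seedlen, u8 *scratch)
{
	struct drbg_string in;
	LIST_HEAD(seedlist);

	drbg_string_fill(&in, raw, seedlen);
	list_add_tail(&in.list, &seedlist);

	return crypto_drbg_ctr_df(aes, scratch, seedlen, &seedlist,
				  AES_BLOCK_SIZE, seedlen);
}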
