Diffstat (limited to 'drivers/crypto/ccp')
-rw-r--r-- | drivers/crypto/ccp/ccp-crypto-aes-cmac.c |   2
-rw-r--r-- | drivers/crypto/ccp/ccp-crypto-rsa.c      |   7
-rw-r--r-- | drivers/crypto/ccp/ccp-crypto-sha.c      |   2
-rw-r--r-- | drivers/crypto/ccp/ccp-debugfs.c         |   7
-rw-r--r-- | drivers/crypto/ccp/ccp-dmaengine.c       |   2
-rw-r--r-- | drivers/crypto/ccp/ccp-ops.c             | 108
-rw-r--r-- | drivers/crypto/ccp/psp-dev.c             |  15
-rw-r--r-- | drivers/crypto/ccp/sp-dev.c              |   6
8 files changed, 91 insertions, 58 deletions
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 60fc0fa26fd3..26687f318de6 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -46,7 +46,7 @@ static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
 	}
 
 	/* Update result area if supplied */
-	if (req->result)
+	if (req->result && rctx->final)
 		memcpy(req->result, rctx->iv, digest_size);
 
 e_free:
diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
index e6db8672d89c..05850dfd7940 100644
--- a/drivers/crypto/ccp/ccp-crypto-rsa.c
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -60,10 +60,9 @@ static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)
 
 static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
 {
-	if (ccp_version() > CCP_VERSION(3, 0))
-		return CCP5_RSA_MAXMOD;
-	else
-		return CCP_RSA_MAXMOD;
+	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+	return ctx->u.rsa.n_len;
 }
 
 static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 8b9b16d433f7..871c9628a2ee 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -47,7 +47,7 @@ static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
 	}
 
 	/* Update result area if supplied */
-	if (req->result)
+	if (req->result && rctx->final)
 		memcpy(req->result, rctx->ctx, digest_size);
 
 e_free:
diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
index 59d4ca4e72d8..1a734bd2070a 100644
--- a/drivers/crypto/ccp/ccp-debugfs.c
+++ b/drivers/crypto/ccp/ccp-debugfs.c
@@ -278,7 +278,7 @@ static const struct file_operations ccp_debugfs_stats_ops = {
 };
 
 static struct dentry *ccp_debugfs_dir;
-static DEFINE_RWLOCK(ccp_debugfs_lock);
+static DEFINE_MUTEX(ccp_debugfs_lock);
 
 #define MAX_NAME_LEN	20
 
@@ -290,16 +290,15 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
 	struct dentry *debugfs_stats;
 	struct dentry *debugfs_q_instance;
 	struct dentry *debugfs_q_stats;
-	unsigned long flags;
 	int i;
 
 	if (!debugfs_initialized())
 		return;
 
-	write_lock_irqsave(&ccp_debugfs_lock, flags);
+	mutex_lock(&ccp_debugfs_lock);
 	if (!ccp_debugfs_dir)
 		ccp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
-	write_unlock_irqrestore(&ccp_debugfs_lock, flags);
+	mutex_unlock(&ccp_debugfs_lock);
 	if (!ccp_debugfs_dir)
 		return;
 
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 8b9da58459df..67155cb21636 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -38,7 +38,7 @@ static unsigned int dma_chan_attr = CCP_DMA_DFLT;
 module_param(dma_chan_attr, uint, 0444);
 MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");
 
-unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
+static unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
 {
 	switch (dma_chan_attr) {
 	case CCP_DMA_DFLT:
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 406b95329b3d..0ea43cdeb05f 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -178,14 +178,18 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
 	return 0;
 }
 
-static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
-			    struct scatterlist *sg, unsigned int sg_offset,
-			    unsigned int len)
+static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
+			   struct scatterlist *sg, unsigned int sg_offset,
+			   unsigned int len)
 {
 	WARN_ON(!wa->address);
 
+	if (len > (wa->length - wa_offset))
+		return -EINVAL;
+
 	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
 				 0);
+	return 0;
 }
 
 static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
@@ -205,8 +209,11 @@ static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
 				   unsigned int len)
 {
 	u8 *p, *q;
+	int rc;
 
-	ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
+	rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
+	if (rc)
+		return rc;
 
 	p = wa->address + wa_offset;
 	q = p + len - 1;
@@ -509,7 +516,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
 		return ret;
 
 	dm_offset = CCP_SB_BYTES - aes->key_len;
-	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+	if (ret)
+		goto e_key;
 	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
@@ -528,7 +537,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
 		goto e_key;
 
 	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
-	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+	if (ret)
+		goto e_ctx;
 	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
@@ -556,8 +567,10 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
 				goto e_src;
 			}
 
-			ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
-					aes->cmac_key_len);
+			ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
+					      aes->cmac_key_len);
+			if (ret)
+				goto e_src;
 			ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 					     CCP_PASSTHRU_BYTESWAP_256BIT);
 			if (ret) {
@@ -666,7 +679,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 		return ret;
 
 	dm_offset = CCP_SB_BYTES - aes->key_len;
-	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+	if (ret)
+		goto e_key;
 	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
@@ -685,7 +700,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 		goto e_key;
 
 	dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
-	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+	if (ret)
+		goto e_ctx;
 
 	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
@@ -777,7 +794,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 			goto e_dst;
 	}
 
-	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+	if (ret)
+		goto e_dst;
 
 	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
@@ -820,7 +839,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 					   DMA_BIDIRECTIONAL);
 		if (ret)
 			goto e_tag;
-		ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
+		ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
+		if (ret)
+			goto e_tag;
 
 		ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE);
 		ccp_dm_free(&tag);
@@ -914,7 +935,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		return ret;
 
 	dm_offset = CCP_SB_BYTES - aes->key_len;
-	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+	if (ret)
+		goto e_key;
 	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
@@ -935,7 +958,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	if (aes->mode != CCP_AES_MODE_ECB) {
 		/* Load the AES context - convert to LE */
 		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
-		ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+		ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+		if (ret)
+			goto e_ctx;
 		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 				     CCP_PASSTHRU_BYTESWAP_256BIT);
 		if (ret) {
@@ -1113,8 +1138,12 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
 		 * big endian to little endian.
 		 */
 		dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
-		ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
-		ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
+		ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
+		if (ret)
+			goto e_key;
+		ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
+		if (ret)
+			goto e_key;
 	} else {
 		/* Version 5 CCPs use a 512-bit space for the key: each portion
 		 * occupies 256 bits, or one entire slot, and is zero-padded.
@@ -1123,9 +1152,13 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
 		dm_offset = CCP_SB_BYTES;
 		pad = dm_offset - xts->key_len;
-		ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
-		ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len,
-				xts->key_len);
+		ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
+		if (ret)
+			goto e_key;
+		ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key,
+				      xts->key_len, xts->key_len);
+		if (ret)
+			goto e_key;
 	}
 
 	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
@@ -1144,7 +1177,9 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
 	if (ret)
 		goto e_key;
 
-	ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
+	ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
+	if (ret)
+		goto e_ctx;
 	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 			     CCP_PASSTHRU_BYTESWAP_NOOP);
 	if (ret) {
@@ -1287,12 +1322,18 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */
 
 	len_singlekey = des3->key_len / 3;
-	ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
-			des3->key, 0, len_singlekey);
-	ccp_set_dm_area(&key, dm_offset + len_singlekey,
-			des3->key, len_singlekey, len_singlekey);
-	ccp_set_dm_area(&key, dm_offset,
-			des3->key, 2 * len_singlekey, len_singlekey);
+	ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
+			      des3->key, 0, len_singlekey);
+	if (ret)
+		goto e_key;
+	ret = ccp_set_dm_area(&key, dm_offset + len_singlekey,
+			      des3->key, len_singlekey, len_singlekey);
+	if (ret)
+		goto e_key;
+	ret = ccp_set_dm_area(&key, dm_offset,
+			      des3->key, 2 * len_singlekey, len_singlekey);
+	if (ret)
+		goto e_key;
 
 	/* Copy the key to the SB */
 	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
@@ -1320,7 +1361,10 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 
 		/* Load the context into the LSB */
 		dm_offset = CCP_SB_BYTES - des3->iv_len;
-		ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, des3->iv_len);
+		ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0,
+				      des3->iv_len);
+		if (ret)
+			goto e_ctx;
 
 		if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
 			load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
@@ -1604,8 +1648,10 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		}
 	} else {
 		/* Restore the context */
-		ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
-				sb_count * CCP_SB_BYTES);
+		ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
+				      sb_count * CCP_SB_BYTES);
+		if (ret)
+			goto e_ctx;
 	}
 
 	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
@@ -1927,7 +1973,9 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
 		if (ret)
 			return ret;
 
-		ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
+		ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
+		if (ret)
+			goto e_mask;
 		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
 				     CCP_PASSTHRU_BYTESWAP_NOOP);
 		if (ret) {
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index b3afb6cc9d72..d95ec526587a 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -367,8 +367,6 @@ e_free:
 
 void *psp_copy_user_blob(u64 __user uaddr, u32 len)
 {
-	void *data;
-
 	if (!uaddr || !len)
 		return ERR_PTR(-EINVAL);
 
@@ -376,18 +374,7 @@ void *psp_copy_user_blob(u64 __user uaddr, u32 len)
 
 	if (len > SEV_FW_BLOB_MAX_SIZE)
 		return ERR_PTR(-EINVAL);
 
-	data = kmalloc(len, GFP_KERNEL);
-	if (!data)
-		return ERR_PTR(-ENOMEM);
-
-	if (copy_from_user(data, (void __user *)(uintptr_t)uaddr, len))
-		goto e_free;
-
-	return data;
-
-e_free:
-	kfree(data);
-	return ERR_PTR(-EFAULT);
+	return memdup_user((void __user *)(uintptr_t)uaddr, len);
 }
 EXPORT_SYMBOL_GPL(psp_copy_user_blob);
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
index eb0da6572720..e0459002eb71 100644
--- a/drivers/crypto/ccp/sp-dev.c
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -252,12 +252,12 @@ struct sp_device *sp_get_psp_master_device(void)
 		goto unlock;
 
 	list_for_each_entry(i, &sp_units, entry) {
-		if (i->psp_data)
+		if (i->psp_data && i->get_psp_master_device) {
+			ret = i->get_psp_master_device();
 			break;
+		}
 	}
 
-	if (i->get_psp_master_device)
-		ret = i->get_psp_master_device();
 unlock:
 	write_unlock_irqrestore(&sp_unit_lock, flags);
 	return ret;
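
Most of the ccp-ops.c hunks above apply a single pattern: ccp_set_dm_area() now returns -EINVAL when a requested copy would overrun the destination work area, and every caller checks the return code and jumps to the cleanup label it already had. The stand-alone sketch below illustrates that bounds-check-and-unwind idiom in plain C; the types and names (workarea, set_area, run_op) are made up for illustration and are not the driver's code.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-in for the driver's DMA work area. */
struct workarea {
	unsigned char *address;
	unsigned int length;
};

/* Like the patched ccp_set_dm_area(): refuse a copy that would overrun
 * the work area instead of writing past the end of the buffer. */
static int set_area(struct workarea *wa, unsigned int offset,
		    const void *src, unsigned int len)
{
	if (len > wa->length - offset)
		return -EINVAL;
	memcpy(wa->address + offset, src, len);
	return 0;
}

/* Caller pattern used throughout the patch: check the return code and
 * jump to the existing cleanup label on failure. */
static int run_op(const void *key, unsigned int key_len)
{
	struct workarea wa = { .length = 32 };
	int ret;

	wa.address = calloc(1, wa.length);
	if (!wa.address)
		return -ENOMEM;

	ret = set_area(&wa, 0, key, key_len);
	if (ret)
		goto e_free;

	/* ... submit the operation here ... */

e_free:
	free(wa.address);
	return ret;
}

int main(void)
{
	unsigned char key[64] = { 0 };

	/* A 64-byte key does not fit the 32-byte area: expect -EINVAL (-22),
	 * not an out-of-bounds write. */
	printf("run_op: %d\n", run_op(key, sizeof(key)));
	return 0;
}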