From aca24d48cee232a88839c8895ac6fcd2fcad6588 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 1 Jul 2019 14:08:14 +0100 Subject: crypto: ccree - fix spelling mistake "configration" -> "configuration" There is a spelling mistake in a dev_err message. Fix it. Signed-off-by: Colin Ian King Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_driver.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 980aa04b655b..eeb803f40623 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -421,7 +421,7 @@ static int init_cc_resources(struct platform_device *plat_dev) } break; default: - dev_err(dev, "Unsupported engines configration.\n"); + dev_err(dev, "Unsupported engines configuration.\n"); rc = -EINVAL; goto post_clk_err; } -- cgit From e6e6600c001c3aa3760aeb83dbd4630063e926ee Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Tue, 2 Jul 2019 14:39:18 +0300 Subject: crypto: ccree - drop legacy ivgen support ccree had a mechanism for IV generation which was not compatible with the Linux seqiv or echainiv iv generator and was never used in any of the upstream versions so drop all the code implementing it. Signed-off-by: Gilad Ben-Yossef Signed-off-by: Herbert Xu --- drivers/crypto/ccree/Makefile | 2 +- drivers/crypto/ccree/cc_aead.c | 76 +--------- drivers/crypto/ccree/cc_aead.h | 3 +- drivers/crypto/ccree/cc_driver.c | 12 +- drivers/crypto/ccree/cc_driver.h | 10 -- drivers/crypto/ccree/cc_ivgen.c | 276 ---------------------------------- drivers/crypto/ccree/cc_ivgen.h | 55 ------- drivers/crypto/ccree/cc_pm.c | 2 - drivers/crypto/ccree/cc_request_mgr.c | 47 +----- 9 files changed, 17 insertions(+), 466 deletions(-) delete mode 100644 drivers/crypto/ccree/cc_ivgen.c delete mode 100644 drivers/crypto/ccree/cc_ivgen.h (limited to 'drivers') diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile index 145e50bdbf16..5cfda508ee41 100644 --- a/drivers/crypto/ccree/Makefile +++ b/drivers/crypto/ccree/Makefile @@ -2,7 +2,7 @@ # Copyright (C) 2012-2019 ARM Limited (or its affiliates). obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o -ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o +ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_sram_mgr.o ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o ccree-$(CONFIG_PM) += cc_pm.o diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index 7aa4cbe19a86..a9914ea6c95f 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c @@ -239,28 +239,13 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err) cc_zero_sgl(areq->dst, areq_ctx->cryptlen); err = -EBADMSG; } - } else { /*ENCRYPT*/ - if (areq_ctx->is_icv_fragmented) { - u32 skip = areq->cryptlen + areq_ctx->dst_offset; - - cc_copy_sg_portion(dev, areq_ctx->mac_buf, - areq_ctx->dst_sgl, skip, - (skip + ctx->authsize), - CC_SG_FROM_BUF); - } + /*ENCRYPT*/ + } else if (areq_ctx->is_icv_fragmented) { + u32 skip = areq->cryptlen + areq_ctx->dst_offset; - /* If an IV was generated, copy it back to the user provided - * buffer. 
- */ - if (areq_ctx->backup_giv) { - if (ctx->cipher_mode == DRV_CIPHER_CTR) - memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + - CTR_RFC3686_NONCE_SIZE, - CTR_RFC3686_IV_SIZE); - else if (ctx->cipher_mode == DRV_CIPHER_CCM) - memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + - CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE); - } + cc_copy_sg_portion(dev, areq_ctx->mac_buf, areq_ctx->dst_sgl, + skip, (skip + ctx->authsize), + CC_SG_FROM_BUF); } done: aead_request_complete(areq, err); @@ -1975,9 +1960,8 @@ static int cc_proc_aead(struct aead_request *req, */ memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, CTR_RFC3686_NONCE_SIZE); - if (!areq_ctx->backup_giv) /*User none-generated IV*/ - memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, - req->iv, CTR_RFC3686_IV_SIZE); + memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv, + CTR_RFC3686_IV_SIZE); /* Initialize counter portion of counter block */ *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); @@ -2023,40 +2007,6 @@ static int cc_proc_aead(struct aead_request *req, goto exit; } - /* do we need to generate IV? */ - if (areq_ctx->backup_giv) { - /* set the DMA mapped IV address*/ - if (ctx->cipher_mode == DRV_CIPHER_CTR) { - cc_req.ivgen_dma_addr[0] = - areq_ctx->gen_ctx.iv_dma_addr + - CTR_RFC3686_NONCE_SIZE; - cc_req.ivgen_dma_addr_len = 1; - } else if (ctx->cipher_mode == DRV_CIPHER_CCM) { - /* In ccm, the IV needs to exist both inside B0 and - * inside the counter.It is also copied to iv_dma_addr - * for other reasons (like returning it to the user). - * So, using 3 (identical) IV outputs. - */ - cc_req.ivgen_dma_addr[0] = - areq_ctx->gen_ctx.iv_dma_addr + - CCM_BLOCK_IV_OFFSET; - cc_req.ivgen_dma_addr[1] = - sg_dma_address(&areq_ctx->ccm_adata_sg) + - CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET; - cc_req.ivgen_dma_addr[2] = - sg_dma_address(&areq_ctx->ccm_adata_sg) + - CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET; - cc_req.ivgen_dma_addr_len = 3; - } else { - cc_req.ivgen_dma_addr[0] = - areq_ctx->gen_ctx.iv_dma_addr; - cc_req.ivgen_dma_addr_len = 1; - } - - /* set the IV size (8/16 B long)*/ - cc_req.ivgen_size = crypto_aead_ivsize(tfm); - } - /* STAT_PHASE_2: Create sequence */ /* Load MLLI tables to SRAM if necessary */ @@ -2107,7 +2057,6 @@ static int cc_aead_encrypt(struct aead_request *req) /* No generated IV required */ areq_ctx->backup_iv = req->iv; areq_ctx->assoclen = req->assoclen; - areq_ctx->backup_giv = NULL; areq_ctx->is_gcm4543 = false; areq_ctx->plaintext_authenticate_only = false; @@ -2139,7 +2088,6 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req) /* No generated IV required */ areq_ctx->backup_iv = req->iv; areq_ctx->assoclen = req->assoclen; - areq_ctx->backup_giv = NULL; areq_ctx->is_gcm4543 = true; cc_proc_rfc4309_ccm(req); @@ -2161,7 +2109,6 @@ static int cc_aead_decrypt(struct aead_request *req) /* No generated IV required */ areq_ctx->backup_iv = req->iv; areq_ctx->assoclen = req->assoclen; - areq_ctx->backup_giv = NULL; areq_ctx->is_gcm4543 = false; areq_ctx->plaintext_authenticate_only = false; @@ -2191,7 +2138,6 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req) /* No generated IV required */ areq_ctx->backup_iv = req->iv; areq_ctx->assoclen = req->assoclen; - areq_ctx->backup_giv = NULL; areq_ctx->is_gcm4543 = true; cc_proc_rfc4309_ccm(req); @@ -2311,8 +2257,6 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req) /* No generated IV required */ areq_ctx->backup_iv = req->iv; areq_ctx->assoclen = req->assoclen; - areq_ctx->backup_giv = NULL; - 
areq_ctx->plaintext_authenticate_only = false; cc_proc_rfc4_gcm(req); @@ -2340,7 +2284,6 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req) /* No generated IV required */ areq_ctx->backup_iv = req->iv; areq_ctx->assoclen = req->assoclen; - areq_ctx->backup_giv = NULL; cc_proc_rfc4_gcm(req); areq_ctx->is_gcm4543 = true; @@ -2372,8 +2315,6 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req) /* No generated IV required */ areq_ctx->backup_iv = req->iv; areq_ctx->assoclen = req->assoclen; - areq_ctx->backup_giv = NULL; - areq_ctx->plaintext_authenticate_only = false; cc_proc_rfc4_gcm(req); @@ -2401,7 +2342,6 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req) /* No generated IV required */ areq_ctx->backup_iv = req->iv; areq_ctx->assoclen = req->assoclen; - areq_ctx->backup_giv = NULL; cc_proc_rfc4_gcm(req); areq_ctx->is_gcm4543 = true; diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h index e51724b96c56..f12169b57f9d 100644 --- a/drivers/crypto/ccree/cc_aead.h +++ b/drivers/crypto/ccree/cc_aead.h @@ -65,8 +65,7 @@ struct aead_req_ctx { unsigned int hw_iv_size ____cacheline_aligned; /* used to prevent cache coherence problem */ u8 backup_mac[MAX_MAC_SIZE]; - u8 *backup_iv; /*store iv for generated IV flow*/ - u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/ + u8 *backup_iv; /* store orig iv */ u32 assoclen; /* internal assoclen */ dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */ /* buffer for internal ccm configurations */ diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index eeb803f40623..196e3d140355 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -22,7 +22,6 @@ #include "cc_cipher.h" #include "cc_aead.h" #include "cc_hash.h" -#include "cc_ivgen.h" #include "cc_sram_mgr.h" #include "cc_pm.h" #include "cc_fips.h" @@ -503,17 +502,11 @@ static int init_cc_resources(struct platform_device *plat_dev) goto post_buf_mgr_err; } - rc = cc_ivgen_init(new_drvdata); - if (rc) { - dev_err(dev, "cc_ivgen_init failed\n"); - goto post_buf_mgr_err; - } - /* Allocate crypto algs */ rc = cc_cipher_alloc(new_drvdata); if (rc) { dev_err(dev, "cc_cipher_alloc failed\n"); - goto post_ivgen_err; + goto post_buf_mgr_err; } /* hash must be allocated before aead since hash exports APIs */ @@ -544,8 +537,6 @@ post_hash_err: cc_hash_free(new_drvdata); post_cipher_err: cc_cipher_free(new_drvdata); -post_ivgen_err: - cc_ivgen_fini(new_drvdata); post_buf_mgr_err: cc_buffer_mgr_fini(new_drvdata); post_req_mgr_err: @@ -577,7 +568,6 @@ static void cleanup_cc_resources(struct platform_device *plat_dev) cc_aead_free(drvdata); cc_hash_free(drvdata); cc_cipher_free(drvdata); - cc_ivgen_fini(drvdata); cc_pm_fini(drvdata); cc_buffer_mgr_fini(drvdata); cc_req_mgr_fini(drvdata); diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h index 7cd99380bf1f..ab31d4a68c80 100644 --- a/drivers/crypto/ccree/cc_driver.h +++ b/drivers/crypto/ccree/cc_driver.h @@ -126,15 +126,6 @@ struct cc_cpp_req { struct cc_crypto_req { void (*user_cb)(struct device *dev, void *req, int err); void *user_arg; - dma_addr_t ivgen_dma_addr[CC_MAX_IVGEN_DMA_ADDRESSES]; - /* For the first 'ivgen_dma_addr_len' addresses of this array, - * generated IV would be placed in it by send_request(). - * Same generated IV for all addresses! - */ - /* Amount of 'ivgen_dma_addr' elements to be filled. */ - unsigned int ivgen_dma_addr_len; - /* The generated IV size required, 8/16 B allowed. 
*/ - unsigned int ivgen_size; struct completion seq_compl; /* request completion */ struct cc_cpp_req cpp; }; @@ -158,7 +149,6 @@ struct cc_drvdata { void *aead_handle; void *request_mgr_handle; void *fips_handle; - void *ivgen_handle; void *sram_mgr_handle; void *debugfs; struct clk *clk; diff --git a/drivers/crypto/ccree/cc_ivgen.c b/drivers/crypto/ccree/cc_ivgen.c deleted file mode 100644 index 99dc69383e20..000000000000 --- a/drivers/crypto/ccree/cc_ivgen.c +++ /dev/null @@ -1,276 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */ - -#include -#include "cc_driver.h" -#include "cc_ivgen.h" -#include "cc_request_mgr.h" -#include "cc_sram_mgr.h" -#include "cc_buffer_mgr.h" - -/* The max. size of pool *MUST* be <= SRAM total size */ -#define CC_IVPOOL_SIZE 1024 -/* The first 32B fraction of pool are dedicated to the - * next encryption "key" & "IV" for pool regeneration - */ -#define CC_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128) -#define CC_IVPOOL_GEN_SEQ_LEN 4 - -/** - * struct cc_ivgen_ctx -IV pool generation context - * @pool: the start address of the iv-pool resides in internal RAM - * @ctr_key_dma: address of pool's encryption key material in internal RAM - * @ctr_iv_dma: address of pool's counter iv in internal RAM - * @next_iv_ofs: the offset to the next available IV in pool - * @pool_meta: virt. address of the initial enc. key/IV - * @pool_meta_dma: phys. address of the initial enc. key/IV - */ -struct cc_ivgen_ctx { - cc_sram_addr_t pool; - cc_sram_addr_t ctr_key; - cc_sram_addr_t ctr_iv; - u32 next_iv_ofs; - u8 *pool_meta; - dma_addr_t pool_meta_dma; -}; - -/*! - * Generates CC_IVPOOL_SIZE of random bytes by - * encrypting 0's using AES128-CTR. - * - * \param ivgen iv-pool context - * \param iv_seq IN/OUT array to the descriptors sequence - * \param iv_seq_len IN/OUT pointer to the sequence length - */ -static int cc_gen_iv_pool(struct cc_ivgen_ctx *ivgen_ctx, - struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len) -{ - unsigned int idx = *iv_seq_len; - - if ((*iv_seq_len + CC_IVPOOL_GEN_SEQ_LEN) > CC_IVPOOL_SEQ_LEN) { - /* The sequence will be longer than allowed */ - return -EINVAL; - } - /* Setup key */ - hw_desc_init(&iv_seq[idx]); - set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_key, AES_KEYSIZE_128); - set_setup_mode(&iv_seq[idx], SETUP_LOAD_KEY0); - set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); - set_flow_mode(&iv_seq[idx], S_DIN_to_AES); - set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE); - set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR); - idx++; - - /* Setup cipher state */ - hw_desc_init(&iv_seq[idx]); - set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_iv, CC_AES_IV_SIZE); - set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); - set_flow_mode(&iv_seq[idx], S_DIN_to_AES); - set_setup_mode(&iv_seq[idx], SETUP_LOAD_STATE1); - set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE); - set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR); - idx++; - - /* Perform dummy encrypt to skip first block */ - hw_desc_init(&iv_seq[idx]); - set_din_const(&iv_seq[idx], 0, CC_AES_IV_SIZE); - set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_AES_IV_SIZE); - set_flow_mode(&iv_seq[idx], DIN_AES_DOUT); - idx++; - - /* Generate IV pool */ - hw_desc_init(&iv_seq[idx]); - set_din_const(&iv_seq[idx], 0, CC_IVPOOL_SIZE); - set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_IVPOOL_SIZE); - set_flow_mode(&iv_seq[idx], DIN_AES_DOUT); - idx++; - - *iv_seq_len = idx; /* Update sequence length */ - - /* queue ordering 
assures pool readiness */ - ivgen_ctx->next_iv_ofs = CC_IVPOOL_META_SIZE; - - return 0; -} - -/*! - * Generates the initial pool in SRAM. - * This function should be invoked when resuming driver. - * - * \param drvdata - * - * \return int Zero for success, negative value otherwise. - */ -int cc_init_iv_sram(struct cc_drvdata *drvdata) -{ - struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle; - struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN]; - unsigned int iv_seq_len = 0; - int rc; - - /* Generate initial enc. key/iv */ - get_random_bytes(ivgen_ctx->pool_meta, CC_IVPOOL_META_SIZE); - - /* The first 32B reserved for the enc. Key/IV */ - ivgen_ctx->ctr_key = ivgen_ctx->pool; - ivgen_ctx->ctr_iv = ivgen_ctx->pool + AES_KEYSIZE_128; - - /* Copy initial enc. key and IV to SRAM at a single descriptor */ - hw_desc_init(&iv_seq[iv_seq_len]); - set_din_type(&iv_seq[iv_seq_len], DMA_DLLI, ivgen_ctx->pool_meta_dma, - CC_IVPOOL_META_SIZE, NS_BIT); - set_dout_sram(&iv_seq[iv_seq_len], ivgen_ctx->pool, - CC_IVPOOL_META_SIZE); - set_flow_mode(&iv_seq[iv_seq_len], BYPASS); - iv_seq_len++; - - /* Generate initial pool */ - rc = cc_gen_iv_pool(ivgen_ctx, iv_seq, &iv_seq_len); - if (rc) - return rc; - - /* Fire-and-forget */ - return send_request_init(drvdata, iv_seq, iv_seq_len); -} - -/*! - * Free iv-pool and ivgen context. - * - * \param drvdata - */ -void cc_ivgen_fini(struct cc_drvdata *drvdata) -{ - struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle; - struct device *device = &drvdata->plat_dev->dev; - - if (!ivgen_ctx) - return; - - if (ivgen_ctx->pool_meta) { - memset(ivgen_ctx->pool_meta, 0, CC_IVPOOL_META_SIZE); - dma_free_coherent(device, CC_IVPOOL_META_SIZE, - ivgen_ctx->pool_meta, - ivgen_ctx->pool_meta_dma); - } - - ivgen_ctx->pool = NULL_SRAM_ADDR; -} - -/*! - * Allocates iv-pool and maps resources. - * This function generates the first IV pool. - * - * \param drvdata Driver's private context - * - * \return int Zero for success, negative value otherwise. - */ -int cc_ivgen_init(struct cc_drvdata *drvdata) -{ - struct cc_ivgen_ctx *ivgen_ctx; - struct device *device = &drvdata->plat_dev->dev; - int rc; - - /* Allocate "this" context */ - ivgen_ctx = devm_kzalloc(device, sizeof(*ivgen_ctx), GFP_KERNEL); - if (!ivgen_ctx) - return -ENOMEM; - - drvdata->ivgen_handle = ivgen_ctx; - - /* Allocate pool's header for initial enc. key/IV */ - ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE, - &ivgen_ctx->pool_meta_dma, - GFP_KERNEL); - if (!ivgen_ctx->pool_meta) { - dev_err(device, "Not enough memory to allocate DMA of pool_meta (%u B)\n", - CC_IVPOOL_META_SIZE); - rc = -ENOMEM; - goto out; - } - /* Allocate IV pool in SRAM */ - ivgen_ctx->pool = cc_sram_alloc(drvdata, CC_IVPOOL_SIZE); - if (ivgen_ctx->pool == NULL_SRAM_ADDR) { - dev_err(device, "SRAM pool exhausted\n"); - rc = -ENOMEM; - goto out; - } - - return cc_init_iv_sram(drvdata); - -out: - cc_ivgen_fini(drvdata); - return rc; -} - -/*! - * Acquires 16 Bytes IV from the iv-pool - * - * \param drvdata Driver private context - * \param iv_out_dma Array of physical IV out addresses - * \param iv_out_dma_len Length of iv_out_dma array (additional elements - * of iv_out_dma array are ignore) - * \param iv_out_size May be 8 or 16 bytes long - * \param iv_seq IN/OUT array to the descriptors sequence - * \param iv_seq_len IN/OUT pointer to the sequence length - * - * \return int Zero for success, negative value otherwise. 
- */ -int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[], - unsigned int iv_out_dma_len, unsigned int iv_out_size, - struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len) -{ - struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle; - unsigned int idx = *iv_seq_len; - struct device *dev = drvdata_to_dev(drvdata); - unsigned int t; - - if (iv_out_size != CC_AES_IV_SIZE && - iv_out_size != CTR_RFC3686_IV_SIZE) { - return -EINVAL; - } - if ((iv_out_dma_len + 1) > CC_IVPOOL_SEQ_LEN) { - /* The sequence will be longer than allowed */ - return -EINVAL; - } - - /* check that number of generated IV is limited to max dma address - * iv buffer size - */ - if (iv_out_dma_len > CC_MAX_IVGEN_DMA_ADDRESSES) { - /* The sequence will be longer than allowed */ - return -EINVAL; - } - - for (t = 0; t < iv_out_dma_len; t++) { - /* Acquire IV from pool */ - hw_desc_init(&iv_seq[idx]); - set_din_sram(&iv_seq[idx], (ivgen_ctx->pool + - ivgen_ctx->next_iv_ofs), - iv_out_size); - set_dout_dlli(&iv_seq[idx], iv_out_dma[t], iv_out_size, - NS_BIT, 0); - set_flow_mode(&iv_seq[idx], BYPASS); - idx++; - } - - /* Bypass operation is proceeded by crypto sequence, hence must - * assure bypass-write-transaction by a memory barrier - */ - hw_desc_init(&iv_seq[idx]); - set_din_no_dma(&iv_seq[idx], 0, 0xfffff0); - set_dout_no_dma(&iv_seq[idx], 0, 0, 1); - idx++; - - *iv_seq_len = idx; /* update seq length */ - - /* Update iv index */ - ivgen_ctx->next_iv_ofs += iv_out_size; - - if ((CC_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) { - dev_dbg(dev, "Pool exhausted, regenerating iv-pool\n"); - /* pool is drained -regenerate it! */ - return cc_gen_iv_pool(ivgen_ctx, iv_seq, iv_seq_len); - } - - return 0; -} diff --git a/drivers/crypto/ccree/cc_ivgen.h b/drivers/crypto/ccree/cc_ivgen.h deleted file mode 100644 index a9f5e8bba4f1..000000000000 --- a/drivers/crypto/ccree/cc_ivgen.h +++ /dev/null @@ -1,55 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */ - -#ifndef __CC_IVGEN_H__ -#define __CC_IVGEN_H__ - -#include "cc_hw_queue_defs.h" - -#define CC_IVPOOL_SEQ_LEN 8 - -/*! - * Allocates iv-pool and maps resources. - * This function generates the first IV pool. - * - * \param drvdata Driver's private context - * - * \return int Zero for success, negative value otherwise. - */ -int cc_ivgen_init(struct cc_drvdata *drvdata); - -/*! - * Free iv-pool and ivgen context. - * - * \param drvdata - */ -void cc_ivgen_fini(struct cc_drvdata *drvdata); - -/*! - * Generates the initial pool in SRAM. - * This function should be invoked when resuming DX driver. - * - * \param drvdata - * - * \return int Zero for success, negative value otherwise. - */ -int cc_init_iv_sram(struct cc_drvdata *drvdata); - -/*! - * Acquires 16 Bytes IV from the iv-pool - * - * \param drvdata Driver private context - * \param iv_out_dma Array of physical IV out addresses - * \param iv_out_dma_len Length of iv_out_dma array (additional elements of - * iv_out_dma array are ignore) - * \param iv_out_size May be 8 or 16 bytes long - * \param iv_seq IN/OUT array to the descriptors sequence - * \param iv_seq_len IN/OUT pointer to the sequence length - * - * \return int Zero for success, negative value otherwise. 
- */ -int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[], - unsigned int iv_out_dma_len, unsigned int iv_out_size, - struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len); - -#endif /*__CC_IVGEN_H__*/ diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c index 899a52f05b7a..dbc508fb719b 100644 --- a/drivers/crypto/ccree/cc_pm.c +++ b/drivers/crypto/ccree/cc_pm.c @@ -8,7 +8,6 @@ #include "cc_buffer_mgr.h" #include "cc_request_mgr.h" #include "cc_sram_mgr.h" -#include "cc_ivgen.h" #include "cc_hash.h" #include "cc_pm.h" #include "cc_fips.h" @@ -73,7 +72,6 @@ int cc_pm_resume(struct device *dev) /* must be after the queue resuming as it uses the HW queue*/ cc_init_hash_sram(drvdata); - cc_init_iv_sram(drvdata); return 0; } diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c index 0bc6ccb0b899..a947d5a2cf35 100644 --- a/drivers/crypto/ccree/cc_request_mgr.c +++ b/drivers/crypto/ccree/cc_request_mgr.c @@ -6,7 +6,6 @@ #include "cc_driver.h" #include "cc_buffer_mgr.h" #include "cc_request_mgr.h" -#include "cc_ivgen.h" #include "cc_pm.h" #define CC_MAX_POLL_ITER 10 @@ -281,36 +280,12 @@ static int cc_queues_status(struct cc_drvdata *drvdata, static int cc_do_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req, struct cc_hw_desc *desc, unsigned int len, - bool add_comp, bool ivgen) + bool add_comp) { struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; unsigned int used_sw_slots; - unsigned int iv_seq_len = 0; unsigned int total_seq_len = len; /*initial sequence length*/ - struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN]; struct device *dev = drvdata_to_dev(drvdata); - int rc; - - if (ivgen) { - dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n", - cc_req->ivgen_dma_addr_len, - &cc_req->ivgen_dma_addr[0], - &cc_req->ivgen_dma_addr[1], - &cc_req->ivgen_dma_addr[2], - cc_req->ivgen_size); - - /* Acquire IV from pool */ - rc = cc_get_iv(drvdata, cc_req->ivgen_dma_addr, - cc_req->ivgen_dma_addr_len, - cc_req->ivgen_size, iv_seq, &iv_seq_len); - - if (rc) { - dev_err(dev, "Failed to generate IV (rc=%d)\n", rc); - return rc; - } - - total_seq_len += iv_seq_len; - } used_sw_slots = ((req_mgr_h->req_queue_head - req_mgr_h->req_queue_tail) & @@ -334,8 +309,6 @@ static int cc_do_send_request(struct cc_drvdata *drvdata, wmb(); /* STAT_PHASE_4: Push sequence */ - if (ivgen) - enqueue_seq(drvdata, iv_seq, iv_seq_len); enqueue_seq(drvdata, desc, len); @@ -380,8 +353,6 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata) struct cc_bl_item *bli; struct cc_crypto_req *creq; void *req; - bool ivgen; - unsigned int total_len; struct device *dev = drvdata_to_dev(drvdata); int rc; @@ -406,12 +377,9 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata) bli->notif = true; } - ivgen = !!creq->ivgen_dma_addr_len; - total_len = bli->len + (ivgen ? 
CC_IVPOOL_SEQ_LEN : 0); - spin_lock(&mgr->hw_lock); - rc = cc_queues_status(drvdata, mgr, total_len); + rc = cc_queues_status(drvdata, mgr, bli->len); if (rc) { /* * There is still not room in the FIFO for @@ -423,7 +391,7 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata) } rc = cc_do_send_request(drvdata, &bli->creq, bli->desc, - bli->len, false, ivgen); + bli->len, false); spin_unlock(&mgr->hw_lock); @@ -447,8 +415,6 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req, { int rc; struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; - bool ivgen = !!cc_req->ivgen_dma_addr_len; - unsigned int total_len = len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0); struct device *dev = drvdata_to_dev(drvdata); bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG; gfp_t flags = cc_gfp_flags(req); @@ -461,7 +427,7 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req, } spin_lock_bh(&mgr->hw_lock); - rc = cc_queues_status(drvdata, mgr, total_len); + rc = cc_queues_status(drvdata, mgr, len); #ifdef CC_DEBUG_FORCE_BACKLOG if (backlog_ok) @@ -486,8 +452,7 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req, } if (!rc) - rc = cc_do_send_request(drvdata, cc_req, desc, len, false, - ivgen); + rc = cc_do_send_request(drvdata, cc_req, desc, len, false); spin_unlock_bh(&mgr->hw_lock); return rc; @@ -527,7 +492,7 @@ int cc_send_sync_request(struct cc_drvdata *drvdata, reinit_completion(&drvdata->hw_queue_avail); } - rc = cc_do_send_request(drvdata, cc_req, desc, len, true, false); + rc = cc_do_send_request(drvdata, cc_req, desc, len, true); spin_unlock_bh(&mgr->hw_lock); if (rc != -EINPROGRESS) { -- cgit From 76a95bd8f9e10cade9c4c8df93b5c20ff45dc0f5 Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Tue, 2 Jul 2019 14:39:19 +0300 Subject: crypto: ccree - account for TEE not ready to report When ccree driver runs it checks the state of the Trusted Execution Environment CryptoCell driver before proceeding. We did not account for cases where the TEE side is not ready or not available at all. Fix it by only considering TEE error state after sync with the TEE side driver. Signed-off-by: Gilad Ben-Yossef Fixes: ab8ec9658f5a ("crypto: ccree - add FIPS support") CC: stable@vger.kernel.org # v4.17+ Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_fips.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c index 5ad3ffb7acaa..040e09c0e1af 100644 --- a/drivers/crypto/ccree/cc_fips.c +++ b/drivers/crypto/ccree/cc_fips.c @@ -21,7 +21,13 @@ static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata) u32 reg; reg = cc_ioread(drvdata, CC_REG(GPR_HOST)); - return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK)); + /* Did the TEE report status? */ + if (reg & CC_FIPS_SYNC_TEE_STATUS) + /* Yes. Is it OK? */ + return (reg & CC_FIPS_SYNC_MODULE_OK); + + /* No. It's either not in use or will be reported later */ + return true; } /* -- cgit From 452c53d7868b3f95658ebb2f346acca96eaa2aed Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Tue, 2 Jul 2019 14:39:21 +0300 Subject: crypto: ccree - notify TEE on FIPS tests errors Register a FIPS test failure notifier and use it to notify TEE side of FIPS test failures on our side prior to panic. 
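
For context: the chain registered on below, fips_fail_notif_chain, is
assumed to be declared in the generic FIPS code (crypto/fips.c) by a
companion patch, roughly as in this hedged sketch; it fires on any
FIPS self-test failure before the kernel panics:

	/* crypto/fips.c - illustrative sketch, not part of this patch */
	ATOMIC_NOTIFIER_HEAD(fips_fail_notif_chain);
	EXPORT_SYMBOL_GPL(fips_fail_notif_chain);

	void fips_fail_notify(void)
	{
		/* runs each registered callback, e.g. cc_ree_fips_failure() */
		atomic_notifier_call_chain(&fips_fail_notif_chain, 0, NULL);
	}
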
Signed-off-by: Gilad Ben-Yossef Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_fips.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c index 040e09c0e1af..4c8bce33abcf 100644 --- a/drivers/crypto/ccree/cc_fips.c +++ b/drivers/crypto/ccree/cc_fips.c @@ -3,6 +3,7 @@ #include #include +#include #include "cc_driver.h" #include "cc_fips.h" @@ -11,6 +12,8 @@ static void fips_dsr(unsigned long devarg); struct cc_fips_handle { struct tasklet_struct tasklet; + struct notifier_block nb; + struct cc_drvdata *drvdata; }; /* The function called once at driver entry point to check @@ -46,6 +49,21 @@ void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool status) cc_iowrite(drvdata, CC_REG(HOST_GPR0), val); } +/* Push REE side FIPS test failure to TEE side */ +static int cc_ree_fips_failure(struct notifier_block *nb, unsigned long unused1, + void *unused2) +{ + struct cc_fips_handle *fips_h = + container_of(nb, struct cc_fips_handle, nb); + struct cc_drvdata *drvdata = fips_h->drvdata; + struct device *dev = drvdata_to_dev(drvdata); + + cc_set_ree_fips_status(drvdata, false); + dev_info(dev, "Notifying TEE of FIPS test failure...\n"); + + return NOTIFY_OK; +} + void cc_fips_fini(struct cc_drvdata *drvdata) { struct cc_fips_handle *fips_h = drvdata->fips_handle; @@ -53,6 +71,8 @@ void cc_fips_fini(struct cc_drvdata *drvdata) if (drvdata->hw_rev < CC_HW_REV_712 || !fips_h) return; + atomic_notifier_chain_unregister(&fips_fail_notif_chain, &fips_h->nb); + /* Kill tasklet */ tasklet_kill(&fips_h->tasklet); drvdata->fips_handle = NULL; @@ -124,6 +144,9 @@ int cc_fips_init(struct cc_drvdata *p_drvdata) dev_dbg(dev, "Initializing fips tasklet\n"); tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata); + fips_h->drvdata = p_drvdata; + fips_h->nb.notifier_call = cc_ree_fips_failure; + atomic_notifier_chain_register(&fips_fail_notif_chain, &fips_h->nb); cc_tee_handle_fips_error(p_drvdata); -- cgit From 36160aadb1034787d4d30b6af7805cb3dd636aac Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Tue, 2 Jul 2019 16:39:52 +0200 Subject: crypto: inside-secure - keep ivsize for DES ECB modes at 0 The driver incorrectly advertised the IV size for DES and 3DES ECB mode as being the DES blocksize of 8. This is incorrect as ECB mode does not need any IV. 
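
Removing the two assignments is enough to zero the advertised IV size:
the templates are designated initializers, and C zero-initializes every
member not named in such an initializer. A minimal standalone sketch of
the rule relied on here (names are illustrative only):

	struct alg_params {
		unsigned int min_keysize;
		unsigned int max_keysize;
		unsigned int ivsize;
	};

	/* .ivsize is never named, so it is implicitly 0 - right for ECB */
	static const struct alg_params ecb_des_params = {
		.min_keysize = 8,	/* DES_KEY_SIZE */
		.max_keysize = 8,
	};
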
Signed-off-by: Pascal van Leeuwen Acked-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel_cipher.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 8cdbdbe35681..82f293caf68a 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -1037,7 +1037,6 @@ struct safexcel_alg_template safexcel_alg_ecb_des = { .decrypt = safexcel_ecb_des_decrypt, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, - .ivsize = DES_BLOCK_SIZE, .base = { .cra_name = "ecb(des)", .cra_driver_name = "safexcel-ecb-des", @@ -1140,7 +1139,6 @@ struct safexcel_alg_template safexcel_alg_ecb_des3_ede = { .decrypt = safexcel_ecb_des3_ede_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES3_EDE_BLOCK_SIZE, .base = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "safexcel-ecb-des3_ede", -- cgit From 384ce433884af04c2063ec8a0ec0464b3fb6f1d2 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Tue, 2 Jul 2019 16:39:53 +0200 Subject: crypto: inside-secure - silently return -EINVAL for input error cases Driver was printing an error message for certain input error cases that should just return -EINVAL, which caused the related testmgr extra tests to flood the kernel message log. Ensured those cases remain silent while making some other device-specific errors a bit more verbose. Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index df43a2c6933b..045919651272 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -589,16 +589,31 @@ finalize: inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv, struct safexcel_result_desc *rdesc) { - if (likely(!rdesc->result_data.error_code)) + if (likely((!rdesc->descriptor_overflow) && + (!rdesc->buffer_overflow) && + (!rdesc->result_data.error_code))) return 0; - if (rdesc->result_data.error_code & 0x407f) { - /* Fatal error (bits 0-7, 14) */ + if (rdesc->descriptor_overflow) + dev_err(priv->dev, "Descriptor overflow detected"); + + if (rdesc->buffer_overflow) + dev_err(priv->dev, "Buffer overflow detected"); + + if (rdesc->result_data.error_code & 0x4067) { + /* Fatal error (bits 0,1,2,5,6 & 14) */ dev_err(priv->dev, - "cipher: result: result descriptor error (0x%x)\n", + "result descriptor error (%x)", rdesc->result_data.error_code); + return -EIO; + } else if (rdesc->result_data.error_code & + (BIT(7) | BIT(4) | BIT(3))) { + /* + * Give priority over authentication fails: + * Blocksize & overflow errors, something wrong with the input! + */ return -EINVAL; - } else if (rdesc->result_data.error_code == BIT(9)) { + } else if (rdesc->result_data.error_code & BIT(9)) { /* Authentication failed */ return -EBADMSG; } -- cgit From 5bdb6e6aa53bf5c6e6082a4bb44e1106a22d68b8 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Tue, 2 Jul 2019 16:39:54 +0200 Subject: crypto: inside-secure - fix incorrect skcipher output IV This patch fixes corruption issues with the skcipher output IV witnessed on x86+EIP197-FPGA (devboard). 
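
The core of the fix (explained in detail below) is to read the result
IV directly from the data scatterlist instead of from the hardware
context record; a sketch of the encrypt-side copy, using the names this
patch introduces:

	/* CBC result IV == last output block: copy it from the very end
	 * of the destination scatterlist into the caller's IV buffer */
	sg_pcopy_to_buffer(dst, sg_nents(dst), areq->iv,
			   crypto_skcipher_ivsize(skcipher),
			   cryptlen - crypto_skcipher_ivsize(skcipher));
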
The original fix, commit 57660b11d5ad ("crypto: inside-secure - implement IV retrieval"), attempted to write out the result IV through the context record. However, this is not a reliable mechanism as there is no way of knowing the hardware context update actually arrived in memory, so it is possible to read the old contents instead of the updated IV. (and indeed, this failed for the x86/FPGA case) The alternative approach used here recognises the fact that the result IV for CBC is actually the last cipher block, which is the last input block in case of decryption and the last output block in case of encryption. So the result IV is taken from the input data buffer respectively the output data buffer instead, which *is* reliable. Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel_cipher.c | 84 +++++++++++++------------- 1 file changed, 43 insertions(+), 41 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 82f293caf68a..f03640cc2095 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -92,25 +92,6 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, token[0].instructions = EIP197_TOKEN_INS_LAST | EIP197_TOKEN_INS_TYPE_CRYTO | EIP197_TOKEN_INS_TYPE_OUTPUT; - - if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { - u32 last = (EIP197_MAX_TOKENS - 1) - offset; - - token[last].opcode = EIP197_TOKEN_OPCODE_CTX_ACCESS; - token[last].packet_length = EIP197_TOKEN_DIRECTION_EXTERNAL | - EIP197_TOKEN_EXEC_IF_SUCCESSFUL| - EIP197_TOKEN_CTX_OFFSET(0x2); - token[last].stat = EIP197_TOKEN_STAT_LAST_HASH | - EIP197_TOKEN_STAT_LAST_PACKET; - token[last].instructions = - EIP197_TOKEN_INS_ORIGIN_LEN(block_sz / sizeof(u32)) | - EIP197_TOKEN_INS_ORIGIN_IV0; - - /* Store the updated IV values back in the internal context - * registers. 
- */ - cdesc->control_data.control1 |= CONTEXT_CONTROL_CRYPTO_STORE; - } } static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, @@ -348,6 +329,9 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin struct safexcel_cipher_req *sreq, bool *should_complete, int *ret) { + struct skcipher_request *areq = skcipher_request_cast(async); + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(areq); + struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(skcipher); struct safexcel_result_desc *rdesc; int ndesc = 0; @@ -380,6 +364,18 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin dma_unmap_sg(priv->dev, dst, sg_nents(dst), DMA_FROM_DEVICE); } + /* + * Update IV in req from last crypto output word for CBC modes + */ + if ((!ctx->aead) && (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) && + (sreq->direction == SAFEXCEL_ENCRYPT)) { + /* For encrypt take the last output word */ + sg_pcopy_to_buffer(dst, sg_nents(dst), areq->iv, + crypto_skcipher_ivsize(skcipher), + (cryptlen - + crypto_skcipher_ivsize(skcipher))); + } + *should_complete = true; return ndesc; @@ -392,6 +388,8 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring, unsigned int digestsize, u8 *iv, int *commands, int *results) { + struct skcipher_request *areq = skcipher_request_cast(base); + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(areq); struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); struct safexcel_crypto_priv *priv = ctx->priv; struct safexcel_command_desc *cdesc; @@ -401,6 +399,19 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring, int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen; int i, ret = 0; + if ((!ctx->aead) && (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) && + (sreq->direction == SAFEXCEL_DECRYPT)) { + /* + * Save IV from last crypto input word for CBC modes in decrypt + * direction. Need to do this first in case of inplace operation + * as it will be overwritten. 
+ */ + sg_pcopy_to_buffer(src, sg_nents(src), areq->iv, + crypto_skcipher_ivsize(skcipher), + (totlen - + crypto_skcipher_ivsize(skcipher))); + } + if (src == dst) { nr_src = dma_map_sg(priv->dev, src, sg_nents(src), DMA_BIDIRECTIONAL); @@ -570,7 +581,6 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv, { struct skcipher_request *req = skcipher_request_cast(async); struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); - struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(async->tfm); int err; if (sreq->needs_inv) { @@ -581,24 +591,6 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv, err = safexcel_handle_req_result(priv, ring, async, req->src, req->dst, req->cryptlen, sreq, should_complete, ret); - - if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { - u32 block_sz = 0; - - switch (ctx->alg) { - case SAFEXCEL_DES: - block_sz = DES_BLOCK_SIZE; - break; - case SAFEXCEL_3DES: - block_sz = DES3_EDE_BLOCK_SIZE; - break; - case SAFEXCEL_AES: - block_sz = AES_BLOCK_SIZE; - break; - } - - memcpy(req->iv, ctx->base.ctxr->data, block_sz); - } } return err; @@ -656,12 +648,22 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring, BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv); - if (sreq->needs_inv) + if (sreq->needs_inv) { ret = safexcel_cipher_send_inv(async, ring, commands, results); - else + } else { + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + u8 input_iv[AES_BLOCK_SIZE]; + + /* + * Save input IV in case of CBC decrypt mode + * Will be overwritten with output IV prior to use! + */ + memcpy(input_iv, req->iv, crypto_skcipher_ivsize(skcipher)); + ret = safexcel_send_req(async, ring, sreq, req->src, - req->dst, req->cryptlen, 0, 0, req->iv, + req->dst, req->cryptlen, 0, 0, input_iv, commands, results); + } sreq->rdescs = *results; return ret; -- cgit From 19b347b32bb195c50aea56b1ebf524e461e39827 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Tue, 2 Jul 2019 16:39:55 +0200 Subject: crypto: inside-secure - fix scatter/gather list to descriptor conversion Fixed issues with the skcipher and AEAD scatter/gather list to engine descriptor conversion code which caused either too much or too little buffer space to be provided to the hardware. This caused errors with the testmgr extra tests, either kernel panics (on x86-EIP197-FPGA) or engine descriptor errors 0x1, 0x8 or 0x9 (on Macchiatobin e.g. Marvell A8K). With this patch in place, all skcipher and AEAD (extra) tests pass. 
Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel_cipher.c | 182 ++++++++++++++++++------- 1 file changed, 136 insertions(+), 46 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index f03640cc2095..4f25f5c9dfd6 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -54,6 +54,7 @@ struct safexcel_cipher_req { /* Number of result descriptors associated to the request */ unsigned int rdescs; bool needs_inv; + int nr_src, nr_dst; }; static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, @@ -358,10 +359,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin safexcel_complete(priv, ring); if (src == dst) { - dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_BIDIRECTIONAL); + dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL); } else { - dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_TO_DEVICE); - dma_unmap_sg(priv->dev, dst, sg_nents(dst), DMA_FROM_DEVICE); + dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE); + dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE); } /* @@ -370,7 +371,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin if ((!ctx->aead) && (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) && (sreq->direction == SAFEXCEL_ENCRYPT)) { /* For encrypt take the last output word */ - sg_pcopy_to_buffer(dst, sg_nents(dst), areq->iv, + sg_pcopy_to_buffer(dst, sreq->nr_dst, areq->iv, crypto_skcipher_ivsize(skcipher), (cryptlen - crypto_skcipher_ivsize(skcipher))); @@ -393,63 +394,99 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring, struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); struct safexcel_crypto_priv *priv = ctx->priv; struct safexcel_command_desc *cdesc; + struct safexcel_command_desc *first_cdesc = NULL; struct safexcel_result_desc *rdesc, *first_rdesc = NULL; struct scatterlist *sg; - unsigned int totlen = cryptlen + assoclen; - int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen; - int i, ret = 0; + unsigned int totlen; + unsigned int totlen_src = cryptlen + assoclen; + unsigned int totlen_dst = totlen_src; + int n_cdesc = 0, n_rdesc = 0; + int queued, i, ret = 0; + bool first = true; - if ((!ctx->aead) && (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) && - (sreq->direction == SAFEXCEL_DECRYPT)) { + sreq->nr_src = sg_nents_for_len(src, totlen_src); + + if (ctx->aead) { + /* + * AEAD has auth tag appended to output for encrypt and + * removed from the output for decrypt! + */ + if (sreq->direction == SAFEXCEL_DECRYPT) + totlen_dst -= digestsize; + else + totlen_dst += digestsize; + + memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32), + ctx->ipad, ctx->state_sz); + memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) / + sizeof(u32), + ctx->opad, ctx->state_sz); + } else if ((ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) && + (sreq->direction == SAFEXCEL_DECRYPT)) { /* * Save IV from last crypto input word for CBC modes in decrypt * direction. Need to do this first in case of inplace operation * as it will be overwritten. 
*/ - sg_pcopy_to_buffer(src, sg_nents(src), areq->iv, + sg_pcopy_to_buffer(src, sreq->nr_src, areq->iv, crypto_skcipher_ivsize(skcipher), - (totlen - + (totlen_src - crypto_skcipher_ivsize(skcipher))); } + sreq->nr_dst = sg_nents_for_len(dst, totlen_dst); + + /* + * Remember actual input length, source buffer length may be + * updated in case of inline operation below. + */ + totlen = totlen_src; + queued = totlen_src; + if (src == dst) { - nr_src = dma_map_sg(priv->dev, src, sg_nents(src), - DMA_BIDIRECTIONAL); - nr_dst = nr_src; - if (!nr_src) + sreq->nr_src = max(sreq->nr_src, sreq->nr_dst); + sreq->nr_dst = sreq->nr_src; + if (unlikely((totlen_src || totlen_dst) && + (sreq->nr_src <= 0))) { + dev_err(priv->dev, "In-place buffer not large enough (need %d bytes)!", + max(totlen_src, totlen_dst)); return -EINVAL; + } + dma_map_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL); } else { - nr_src = dma_map_sg(priv->dev, src, sg_nents(src), - DMA_TO_DEVICE); - if (!nr_src) + if (unlikely(totlen_src && (sreq->nr_src <= 0))) { + dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!", + totlen_src); return -EINVAL; + } + dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE); - nr_dst = dma_map_sg(priv->dev, dst, sg_nents(dst), - DMA_FROM_DEVICE); - if (!nr_dst) { - dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE); + if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) { + dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!", + totlen_dst); + dma_unmap_sg(priv->dev, src, sreq->nr_src, + DMA_TO_DEVICE); return -EINVAL; } + dma_map_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE); } memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len); - if (ctx->aead) { - memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32), - ctx->ipad, ctx->state_sz); - memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) / sizeof(u32), - ctx->opad, ctx->state_sz); - } + /* The EIP cannot deal with zero length input packets! */ + if (totlen == 0) + totlen = 1; /* command descriptors */ - for_each_sg(src, sg, nr_src, i) { + for_each_sg(src, sg, sreq->nr_src, i) { int len = sg_dma_len(sg); /* Do not overflow the request */ if (queued - len < 0) len = queued; - cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len), + cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, + !(queued - len), sg_dma_address(sg), len, totlen, ctx->base.ctxr_dma); if (IS_ERR(cdesc)) { @@ -460,14 +497,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring, n_cdesc++; if (n_cdesc == 1) { - safexcel_context_control(ctx, base, sreq, cdesc); - if (ctx->aead) - safexcel_aead_token(ctx, iv, cdesc, - sreq->direction, cryptlen, - assoclen, digestsize); - else - safexcel_skcipher_token(ctx, iv, cdesc, - cryptlen); + first_cdesc = cdesc; } queued -= len; @@ -475,23 +505,83 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring, break; } + if (unlikely(!n_cdesc)) { + /* + * Special case: zero length input buffer. + * The engine always needs the 1st command descriptor, however! 
+ */ + first_cdesc = safexcel_add_cdesc(priv, ring, 1, 1, 0, 0, totlen, + ctx->base.ctxr_dma); + n_cdesc = 1; + } + + /* Add context control words and token to first command descriptor */ + safexcel_context_control(ctx, base, sreq, first_cdesc); + if (ctx->aead) + safexcel_aead_token(ctx, iv, first_cdesc, + sreq->direction, cryptlen, + assoclen, digestsize); + else + safexcel_skcipher_token(ctx, iv, first_cdesc, + cryptlen); + /* result descriptors */ - for_each_sg(dst, sg, nr_dst, i) { - bool first = !i, last = sg_is_last(sg); + for_each_sg(dst, sg, sreq->nr_dst, i) { + bool last = (i == sreq->nr_dst - 1); u32 len = sg_dma_len(sg); - rdesc = safexcel_add_rdesc(priv, ring, first, last, - sg_dma_address(sg), len); + /* only allow the part of the buffer we know we need */ + if (len > totlen_dst) + len = totlen_dst; + if (unlikely(!len)) + break; + totlen_dst -= len; + + /* skip over AAD space in buffer - not written */ + if (assoclen) { + if (assoclen >= len) { + assoclen -= len; + continue; + } + rdesc = safexcel_add_rdesc(priv, ring, first, last, + sg_dma_address(sg) + + assoclen, + len - assoclen); + assoclen = 0; + } else { + rdesc = safexcel_add_rdesc(priv, ring, first, last, + sg_dma_address(sg), + len); + } if (IS_ERR(rdesc)) { /* No space left in the result descriptor ring */ ret = PTR_ERR(rdesc); goto rdesc_rollback; } - if (first) + if (first) { first_rdesc = rdesc; + first = false; + } n_rdesc++; } + if (unlikely(first)) { + /* + * Special case: AEAD decrypt with only AAD data. + * In this case there is NO output data from the engine, + * but the engine still needs a result descriptor! + * Create a dummy one just for catching the result token. + */ + rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0); + if (IS_ERR(rdesc)) { + /* No space left in the result descriptor ring */ + ret = PTR_ERR(rdesc); + goto rdesc_rollback; + } + first_rdesc = rdesc; + n_rdesc = 1; + } + safexcel_rdr_req_set(priv, ring, first_rdesc, base); *commands = n_cdesc; @@ -506,10 +596,10 @@ cdesc_rollback: safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr); if (src == dst) { - dma_unmap_sg(priv->dev, src, nr_src, DMA_BIDIRECTIONAL); + dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL); } else { - dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE); - dma_unmap_sg(priv->dev, dst, nr_dst, DMA_FROM_DEVICE); + dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE); + dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE); } return ret; -- cgit From a74d850f787ef9578fa2829bfaecb222269cb754 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Tue, 2 Jul 2019 16:39:56 +0200 Subject: crypto: inside-secure - fix EINVAL error (buf overflow) for AEAD decrypt This patch fixes a buffer overflow error returning -EINVAL for AEAD decrypt operations by NOT appending the (already verified) ICV to the output packet (which is not expected by the API anyway). With this fix, all testmgr AEAD (extra) tests now pass. 
Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.h | 2 +- drivers/crypto/inside-secure/safexcel_cipher.c | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index e0c202f33674..91d221b421fd 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -458,7 +458,7 @@ static inline void eip197_noop_token(struct safexcel_token *token) #define EIP197_TOKEN_INS_ORIGIN_LEN(x) ((x) << 5) #define EIP197_TOKEN_INS_TYPE_OUTPUT BIT(5) #define EIP197_TOKEN_INS_TYPE_HASH BIT(6) -#define EIP197_TOKEN_INS_TYPE_CRYTO BIT(7) +#define EIP197_TOKEN_INS_TYPE_CRYPTO BIT(7) #define EIP197_TOKEN_INS_LAST BIT(8) /* Processing Engine Control Data */ diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 4f25f5c9dfd6..e87428733671 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -91,7 +91,7 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET | EIP197_TOKEN_STAT_LAST_HASH; token[0].instructions = EIP197_TOKEN_INS_LAST | - EIP197_TOKEN_INS_TYPE_CRYTO | + EIP197_TOKEN_INS_TYPE_CRYPTO | EIP197_TOKEN_INS_TYPE_OUTPUT; } @@ -117,14 +117,13 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; token[0].packet_length = assoclen; - token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH | - EIP197_TOKEN_INS_TYPE_OUTPUT; + token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH; token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION; token[1].packet_length = cryptlen; token[1].stat = EIP197_TOKEN_STAT_LAST_HASH; token[1].instructions = EIP197_TOKEN_INS_LAST | - EIP197_TOKEN_INS_TYPE_CRYTO | + EIP197_TOKEN_INS_TYPE_CRYPTO | EIP197_TOKEN_INS_TYPE_HASH | EIP197_TOKEN_INS_TYPE_OUTPUT; -- cgit From dc5268b65dfc147850e0d2c49b000cb385d7b026 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Tue, 2 Jul 2019 16:39:57 +0200 Subject: crypto: inside-secure: back out parts of earlier HMAC update workaround This patch backs out some changes done with commit 082ec2d48467 - "add support for HMAC updates" as that update just works around the issue for the basic tests by providing twice the amount of buffering, but this does not solve the case of much larger data blocks such as those performed by the extra tests. This is in preparation of an actual solution in the next patch(es), which does not actually require any extra buffering at all. 
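
For reference, the only buffering a hash update path fundamentally
needs is one block of carry-over; a sketch of the usual partial-block
bookkeeping, which is the logic this series converges on:

	/* cache the trailing partial block; if the data is exactly
	 * block-aligned, keep one full block back, since the last
	 * block may need final-hash treatment */
	extra = queued & (blocksize - 1);
	if (!extra)
		extra = blocksize;
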
Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel_hash.c | 32 +++++++++++----------------- 1 file changed, 13 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index a80a5e757b1f..ab972227ed90 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -41,11 +41,11 @@ struct safexcel_ahash_req { u64 len[2]; u64 processed[2]; - u8 cache[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32)); + u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32)); dma_addr_t cache_dma; unsigned int cache_sz; - u8 cache_next[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32)); + u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32)); }; static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req) @@ -89,9 +89,6 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, cdesc->control_data.control0 |= ctx->alg; cdesc->control_data.control0 |= req->digest; - if (!req->finish) - cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH; - if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) { if (req->processed[0] || req->processed[1]) { if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5) @@ -110,6 +107,9 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH; } + if (!req->finish) + cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH; + /* * Copy the input digest if needed, and setup the context * fields. Do this now as we need it to setup the first command @@ -216,8 +216,6 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, u64 queued, len, cache_len, cache_max; cache_max = crypto_ahash_blocksize(ahash); - if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) - cache_max <<= 1; queued = len = safexcel_queued_len(req); if (queued <= cache_max) @@ -229,17 +227,13 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, /* If this is not the last request and the queued data does not * fit into full blocks, cache it for the next send() call. */ - extra = queued & (crypto_ahash_blocksize(ahash) - 1); - - if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC && - extra < crypto_ahash_blocksize(ahash)) - extra += crypto_ahash_blocksize(ahash); + extra = queued & (cache_max - 1); /* If this is not the last request and the queued data * is a multiple of a block, cache the last one for now. 
*/ if (!extra) - extra = crypto_ahash_blocksize(ahash); + extra = cache_max; sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), req->cache_next, extra, @@ -247,6 +241,12 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, queued -= extra; len -= extra; + + if (!queued) { + *commands = 0; + *results = 0; + return 0; + } } /* Add a command descriptor for the cached data, if any */ @@ -613,8 +613,6 @@ static int safexcel_ahash_update(struct ahash_request *areq) req->len[1]++; cache_max = crypto_ahash_blocksize(ahash); - if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) - cache_max <<= 1; safexcel_ahash_cache(areq, cache_max); @@ -689,8 +687,6 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out) u32 cache_sz; cache_sz = crypto_ahash_blocksize(ahash); - if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) - cache_sz <<= 1; export->len[0] = req->len[0]; export->len[1] = req->len[1]; @@ -718,8 +714,6 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in) return ret; cache_sz = crypto_ahash_blocksize(ahash); - if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) - cache_sz <<= 1; req->len[0] = export->len[0]; req->len[1] = export->len[1]; -- cgit From 85695b093d559bcd2393a65373355390ac62d67b Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Tue, 2 Jul 2019 16:39:58 +0200 Subject: crypto: inside-secure - let HW deal with initial hash digest The driver was loading the initial digest for hash operations into the hardware explicitly, but this is not needed as the hardware can handle that by itself, which is more efficient and avoids any context record coherence issues. Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel_hash.c | 71 +++------------------------- 1 file changed, 6 insertions(+), 65 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index ab972227ed90..d3c64901e2a9 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -641,8 +641,12 @@ static int safexcel_ahash_final(struct ahash_request *areq) req->last_req = true; req->finish = true; - /* If we have an overall 0 length request */ - if (!req->len[0] && !req->len[1] && !areq->nbytes) { + if (unlikely(!req->len[0] && !req->len[1] && !areq->nbytes)) { + /* + * If we have an overall 0 length *hash* request: + * The HW cannot do 0 length hash, so we provide the correct + * result directly here. 
+ */ if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5) memcpy(areq->result, md5_zero_message_hash, MD5_DIGEST_SIZE); @@ -751,12 +755,6 @@ static int safexcel_sha1_init(struct ahash_request *areq) memset(req, 0, sizeof(*req)); - req->state[0] = SHA1_H0; - req->state[1] = SHA1_H1; - req->state[2] = SHA1_H2; - req->state[3] = SHA1_H3; - req->state[4] = SHA1_H4; - ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; req->state_sz = SHA1_DIGEST_SIZE; @@ -1065,15 +1063,6 @@ static int safexcel_sha256_init(struct ahash_request *areq) memset(req, 0, sizeof(*req)); - req->state[0] = SHA256_H0; - req->state[1] = SHA256_H1; - req->state[2] = SHA256_H2; - req->state[3] = SHA256_H3; - req->state[4] = SHA256_H4; - req->state[5] = SHA256_H5; - req->state[6] = SHA256_H6; - req->state[7] = SHA256_H7; - ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; req->state_sz = SHA256_DIGEST_SIZE; @@ -1128,15 +1117,6 @@ static int safexcel_sha224_init(struct ahash_request *areq) memset(req, 0, sizeof(*req)); - req->state[0] = SHA224_H0; - req->state[1] = SHA224_H1; - req->state[2] = SHA224_H2; - req->state[3] = SHA224_H3; - req->state[4] = SHA224_H4; - req->state[5] = SHA224_H5; - req->state[6] = SHA224_H6; - req->state[7] = SHA224_H7; - ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; req->state_sz = SHA256_DIGEST_SIZE; @@ -1305,23 +1285,6 @@ static int safexcel_sha512_init(struct ahash_request *areq) memset(req, 0, sizeof(*req)); - req->state[0] = lower_32_bits(SHA512_H0); - req->state[1] = upper_32_bits(SHA512_H0); - req->state[2] = lower_32_bits(SHA512_H1); - req->state[3] = upper_32_bits(SHA512_H1); - req->state[4] = lower_32_bits(SHA512_H2); - req->state[5] = upper_32_bits(SHA512_H2); - req->state[6] = lower_32_bits(SHA512_H3); - req->state[7] = upper_32_bits(SHA512_H3); - req->state[8] = lower_32_bits(SHA512_H4); - req->state[9] = upper_32_bits(SHA512_H4); - req->state[10] = lower_32_bits(SHA512_H5); - req->state[11] = upper_32_bits(SHA512_H5); - req->state[12] = lower_32_bits(SHA512_H6); - req->state[13] = upper_32_bits(SHA512_H6); - req->state[14] = lower_32_bits(SHA512_H7); - req->state[15] = upper_32_bits(SHA512_H7); - ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; req->state_sz = SHA512_DIGEST_SIZE; @@ -1376,23 +1339,6 @@ static int safexcel_sha384_init(struct ahash_request *areq) memset(req, 0, sizeof(*req)); - req->state[0] = lower_32_bits(SHA384_H0); - req->state[1] = upper_32_bits(SHA384_H0); - req->state[2] = lower_32_bits(SHA384_H1); - req->state[3] = upper_32_bits(SHA384_H1); - req->state[4] = lower_32_bits(SHA384_H2); - req->state[5] = upper_32_bits(SHA384_H2); - req->state[6] = lower_32_bits(SHA384_H3); - req->state[7] = upper_32_bits(SHA384_H3); - req->state[8] = lower_32_bits(SHA384_H4); - req->state[9] = upper_32_bits(SHA384_H4); - req->state[10] = lower_32_bits(SHA384_H5); - req->state[11] = upper_32_bits(SHA384_H5); - req->state[12] = lower_32_bits(SHA384_H6); - req->state[13] = upper_32_bits(SHA384_H6); - req->state[14] = lower_32_bits(SHA384_H7); - req->state[15] = upper_32_bits(SHA384_H7); - ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; req->state_sz = SHA512_DIGEST_SIZE; @@ -1561,11 +1507,6 @@ static int safexcel_md5_init(struct ahash_request *areq) memset(req, 0, sizeof(*req)); - req->state[0] = MD5_H0; - req->state[1] = MD5_H1; - req->state[2] = MD5_H2; - 
req->state[3] = MD5_H3; - ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; req->state_sz = MD5_DIGEST_SIZE; -- cgit From 41abed7d72c93fd31b0c1d51f42606216f1ea882 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Tue, 2 Jul 2019 16:39:59 +0200 Subject: crypto: inside-secure - add support for arbitrary size hash/HMAC updates This patch fixes an issue with hash and HMAC operations that perform "large" intermediate updates (i.e. combined size > 2 hash blocks) by actually making use of the hardware's hash continue capabilities. The original implementation would cache these updates in a buffer that was 2 hash blocks in size and fail if all update calls combined would overflow that buffer, which caused the cryptomgr extra tests to fail. Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.h | 4 +- drivers/crypto/inside-secure/safexcel_hash.c | 423 +++++++++++++++++---------- 2 files changed, 269 insertions(+), 158 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 91d221b421fd..b73b17dcb8b1 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -637,6 +637,8 @@ struct safexcel_context { bool exit_inv; }; +#define HASH_CACHE_SIZE SHA512_BLOCK_SIZE + struct safexcel_ahash_export_state { u64 len[2]; u64 processed[2]; @@ -644,7 +646,7 @@ struct safexcel_ahash_export_state { u32 digest; u32 state[SHA512_DIGEST_SIZE / sizeof(u32)]; - u8 cache[SHA512_BLOCK_SIZE << 1]; + u8 cache[HASH_CACHE_SIZE]; }; /* diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index d3c64901e2a9..1476574b346e 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -35,17 +35,18 @@ struct safexcel_ahash_req { u32 digest; - u8 state_sz; /* expected sate size, only set once */ + u8 state_sz; /* expected state size, only set once */ + u8 block_sz; /* block size, only set once */ u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32)); u64 len[2]; u64 processed[2]; - u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32)); + u8 cache[HASH_CACHE_SIZE] __aligned(sizeof(u32)); dma_addr_t cache_dma; unsigned int cache_sz; - u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32)); + u8 cache_next[HASH_CACHE_SIZE] __aligned(sizeof(u32)); }; static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req) @@ -79,75 +80,99 @@ static void safexcel_hash_token(struct safexcel_command_desc *cdesc, static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, struct safexcel_ahash_req *req, - struct safexcel_command_desc *cdesc, - unsigned int digestsize) + struct safexcel_command_desc *cdesc) { struct safexcel_crypto_priv *priv = ctx->priv; - int i; + u64 count = 0; - cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT; cdesc->control_data.control0 |= ctx->alg; - cdesc->control_data.control0 |= req->digest; - - if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) { - if (req->processed[0] || req->processed[1]) { - if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5) - cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(5); - else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1) - cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6); - else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 || - ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256) - cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9); - else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384 || - ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512) - cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(17); - - cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT; + + /* + * Copy the input digest if needed, and setup the context + * fields. Do this now as we need it to setup the first command + * descriptor. + */ + if ((!req->processed[0]) && (!req->processed[1])) { + /* First - and possibly only - block of basic hash only */ + if (req->finish) { + cdesc->control_data.control0 |= + CONTEXT_CONTROL_TYPE_HASH_OUT | + CONTEXT_CONTROL_RESTART_HASH | + /* ensure it's not 0! */ + CONTEXT_CONTROL_SIZE(1); } else { - cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH; + cdesc->control_data.control0 |= + CONTEXT_CONTROL_TYPE_HASH_OUT | + CONTEXT_CONTROL_RESTART_HASH | + CONTEXT_CONTROL_NO_FINISH_HASH | + /* ensure it's not 0! */ + CONTEXT_CONTROL_SIZE(1); } + return; + } - if (!req->finish) - cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH; - - /* - * Copy the input digest if needed, and setup the context - * fields. Do this now as we need it to setup the first command - * descriptor. - */ - if (req->processed[0] || req->processed[1]) { - for (i = 0; i < digestsize / sizeof(u32); i++) - ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]); - - if (req->finish) { - u64 count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE; - count += ((0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * - req->processed[1]); - - /* This is a haredware limitation, as the - * counter must fit into an u32. This represents - * a farily big amount of input data, so we - * shouldn't see this. - */ - if (unlikely(count & 0xffff0000)) { - dev_warn(priv->dev, - "Input data is too big\n"); - return; - } - - ctx->base.ctxr->data[i] = cpu_to_le32(count); + /* Hash continuation or HMAC, setup (inner) digest from state */ + memcpy(ctx->base.ctxr->data, req->state, req->state_sz); + + if (req->finish) { + /* Compute digest count for hash/HMAC finish operations */ + if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) || + req->processed[1] || + (req->processed[0] != req->block_sz)) { + count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE; + count += ((0x100000000ULL / EIP197_COUNTER_BLOCK_SIZE) * + req->processed[1]); + + /* This is a hardware limitation, as the + * counter must fit into a u32. This represents + * a fairly big amount of input data, so we + * shouldn't see this.
+ */ + if (unlikely(count & 0xffffffff00000000ULL)) { + dev_warn(priv->dev, + "Input data is too big\n"); + return; } } - } else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) { - cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32)); - memcpy(ctx->base.ctxr->data, ctx->ipad, req->state_sz); - memcpy(ctx->base.ctxr->data + req->state_sz / sizeof(u32), - ctx->opad, req->state_sz); + if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) || + /* PE HW < 4.4 cannot do HMAC continue, fake using hash */ + ((req->processed[1] || + (req->processed[0] != req->block_sz)))) { + /* Basic hash continue operation, need digest + cnt */ + cdesc->control_data.control0 |= + CONTEXT_CONTROL_SIZE((req->state_sz >> 2) + 1) | + CONTEXT_CONTROL_TYPE_HASH_OUT | + CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + cdesc->control_data.control1 |= + CONTEXT_CONTROL_DIGEST_CNT; + ctx->base.ctxr->data[req->state_sz >> 2] = + cpu_to_le32(count); + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + } else { /* HMAC */ + /* Need outer digest for HMAC finalization */ + memcpy(ctx->base.ctxr->data + (req->state_sz >> 2), + ctx->opad, req->state_sz); + + /* Single pass HMAC - no digest count */ + cdesc->control_data.control0 |= + CONTEXT_CONTROL_SIZE(req->state_sz >> 1) | + CONTEXT_CONTROL_TYPE_HASH_OUT | + CONTEXT_CONTROL_DIGEST_HMAC; + } + } else { /* Hash continuation, do not finish yet */ + cdesc->control_data.control0 |= + CONTEXT_CONTROL_SIZE(req->state_sz >> 2) | + CONTEXT_CONTROL_DIGEST_PRECOMPUTED | + CONTEXT_CONTROL_TYPE_HASH_OUT | + CONTEXT_CONTROL_NO_FINISH_HASH; } } -static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring, +static int safexcel_ahash_enqueue(struct ahash_request *areq); + +static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, + int ring, struct crypto_async_request *async, bool *should_complete, int *ret) { @@ -155,6 +180,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin struct ahash_request *areq = ahash_request_cast(async); struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); struct safexcel_ahash_req *sreq = ahash_request_ctx(areq); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash); u64 cache_len; *ret = 0; @@ -188,9 +214,33 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin sreq->cache_sz = 0; } - if (sreq->finish) + if (sreq->finish) { + if (sreq->hmac && + (sreq->digest != CONTEXT_CONTROL_DIGEST_HMAC)) { + /* Faking HMAC using hash - need to do outer hash */ + memcpy(sreq->cache, sreq->state, + crypto_ahash_digestsize(ahash)); + + memcpy(sreq->state, ctx->opad, sreq->state_sz); + + sreq->len[0] = sreq->block_sz + + crypto_ahash_digestsize(ahash); + sreq->len[1] = 0; + sreq->processed[0] = sreq->block_sz; + sreq->processed[1] = 0; + sreq->hmac = 0; + + ctx->base.needs_inv = true; + areq->nbytes = 0; + safexcel_ahash_enqueue(areq); + + *should_complete = false; /* Not done yet */ + return 1; + } + memcpy(areq->result, sreq->state, crypto_ahash_digestsize(ahash)); + } cache_len = safexcel_queued_len(sreq); if (cache_len) @@ -205,7 +255,6 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, int *commands, int *results) { struct ahash_request *areq = ahash_request_cast(async); - struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); struct safexcel_ahash_req *req = ahash_request_ctx(areq); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); struct safexcel_crypto_priv *priv = ctx->priv; @@ 
-213,27 +262,25 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, struct safexcel_result_desc *rdesc; struct scatterlist *sg; int i, extra = 0, n_cdesc = 0, ret = 0; - u64 queued, len, cache_len, cache_max; - - cache_max = crypto_ahash_blocksize(ahash); + u64 queued, len, cache_len; queued = len = safexcel_queued_len(req); - if (queued <= cache_max) + if (queued <= HASH_CACHE_SIZE) cache_len = queued; else cache_len = queued - areq->nbytes; - if (!req->last_req) { + if (!req->finish && !req->last_req) { /* If this is not the last request and the queued data does not - * fit into full blocks, cache it for the next send() call. + * fit into full cache blocks, cache it for the next send call. */ - extra = queued & (cache_max - 1); + extra = queued & (HASH_CACHE_SIZE - 1); /* If this is not the last request and the queued data * is a multiple of a block, cache the last one for now. */ if (!extra) - extra = cache_max; + extra = HASH_CACHE_SIZE; sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), req->cache_next, extra, @@ -272,8 +319,14 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, goto send_command; } + /* Skip descriptor generation for zero-length requests */ + if (!areq->nbytes) + goto send_command; + /* Now handle the current ahash request buffer(s) */ - req->nents = dma_map_sg(priv->dev, areq->src, sg_nents(areq->src), + req->nents = dma_map_sg(priv->dev, areq->src, + sg_nents_for_len(areq->src, + areq->nbytes), DMA_TO_DEVICE); if (!req->nents) { ret = -ENOMEM; @@ -288,7 +341,8 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, sglen = queued; cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, - !(queued - sglen), sg_dma_address(sg), + !(queued - sglen), + sg_dma_address(sg), sglen, len, ctx->base.ctxr_dma); if (IS_ERR(cdesc)) { ret = PTR_ERR(cdesc); @@ -306,7 +360,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, send_command: /* Setup the context options */ - safexcel_context_control(ctx, req, first_cdesc, req->state_sz); + safexcel_context_control(ctx, req, first_cdesc); /* Add the token */ safexcel_hash_token(first_cdesc, len, req->state_sz); @@ -355,27 +409,6 @@ unmap_cache: return ret; } -static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq) -{ - struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); - unsigned int state_w_sz = req->state_sz / sizeof(u32); - u64 processed; - int i; - - processed = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE; - processed += (0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * req->processed[1]; - - for (i = 0; i < state_w_sz; i++) - if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i])) - return true; - - if (ctx->base.ctxr->data[state_w_sz] != cpu_to_le32(processed)) - return true; - - return false; -} - static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, int ring, struct crypto_async_request *async, @@ -523,30 +556,25 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) /* safexcel_ahash_cache: cache data until at least one request can be sent to * the engine, aka. when there is at least 1 block size in the pipe. 
*/ -static int safexcel_ahash_cache(struct ahash_request *areq, u32 cache_max) +static int safexcel_ahash_cache(struct ahash_request *areq) { struct safexcel_ahash_req *req = ahash_request_ctx(areq); - u64 queued, cache_len; + u64 cache_len; - /* queued: everything accepted by the driver which will be handled by - * the next send() calls. - * tot sz handled by update() - tot sz handled by send() - */ - queued = safexcel_queued_len(req); /* cache_len: everything accepted by the driver but not sent yet, * tot sz handled by update() - last req sz - tot sz handled by send() */ - cache_len = queued - areq->nbytes; + cache_len = safexcel_queued_len(req); /* * In case there isn't enough bytes to proceed (less than a * block size), cache the data until we have enough. */ - if (cache_len + areq->nbytes <= cache_max) { + if (cache_len + areq->nbytes <= HASH_CACHE_SIZE) { sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), req->cache + cache_len, areq->nbytes, 0); - return areq->nbytes; + return 0; } /* We couldn't cache all the data */ @@ -565,13 +593,25 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq) if (ctx->base.ctxr) { if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv && (req->processed[0] || req->processed[1]) && - req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) - /* We're still setting needs_inv here, even though it is + (/* invalidate for basic hash continuation finish */ + (req->finish && + (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)) || + /* invalidate if (i)digest changed */ + memcmp(ctx->base.ctxr->data, req->state, req->state_sz) || + /* invalidate for HMAC continuation finish */ + (req->finish && (req->processed[1] || + (req->processed[0] != req->block_sz))) || + /* invalidate for HMAC finish with odigest changed */ + (req->finish && + memcmp(ctx->base.ctxr->data + (req->state_sz>>2), + ctx->opad, req->state_sz)))) + /* + * We're still setting needs_inv here, even though it is * cleared right away, because the needs_inv flag can be * set in other functions and we want to keep the same * logic. */ - ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq); + ctx->base.needs_inv = true; if (ctx->base.needs_inv) { ctx->base.needs_inv = false; @@ -601,33 +641,25 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq) static int safexcel_ahash_update(struct ahash_request *areq) { struct safexcel_ahash_req *req = ahash_request_ctx(areq); - struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); - u32 cache_max; + int ret; /* If the request is 0 length, do nothing */ if (!areq->nbytes) return 0; + /* Add request to the cache if it fits */ + ret = safexcel_ahash_cache(areq); + + /* Update total request length */ req->len[0] += areq->nbytes; if (req->len[0] < areq->nbytes) req->len[1]++; - cache_max = crypto_ahash_blocksize(ahash); - - safexcel_ahash_cache(areq, cache_max); - - /* - * We're not doing partial updates when performing an hmac request. - * Everything will be handled by the final() call. + /* If not all data could fit into the cache, go process the excess. + * Also go process immediately for an HMAC IV precompute, which + * will never be finished at all, but needs to be processed anyway. 
*/ - if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) - return 0; - - if (req->hmac) - return safexcel_ahash_enqueue(areq); - - if (!req->last_req && - safexcel_queued_len(req) > cache_max) + if ((ret && !req->finish) || req->last_req) return safexcel_ahash_enqueue(areq); return 0; @@ -638,7 +670,6 @@ static int safexcel_ahash_final(struct ahash_request *areq) struct safexcel_ahash_req *req = ahash_request_ctx(areq); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - req->last_req = true; req->finish = true; if (unlikely(!req->len[0] && !req->len[1] && !areq->nbytes)) { @@ -667,6 +698,14 @@ static int safexcel_ahash_final(struct ahash_request *areq) SHA512_DIGEST_SIZE); return 0; + } else if (unlikely(req->hmac && !req->len[1] && + (req->len[0] == req->block_sz) && + !areq->nbytes)) { + /* TODO: add support for zero length HMAC */ + return 0; + } else if (req->hmac) { + /* Finalize HMAC */ + req->digest = CONTEXT_CONTROL_DIGEST_HMAC; } return safexcel_ahash_enqueue(areq); @@ -676,7 +715,6 @@ static int safexcel_ahash_finup(struct ahash_request *areq) { struct safexcel_ahash_req *req = ahash_request_ctx(areq); - req->last_req = true; req->finish = true; safexcel_ahash_update(areq); @@ -685,12 +723,8 @@ static int safexcel_ahash_finup(struct ahash_request *areq) static int safexcel_ahash_export(struct ahash_request *areq, void *out) { - struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); struct safexcel_ahash_req *req = ahash_request_ctx(areq); struct safexcel_ahash_export_state *export = out; - u32 cache_sz; - - cache_sz = crypto_ahash_blocksize(ahash); export->len[0] = req->len[0]; export->len[1] = req->len[1]; @@ -700,25 +734,21 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out) export->digest = req->digest; memcpy(export->state, req->state, req->state_sz); - memcpy(export->cache, req->cache, cache_sz); + memcpy(export->cache, req->cache, HASH_CACHE_SIZE); return 0; } static int safexcel_ahash_import(struct ahash_request *areq, const void *in) { - struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); struct safexcel_ahash_req *req = ahash_request_ctx(areq); const struct safexcel_ahash_export_state *export = in; - u32 cache_sz; int ret; ret = crypto_ahash_init(areq); if (ret) return ret; - cache_sz = crypto_ahash_blocksize(ahash); - req->len[0] = export->len[0]; req->len[1] = export->len[1]; req->processed[0] = export->processed[0]; @@ -726,7 +756,7 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in) req->digest = export->digest; - memcpy(req->cache, export->cache, cache_sz); + memcpy(req->cache, export->cache, HASH_CACHE_SIZE); memcpy(req->state, export->state, req->state_sz); return 0; @@ -758,6 +788,7 @@ static int safexcel_sha1_init(struct ahash_request *areq) ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; req->state_sz = SHA1_DIGEST_SIZE; + req->block_sz = SHA1_BLOCK_SIZE; return 0; } @@ -824,10 +855,23 @@ struct safexcel_alg_template safexcel_alg_sha1 = { static int safexcel_hmac_sha1_init(struct ahash_request *areq) { + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); struct safexcel_ahash_req *req = ahash_request_ctx(areq); - safexcel_sha1_init(areq); - req->digest = CONTEXT_CONTROL_DIGEST_HMAC; + memset(req, 0, sizeof(*req)); + + /* Start from ipad precompute */ + memcpy(req->state, ctx->ipad, SHA1_DIGEST_SIZE); + /* Already processed the key^ipad part now! 
*/ + req->len[0] = SHA1_BLOCK_SIZE; + req->processed[0] = SHA1_BLOCK_SIZE; + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1; + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + req->state_sz = SHA1_DIGEST_SIZE; + req->block_sz = SHA1_BLOCK_SIZE; + req->hmac = true; + return 0; } @@ -996,21 +1040,16 @@ static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key, struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); struct safexcel_crypto_priv *priv = ctx->priv; struct safexcel_ahash_export_state istate, ostate; - int ret, i; + int ret; ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate); if (ret) return ret; - if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr) { - for (i = 0; i < state_sz / sizeof(u32); i++) { - if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) || - ctx->opad[i] != le32_to_cpu(ostate.state[i])) { - ctx->base.needs_inv = true; - break; - } - } - } + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr && + (memcmp(ctx->ipad, istate.state, state_sz) || + memcmp(ctx->opad, ostate.state, state_sz))) + ctx->base.needs_inv = true; memcpy(ctx->ipad, &istate.state, state_sz); memcpy(ctx->opad, &ostate.state, state_sz); @@ -1066,6 +1105,7 @@ static int safexcel_sha256_init(struct ahash_request *areq) ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; req->state_sz = SHA256_DIGEST_SIZE; + req->block_sz = SHA256_BLOCK_SIZE; return 0; } @@ -1120,6 +1160,7 @@ static int safexcel_sha224_init(struct ahash_request *areq) ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; req->state_sz = SHA256_DIGEST_SIZE; + req->block_sz = SHA256_BLOCK_SIZE; return 0; } @@ -1173,10 +1214,23 @@ static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key, static int safexcel_hmac_sha224_init(struct ahash_request *areq) { + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); struct safexcel_ahash_req *req = ahash_request_ctx(areq); - safexcel_sha224_init(areq); - req->digest = CONTEXT_CONTROL_DIGEST_HMAC; + memset(req, 0, sizeof(*req)); + + /* Start from ipad precompute */ + memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE); + /* Already processed the key^ipad part now! */ + req->len[0] = SHA256_BLOCK_SIZE; + req->processed[0] = SHA256_BLOCK_SIZE; + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224; + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + req->state_sz = SHA256_DIGEST_SIZE; + req->block_sz = SHA256_BLOCK_SIZE; + req->hmac = true; + return 0; } @@ -1230,10 +1284,23 @@ static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key, static int safexcel_hmac_sha256_init(struct ahash_request *areq) { + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); struct safexcel_ahash_req *req = ahash_request_ctx(areq); - safexcel_sha256_init(areq); - req->digest = CONTEXT_CONTROL_DIGEST_HMAC; + memset(req, 0, sizeof(*req)); + + /* Start from ipad precompute */ + memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE); + /* Already processed the key^ipad part now! 
*/ + req->len[0] = SHA256_BLOCK_SIZE; + req->processed[0] = SHA256_BLOCK_SIZE; + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256; + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + req->state_sz = SHA256_DIGEST_SIZE; + req->block_sz = SHA256_BLOCK_SIZE; + req->hmac = true; + return 0; } @@ -1288,6 +1355,7 @@ static int safexcel_sha512_init(struct ahash_request *areq) ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; req->state_sz = SHA512_DIGEST_SIZE; + req->block_sz = SHA512_BLOCK_SIZE; return 0; } @@ -1342,6 +1410,7 @@ static int safexcel_sha384_init(struct ahash_request *areq) ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; req->state_sz = SHA512_DIGEST_SIZE; + req->block_sz = SHA512_BLOCK_SIZE; return 0; } @@ -1395,10 +1464,23 @@ static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key, static int safexcel_hmac_sha512_init(struct ahash_request *areq) { + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); struct safexcel_ahash_req *req = ahash_request_ctx(areq); - safexcel_sha512_init(areq); - req->digest = CONTEXT_CONTROL_DIGEST_HMAC; + memset(req, 0, sizeof(*req)); + + /* Start from ipad precompute */ + memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE); + /* Already processed the key^ipad part now! */ + req->len[0] = SHA512_BLOCK_SIZE; + req->processed[0] = SHA512_BLOCK_SIZE; + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512; + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + req->state_sz = SHA512_DIGEST_SIZE; + req->block_sz = SHA512_BLOCK_SIZE; + req->hmac = true; + return 0; } @@ -1452,10 +1534,23 @@ static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key, static int safexcel_hmac_sha384_init(struct ahash_request *areq) { + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); struct safexcel_ahash_req *req = ahash_request_ctx(areq); - safexcel_sha384_init(areq); - req->digest = CONTEXT_CONTROL_DIGEST_HMAC; + memset(req, 0, sizeof(*req)); + + /* Start from ipad precompute */ + memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE); + /* Already processed the key^ipad part now! */ + req->len[0] = SHA512_BLOCK_SIZE; + req->processed[0] = SHA512_BLOCK_SIZE; + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384; + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + req->state_sz = SHA512_DIGEST_SIZE; + req->block_sz = SHA512_BLOCK_SIZE; + req->hmac = true; + return 0; } @@ -1510,6 +1605,7 @@ static int safexcel_md5_init(struct ahash_request *areq) ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; req->state_sz = MD5_DIGEST_SIZE; + req->block_sz = MD5_HMAC_BLOCK_SIZE; return 0; } @@ -1556,10 +1652,23 @@ struct safexcel_alg_template safexcel_alg_md5 = { static int safexcel_hmac_md5_init(struct ahash_request *areq) { + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); struct safexcel_ahash_req *req = ahash_request_ctx(areq); - safexcel_md5_init(areq); - req->digest = CONTEXT_CONTROL_DIGEST_HMAC; + memset(req, 0, sizeof(*req)); + + /* Start from ipad precompute */ + memcpy(req->state, ctx->ipad, MD5_DIGEST_SIZE); + /* Already processed the key^ipad part now! 
*/ + req->len[0] = MD5_HMAC_BLOCK_SIZE; + req->processed[0] = MD5_HMAC_BLOCK_SIZE; + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5; + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + req->state_sz = MD5_DIGEST_SIZE; + req->block_sz = MD5_HMAC_BLOCK_SIZE; + req->hmac = true; + return 0; } -- cgit From 85b36ee8e9a8b18b3c09b8a8b6ac3dc694584bab Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Tue, 2 Jul 2019 16:40:00 +0200 Subject: crypto: inside-secure - add support for 0 length HMAC messages This patch adds support for the specific corner case of performing HMAC on an empty string (i.e. payload length is zero). This solves the last failing cryptomgr extratests for HMAC. Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel_hash.c | 47 ++++++++++++++++++++++++++-- 1 file changed, 44 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 1476574b346e..a777dec5f41f 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -29,6 +29,8 @@ struct safexcel_ahash_req { bool finish; bool hmac; bool needs_inv; + bool hmac_zlen; + bool len_is_le; int nents; dma_addr_t result_dma; @@ -117,7 +119,7 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, if (req->finish) { /* Compute digest count for hash/HMAC finish operations */ if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) || - req->processed[1] || + req->hmac_zlen || req->processed[1] || (req->processed[0] != req->block_sz)) { count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE; count += ((0x100000000ULL / EIP197_COUNTER_BLOCK_SIZE) * @@ -136,6 +138,8 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, } if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) || + /* Special case: zero length HMAC */ + req->hmac_zlen || /* PE HW < 4.4 cannot do HMAC continue, fake using hash */ ((req->processed[1] || (req->processed[0] != req->block_sz)))) { @@ -144,11 +148,18 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, CONTEXT_CONTROL_SIZE((req->state_sz >> 2) + 1) | CONTEXT_CONTROL_TYPE_HASH_OUT | CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + /* For zero-len HMAC, don't finalize, already padded! */ + if (req->hmac_zlen) + cdesc->control_data.control0 |= + CONTEXT_CONTROL_NO_FINISH_HASH; cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT; ctx->base.ctxr->data[req->state_sz >> 2] = cpu_to_le32(count); req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + + /* Clear zero-length HMAC flag for next operation! */ + req->hmac_zlen = false; } else { /* HMAC */ /* Need outer digest for HMAC finalization */ memcpy(ctx->base.ctxr->data + (req->state_sz >> 2), @@ -701,8 +712,37 @@ static int safexcel_ahash_final(struct ahash_request *areq) } else if (unlikely(req->hmac && !req->len[1] && (req->len[0] == req->block_sz) && !areq->nbytes)) { - /* TODO: add support for zero length HMAC */ - return 0; + /* + * If we have an overall 0 length *HMAC* request: + * For HMAC, we need to finalize the inner digest + * and then perform the outer hash. + */ + + /* generate pad block in the cache */ + /* start with a hash block of all zeroes */ + memset(req->cache, 0, req->block_sz); + /* set the first byte to 0x80 to 'append a 1 bit' */ + req->cache[0] = 0x80; + /* add the length in bits in the last 2 bytes */ + if (req->len_is_le) { + /* Little endian length word (e.g. 
MD5) */ + req->cache[req->block_sz-8] = (req->block_sz << 3) & + 255; + req->cache[req->block_sz-7] = (req->block_sz >> 5); + } else { + /* Big endian length word (e.g. any SHA) */ + req->cache[req->block_sz-2] = (req->block_sz >> 5); + req->cache[req->block_sz-1] = (req->block_sz << 3) & + 255; + } + + req->len[0] += req->block_sz; /* plus 1 hash block */ + + /* Set special zero-length HMAC flag */ + req->hmac_zlen = true; + + /* Finalize HMAC */ + req->digest = CONTEXT_CONTROL_DIGEST_HMAC; } else if (req->hmac) { /* Finalize HMAC */ req->digest = CONTEXT_CONTROL_DIGEST_HMAC; @@ -1667,6 +1707,7 @@ static int safexcel_hmac_md5_init(struct ahash_request *areq) req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; req->state_sz = MD5_DIGEST_SIZE; req->block_sz = MD5_HMAC_BLOCK_SIZE; + req->len_is_le = true; /* MD5 is little endian! ... */ req->hmac = true; return 0; -- cgit From 724ecd3c0eb7040d423b22332a60d097e2666820 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 2 Jul 2019 21:41:20 +0200 Subject: crypto: aes - rename local routines to prevent future clashes Rename some local AES encrypt/decrypt routines so they don't clash with the names we are about to introduce for the routines exposed by the generic AES library. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/padlock-aes.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index ad020133da19..fbba32e8cb26 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -300,7 +300,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, return iv; } -static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +static void padlock_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { struct aes_ctx *ctx = aes_ctx(tfm); @@ -309,7 +309,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) padlock_store_cword(&ctx->cword.encrypt); } -static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +static void padlock_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { struct aes_ctx *ctx = aes_ctx(tfm); @@ -332,8 +332,8 @@ static struct crypto_alg aes_alg = { .cia_min_keysize = AES_MIN_KEY_SIZE, .cia_max_keysize = AES_MAX_KEY_SIZE, .cia_setkey = aes_set_key, - .cia_encrypt = aes_encrypt, - .cia_decrypt = aes_decrypt, + .cia_encrypt = padlock_aes_encrypt, + .cia_decrypt = padlock_aes_decrypt, } } }; -- cgit From 8131878db76c2bd3065bd3b75cb6615390a393e6 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 2 Jul 2019 21:41:25 +0200 Subject: crypto: padlock/aes - switch to library version of key expansion routine Switch to the new AES library that also provides an implementation of the AES key expansion routine. This removes the dependency on the generic AES cipher, allowing it to be omitted entirely in the future. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 2 +- drivers/crypto/padlock-aes.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 603413f28fa3..27121e530ca4 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -27,7 +27,7 @@ config CRYPTO_DEV_PADLOCK_AES tristate "PadLock driver for AES algorithm" depends on CRYPTO_DEV_PADLOCK select CRYPTO_BLKCIPHER - select CRYPTO_AES + select CRYPTO_LIB_AES help Use VIA PadLock for AES algorithm. 
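(An illustrative aside, not part of this patch: with the AES library in place, a driver-side AES setkey reduces to validate-and-expand. The helper name and destination buffer below are invented for the sketch; aes_expandkey() and memzero_explicit() are the real interfaces.)

#include <crypto/aes.h>
#include <linux/string.h>

/* Hypothetical helper: validate an AES key and hand the expanded
 * encryption round keys to driver-owned storage. The caller is
 * assumed to provide room for the full schedule.
 */
static int example_aes_setkey(u8 *hw_key_buf, const u8 *in_key,
                              unsigned int key_len)
{
        struct crypto_aes_ctx gen_aes;
        int err;

        /* Rejects anything but 16/24/32 byte keys and fills in
         * the round key schedule on success.
         */
        err = aes_expandkey(&gen_aes, in_key, key_len);
        if (err)
                return err;

        memcpy(hw_key_buf, gen_aes.key_enc, sizeof(gen_aes.key_enc));

        /* Round keys are key material: wipe the stack copy */
        memzero_explicit(&gen_aes, sizeof(gen_aes));
        return 0;
}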
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index fbba32e8cb26..8a0661250078 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -145,7 +145,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, ctx->cword.encrypt.keygen = 1; ctx->cword.decrypt.keygen = 1; - if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) { + if (aes_expandkey(&gen_aes, in_key, key_len)) { *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } -- cgit From 18d8b96daded4123d6f1ec00a390b16e4cf1d40c Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 2 Jul 2019 21:41:26 +0200 Subject: crypto: cesa/aes - switch to library version of key expansion routine Switch to the new AES library that also provides an implementation of the AES key expansion routine. This removes the dependency on the generic AES cipher, allowing it to be omitted entirely in the future. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 2 +- drivers/crypto/marvell/cipher.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 27121e530ca4..cfb4c2a42b35 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -214,7 +214,7 @@ config CRYPTO_CRC32_S390 config CRYPTO_DEV_MARVELL_CESA tristate "Marvell's Cryptographic Engine driver" depends on PLAT_ORION || ARCH_MVEBU - select CRYPTO_AES + select CRYPTO_LIB_AES select CRYPTO_DES select CRYPTO_BLKCIPHER select CRYPTO_HASH diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c index f4321f3c0777..fa1997e70b63 100644 --- a/drivers/crypto/marvell/cipher.c +++ b/drivers/crypto/marvell/cipher.c @@ -254,7 +254,7 @@ static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, int ret; int i; - ret = crypto_aes_expand_key(&ctx->aes, key, len); + ret = aes_expandkey(&ctx->aes, key, len); if (ret) { crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); return ret; -- cgit From 363a90c2d517e69776dcf71cc3d6fcaee9fef868 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 2 Jul 2019 21:41:27 +0200 Subject: crypto: safexcel/aes - switch to library version of key expansion routine Switch to the new AES library that also provides an implementation of the AES key expansion routine. This removes the dependency on the generic AES cipher, allowing it to be omitted entirely in the future. 
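(A sketch, not part of this commit: the conversions that follow for amcc, ccp and chelsio all reduce to the same three steps of expanding the key, encrypting a single all-zeroes block and wiping the expanded key. For the GCM drivers, which derive the GHASH key H = CIPH(K, 0 repeated 16 times), that pattern looks roughly like the invented helper below.)

#include <crypto/aes.h>
#include <linux/string.h>

static int example_compute_ghash_h(u8 h[AES_BLOCK_SIZE],
                                   const u8 *key, unsigned int keylen)
{
        struct crypto_aes_ctx aes;
        int rc;

        rc = aes_expandkey(&aes, key, keylen);
        if (rc)
                return rc;

        /* H = CIPH(K, 0^16): encrypt one zero block in place */
        memset(h, 0, AES_BLOCK_SIZE);
        aes_encrypt(&aes, h, h);

        /* The expanded key is sensitive; don't leave it on the stack */
        memzero_explicit(&aes, sizeof(aes));
        return 0;
}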
Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 2 +- drivers/crypto/inside-secure/safexcel_cipher.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index cfb4c2a42b35..72d6c2ca0809 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -719,7 +719,7 @@ config CRYPTO_DEV_SAFEXCEL tristate "Inside Secure's SafeXcel cryptographic engine driver" depends on OF depends on (ARM64 && ARCH_MVEBU) || (COMPILE_TEST && 64BIT) - select CRYPTO_AES + select CRYPTO_LIB_AES select CRYPTO_AUTHENC select CRYPTO_BLKCIPHER select CRYPTO_DES diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index e87428733671..ee3a90f028b5 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -159,7 +159,7 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm, struct crypto_aes_ctx aes; int ret, i; - ret = crypto_aes_expand_key(&aes, key, len); + ret = aes_expandkey(&aes, key, len); if (ret) { crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return ret; -- cgit From da3e7a9715ea041299a9e1eae1e73405b110a333 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 2 Jul 2019 21:41:42 +0200 Subject: crypto: amcc - switch to AES library for GCM key derivation The AMCC code for GCM key derivation allocates an AES cipher to perform a single block encryption. So let's switch to the new and more lightweight AES library instead. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 2 +- drivers/crypto/amcc/crypto4xx_alg.c | 24 ++++++++---------------- 2 files changed, 9 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 72d6c2ca0809..69d1bbd5d9bf 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -312,7 +312,7 @@ config CRYPTO_DEV_PPC4XX depends on PPC && 4xx select CRYPTO_HASH select CRYPTO_AEAD - select CRYPTO_AES + select CRYPTO_LIB_AES select CRYPTO_CCM select CRYPTO_CTR select CRYPTO_GCM diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index cbfc607282f4..a42f8619589d 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c @@ -527,28 +527,20 @@ static int crypto4xx_aes_gcm_validate_keylen(unsigned int keylen) static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key, unsigned int keylen) { - struct crypto_cipher *aes_tfm = NULL; + struct crypto_aes_ctx ctx; uint8_t src[16] = { 0 }; - int rc = 0; - - aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(aes_tfm)) { - rc = PTR_ERR(aes_tfm); - pr_warn("could not load aes cipher driver: %d\n", rc); - return rc; - } + int rc; - rc = crypto_cipher_setkey(aes_tfm, key, keylen); + rc = aes_expandkey(&ctx, key, keylen); if (rc) { - pr_err("setkey() failed: %d\n", rc); - goto out; + pr_err("aes_expandkey() failed: %d\n", rc); + return rc; } - crypto_cipher_encrypt_one(aes_tfm, src, src); + aes_encrypt(&ctx, src, src); crypto4xx_memcpy_to_le32(hash_start, src, 16); -out: - crypto_free_cipher(aes_tfm); - return rc; + memzero_explicit(&ctx, sizeof(ctx)); + return 0; } int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher, -- cgit From 6273fd7a5a99549bc5b90ce8a70df88cc6b570c3 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 2 Jul 2019 21:41:43 +0200 Subject: crypto:
ccp - move to AES library for CMAC key derivation Use the AES library instead of the cipher interface to perform the single block of AES processing involved in updating the key of the cmac(aes) hash. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/ccp/Kconfig | 1 + drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 25 ++++--------------------- drivers/crypto/ccp/ccp-crypto.h | 3 --- 3 files changed, 5 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 48f3edc1e3fb..ce1c47286fc6 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -30,6 +30,7 @@ config CRYPTO_DEV_CCP_CRYPTO select CRYPTO_BLKCIPHER select CRYPTO_AUTHENC select CRYPTO_RSA + select CRYPTO_LIB_AES help Support for using the cryptographic API with the AMD Cryptographic Coprocessor. This module supports offload of SHA and AES algorithms. diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c index bb7219d36b2c..32f19f402073 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c @@ -261,6 +261,7 @@ static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm)); u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo; u64 rb_hi = 0x00, rb_lo = 0x87; + struct crypto_aes_ctx aes; __be64 *gk; int ret; @@ -284,14 +285,14 @@ static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, ctx->u.aes.key_len = 0; /* Set the key for the AES cipher used to generate the keys */ - ret = crypto_cipher_setkey(ctx->u.aes.tfm_cipher, key, key_len); + ret = aes_expandkey(&aes, key, key_len); if (ret) return ret; /* Encrypt a block of zeroes - use key area in context */ memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key)); - crypto_cipher_encrypt_one(ctx->u.aes.tfm_cipher, ctx->u.aes.key, - ctx->u.aes.key); + aes_encrypt(&aes, ctx->u.aes.key, ctx->u.aes.key); + memzero_explicit(&aes, sizeof(aes)); /* Generate K1 and K2 */ k0_hi = be64_to_cpu(*((__be64 *)ctx->u.aes.key)); @@ -336,32 +337,15 @@ static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm) { struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); - struct crypto_cipher *cipher_tfm; ctx->complete = ccp_aes_cmac_complete; ctx->u.aes.key_len = 0; crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx)); - cipher_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(cipher_tfm)) { - pr_warn("could not load aes cipher driver\n"); - return PTR_ERR(cipher_tfm); - } - ctx->u.aes.tfm_cipher = cipher_tfm; - return 0; } -static void ccp_aes_cmac_cra_exit(struct crypto_tfm *tfm) -{ - struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); - - if (ctx->u.aes.tfm_cipher) - crypto_free_cipher(ctx->u.aes.tfm_cipher); - ctx->u.aes.tfm_cipher = NULL; -} - int ccp_register_aes_cmac_algs(struct list_head *head) { struct ccp_crypto_ahash_alg *ccp_alg; @@ -401,7 +385,6 @@ int ccp_register_aes_cmac_algs(struct list_head *head) base->cra_ctxsize = sizeof(struct ccp_ctx); base->cra_priority = CCP_CRA_PRIORITY; base->cra_init = ccp_aes_cmac_cra_init; - base->cra_exit = ccp_aes_cmac_cra_exit; base->cra_module = THIS_MODULE; ret = crypto_register_ahash(alg); diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index 622b34c17643..25409cea8465 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -87,9 +87,6 @@ struct ccp_aes_ctx { /* Fallback 
cipher for XTS with unsupported unit sizes */ struct crypto_sync_skcipher *tfm_skcipher; - /* Cipher used to generate CMAC K1/K2 keys */ - struct crypto_cipher *tfm_cipher; - enum ccp_engine engine; enum ccp_aes_type type; enum ccp_aes_mode mode; -- cgit From 571c47ab98ad8c85e44b28f2f0c41b0e27312a9b Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 2 Jul 2019 21:41:44 +0200 Subject: crypto: chelsio - replace AES cipher calls with library calls Replace a couple of occurrences where the "aes-generic" cipher is instantiated explicitly and only used for encryption of a single block. Use AES library calls instead. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/Kconfig | 1 + drivers/crypto/chelsio/chcr_algo.c | 46 ++++++++++----------------------- drivers/crypto/chelsio/chcr_crypto.h | 1 - drivers/crypto/chelsio/chcr_ipsec.c | 19 +++++--------- drivers/crypto/chelsio/chtls/chtls_hw.c | 20 +++++--------- 5 files changed, 26 insertions(+), 61 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig index 4b9b37a130d3..250150560e68 100644 --- a/drivers/crypto/chelsio/Kconfig +++ b/drivers/crypto/chelsio/Kconfig @@ -2,6 +2,7 @@ config CRYPTO_DEV_CHELSIO tristate "Chelsio Crypto Co-processor Driver" depends on CHELSIO_T4 + select CRYPTO_LIB_AES select CRYPTO_SHA1 select CRYPTO_SHA256 select CRYPTO_SHA512 diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 177f572b9589..38ee38b37ae6 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -1023,22 +1023,21 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv, struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); - struct crypto_cipher *cipher; + struct crypto_aes_ctx aes; int ret, i; u8 *key; unsigned int keylen; int round = reqctx->last_req_len / AES_BLOCK_SIZE; int round8 = round / 8; - cipher = ablkctx->aes_generic; memcpy(iv, reqctx->iv, AES_BLOCK_SIZE); keylen = ablkctx->enckey_len / 2; key = ablkctx->key + keylen; - ret = crypto_cipher_setkey(cipher, key, keylen); + ret = aes_expandkey(&aes, key, keylen); if (ret) - goto out; - crypto_cipher_encrypt_one(cipher, iv, iv); + return ret; + aes_encrypt(&aes, iv, iv); for (i = 0; i < round8; i++) gf128mul_x8_ble((le128 *)iv, (le128 *)iv); @@ -1046,9 +1045,10 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv, gf128mul_x_ble((le128 *)iv, (le128 *)iv); if (!isfinal) - crypto_cipher_decrypt_one(cipher, iv, iv); -out: - return ret; + aes_decrypt(&aes, iv, iv); + + memzero_explicit(&aes, sizeof(aes)); + return 0; } static int chcr_update_cipher_iv(struct ablkcipher_request *req, @@ -1411,16 +1411,6 @@ static int chcr_cra_init(struct crypto_tfm *tfm) return PTR_ERR(ablkctx->sw_cipher); } - if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) { - /* To update tweak*/ - ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0); - if (IS_ERR(ablkctx->aes_generic)) { - pr_err("failed to allocate aes cipher for tweak\n"); - return PTR_ERR(ablkctx->aes_generic); - } - } else - ablkctx->aes_generic = NULL; - tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx); return chcr_device_init(crypto_tfm_ctx(tfm)); } @@ -1451,8 +1441,6 @@ static void chcr_cra_exit(struct crypto_tfm *tfm) struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 
crypto_free_sync_skcipher(ablkctx->sw_cipher); - if (ablkctx->aes_generic) - crypto_free_cipher(ablkctx->aes_generic); } static int get_alg_config(struct algo_param *params, @@ -3364,9 +3352,9 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, { struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx); - struct crypto_cipher *cipher; unsigned int ck_size; int ret = 0, key_ctx_size = 0; + struct crypto_aes_ctx aes; aeadctx->enckey_len = 0; crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); @@ -3409,23 +3397,15 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, /* Calculate the H = CIPH(K, 0 repeated 16 times). * It will go in key context */ - cipher = crypto_alloc_cipher("aes-generic", 0, 0); - if (IS_ERR(cipher)) { - aeadctx->enckey_len = 0; - ret = -ENOMEM; - goto out; - } - - ret = crypto_cipher_setkey(cipher, key, keylen); + ret = aes_expandkey(&aes, key, keylen); if (ret) { aeadctx->enckey_len = 0; - goto out1; + goto out; } memset(gctx->ghash_h, 0, AEAD_H_SIZE); - crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h); + aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h); + memzero_explicit(&aes, sizeof(aes)); -out1: - crypto_free_cipher(cipher); out: return ret; } diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h index 655606f2e4d0..993c97e70565 100644 --- a/drivers/crypto/chelsio/chcr_crypto.h +++ b/drivers/crypto/chelsio/chcr_crypto.h @@ -172,7 +172,6 @@ static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm) struct ablk_ctx { struct crypto_sync_skcipher *sw_cipher; - struct crypto_cipher *aes_generic; __be32 key_ctx_hdr; unsigned int enckey_len; unsigned char ciph_mode; diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c index f429aae72542..24355680f30a 100644 --- a/drivers/crypto/chelsio/chcr_ipsec.c +++ b/drivers/crypto/chelsio/chcr_ipsec.c @@ -132,11 +132,11 @@ static inline int chcr_ipsec_setauthsize(struct xfrm_state *x, static inline int chcr_ipsec_setkey(struct xfrm_state *x, struct ipsec_sa_entry *sa_entry) { - struct crypto_cipher *cipher; int keylen = (x->aead->alg_key_len + 7) / 8; unsigned char *key = x->aead->alg_key; int ck_size, key_ctx_size = 0; unsigned char ghash_h[AEAD_H_SIZE]; + struct crypto_aes_ctx aes; int ret = 0; if (keylen > 3) { @@ -170,26 +170,19 @@ static inline int chcr_ipsec_setkey(struct xfrm_state *x, /* Calculate the H = CIPH(K, 0 repeated 16 times). 
* It will go in key context */ - cipher = crypto_alloc_cipher("aes-generic", 0, 0); - if (IS_ERR(cipher)) { - sa_entry->enckey_len = 0; - ret = -ENOMEM; - goto out; - } - - ret = crypto_cipher_setkey(cipher, key, keylen); + ret = aes_expandkey(&aes, key, keylen); if (ret) { sa_entry->enckey_len = 0; - goto out1; + goto out; } memset(ghash_h, 0, AEAD_H_SIZE); - crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h); + aes_encrypt(&aes, ghash_h, ghash_h); + memzero_explicit(&aes, sizeof(aes)); + memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) * 16), ghash_h, AEAD_H_SIZE); sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) + AEAD_H_SIZE; -out1: - crypto_free_cipher(cipher); out: return ret; } diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c index f2424f4c5f78..2a34035d3cfb 100644 --- a/drivers/crypto/chelsio/chtls/chtls_hw.c +++ b/drivers/crypto/chelsio/chtls/chtls_hw.c @@ -213,8 +213,8 @@ static int chtls_key_info(struct chtls_sock *csk, unsigned char key[AES_KEYSIZE_128]; struct tls12_crypto_info_aes_gcm_128 *gcm_ctx; unsigned char ghash_h[AEAD_H_SIZE]; - struct crypto_cipher *cipher; int ck_size, key_ctx_size; + struct crypto_aes_ctx aes; int ret; gcm_ctx = (struct tls12_crypto_info_aes_gcm_128 *) @@ -234,18 +234,13 @@ static int chtls_key_info(struct chtls_sock *csk, /* Calculate the H = CIPH(K, 0 repeated 16 times). * It will go in key context */ - cipher = crypto_alloc_cipher("aes", 0, 0); - if (IS_ERR(cipher)) { - ret = -ENOMEM; - goto out; - } - - ret = crypto_cipher_setkey(cipher, key, keylen); + ret = aes_expandkey(&aes, key, keylen); if (ret) - goto out1; + return ret; memset(ghash_h, 0, AEAD_H_SIZE); - crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h); + aes_encrypt(&aes, ghash_h, ghash_h); + memzero_explicit(&aes, sizeof(aes)); csk->tlshws.keylen = key_ctx_size; /* Copy the Key context */ @@ -269,10 +264,7 @@ static int chtls_key_info(struct chtls_sock *csk, /* erase key info from driver */ memset(gcm_ctx->key, 0, keylen); -out1: - crypto_free_cipher(cipher); -out: - return ret; + return 0; } static void chtls_set_scmd(struct chtls_sock *csk) -- cgit From cc2a58f14fb88e753fbc38b658e320c3a62d8c47 Mon Sep 17 00:00:00 2001 From: Fuqian Huang Date: Thu, 4 Jul 2019 00:27:08 +0800 Subject: crypto: drivers - Use kmemdup rather than duplicating its implementation kmemdup is introduced to duplicate a region of memory in a neat way. Compared with kmalloc/kzalloc + memcpy, where the programmer needs to write the size twice (which sometimes leads to mistakes), kmemdup improves readability, leads to smaller code and also reduces the chance of mistakes. So use kmemdup rather than open-coding it with kmalloc/kzalloc + memcpy. Signed-off-by: Fuqian Huang Reviewed-by: Horia Geantă Acked-by: Michael S.
Tsirkin Signed-off-by: Herbert Xu --- drivers/crypto/caam/caampkc.c | 11 +++-------- drivers/crypto/virtio/virtio_crypto_algs.c | 4 +--- 2 files changed, 4 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 80574106af29..2340f9441a80 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -867,7 +867,7 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, return ret; /* Copy key in DMA zone */ - rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL); + rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL); if (!rsa_key->e) goto err; @@ -889,8 +889,6 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, rsa_key->e_sz = raw_key.e_sz; rsa_key->n_sz = raw_key.n_sz; - memcpy(rsa_key->e, raw_key.e, raw_key.e_sz); - return 0; err: caam_rsa_free_key(rsa_key); @@ -971,11 +969,11 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, return ret; /* Copy key in DMA zone */ - rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL); + rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL); if (!rsa_key->d) goto err; - rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL); + rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL); if (!rsa_key->e) goto err; @@ -998,9 +996,6 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, rsa_key->e_sz = raw_key.e_sz; rsa_key->n_sz = raw_key.n_sz; - memcpy(rsa_key->d, raw_key.d, raw_key.d_sz); - memcpy(rsa_key->e, raw_key.e, raw_key.e_sz); - caam_rsa_set_priv_key_form(ctx, &raw_key); return 0; diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c index 10f266d462d6..42d19205166b 100644 --- a/drivers/crypto/virtio/virtio_crypto_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_algs.c @@ -129,13 +129,11 @@ static int virtio_crypto_alg_ablkcipher_init_session( * Avoid to do DMA from the stack, switch to using * dynamically-allocated for the key */ - uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC); + uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC); if (!cipher_key) return -ENOMEM; - memcpy(cipher_key, key, keylen); - spin_lock(&vcrypto->ctrl_lock); /* Pad ctrl header */ vcrypto->ctrl.header.opcode = -- cgit From 5c9254ad7ae32a124fb084badd0cb3720f7c49cb Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 4 Jul 2019 17:37:57 +0200 Subject: crypto: ux500 - Use spinlock_t instead of struct spinlock For spinlocks the type spinlock_t should be used instead of "struct spinlock". Use spinlock_t for spinlock's definition. Cc: Herbert Xu Cc: "David S. 
Miller" Cc: linux-crypto@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Herbert Xu --- drivers/crypto/ux500/cryp/cryp.h | 4 ++-- drivers/crypto/ux500/hash/hash_alg.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h index bd89504e8167..8da7f87b339b 100644 --- a/drivers/crypto/ux500/cryp/cryp.h +++ b/drivers/crypto/ux500/cryp/cryp.h @@ -241,12 +241,12 @@ struct cryp_device_data { struct clk *clk; struct regulator *pwr_regulator; int power_status; - struct spinlock ctx_lock; + spinlock_t ctx_lock; struct cryp_ctx *current_ctx; struct klist_node list_node; struct cryp_dma dma; bool power_state; - struct spinlock power_state_spinlock; + spinlock_t power_state_spinlock; bool restore_dev_ctx; }; diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h index ab2bd00c1c36..7c9bcc15125f 100644 --- a/drivers/crypto/ux500/hash/hash_alg.h +++ b/drivers/crypto/ux500/hash/hash_alg.h @@ -366,10 +366,10 @@ struct hash_device_data { phys_addr_t phybase; struct klist_node list_node; struct device *dev; - struct spinlock ctx_lock; + spinlock_t ctx_lock; struct hash_ctx *current_ctx; bool power_state; - struct spinlock power_state_lock; + spinlock_t power_state_lock; struct regulator *regulator; struct clk *clk; bool restore_dev_state; -- cgit From 77cdd4efe571345e9c116e65f64a616969e0bc35 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Fri, 5 Jul 2019 08:49:22 +0200 Subject: crypto: inside-secure - add support for authenc(hmac(sha1),cbc(des3_ede)) Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 1 + drivers/crypto/inside-secure/safexcel.h | 1 + drivers/crypto/inside-secure/safexcel_cipher.c | 89 ++++++++++++++++++++------ 3 files changed, 72 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 045919651272..29ea2341f10e 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -875,6 +875,7 @@ static struct safexcel_alg_template *safexcel_algs[] = { &safexcel_alg_authenc_hmac_sha256_cbc_aes, &safexcel_alg_authenc_hmac_sha384_cbc_aes, &safexcel_alg_authenc_hmac_sha512_cbc_aes, + &safexcel_alg_authenc_hmac_sha1_cbc_des3_ede, }; static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index b73b17dcb8b1..e6ee12a68857 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -735,5 +735,6 @@ extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede; #endif diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index ee3a90f028b5..9c25dd0a1bfd 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -183,14 +183,16 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm, return 0; } -static int safexcel_aead_aes_setkey(struct 
crypto_aead *ctfm, const u8 *key, - unsigned int len) +static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key, + unsigned int len) { struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); struct safexcel_ahash_export_state istate, ostate; struct safexcel_crypto_priv *priv = ctx->priv; struct crypto_authenc_keys keys; + u32 flags; + int err; if (crypto_authenc_extractkeys(&keys, key, len) != 0) goto badkey; @@ -199,6 +201,15 @@ static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key, goto badkey; /* Encryption key */ + if (ctx->alg == SAFEXCEL_3DES) { + flags = crypto_aead_get_flags(ctfm); + err = __des3_verify_key(&flags, keys.enckey); + crypto_aead_set_flags(ctfm, flags); + + if (unlikely(err)) + return err; + } + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma && memcmp(ctx->key, keys.enckey, keys.enckeylen)) ctx->base.needs_inv = true; @@ -1246,7 +1257,7 @@ struct safexcel_alg_template safexcel_alg_ecb_des3_ede = { }, }; -static int safexcel_aead_encrypt(struct aead_request *req) +static int safexcel_aead_encrypt_aes(struct aead_request *req) { struct safexcel_cipher_req *creq = aead_request_ctx(req); @@ -1254,7 +1265,7 @@ static int safexcel_aead_encrypt(struct aead_request *req) CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES); } -static int safexcel_aead_decrypt(struct aead_request *req) +static int safexcel_aead_decrypt_aes(struct aead_request *req) { struct safexcel_cipher_req *creq = aead_request_ctx(req); @@ -1294,9 +1305,9 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, .engines = EIP97IES | EIP197B | EIP197D, .alg.aead = { - .setkey = safexcel_aead_aes_setkey, - .encrypt = safexcel_aead_encrypt, - .decrypt = safexcel_aead_decrypt, + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt_aes, + .decrypt = safexcel_aead_decrypt_aes, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, .base = { @@ -1329,9 +1340,9 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, .engines = EIP97IES | EIP197B | EIP197D, .alg.aead = { - .setkey = safexcel_aead_aes_setkey, - .encrypt = safexcel_aead_encrypt, - .decrypt = safexcel_aead_decrypt, + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt_aes, + .decrypt = safexcel_aead_decrypt_aes, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, .base = { @@ -1364,9 +1375,9 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, .engines = EIP97IES | EIP197B | EIP197D, .alg.aead = { - .setkey = safexcel_aead_aes_setkey, - .encrypt = safexcel_aead_encrypt, - .decrypt = safexcel_aead_decrypt, + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt_aes, + .decrypt = safexcel_aead_decrypt_aes, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, .base = { @@ -1399,9 +1410,9 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, .engines = EIP97IES | EIP197B | EIP197D, .alg.aead = { - .setkey = safexcel_aead_aes_setkey, - .encrypt = safexcel_aead_encrypt, - .decrypt = safexcel_aead_decrypt, + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt_aes, + .decrypt = safexcel_aead_decrypt_aes, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, .base = { @@ -1434,9 +1445,9 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = { .type = 
SAFEXCEL_ALG_TYPE_AEAD, .engines = EIP97IES | EIP197B | EIP197D, .alg.aead = { - .setkey = safexcel_aead_aes_setkey, - .encrypt = safexcel_aead_encrypt, - .decrypt = safexcel_aead_decrypt, + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt_aes, + .decrypt = safexcel_aead_decrypt_aes, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, .base = { @@ -1454,3 +1465,43 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = { }, }, }; + +static int safexcel_aead_encrypt_3des(struct aead_request *req) +{ + struct safexcel_cipher_req *creq = aead_request_ctx(req); + + return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT, + CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_3DES); +} + +static int safexcel_aead_decrypt_3des(struct aead_request *req) +{ + struct safexcel_cipher_req *creq = aead_request_ctx(req); + + return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT, + CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_3DES); +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt_3des, + .decrypt = safexcel_aead_decrypt_3des, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", + .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des3_ede", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha1_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; -- cgit From 54f9e8fa6668366620655df4e6ce5e7b9bf0de8c Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Fri, 5 Jul 2019 08:49:23 +0200 Subject: crypto: inside-secure - added support for rfc3686(ctr(aes)) Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 15 +-- drivers/crypto/inside-secure/safexcel.h | 32 +---- drivers/crypto/inside-secure/safexcel_cipher.c | 155 +++++++++++++++++++++---- 3 files changed, 138 insertions(+), 64 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 29ea2341f10e..47cb2da06eb3 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -404,17 +404,9 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX; writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe)); - /* H/W capabilities selection */ - val = EIP197_FUNCTION_RSVD; - val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY; - val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT; - val |= EIP197_ALG_DES_ECB | EIP197_ALG_DES_CBC; - val |= EIP197_ALG_3DES_ECB | EIP197_ALG_3DES_CBC; - val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC; - val |= EIP197_ALG_MD5 | EIP197_ALG_HMAC_MD5; - val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1; - val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2; - writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe)); + /* H/W capabilities selection: just enable everything */ + writel(EIP197_FUNCTION_ALL, + EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe)); } /* Command Descriptor Rings prepare */ @@ -858,6 +850,7 @@ static struct safexcel_alg_template *safexcel_algs[] = { &safexcel_alg_cbc_des3_ede, 
&safexcel_alg_ecb_aes, &safexcel_alg_cbc_aes, + &safexcel_alg_ctr_aes, &safexcel_alg_md5, &safexcel_alg_sha1, &safexcel_alg_sha224, diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index e6ee12a68857..64274c7bd65d 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -256,35 +256,7 @@ #define EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX BIT(20) /* EIP197_PE_EIP96_FUNCTION_EN */ -#define EIP197_FUNCTION_RSVD (BIT(6) | BIT(15) | BIT(20) | BIT(23)) -#define EIP197_PROTOCOL_HASH_ONLY BIT(0) -#define EIP197_PROTOCOL_ENCRYPT_ONLY BIT(1) -#define EIP197_PROTOCOL_HASH_ENCRYPT BIT(2) -#define EIP197_PROTOCOL_HASH_DECRYPT BIT(3) -#define EIP197_PROTOCOL_ENCRYPT_HASH BIT(4) -#define EIP197_PROTOCOL_DECRYPT_HASH BIT(5) -#define EIP197_ALG_ARC4 BIT(7) -#define EIP197_ALG_AES_ECB BIT(8) -#define EIP197_ALG_AES_CBC BIT(9) -#define EIP197_ALG_AES_CTR_ICM BIT(10) -#define EIP197_ALG_AES_OFB BIT(11) -#define EIP197_ALG_AES_CFB BIT(12) -#define EIP197_ALG_DES_ECB BIT(13) -#define EIP197_ALG_DES_CBC BIT(14) -#define EIP197_ALG_DES_OFB BIT(16) -#define EIP197_ALG_DES_CFB BIT(17) -#define EIP197_ALG_3DES_ECB BIT(18) -#define EIP197_ALG_3DES_CBC BIT(19) -#define EIP197_ALG_3DES_OFB BIT(21) -#define EIP197_ALG_3DES_CFB BIT(22) -#define EIP197_ALG_MD5 BIT(24) -#define EIP197_ALG_HMAC_MD5 BIT(25) -#define EIP197_ALG_SHA1 BIT(26) -#define EIP197_ALG_HMAC_SHA1 BIT(27) -#define EIP197_ALG_SHA2 BIT(28) -#define EIP197_ALG_HMAC_SHA2 BIT(29) -#define EIP197_ALG_AES_XCBC_MAC BIT(30) -#define EIP197_ALG_GCM_HASH BIT(31) +#define EIP197_FUNCTION_ALL 0xffffffff /* EIP197_PE_EIP96_CONTEXT_CTRL */ #define EIP197_CONTEXT_SIZE(n) (n) @@ -333,6 +305,7 @@ struct safexcel_context_record { /* control1 */ #define CONTEXT_CONTROL_CRYPTO_MODE_ECB (0 << 0) #define CONTEXT_CONTROL_CRYPTO_MODE_CBC (1 << 0) +#define CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD (6 << 0) #define CONTEXT_CONTROL_IV0 BIT(5) #define CONTEXT_CONTROL_IV1 BIT(6) #define CONTEXT_CONTROL_IV2 BIT(7) @@ -718,6 +691,7 @@ extern struct safexcel_alg_template safexcel_alg_ecb_des3_ede; extern struct safexcel_alg_template safexcel_alg_cbc_des3_ede; extern struct safexcel_alg_template safexcel_alg_ecb_aes; extern struct safexcel_alg_template safexcel_alg_cbc_aes; +extern struct safexcel_alg_template safexcel_alg_ctr_aes; extern struct safexcel_alg_template safexcel_alg_md5; extern struct safexcel_alg_template safexcel_alg_sha1; extern struct safexcel_alg_template safexcel_alg_sha224; diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 9c25dd0a1bfd..776da6515511 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -40,6 +40,7 @@ struct safexcel_cipher_ctx { bool aead; __le32 key[8]; + u32 nonce; unsigned int key_len; /* All the below is AEAD specific */ @@ -62,9 +63,9 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, u32 length) { struct safexcel_token *token; - u32 offset = 0, block_sz = 0; + u32 block_sz = 0; - if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { + if (ctx->mode != CONTEXT_CONTROL_CRYPTO_MODE_ECB) { switch (ctx->alg) { case SAFEXCEL_DES: block_sz = DES_BLOCK_SIZE; @@ -80,11 +81,20 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, break; } - offset = block_sz / sizeof(u32); - memcpy(cdesc->control_data.token, iv, block_sz); + if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) { + /* 32 bit 
nonce */ + cdesc->control_data.token[0] = ctx->nonce; + /* 64 bit IV part */ + memcpy(&cdesc->control_data.token[1], iv, 8); + /* 32 bit counter, start at 1 (big endian!) */ + cdesc->control_data.token[3] = cpu_to_be32(1); + } else { + memcpy(cdesc->control_data.token, iv, block_sz); + } } - token = (struct safexcel_token *)(cdesc->control_data.token + offset); + /* skip over worst case IV of 4 dwords, no need to be exact */ + token = (struct safexcel_token *)(cdesc->control_data.token + 4); token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; token[0].packet_length = length; @@ -101,33 +111,35 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, u32 cryptlen, u32 assoclen, u32 digestsize) { struct safexcel_token *token; - unsigned offset = 0; + u32 block_sz = 0; - if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { - offset = AES_BLOCK_SIZE / sizeof(u32); - memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE); + if (ctx->mode != CONTEXT_CONTROL_CRYPTO_MODE_ECB) { + switch (ctx->alg) { + case SAFEXCEL_DES: + block_sz = DES_BLOCK_SIZE; + cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD; + break; + case SAFEXCEL_3DES: + block_sz = DES3_EDE_BLOCK_SIZE; + cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD; + break; + case SAFEXCEL_AES: + block_sz = AES_BLOCK_SIZE; + cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; + break; + } - cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; + memcpy(cdesc->control_data.token, iv, block_sz); } - token = (struct safexcel_token *)(cdesc->control_data.token + offset); - if (direction == SAFEXCEL_DECRYPT) cryptlen -= digestsize; - token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; - token[0].packet_length = assoclen; - token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH; - - token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION; - token[1].packet_length = cryptlen; - token[1].stat = EIP197_TOKEN_STAT_LAST_HASH; - token[1].instructions = EIP197_TOKEN_INS_LAST | - EIP197_TOKEN_INS_TYPE_CRYPTO | - EIP197_TOKEN_INS_TYPE_HASH | - EIP197_TOKEN_INS_TYPE_OUTPUT; - if (direction == SAFEXCEL_ENCRYPT) { + /* align end of instruction sequence to end of token */ + token = (struct safexcel_token *)(cdesc->control_data.token + + EIP197_MAX_TOKENS - 3); + token[2].opcode = EIP197_TOKEN_OPCODE_INSERT; token[2].packet_length = digestsize; token[2].stat = EIP197_TOKEN_STAT_LAST_HASH | @@ -135,6 +147,10 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | EIP197_TOKEN_INS_INSERT_HASH_DIGEST; } else { + /* align end of instruction sequence to end of token */ + token = (struct safexcel_token *)(cdesc->control_data.token + + EIP197_MAX_TOKENS - 4); + token[2].opcode = EIP197_TOKEN_OPCODE_RETRIEVE; token[2].packet_length = digestsize; token[2].stat = EIP197_TOKEN_STAT_LAST_HASH | @@ -148,6 +164,19 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, EIP197_TOKEN_STAT_LAST_PACKET; token[3].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT; } + + token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; + token[0].packet_length = assoclen; + token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH; + + token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION; + token[1].packet_length = cryptlen; + token[1].stat = EIP197_TOKEN_STAT_LAST_HASH; + token[1].instructions = EIP197_TOKEN_INS_LAST | + EIP197_TOKEN_INS_TYPE_CRYPTO | + EIP197_TOKEN_INS_TYPE_HASH | + EIP197_TOKEN_INS_TYPE_OUTPUT; + } static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm, @@ 
-1046,6 +1075,84 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = { }, }; +static int safexcel_ctr_aes_encrypt(struct skcipher_request *req) +{ + return safexcel_queue_req(&req->base, skcipher_request_ctx(req), + SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD, + SAFEXCEL_AES); +} + +static int safexcel_ctr_aes_decrypt(struct skcipher_request *req) +{ + return safexcel_queue_req(&req->base, skcipher_request_ctx(req), + SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD, + SAFEXCEL_AES); +} + +static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm, + const u8 *key, unsigned int len) +{ + struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct safexcel_crypto_priv *priv = ctx->priv; + struct crypto_aes_ctx aes; + int ret, i; + unsigned int keylen; + + /* last 4 bytes of key are the nonce! */ + ctx->nonce = *(u32 *)(key + len - 4); + /* exclude the nonce here */ + keylen = len - 4; + ret = aes_expandkey(&aes, key, keylen); + if (ret) { + crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return ret; + } + + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { + for (i = 0; i < keylen / sizeof(u32); i++) { + if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) { + ctx->base.needs_inv = true; + break; + } + } + } + + for (i = 0; i < keylen / sizeof(u32); i++) + ctx->key[i] = cpu_to_le32(aes.key_enc[i]); + + ctx->key_len = keylen; + + memzero_explicit(&aes, sizeof(aes)); + return 0; +} + +struct safexcel_alg_template safexcel_alg_ctr_aes = { + .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .alg.skcipher = { + .setkey = safexcel_skcipher_aesctr_setkey, + .encrypt = safexcel_ctr_aes_encrypt, + .decrypt = safexcel_ctr_aes_decrypt, + /* Add 4 to include the 4 byte nonce! 
*/ + .min_keysize = AES_MIN_KEY_SIZE + 4, + .max_keysize = AES_MAX_KEY_SIZE + 4, + .ivsize = 8, + .base = { + .cra_name = "rfc3686(ctr(aes))", + .cra_driver_name = "safexcel-ctr-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_skcipher_cra_init, + .cra_exit = safexcel_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + static int safexcel_cbc_des_encrypt(struct skcipher_request *req) { return safexcel_queue_req(&req->base, skcipher_request_ctx(req), -- cgit From 0e17e3621a28a67871a8c69f5fccdf7d009fd939 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Fri, 5 Jul 2019 08:49:24 +0200 Subject: crypto: inside-secure - add support for authenc(hmac(sha*),rfc3686(ctr(aes))) suites This patch adds support for the following AEAD ciphersuites: - authenc(hmac(sha1),rfc3686(ctr(aes))) - authenc(hmac(sha224),rfc3686(ctr(aes))) - authenc(hmac(sha256),rfc3686(ctr(aes))) - authenc(hmac(sha384),rfc3686(ctr(aes))) - authenc(hmac(sha512),rfc3686(ctr(aes))) Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 14 +- drivers/crypto/inside-secure/safexcel.h | 5 + drivers/crypto/inside-secure/safexcel_cipher.c | 276 +++++++++++++++++++++---- 3 files changed, 250 insertions(+), 45 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 47cb2da06eb3..d1f60fd7e91a 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -592,17 +592,18 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv, if (rdesc->buffer_overflow) dev_err(priv->dev, "Buffer overflow detected"); - if (rdesc->result_data.error_code & 0x4067) { - /* Fatal error (bits 0,1,2,5,6 & 14) */ + if (rdesc->result_data.error_code & 0x4066) { + /* Fatal error (bits 1,2,5,6 & 14) */ dev_err(priv->dev, "result descriptor error (%x)", rdesc->result_data.error_code); return -EIO; } else if (rdesc->result_data.error_code & - (BIT(7) | BIT(4) | BIT(3))) { + (BIT(7) | BIT(4) | BIT(3) | BIT(0))) { /* * Give priority over authentication fails: - * Blocksize & overflow errors, something wrong with the input! + * Blocksize, length & overflow errors, + * something wrong with the input! 
*/ return -EINVAL; } else if (rdesc->result_data.error_code & BIT(9)) { @@ -869,6 +870,11 @@ static struct safexcel_alg_template *safexcel_algs[] = { &safexcel_alg_authenc_hmac_sha384_cbc_aes, &safexcel_alg_authenc_hmac_sha512_cbc_aes, &safexcel_alg_authenc_hmac_sha1_cbc_des3_ede, + &safexcel_alg_authenc_hmac_sha1_ctr_aes, + &safexcel_alg_authenc_hmac_sha224_ctr_aes, + &safexcel_alg_authenc_hmac_sha256_ctr_aes, + &safexcel_alg_authenc_hmac_sha384_ctr_aes, + &safexcel_alg_authenc_hmac_sha512_ctr_aes, }; static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 64274c7bd65d..04dfbbc63971 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -710,5 +710,10 @@ extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes; #endif diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 776da6515511..477e0ec35f45 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -58,11 +58,9 @@ struct safexcel_cipher_req { int nr_src, nr_dst; }; -static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, - struct safexcel_command_desc *cdesc, - u32 length) +static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, + struct safexcel_command_desc *cdesc) { - struct safexcel_token *token; u32 block_sz = 0; if (ctx->mode != CONTEXT_CONTROL_CRYPTO_MODE_ECB) { @@ -92,6 +90,15 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, memcpy(cdesc->control_data.token, iv, block_sz); } } +} + +static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, + struct safexcel_command_desc *cdesc, + u32 length) +{ + struct safexcel_token *token; + + safexcel_cipher_token(ctx, iv, cdesc); /* skip over worst case IV of 4 dwords, no need to be exact */ token = (struct safexcel_token *)(cdesc->control_data.token + 4); @@ -111,26 +118,8 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, u32 cryptlen, u32 assoclen, u32 digestsize) { struct safexcel_token *token; - u32 block_sz = 0; - if (ctx->mode != CONTEXT_CONTROL_CRYPTO_MODE_ECB) { - switch (ctx->alg) { - case SAFEXCEL_DES: - block_sz = DES_BLOCK_SIZE; - cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD; - break; - case SAFEXCEL_3DES: - block_sz = DES3_EDE_BLOCK_SIZE; - cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD; - break; - case SAFEXCEL_AES: - block_sz = AES_BLOCK_SIZE; - cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; - break; - } - - memcpy(cdesc->control_data.token, iv, block_sz); - } + safexcel_cipher_token(ctx, iv, cdesc); if (direction == SAFEXCEL_DECRYPT) cryptlen -= digestsize; @@ -165,18 +154,27 @@ static void 
safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, token[3].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT; } - token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; - token[0].packet_length = assoclen; - token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH; - - token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION; - token[1].packet_length = cryptlen; - token[1].stat = EIP197_TOKEN_STAT_LAST_HASH; - token[1].instructions = EIP197_TOKEN_INS_LAST | - EIP197_TOKEN_INS_TYPE_CRYPTO | - EIP197_TOKEN_INS_TYPE_HASH | - EIP197_TOKEN_INS_TYPE_OUTPUT; + if (unlikely(!cryptlen)) { + token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION; + token[1].packet_length = assoclen; + token[1].stat = EIP197_TOKEN_STAT_LAST_HASH; + token[1].instructions = EIP197_TOKEN_INS_LAST | + EIP197_TOKEN_INS_TYPE_HASH; + } else { + if (likely(assoclen)) { + token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; + token[0].packet_length = assoclen; + token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH; + } + token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION; + token[1].packet_length = cryptlen; + token[1].stat = EIP197_TOKEN_STAT_LAST_HASH; + token[1].instructions = EIP197_TOKEN_INS_LAST | + EIP197_TOKEN_INS_TYPE_CRYPTO | + EIP197_TOKEN_INS_TYPE_HASH | + EIP197_TOKEN_INS_TYPE_OUTPUT; + } } static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm, @@ -220,23 +218,43 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key, struct safexcel_ahash_export_state istate, ostate; struct safexcel_crypto_priv *priv = ctx->priv; struct crypto_authenc_keys keys; + struct crypto_aes_ctx aes; u32 flags; - int err; + int err = -EINVAL; if (crypto_authenc_extractkeys(&keys, key, len) != 0) goto badkey; - if (keys.enckeylen > sizeof(ctx->key)) - goto badkey; + if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) { + /* 20 is minimum AES key: 16 bytes + 4 bytes nonce */ + if (keys.enckeylen < 20) + goto badkey; + /* last 4 bytes of key are the nonce! 
*/ + ctx->nonce = *(u32 *)(keys.enckey + keys.enckeylen - 4); + /* exclude the nonce here */ + keys.enckeylen -= 4; + } /* Encryption key */ - if (ctx->alg == SAFEXCEL_3DES) { + switch (ctx->alg) { + case SAFEXCEL_3DES: + if (keys.enckeylen != 24) + goto badkey; flags = crypto_aead_get_flags(ctfm); err = __des3_verify_key(&flags, keys.enckey); crypto_aead_set_flags(ctfm, flags); if (unlikely(err)) - return err; + goto badkey_expflags; + break; + case SAFEXCEL_AES: + err = aes_expandkey(&aes, keys.enckey, keys.enckeylen); + if (unlikely(err)) + goto badkey; + break; + default: + dev_err(priv->dev, "aead: unsupported cipher algorithm\n"); + goto badkey; } if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma && @@ -295,8 +313,9 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key, badkey: crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); +badkey_expflags: memzero_explicit(&keys, sizeof(keys)); - return -EINVAL; + return err; } static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, @@ -1392,6 +1411,7 @@ static int safexcel_aead_cra_init(struct crypto_tfm *tfm) ctx->priv = tmpl->priv; + ctx->alg = SAFEXCEL_AES; /* default */ ctx->aead = true; ctx->base.send = safexcel_aead_send; ctx->base.handle_result = safexcel_aead_handle_result; @@ -1573,6 +1593,15 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = { }, }; +static int safexcel_aead_sha1_des3_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sha1_cra_init(tfm); + ctx->alg = SAFEXCEL_3DES; /* override default */ + return 0; +} + static int safexcel_aead_encrypt_3des(struct aead_request *req) { struct safexcel_cipher_req *creq = aead_request_ctx(req); @@ -1606,7 +1635,172 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede = { .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), .cra_alignmask = 0, - .cra_init = safexcel_aead_sha1_cra_init, + .cra_init = safexcel_aead_sha1_des3_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sha1_ctr_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sha1_cra_init(tfm); + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */ + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt_aes, + .decrypt = safexcel_aead_decrypt_aes, + .ivsize = 8, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))", + .cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha1_ctr_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sha256_ctr_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sha256_cra_init(tfm); + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */ + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .alg.aead = { + .setkey = safexcel_aead_setkey, + 
.encrypt = safexcel_aead_encrypt_aes, + .decrypt = safexcel_aead_decrypt_aes, + .ivsize = 8, + .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))", + .cra_driver_name = "safexcel-authenc-hmac-sha256-ctr-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha256_ctr_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sha224_ctr_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sha224_cra_init(tfm); + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */ + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt_aes, + .decrypt = safexcel_aead_decrypt_aes, + .ivsize = 8, + .maxauthsize = SHA224_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))", + .cra_driver_name = "safexcel-authenc-hmac-sha224-ctr-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha224_ctr_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sha512_ctr_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sha512_cra_init(tfm); + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */ + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt_aes, + .decrypt = safexcel_aead_decrypt_aes, + .ivsize = 8, + .maxauthsize = SHA512_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))", + .cra_driver_name = "safexcel-authenc-hmac-sha512-ctr-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha512_ctr_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sha384_ctr_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sha384_cra_init(tfm); + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */ + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt_aes, + .decrypt = safexcel_aead_decrypt_aes, + .ivsize = 8, + .maxauthsize = SHA384_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))", + .cra_driver_name = "safexcel-authenc-hmac-sha384-ctr-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha384_ctr_cra_init, .cra_exit = safexcel_aead_cra_exit, .cra_module = 
THIS_MODULE, }, -- cgit From 31fb084c4eb556fac30115d99bc518c7944887d2 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Fri, 5 Jul 2019 09:36:31 +0200 Subject: crypto: inside-secure -reduce hash byte counters to 64 bits This patch recognises the fact that the hardware cannot ever process more than 2,199,023,386,111 bytes of hash or HMAC payload, so there is no point in maintaining 128 bit wide byte counters, 64 bits is more than sufficient Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.h | 4 +- drivers/crypto/inside-secure/safexcel_hash.c | 88 +++++++++++----------------- 2 files changed, 36 insertions(+), 56 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 04dfbbc63971..6f9875e7a0c0 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -613,8 +613,8 @@ struct safexcel_context { #define HASH_CACHE_SIZE SHA512_BLOCK_SIZE struct safexcel_ahash_export_state { - u64 len[2]; - u64 processed[2]; + u64 len; + u64 processed; u32 digest; diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index a777dec5f41f..c1776b6690fc 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -41,8 +41,8 @@ struct safexcel_ahash_req { u8 block_sz; /* block size, only set once */ u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32)); - u64 len[2]; - u64 processed[2]; + u64 len; + u64 processed; u8 cache[HASH_CACHE_SIZE] __aligned(sizeof(u32)); dma_addr_t cache_dma; @@ -53,12 +53,7 @@ struct safexcel_ahash_req { static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req) { - u64 len, processed; - - len = (0xffffffff * req->len[1]) + req->len[0]; - processed = (0xffffffff * req->processed[1]) + req->processed[0]; - - return len - processed; + return req->len - req->processed; } static void safexcel_hash_token(struct safexcel_command_desc *cdesc, @@ -94,7 +89,7 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, * fields. Do this now as we need it to setup the first command * descriptor. */ - if ((!req->processed[0]) && (!req->processed[1])) { + if (!req->processed) { /* First - and possibly only - block of basic hash only */ if (req->finish) { cdesc->control_data.control0 |= @@ -119,11 +114,8 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, if (req->finish) { /* Compute digest count for hash/HMAC finish operations */ if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) || - req->hmac_zlen || req->processed[1] || - (req->processed[0] != req->block_sz)) { - count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE; - count += ((0x100000000ULL / EIP197_COUNTER_BLOCK_SIZE) * - req->processed[1]); + req->hmac_zlen || (req->processed != req->block_sz)) { + count = req->processed / EIP197_COUNTER_BLOCK_SIZE; /* This is a hardware limitation, as the * counter must fit into an u32. 
This represents @@ -141,8 +133,7 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, /* Special case: zero length HMAC */ req->hmac_zlen || /* PE HW < 4.4 cannot do HMAC continue, fake using hash */ - ((req->processed[1] || - (req->processed[0] != req->block_sz)))) { + (req->processed != req->block_sz)) { /* Basic hash continue operation, need digest + cnt */ cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE((req->state_sz >> 2) + 1) | @@ -234,11 +225,9 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, memcpy(sreq->state, ctx->opad, sreq->state_sz); - sreq->len[0] = sreq->block_sz + - crypto_ahash_digestsize(ahash); - sreq->len[1] = 0; - sreq->processed[0] = sreq->block_sz; - sreq->processed[1] = 0; + sreq->len = sreq->block_sz + + crypto_ahash_digestsize(ahash); + sreq->processed = sreq->block_sz; sreq->hmac = 0; ctx->base.needs_inv = true; @@ -393,9 +382,7 @@ send_command: safexcel_rdr_req_set(priv, ring, rdesc, &areq->base); - req->processed[0] += len; - if (req->processed[0] < len) - req->processed[1]++; + req->processed += len; *commands = n_cdesc; *results = 1; @@ -603,15 +590,14 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq) if (ctx->base.ctxr) { if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv && - (req->processed[0] || req->processed[1]) && + req->processed && (/* invalidate for basic hash continuation finish */ (req->finish && (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)) || /* invalidate if (i)digest changed */ memcmp(ctx->base.ctxr->data, req->state, req->state_sz) || /* invalidate for HMAC continuation finish */ - (req->finish && (req->processed[1] || - (req->processed[0] != req->block_sz))) || + (req->finish && (req->processed != req->block_sz)) || /* invalidate for HMAC finish with odigest changed */ (req->finish && memcmp(ctx->base.ctxr->data + (req->state_sz>>2), @@ -662,9 +648,7 @@ static int safexcel_ahash_update(struct ahash_request *areq) ret = safexcel_ahash_cache(areq); /* Update total request length */ - req->len[0] += areq->nbytes; - if (req->len[0] < areq->nbytes) - req->len[1]++; + req->len += areq->nbytes; /* If not all data could fit into the cache, go process the excess. 
* Also go process immediately for an HMAC IV precompute, which @@ -683,7 +667,7 @@ static int safexcel_ahash_final(struct ahash_request *areq) req->finish = true; - if (unlikely(!req->len[0] && !req->len[1] && !areq->nbytes)) { + if (unlikely(!req->len && !areq->nbytes)) { /* * If we have an overall 0 length *hash* request: * The HW cannot do 0 length hash, so we provide the correct @@ -709,8 +693,8 @@ static int safexcel_ahash_final(struct ahash_request *areq) SHA512_DIGEST_SIZE); return 0; - } else if (unlikely(req->hmac && !req->len[1] && - (req->len[0] == req->block_sz) && + } else if (unlikely(req->hmac && + (req->len == req->block_sz) && !areq->nbytes)) { /* * If we have an overall 0 length *HMAC* request: @@ -736,7 +720,7 @@ static int safexcel_ahash_final(struct ahash_request *areq) 255; } - req->len[0] += req->block_sz; /* plus 1 hash block */ + req->len += req->block_sz; /* plus 1 hash block */ /* Set special zero-length HMAC flag */ req->hmac_zlen = true; @@ -766,10 +750,8 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out) struct safexcel_ahash_req *req = ahash_request_ctx(areq); struct safexcel_ahash_export_state *export = out; - export->len[0] = req->len[0]; - export->len[1] = req->len[1]; - export->processed[0] = req->processed[0]; - export->processed[1] = req->processed[1]; + export->len = req->len; + export->processed = req->processed; export->digest = req->digest; @@ -789,10 +771,8 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in) if (ret) return ret; - req->len[0] = export->len[0]; - req->len[1] = export->len[1]; - req->processed[0] = export->processed[0]; - req->processed[1] = export->processed[1]; + req->len = export->len; + req->processed = export->processed; req->digest = export->digest; @@ -903,8 +883,8 @@ static int safexcel_hmac_sha1_init(struct ahash_request *areq) /* Start from ipad precompute */ memcpy(req->state, ctx->ipad, SHA1_DIGEST_SIZE); /* Already processed the key^ipad part now! */ - req->len[0] = SHA1_BLOCK_SIZE; - req->processed[0] = SHA1_BLOCK_SIZE; + req->len = SHA1_BLOCK_SIZE; + req->processed = SHA1_BLOCK_SIZE; ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; @@ -1262,8 +1242,8 @@ static int safexcel_hmac_sha224_init(struct ahash_request *areq) /* Start from ipad precompute */ memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE); /* Already processed the key^ipad part now! */ - req->len[0] = SHA256_BLOCK_SIZE; - req->processed[0] = SHA256_BLOCK_SIZE; + req->len = SHA256_BLOCK_SIZE; + req->processed = SHA256_BLOCK_SIZE; ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; @@ -1332,8 +1312,8 @@ static int safexcel_hmac_sha256_init(struct ahash_request *areq) /* Start from ipad precompute */ memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE); /* Already processed the key^ipad part now! */ - req->len[0] = SHA256_BLOCK_SIZE; - req->processed[0] = SHA256_BLOCK_SIZE; + req->len = SHA256_BLOCK_SIZE; + req->processed = SHA256_BLOCK_SIZE; ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; @@ -1512,8 +1492,8 @@ static int safexcel_hmac_sha512_init(struct ahash_request *areq) /* Start from ipad precompute */ memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE); /* Already processed the key^ipad part now! 
*/ - req->len[0] = SHA512_BLOCK_SIZE; - req->processed[0] = SHA512_BLOCK_SIZE; + req->len = SHA512_BLOCK_SIZE; + req->processed = SHA512_BLOCK_SIZE; ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; @@ -1582,8 +1562,8 @@ static int safexcel_hmac_sha384_init(struct ahash_request *areq) /* Start from ipad precompute */ memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE); /* Already processed the key^ipad part now! */ - req->len[0] = SHA512_BLOCK_SIZE; - req->processed[0] = SHA512_BLOCK_SIZE; + req->len = SHA512_BLOCK_SIZE; + req->processed = SHA512_BLOCK_SIZE; ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; @@ -1700,8 +1680,8 @@ static int safexcel_hmac_md5_init(struct ahash_request *areq) /* Start from ipad precompute */ memcpy(req->state, ctx->ipad, MD5_DIGEST_SIZE); /* Already processed the key^ipad part now! */ - req->len[0] = MD5_HMAC_BLOCK_SIZE; - req->processed[0] = MD5_HMAC_BLOCK_SIZE; + req->len = MD5_HMAC_BLOCK_SIZE; + req->processed = MD5_HMAC_BLOCK_SIZE; ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; -- cgit From 93308baf075039dccf347391491773f272cc24f6 Mon Sep 17 00:00:00 2001 From: "Hook, Gary" Date: Tue, 9 Jul 2019 15:07:15 +0000 Subject: crypto: ccp - Make CCP debugfs support optional Add a config option to exclude DebugFS support in the CCP driver. Signed-off-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/Kconfig | 8 ++++++++ drivers/crypto/ccp/Makefile | 4 ++-- drivers/crypto/ccp/ccp-dev-v5.c | 4 ++++ 3 files changed, 14 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index ce1c47286fc6..8fec733f567f 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -46,3 +46,11 @@ config CRYPTO_DEV_SP_PSP management commands in Secure Encrypted Virtualization (SEV) mode, along with software-based Trusted Execution Environment (TEE) to enable third-party trusted applications. + +config CRYPTO_DEV_CCP_DEBUGFS + bool "Enable CCP Internals in DebugFS" + default n + depends on CRYPTO_DEV_SP_CCP + help + Expose CCP device information such as operation statistics, feature + information, and descriptor queue contents. 
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 51d1c0cf66c7..6b86f1e6d634 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -5,8 +5,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \ ccp-ops.o \ ccp-dev-v3.o \ ccp-dev-v5.o \ - ccp-dmaengine.o \ - ccp-debugfs.o + ccp-dmaengine.o +ccp-$(CONFIG_CRYPTO_DEV_CCP_DEBUGFS) += ccp-debugfs.o ccp-$(CONFIG_PCI) += sp-pci.o ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 217e41bbadaf..35b3d866726d 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -970,8 +970,10 @@ static int ccp5_init(struct ccp_device *ccp) if (ret) goto e_hwrng; +#ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS /* Set up debugfs entries */ ccp5_debugfs_setup(ccp); +#endif return 0; @@ -1009,11 +1011,13 @@ static void ccp5_destroy(struct ccp_device *ccp) /* Remove this device from the list of available units first */ ccp_del_device(ccp); +#ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS /* We're in the process of tearing down the entire driver; * when all the devices are gone clean up debugfs */ if (ccp_present()) ccp5_debugfs_destroy(); +#endif /* Disable and clear interrupts */ ccp5_disable_queue_interrupts(ccp); -- cgit From c4a89279086e9c98eba659d7665b1732e0d5e3e3 Mon Sep 17 00:00:00 2001 From: "Hook, Gary" Date: Tue, 9 Jul 2019 15:07:22 +0000 Subject: crypto: ccp - Add a module parameter to specify a queue count Add a module parameter to limit the number of queues per CCP. The default value (nqueues=0) is to set up every available queue on each device. The count of queues starts from the first one found on the device (which varies based on the device ID). Signed-off-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev-v3.c | 2 +- drivers/crypto/ccp/ccp-dev-v5.c | 7 ++----- drivers/crypto/ccp/ccp-dev.c | 11 +++++++++++ drivers/crypto/ccp/ccp-dev.h | 1 + 4 files changed, 15 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c index 2b7d47ed5c74..4005d438dff9 100644 --- a/drivers/crypto/ccp/ccp-dev-v3.c +++ b/drivers/crypto/ccp/ccp-dev-v3.c @@ -379,7 +379,7 @@ static int ccp_init(struct ccp_device *ccp) /* Find available queues */ ccp->qim = 0; qmr = ioread32(ccp->io_regs + Q_MASK_REG); - for (i = 0; i < MAX_HW_QUEUES; i++) { + for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) { if (!(qmr & (1 << i))) continue; diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 35b3d866726d..f146b51a23a5 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -2,16 +2,14 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2016,2017 Advanced Micro Devices, Inc. + * Copyright (C) 2016,2019 Advanced Micro Devices, Inc. 
* * Author: Gary R Hook */ -#include #include #include #include -#include #include #include #include @@ -792,8 +790,7 @@ static int ccp5_init(struct ccp_device *ccp) /* Find available queues */ qmr = ioread32(ccp->io_regs + Q_MASK_REG); - for (i = 0; i < MAX_HW_QUEUES; i++) { - + for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) { if (!(qmr & (1 << i))) continue; diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index f79eede71c62..352059d0c572 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -8,6 +8,7 @@ * Author: Gary R Hook */ +#include #include #include #include @@ -26,6 +27,11 @@ #include "ccp-dev.h" +/* Limit CCP use to a specifed number of queues per device */ +static unsigned int nqueues = 0; +module_param(nqueues, uint, 0444); +MODULE_PARM_DESC(nqueues, "Number of queues per CCP (minimum 1; default: all available)"); + struct ccp_tasklet_data { struct completion completion; struct ccp_cmd *cmd; @@ -592,6 +598,11 @@ int ccp_dev_init(struct sp_device *sp) goto e_err; sp->ccp_data = ccp; + if (!nqueues || (nqueues > MAX_HW_QUEUES)) + ccp->max_q_count = MAX_HW_QUEUES; + else + ccp->max_q_count = nqueues; + ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata; if (!ccp->vdata || !ccp->vdata->version) { ret = -ENODEV; diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 5e624920fd99..dd13468111cd 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -379,6 +379,7 @@ struct ccp_device { */ struct ccp_cmd_queue cmd_q[MAX_HW_QUEUES]; unsigned int cmd_q_count; + unsigned int max_q_count; /* Support for the CCP True RNG */ -- cgit From 90773bc1ab407bc2198d7e9fc1c6d6b5ef27a24f Mon Sep 17 00:00:00 2001 From: "Hook, Gary" Date: Tue, 9 Jul 2019 15:07:29 +0000 Subject: crypto: ccp - module parameter to limit the number of enabled CCPs Provide the ability to constrain the total number of enabled devices in the system. Once max_devs devices have been configured, subsequently probed devices are ignored. The max_devs parameter may be zero, in which case all CCPs are disabled. PSPs are always enabled and active. Disabling the CCPs also disables DMA and RNG registration. Signed-off-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 352059d0c572..e58d69d4dd43 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -2,7 +2,7 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2019 Advanced Micro Devices, Inc. 
* * Author: Tom Lendacky * Author: Gary R Hook @@ -20,6 +20,7 @@ #include #include #include +#include #ifdef CONFIG_X86 #include #endif @@ -27,11 +28,19 @@ #include "ccp-dev.h" +#define MAX_CCPS 32 + /* Limit CCP use to a specifed number of queues per device */ static unsigned int nqueues = 0; module_param(nqueues, uint, 0444); MODULE_PARM_DESC(nqueues, "Number of queues per CCP (minimum 1; default: all available)"); +/* Limit the maximum number of configured CCPs */ +static atomic_t dev_count = ATOMIC_INIT(0); +static unsigned int max_devs = MAX_CCPS; +module_param(max_devs, uint, 0444); +MODULE_PARM_DESC(max_devs, "Maximum number of CCPs to enable (default: all; 0 disables all CCPs)"); + struct ccp_tasklet_data { struct completion completion; struct ccp_cmd *cmd; @@ -592,6 +601,13 @@ int ccp_dev_init(struct sp_device *sp) struct ccp_device *ccp; int ret; + /* + * Check how many we have so far, and stop after reaching + * that number + */ + if (atomic_inc_return(&dev_count) > max_devs) + return 0; /* don't fail the load */ + ret = -ENOMEM; ccp = ccp_alloc_struct(sp); if (!ccp) -- cgit From a7c2647034fa34e03d3f4e2cf66315440db2aa14 Mon Sep 17 00:00:00 2001 From: "Hook, Gary" Date: Tue, 9 Jul 2019 15:07:35 +0000 Subject: crypto: ccp - Add a module parameter to control registration for DMA The CCP driver is able to act as a DMA engine. Add a module parameter that allows this feature to be enabled/disabled. Signed-off-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dmaengine.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index 7f22a45bbc11..9d077c42bcbe 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c @@ -2,7 +2,7 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2016,2017 Advanced Micro Devices, Inc. + * Copyright (C) 2016,2019 Advanced Micro Devices, Inc. * * Author: Gary R Hook */ @@ -35,6 +35,10 @@ static unsigned int dma_chan_attr = CCP_DMA_DFLT; module_param(dma_chan_attr, uint, 0444); MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public"); +static unsigned int dmaengine = 1; +module_param(dmaengine, uint, 0444); +MODULE_PARM_DESC(dmaengine, "Register services with the DMA subsystem (any non-zero value, default: 1)"); + static unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp) { switch (dma_chan_attr) { @@ -637,6 +641,9 @@ int ccp_dmaengine_register(struct ccp_device *ccp) unsigned int i; int ret; + if (!dmaengine) + return 0; + ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count, sizeof(*(ccp->ccp_dma_chan)), GFP_KERNEL); @@ -740,6 +747,9 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp) { struct dma_device *dma_dev = &ccp->dma_dev; + if (!dmaengine) + return; + dma_async_device_unregister(dma_dev); kmem_cache_destroy(ccp->dma_desc_cache); -- cgit From a7268c4d4205b7f92eb77ac99d64099152b0f738 Mon Sep 17 00:00:00 2001 From: Phani Kiran Hemadri Date: Tue, 9 Jul 2019 15:24:24 +0000 Subject: crypto: cavium/nitrox - Add support for loading asymmetric crypto firmware This patch adds support to load Asymmetric crypto firmware on AE cores of CNN55XX device. Firmware is stored on UCD block 2 and all available AE cores are tagged to group 0. 
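As a minimal sketch of the group tagging described above (reusing the union types, AQM_GRP_EXECMSK_LOX/HIX register macros and nitrox_write_csr() helper that this patch itself introduces; this is an illustration, not an exact excerpt of the diff):

	/* Sketch: place all AE engines in group 0 by setting every
	 * valid bit of the two group execution mask registers. */
	union aqm_grp_execmsk_lo lo = { .value = 0ULL };
	union aqm_grp_execmsk_hi hi = { .value = 0ULL };

	lo.exec_0_to_39 = 0xFFFFFFFFFFULL;	/* AE engines 0..39 */
	hi.exec_40_to_79 = 0xFFFFFFFFFFULL;	/* AE engines 40..79 */
	nitrox_write_csr(ndev, AQM_GRP_EXECMSK_LOX(DEFAULT_AE_GROUP), lo.value);
	nitrox_write_csr(ndev, AQM_GRP_EXECMSK_HIX(DEFAULT_AE_GROUP), hi.value);
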
Signed-off-by: Phani Kiran Hemadri Reviewed-by: Srikanth Jampala Signed-off-by: Herbert Xu --- drivers/crypto/cavium/nitrox/nitrox_csr.h | 124 +++++++++++++++++++++- drivers/crypto/cavium/nitrox/nitrox_debugfs.c | 3 +- drivers/crypto/cavium/nitrox/nitrox_dev.h | 4 +- drivers/crypto/cavium/nitrox/nitrox_main.c | 144 +++++++++++++++++++++----- 4 files changed, 244 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/cavium/nitrox/nitrox_csr.h b/drivers/crypto/cavium/nitrox/nitrox_csr.h index a2a452642b38..da1d73303780 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_csr.h +++ b/drivers/crypto/cavium/nitrox/nitrox_csr.h @@ -40,9 +40,77 @@ #define EMU_FUSE_MAPX(_i) (0x1402708 + ((_i) * 0x40000)) /* UCD registers */ +#define UCD_SE_EID_UCODE_BLOCK_NUMX(_i) (0x12C0000 + ((_i) * 0x1000)) +#define UCD_AE_EID_UCODE_BLOCK_NUMX(_i) (0x12C0008 + ((_i) * 0x800)) #define UCD_UCODE_LOAD_BLOCK_NUM 0x12C0010 #define UCD_UCODE_LOAD_IDX_DATAX(_i) (0x12C0018 + ((_i) * 0x20)) -#define UCD_SE_EID_UCODE_BLOCK_NUMX(_i) (0x12C0000 + ((_i) * 0x1000)) +#define UCD_SE_CNTX(_i) (0x12C0040 + ((_i) * 0x1000)) +#define UCD_AE_CNTX(_i) (0x12C0048 + ((_i) * 0x800)) + +/* AQM registers */ +#define AQM_CTL 0x1300000 +#define AQM_INT 0x1300008 +#define AQM_DBELL_OVF_LO 0x1300010 +#define AQM_DBELL_OVF_HI 0x1300018 +#define AQM_DBELL_OVF_LO_W1S 0x1300020 +#define AQM_DBELL_OVF_LO_ENA_W1C 0x1300028 +#define AQM_DBELL_OVF_LO_ENA_W1S 0x1300030 +#define AQM_DBELL_OVF_HI_W1S 0x1300038 +#define AQM_DBELL_OVF_HI_ENA_W1C 0x1300040 +#define AQM_DBELL_OVF_HI_ENA_W1S 0x1300048 +#define AQM_DMA_RD_ERR_LO 0x1300050 +#define AQM_DMA_RD_ERR_HI 0x1300058 +#define AQM_DMA_RD_ERR_LO_W1S 0x1300060 +#define AQM_DMA_RD_ERR_LO_ENA_W1C 0x1300068 +#define AQM_DMA_RD_ERR_LO_ENA_W1S 0x1300070 +#define AQM_DMA_RD_ERR_HI_W1S 0x1300078 +#define AQM_DMA_RD_ERR_HI_ENA_W1C 0x1300080 +#define AQM_DMA_RD_ERR_HI_ENA_W1S 0x1300088 +#define AQM_EXEC_NA_LO 0x1300090 +#define AQM_EXEC_NA_HI 0x1300098 +#define AQM_EXEC_NA_LO_W1S 0x13000A0 +#define AQM_EXEC_NA_LO_ENA_W1C 0x13000A8 +#define AQM_EXEC_NA_LO_ENA_W1S 0x13000B0 +#define AQM_EXEC_NA_HI_W1S 0x13000B8 +#define AQM_EXEC_NA_HI_ENA_W1C 0x13000C0 +#define AQM_EXEC_NA_HI_ENA_W1S 0x13000C8 +#define AQM_EXEC_ERR_LO 0x13000D0 +#define AQM_EXEC_ERR_HI 0x13000D8 +#define AQM_EXEC_ERR_LO_W1S 0x13000E0 +#define AQM_EXEC_ERR_LO_ENA_W1C 0x13000E8 +#define AQM_EXEC_ERR_LO_ENA_W1S 0x13000F0 +#define AQM_EXEC_ERR_HI_W1S 0x13000F8 +#define AQM_EXEC_ERR_HI_ENA_W1C 0x1300100 +#define AQM_EXEC_ERR_HI_ENA_W1S 0x1300108 +#define AQM_ECC_INT 0x1300110 +#define AQM_ECC_INT_W1S 0x1300118 +#define AQM_ECC_INT_ENA_W1C 0x1300120 +#define AQM_ECC_INT_ENA_W1S 0x1300128 +#define AQM_ECC_CTL 0x1300130 +#define AQM_BIST_STATUS 0x1300138 +#define AQM_CMD_INF_THRX(x) (0x1300400 + ((x) * 0x8)) +#define AQM_CMD_INFX(x) (0x1300800 + ((x) * 0x8)) +#define AQM_GRP_EXECMSK_LOX(x) (0x1300C00 + ((x) * 0x10)) +#define AQM_GRP_EXECMSK_HIX(x) (0x1300C08 + ((x) * 0x10)) +#define AQM_ACTIVITY_STAT_LO 0x1300C80 +#define AQM_ACTIVITY_STAT_HI 0x1300C88 +#define AQM_Q_CMD_PROCX(x) (0x1301000 + ((x) * 0x8)) +#define AQM_PERF_CTL_LO 0x1301400 +#define AQM_PERF_CTL_HI 0x1301408 +#define AQM_PERF_CNT 0x1301410 + +#define AQMQ_DRBLX(x) (0x20000 + ((x) * 0x40000)) +#define AQMQ_QSZX(x) (0x20008 + ((x) * 0x40000)) +#define AQMQ_BADRX(x) (0x20010 + ((x) * 0x40000)) +#define AQMQ_NXT_CMDX(x) (0x20018 + ((x) * 0x40000)) +#define AQMQ_CMD_CNTX(x) (0x20020 + ((x) * 0x40000)) +#define AQMQ_CMP_THRX(x) (0x20028 + ((x) * 0x40000)) +#define 
AQMQ_CMP_CNTX(x) (0x20030 + ((x) * 0x40000)) +#define AQMQ_TIM_LDX(x) (0x20038 + ((x) * 0x40000)) +#define AQMQ_TIMERX(x) (0x20040 + ((x) * 0x40000)) +#define AQMQ_ENX(x) (0x20048 + ((x) * 0x40000)) +#define AQMQ_ACTIVITY_STATX(x) (0x20050 + ((x) * 0x40000)) +#define AQM_VF_CMP_STATX(x) (0x28000 + ((x) * 0x40000)) /* NPS core registers */ #define NPS_CORE_GBL_VFCFG 0x1000000 @@ -134,6 +202,60 @@ /* PEM registers */ #define PEM0_INT 0x1080428 +/** + * struct ucd_core_eid_ucode_block_num - Core Eid to Ucode Blk Mapping Registers + * @ucode_len: Ucode length identifier 32KB or 64KB + * @ucode_blk: Ucode Block Number + */ +union ucd_core_eid_ucode_block_num { + u64 value; + struct { +#if (defined(__BIG_ENDIAN_BITFIELD)) + u64 raz_4_63 : 60; + u64 ucode_len : 1; + u64 ucode_blk : 3; +#else + u64 ucode_blk : 3; + u64 ucode_len : 1; + u64 raz_4_63 : 60; +#endif + }; +}; + +/** + * struct aqm_grp_execmsk_lo - Available AE engines for the group + * @exec_0_to_39: AE engines 0 to 39 status + */ +union aqm_grp_execmsk_lo { + u64 value; + struct { +#if (defined(__BIG_ENDIAN_BITFIELD)) + u64 raz_40_63 : 24; + u64 exec_0_to_39 : 40; +#else + u64 exec_0_to_39 : 40; + u64 raz_40_63 : 24; +#endif + }; +}; + +/** + * struct aqm_grp_execmsk_hi - Available AE engines for the group + * @exec_40_to_79: AE engines 40 to 79 status + */ +union aqm_grp_execmsk_hi { + u64 value; + struct { +#if (defined(__BIG_ENDIAN_BITFIELD)) + u64 raz_40_63 : 24; + u64 exec_40_to_79 : 40; +#else + u64 exec_40_to_79 : 40; + u64 raz_40_63 : 24; +#endif + }; +}; + /** * struct emu_fuse_map - EMU Fuse Map Registers * @ae_fuse: Fuse settings for AE 19..0 diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c index 848ec93d4333..16f7d0bd1303 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c +++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c @@ -9,7 +9,8 @@ static int firmware_show(struct seq_file *s, void *v) { struct nitrox_device *ndev = s->private; - seq_printf(s, "Version: %s\n", ndev->hw.fw_name); + seq_printf(s, "Version: %s\n", ndev->hw.fw_name[0]); + seq_printf(s, "Version: %s\n", ndev->hw.fw_name[1]); return 0; } diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h index 0338877b828f..5ee98eca728c 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_dev.h +++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h @@ -10,6 +10,8 @@ #define VERSION_LEN 32 /* Maximum queues in PF mode */ #define MAX_PF_QUEUES 64 +/* Maximum UCD Blocks */ +#define CNN55XX_MAX_UCD_BLOCKS 8 /** * struct nitrox_cmdq - NITROX command queue @@ -74,7 +76,7 @@ struct nitrox_cmdq { */ struct nitrox_hw { char partname[IFNAMSIZ * 2]; - char fw_name[VERSION_LEN]; + char fw_name[CNN55XX_MAX_UCD_BLOCKS][VERSION_LEN]; int freq; u16 vendor_id; diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c index fe825d0ef9ca..345d3ea10b1f 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_main.c +++ b/drivers/crypto/cavium/nitrox/nitrox_main.c @@ -17,12 +17,17 @@ #define CNN55XX_DEV_ID 0x12 #define UCODE_HLEN 48 -#define SE_GROUP 0 +#define DEFAULT_SE_GROUP 0 +#define DEFAULT_AE_GROUP 0 -#define DRIVER_VERSION "1.1" +#define DRIVER_VERSION "1.2" +#define CNN55XX_UCD_BLOCK_SIZE 32768 +#define CNN55XX_MAX_UCODE_SIZE (CNN55XX_UCD_BLOCK_SIZE * 2) #define FW_DIR "cavium/" /* SE microcode */ #define SE_FW FW_DIR "cnn55xx_se.fw" +/* AE microcode */ +#define AE_FW FW_DIR "cnn55xx_ae.fw" static const char nitrox_driver_name[] = "CNN55XX"; @@ 
-72,10 +77,10 @@ struct ucode { /** * write_to_ucd_unit - Write Firmware to NITROX UCD unit */ -static void write_to_ucd_unit(struct nitrox_device *ndev, - struct ucode *ucode) +static void write_to_ucd_unit(struct nitrox_device *ndev, u32 ucode_size, + u64 *ucode_data, int block_num) { - u32 code_size = be32_to_cpu(ucode->code_size) * 2; + u32 code_size; u64 offset, data; int i = 0; @@ -96,11 +101,12 @@ static void write_to_ucd_unit(struct nitrox_device *ndev, /* set the block number */ offset = UCD_UCODE_LOAD_BLOCK_NUM; - nitrox_write_csr(ndev, offset, 0); + nitrox_write_csr(ndev, offset, block_num); + code_size = ucode_size; code_size = roundup(code_size, 8); while (code_size) { - data = ucode->code[i]; + data = ucode_data[i]; /* write 8 bytes at a time */ offset = UCD_UCODE_LOAD_IDX_DATAX(i); nitrox_write_csr(ndev, offset, data); @@ -108,29 +114,74 @@ static void write_to_ucd_unit(struct nitrox_device *ndev, i++; } - /* put all SE cores in group 0 */ - offset = POM_GRP_EXECMASKX(SE_GROUP); - nitrox_write_csr(ndev, offset, (~0ULL)); - - for (i = 0; i < ndev->hw.se_cores; i++) { - /* - * write block number and firware length - * bit:<2:0> block number - * bit:3 is set SE uses 32KB microcode - * bit:3 is clear SE uses 64KB microcode - */ - offset = UCD_SE_EID_UCODE_BLOCK_NUMX(i); - nitrox_write_csr(ndev, offset, 0x8); - } usleep_range(300, 400); } -static int nitrox_load_fw(struct nitrox_device *ndev, const char *fw_name) +static int nitrox_load_fw(struct nitrox_device *ndev) { const struct firmware *fw; + const char *fw_name; struct ucode *ucode; - int ret; + u64 *ucode_data; + u64 offset; + union ucd_core_eid_ucode_block_num core_2_eid_val; + union aqm_grp_execmsk_lo aqm_grp_execmask_lo; + union aqm_grp_execmsk_hi aqm_grp_execmask_hi; + u32 ucode_size; + int ret, i = 0; + + fw_name = SE_FW; + dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name); + + ret = request_firmware(&fw, fw_name, DEV(ndev)); + if (ret < 0) { + dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name); + return ret; + } + + ucode = (struct ucode *)fw->data; + + ucode_size = be32_to_cpu(ucode->code_size) * 2; + if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) { + dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n", + ucode_size, fw_name); + release_firmware(fw); + return -EINVAL; + } + ucode_data = ucode->code; + + /* copy the firmware version */ + memcpy(&ndev->hw.fw_name[0][0], ucode->version, (VERSION_LEN - 2)); + ndev->hw.fw_name[0][VERSION_LEN - 1] = '\0'; + + /* Load SE Firmware on UCD Block 0 */ + write_to_ucd_unit(ndev, ucode_size, ucode_data, 0); + release_firmware(fw); + + /* put all SE cores in DEFAULT_SE_GROUP */ + offset = POM_GRP_EXECMASKX(DEFAULT_SE_GROUP); + nitrox_write_csr(ndev, offset, (~0ULL)); + + /* write block number and firmware length + * bit:<2:0> block number + * bit:3 is set SE uses 32KB microcode + * bit:3 is clear SE uses 64KB microcode + */ + core_2_eid_val.value = 0ULL; + core_2_eid_val.ucode_blk = 0; + if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE) + core_2_eid_val.ucode_len = 1; + else + core_2_eid_val.ucode_len = 0; + + for (i = 0; i < ndev->hw.se_cores; i++) { + offset = UCD_SE_EID_UCODE_BLOCK_NUMX(i); + nitrox_write_csr(ndev, offset, core_2_eid_val.value); + } + + + fw_name = AE_FW; dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name); ret = request_firmware(&fw, fw_name, DEV(ndev)); @@ -140,13 +191,50 @@ static int nitrox_load_fw(struct nitrox_device *ndev, const char *fw_name) } ucode = (struct ucode *)fw->data; + + ucode_size = 
be32_to_cpu(ucode->code_size) * 2; + if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) { + dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n", + ucode_size, fw_name); + release_firmware(fw); + return -EINVAL; + } + ucode_data = ucode->code; + /* copy the firmware version */ - memcpy(ndev->hw.fw_name, ucode->version, (VERSION_LEN - 2)); - ndev->hw.fw_name[VERSION_LEN - 1] = '\0'; + memcpy(&ndev->hw.fw_name[1][0], ucode->version, (VERSION_LEN - 2)); + ndev->hw.fw_name[1][VERSION_LEN - 1] = '\0'; + + /* Load AE Firmware on UCD Block 2 */ + write_to_ucd_unit(ndev, ucode_size, ucode_data, 2); - write_to_ucd_unit(ndev, ucode); release_firmware(fw); + /* put all AE cores in DEFAULT_AE_GROUP */ + offset = AQM_GRP_EXECMSK_LOX(DEFAULT_AE_GROUP); + aqm_grp_execmask_lo.exec_0_to_39 = 0xFFFFFFFFFFULL; + nitrox_write_csr(ndev, offset, aqm_grp_execmask_lo.value); + offset = AQM_GRP_EXECMSK_HIX(DEFAULT_AE_GROUP); + aqm_grp_execmask_hi.exec_40_to_79 = 0xFFFFFFFFFFULL; + nitrox_write_csr(ndev, offset, aqm_grp_execmask_hi.value); + + /* write block number and firmware length + * bit:<2:0> block number + * bit:3 is set SE uses 32KB microcode + * bit:3 is clear SE uses 64KB microcode + */ + core_2_eid_val.value = 0ULL; + core_2_eid_val.ucode_blk = 0; + if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE) + core_2_eid_val.ucode_len = 1; + else + core_2_eid_val.ucode_len = 0; + + for (i = 0; i < ndev->hw.ae_cores; i++) { + offset = UCD_AE_EID_UCODE_BLOCK_NUMX(i); + nitrox_write_csr(ndev, offset, core_2_eid_val.value); + } + return 0; } @@ -309,8 +397,8 @@ static int nitrox_pf_hw_init(struct nitrox_device *ndev) nitrox_config_lbc_unit(ndev); nitrox_config_rand_unit(ndev); - /* load firmware on SE cores */ - err = nitrox_load_fw(ndev, SE_FW); + /* load firmware on cores */ + err = nitrox_load_fw(ndev); if (err) return err; -- cgit From 2a03e3a50a61494fd26d82eabd43a26be0f736cc Mon Sep 17 00:00:00 2001 From: "Hook, Gary" Date: Wed, 10 Jul 2019 21:45:37 +0000 Subject: crypto: ccp - Include the module name in system log messages Redefine pr_fmt so that the module name is prefixed to every log message produced by the ccp-crypto module Signed-off-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-crypto.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index 25409cea8465..599f7f2820a0 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -24,6 +24,10 @@ #include #include +/* We want the module name in front of our messages */ +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #define CCP_LOG_LEVEL KERN_INFO #define CCP_CRA_PRIORITY 300 -- cgit From 72c8117adfced37df101c8c0b3f363e0906f83f0 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 12 Jul 2019 10:59:24 +0200 Subject: crypto: ccp - Reduce maximum stack usage Each of the operations in ccp_run_cmd() needs several hundred bytes of kernel stack. Depending on the inlining, these may need separate stack slots that add up to more than the warning limit, as shown in this clang based build: drivers/crypto/ccp/ccp-ops.c:871:12: error: stack frame size of 1164 bytes in function 'ccp_run_aes_cmd' [-Werror,-Wframe-larger-than=] static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) The problem may also happen when there is no warning, e.g. in the ccp_run_cmd()->ccp_run_aes_cmd()->ccp_run_aes_gcm_cmd() call chain with over 2000 bytes. 
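As a toy illustration of the inlining effect (hypothetical code, not from this driver), if both helpers below were inlined into dispatch(), their large locals could occupy separate slots in dispatch()'s frame and their sizes would add up; the 'noinline_for_stack' annotation (a kernel alias for noinline) keeps each one in its own short-lived frame instead:

#include <linux/compiler.h>

struct big_ctx { char buf[512]; };

static noinline_for_stack int helper_a(void)
{
	struct big_ctx a = {};	/* stack space lives only while helper_a() runs */
	return a.buf[0];
}

static noinline_for_stack int helper_b(void)
{
	struct big_ctx b = {};	/* likewise, confined to helper_b()'s frame */
	return b.buf[0];
}

static int dispatch(int sel)
{
	return sel ? helper_a() : helper_b();
}
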
Mark each individual function as 'noinline_for_stack' to prevent this from happening, and move the calls to the two special cases for aes into the top-level function. This will keep the actual combined stack usage to the minimum: 828 bytes for ccp_run_aes_gcm_cmd() and at most 524 bytes for each of the other cases. Fixes: 63b945091a07 ("crypto: ccp - CCP device driver and interface support") Signed-off-by: Arnd Bergmann Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-ops.c | 52 ++++++++++++++++++++++++++------------ 1 file changed, 31 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index c69ed4bae2eb..acf4f653f25b 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -455,8 +455,8 @@ static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q, return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true); } -static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, - struct ccp_cmd *cmd) +static noinline_for_stack int +ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_aes_engine *aes = &cmd->u.aes; struct ccp_dm_workarea key, ctx; @@ -611,8 +611,8 @@ e_key: return ret; } -static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, - struct ccp_cmd *cmd) +static noinline_for_stack int +ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_aes_engine *aes = &cmd->u.aes; struct ccp_dm_workarea key, ctx, final_wa, tag; @@ -879,7 +879,8 @@ e_key: return ret; } -static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +static noinline_for_stack int +ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_aes_engine *aes = &cmd->u.aes; struct ccp_dm_workarea key, ctx; @@ -889,12 +890,6 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) bool in_place = false; int ret; - if (aes->mode == CCP_AES_MODE_CMAC) - return ccp_run_aes_cmac_cmd(cmd_q, cmd); - - if (aes->mode == CCP_AES_MODE_GCM) - return ccp_run_aes_gcm_cmd(cmd_q, cmd); - if (!((aes->key_len == AES_KEYSIZE_128) || (aes->key_len == AES_KEYSIZE_192) || (aes->key_len == AES_KEYSIZE_256))) @@ -1061,8 +1056,8 @@ e_key: return ret; } -static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, - struct ccp_cmd *cmd) +static noinline_for_stack int +ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_xts_aes_engine *xts = &cmd->u.xts; struct ccp_dm_workarea key, ctx; @@ -1261,7 +1256,8 @@ e_key: return ret; } -static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +static noinline_for_stack int +ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_des3_engine *des3 = &cmd->u.des3; @@ -1457,7 +1453,8 @@ e_key: return ret; } -static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +static noinline_for_stack int +ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_sha_engine *sha = &cmd->u.sha; struct ccp_dm_workarea ctx; @@ -1801,7 +1798,8 @@ e_ctx: return ret; } -static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +static noinline_for_stack int +ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_rsa_engine *rsa = &cmd->u.rsa; struct ccp_dm_workarea exp, src, dst; @@ -1932,8 +1930,8 @@ e_sb: return ret; } -static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, - struct ccp_cmd *cmd) +static noinline_for_stack int 
+ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_passthru_engine *pt = &cmd->u.passthru; struct ccp_dm_workarea mask; @@ -2064,7 +2062,8 @@ e_mask: return ret; } -static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q, +static noinline_for_stack int +ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap; @@ -2405,7 +2404,8 @@ e_src: return ret; } -static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +static noinline_for_stack int +ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_ecc_engine *ecc = &cmd->u.ecc; @@ -2442,7 +2442,17 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) switch (cmd->engine) { case CCP_ENGINE_AES: - ret = ccp_run_aes_cmd(cmd_q, cmd); + switch (cmd->u.aes.mode) { + case CCP_AES_MODE_CMAC: + ret = ccp_run_aes_cmac_cmd(cmd_q, cmd); + break; + case CCP_AES_MODE_GCM: + ret = ccp_run_aes_gcm_cmd(cmd_q, cmd); + break; + default: + ret = ccp_run_aes_cmd(cmd_q, cmd); + break; + } break; case CCP_ENGINE_XTS_AES_128: ret = ccp_run_xts_aes_cmd(cmd_q, cmd); -- cgit From d10d094cbfd338f361a83a82e5d99feb7933008d Mon Sep 17 00:00:00 2001 From: Anson Huang Date: Wed, 17 Jul 2019 17:04:37 +0800 Subject: hwrng: imx-rngc - use devm_platform_ioremap_resource() to simplify code Use the new helper devm_platform_ioremap_resource() which wraps the platform_get_resource() and devm_ioremap_resource() together, to simplify the code. Signed-off-by: Anson Huang Acked-by: Arnd Bergmann Reviewed-by: Dong Aisheng Signed-off-by: Herbert Xu --- drivers/char/hw_random/imx-rngc.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c index 69f537980004..30cf00f8e9a0 100644 --- a/drivers/char/hw_random/imx-rngc.c +++ b/drivers/char/hw_random/imx-rngc.c @@ -196,7 +196,6 @@ static int imx_rngc_init(struct hwrng *rng) static int imx_rngc_probe(struct platform_device *pdev) { struct imx_rngc *rngc; - struct resource *res; int ret; int irq; @@ -204,8 +203,7 @@ static int imx_rngc_probe(struct platform_device *pdev) if (!rngc) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - rngc->base = devm_ioremap_resource(&pdev->dev, res); + rngc->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(rngc->base)) return PTR_ERR(rngc->base); -- cgit From f2f1d75ab725106c4958bda30f87bd39a0e3040a Mon Sep 17 00:00:00 2001 From: Anson Huang Date: Wed, 17 Jul 2019 17:04:38 +0800 Subject: hwrng: mxc-rnga - use devm_platform_ioremap_resource() to simplify code Use the new helper devm_platform_ioremap_resource() which wraps the platform_get_resource() and devm_ioremap_resource() together, to simplify the code. 
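For reference, the helper collapses the two-step lookup-and-map pattern into a single call; a minimal probe sketch (foo_probe is a hypothetical name, not code from either driver):

#include <linux/platform_device.h>
#include <linux/err.h>

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* was: platform_get_resource(pdev, IORESOURCE_MEM, 0) followed by
	 * devm_ioremap_resource(&pdev->dev, res)
	 */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* device-specific setup would continue here */
	return 0;
}
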
Signed-off-by: Anson Huang Reviewed-by: Dong Aisheng Signed-off-by: Herbert Xu --- drivers/char/hw_random/mxc-rnga.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c index ea2bf18b1fbb..025083c838f5 100644 --- a/drivers/char/hw_random/mxc-rnga.c +++ b/drivers/char/hw_random/mxc-rnga.c @@ -134,7 +134,6 @@ static void mxc_rnga_cleanup(struct hwrng *rng) static int __init mxc_rnga_probe(struct platform_device *pdev) { int err; - struct resource *res; struct mxc_rng *mxc_rng; mxc_rng = devm_kzalloc(&pdev->dev, sizeof(*mxc_rng), GFP_KERNEL); @@ -158,8 +157,7 @@ static int __init mxc_rnga_probe(struct platform_device *pdev) if (err) return err; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mxc_rng->mem = devm_ioremap_resource(&pdev->dev, res); + mxc_rng->mem = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mxc_rng->mem)) { err = PTR_ERR(mxc_rng->mem); goto err_ioremap; -- cgit From 6ed01097f4378199614fe625495657af7c601cea Mon Sep 17 00:00:00 2001 From: Vakul Garg Date: Thu, 18 Jul 2019 11:29:16 +0000 Subject: crypto: caam/qi2 - Increase napi budget to process more caam responses MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While running ipsec processing for traffic through multiple network interfaces, it is observed that the caam driver gets less time to poll responses from the caam block than the ethernet driver does. This is because the ethernet driver has as many napi instances per cpu as there are ethernet interfaces in the system, so the caam driver's napi executes less often than the ethernet driver's napi instances. As a result, we end up submitting more requests to caam (which it is able to finish off quite fast) than we dequeue responses, and the caam response FQs bloat with a large number of frames. In some situations this makes the kernel crash due to out-of-memory. To prevent this, we increase the napi budget of the dpseci driver to a large value so that the caam driver is able to drain its response queues at a sufficient rate. Signed-off-by: Vakul Garg Reviewed-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg_qi2.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h index be5085451053..973f6296bc6f 100644 --- a/drivers/crypto/caam/caamalg_qi2.h +++ b/drivers/crypto/caam/caamalg_qi2.h @@ -15,7 +15,7 @@ #define DPAA2_CAAM_STORE_SIZE 16 /* NAPI weight *must* be a multiple of the store size. */ -#define DPAA2_CAAM_NAPI_WEIGHT 64 +#define DPAA2_CAAM_NAPI_WEIGHT 512 /* The congestion entrance threshold was chosen so that on LS2088 * we support the maximum throughput for the available memory -- cgit From bfb5eb084ae93eb86d3b088c301500db138963e5 Mon Sep 17 00:00:00 2001 From: Chuhong Yuan Date: Thu, 18 Jul 2019 21:16:09 +0800 Subject: crypto: ccp - Replace dma_pool_alloc + memset with dma_pool_zalloc Use dma_pool_zalloc instead of dma_pool_alloc followed by a memset to zero the allocated memory. This simplifies the code. 
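Shown in isolation, the transformation mirrors the hunk below (wa is the driver's workarea; the error handling is unchanged):

	/* before: allocate, then clear by hand */
	wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL, &wa->dma.address);
	if (!wa->address)
		return -ENOMEM;
	memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);

	/* after: a single call that returns zeroed memory */
	wa->address = dma_pool_zalloc(wa->dma_pool, GFP_KERNEL, &wa->dma.address);
	if (!wa->address)
		return -ENOMEM;
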
Signed-off-by: Chuhong Yuan Acked-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-ops.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index acf4f653f25b..42d167574131 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -150,14 +150,13 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa, if (len <= CCP_DMAPOOL_MAX_SIZE) { wa->dma_pool = cmd_q->dma_pool; - wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL, + wa->address = dma_pool_zalloc(wa->dma_pool, GFP_KERNEL, &wa->dma.address); if (!wa->address) return -ENOMEM; wa->dma.length = CCP_DMAPOOL_MAX_SIZE; - memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE); } else { wa->address = kzalloc(len, GFP_KERNEL); if (!wa->address) -- cgit From b93ecf4296742c36192621645f59e4a607263233 Mon Sep 17 00:00:00 2001 From: Iuliana Prodan Date: Fri, 19 Jul 2019 10:09:23 +0300 Subject: crypto: ccree - check assoclen for rfc4543 Check assoclen to solve the extra tests that expect -EINVAL to be returned when the associated data size is not valid. Validated assoclen for RFC4543 which expects an assoclen of 16 or 20, the same as RFC4106. Based on seqiv, IPsec ESP and RFC4543/RFC4106 the assoclen is sizeof IP Header (spi, seq_no, extended seq_no) and IV len. This can be 16 or 20 bytes. Signed-off-by: Iuliana Prodan Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_aead.c | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index a9914ea6c95f..ce302adc76c7 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c @@ -2272,9 +2272,16 @@ out: static int cc_rfc4543_gcm_encrypt(struct aead_request *req) { /* Very similar to cc_aead_encrypt() above. */ - + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); + struct device *dev = drvdata_to_dev(ctx->drvdata); struct aead_req_ctx *areq_ctx = aead_request_ctx(req); - int rc; + int rc = -EINVAL; + + if (!valid_assoclen(req)) { + dev_err(dev, "invalid Assoclen:%u\n", req->assoclen); + goto out; + } memset(areq_ctx, 0, sizeof(*areq_ctx)); @@ -2291,7 +2298,7 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req) rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT); if (rc != -EINPROGRESS && rc != -EBUSY) req->iv = areq_ctx->backup_iv; - +out: return rc; } @@ -2330,9 +2337,16 @@ out: static int cc_rfc4543_gcm_decrypt(struct aead_request *req) { /* Very similar to cc_aead_decrypt() above. 
*/ - + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); + struct device *dev = drvdata_to_dev(ctx->drvdata); struct aead_req_ctx *areq_ctx = aead_request_ctx(req); - int rc; + int rc = -EINVAL; + + if (!valid_assoclen(req)) { + dev_err(dev, "invalid Assoclen:%u\n", req->assoclen); + goto out; + } memset(areq_ctx, 0, sizeof(*areq_ctx)); @@ -2349,7 +2363,7 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req) rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT); if (rc != -EINPROGRESS && rc != -EBUSY) req->iv = areq_ctx->backup_iv; - +out: return rc; } -- cgit From b3553effafa027feca33e0aa357fce08b3c80b02 Mon Sep 17 00:00:00 2001 From: Iuliana Prodan Date: Fri, 19 Jul 2019 10:09:24 +0300 Subject: crypto: bcm - check assoclen for rfc4543/rfc4106 Validated assoclen for RFC4543 which expects an assoclen of 16 or 20, the same as RFC4106. Based on seqiv, IPsec ESP and RFC4543/RFC4106 the assoclen is sizeof IP Header (spi, seq_no, extended seq_no) and IV len. This can be 16 or 20 bytes. Signed-off-by: Iuliana Prodan Signed-off-by: Herbert Xu --- drivers/crypto/bcm/cipher.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 869602fcfd96..1c23e452700b 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -2629,6 +2629,19 @@ static int aead_need_fallback(struct aead_request *req) return 1; } + /* + * RFC4106 and RFC4543 cannot handle the case where AAD is other than + * 16 or 20 bytes long. So use fallback in this case. + */ + if (ctx->cipher.mode == CIPHER_MODE_GCM && + ctx->cipher.alg == CIPHER_ALG_AES && + rctx->iv_ctr_len == GCM_RFC4106_IV_SIZE && + req->assoclen != 16 && req->assoclen != 20) { + flow_log("RFC4106/RFC4543 needs fallback for assoclen" + " other than 16 or 20 bytes\n"); + return 1; + } + payload_len = req->cryptlen; if (spu->spu_type == SPU_TYPE_SPUM) payload_len += req->assoclen; -- cgit From 8dfa20fcfbeb245642dfe3a43f8a3735d9aed42a Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 19 Jul 2019 23:09:18 -0700 Subject: crypto: ghash - add comment and improve help text To help avoid confusion, add a comment to ghash-generic.c which explains the convention that the kernel's implementation of GHASH uses. Also update the Kconfig help text and module descriptions to call GHASH a "hash function" rather than a "message digest", since the latter normally means a real cryptographic hash function, which GHASH is not. Cc: Pascal Van Leeuwen Signed-off-by: Eric Biggers Reviewed-by: Ard Biesheuvel Acked-by: Pascal Van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 69d1bbd5d9bf..b8c50871f11b 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -189,12 +189,12 @@ config S390_PRNG It is available as of z9. config CRYPTO_GHASH_S390 - tristate "GHASH digest algorithm" + tristate "GHASH hash function" depends on S390 select CRYPTO_HASH help - This is the s390 hardware accelerated implementation of the - GHASH message digest algorithm for GCM (Galois/Counter Mode). + This is the s390 hardware accelerated implementation of GHASH, + the hash function used in GCM (Galois/Counter mode). It is available as of z196. 
-- cgit From 01970282a4ce0a049eddeb85bbe677e765111ee5 Mon Sep 17 00:00:00 2001 From: Chuhong Yuan Date: Tue, 23 Jul 2019 15:19:36 +0800 Subject: crypto: atmel-sha204a - Use device-managed registration API Use devm_hwrng_register to get rid of manual unregistration. Signed-off-by: Chuhong Yuan Acked-by: Ludovic Desroches Signed-off-by: Herbert Xu --- drivers/crypto/atmel-sha204a.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c index ea0d2068ea4f..c96c14e7dab1 100644 --- a/drivers/crypto/atmel-sha204a.c +++ b/drivers/crypto/atmel-sha204a.c @@ -109,7 +109,7 @@ static int atmel_sha204a_probe(struct i2c_client *client, i2c_priv->hwrng.read = atmel_sha204a_rng_read; i2c_priv->hwrng.quality = 1024; - ret = hwrng_register(&i2c_priv->hwrng); + ret = devm_hwrng_register(&client->dev, &i2c_priv->hwrng); if (ret) dev_warn(&client->dev, "failed to register RNG (%d)\n", ret); @@ -127,7 +127,6 @@ static int atmel_sha204a_remove(struct i2c_client *client) if (i2c_priv->hwrng.priv) kfree((void *)i2c_priv->hwrng.priv); - hwrng_unregister(&i2c_priv->hwrng); return 0; } -- cgit From 1b82feb6c5e1996513d0fb0bbb475417088b4954 Mon Sep 17 00:00:00 2001 From: Alexander Sverdlin Date: Tue, 23 Jul 2019 07:24:01 +0000 Subject: crypto: qat - Silence smp_processor_id() warning It seems that smp_processor_id() is only used for a best-effort load-balancing, refer to qat_crypto_get_instance_node(). It's not feasible to disable preemption for the duration of the crypto requests. Therefore, just silence the warning. This commit is similar to e7a9b05ca4 ("crypto: cavium - Fix smp_processor_id() warnings"). Silences the following splat: BUG: using smp_processor_id() in preemptible [00000000] code: cryptomgr_test/2904 caller is qat_alg_ablkcipher_setkey+0x300/0x4a0 [intel_qat] CPU: 1 PID: 2904 Comm: cryptomgr_test Tainted: P O 4.14.69 #1 ... Call Trace: dump_stack+0x5f/0x86 check_preemption_disabled+0xd3/0xe0 qat_alg_ablkcipher_setkey+0x300/0x4a0 [intel_qat] skcipher_setkey_ablkcipher+0x2b/0x40 __test_skcipher+0x1f3/0xb20 ? cpumask_next_and+0x26/0x40 ? find_busiest_group+0x10e/0x9d0 ? preempt_count_add+0x49/0xa0 ? try_module_get+0x61/0xf0 ? crypto_mod_get+0x15/0x30 ? __kmalloc+0x1df/0x1f0 ? __crypto_alloc_tfm+0x116/0x180 ? crypto_skcipher_init_tfm+0xa6/0x180 ? crypto_create_tfm+0x4b/0xf0 test_skcipher+0x21/0xa0 alg_test_skcipher+0x3f/0xa0 alg_test.part.6+0x126/0x2a0 ? finish_task_switch+0x21b/0x260 ? __schedule+0x1e9/0x800 ? __wake_up_common+0x8d/0x140 cryptomgr_test+0x40/0x50 kthread+0xff/0x130 ? cryptomgr_notify+0x540/0x540 ? 
kthread_create_on_node+0x70/0x70 ret_from_fork+0x24/0x50 Fixes: ed8ccaef52 ("crypto: qat - Add support for SRIOV") Cc: stable@vger.kernel.org Signed-off-by: Alexander Sverdlin Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/adf_common_drv.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index 5c4c0a253129..d78f8d5c89c3 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -95,7 +95,7 @@ struct service_hndl { static inline int get_current_node(void) { - return topology_physical_package_id(smp_processor_id()); + return topology_physical_package_id(raw_smp_processor_id()); } int adf_service_register(struct service_hndl *service); -- cgit From 1c0ab408bb6e16285fcddc9b4ce74507081d053f Mon Sep 17 00:00:00 2001 From: Vakul Garg Date: Tue, 23 Jul 2019 09:14:24 +0000 Subject: crypto: caam/qi2 - Add printing dpseci fq stats using debugfs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add support of printing the dpseci frame queue statistics using debugfs. Signed-off-by: Vakul Garg Reviewed-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/Makefile | 1 + drivers/crypto/caam/caamalg_qi2.c | 5 +++ drivers/crypto/caam/caamalg_qi2.h | 2 + drivers/crypto/caam/dpseci-debugfs.c | 79 ++++++++++++++++++++++++++++++++++++ drivers/crypto/caam/dpseci-debugfs.h | 18 ++++++++ 5 files changed, 105 insertions(+) create mode 100644 drivers/crypto/caam/dpseci-debugfs.c create mode 100644 drivers/crypto/caam/dpseci-debugfs.h (limited to 'drivers') diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile index 9ab4e81ea21e..68d5cc0f28e2 100644 --- a/drivers/crypto/caam/Makefile +++ b/drivers/crypto/caam/Makefile @@ -30,3 +30,4 @@ endif obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o dpaa2_caam-y := caamalg_qi2.o dpseci.o +dpaa2_caam-$(CONFIG_DEBUG_FS) += dpseci-debugfs.o diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 06bf32c32cbd..a78a36dfa7b9 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -15,6 +15,7 @@ #include "key_gen.h" #include "caamalg_desc.h" #include "caamhash_desc.h" +#include "dpseci-debugfs.h" #include #include #include @@ -5098,6 +5099,8 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev) goto err_bind; } + dpaa2_dpseci_debugfs_init(priv); + /* register crypto algorithms the device supports */ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { struct caam_skcipher_alg *t_alg = driver_algs + i; @@ -5265,6 +5268,8 @@ static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev) dev = &ls_dev->dev; priv = dev_get_drvdata(dev); + dpaa2_dpseci_debugfs_exit(priv); + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { struct caam_aead_alg *t_alg = driver_aeads + i; diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h index 973f6296bc6f..b450e2a25c1f 100644 --- a/drivers/crypto/caam/caamalg_qi2.h +++ b/drivers/crypto/caam/caamalg_qi2.h @@ -10,6 +10,7 @@ #include #include #include +#include #include "dpseci.h" #include "desc_constr.h" @@ -64,6 +65,7 @@ struct dpaa2_caam_priv { struct iommu_domain *domain; struct dpaa2_caam_priv_per_cpu __percpu *ppriv; + struct dentry *dfs_root; }; /** diff --git a/drivers/crypto/caam/dpseci-debugfs.c b/drivers/crypto/caam/dpseci-debugfs.c new file mode 100644 index 
000000000000..c5bfc923abd8 --- /dev/null +++ b/drivers/crypto/caam/dpseci-debugfs.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2019 NXP */ + +#include +#include +#include +#include "dpseci-debugfs.h" + +static int dpseci_dbg_fqs_show(struct seq_file *file, void *offset) +{ + struct dpaa2_caam_priv *priv = (struct dpaa2_caam_priv *)file->private; + u32 fqid, fcnt, bcnt; + int i, err; + + seq_printf(file, "FQ stats for %s:\n", dev_name(priv->dev)); + seq_printf(file, "%s%16s%16s\n", + "Rx-VFQID", + "Pending frames", + "Pending bytes"); + + for (i = 0; i < priv->num_pairs; i++) { + fqid = priv->rx_queue_attr[i].fqid; + err = dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt); + if (err) + continue; + + seq_printf(file, "%5d%16u%16u\n", fqid, fcnt, bcnt); + } + + seq_printf(file, "%s%16s%16s\n", + "Tx-VFQID", + "Pending frames", + "Pending bytes"); + + for (i = 0; i < priv->num_pairs; i++) { + fqid = priv->tx_queue_attr[i].fqid; + err = dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt); + if (err) + continue; + + seq_printf(file, "%5d%16u%16u\n", fqid, fcnt, bcnt); + } + + return 0; +} + +static int dpseci_dbg_fqs_open(struct inode *inode, struct file *file) +{ + int err; + struct dpaa2_caam_priv *priv; + + priv = (struct dpaa2_caam_priv *)inode->i_private; + + err = single_open(file, dpseci_dbg_fqs_show, priv); + if (err < 0) + dev_err(priv->dev, "single_open() failed\n"); + + return err; +} + +static const struct file_operations dpseci_dbg_fq_ops = { + .open = dpseci_dbg_fqs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv) +{ + priv->dfs_root = debugfs_create_dir(dev_name(priv->dev), NULL); + + debugfs_create_file("fq_stats", 0444, priv->dfs_root, priv, + &dpseci_dbg_fq_ops); +} + +void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv) +{ + debugfs_remove_recursive(priv->dfs_root); +} diff --git a/drivers/crypto/caam/dpseci-debugfs.h b/drivers/crypto/caam/dpseci-debugfs.h new file mode 100644 index 000000000000..bc22af7bec37 --- /dev/null +++ b/drivers/crypto/caam/dpseci-debugfs.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2019 NXP */ + +#ifndef DPSECI_DEBUGFS_H +#define DPSECI_DEBUGFS_H + +#include +#include "caamalg_qi2.h" + +#ifdef CONFIG_DEBUG_FS +void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv); +void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv); +#else +static inline void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv) {} +static inline void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv) {} +#endif /* CONFIG_DEBUG_FS */ + +#endif /* DPSECI_DEBUGFS_H */ -- cgit From 3bd4952853d69659e58d1d4a4964383cb536b5be Mon Sep 17 00:00:00 2001 From: Nishka Dasgupta Date: Wed, 24 Jul 2019 13:24:33 +0530 Subject: crypto: nx - Add of_node_put() before return in 842 Each iteration of for_each_compatible_node puts the previous node, but in the case of a return from the middle of the loop, there is no put, thus causing a memory leak. Add an of_node_put before the return. Issue found with Coccinelle. 
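The general shape of the fix, as in the one-line diff below, is this sketch ("vendor,device" and do_setup() are placeholders, not the driver's real identifiers):

#include <linux/of.h>

static int probe_all(void)
{
	struct device_node *dn;
	int ret;

	for_each_compatible_node(dn, NULL, "vendor,device") {
		ret = do_setup(dn);	/* placeholder for per-node work */
		if (ret) {
			/* the iterator holds a reference on 'dn'; it is
			 * normally dropped when the loop advances, so an
			 * early return must drop it explicitly
			 */
			of_node_put(dn);
			return ret;
		}
	}
	return 0;
}
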
Acked-by: Stewart Smith Signed-off-by: Nishka Dasgupta Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-842-powernv.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c index e78ff5c65ed6..c037a2403b82 100644 --- a/drivers/crypto/nx/nx-842-powernv.c +++ b/drivers/crypto/nx/nx-842-powernv.c @@ -1020,6 +1020,7 @@ static __init int nx842_powernv_init(void) ret = nx842_powernv_probe_vas(dn); if (ret) { nx842_delete_coprocs(); + of_node_put(dn); return ret; } } -- cgit From 3e75241be8081f22f7382f4041496b10a4d9aed0 Mon Sep 17 00:00:00 2001 From: Chuhong Yuan Date: Thu, 25 Jul 2019 16:01:55 +0800 Subject: hwrng: drivers - Use device-managed registration API MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use devm_hwrng_register to simplify the implementation. Manual unregistration and some remove functions can be removed now. Signed-off-by: Chuhong Yuan Acked-by: Łukasz Stelmach Acked-by: Ludovic Desroches Signed-off-by: Herbert Xu --- drivers/char/hw_random/atmel-rng.c | 3 +-- drivers/char/hw_random/cavium-rng-vf.c | 11 +---------- drivers/char/hw_random/exynos-trng.c | 3 +-- drivers/char/hw_random/n2-drv.c | 4 +--- drivers/char/hw_random/nomadik-rng.c | 3 +-- drivers/char/hw_random/omap-rng.c | 3 +-- drivers/char/hw_random/powernv-rng.c | 10 +--------- drivers/char/hw_random/st-rng.c | 4 +--- drivers/char/hw_random/xgene-rng.c | 4 +--- 9 files changed, 9 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c index 433426242b87..e55705745d5e 100644 --- a/drivers/char/hw_random/atmel-rng.c +++ b/drivers/char/hw_random/atmel-rng.c @@ -86,7 +86,7 @@ static int atmel_trng_probe(struct platform_device *pdev) trng->rng.name = pdev->name; trng->rng.read = atmel_trng_read; - ret = hwrng_register(&trng->rng); + ret = devm_hwrng_register(&pdev->dev, &trng->rng); if (ret) goto err_register; @@ -103,7 +103,6 @@ static int atmel_trng_remove(struct platform_device *pdev) { struct atmel_trng *trng = platform_get_drvdata(pdev); - hwrng_unregister(&trng->rng); atmel_trng_disable(trng); clk_disable_unprepare(trng->clk); diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c index 2d1352b67168..3de4a6a443ef 100644 --- a/drivers/char/hw_random/cavium-rng-vf.c +++ b/drivers/char/hw_random/cavium-rng-vf.c @@ -67,7 +67,7 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev, pci_set_drvdata(pdev, rng); - ret = hwrng_register(&rng->ops); + ret = devm_hwrng_register(&pdev->dev, &rng->ops); if (ret) { dev_err(&pdev->dev, "Error registering device as HWRNG.\n"); return ret; @@ -76,14 +76,6 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev, return 0; } -/* Remove the VF */ -static void cavium_rng_remove_vf(struct pci_dev *pdev) -{ - struct cavium_rng *rng; - - rng = pci_get_drvdata(pdev); - hwrng_unregister(&rng->ops); -} static const struct pci_device_id cavium_rng_vf_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa033), 0, 0, 0}, @@ -95,7 +87,6 @@ static struct pci_driver cavium_rng_vf_driver = { .name = "cavium_rng_vf", .id_table = cavium_rng_vf_id_table, .probe = cavium_rng_probe_vf, - .remove = cavium_rng_remove_vf, }; module_pci_driver(cavium_rng_vf_driver); diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c index 94235761955c..b4b52ab23b6b 100644 --- a/drivers/char/hw_random/exynos-trng.c +++ 
b/drivers/char/hw_random/exynos-trng.c @@ -153,7 +153,7 @@ static int exynos_trng_probe(struct platform_device *pdev) goto err_clock; } - ret = hwrng_register(&trng->rng); + ret = devm_hwrng_register(&pdev->dev, &trng->rng); if (ret) { dev_err(&pdev->dev, "Could not register hwrng device.\n"); goto err_register; @@ -179,7 +179,6 @@ static int exynos_trng_remove(struct platform_device *pdev) { struct exynos_trng_dev *trng = platform_get_drvdata(pdev); - hwrng_unregister(&trng->rng); clk_disable_unprepare(trng->clk); pm_runtime_put_sync(&pdev->dev); diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c index d4cab105796f..2d256b3470db 100644 --- a/drivers/char/hw_random/n2-drv.c +++ b/drivers/char/hw_random/n2-drv.c @@ -768,7 +768,7 @@ static int n2rng_probe(struct platform_device *op) np->hwrng.data_read = n2rng_data_read; np->hwrng.priv = (unsigned long) np; - err = hwrng_register(&np->hwrng); + err = devm_hwrng_register(&pdev->dev, &np->hwrng); if (err) goto out_hvapi_unregister; @@ -793,8 +793,6 @@ static int n2rng_remove(struct platform_device *op) cancel_delayed_work_sync(&np->work); - hwrng_unregister(&np->hwrng); - sun4v_hvapi_unregister(HV_GRP_RNG); return 0; diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c index fc0f6b0cb80d..74ed29f42e4f 100644 --- a/drivers/char/hw_random/nomadik-rng.c +++ b/drivers/char/hw_random/nomadik-rng.c @@ -57,7 +57,7 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id) if (!base) goto out_release; nmk_rng.priv = (unsigned long)base; - ret = hwrng_register(&nmk_rng); + ret = devm_hwrng_register(&dev->dev, &nmk_rng); if (ret) goto out_release; return 0; @@ -71,7 +71,6 @@ out_clk: static int nmk_rng_remove(struct amba_device *dev) { - hwrng_unregister(&nmk_rng); amba_release_regions(dev); clk_disable(rng_clk); return 0; diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index e9b6ac61fb7f..b27f39688b5e 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -500,7 +500,7 @@ static int omap_rng_probe(struct platform_device *pdev) if (ret) goto err_register; - ret = hwrng_register(&priv->rng); + ret = devm_hwrng_register(&pdev->dev, &priv->rng); if (ret) goto err_register; @@ -525,7 +525,6 @@ static int omap_rng_remove(struct platform_device *pdev) { struct omap_rng_dev *priv = platform_get_drvdata(pdev); - hwrng_unregister(&priv->rng); priv->pdata->cleanup(priv); diff --git a/drivers/char/hw_random/powernv-rng.c b/drivers/char/hw_random/powernv-rng.c index f2e8272e276a..8da1d7917bdc 100644 --- a/drivers/char/hw_random/powernv-rng.c +++ b/drivers/char/hw_random/powernv-rng.c @@ -33,18 +33,11 @@ static struct hwrng powernv_hwrng = { .read = powernv_rng_read, }; -static int powernv_rng_remove(struct platform_device *pdev) -{ - hwrng_unregister(&powernv_hwrng); - - return 0; -} - static int powernv_rng_probe(struct platform_device *pdev) { int rc; - rc = hwrng_register(&powernv_hwrng); + rc = devm_hwrng_register(&pdev->dev, &powernv_hwrng); if (rc) { /* We only register one device, ignore any others */ if (rc == -EEXIST) @@ -70,7 +63,6 @@ static struct platform_driver powernv_rng_driver = { .of_match_table = powernv_rng_match, }, .probe = powernv_rng_probe, - .remove = powernv_rng_remove, }; module_platform_driver(powernv_rng_driver); diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c index bd6a98b3479b..863448360a7d 100644 --- a/drivers/char/hw_random/st-rng.c +++ 
b/drivers/char/hw_random/st-rng.c @@ -102,7 +102,7 @@ static int st_rng_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, ddata); - ret = hwrng_register(&ddata->ops); + ret = devm_hwrng_register(&pdev->dev, &ddata->ops); if (ret) { dev_err(&pdev->dev, "Failed to register HW RNG\n"); clk_disable_unprepare(clk); @@ -118,8 +118,6 @@ static int st_rng_remove(struct platform_device *pdev) { struct st_rng_data *ddata = dev_get_drvdata(&pdev->dev); - hwrng_unregister(&ddata->ops); - clk_disable_unprepare(ddata->clk); return 0; diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c index 8c6f9f63da5e..7e568db87ae2 100644 --- a/drivers/char/hw_random/xgene-rng.c +++ b/drivers/char/hw_random/xgene-rng.c @@ -361,7 +361,7 @@ static int xgene_rng_probe(struct platform_device *pdev) xgene_rng_func.priv = (unsigned long) ctx; - rc = hwrng_register(&xgene_rng_func); + rc = devm_hwrng_register(&pdev->dev, &xgene_rng_func); if (rc) { dev_err(&pdev->dev, "RNG registering failed error %d\n", rc); if (!IS_ERR(ctx->clk)) @@ -375,7 +375,6 @@ static int xgene_rng_probe(struct platform_device *pdev) rc); if (!IS_ERR(ctx->clk)) clk_disable_unprepare(ctx->clk); - hwrng_unregister(&xgene_rng_func); return rc; } @@ -392,7 +391,6 @@ static int xgene_rng_remove(struct platform_device *pdev) dev_err(&pdev->dev, "RNG init wakeup failed error %d\n", rc); if (!IS_ERR(ctx->clk)) clk_disable_unprepare(ctx->clk); - hwrng_unregister(&xgene_rng_func); return rc; } -- cgit From 176435ad2ac7733014d6429369130ddb78f14ee0 Mon Sep 17 00:00:00 2001 From: Horia Geantă Date: Sun, 28 Jul 2019 22:26:38 +0300 Subject: crypto: caam - defer probing until QMan is available MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When QI (Queue Interface) support is enabled on DPAA 1.x platforms, defer probing if dependencies (QMan drivers) are not available yet. 
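The dependency check follows the tri-state convention of the QMan helpers, as the ctrl.c hunk below shows; the bare pattern is:

	ret = qman_is_probed();
	if (!ret)
		return -EPROBE_DEFER;	/* QMan not probed yet: retry later */
	else if (ret < 0)
		return -ENODEV;		/* QMan probe itself failed: give up */

(qman_portals_probed() is handled the same way.)
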
Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 74 ++++++++++++++++++++++++++++++---------------- 1 file changed, 48 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 4e43ca4d3656..e0590beae240 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -527,15 +527,54 @@ static int caam_probe(struct platform_device *pdev) dev_set_drvdata(dev, ctrlpriv); nprop = pdev->dev.of_node; + /* Get configuration properties from device tree */ + /* First, get register page */ + ctrl = of_iomap(nprop, 0); + if (!ctrl) { + dev_err(dev, "caam: of_iomap() failed\n"); + return -ENOMEM; + } + + caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) & + (CSTA_PLEND | CSTA_ALT_PLEND)); caam_imx = (bool)soc_device_match(imx_soc); + comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms); + caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2); + ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK); + +#ifdef CONFIG_CAAM_QI + /* If (DPAA 1.x) QI present, check whether dependencies are available */ + if (ctrlpriv->qi_present && !caam_dpaa2) { + ret = qman_is_probed(); + if (!ret) { + ret = -EPROBE_DEFER; + goto iounmap_ctrl; + } else if (ret < 0) { + dev_err(dev, "failing probe due to qman probe error\n"); + ret = -ENODEV; + goto iounmap_ctrl; + } + + ret = qman_portals_probed(); + if (!ret) { + ret = -EPROBE_DEFER; + goto iounmap_ctrl; + } else if (ret < 0) { + dev_err(dev, "failing probe due to qman portals probe error\n"); + ret = -ENODEV; + goto iounmap_ctrl; + } + } +#endif + /* Enable clocking */ clk = caam_drv_identify_clk(&pdev->dev, "ipg"); if (IS_ERR(clk)) { ret = PTR_ERR(clk); dev_err(&pdev->dev, "can't identify CAAM ipg clk: %d\n", ret); - return ret; + goto iounmap_ctrl; } ctrlpriv->caam_ipg = clk; @@ -547,7 +586,7 @@ static int caam_probe(struct platform_device *pdev) ret = PTR_ERR(clk); dev_err(&pdev->dev, "can't identify CAAM mem clk: %d\n", ret); - return ret; + goto iounmap_ctrl; } ctrlpriv->caam_mem = clk; } @@ -557,7 +596,7 @@ static int caam_probe(struct platform_device *pdev) ret = PTR_ERR(clk); dev_err(&pdev->dev, "can't identify CAAM aclk clk: %d\n", ret); - return ret; + goto iounmap_ctrl; } ctrlpriv->caam_aclk = clk; @@ -570,7 +609,7 @@ static int caam_probe(struct platform_device *pdev) ret = PTR_ERR(clk); dev_err(&pdev->dev, "can't identify CAAM emi_slow clk: %d\n", ret); - return ret; + goto iounmap_ctrl; } ctrlpriv->caam_emi_slow = clk; } @@ -578,7 +617,7 @@ static int caam_probe(struct platform_device *pdev) ret = clk_prepare_enable(ctrlpriv->caam_ipg); if (ret < 0) { dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret); - return ret; + goto iounmap_ctrl; } if (ctrlpriv->caam_mem) { @@ -605,25 +644,10 @@ static int caam_probe(struct platform_device *pdev) } } - /* Get configuration properties from device tree */ - /* First, get register page */ - ctrl = of_iomap(nprop, 0); - if (ctrl == NULL) { - dev_err(dev, "caam: of_iomap() failed\n"); - ret = -ENOMEM; - goto disable_caam_emi_slow; - } - - caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) & - (CSTA_PLEND | CSTA_ALT_PLEND)); - - /* Finding the page size for using the CTPR_MS register */ - comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms); - pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT; - /* Allocating the BLOCK_OFFSET based on the supported page size on * the platform */ + pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT; if (pg_size == 0) 
BLOCK_OFFSET = PG_SIZE_4K; else @@ -648,7 +672,6 @@ static int caam_probe(struct platform_device *pdev) * In case of SoCs with Management Complex, MC f/w performs * the configuration. */ - caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2); np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc"); ctrlpriv->mc_en = !!np; of_node_put(np); @@ -700,7 +723,7 @@ static int caam_probe(struct platform_device *pdev) } if (ret) { dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); - goto iounmap_ctrl; + goto disable_caam_emi_slow; } ctrlpriv->era = caam_get_era(ctrl); @@ -719,7 +742,6 @@ static int caam_probe(struct platform_device *pdev) #endif /* Check to see if (DPAA 1.x) QI present. If so, enable */ - ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK); if (ctrlpriv->qi_present && !caam_dpaa2) { ctrlpriv->qi = (struct caam_queue_if __iomem __force *) ((__force uint8_t *)ctrl + @@ -906,8 +928,6 @@ shutdown_qi: if (ctrlpriv->qi_init) caam_qi_shutdown(dev); #endif -iounmap_ctrl: - iounmap(ctrl); disable_caam_emi_slow: if (ctrlpriv->caam_emi_slow) clk_disable_unprepare(ctrlpriv->caam_emi_slow); @@ -918,6 +938,8 @@ disable_caam_mem: clk_disable_unprepare(ctrlpriv->caam_mem); disable_caam_ipg: clk_disable_unprepare(ctrlpriv->caam_ipg); +iounmap_ctrl: + iounmap(ctrl); return ret; } -- cgit From b7a2758f20251e1d7f884d96e46ea165810431c3 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Mon, 5 Aug 2019 14:49:59 +1000 Subject: hwrng: n2-drv - fix typo Fixes: 3e75241be808 ("hwrng: drivers - Use device-managed registration API") Signed-off-by: Stephen Rothwell Signed-off-by: Herbert Xu --- drivers/char/hw_random/n2-drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c index 2d256b3470db..73e408146420 100644 --- a/drivers/char/hw_random/n2-drv.c +++ b/drivers/char/hw_random/n2-drv.c @@ -768,7 +768,7 @@ static int n2rng_probe(struct platform_device *op) np->hwrng.data_read = n2rng_data_read; np->hwrng.priv = (unsigned long) np; - err = devm_hwrng_register(&pdev->dev, &np->hwrng); + err = devm_hwrng_register(&op->dev, &np->hwrng); if (err) goto out_hvapi_unregister; -- cgit From 7a4be6c113c1f721818d1e3722a9015fe393295c Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Mon, 29 Jul 2019 13:40:18 +0300 Subject: crypto: ccree - use the full crypt length value In case of AEAD decryption verification error we were using the wrong value to zero out the plaintext buffer, leaving the end of the buffer with the false plaintext. Signed-off-by: Gilad Ben-Yossef Fixes: ff27e85a85bb ("crypto: ccree - add AEAD support") CC: stable@vger.kernel.org # v4.17+ Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_aead.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index ce302adc76c7..339fabfe3de6 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c @@ -236,7 +236,7 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err) /* In case of payload authentication failure, MUST NOT * revealed the decrypted message --> zero its memory. 
*/ - cc_zero_sgl(areq->dst, areq_ctx->cryptlen); + cc_zero_sgl(areq->dst, areq->cryptlen); err = -EBADMSG; } /*ENCRYPT*/ -- cgit From e88b27c8eaa8f3126791778803dd2fdd81828f5c Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Mon, 29 Jul 2019 13:40:19 +0300 Subject: crypto: ccree - use std api sg_zero_buffer Replace internal cc_zero_sgl() with kernel API of the same function sg_zero_buffer(). Signed-off-by: Gilad Ben-Yossef Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_aead.c | 3 ++- drivers/crypto/ccree/cc_buffer_mgr.c | 21 --------------------- drivers/crypto/ccree/cc_buffer_mgr.h | 2 -- 3 files changed, 2 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index 339fabfe3de6..a9779a212b18 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c @@ -236,7 +236,8 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err) /* In case of payload authentication failure, MUST NOT * revealed the decrypted message --> zero its memory. */ - cc_zero_sgl(areq->dst, areq->cryptlen); + sg_zero_buffer(areq->dst, sg_nents(areq->dst), + areq->cryptlen, 0); err = -EBADMSG; } /*ENCRYPT*/ diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c index c81ad33f9115..a72586eccd81 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.c +++ b/drivers/crypto/ccree/cc_buffer_mgr.c @@ -99,27 +99,6 @@ static unsigned int cc_get_sgl_nents(struct device *dev, return nents; } -/** - * cc_zero_sgl() - Zero scatter scatter list data. - * - * @sgl: - */ -void cc_zero_sgl(struct scatterlist *sgl, u32 data_len) -{ - struct scatterlist *current_sg = sgl; - int sg_index = 0; - - while (sg_index <= data_len) { - if (!current_sg) { - /* reached the end of the sgl --> just return back */ - return; - } - memset(sg_virt(current_sg), 0, current_sg->length); - sg_index += current_sg->length; - current_sg = sg_next(current_sg); - } -} - /** * cc_copy_sg_portion() - Copy scatter list data, * from to_skip to end, to dest and vice versa diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h index a726016bdbc1..af434872c6ff 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.h +++ b/drivers/crypto/ccree/cc_buffer_mgr.h @@ -66,6 +66,4 @@ void cc_unmap_hash_request(struct device *dev, void *ctx, void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg, u32 to_skip, u32 end, enum cc_sg_cpy_direct direct); -void cc_zero_sgl(struct scatterlist *sgl, u32 data_len); - #endif /*__BUFFER_MGR_H__*/ -- cgit From f6ebfd7826a8b69ad6691695a97409bd35d146a9 Mon Sep 17 00:00:00 2001 From: Gary R Hook Date: Mon, 29 Jul 2019 12:56:08 +0000 Subject: crypto: ccp - Log an error message when ccp-crypto fails to load If there are no CCP devices on the system, ccp-crypto will not load. 
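(For illustration, and assuming the KBUILD_MODNAME prefix added by the pr_fmt change earlier in this series, the failure would then surface in the log roughly as: ccp_crypto: Cannot load: there are no available CCPs) 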
Write a message to the system log clarifying the reason for the failure of the modprobe operation Signed-off-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-crypto-main.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index 818096490829..8ee4cb45a3f3 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -405,8 +405,10 @@ static int ccp_crypto_init(void) int ret; ret = ccp_present(); - if (ret) + if (ret) { + pr_err("Cannot load: there are no available CCPs\n"); return ret; + } spin_lock_init(&req_queue_lock); INIT_LIST_HEAD(&req_queue.cmds); -- cgit From 15b2455ab4e6434a53c639bd2a168cd2a3e1eb47 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 29 Jul 2019 17:38:19 -0500 Subject: crypto: ux500/crypt - Mark expected switch fall-throughs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mark switch cases where we are expecting to fall through. This patch fixes the following warning (Building: arm): drivers/crypto/ux500/cryp/cryp.c: In function ‘cryp_save_device_context’: drivers/crypto/ux500/cryp/cryp.c:316:16: warning: this statement may fall through [-Wimplicit-fallthrough=] ctx->key_4_r = readl_relaxed(&src_reg->key_4_r); drivers/crypto/ux500/cryp/cryp.c:318:2: note: here case CRYP_KEY_SIZE_192: ^~~~ drivers/crypto/ux500/cryp/cryp.c:320:16: warning: this statement may fall through [-Wimplicit-fallthrough=] ctx->key_3_r = readl_relaxed(&src_reg->key_3_r); drivers/crypto/ux500/cryp/cryp.c:322:2: note: here case CRYP_KEY_SIZE_128: ^~~~ drivers/crypto/ux500/cryp/cryp.c:324:16: warning: this statement may fall through [-Wimplicit-fallthrough=] ctx->key_2_r = readl_relaxed(&src_reg->key_2_r); drivers/crypto/ux500/cryp/cryp.c:326:2: note: here default: ^~~~~~~ In file included from ./include/linux/io.h:13:0, from drivers/crypto/ux500/cryp/cryp_p.h:14, from drivers/crypto/ux500/cryp/cryp.c:15: drivers/crypto/ux500/cryp/cryp.c: In function ‘cryp_restore_device_context’: ./arch/arm/include/asm/io.h:92:22: warning: this statement may fall through [-Wimplicit-fallthrough=] #define __raw_writel __raw_writel ^ ./arch/arm/include/asm/io.h:299:29: note: in expansion of macro ‘__raw_writel’ #define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c) ^~~~~~~~~~~~ drivers/crypto/ux500/cryp/cryp.c:363:3: note: in expansion of macro ‘writel_relaxed’ writel_relaxed(ctx->key_4_r, ®->key_4_r); ^~~~~~~~~~~~~~ drivers/crypto/ux500/cryp/cryp.c:365:2: note: here case CRYP_KEY_SIZE_192: ^~~~ In file included from ./include/linux/io.h:13:0, from drivers/crypto/ux500/cryp/cryp_p.h:14, from drivers/crypto/ux500/cryp/cryp.c:15: ./arch/arm/include/asm/io.h:92:22: warning: this statement may fall through [-Wimplicit-fallthrough=] #define __raw_writel __raw_writel ^ ./arch/arm/include/asm/io.h:299:29: note: in expansion of macro ‘__raw_writel’ #define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c) ^~~~~~~~~~~~ drivers/crypto/ux500/cryp/cryp.c:367:3: note: in expansion of macro ‘writel_relaxed’ writel_relaxed(ctx->key_3_r, ®->key_3_r); ^~~~~~~~~~~~~~ drivers/crypto/ux500/cryp/cryp.c:369:2: note: here case CRYP_KEY_SIZE_128: ^~~~ In file included from ./include/linux/io.h:13:0, from drivers/crypto/ux500/cryp/cryp_p.h:14, from drivers/crypto/ux500/cryp/cryp.c:15: ./arch/arm/include/asm/io.h:92:22: warning: this statement may fall through [-Wimplicit-fallthrough=] 
#define __raw_writel __raw_writel ^ ./arch/arm/include/asm/io.h:299:29: note: in expansion of macro ‘__raw_writel’ #define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c) ^~~~~~~~~~~~ drivers/crypto/ux500/cryp/cryp.c:371:3: note: in expansion of macro ‘writel_relaxed’ writel_relaxed(ctx->key_2_r, ®->key_2_r); ^~~~~~~~~~~~~~ drivers/crypto/ux500/cryp/cryp.c:373:2: note: here default: ^~~~~~~ Signed-off-by: Gustavo A. R. Silva Reviewed-by: Kees Cook Signed-off-by: Herbert Xu --- drivers/crypto/ux500/cryp/cryp.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c index ece83a363e11..f22f6fa612b3 100644 --- a/drivers/crypto/ux500/cryp/cryp.c +++ b/drivers/crypto/ux500/cryp/cryp.c @@ -314,14 +314,17 @@ void cryp_save_device_context(struct cryp_device_data *device_data, case CRYP_KEY_SIZE_256: ctx->key_4_l = readl_relaxed(&src_reg->key_4_l); ctx->key_4_r = readl_relaxed(&src_reg->key_4_r); + /* Fall through */ case CRYP_KEY_SIZE_192: ctx->key_3_l = readl_relaxed(&src_reg->key_3_l); ctx->key_3_r = readl_relaxed(&src_reg->key_3_r); + /* Fall through */ case CRYP_KEY_SIZE_128: ctx->key_2_l = readl_relaxed(&src_reg->key_2_l); ctx->key_2_r = readl_relaxed(&src_reg->key_2_r); + /* Fall through */ default: ctx->key_1_l = readl_relaxed(&src_reg->key_1_l); @@ -361,14 +364,17 @@ void cryp_restore_device_context(struct cryp_device_data *device_data, case CRYP_KEY_SIZE_256: writel_relaxed(ctx->key_4_l, ®->key_4_l); writel_relaxed(ctx->key_4_r, ®->key_4_r); + /* Fall through */ case CRYP_KEY_SIZE_192: writel_relaxed(ctx->key_3_l, ®->key_3_l); writel_relaxed(ctx->key_3_r, ®->key_3_r); + /* Fall through */ case CRYP_KEY_SIZE_128: writel_relaxed(ctx->key_2_l, ®->key_2_l); writel_relaxed(ctx->key_2_r, ®->key_2_r); + /* Fall through */ default: writel_relaxed(ctx->key_1_l, ®->key_1_l); -- cgit From 48f89d2a2920166c35b1c0b69917dbb0390ebec7 Mon Sep 17 00:00:00 2001 From: Horia Geantă Date: Tue, 30 Jul 2019 08:48:33 +0300 Subject: crypto: caam - fix concurrency issue in givencrypt descriptor MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit IV transfer from ofifo to class2 (set up at [29][30]) is not guaranteed to be scheduled before the data transfer from ofifo to external memory (set up at [38]: [29] 10FA0004 ld: ind-nfifo (len=4) imm [30] 81F00010 class2 type=msg len=16> [31] 14820004 ld: ccb2-datasz len=4 offs=0 imm [32] 00000010 data:0x00000010 [33] 8210010D operation: cls1-op aes cbc init-final enc [34] A8080B04 math: (seqin + math0)->vseqout len=4 [35] 28000010 seqfifold: skip len=16 [36] A8080A04 math: (seqin + math0)->vseqin len=4 [37] 2F1E0000 seqfifold: both msg1->2-last2-last1 len=vseqinsz [38] 69300000 seqfifostr: msg len=vseqoutsz [39] 5C20000C seqstr: ccb2 ctx len=12 offs=0 If ofifo -> external memory transfer happens first, DECO will hang (issuing a Watchdog Timeout error, if WDOG is enabled) waiting for data availability in ofifo for the ofifo -> c2 ififo transfer. Make sure IV transfer happens first by waiting for all CAAM internal transfers to end before starting payload transfer. New descriptor with jump command inserted at [37]: [..] 
[36] A8080A04 math: (seqin + math0)->vseqin len=4 [37] A1000401 jump: jsl1 all-match[!nfifopend] offset=[01] local->[38] [38] 2F1E0000 seqfifold: both msg1->2-last2-last1 len=vseqinsz [39] 69300000 seqfifostr: msg len=vseqoutsz [40] 5C20000C seqstr: ccb2 ctx len=12 offs=0 [Note: the issue is present in the descriptor from the very beginning (cf. Fixes tag). However, I've marked it v4.19+ since it's the oldest maintained kernel that the patch applies cleanly against.] Cc: # v4.19+ Fixes: 1acebad3d8db8 ("crypto: caam - faster aead implementation") Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg_desc.c | 9 +++++++++ drivers/crypto/caam/caamalg_desc.h | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c index 72531837571e..28ecef7a481c 100644 --- a/drivers/crypto/caam/caamalg_desc.c +++ b/drivers/crypto/caam/caamalg_desc.c @@ -503,6 +503,7 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, const bool is_qi, int era) { u32 geniv, moveiv; + u32 *wait_cmd; /* Note: Context registers are saved. */ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); @@ -598,6 +599,14 @@ copy_iv: /* Will read cryptlen */ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + + /* + * Wait for IV transfer (ofifo -> class2) to finish before starting + * ciphertext transfer (ofifo -> external memory). + */ + wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP); + set_jump_tgt_here(desc, wait_cmd); + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF | FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH); append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h index da4a4ee60c80..706007624d82 100644 --- a/drivers/crypto/caam/caamalg_desc.h +++ b/drivers/crypto/caam/caamalg_desc.h @@ -12,7 +12,7 @@ #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ) #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) -#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ) +#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 8 * CAAM_CMD_SZ) #define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ) #define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ) #define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ) -- cgit From f26882a3475eb7b2c35fd3b1e291924161b1327d Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Tue, 30 Jul 2019 15:27:11 +0200 Subject: crypto: inside-secure - Use defines instead of some constants (cosmetic) This patch replaces some hard-coded constants regarding key, IV and nonce sizes with appropriate defines from the crypto header files.
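To make the convention behind these defines concrete, here is a minimal standalone userspace sketch of how an rfc3686(ctr(aes)) key splits into AES key material plus a trailing nonce. The constant values mirror <crypto/aes.h> and <crypto/ctr.h>, but split_rfc3686_key() and the sample key are illustrative only, not driver code:

/* Standalone sketch: rfc3686 keys carry the 4-byte nonce at the end. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AES_MIN_KEY_SIZE	16	/* mirrors <crypto/aes.h> */
#define CTR_RFC3686_NONCE_SIZE	4	/* mirrors <crypto/ctr.h> */

static int split_rfc3686_key(const uint8_t *key, unsigned int len,
			     uint8_t nonce[CTR_RFC3686_NONCE_SIZE],
			     unsigned int *keylen)
{
	/* Minimum keysize is minimum AES key size + nonce size. */
	if (len < AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)
		return -1;

	/* Last 4 bytes of the key are the nonce. */
	memcpy(nonce, key + len - CTR_RFC3686_NONCE_SIZE,
	       CTR_RFC3686_NONCE_SIZE);

	/* Exclude the nonce from the effective key length. */
	*keylen = len - CTR_RFC3686_NONCE_SIZE;
	return 0;
}

int main(void)
{
	uint8_t key[AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE] = {
		[AES_MIN_KEY_SIZE] = 0xde, 0xad, 0xbe, 0xef	/* nonce */
	};
	uint8_t nonce[CTR_RFC3686_NONCE_SIZE];
	unsigned int keylen;

	if (!split_rfc3686_key(key, sizeof(key), nonce, &keylen))
		printf("keylen=%u nonce=%02x%02x%02x%02x\n", keylen,
		       nonce[0], nonce[1], nonce[2], nonce[3]);
	return 0;
}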
Signed-off-by: Pascal van Leeuwen Acked-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel_cipher.c | 35 ++++++++++++++------------ 1 file changed, 19 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 477e0ec35f45..718d64f6e3ee 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -226,19 +227,21 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key, goto badkey; if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) { - /* 20 is minimum AES key: 16 bytes + 4 bytes nonce */ - if (keys.enckeylen < 20) + /* Minimum keysize is minimum AES key size + nonce size */ + if (keys.enckeylen < (AES_MIN_KEY_SIZE + + CTR_RFC3686_NONCE_SIZE)) goto badkey; /* last 4 bytes of key are the nonce! */ - ctx->nonce = *(u32 *)(keys.enckey + keys.enckeylen - 4); + ctx->nonce = *(u32 *)(keys.enckey + keys.enckeylen - + CTR_RFC3686_NONCE_SIZE); /* exclude the nonce here */ - keys.enckeylen -= 4; + keys.enckeylen -= CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; } /* Encryption key */ switch (ctx->alg) { case SAFEXCEL_3DES: - if (keys.enckeylen != 24) + if (keys.enckeylen != DES3_EDE_KEY_SIZE) goto badkey; flags = crypto_aead_get_flags(ctfm); err = __des3_verify_key(&flags, keys.enckey); @@ -1119,9 +1122,9 @@ static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm, unsigned int keylen; /* last 4 bytes of key are the nonce! */ - ctx->nonce = *(u32 *)(key + len - 4); + ctx->nonce = *(u32 *)(key + len - CTR_RFC3686_NONCE_SIZE); /* exclude the nonce here */ - keylen = len - 4; + keylen = len - CTR_RFC3686_NONCE_SIZE; ret = aes_expandkey(&aes, key, keylen); if (ret) { crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); @@ -1152,10 +1155,10 @@ struct safexcel_alg_template safexcel_alg_ctr_aes = { .setkey = safexcel_skcipher_aesctr_setkey, .encrypt = safexcel_ctr_aes_encrypt, .decrypt = safexcel_ctr_aes_decrypt, - /* Add 4 to include the 4 byte nonce! 
*/ - .min_keysize = AES_MIN_KEY_SIZE + 4, - .max_keysize = AES_MAX_KEY_SIZE + 4, - .ivsize = 8, + /* Add nonce size */ + .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .ivsize = CTR_RFC3686_IV_SIZE, .base = { .cra_name = "rfc3686(ctr(aes))", .cra_driver_name = "safexcel-ctr-aes", @@ -1657,7 +1660,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes = { .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt_aes, .decrypt = safexcel_aead_decrypt_aes, - .ivsize = 8, + .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, .base = { .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))", @@ -1690,7 +1693,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes = { .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt_aes, .decrypt = safexcel_aead_decrypt_aes, - .ivsize = 8, + .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, .base = { .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))", @@ -1723,7 +1726,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes = { .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt_aes, .decrypt = safexcel_aead_decrypt_aes, - .ivsize = 8, + .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, .base = { .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))", @@ -1756,7 +1759,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes = { .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt_aes, .decrypt = safexcel_aead_decrypt_aes, - .ivsize = 8, + .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, .base = { .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))", @@ -1789,7 +1792,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes = { .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt_aes, .decrypt = safexcel_aead_decrypt_aes, - .ivsize = 8, + .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, .base = { .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))", -- cgit From 514838e920d06f5c2ba56e9656107dfcbbb90219 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Tue, 30 Jul 2019 11:15:05 -0700 Subject: crypto: drivers - Remove dev_err() usage after platform_get_irq() We don't need dev_err() messages when platform_get_irq() fails now that platform_get_irq() prints an error message itself when something goes wrong. Let's remove these prints with a simple semantic patch. // @@ expression ret; struct platform_device *E; @@ ret = ( platform_get_irq(E, ...) | platform_get_irq_byname(E, ...) ); if ( \( ret < 0 \| ret <= 0 \) ) { ( -if (ret != -EPROBE_DEFER) -{ ... -dev_err(...); -... } | ... -dev_err(...); ) ... } // While we're here, remove braces on if statements that only have one statement (manually). Cc: Greg Kroah-Hartman Cc: Herbert Xu Cc: "David S. 
Miller" Cc: Signed-off-by: Stephen Boyd Signed-off-by: Herbert Xu --- drivers/crypto/atmel-aes.c | 1 - drivers/crypto/atmel-sha.c | 1 - drivers/crypto/atmel-tdes.c | 1 - drivers/crypto/ccree/cc_driver.c | 4 +--- drivers/crypto/img-hash.c | 1 - drivers/crypto/mediatek/mtk-platform.c | 4 +--- drivers/crypto/mxs-dcp.c | 8 ++------ drivers/crypto/omap-aes.c | 1 - drivers/crypto/omap-des.c | 1 - drivers/crypto/omap-sham.c | 1 - drivers/crypto/sahara.c | 4 +--- drivers/crypto/stm32/stm32-cryp.c | 4 +--- drivers/crypto/stm32/stm32-hash.c | 4 +--- 13 files changed, 7 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 2b7af44c7b85..026f193556f9 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -2673,7 +2673,6 @@ static int atmel_aes_probe(struct platform_device *pdev) /* Get the IRQ */ aes_dd->irq = platform_get_irq(pdev, 0); if (aes_dd->irq < 0) { - dev_err(dev, "no IRQ resource info\n"); err = aes_dd->irq; goto res_err; } diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index ab0cfe748931..84cb8748a795 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -2779,7 +2779,6 @@ static int atmel_sha_probe(struct platform_device *pdev) /* Get the IRQ */ sha_dd->irq = platform_get_irq(pdev, 0); if (sha_dd->irq < 0) { - dev_err(dev, "no IRQ resource info\n"); err = sha_dd->irq; goto res_err; } diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index fa76620281e8..6256883a89ed 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -1281,7 +1281,6 @@ static int atmel_tdes_probe(struct platform_device *pdev) /* Get the IRQ */ tdes_dd->irq = platform_get_irq(pdev, 0); if (tdes_dd->irq < 0) { - dev_err(dev, "no IRQ resource info\n"); err = tdes_dd->irq; goto res_err; } diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 196e3d140355..8b8eee513c27 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -338,10 +338,8 @@ static int init_cc_resources(struct platform_device *plat_dev) /* Then IRQ */ new_drvdata->irq = platform_get_irq(plat_dev, 0); - if (new_drvdata->irq < 0) { - dev_err(dev, "Failed getting IRQ resource\n"); + if (new_drvdata->irq < 0) return new_drvdata->irq; - } init_completion(&new_drvdata->hw_queue_avail); diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index d27c812c3d8d..6754eaafdc85 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -980,7 +980,6 @@ static int img_hash_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(dev, "no IRQ resource info\n"); err = irq; goto res_err; } diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c index 125318a88cd4..4f43318ca14b 100644 --- a/drivers/crypto/mediatek/mtk-platform.c +++ b/drivers/crypto/mediatek/mtk-platform.c @@ -495,10 +495,8 @@ static int mtk_crypto_probe(struct platform_device *pdev) for (i = 0; i < MTK_IRQ_NUM; i++) { cryp->irq[i] = platform_get_irq(pdev, i); - if (cryp->irq[i] < 0) { - dev_err(cryp->dev, "no IRQ:%d resource info\n", i); + if (cryp->irq[i] < 0) return cryp->irq[i]; - } } cryp->clk_cryp = devm_clk_get(&pdev->dev, "cryp"); diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index f1fa637cb029..bf8d2197bc11 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -994,16 +994,12 @@ static int mxs_dcp_probe(struct 
platform_device *pdev) } dcp_vmi_irq = platform_get_irq(pdev, 0); - if (dcp_vmi_irq < 0) { - dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq); + if (dcp_vmi_irq < 0) return dcp_vmi_irq; - } dcp_irq = platform_get_irq(pdev, 1); - if (dcp_irq < 0) { - dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_irq); + if (dcp_irq < 0) return dcp_irq; - } sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL); if (!sdcp) diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 45a4647f7030..2f53fbb74100 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -1180,7 +1180,6 @@ static int omap_aes_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(dev, "can't get IRQ resource\n"); err = irq; goto err_irq; } diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 1ee69a979677..484a693122af 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -1049,7 +1049,6 @@ static int omap_des_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(dev, "can't get IRQ resource: %d\n", irq); err = irq; goto err_irq; } diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index e8e2907bd9f4..ac80bc6af093 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1989,7 +1989,6 @@ static int omap_sham_get_res_pdev(struct omap_sham_dev *dd, /* Get the IRQ */ dd->irq = platform_get_irq(pdev, 0); if (dd->irq < 0) { - dev_err(dev, "no IRQ resource info\n"); err = dd->irq; goto err; } diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index b0b8e3d48aef..8ac8ec6decd5 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -1403,10 +1403,8 @@ static int sahara_probe(struct platform_device *pdev) /* Get the IRQ */ irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "failed to get irq resource\n"); + if (irq < 0) return irq; - } err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler, 0, dev_name(&pdev->dev), dev); diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c index 98ae02826e8f..72f86063b046 100644 --- a/drivers/crypto/stm32/stm32-cryp.c +++ b/drivers/crypto/stm32/stm32-cryp.c @@ -1975,10 +1975,8 @@ static int stm32_cryp_probe(struct platform_device *pdev) return PTR_ERR(cryp->regs); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "Cannot get IRQ resource\n"); + if (irq < 0) return irq; - } ret = devm_request_threaded_irq(dev, irq, stm32_cryp_irq, stm32_cryp_irq_thread, IRQF_ONESHOT, diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 2b70d8796f25..cfc8e0e37bee 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -1450,10 +1450,8 @@ static int stm32_hash_probe(struct platform_device *pdev) return ret; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "Cannot get IRQ resource\n"); + if (irq < 0) return irq; - } ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler, stm32_hash_irq_thread, IRQF_ONESHOT, -- cgit From 114e2ccd9eff5859760cfa3bcbb707c3eda63fc3 Mon Sep 17 00:00:00 2001 From: Gary R Hook Date: Wed, 31 Jul 2019 00:03:25 +0000 Subject: crypto: ccp - Clean up and exit correctly on allocation failure Return and fail driver initialization if a DMA pool or coherent memory can't be allocated. Be sure to clean up allocated memory. 
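The fix below also moves the command-queue memory to dmam_alloc_coherent(), the device-managed variant, which is why ccp5_destroy() can stop freeing it by hand. As a rough userspace sketch of that managed-allocation pattern (the idea only, not the kernel's devres implementation; all names here are made up):

/* Standalone sketch of device-managed allocations: every buffer is
 * recorded against the device and released in one place at teardown,
 * so individual error paths never free anything by hand.
 */
#include <stdlib.h>

struct managed_buf {
	struct managed_buf *next;
	void *mem;
};

struct fake_dev {
	struct managed_buf *res;	/* devres-like list */
};

static void *devm_zalloc(struct fake_dev *dev, size_t size)
{
	struct managed_buf *mb = malloc(sizeof(*mb));

	if (!mb)
		return NULL;
	mb->mem = calloc(1, size);
	if (!mb->mem) {
		free(mb);
		return NULL;
	}
	/* Record the allocation so teardown can find it. */
	mb->next = dev->res;
	dev->res = mb;
	return mb->mem;
}

static void dev_teardown(struct fake_dev *dev)
{
	struct managed_buf *mb;

	/* One loop releases everything, newest first. */
	while ((mb = dev->res)) {
		dev->res = mb->next;
		free(mb->mem);
		free(mb);
	}
}

int main(void)
{
	struct fake_dev dev = { 0 };

	if (!devm_zalloc(&dev, 4096))	/* e.g. one command queue */
		return 1;
	dev_teardown(&dev);		/* probe failure or remove */
	return 0;
}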
Fixes: 4b394a232df7 ("crypto: ccp - Let a v5 CCP provide the same function as v3") Signed-off-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev-v5.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index f146b51a23a5..9ee72cf46a0f 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -803,6 +803,7 @@ static int ccp5_init(struct ccp_device *ccp) if (!dma_pool) { dev_err(dev, "unable to allocate dma pool\n"); ret = -ENOMEM; + goto e_pool; } cmd_q = &ccp->cmd_q[ccp->cmd_q_count]; @@ -816,9 +817,9 @@ static int ccp5_init(struct ccp_device *ccp) /* Page alignment satisfies our needs for N <= 128 */ BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); - cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize, - &cmd_q->qbase_dma, - GFP_KERNEL); + cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize, + &cmd_q->qbase_dma, + GFP_KERNEL); if (!cmd_q->qbase) { dev_err(dev, "unable to allocate command queue\n"); ret = -ENOMEM; @@ -994,7 +995,6 @@ e_pool: static void ccp5_destroy(struct ccp_device *ccp) { - struct device *dev = ccp->dev; struct ccp_cmd_queue *cmd_q; struct ccp_cmd *cmd; unsigned int i; @@ -1037,12 +1037,6 @@ static void ccp5_destroy(struct ccp_device *ccp) sp_free_ccp_irq(ccp->sp, ccp); - for (i = 0; i < ccp->cmd_q_count; i++) { - cmd_q = &ccp->cmd_q[i]; - dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, - cmd_q->qbase_dma); - } - /* Flush the cmd and backlog queue */ while (!list_empty(&ccp->cmd)) { /* Invoke the callback directly with an error code */ -- cgit From 1bfaac7c53f5cfe9c44d2093263fa50630a81b63 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Wed, 31 Jul 2019 08:10:54 +0200 Subject: crypto: inside-secure - Remove redundant DES ECB & CBC keysize check This patch removes a DES key size check that is redundant as it is already performed by the crypto API itself due to min_keysize = max_keysize. Signed-off-by: Pascal van Leeuwen Acked-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel_cipher.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 718d64f6e3ee..5682fe8b606e 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -1197,11 +1197,6 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key, u32 tmp[DES_EXPKEY_WORDS]; int ret; - if (len != DES_KEY_SIZE) { - crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - ret = des_ekey(tmp, key); if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; -- cgit From 51fab3d73054ca5b06b26e20edac0486b052c6f4 Mon Sep 17 00:00:00 2001 From: Horia Geantă Date: Wed, 31 Jul 2019 16:08:02 +0300 Subject: crypto: caam/qi - fix error handling in ERN handler MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ERN handler calls the caam/qi frontend "done" callback with a status of -EIO. 
This is incorrect, since the callback expects a status value meaningful for the crypto engine - hence the cryptic messages like the one below: platform caam_qi: 15: unknown error source Fix this by providing the callback with: -the status returned by the crypto engine (fd[status]) in case it contains an error, OR -a QI "No error" code otherwise; this will trigger the message: platform caam_qi: 50000000: Queue Manager Interface: No error which is fine, since the QMan driver provides details about the cause of failure. Cc: # v5.1+ Fixes: 67c2315def06 ("crypto: caam - add Queue Interface (QI) backend support") Signed-off-by: Horia Geantă Reviewed-by: Iuliana Prodan Signed-off-by: Herbert Xu --- drivers/crypto/caam/error.c | 1 + drivers/crypto/caam/qi.c | 5 ++++- drivers/crypto/caam/regs.h | 1 + 3 files changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c index 4f0d45865aa2..95da6ae43482 100644 --- a/drivers/crypto/caam/error.c +++ b/drivers/crypto/caam/error.c @@ -118,6 +118,7 @@ static const struct { u8 value; const char *error_text; } qi_error_list[] = { + { 0x00, "No error" }, { 0x1F, "Job terminated by FQ or ICID flush" }, { 0x20, "FD format error"}, { 0x21, "FD command format error"}, diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index 0fe618e3804a..19a378bdf331 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c @@ -163,7 +163,10 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq, dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd), sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL); - drv_req->cbk(drv_req, -EIO); + if (fd->status) + drv_req->cbk(drv_req, be32_to_cpu(fd->status)); + else + drv_req->cbk(drv_req, JRSTA_SSRC_QI); } static struct qman_fq *create_caam_req_fq(struct device *qidev, diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 8591914d5c51..7c7ea8af6a48 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -641,6 +641,7 @@ struct caam_job_ring { #define JRSTA_SSRC_CCB_ERROR 0x20000000 #define JRSTA_SSRC_JUMP_HALT_USER 0x30000000 #define JRSTA_SSRC_DECO 0x40000000 +#define JRSTA_SSRC_QI 0x50000000 #define JRSTA_SSRC_JRERROR 0x60000000 #define JRSTA_SSRC_JUMP_HALT_CC 0x70000000 -- cgit From 1984aaeec372fbfb597883074253d290cbd543d4 Mon Sep 17 00:00:00 2001 From: Horia Geantă Date: Wed, 31 Jul 2019 16:08:03 +0300 Subject: crypto: caam - fix return code in completion callbacks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Modify drivers to provide a valid errno (and not the HW error ID) to the user, via completion callbacks. A "valid errno" is currently not explicitly mentioned in the docs; however, the error code is expected to match the one returned by the generic SW implementation. Note: in most error cases caam/qi and caam/qi2 returned -EIO; align all caam drivers to return -EINVAL. While here, ratelimit prints triggered by fuzz testing, so that the console is not flooded.
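Together, the ERN fix above and the errno conversion this message describes give a status flow that can be sketched as below. This is a standalone approximation: the JRSTA_SSRC_* values come from the diffs above, but CCBERR_ICVCHK and the 0xff mask are simplified placeholders for what caam_jr_strstatus() and error.c actually implement:

/* Standalone approximation of the CAAM status handling: the ERN
 * handler feeds either fd[status] or the QI "No error" source code
 * to the "done" callback, and the decoder maps the error source in
 * the top nibble to a message plus a negative errno.
 */
#include <errno.h>
#include <stdio.h>

#define JRSTA_SSRC_MASK		0xf0000000	/* error source nibble */
#define JRSTA_SSRC_CCB_ERROR	0x20000000
#define JRSTA_SSRC_QI		0x50000000
#define CCBERR_ICVCHK		0x0000000a	/* placeholder value */

static int strstatus(unsigned int status)
{
	unsigned int ssrc = status & JRSTA_SSRC_MASK;

	if (!status)
		return 0;

	/* ICV check failures are part of normal AEAD operation:
	 * report -EBADMSG and stay silent.
	 */
	if (ssrc == JRSTA_SSRC_CCB_ERROR &&
	    (status & 0xff) == CCBERR_ICVCHK)
		return -EBADMSG;

	fprintf(stderr, "%08x: error source %#x\n", status, ssrc);
	return -EINVAL;	/* all other HW errors map to -EINVAL */
}

/* What the fixed ERN handler now hands to the frontend callback. */
static int ern_status(unsigned int fd_status)
{
	return fd_status ? strstatus(fd_status) : strstatus(JRSTA_SSRC_QI);
}

int main(void)
{
	printf("empty fd[status]: %d\n", ern_status(0));
	printf("ICV mismatch:     %d\n",
	       strstatus(JRSTA_SSRC_CCB_ERROR | CCBERR_ICVCHK));
	return 0;
}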
Signed-off-by: Horia Geantă Signed-off-by: Iuliana Prodan Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 26 ++++++++-------- drivers/crypto/caam/caamalg_qi.c | 21 ++++--------- drivers/crypto/caam/caamalg_qi2.c | 62 ++++++++++++--------------------------- drivers/crypto/caam/caamhash.c | 20 ++++++++----- drivers/crypto/caam/caampkc.c | 20 ++++++++----- drivers/crypto/caam/error.c | 60 +++++++++++++++++++++++-------------- drivers/crypto/caam/error.h | 2 +- drivers/crypto/caam/key_gen.c | 5 ++-- drivers/crypto/caam/qi.c | 5 ++-- 9 files changed, 104 insertions(+), 117 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 43f18253e5b6..06b4f2d47be4 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -930,19 +930,20 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, { struct aead_request *req = context; struct aead_edesc *edesc; + int ecode = 0; dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); edesc = container_of(desc, struct aead_edesc, hw_desc[0]); if (err) - caam_jr_strstatus(jrdev, err); + ecode = caam_jr_strstatus(jrdev, err); aead_unmap(jrdev, edesc, req); kfree(edesc); - aead_request_complete(req, err); + aead_request_complete(req, ecode); } static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, @@ -950,25 +951,20 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, { struct aead_request *req = context; struct aead_edesc *edesc; + int ecode = 0; dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); edesc = container_of(desc, struct aead_edesc, hw_desc[0]); if (err) - caam_jr_strstatus(jrdev, err); + ecode = caam_jr_strstatus(jrdev, err); aead_unmap(jrdev, edesc, req); - /* - * verify hw auth check passed else return -EBADMSG - */ - if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK) - err = -EBADMSG; - kfree(edesc); - aead_request_complete(req, err); + aead_request_complete(req, ecode); } static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, @@ -978,13 +974,14 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, struct skcipher_edesc *edesc; struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); int ivsize = crypto_skcipher_ivsize(skcipher); + int ecode = 0; dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); if (err) - caam_jr_strstatus(jrdev, err); + ecode = caam_jr_strstatus(jrdev, err); skcipher_unmap(jrdev, edesc, req); @@ -1008,7 +1005,7 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, kfree(edesc); - skcipher_request_complete(req, err); + skcipher_request_complete(req, ecode); } static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, @@ -1018,12 +1015,13 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, struct skcipher_edesc *edesc; struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); int ivsize = crypto_skcipher_ivsize(skcipher); + int ecode = 0; dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); if (err) - caam_jr_strstatus(jrdev, err); + ecode = caam_jr_strstatus(jrdev, err); skcipher_unmap(jrdev, edesc, req); @@ -1047,7 +1045,7 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, kfree(edesc); - skcipher_request_complete(req, err); + 
skcipher_request_complete(req, ecode); } /* diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 32f0f8a72067..ab263b1bf7f1 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -884,20 +884,8 @@ static void aead_done(struct caam_drv_req *drv_req, u32 status) qidev = caam_ctx->qidev; - if (unlikely(status)) { - u32 ssrc = status & JRSTA_SSRC_MASK; - u8 err_id = status & JRSTA_CCBERR_ERRID_MASK; - - caam_jr_strstatus(qidev, status); - /* - * verify hw auth check passed else return -EBADMSG - */ - if (ssrc == JRSTA_SSRC_CCB_ERROR && - err_id == JRSTA_CCBERR_ERRID_ICVCHK) - ecode = -EBADMSG; - else - ecode = -EIO; - } + if (unlikely(status)) + ecode = caam_jr_strstatus(qidev, status); edesc = container_of(drv_req, typeof(*edesc), drv_req); aead_unmap(qidev, edesc, aead_req); @@ -1190,13 +1178,14 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status) struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher); struct device *qidev = caam_ctx->qidev; int ivsize = crypto_skcipher_ivsize(skcipher); + int ecode = 0; dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); edesc = container_of(drv_req, typeof(*edesc), drv_req); if (status) - caam_jr_strstatus(qidev, status); + ecode = caam_jr_strstatus(qidev, status); print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->iv, @@ -1215,7 +1204,7 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status) memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); qi_cache_free(edesc); - skcipher_request_complete(req, status); + skcipher_request_complete(req, ecode); } static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index a78a36dfa7b9..008528b563ea 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -1228,10 +1228,8 @@ static void aead_encrypt_done(void *cbk_ctx, u32 status) dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); - if (unlikely(status)) { - caam_qi2_strstatus(ctx->dev, status); - ecode = -EIO; - } + if (unlikely(status)) + ecode = caam_qi2_strstatus(ctx->dev, status); aead_unmap(ctx->dev, edesc, req); qi_cache_free(edesc); @@ -1251,17 +1249,8 @@ static void aead_decrypt_done(void *cbk_ctx, u32 status) dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); - if (unlikely(status)) { - caam_qi2_strstatus(ctx->dev, status); - /* - * verify hw auth check passed else return -EBADMSG - */ - if ((status & JRSTA_CCBERR_ERRID_MASK) == - JRSTA_CCBERR_ERRID_ICVCHK) - ecode = -EBADMSG; - else - ecode = -EIO; - } + if (unlikely(status)) + ecode = caam_qi2_strstatus(ctx->dev, status); aead_unmap(ctx->dev, edesc, req); qi_cache_free(edesc); @@ -1353,10 +1342,8 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status) dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); - if (unlikely(status)) { - caam_qi2_strstatus(ctx->dev, status); - ecode = -EIO; - } + if (unlikely(status)) + ecode = caam_qi2_strstatus(ctx->dev, status); print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->iv, @@ -1391,10 +1378,8 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status) dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); - if (unlikely(status)) { - caam_qi2_strstatus(ctx->dev, status); - ecode = -EIO; - } + if (unlikely(status)) + ecode = 
caam_qi2_strstatus(ctx->dev, status); print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->iv, @@ -3095,10 +3080,7 @@ static void split_key_sh_done(void *cbk_ctx, u32 err) dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); - if (err) - caam_qi2_strstatus(res->dev, err); - - res->err = err; + res->err = err ? caam_qi2_strstatus(res->dev, err) : 0; complete(&res->completion); } @@ -3283,10 +3265,8 @@ static void ahash_done(void *cbk_ctx, u32 status) dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); - if (unlikely(status)) { - caam_qi2_strstatus(ctx->dev, status); - ecode = -EIO; - } + if (unlikely(status)) + ecode = caam_qi2_strstatus(ctx->dev, status); ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); memcpy(req->result, state->caam_ctx, digestsize); @@ -3311,10 +3291,8 @@ static void ahash_done_bi(void *cbk_ctx, u32 status) dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); - if (unlikely(status)) { - caam_qi2_strstatus(ctx->dev, status); - ecode = -EIO; - } + if (unlikely(status)) + ecode = caam_qi2_strstatus(ctx->dev, status); ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); switch_buf(state); @@ -3344,10 +3322,8 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status) dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); - if (unlikely(status)) { - caam_qi2_strstatus(ctx->dev, status); - ecode = -EIO; - } + if (unlikely(status)) + ecode = caam_qi2_strstatus(ctx->dev, status); ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); memcpy(req->result, state->caam_ctx, digestsize); @@ -3372,10 +3348,8 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status) dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); - if (unlikely(status)) { - caam_qi2_strstatus(ctx->dev, status); - ecode = -EIO; - } + if (unlikely(status)) + ecode = caam_qi2_strstatus(ctx->dev, status); ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); switch_buf(state); @@ -4701,7 +4675,7 @@ static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv, fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK; if (unlikely(fd_err)) - dev_err(priv->dev, "FD error: %08x\n", fd_err); + dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err); /* * FD[ADDR] is guaranteed to be valid, irrespective of errors reported diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index e4ac5d591ad6..73abefa262ab 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -584,12 +584,13 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err, int digestsize = crypto_ahash_digestsize(ahash); struct caam_hash_state *state = ahash_request_ctx(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + int ecode = 0; dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); if (err) - caam_jr_strstatus(jrdev, err); + ecode = caam_jr_strstatus(jrdev, err); ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); memcpy(req->result, state->caam_ctx, digestsize); @@ -599,7 +600,7 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err, DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); - req->base.complete(&req->base, err); + req->base.complete(&req->base, ecode); } static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, @@ -611,12 +612,13 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, struct caam_hash_ctx *ctx = 
crypto_ahash_ctx(ahash); struct caam_hash_state *state = ahash_request_ctx(req); int digestsize = crypto_ahash_digestsize(ahash); + int ecode = 0; dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); if (err) - caam_jr_strstatus(jrdev, err); + ecode = caam_jr_strstatus(jrdev, err); ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); switch_buf(state); @@ -630,7 +632,7 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, DUMP_PREFIX_ADDRESS, 16, 4, req->result, digestsize, 1); - req->base.complete(&req->base, err); + req->base.complete(&req->base, ecode); } static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, @@ -642,12 +644,13 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, int digestsize = crypto_ahash_digestsize(ahash); struct caam_hash_state *state = ahash_request_ctx(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + int ecode = 0; dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); if (err) - caam_jr_strstatus(jrdev, err); + ecode = caam_jr_strstatus(jrdev, err); ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); memcpy(req->result, state->caam_ctx, digestsize); @@ -657,7 +660,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); - req->base.complete(&req->base, err); + req->base.complete(&req->base, ecode); } static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, @@ -669,12 +672,13 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); struct caam_hash_state *state = ahash_request_ctx(req); int digestsize = crypto_ahash_digestsize(ahash); + int ecode = 0; dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); if (err) - caam_jr_strstatus(jrdev, err); + ecode = caam_jr_strstatus(jrdev, err); ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); switch_buf(state); @@ -688,7 +692,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, DUMP_PREFIX_ADDRESS, 16, 4, req->result, digestsize, 1); - req->base.complete(&req->base, err); + req->base.complete(&req->base, ecode); } /* diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 2340f9441a80..e05d975da582 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -107,9 +107,10 @@ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context) { struct akcipher_request *req = context; struct rsa_edesc *edesc; + int ecode = 0; if (err) - caam_jr_strstatus(dev, err); + ecode = caam_jr_strstatus(dev, err); edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); @@ -117,7 +118,7 @@ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context) rsa_io_unmap(dev, edesc, req); kfree(edesc); - akcipher_request_complete(req, err); + akcipher_request_complete(req, ecode); } static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err, @@ -125,9 +126,10 @@ static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err, { struct akcipher_request *req = context; struct rsa_edesc *edesc; + int ecode = 0; if (err) - caam_jr_strstatus(dev, err); + ecode = caam_jr_strstatus(dev, err); edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); 
@@ -135,7 +137,7 @@ static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err, rsa_io_unmap(dev, edesc, req); kfree(edesc); - akcipher_request_complete(req, err); + akcipher_request_complete(req, ecode); } static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err, @@ -143,9 +145,10 @@ static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err, { struct akcipher_request *req = context; struct rsa_edesc *edesc; + int ecode = 0; if (err) - caam_jr_strstatus(dev, err); + ecode = caam_jr_strstatus(dev, err); edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); @@ -153,7 +156,7 @@ static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err, rsa_io_unmap(dev, edesc, req); kfree(edesc); - akcipher_request_complete(req, err); + akcipher_request_complete(req, ecode); } static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err, @@ -161,9 +164,10 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err, { struct akcipher_request *req = context; struct rsa_edesc *edesc; + int ecode = 0; if (err) - caam_jr_strstatus(dev, err); + ecode = caam_jr_strstatus(dev, err); edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); @@ -171,7 +175,7 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err, rsa_io_unmap(dev, edesc, req); kfree(edesc); - akcipher_request_complete(req, err); + akcipher_request_complete(req, ecode); } /** diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c index 95da6ae43482..b7fbf1be37a4 100644 --- a/drivers/crypto/caam/error.c +++ b/drivers/crypto/caam/error.c @@ -211,8 +211,8 @@ static const char * const rng_err_id_list[] = { "Secure key generation", }; -static void report_ccb_status(struct device *jrdev, const u32 status, - const char *error) +static int report_ccb_status(struct device *jrdev, const u32 status, + const char *error) { u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >> JRSTA_CCBERR_CHAID_SHIFT; @@ -248,22 +248,27 @@ static void report_ccb_status(struct device *jrdev, const u32 status, * CCB ICV check failures are part of normal operation life; * we leave the upper layers to do what they want with them. 
*/ - if (err_id != JRSTA_CCBERR_ERRID_ICVCHK) - dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n", - status, error, idx_str, idx, - cha_str, cha_err_code, - err_str, err_err_code); + if (err_id == JRSTA_CCBERR_ERRID_ICVCHK) + return -EBADMSG; + + dev_err_ratelimited(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n", status, + error, idx_str, idx, cha_str, cha_err_code, + err_str, err_err_code); + + return -EINVAL; } -static void report_jump_status(struct device *jrdev, const u32 status, - const char *error) +static int report_jump_status(struct device *jrdev, const u32 status, + const char *error) { dev_err(jrdev, "%08x: %s: %s() not implemented\n", status, error, __func__); + + return -EINVAL; } -static void report_deco_status(struct device *jrdev, const u32 status, - const char *error) +static int report_deco_status(struct device *jrdev, const u32 status, + const char *error) { u8 err_id = status & JRSTA_DECOERR_ERROR_MASK; u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >> @@ -289,10 +294,12 @@ static void report_deco_status(struct device *jrdev, const u32 status, dev_err(jrdev, "%08x: %s: %s %d: %s%s\n", status, error, idx_str, idx, err_str, err_err_code); + + return -EINVAL; } -static void report_qi_status(struct device *qidev, const u32 status, - const char *error) +static int report_qi_status(struct device *qidev, const u32 status, + const char *error) { u8 err_id = status & JRSTA_QIERR_ERROR_MASK; const char *err_str = "unidentified error value 0x"; @@ -310,27 +317,33 @@ static void report_qi_status(struct device *qidev, const u32 status, dev_err(qidev, "%08x: %s: %s%s\n", status, error, err_str, err_err_code); + + return -EINVAL; } -static void report_jr_status(struct device *jrdev, const u32 status, - const char *error) +static int report_jr_status(struct device *jrdev, const u32 status, + const char *error) { dev_err(jrdev, "%08x: %s: %s() not implemented\n", status, error, __func__); + + return -EINVAL; } -static void report_cond_code_status(struct device *jrdev, const u32 status, - const char *error) +static int report_cond_code_status(struct device *jrdev, const u32 status, + const char *error) { dev_err(jrdev, "%08x: %s: %s() not implemented\n", status, error, __func__); + + return -EINVAL; } -void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2) +int caam_strstatus(struct device *jrdev, u32 status, bool qi_v2) { static const struct stat_src { - void (*report_ssed)(struct device *jrdev, const u32 status, - const char *error); + int (*report_ssed)(struct device *jrdev, const u32 status, + const char *error); const char *error; } status_src[16] = { { NULL, "No error" }, @@ -358,11 +371,14 @@ void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2) * Otherwise print the error source name. 
*/ if (status_src[ssrc].report_ssed) - status_src[ssrc].report_ssed(jrdev, status, error); - else if (error) + return status_src[ssrc].report_ssed(jrdev, status, error); + + if (error) dev_err(jrdev, "%d: %s\n", ssrc, error); else dev_err(jrdev, "%d: unknown error source\n", ssrc); + + return -EINVAL; } EXPORT_SYMBOL(caam_strstatus); diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h index d9726e66edbf..16809fa8fec7 100644 --- a/drivers/crypto/caam/error.h +++ b/drivers/crypto/caam/error.h @@ -12,7 +12,7 @@ #define CAAM_ERROR_STR_MAX 302 -void caam_strstatus(struct device *dev, u32 status, bool qi_v2); +int caam_strstatus(struct device *dev, u32 status, bool qi_v2); #define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false) #define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true) diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c index 48dd3536060d..c6f8375ae215 100644 --- a/drivers/crypto/caam/key_gen.c +++ b/drivers/crypto/caam/key_gen.c @@ -15,13 +15,14 @@ void split_key_done(struct device *dev, u32 *desc, u32 err, void *context) { struct split_key_result *res = context; + int ecode = 0; dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); if (err) - caam_jr_strstatus(dev, err); + ecode = caam_jr_strstatus(dev, err); - res->err = err; + res->err = ecode; complete(&res->completion); } diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index 19a378bdf331..378f627e1d64 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c @@ -577,8 +577,9 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p, if (ssrc != JRSTA_SSRC_CCB_ERROR || err_id != JRSTA_CCBERR_ERRID_ICVCHK) - dev_err(qidev, "Error: %#x in CAAM response FD\n", - status); + dev_err_ratelimited(qidev, + "Error: %#x in CAAM response FD\n", + status); } if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) { -- cgit From 1ccb39eb5a9822543273c35f5114a9dc101e7315 Mon Sep 17 00:00:00 2001 From: Horia Geantă Date: Wed, 31 Jul 2019 16:08:04 +0300 Subject: crypto: caam - update IV only when crypto operation succeeds MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit skcipher encryption might fail and in some cases, like an (invalid) input length smaller than the block size, updating the IV would lead to a useless IV copy in case hardware issued an error. Signed-off-by: Horia Geantă Signed-off-by: Iuliana Prodan Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 5 ++--- drivers/crypto/caam/caamalg_qi.c | 4 +++- drivers/crypto/caam/caamalg_qi2.c | 8 ++++++-- 3 files changed, 11 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 06b4f2d47be4..28d55a05b0f3 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -990,10 +990,9 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, * ciphertext block (CBC mode) or last counter (CTR mode). * This is used e.g. by the CTS mode. */ - if (ivsize) { + if (ivsize && !ecode) { memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes, ivsize); - print_hex_dump_debug("dstiv @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->iv, edesc->src_nents > 1 ? 100 : ivsize, 1); @@ -1030,7 +1029,7 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, * ciphertext block (CBC mode) or last counter (CTR mode). * This is used e.g. by the CTS mode.
*/ - if (ivsize) { + if (ivsize && !ecode) { memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes, ivsize); diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index ab263b1bf7f1..66531d6fbd0b 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -1201,7 +1201,9 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status) * ciphertext block (CBC mode) or last counter (CTR mode). * This is used e.g. by the CTS mode. */ - memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); + if (!ecode) + memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, + ivsize); qi_cache_free(edesc); skcipher_request_complete(req, ecode); diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 008528b563ea..fcd8a488efc5 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -1359,7 +1359,9 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status) * ciphertext block (CBC mode) or last counter (CTR mode). * This is used e.g. by the CTS mode. */ - memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); + if (!ecode) + memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, + ivsize); qi_cache_free(edesc); skcipher_request_complete(req, ecode); @@ -1395,7 +1397,9 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status) * ciphertext block (CBC mode) or last counter (CTR mode). * This is used e.g. by the CTS mode. */ - memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); + if (!ecode) + memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, + ivsize); qi_cache_free(edesc); skcipher_request_complete(req, ecode); -- cgit From 836d8f43c5e529935bb3140edc710c23ffd1d54a Mon Sep 17 00:00:00 2001 From: Iuliana Prodan Date: Wed, 31 Jul 2019 16:08:05 +0300 Subject: crypto: caam - check key length Check key length to solve the extra tests that expect -EINVAL to be returned when the key size is not valid. Validated AES keylen for skcipher, ahash and aead. Signed-off-by: Iuliana Prodan Reviewed-by: Horia Geanta Signed-off-by: Herbert Xu --- drivers/crypto/caam/Kconfig | 2 + drivers/crypto/caam/caamalg.c | 125 ++++++++++++++++++++++-------- drivers/crypto/caam/caamalg_qi.c | 130 ++++++++++++++++++++++++-------- drivers/crypto/caam/caamalg_qi2.c | 155 ++++++++++++++++++++++++++++---------- drivers/crypto/caam/caamhash.c | 12 +++ 5 files changed, 324 insertions(+), 100 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index 3720ddabb507..e4fdf545ac90 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig @@ -111,6 +111,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC select CRYPTO_AUTHENC select CRYPTO_BLKCIPHER + select CRYPTO_DES help Selecting this will use CAAM Queue Interface (QI) for sending & receiving crypto jobs to/from CAAM. This gives better performance @@ -161,6 +162,7 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM select CRYPTO_AUTHENC select CRYPTO_AEAD select CRYPTO_HASH + select CRYPTO_DES help CAAM driver for QorIQ Data Path Acceleration Architecture 2. 
It handles DPSECI DPAA2 objects that sit on the Management Complex diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 28d55a05b0f3..ce50ae1dc825 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -667,6 +667,13 @@ static int gcm_setkey(struct crypto_aead *aead, { struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; + int err; + + err = aes_check_keylen(keylen); + if (err) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return err; + } print_hex_dump_debug("key in @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -683,9 +690,13 @@ static int rfc4106_setkey(struct crypto_aead *aead, { struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; + int err; - if (keylen < 4) - return -EINVAL; + err = aes_check_keylen(keylen - 4); + if (err) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return err; + } print_hex_dump_debug("key in @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -707,9 +718,13 @@ static int rfc4543_setkey(struct crypto_aead *aead, { struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; + int err; - if (keylen < 4) - return -EINVAL; + err = aes_check_keylen(keylen - 4); + if (err) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return err; + } print_hex_dump_debug("key in @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -727,7 +742,7 @@ static int rfc4543_setkey(struct crypto_aead *aead, } static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, - unsigned int keylen) + unsigned int keylen, const u32 ctx1_iv_off) { struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); struct caam_skcipher_alg *alg = @@ -736,30 +751,10 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, struct device *jrdev = ctx->jrdev; unsigned int ivsize = crypto_skcipher_ivsize(skcipher); u32 *desc; - u32 ctx1_iv_off = 0; - const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == - OP_ALG_AAI_CTR_MOD128); const bool is_rfc3686 = alg->caam.rfc3686; print_hex_dump_debug("key in @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); - /* - * AES-CTR needs to load IV in CONTEXT1 reg - * at an offset of 128bits (16bytes) - * CONTEXT1[255:128] = IV - */ - if (ctr_mode) - ctx1_iv_off = 16; - - /* - * RFC3686 specific: - * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} - * | *key = {KEY, NONCE} - */ - if (is_rfc3686) { - ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; - keylen -= CTR_RFC3686_NONCE_SIZE; - } ctx->cdata.keylen = keylen; ctx->cdata.key_virt = key; @@ -782,6 +777,74 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, return 0; } +static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + int err; + + err = aes_check_keylen(keylen); + if (err) { + crypto_skcipher_set_flags(skcipher, + CRYPTO_TFM_RES_BAD_KEY_LEN); + return err; + } + + return skcipher_setkey(skcipher, key, keylen, 0); +} + +static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + u32 ctx1_iv_off; + int err; + + /* + * RFC3686 specific: + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} + * | *key = {KEY, NONCE} + */ + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; + keylen -= CTR_RFC3686_NONCE_SIZE; + + err = aes_check_keylen(keylen); + if (err) { + crypto_skcipher_set_flags(skcipher, + 
CRYPTO_TFM_RES_BAD_KEY_LEN); + return err; + } + + return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); +} + +static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + u32 ctx1_iv_off; + int err; + + /* + * AES-CTR needs to load IV in CONTEXT1 reg + * at an offset of 128bits (16bytes) + * CONTEXT1[255:128] = IV + */ + ctx1_iv_off = 16; + + err = aes_check_keylen(keylen); + if (err) { + crypto_skcipher_set_flags(skcipher, + CRYPTO_TFM_RES_BAD_KEY_LEN); + return err; + } + + return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); +} + +static int arc4_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + return skcipher_setkey(skcipher, key, keylen, 0); +} + static int des_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { @@ -800,7 +863,7 @@ static int des_skcipher_setkey(struct crypto_skcipher *skcipher, return -EINVAL; } - return skcipher_setkey(skcipher, key, keylen); + return skcipher_setkey(skcipher, key, keylen, 0); } static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, @@ -1880,7 +1943,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = "cbc-aes-caam", .cra_blocksize = AES_BLOCK_SIZE, }, - .setkey = skcipher_setkey, + .setkey = aes_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, @@ -1928,7 +1991,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = "ctr-aes-caam", .cra_blocksize = 1, }, - .setkey = skcipher_setkey, + .setkey = ctr_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, @@ -1946,7 +2009,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = "rfc3686-ctr-aes-caam", .cra_blocksize = 1, }, - .setkey = skcipher_setkey, + .setkey = rfc3686_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE + @@ -2000,7 +2063,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = "ecb-aes-caam", .cra_blocksize = AES_BLOCK_SIZE, }, - .setkey = skcipher_setkey, + .setkey = aes_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, @@ -2030,7 +2093,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = "ecb-arc4-caam", .cra_blocksize = ARC4_BLOCK_SIZE, }, - .setkey = skcipher_setkey, + .setkey = arc4_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = ARC4_MIN_KEY_SIZE, diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 66531d6fbd0b..b8854f268e5a 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -385,6 +385,12 @@ static int gcm_setkey(struct crypto_aead *aead, struct device *jrdev = ctx->jrdev; int ret; + ret = aes_check_keylen(keylen); + if (ret) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return ret; + } + print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -480,8 +486,11 @@ static int rfc4106_setkey(struct crypto_aead *aead, struct device *jrdev = ctx->jrdev; int ret; - if (keylen < 4) - return -EINVAL; + ret = aes_check_keylen(keylen - 4); + if (ret) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return ret; + } print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ 
-582,8 +591,11 @@ static int rfc4543_setkey(struct crypto_aead *aead, struct device *jrdev = ctx->jrdev; int ret; - if (keylen < 4) - return -EINVAL; + ret = aes_check_keylen(keylen - 4); + if (ret) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return ret; + } print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -624,7 +636,7 @@ static int rfc4543_setkey(struct crypto_aead *aead, } static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, - unsigned int keylen) + unsigned int keylen, const u32 ctx1_iv_off) { struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); struct caam_skcipher_alg *alg = @@ -632,33 +644,12 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, skcipher); struct device *jrdev = ctx->jrdev; unsigned int ivsize = crypto_skcipher_ivsize(skcipher); - u32 ctx1_iv_off = 0; - const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == - OP_ALG_AAI_CTR_MOD128); const bool is_rfc3686 = alg->caam.rfc3686; int ret = 0; print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); - /* - * AES-CTR needs to load IV in CONTEXT1 reg - * at an offset of 128bits (16bytes) - * CONTEXT1[255:128] = IV - */ - if (ctr_mode) - ctx1_iv_off = 16; - - /* - * RFC3686 specific: - * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} - * | *key = {KEY, NONCE} - */ - if (is_rfc3686) { - ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; - keylen -= CTR_RFC3686_NONCE_SIZE; - } - ctx->cdata.keylen = keylen; ctx->cdata.key_virt = key; ctx->cdata.key_inline = true; @@ -694,11 +685,88 @@ badkey: return -EINVAL; } +static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + int err; + + err = aes_check_keylen(keylen); + if (err) { + crypto_skcipher_set_flags(skcipher, + CRYPTO_TFM_RES_BAD_KEY_LEN); + return err; + } + + return skcipher_setkey(skcipher, key, keylen, 0); +} + +static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + u32 ctx1_iv_off; + int err; + + /* + * RFC3686 specific: + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} + * | *key = {KEY, NONCE} + */ + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; + keylen -= CTR_RFC3686_NONCE_SIZE; + + err = aes_check_keylen(keylen); + if (err) { + crypto_skcipher_set_flags(skcipher, + CRYPTO_TFM_RES_BAD_KEY_LEN); + return err; + } + + return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); +} + +static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + u32 ctx1_iv_off; + int err; + + /* + * AES-CTR needs to load IV in CONTEXT1 reg + * at an offset of 128bits (16bytes) + * CONTEXT1[255:128] = IV + */ + ctx1_iv_off = 16; + + err = aes_check_keylen(keylen); + if (err) { + crypto_skcipher_set_flags(skcipher, + CRYPTO_TFM_RES_BAD_KEY_LEN); + return err; + } + + return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); +} + static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { return unlikely(des3_verify_key(skcipher, key)) ?: - skcipher_setkey(skcipher, key, keylen); + skcipher_setkey(skcipher, key, keylen, 0); +} + +static int des_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + u32 tmp[DES_EXPKEY_WORDS]; + + if (!des_ekey(tmp, key) && (crypto_skcipher_get_flags(skcipher) & + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { + crypto_skcipher_set_flags(skcipher, + 
CRYPTO_TFM_RES_WEAK_KEY); + return -EINVAL; + } + + return skcipher_setkey(skcipher, key, keylen, 0); } static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, @@ -1405,7 +1473,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = "cbc-aes-caam-qi", .cra_blocksize = AES_BLOCK_SIZE, }, - .setkey = skcipher_setkey, + .setkey = aes_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, @@ -1437,7 +1505,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = "cbc-des-caam-qi", .cra_blocksize = DES_BLOCK_SIZE, }, - .setkey = skcipher_setkey, + .setkey = des_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = DES_KEY_SIZE, @@ -1453,7 +1521,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = "ctr-aes-caam-qi", .cra_blocksize = 1, }, - .setkey = skcipher_setkey, + .setkey = ctr_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, @@ -1471,7 +1539,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = "rfc3686-ctr-aes-caam-qi", .cra_blocksize = 1, }, - .setkey = skcipher_setkey, + .setkey = rfc3686_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE + diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index fcd8a488efc5..337150bdf3d1 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -732,7 +732,13 @@ static int gcm_setkey(struct crypto_aead *aead, { struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *dev = ctx->dev; + int ret; + ret = aes_check_keylen(keylen); + if (ret) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return ret; + } print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -818,9 +824,13 @@ static int rfc4106_setkey(struct crypto_aead *aead, { struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *dev = ctx->dev; + int ret; - if (keylen < 4) - return -EINVAL; + ret = aes_check_keylen(keylen - 4); + if (ret) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return ret; + } print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -912,9 +922,13 @@ static int rfc4543_setkey(struct crypto_aead *aead, { struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *dev = ctx->dev; + int ret; - if (keylen < 4) - return -EINVAL; + ret = aes_check_keylen(keylen - 4); + if (ret) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return ret; + } print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -932,7 +946,7 @@ static int rfc4543_setkey(struct crypto_aead *aead, } static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, - unsigned int keylen) + unsigned int keylen, const u32 ctx1_iv_off) { struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); struct caam_skcipher_alg *alg = @@ -942,34 +956,11 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, struct caam_flc *flc; unsigned int ivsize = crypto_skcipher_ivsize(skcipher); u32 *desc; - u32 ctx1_iv_off = 0; - const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == - OP_ALG_AAI_CTR_MOD128) && - ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) != - OP_ALG_ALGSEL_CHACHA20); const bool is_rfc3686 = alg->caam.rfc3686; 
print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); - /* - * AES-CTR needs to load IV in CONTEXT1 reg - * at an offset of 128bits (16bytes) - * CONTEXT1[255:128] = IV - */ - if (ctr_mode) - ctx1_iv_off = 16; - - /* - * RFC3686 specific: - * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} - * | *key = {KEY, NONCE} - */ - if (is_rfc3686) { - ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; - keylen -= CTR_RFC3686_NONCE_SIZE; - } - ctx->cdata.keylen = keylen; ctx->cdata.key_virt = key; ctx->cdata.key_inline = true; @@ -997,11 +988,99 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, return 0; } -static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, - const u8 *key, unsigned int keylen) +static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) { - return unlikely(des3_verify_key(skcipher, key)) ?: - skcipher_setkey(skcipher, key, keylen); + int err; + + err = aes_check_keylen(keylen); + if (err) { + crypto_skcipher_set_flags(skcipher, + CRYPTO_TFM_RES_BAD_KEY_LEN); + return err; + } + + return skcipher_setkey(skcipher, key, keylen, 0); +} + +static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + u32 ctx1_iv_off; + int err; + + /* + * RFC3686 specific: + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} + * | *key = {KEY, NONCE} + */ + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; + keylen -= CTR_RFC3686_NONCE_SIZE; + + err = aes_check_keylen(keylen); + if (err) { + crypto_skcipher_set_flags(skcipher, + CRYPTO_TFM_RES_BAD_KEY_LEN); + return err; + } + + return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); +} + +static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + u32 ctx1_iv_off; + int err; + + /* + * AES-CTR needs to load IV in CONTEXT1 reg + * at an offset of 128bits (16bytes) + * CONTEXT1[255:128] = IV + */ + ctx1_iv_off = 16; + + err = aes_check_keylen(keylen); + if (err) { + crypto_skcipher_set_flags(skcipher, + CRYPTO_TFM_RES_BAD_KEY_LEN); + return err; + } + + return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); +} + +static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + if (keylen != CHACHA_KEY_SIZE) { + crypto_skcipher_set_flags(skcipher, + CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + return skcipher_setkey(skcipher, key, keylen, 0); +} + +static int des_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + u32 tmp[DES3_EDE_EXPKEY_WORDS]; + struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); + + if (keylen == DES3_EDE_KEY_SIZE && + __des3_ede_setkey(tmp, &tfm->crt_flags, key, DES3_EDE_KEY_SIZE)) { + return -EINVAL; + } + + if (!des_ekey(tmp, key) && (crypto_skcipher_get_flags(skcipher) & + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { + crypto_skcipher_set_flags(skcipher, + CRYPTO_TFM_RES_WEAK_KEY); + return -EINVAL; + } + + return skcipher_setkey(skcipher, key, keylen, 0); } static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, @@ -1535,7 +1614,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = "cbc-aes-caam-qi2", .cra_blocksize = AES_BLOCK_SIZE, }, - .setkey = skcipher_setkey, + .setkey = aes_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, @@ -1551,7 +1630,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = 
"cbc-3des-caam-qi2", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .setkey = des3_skcipher_setkey, + .setkey = des_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, @@ -1567,7 +1646,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = "cbc-des-caam-qi2", .cra_blocksize = DES_BLOCK_SIZE, }, - .setkey = skcipher_setkey, + .setkey = des_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = DES_KEY_SIZE, @@ -1583,7 +1662,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = "ctr-aes-caam-qi2", .cra_blocksize = 1, }, - .setkey = skcipher_setkey, + .setkey = ctr_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, @@ -1601,7 +1680,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = "rfc3686-ctr-aes-caam-qi2", .cra_blocksize = 1, }, - .setkey = skcipher_setkey, + .setkey = rfc3686_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE + @@ -1640,7 +1719,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = "chacha20-caam-qi2", .cra_blocksize = 1, }, - .setkey = skcipher_setkey, + .setkey = chacha20_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = CHACHA_KEY_SIZE, diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 73abefa262ab..d6ef3c0d9b94 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -501,6 +501,11 @@ static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key, struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); struct device *jrdev = ctx->jrdev; + if (keylen != AES_KEYSIZE_128) { + crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + memcpy(ctx->key, key, keylen); dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); ctx->adata.keylen = keylen; @@ -515,6 +520,13 @@ static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key, unsigned int keylen) { struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + int err; + + err = aes_check_keylen(keylen); + if (err) { + crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); + return err; + } /* key is immediate data for all cmac shared descriptors */ ctx->adata.key_virt = key; -- cgit From 68a51394f37ae8c6eb51209efc406355a9861874 Mon Sep 17 00:00:00 2001 From: Iuliana Prodan Date: Wed, 31 Jul 2019 16:08:06 +0300 Subject: crypto: caam - check authsize Check authsize to solve the extra tests that expect -EINVAL to be returned when the authentication tag size is not valid. Validated authsize for GCM, RFC4106 and RFC4543. 
Signed-off-by: Iuliana Prodan Reviewed-by: Horia Geanta Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 13 +++++++++++++ drivers/crypto/caam/caamalg_qi.c | 13 +++++++++++++ drivers/crypto/caam/caamalg_qi2.c | 13 +++++++++++++ 3 files changed, 39 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index ce50ae1dc825..591906988a22 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -376,6 +376,11 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx(authenc); + int err; + + err = crypto_gcm_check_authsize(authsize); + if (err) + return err; ctx->authsize = authsize; gcm_set_sh_desc(authenc); @@ -439,6 +444,11 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx(authenc); + int err; + + err = crypto_rfc4106_check_authsize(authsize); + if (err) + return err; ctx->authsize = authsize; rfc4106_set_sh_desc(authenc); @@ -503,6 +513,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc, { struct caam_ctx *ctx = crypto_aead_ctx(authenc); + if (authsize != 16) + return -EINVAL; + ctx->authsize = authsize; rfc4543_set_sh_desc(authenc); diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index b8854f268e5a..6c69f54d1f79 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -371,6 +371,11 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx(authenc); + int err; + + err = crypto_gcm_check_authsize(authsize); + if (err) + return err; ctx->authsize = authsize; gcm_set_sh_desc(authenc); @@ -472,6 +477,11 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx(authenc); + int err; + + err = crypto_rfc4106_check_authsize(authsize); + if (err) + return err; ctx->authsize = authsize; rfc4106_set_sh_desc(authenc); @@ -578,6 +588,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc, { struct caam_ctx *ctx = crypto_aead_ctx(authenc); + if (authsize != 16) + return -EINVAL; + ctx->authsize = authsize; rfc4543_set_sh_desc(authenc); diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 337150bdf3d1..141d6231e4b9 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -720,6 +720,11 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx(authenc); + int err; + + err = crypto_gcm_check_authsize(authsize); + if (err) + return err; ctx->authsize = authsize; gcm_set_sh_desc(authenc); @@ -812,6 +817,11 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx(authenc); + int err; + + err = crypto_rfc4106_check_authsize(authsize); + if (err) + return err; ctx->authsize = authsize; rfc4106_set_sh_desc(authenc); @@ -911,6 +921,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc, { struct caam_ctx *ctx = crypto_aead_ctx(authenc); + if (authsize != 16) + return -EINVAL; + ctx->authsize = authsize; rfc4543_set_sh_desc(authenc); -- cgit From fcd23ed57c4cccda17b0be736cdfbd7970b842c8 Mon Sep 17 
00:00:00 2001 From: Iuliana Prodan Date: Wed, 31 Jul 2019 16:08:07 +0300 Subject: crypto: caam - check assoclen Check assoclen to solve the extra tests that expect -EINVAL to be returned when the associated data size is not valid. Validated assoclen for RFC4106 and RFC4543, which expect an assoclen of 16 or 20. Based on seqiv, IPsec ESP and RFC4543/RFC4106, the assoclen is the size of the IP header (SPI, seq_no, optional extended seq_no) plus the IV length. This can be 16 or 20 bytes. Signed-off-by: Iuliana Prodan Reviewed-by: Horia Geanta Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 10 ++-------- drivers/crypto/caam/caamalg_qi.c | 12 ++++-------- drivers/crypto/caam/caamalg_qi2.c | 10 ++-------- 3 files changed, 8 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 591906988a22..f832491a6bd9 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -1598,10 +1598,7 @@ static int chachapoly_decrypt(struct aead_request *req) static int ipsec_gcm_encrypt(struct aead_request *req) { - if (req->assoclen < 8) - return -EINVAL; - - return gcm_encrypt(req); + return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_encrypt(req); } static int aead_encrypt(struct aead_request *req) @@ -1675,10 +1672,7 @@ static int gcm_decrypt(struct aead_request *req) static int ipsec_gcm_decrypt(struct aead_request *req) { - if (req->assoclen < 8) - return -EINVAL; - - return gcm_decrypt(req); + return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req); } static int aead_decrypt(struct aead_request *req) diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 6c69f54d1f79..215802017d53 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -1237,18 +1237,14 @@ static int aead_decrypt(struct aead_request *req) static int ipsec_gcm_encrypt(struct aead_request *req) { - if (req->assoclen < 8) - return -EINVAL; - - return aead_crypt(req, true); + return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req, + true); } static int ipsec_gcm_decrypt(struct aead_request *req) { - if (req->assoclen < 8) - return -EINVAL; - - return aead_crypt(req, false); + return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req, + false); } static void skcipher_done(struct caam_drv_req *drv_req, u32 status) diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 141d6231e4b9..7350142050cc 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -1407,18 +1407,12 @@ static int aead_decrypt(struct aead_request *req) static int ipsec_gcm_encrypt(struct aead_request *req) { - if (req->assoclen < 8) - return -EINVAL; - - return aead_encrypt(req); + return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req); } static int ipsec_gcm_decrypt(struct aead_request *req) { - if (req->assoclen < 8) - return -EINVAL; - - return aead_decrypt(req); + return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req); } static void skcipher_encrypt_done(void *cbk_ctx, u32 status) -- cgit From 31bb2f0da1b5099732337d9ecef792ab67970ddf Mon Sep 17 00:00:00 2001 From: Iuliana Prodan Date: Wed, 31 Jul 2019 16:08:08 +0300 Subject: crypto: caam - check zero-length input Check for zero-length input in the skcipher algorithms to solve the extra tests. This is a valid operation, so the API returns no error.
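As for the crypto_ipsec_check_assoclen() helper introduced by the check-assoclen patch above: assuming the include/crypto/gcm.h definition, it is expected to look roughly like this sketch:

	static inline int crypto_ipsec_check_assoclen(unsigned int assoclen)
	{
		switch (assoclen) {
		case 16:	/* SPI + seq_no + 8-byte IV */
		case 20:	/* SPI + extended seq_no + 8-byte IV */
			break;
		default:
			return -EINVAL;
		}
		return 0;
	}

i.e. exactly the 16- and 20-byte values derived in that commit message.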
Signed-off-by: Iuliana Prodan Reviewed-by: Horia Geanta Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 6 ++++++ drivers/crypto/caam/caamalg_qi.c | 3 +++ drivers/crypto/caam/caamalg_qi2.c | 5 +++++ 3 files changed, 14 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index f832491a6bd9..21e30ded365a 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -1884,6 +1884,9 @@ static int skcipher_encrypt(struct skcipher_request *req) u32 *desc; int ret = 0; + if (!req->cryptlen) + return 0; + /* allocate extended descriptor */ edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); if (IS_ERR(edesc)) @@ -1918,6 +1921,9 @@ static int skcipher_decrypt(struct skcipher_request *req) u32 *desc; int ret = 0; + if (!req->cryptlen) + return 0; + /* allocate extended descriptor */ edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); if (IS_ERR(edesc)) diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 215802017d53..e63b2f719695 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -1445,6 +1445,9 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); int ret; + if (!req->cryptlen) + return 0; + if (unlikely(caam_congested)) return -EAGAIN; diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 7350142050cc..63a86b6b8b96 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -1499,6 +1499,9 @@ static int skcipher_encrypt(struct skcipher_request *req) struct caam_request *caam_req = skcipher_request_ctx(req); int ret; + if (!req->cryptlen) + return 0; + /* allocate extended descriptor */ edesc = skcipher_edesc_alloc(req); if (IS_ERR(edesc)) @@ -1527,6 +1530,8 @@ static int skcipher_decrypt(struct skcipher_request *req) struct caam_request *caam_req = skcipher_request_ctx(req); int ret; + if (!req->cryptlen) + return 0; /* allocate extended descriptor */ edesc = skcipher_edesc_alloc(req); if (IS_ERR(edesc)) -- cgit From d28a43edf3caff971b58d75c83ad00487152e448 Mon Sep 17 00:00:00 2001 From: Iuliana Prodan Date: Wed, 31 Jul 2019 16:08:09 +0300 Subject: crypto: caam - update rfc4106 sh desc to support zero length input Update the shared descriptor for rfc4106 to skip instructions in case cryptlen is zero. If these instructions are not skipped, the DECO hangs and a timeout error is thrown. Signed-off-by: Iuliana Prodan Reviewed-by: Horia Geanta Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg_desc.c | 47 ++++++++++++++++++++++++++------------ drivers/crypto/caam/caamalg_desc.h | 2 +- 2 files changed, 33 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c index 28ecef7a481c..aa9ccca67045 100644 --- a/drivers/crypto/caam/caamalg_desc.c +++ b/drivers/crypto/caam/caamalg_desc.c @@ -852,13 +852,16 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_decap); * @ivsize: initialization vector size * @icvsize: integrity check value (ICV) size (truncated or full) * @is_qi: true when called from caam/qi + * + * Input sequence: AAD | PTXT + * Output sequence: AAD | CTXT | ICV + * AAD length (assoclen), which includes the IV length, is available in Math3.
*/ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata, unsigned int ivsize, unsigned int icvsize, const bool is_qi) { - u32 *key_jump_cmd; - + u32 *key_jump_cmd, *zero_cryptlen_jump_cmd, *skip_instructions; init_sh_desc(desc, HDR_SHARE_SERIAL); /* Skip key loading if it is loaded due to sharing */ @@ -901,24 +904,26 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata, append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize); append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); - /* Read assoc data */ - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); + /* Skip AAD */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); - /* Skip IV */ - append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP); + /* Read cryptlen and set this value into VARSEQOUTLEN */ + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ); - /* Will read cryptlen bytes */ - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + /* If cryptlen is ZERO jump to AAD command */ + zero_cryptlen_jump_cmd = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); - /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */ - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG); + /* Read AAD data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); - /* Skip assoc data */ - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); + /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA); - /* cryptlen = seqoutlen - assoclen */ - append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ); + /* Skip IV */ + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP); + append_math_add(desc, VARSEQINLEN, VARSEQOUTLEN, REG0, CAAM_CMD_SZ); /* Write encrypted data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); @@ -927,6 +932,18 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata, append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); + /* Jump instructions to avoid double reading of AAD */ + skip_instructions = append_jump(desc, JUMP_TEST_ALL); + + /* There is no input data, cryptlen = 0 */ + set_jump_tgt_here(desc, zero_cryptlen_jump_cmd); + + /* Read AAD */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1); + + set_jump_tgt_here(desc, skip_instructions); + /* Write ICV */ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h index 706007624d82..f2893393ba5e 100644 --- a/drivers/crypto/caam/caamalg_desc.h +++ b/drivers/crypto/caam/caamalg_desc.h @@ -31,7 +31,7 @@ #define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ) #define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ) -#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) +#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 16 * CAAM_CMD_SZ) #define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) #define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ) #define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ) -- cgit From a2fb864c042b00debc7696d4459d8058ec7c8013 Mon Sep 17 00:00:00 2001 From: Horia Geantă Date: Wed, 31 Jul 2019 16:08:10 +0300 Subject: 
crypto: caam - keep both virtual and dma key addresses MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update alginfo struct to keep both virtual and dma key addresses, so that descriptors have them at hand. One example where this is needed is in the xcbc(aes) shared descriptors, which are updated in current patch. Another example is the upcoming fix for DKP. Signed-off-by: Horia Geantă Reviewed-by: Iuliana Prodan Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 37 +++++++++++++++++-------------------- drivers/crypto/caam/caamhash_desc.c | 5 ++--- drivers/crypto/caam/caamhash_desc.h | 2 +- drivers/crypto/caam/desc_constr.h | 10 ++++------ 4 files changed, 24 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index d6ef3c0d9b94..d19373e6ed4a 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -95,7 +95,6 @@ struct caam_hash_ctx { dma_addr_t sh_desc_update_first_dma; dma_addr_t sh_desc_fin_dma; dma_addr_t sh_desc_digest_dma; - dma_addr_t key_dma; enum dma_data_direction dir; struct device *jrdev; int ctx_len; @@ -282,13 +281,10 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash) struct device *jrdev = ctx->jrdev; u32 *desc; - /* key is loaded from memory for UPDATE and FINALIZE states */ - ctx->adata.key_dma = ctx->key_dma; - /* shared descriptor for ahash_update */ desc = ctx->sh_desc_update; cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, - ctx->ctx_len, ctx->ctx_len, 0); + ctx->ctx_len, ctx->ctx_len); dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, desc_bytes(desc), ctx->dir); print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ", @@ -298,7 +294,7 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash) /* shared descriptor for ahash_{final,finup} */ desc = ctx->sh_desc_fin; cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, - digestsize, ctx->ctx_len, 0); + digestsize, ctx->ctx_len); dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, desc_bytes(desc), ctx->dir); print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ", @@ -311,7 +307,7 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash) /* shared descriptor for first invocation of ahash_update */ desc = ctx->sh_desc_update_first; cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, - ctx->ctx_len, ctx->key_dma); + ctx->ctx_len); dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, desc_bytes(desc), ctx->dir); print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__) @@ -321,7 +317,7 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash) /* shared descriptor for ahash_digest */ desc = ctx->sh_desc_digest; cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, - digestsize, ctx->ctx_len, 0); + digestsize, ctx->ctx_len); dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, desc_bytes(desc), ctx->dir); print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ", @@ -340,7 +336,7 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash) /* shared descriptor for ahash_update */ desc = ctx->sh_desc_update; cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, - ctx->ctx_len, ctx->ctx_len, 0); + ctx->ctx_len, ctx->ctx_len); dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, desc_bytes(desc), ctx->dir); print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ", @@ -350,7 +346,7 @@ static int 
acmac_set_sh_desc(struct crypto_ahash *ahash) /* shared descriptor for ahash_{final,finup} */ desc = ctx->sh_desc_fin; cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, - digestsize, ctx->ctx_len, 0); + digestsize, ctx->ctx_len); dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, desc_bytes(desc), ctx->dir); print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ", @@ -360,7 +356,7 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash) /* shared descriptor for first invocation of ahash_update */ desc = ctx->sh_desc_update_first; cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, - ctx->ctx_len, 0); + ctx->ctx_len); dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, desc_bytes(desc), ctx->dir); print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__) @@ -370,7 +366,7 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash) /* shared descriptor for ahash_digest */ desc = ctx->sh_desc_digest; cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, - digestsize, ctx->ctx_len, 0); + digestsize, ctx->ctx_len); dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, desc_bytes(desc), ctx->dir); print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ", @@ -507,7 +503,8 @@ static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key, } memcpy(ctx->key, key, keylen); - dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); + dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen, + DMA_TO_DEVICE); ctx->adata.keylen = keylen; print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ", @@ -1831,11 +1828,11 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; ctx->ctx_len = 48; - ctx->key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key, - ARRAY_SIZE(ctx->key), - DMA_BIDIRECTIONAL, - DMA_ATTR_SKIP_CPU_SYNC); - if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) { + ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key, + ARRAY_SIZE(ctx->key), + DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC); + if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) { dev_err(ctx->jrdev, "unable to map key\n"); caam_jr_free(ctx->jrdev); return -ENOMEM; @@ -1859,7 +1856,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) dev_err(ctx->jrdev, "unable to map shared descriptors\n"); if (is_xcbc_aes(caam_hash->alg_type)) - dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma, + dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma, ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC); @@ -1895,7 +1892,7 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm) offsetof(struct caam_hash_ctx, key), ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); if (is_xcbc_aes(ctx->adata.algtype)) - dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma, + dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma, ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC); caam_jr_free(ctx->jrdev); diff --git a/drivers/crypto/caam/caamhash_desc.c b/drivers/crypto/caam/caamhash_desc.c index 71d018343ee4..78383d77da99 100644 --- a/drivers/crypto/caam/caamhash_desc.c +++ b/drivers/crypto/caam/caamhash_desc.c @@ -83,10 +83,9 @@ EXPORT_SYMBOL(cnstr_shdsc_ahash); * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE} * @digestsize: algorithm's digest size * @ctx_len: size of Context Register - * @key_dma: I/O Virtual Address of the key */ void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state, - int digestsize, 
int ctx_len, dma_addr_t key_dma) + int digestsize, int ctx_len) { u32 *skip_key_load; @@ -136,7 +135,7 @@ void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state, LDST_SRCDST_BYTE_CONTEXT); if (is_xcbc_aes(adata->algtype) && state == OP_ALG_AS_INIT) /* Save K1 */ - append_fifo_store(desc, key_dma, adata->keylen, + append_fifo_store(desc, adata->key_dma, adata->keylen, LDST_CLASS_1_CCB | FIFOST_TYPE_KEY_KEK); } EXPORT_SYMBOL(cnstr_shdsc_sk_hash); diff --git a/drivers/crypto/caam/caamhash_desc.h b/drivers/crypto/caam/caamhash_desc.h index 6947ee1f200c..4f369b8cb6ae 100644 --- a/drivers/crypto/caam/caamhash_desc.h +++ b/drivers/crypto/caam/caamhash_desc.h @@ -25,5 +25,5 @@ void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state, int digestsize, int ctx_len, bool import_ctx, int era); void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state, - int digestsize, int ctx_len, dma_addr_t key_dma); + int digestsize, int ctx_len); #endif /* _CAAMHASH_DESC_H_ */ diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index 5988a26a2441..815417411a18 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h @@ -457,8 +457,8 @@ do { \ * functions where it is used. * @keylen: length of the provided algorithm key, in bytes * @keylen_pad: padded length of the provided algorithm key, in bytes - * @key: address where algorithm key resides; virtual address if key_inline - * is true, dma (bus) address if key_inline is false. + * @key_dma: dma (bus) address where algorithm key resides + * @key_virt: virtual address where algorithm key resides * @key_inline: true - key can be inlined in the descriptor; false - key is * referenced by the descriptor */ @@ -466,10 +466,8 @@ struct alginfo { u32 algtype; unsigned int keylen; unsigned int keylen_pad; - union { - dma_addr_t key_dma; - const void *key_virt; - }; + dma_addr_t key_dma; + const void *key_virt; bool key_inline; }; -- cgit From e9b4913a5f944b23d6109c44b6f3fc6e092e30ce Mon Sep 17 00:00:00 2001 From: Horia Geantă Date: Wed, 31 Jul 2019 16:08:11 +0300 Subject: crypto: caam - fix MDHA key derivation for certain user key lengths MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fuzz testing uncovered an issue when |user key| > |derived key|. Derived key generation has to be fixed in two cases:
1. Era >= 6 (DKP is available): DKP cannot be used with an immediate input key if |user key| > |derived key|, since the resulting descriptor (after DKP execution) would be invalid - having a few bytes from the user key left in the descriptor buffer as incorrect opcodes. Fix DKP usage both in standalone hmac and in authenc algorithms. For authenc the logic is simplified by always storing both virtual and dma key addresses.
2. Era < 6: The same case (|user key| > |derived key|) fails when DKP is not available. Make sure gen_split_key() dma maps max(|user key|, |derived key|), since this is an in-place (bidirectional) operation.
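To see why the immediate DKP form breaks for long user keys, consider the descriptor-space bookkeeping; the sketch below (names simplified; CAAM_CMD_SZ is the 4-byte descriptor word size) mirrors the desc_constr.h hunk further down:

	/*
	 * Number of descriptor words reserved after the DKP OPERATION
	 * command, which the CAAM overwrites with the derived (split) key.
	 */
	static int dkp_reserved_words(unsigned int keylen,
				      unsigned int keylen_pad,
				      bool key_immediate)
	{
		if (key_immediate)
			/*
			 * Immediate user key: valid only while
			 * keylen <= keylen_pad, otherwise trailing
			 * user-key bytes are left behind and decoded
			 * as (invalid) opcodes.
			 */
			return (ALIGN(keylen_pad, CAAM_CMD_SZ) -
				ALIGN(keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;

		/* Key passed by reference: only a pointer is inlined. */
		return (ALIGN(keylen_pad, CAAM_CMD_SZ) - CAAM_PTR_SZ) /
		       CAAM_CMD_SZ;
	}

With |user key| > |derived key| the immediate variant would have to reserve a negative number of words - exactly the invalid-descriptor case described above - so the key must be referenced by its dma address instead.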
Signed-off-by: Horia Geantă Reviewed-by: Iuliana Prodan Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 42 +++++++----------------- drivers/crypto/caam/caamalg_qi.c | 42 +++++++----------------- drivers/crypto/caam/caamalg_qi2.c | 67 +++++++++++++++++++++++++++------------ drivers/crypto/caam/caamhash.c | 53 ++++++++++++++++++++++--------- drivers/crypto/caam/desc_constr.h | 24 ++++++++++---- drivers/crypto/caam/key_gen.c | 9 +++--- 6 files changed, 132 insertions(+), 105 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 21e30ded365a..947ba8ef487a 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -205,6 +205,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); } + /* + * In case |user key| > |derived key|, using DKP<imm,imm> + * would result in invalid opcodes (last bytes of user key) in + * the resulting descriptor. Use DKP<ptr,imm> instead => both + * virtual and dma key addresses are needed. + */ + ctx->adata.key_virt = ctx->key; + ctx->adata.key_dma = ctx->key_dma; + + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; + data_len[0] = ctx->adata.keylen_pad; data_len[1] = ctx->cdata.keylen; @@ -221,16 +233,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ARRAY_SIZE(data_len)) < 0) return -EINVAL; - if (inl_mask & 1) - ctx->adata.key_virt = ctx->key; - else - ctx->adata.key_dma = ctx->key_dma; - - if (inl_mask & 2) - ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; - else - ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; - ctx->adata.key_inline = !!(inl_mask & 1); ctx->cdata.key_inline = !!(inl_mask & 2); @@ -253,16 +255,6 @@ skip_enc: ARRAY_SIZE(data_len)) < 0) return -EINVAL; - if (inl_mask & 1) - ctx->adata.key_virt = ctx->key; - else - ctx->adata.key_dma = ctx->key_dma; - - if (inl_mask & 2) - ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; - else - ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; - ctx->adata.key_inline = !!(inl_mask & 1); ctx->cdata.key_inline = !!(inl_mask & 2); @@ -287,16 +279,6 @@ skip_enc: ARRAY_SIZE(data_len)) < 0) return -EINVAL; - if (inl_mask & 1) - ctx->adata.key_virt = ctx->key; - else - ctx->adata.key_dma = ctx->key_dma; - - if (inl_mask & 2) - ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; - else - ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; - ctx->adata.key_inline = !!(inl_mask & 1); ctx->cdata.key_inline = !!(inl_mask & 2); diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index e63b2f719695..59b59f5e9550 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -105,6 +105,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); } + /* + * In case |user key| > |derived key|, using DKP<imm,imm> would result + * in invalid opcodes (last bytes of user key) in the resulting + * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key + * addresses are needed.
+ */ + ctx->adata.key_virt = ctx->key; + ctx->adata.key_dma = ctx->key_dma; + + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; + data_len[0] = ctx->adata.keylen_pad; data_len[1] = ctx->cdata.keylen; @@ -118,16 +130,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ARRAY_SIZE(data_len)) < 0) return -EINVAL; - if (inl_mask & 1) - ctx->adata.key_virt = ctx->key; - else - ctx->adata.key_dma = ctx->key_dma; - - if (inl_mask & 2) - ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; - else - ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; - ctx->adata.key_inline = !!(inl_mask & 1); ctx->cdata.key_inline = !!(inl_mask & 2); @@ -143,16 +145,6 @@ skip_enc: ARRAY_SIZE(data_len)) < 0) return -EINVAL; - if (inl_mask & 1) - ctx->adata.key_virt = ctx->key; - else - ctx->adata.key_dma = ctx->key_dma; - - if (inl_mask & 2) - ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; - else - ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; - ctx->adata.key_inline = !!(inl_mask & 1); ctx->cdata.key_inline = !!(inl_mask & 2); @@ -171,16 +163,6 @@ skip_enc: ARRAY_SIZE(data_len)) < 0) return -EINVAL; - if (inl_mask & 1) - ctx->adata.key_virt = ctx->key; - else - ctx->adata.key_dma = ctx->key_dma; - - if (inl_mask & 2) - ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; - else - ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; - ctx->adata.key_inline = !!(inl_mask & 1); ctx->cdata.key_inline = !!(inl_mask & 2); diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 63a86b6b8b96..bd01bcd799e8 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -199,6 +199,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); } + /* + * In case |user key| > |derived key|, using DKP<imm,imm> would result + * in invalid opcodes (last bytes of user key) in the resulting + * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key + * addresses are needed.
+ */ + ctx->adata.key_virt = ctx->key; + ctx->adata.key_dma = ctx->key_dma; + + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; + data_len[0] = ctx->adata.keylen_pad; data_len[1] = ctx->cdata.keylen; @@ -210,16 +222,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ARRAY_SIZE(data_len)) < 0) return -EINVAL; - if (inl_mask & 1) - ctx->adata.key_virt = ctx->key; - else - ctx->adata.key_dma = ctx->key_dma; - - if (inl_mask & 2) - ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; - else - ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; - ctx->adata.key_inline = !!(inl_mask & 1); ctx->cdata.key_inline = !!(inl_mask & 2); @@ -248,16 +250,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ARRAY_SIZE(data_len)) < 0) return -EINVAL; - if (inl_mask & 1) - ctx->adata.key_virt = ctx->key; - else - ctx->adata.key_dma = ctx->key_dma; - - if (inl_mask & 2) - ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; - else - ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; - ctx->adata.key_inline = !!(inl_mask & 1); ctx->cdata.key_inline = !!(inl_mask & 2); @@ -2999,6 +2991,7 @@ enum hash_optype { /** * caam_hash_ctx - ahash per-session context * @flc: Flow Contexts array + * @key: authentication key * @flc_dma: I/O virtual addresses of the Flow Contexts * @dev: dpseci device * @ctx_len: size of Context Register * */ struct caam_hash_ctx { struct caam_flc flc[HASH_NUM_OP]; + u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; dma_addr_t flc_dma[HASH_NUM_OP]; struct device *dev; int ctx_len; @@ -3306,6 +3300,19 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key, ctx->adata.key_virt = key; ctx->adata.key_inline = true; + /* + * In case |user key| > |derived key|, using DKP<imm,imm> would result + * in invalid opcodes (last bytes of user key) in the resulting + * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key + * addresses are needed.
+ */ + if (keylen > ctx->adata.keylen_pad) { + memcpy(ctx->key, key, keylen); + dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma, + ctx->adata.keylen_pad, + DMA_TO_DEVICE); + } + ret = ahash_set_sh_desc(ahash); kfree(hashed_key); return ret; @@ -4536,11 +4543,27 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) ctx->dev = caam_hash->dev; + if (alg->setkey) { + ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key, + ARRAY_SIZE(ctx->key), + DMA_TO_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) { + dev_err(ctx->dev, "unable to map key\n"); + return -ENOMEM; + } + } + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc), DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC); if (dma_mapping_error(ctx->dev, dma_addr)) { dev_err(ctx->dev, "unable to map shared descriptors\n"); + if (ctx->adata.key_dma) + dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma, + ARRAY_SIZE(ctx->key), + DMA_TO_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); return -ENOMEM; } @@ -4566,6 +4589,10 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm) dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc), DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC); + if (ctx->adata.key_dma) + dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma, + ARRAY_SIZE(ctx->key), DMA_TO_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); } static struct caam_hash_alg *caam_hash_alloc(struct device *dev, diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index d19373e6ed4a..ed1931f97df1 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -96,6 +96,7 @@ struct caam_hash_ctx { dma_addr_t sh_desc_fin_dma; dma_addr_t sh_desc_digest_dma; enum dma_data_direction dir; + enum dma_data_direction key_dir; struct device *jrdev; int ctx_len; struct alginfo adata; @@ -476,6 +477,18 @@ static int ahash_setkey(struct crypto_ahash *ahash, goto bad_free_key; memcpy(ctx->key, key, keylen); + + /* + * In case |user key| > |derived key|, using DKP<imm,imm> + * would result in invalid opcodes (last bytes of user key) in + * the resulting descriptor. Use DKP<ptr,imm> instead => both + * virtual and dma key addresses are needed. + */ + if (keylen > ctx->adata.keylen_pad) + dma_sync_single_for_device(ctx->jrdev, + ctx->adata.key_dma, + ctx->adata.keylen_pad, + DMA_TO_DEVICE); } else { ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen, CAAM_MAX_HASH_KEY_SIZE); @@ -1825,40 +1838,50 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) if (is_xcbc_aes(caam_hash->alg_type)) { ctx->dir = DMA_TO_DEVICE; + ctx->key_dir = DMA_BIDIRECTIONAL; ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; ctx->ctx_len = 48; - - ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key, - ARRAY_SIZE(ctx->key), - DMA_BIDIRECTIONAL, - DMA_ATTR_SKIP_CPU_SYNC); - if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) { - dev_err(ctx->jrdev, "unable to map key\n"); - caam_jr_free(ctx->jrdev); - return -ENOMEM; - } } else if (is_cmac_aes(caam_hash->alg_type)) { ctx->dir = DMA_TO_DEVICE; + ctx->key_dir = DMA_NONE; ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; ctx->ctx_len = 32; } else { - ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + if (priv->era >= 6) { + ctx->dir = DMA_BIDIRECTIONAL; + ctx->key_dir = alg->setkey ?
DMA_TO_DEVICE : DMA_NONE; + } else { + ctx->dir = DMA_TO_DEVICE; + ctx->key_dir = DMA_NONE; + } ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; ctx->ctx_len = runninglen[(ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT]; } + if (ctx->key_dir != DMA_NONE) { + ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key, + ARRAY_SIZE(ctx->key), + ctx->key_dir, + DMA_ATTR_SKIP_CPU_SYNC); + if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) { + dev_err(ctx->jrdev, "unable to map key\n"); + caam_jr_free(ctx->jrdev); + return -ENOMEM; + } + } + dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, offsetof(struct caam_hash_ctx, key), ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); if (dma_mapping_error(ctx->jrdev, dma_addr)) { dev_err(ctx->jrdev, "unable to map shared descriptors\n"); - if (is_xcbc_aes(caam_hash->alg_type)) + if (ctx->key_dir != DMA_NONE) dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma, ARRAY_SIZE(ctx->key), - DMA_BIDIRECTIONAL, + ctx->key_dir, DMA_ATTR_SKIP_CPU_SYNC); caam_jr_free(ctx->jrdev); @@ -1891,9 +1914,9 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm) dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, offsetof(struct caam_hash_ctx, key), ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); - if (is_xcbc_aes(ctx->adata.algtype)) + if (ctx->key_dir != DMA_NONE) dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma, - ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL, + ARRAY_SIZE(ctx->key), ctx->key_dir, DMA_ATTR_SKIP_CPU_SYNC); caam_jr_free(ctx->jrdev); } diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index 815417411a18..536f360bf131 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h @@ -533,14 +533,26 @@ static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata) if (adata->key_inline) { int words; - append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid | - OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM | - adata->keylen); - append_data(desc, adata->key_virt, adata->keylen); + if (adata->keylen > adata->keylen_pad) { + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid | + OP_PCL_DKP_SRC_PTR | + OP_PCL_DKP_DST_IMM | adata->keylen); + append_ptr(desc, adata->key_dma); + + words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) - + CAAM_PTR_SZ) / CAAM_CMD_SZ; + } else { + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid | + OP_PCL_DKP_SRC_IMM | + OP_PCL_DKP_DST_IMM | adata->keylen); + append_data(desc, adata->key_virt, adata->keylen); + + words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) - + ALIGN(adata->keylen, CAAM_CMD_SZ)) / + CAAM_CMD_SZ; + } /* Reserve space in descriptor buffer for the derived key */ - words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) - - ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ; if (words) (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words); } else { diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c index c6f8375ae215..5a851ddc48fb 100644 --- a/drivers/crypto/caam/key_gen.c +++ b/drivers/crypto/caam/key_gen.c @@ -48,18 +48,20 @@ int gen_split_key(struct device *jrdev, u8 *key_out, u32 *desc; struct split_key_result result; dma_addr_t dma_addr; + unsigned int local_max; int ret = -ENOMEM; adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK); adata->keylen_pad = split_key_pad_len(adata->algtype & OP_ALG_ALGSEL_MASK); + local_max = max(keylen, adata->keylen_pad); dev_dbg(jrdev, "split keylen %d split keylen padded %d\n", adata->keylen, adata->keylen_pad); print_hex_dump_debug("ctx.key@" 
__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); - if (adata->keylen_pad > max_keylen) + if (local_max > max_keylen) return -EINVAL; desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); @@ -70,8 +72,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, memcpy(key_out, key_in, keylen); - dma_addr = dma_map_single(jrdev, key_out, adata->keylen_pad, - DMA_BIDIRECTIONAL); + dma_addr = dma_map_single(jrdev, key_out, local_max, DMA_BIDIRECTIONAL); if (dma_mapping_error(jrdev, dma_addr)) { dev_err(jrdev, "unable to map key memory\n"); goto out_free; @@ -117,7 +118,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, adata->keylen_pad, 1); } - dma_unmap_single(jrdev, dma_addr, adata->keylen_pad, DMA_BIDIRECTIONAL); + dma_unmap_single(jrdev, dma_addr, local_max, DMA_BIDIRECTIONAL); out_free: kfree(desc); return ret; -- cgit From c59a1d41672a89b5cac49db1a472ff889e35a2d2 Mon Sep 17 00:00:00 2001 From: Iuliana Prodan Date: Wed, 31 Jul 2019 16:08:12 +0300 Subject: crypto: caam - free resources in case caam_rng registration failed Check the return value of the hardware registration for caam_rng and free resources in case of failure. Fixes: e24f7c9e87d4 ("crypto: caam - hwrng support") Signed-off-by: Iuliana Prodan Reviewed-by: Horia Geanta Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamrng.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index 561bcb535184..54c32d53760c 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c @@ -333,7 +333,10 @@ int caam_rng_init(struct device *ctrldev) goto free_rng_ctx; dev_info(dev, "registering rng-caam\n"); - return hwrng_register(&caam_rng); + + err = hwrng_register(&caam_rng); + if (!err) + return err; free_rng_ctx: kfree(rng_ctx); -- cgit From 4e3a61c55b8c5db9bfaaf1fc1f448f8f874f66eb Mon Sep 17 00:00:00 2001 From: Iuliana Prodan Date: Wed, 31 Jul 2019 16:08:13 +0300 Subject: crypto: caam - execute module exit point only if necessary Commit 1b46c90c8e00 ("crypto: caam - convert top level drivers to libraries") changed the behavior of the entry and exit points for caamalg, caamalg_qi, caamalg_qi2, caamhash, caampkc, caamrng. For example, previously caam_pkc_init() and caam_pkc_exit() were module entry/exit points. This means that if an error would happen in caam_pkc_init(), then caam_pkc_exit() wouldn't have been called. After the mentioned commit, caam_pkc_init() and caam_pkc_exit() are manually called - from jr.c. caam_pkc_exit() is called unconditionally, even if caam_pkc_init() failed. Add a global variable that keeps track of whether the algorithm registration (and the associated resource allocation) succeeded. The exit point of the caampkc/caamrng modules is executed only if the registration was successful. Therefore we avoid a double free of resources in case the algorithm registration failed.
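The guard added below follows a simple library-module pattern; a minimal sketch, with caam_foo_init/caam_foo_exit and the register/unregister helpers as placeholders rather than real driver API:

	static bool init_done;

	int caam_foo_init(struct device *ctrldev)
	{
		int err;

		init_done = false;
		err = register_foo(ctrldev);	/* hypothetical */
		if (!err)
			init_done = true;	/* remember success */
		return err;
	}

	void caam_foo_exit(void)
	{
		if (!init_done)			/* init failed or never ran */
			return;
		unregister_foo();		/* hypothetical */
	}

Because jr.c invokes the exit points unconditionally, the flag is what prevents unregistering algorithms or freeing resources that were never set up.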
Fixes: 1b46c90c8e00 ("crypto: caam - convert top level drivers to libraries") Signed-off-by: Iuliana Prodan Reviewed-by: Horia Geanta Signed-off-by: Herbert Xu --- drivers/crypto/caam/caampkc.c | 11 +++++++++++ drivers/crypto/caam/caamrng.c | 14 +++++++++++++- 2 files changed, 24 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index e05d975da582..e00a4701b950 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -29,6 +29,12 @@ /* buffer filled with zeros, used for padding */ static u8 *zero_buffer; +/* + * variable used to avoid double free of resources in case + * algorithm registration was unsuccessful + */ +static bool init_done; + static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc, struct akcipher_request *req) { @@ -1076,6 +1082,7 @@ int caam_pkc_init(struct device *ctrldev) struct caam_drv_private *priv = dev_get_drvdata(ctrldev); u32 pk_inst; int err; + init_done = false; /* Determine public key hardware accelerator presence. */ if (priv->era < 10) @@ -1100,6 +1107,7 @@ int caam_pkc_init(struct device *ctrldev) dev_warn(ctrldev, "%s alg registration failed\n", caam_rsa.base.cra_driver_name); } else { + init_done = true; dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n"); } @@ -1108,6 +1116,9 @@ int caam_pkc_init(struct device *ctrldev) void caam_pkc_exit(void) { + if (!init_done) + return; + kfree(zero_buffer); crypto_unregister_akcipher(&caam_rsa); } diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index 54c32d53760c..7fbda1b08360 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c @@ -80,6 +80,12 @@ struct caam_rng_ctx { static struct caam_rng_ctx *rng_ctx; +/* + * Variable used to avoid double free of resources in case + * algorithm registration was unsuccessful + */ +static bool init_done; + static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd) { if (bd->addr) @@ -296,6 +302,9 @@ static struct hwrng caam_rng = { void caam_rng_exit(void) { + if (!init_done) + return; + caam_jr_free(rng_ctx->jrdev); hwrng_unregister(&caam_rng); kfree(rng_ctx); @@ -307,6 +316,7 @@ int caam_rng_init(struct device *ctrldev) u32 rng_inst; struct caam_drv_private *priv = dev_get_drvdata(ctrldev); int err; + init_done = false; /* Check for an instantiated RNG before registration */ if (priv->era < 10) @@ -335,8 +345,10 @@ int caam_rng_init(struct device *ctrldev) dev_info(dev, "registering rng-caam\n"); err = hwrng_register(&caam_rng); - if (!err) + if (!err) { + init_done = true; return err; + } free_rng_ctx: kfree(rng_ctx); -- cgit From 58068cfc810c3b5cc8c25b0fab73c02625aeadf1 Mon Sep 17 00:00:00 2001 From: Iuliana Prodan Date: Wed, 31 Jul 2019 16:08:14 +0300 Subject: crypto: caam - unregister algorithm only if the registration succeeded To know if a registration succeeded, add a new struct, caam_akcipher_alg, that also keeps the registration status. This status is updated in caam_pkc_init and verified in caam_pkc_exit to unregister an algorithm.
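With both flags in place, the pkc exit path reconstructed from the diff below is expected to read roughly:

	void caam_pkc_exit(void)
	{
		if (!init_done)			/* init never completed */
			return;

		if (caam_rsa.registered)	/* undo only what init did */
			crypto_unregister_akcipher(&caam_rsa.akcipher);

		kfree(zero_buffer);
	}

init_done gates the teardown as a whole, while the per-algorithm registered flag keeps the unregister step safe should more akcipher algorithms be registered later.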
Fixes: 1b46c90c8e00 ("crypto: caam - convert top level drivers to libraries") Signed-off-by: Iuliana Prodan Reviewed-by: Horia Geanta Signed-off-by: Herbert Xu --- drivers/crypto/caam/caampkc.c | 49 ++++++++++++++++++++++++++----------------- 1 file changed, 30 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index e00a4701b950..5b12b232ee5e 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -35,6 +35,11 @@ static u8 *zero_buffer; */ static bool init_done; +struct caam_akcipher_alg { + struct akcipher_alg akcipher; + bool registered; +}; + static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc, struct akcipher_request *req) { @@ -1058,22 +1063,24 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm) caam_jr_free(ctx->dev); } -static struct akcipher_alg caam_rsa = { - .encrypt = caam_rsa_enc, - .decrypt = caam_rsa_dec, - .set_pub_key = caam_rsa_set_pub_key, - .set_priv_key = caam_rsa_set_priv_key, - .max_size = caam_rsa_max_size, - .init = caam_rsa_init_tfm, - .exit = caam_rsa_exit_tfm, - .reqsize = sizeof(struct caam_rsa_req_ctx), - .base = { - .cra_name = "rsa", - .cra_driver_name = "rsa-caam", - .cra_priority = 3000, - .cra_module = THIS_MODULE, - .cra_ctxsize = sizeof(struct caam_rsa_ctx), - }, +static struct caam_akcipher_alg caam_rsa = { + .akcipher = { + .encrypt = caam_rsa_enc, + .decrypt = caam_rsa_dec, + .set_pub_key = caam_rsa_set_pub_key, + .set_priv_key = caam_rsa_set_priv_key, + .max_size = caam_rsa_max_size, + .init = caam_rsa_init_tfm, + .exit = caam_rsa_exit_tfm, + .reqsize = sizeof(struct caam_rsa_req_ctx), + .base = { + .cra_name = "rsa", + .cra_driver_name = "rsa-caam", + .cra_priority = 3000, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct caam_rsa_ctx), + }, + } }; /* Public Key Cryptography module initialization handler */ @@ -1101,13 +1108,15 @@ int caam_pkc_init(struct device *ctrldev) if (!zero_buffer) return -ENOMEM; - err = crypto_register_akcipher(&caam_rsa); + err = crypto_register_akcipher(&caam_rsa.akcipher); + if (err) { kfree(zero_buffer); dev_warn(ctrldev, "%s alg registration failed\n", - caam_rsa.base.cra_driver_name); + caam_rsa.akcipher.base.cra_driver_name); } else { init_done = true; + caam_rsa.registered = true; dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n"); } @@ -1119,6 +1128,8 @@ void caam_pkc_exit(void) if (!init_done) return; + if (caam_rsa.registered) + crypto_unregister_akcipher(&caam_rsa.akcipher); + kfree(zero_buffer); - crypto_unregister_akcipher(&caam_rsa); } -- cgit From 0435d47e2627b76b6dfb59e2e95ffed7232bd0ed Mon Sep 17 00:00:00 2001 From: Iuliana Prodan Date: Wed, 31 Jul 2019 16:08:15 +0300 Subject: crypto: caam - change return value in case CAAM has no MDHA To be consistent with other CAAM modules, caamhash should return 0 instead of -ENODEV in case CAAM has no MDHA. Based on commit 1b46c90c8e00 ("crypto: caam - convert top level drivers to libraries") the value returned by entry point is never checked and the exit point is always executed. 
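Under this library model the return value is informational only; schematically (the caller shape is assumed here, not verbatim jr.c code):

	/* on job ring probe: entry points are fired unconditionally */
	caam_algapi_hash_init(ctrldev);	/* now returns 0 on missing MDHA */
	/* ... */
	/* on removal: exit points run unconditionally as well */
	caam_algapi_hash_exit();

so a CAAM without an MDHA unit simply registers no hash algorithms instead of reporting -ENODEV.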
Signed-off-by: Iuliana Prodan Reviewed-by: Horia Geanta Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index ed1931f97df1..8a07edb45dad 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -2007,7 +2007,7 @@ int caam_algapi_hash_init(struct device *ctrldev) * is not present. */ if (!md_inst) - return -ENODEV; + return 0; /* Limit digest size based on LP256 */ if (md_vid == CHA_VER_VID_MD_LP256) -- cgit From 263c9959c9376ec0217d6adc61222a53469eed3c Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Fri, 2 Aug 2019 15:57:50 +0800 Subject: crypto: hisilicon - add queue management driver for HiSilicon QM module QM is a general IP used by HiSilicon accelerators. It provides a general PCIe interface for the CPU and the accelerator to share a group of queues. A QM integrated in an accelerator provides a queue management service. Queues can be assigned to PF and VFs, and queues can be controlled by unified mailboxes and doorbells. Specific task requests are described by specific description buffers, which are controlled and passed to the related accelerator IP by the QM. This patch adds a QM driver used by the accelerator driver to access the QM hardware. Signed-off-by: Zhou Wang Signed-off-by: Kenneth Lee Signed-off-by: Shiju Jose Signed-off-by: Hao Fang Reviewed-by: Jonathan Cameron Reviewed-by: John Garry Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/Kconfig | 7 + drivers/crypto/hisilicon/Makefile | 1 + drivers/crypto/hisilicon/qm.c | 1550 +++++++++++++++++++++++++++++++++++++ drivers/crypto/hisilicon/qm.h | 182 +++++ 4 files changed, 1740 insertions(+) create mode 100644 drivers/crypto/hisilicon/qm.c create mode 100644 drivers/crypto/hisilicon/qm.h (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index 8ca9c503bcb0..b79be8dc78e7 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -12,3 +12,10 @@ config CRYPTO_DEV_HISI_SEC To compile this as a module, choose M here: the module will be called hisi_sec. + +config CRYPTO_DEV_HISI_QM + tristate + depends on ARM64 && PCI && PCI_MSI + help + HiSilicon accelerator engines use a common queue management + interface. Specific engine driver may use this module. diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile index 463f46ace182..05e9052e0f52 100644 --- a/drivers/crypto/hisilicon/Makefile +++ b/drivers/crypto/hisilicon/Makefile @@ -1,2 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/ +obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += qm.o diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c new file mode 100644 index 000000000000..c095d4747812 --- /dev/null +++ b/drivers/crypto/hisilicon/qm.c @@ -0,0 +1,1550 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 HiSilicon Limited.
*/ +#include +#include +#include +#include +#include +#include +#include +#include "qm.h" + +/* eq/aeq irq enable */ +#define QM_VF_AEQ_INT_SOURCE 0x0 +#define QM_VF_AEQ_INT_MASK 0x4 +#define QM_VF_EQ_INT_SOURCE 0x8 +#define QM_VF_EQ_INT_MASK 0xc +#define QM_IRQ_NUM_V1 1 +#define QM_IRQ_NUM_PF_V2 4 + +#define QM_EQ_EVENT_IRQ_VECTOR 0 +#define QM_AEQ_EVENT_IRQ_VECTOR 1 +#define QM_ABNORMAL_EVENT_IRQ_VECTOR 3 + +/* mailbox */ +#define QM_MB_CMD_SQC 0x0 +#define QM_MB_CMD_CQC 0x1 +#define QM_MB_CMD_EQC 0x2 +#define QM_MB_CMD_AEQC 0x3 +#define QM_MB_CMD_SQC_BT 0x4 +#define QM_MB_CMD_CQC_BT 0x5 +#define QM_MB_CMD_SQC_VFT_V2 0x6 + +#define QM_MB_CMD_SEND_BASE 0x300 +#define QM_MB_EVENT_SHIFT 8 +#define QM_MB_BUSY_SHIFT 13 +#define QM_MB_OP_SHIFT 14 +#define QM_MB_CMD_DATA_ADDR_L 0x304 +#define QM_MB_CMD_DATA_ADDR_H 0x308 + +/* sqc shift */ +#define QM_SQ_HOP_NUM_SHIFT 0 +#define QM_SQ_PAGE_SIZE_SHIFT 4 +#define QM_SQ_BUF_SIZE_SHIFT 8 +#define QM_SQ_SQE_SIZE_SHIFT 12 +#define QM_SQ_PRIORITY_SHIFT 0 +#define QM_SQ_ORDERS_SHIFT 4 +#define QM_SQ_TYPE_SHIFT 8 + +#define QM_SQ_TYPE_MASK GENMASK(3, 0) + +/* cqc shift */ +#define QM_CQ_HOP_NUM_SHIFT 0 +#define QM_CQ_PAGE_SIZE_SHIFT 4 +#define QM_CQ_BUF_SIZE_SHIFT 8 +#define QM_CQ_CQE_SIZE_SHIFT 12 +#define QM_CQ_PHASE_SHIFT 0 +#define QM_CQ_FLAG_SHIFT 1 + +#define QM_CQE_PHASE(cqe) ((cqe)->w7 & 0x1) +#define QM_QC_CQE_SIZE 4 + +/* eqc shift */ +#define QM_EQE_AEQE_SIZE (2UL << 12) +#define QM_EQC_PHASE_SHIFT 16 + +#define QM_EQE_PHASE(eqe) (((eqe)->dw0 >> 16) & 0x1) +#define QM_EQE_CQN_MASK GENMASK(15, 0) + +#define QM_AEQE_PHASE(aeqe) (((aeqe)->dw0 >> 16) & 0x1) +#define QM_AEQE_TYPE_SHIFT 17 + +#define QM_DOORBELL_CMD_SQ 0 +#define QM_DOORBELL_CMD_CQ 1 +#define QM_DOORBELL_CMD_EQ 2 +#define QM_DOORBELL_CMD_AEQ 3 + +#define QM_DOORBELL_BASE_V1 0x340 +#define QM_DB_CMD_SHIFT_V1 16 +#define QM_DB_INDEX_SHIFT_V1 32 +#define QM_DB_PRIORITY_SHIFT_V1 48 +#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000 +#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000 +#define QM_DB_CMD_SHIFT_V2 12 +#define QM_DB_RAND_SHIFT_V2 16 +#define QM_DB_INDEX_SHIFT_V2 32 +#define QM_DB_PRIORITY_SHIFT_V2 48 + +#define QM_MEM_START_INIT 0x100040 +#define QM_MEM_INIT_DONE 0x100044 +#define QM_VFT_CFG_RDY 0x10006c +#define QM_VFT_CFG_OP_WR 0x100058 +#define QM_VFT_CFG_TYPE 0x10005c +#define QM_SQC_VFT 0x0 +#define QM_CQC_VFT 0x1 +#define QM_VFT_CFG 0x100060 +#define QM_VFT_CFG_OP_ENABLE 0x100054 + +#define QM_VFT_CFG_DATA_L 0x100064 +#define QM_VFT_CFG_DATA_H 0x100068 +#define QM_SQC_VFT_BUF_SIZE (7ULL << 8) +#define QM_SQC_VFT_SQC_SIZE (5ULL << 12) +#define QM_SQC_VFT_INDEX_NUMBER (1ULL << 16) +#define QM_SQC_VFT_START_SQN_SHIFT 28 +#define QM_SQC_VFT_VALID (1ULL << 44) +#define QM_SQC_VFT_SQN_SHIFT 45 +#define QM_CQC_VFT_BUF_SIZE (7ULL << 8) +#define QM_CQC_VFT_SQC_SIZE (5ULL << 12) +#define QM_CQC_VFT_INDEX_NUMBER (1ULL << 16) +#define QM_CQC_VFT_VALID (1ULL << 28) + +#define QM_SQC_VFT_BASE_SHIFT_V2 28 +#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(5, 0) +#define QM_SQC_VFT_NUM_SHIFT_V2 45 +#define QM_SQC_VFT_NUM_MASK_v2 GENMASK(9, 0) + + +#define QM_ABNORMAL_INT_SOURCE 0x100000 +#define QM_ABNORMAL_INT_MASK 0x100004 +#define QM_ABNORMAL_INT_MASK_VALUE 0x1fff +#define QM_ABNORMAL_INT_STATUS 0x100008 +#define QM_ABNORMAL_INF00 0x100010 +#define QM_FIFO_OVERFLOW_TYPE 0xc0 +#define QM_FIFO_OVERFLOW_TYPE_SHIFT 6 +#define QM_FIFO_OVERFLOW_VF 0x3f +#define QM_ABNORMAL_INF01 0x100014 +#define QM_DB_TIMEOUT_TYPE 0xc0 +#define QM_DB_TIMEOUT_TYPE_SHIFT 6 +#define QM_DB_TIMEOUT_VF 0x3f +#define QM_RAS_CE_ENABLE 
0x1000ec +#define QM_RAS_FE_ENABLE 0x1000f0 +#define QM_RAS_NFE_ENABLE 0x1000f4 +#define QM_RAS_CE_THRESHOLD 0x1000f8 +#define QM_RAS_CE_TIMES_PER_IRQ 1 +#define QM_RAS_MSI_INT_SEL 0x1040f4 + +#define QM_CACHE_WB_START 0x204 +#define QM_CACHE_WB_DONE 0x208 + +#define PCI_BAR_2 2 +#define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0) +#define QMC_ALIGN(sz) ALIGN(sz, 32) + + +#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \ + (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \ + ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \ + ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \ + ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) + +#define QM_MK_CQC_DW3_V2(cqe_sz) \ + ((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) + +#define QM_MK_SQC_W13(priority, orders, alg_type) \ + (((priority) << QM_SQ_PRIORITY_SHIFT) | \ + ((orders) << QM_SQ_ORDERS_SHIFT) | \ + (((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT)) + +#define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \ + (((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \ + ((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \ + ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \ + ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) + +#define QM_MK_SQC_DW3_V2(sqe_sz) \ + ((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) + +#define INIT_QC_COMMON(qc, base, pasid) do { \ + (qc)->head = 0; \ + (qc)->tail = 0; \ + (qc)->base_l = lower_32_bits(base); \ + (qc)->base_h = upper_32_bits(base); \ + (qc)->dw3 = 0; \ + (qc)->w8 = 0; \ + (qc)->rsvd0 = 0; \ + (qc)->pasid = pasid; \ + (qc)->w11 = 0; \ + (qc)->rsvd1 = 0; \ +} while (0) + +enum vft_type { + SQC_VFT = 0, + CQC_VFT, +}; + +struct qm_cqe { + __le32 rsvd0; + __le16 cmd_id; + __le16 rsvd1; + __le16 sq_head; + __le16 sq_num; + __le16 rsvd2; + __le16 w7; +}; + +struct qm_eqe { + __le32 dw0; +}; + +struct qm_aeqe { + __le32 dw0; +}; + +struct qm_sqc { + __le16 head; + __le16 tail; + __le32 base_l; + __le32 base_h; + __le32 dw3; + __le16 w8; + __le16 rsvd0; + __le16 pasid; + __le16 w11; + __le16 cq_num; + __le16 w13; + __le32 rsvd1; +}; + +struct qm_cqc { + __le16 head; + __le16 tail; + __le32 base_l; + __le32 base_h; + __le32 dw3; + __le16 w8; + __le16 rsvd0; + __le16 pasid; + __le16 w11; + __le32 dw6; + __le32 rsvd1; +}; + +struct qm_eqc { + __le16 head; + __le16 tail; + __le32 base_l; + __le32 base_h; + __le32 dw3; + __le32 rsvd[2]; + __le32 dw6; +}; + +struct qm_aeqc { + __le16 head; + __le16 tail; + __le32 base_l; + __le32 base_h; + __le32 dw3; + __le32 rsvd[2]; + __le32 dw6; +}; + +struct qm_mailbox { + __le16 w0; + __le16 queue_num; + __le32 base_l; + __le32 base_h; + __le32 rsvd; +}; + +struct qm_doorbell { + __le16 queue_num; + __le16 cmd; + __le16 index; + __le16 priority; +}; + +struct hisi_qm_hw_ops { + void (*qm_db)(struct hisi_qm *qm, u16 qn, + u8 cmd, u16 index, u8 priority); + u32 (*get_irq_num)(struct hisi_qm *qm); + void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, + u32 msi); + pci_ers_result_t (*hw_error_handle)(struct hisi_qm *qm); +}; + +struct hisi_qm_hw_error { + u32 int_msk; + const char *msg; +}; + +static const struct hisi_qm_hw_error qm_hw_error[] = { + { .int_msk = BIT(0), .msg = "qm_axi_rresp" }, + { .int_msk = BIT(1), .msg = "qm_axi_bresp" }, + { .int_msk = BIT(2), .msg = "qm_ecc_mbit" }, + { .int_msk = BIT(3), .msg = "qm_ecc_1bit" }, + { .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" }, + { .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" }, + { .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" }, + { .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" }, + { .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" }, + { 
.int_msk = BIT(9), .msg = "qm_sq_vf_invalid" }, + { .int_msk = BIT(10), .msg = "qm_db_timeout" }, + { .int_msk = BIT(11), .msg = "qm_of_fifo_of" }, + { .int_msk = BIT(12), .msg = "qm_db_random_invalid" }, + { /* sentinel */ } +}; + +static const char * const qm_db_timeout[] = { + "sq", "cq", "eq", "aeq", +}; + +static const char * const qm_fifo_overflow[] = { + "cq", "eq", "aeq", +}; + +/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */ +static int qm_wait_mb_ready(struct hisi_qm *qm) +{ + u32 val; + + return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE, + val, !((val >> QM_MB_BUSY_SHIFT) & + 0x1), 10, 1000); +} + +/* 128 bit should be written to hardware at one time to trigger a mailbox */ +static void qm_mb_write(struct hisi_qm *qm, const void *src) +{ + void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE; + unsigned long tmp0 = 0, tmp1 = 0; + + asm volatile("ldp %0, %1, %3\n" + "stp %0, %1, %2\n" + "dsb sy\n" + : "=&r" (tmp0), + "=&r" (tmp1), + "+Q" (*((char *)fun_base)) + : "Q" (*((char *)src)) + : "memory"); +} + +static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, + bool op) +{ + struct qm_mailbox mailbox; + int ret = 0; + + dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n", queue, + cmd, dma_addr); + + mailbox.w0 = cmd | + (op ? 0x1 << QM_MB_OP_SHIFT : 0) | + (0x1 << QM_MB_BUSY_SHIFT); + mailbox.queue_num = queue; + mailbox.base_l = lower_32_bits(dma_addr); + mailbox.base_h = upper_32_bits(dma_addr); + mailbox.rsvd = 0; + + mutex_lock(&qm->mailbox_lock); + + if (unlikely(qm_wait_mb_ready(qm))) { + ret = -EBUSY; + dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n"); + goto busy_unlock; + } + + qm_mb_write(qm, &mailbox); + + if (unlikely(qm_wait_mb_ready(qm))) { + ret = -EBUSY; + dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n"); + goto busy_unlock; + } + +busy_unlock: + mutex_unlock(&qm->mailbox_lock); + + return ret; +} + +static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) +{ + u64 doorbell; + + doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) | + ((u64)index << QM_DB_INDEX_SHIFT_V1) | + ((u64)priority << QM_DB_PRIORITY_SHIFT_V1); + + writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1); +} + +static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) +{ + u64 doorbell; + u64 dbase; + u16 randata = 0; + + if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ) + dbase = QM_DOORBELL_SQ_CQ_BASE_V2; + else + dbase = QM_DOORBELL_EQ_AEQ_BASE_V2; + + doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) | + ((u64)randata << QM_DB_RAND_SHIFT_V2) | + ((u64)index << QM_DB_INDEX_SHIFT_V2) | + ((u64)priority << QM_DB_PRIORITY_SHIFT_V2); + + writeq(doorbell, qm->io_base + dbase); +} + +static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) +{ + dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n", + qn, cmd, index); + + qm->ops->qm_db(qm, qn, cmd, index, priority); +} + +static int qm_dev_mem_reset(struct hisi_qm *qm) +{ + u32 val; + + writel(0x1, qm->io_base + QM_MEM_START_INIT); + return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val, + val & BIT(0), 10, 1000); +} + +static u32 qm_get_irq_num_v1(struct hisi_qm *qm) +{ + return QM_IRQ_NUM_V1; +} + +static u32 qm_get_irq_num_v2(struct hisi_qm *qm) +{ + return QM_IRQ_NUM_PF_V2; +} + +static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe) +{ + u16 cqn = eqe->dw0 & QM_EQE_CQN_MASK; + + return qm->qp_array[cqn]; +} + +static void 
qm_cq_head_update(struct hisi_qp *qp) +{ + if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) { + qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase; + qp->qp_status.cq_head = 0; + } else { + qp->qp_status.cq_head++; + } +} + +static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm) +{ + struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; + + if (qp->req_cb) { + while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { + dma_rmb(); + qp->req_cb(qp, qp->sqe + qm->sqe_size * cqe->sq_head); + qm_cq_head_update(qp); + cqe = qp->cqe + qp->qp_status.cq_head; + qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, + qp->qp_status.cq_head, 0); + atomic_dec(&qp->qp_status.used); + } + + /* set c_flag */ + qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, + qp->qp_status.cq_head, 1); + } +} + +static void qm_qp_work_func(struct work_struct *work) +{ + struct hisi_qp *qp; + + qp = container_of(work, struct hisi_qp, work); + qm_poll_qp(qp, qp->qm); +} + +static irqreturn_t qm_irq_handler(int irq, void *data) +{ + struct hisi_qm *qm = data; + struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; + struct hisi_qp *qp; + int eqe_num = 0; + + while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { + eqe_num++; + qp = qm_to_hisi_qp(qm, eqe); + if (qp) + queue_work(qp->wq, &qp->work); + + if (qm->status.eq_head == QM_Q_DEPTH - 1) { + qm->status.eqc_phase = !qm->status.eqc_phase; + eqe = qm->eqe; + qm->status.eq_head = 0; + } else { + eqe++; + qm->status.eq_head++; + } + + if (eqe_num == QM_Q_DEPTH / 2 - 1) { + eqe_num = 0; + qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); + } + } + + qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); + + return IRQ_HANDLED; +} + +static irqreturn_t qm_irq(int irq, void *data) +{ + struct hisi_qm *qm = data; + + if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE)) + return qm_irq_handler(irq, data); + + dev_err(&qm->pdev->dev, "invalid int source\n"); + qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); + + return IRQ_NONE; +} + +static irqreturn_t qm_aeq_irq(int irq, void *data) +{ + struct hisi_qm *qm = data; + struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head; + u32 type; + + if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE)) + return IRQ_NONE; + + while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) { + type = aeqe->dw0 >> QM_AEQE_TYPE_SHIFT; + if (type < ARRAY_SIZE(qm_fifo_overflow)) + dev_err(&qm->pdev->dev, "%s overflow\n", + qm_fifo_overflow[type]); + else + dev_err(&qm->pdev->dev, "unknown error type %d\n", + type); + + if (qm->status.aeq_head == QM_Q_DEPTH - 1) { + qm->status.aeqc_phase = !qm->status.aeqc_phase; + aeqe = qm->aeqe; + qm->status.aeq_head = 0; + } else { + aeqe++; + qm->status.aeq_head++; + } + + qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); + } + + return IRQ_HANDLED; +} + +static irqreturn_t qm_abnormal_irq(int irq, void *data) +{ + const struct hisi_qm_hw_error *err = qm_hw_error; + struct hisi_qm *qm = data; + struct device *dev = &qm->pdev->dev; + u32 error_status, tmp; + + /* read err sts */ + tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); + error_status = qm->msi_mask & tmp; + + while (err->msg) { + if (err->int_msk & error_status) + dev_err(dev, "%s [error status=0x%x] found\n", + err->msg, err->int_msk); + + err++; + } + + /* clear err sts */ + writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); + + return IRQ_HANDLED; +} + +static int qm_irq_register(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + int ret; + + ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), + qm_irq, IRQF_SHARED, qm->dev_name, qm); + if 
(ret) + return ret; + + if (qm->ver == QM_HW_V2) { + ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), + qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm); + if (ret) + goto err_aeq_irq; + + ret = request_irq(pci_irq_vector(pdev, + QM_ABNORMAL_EVENT_IRQ_VECTOR), + qm_abnormal_irq, IRQF_SHARED, + qm->dev_name, qm); + if (ret) + goto err_abonormal_irq; + } + + return 0; + +err_abonormal_irq: + free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); +err_aeq_irq: + free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm); + return ret; +} + +static void qm_irq_unregister(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + + free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm); + + if (qm->ver == QM_HW_V2) { + free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); + free_irq(pci_irq_vector(pdev, + QM_ABNORMAL_EVENT_IRQ_VECTOR), qm); + } +} + +static void qm_init_qp_status(struct hisi_qp *qp) +{ + struct hisi_qp_status *qp_status = &qp->qp_status; + + qp_status->sq_tail = 0; + qp_status->cq_head = 0; + qp_status->cqc_phase = 1; + qp_status->flags = 0; +} + +static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base, + u32 number) +{ + u64 tmp = 0; + + if (number > 0) { + switch (type) { + case SQC_VFT: + switch (qm->ver) { + case QM_HW_V1: + tmp = QM_SQC_VFT_BUF_SIZE | + QM_SQC_VFT_SQC_SIZE | + QM_SQC_VFT_INDEX_NUMBER | + QM_SQC_VFT_VALID | + (u64)base << QM_SQC_VFT_START_SQN_SHIFT; + break; + case QM_HW_V2: + tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT | + QM_SQC_VFT_VALID | + (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT; + break; + case QM_HW_UNKNOWN: + break; + } + break; + case CQC_VFT: + switch (qm->ver) { + case QM_HW_V1: + tmp = QM_CQC_VFT_BUF_SIZE | + QM_CQC_VFT_SQC_SIZE | + QM_CQC_VFT_INDEX_NUMBER | + QM_CQC_VFT_VALID; + break; + case QM_HW_V2: + tmp = QM_CQC_VFT_VALID; + break; + case QM_HW_UNKNOWN: + break; + } + break; + } + } + + writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L); + writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H); +} + +static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type, + u32 fun_num, u32 base, u32 number) +{ + unsigned int val; + int ret; + + ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, + val & BIT(0), 10, 1000); + if (ret) + return ret; + + writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR); + writel(type, qm->io_base + QM_VFT_CFG_TYPE); + writel(fun_num, qm->io_base + QM_VFT_CFG); + + qm_vft_data_cfg(qm, type, base, number); + + writel(0x0, qm->io_base + QM_VFT_CFG_RDY); + writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); + + return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, + val & BIT(0), 10, 1000); +} + +/* The config should be conducted after qm_dev_mem_reset() */ +static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base, + u32 number) +{ + int ret, i; + + for (i = SQC_VFT; i <= CQC_VFT; i++) { + ret = qm_set_vft_common(qm, i, fun_num, base, number); + if (ret) + return ret; + } + + return 0; +} + +static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, + u32 msi) +{ + dev_info(&qm->pdev->dev, + "QM v%d does not support hw error handle\n", qm->ver); + + writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); +} + +static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, + u32 msi) +{ + u32 irq_enable = ce | nfe | fe | msi; + u32 irq_unmask = ~irq_enable; + + qm->error_mask = ce | nfe | fe; + qm->msi_mask = msi; + + /* configure error type */ + writel(ce, 
qm->io_base + QM_RAS_CE_ENABLE); + writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD); + writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE); + writel(fe, qm->io_base + QM_RAS_FE_ENABLE); + + /* use RAS irq default, so only set QM_RAS_MSI_INT_SEL for MSI */ + writel(msi, qm->io_base + QM_RAS_MSI_INT_SEL); + + irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); + writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); +} + +static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status) +{ + const struct hisi_qm_hw_error *err = qm_hw_error; + struct device *dev = &qm->pdev->dev; + u32 reg_val, type, vf_num; + + while (err->msg) { + if (err->int_msk & error_status) { + dev_err(dev, "%s [error status=0x%x] found\n", + err->msg, err->int_msk); + + if (error_status & QM_DB_TIMEOUT) { + reg_val = readl(qm->io_base + + QM_ABNORMAL_INF01); + type = (reg_val & QM_DB_TIMEOUT_TYPE) >> + QM_DB_TIMEOUT_TYPE_SHIFT; + vf_num = reg_val & QM_DB_TIMEOUT_VF; + dev_err(dev, "qm %s doorbell timeout in function %u\n", + qm_db_timeout[type], vf_num); + } + + if (error_status & QM_OF_FIFO_OF) { + reg_val = readl(qm->io_base + + QM_ABNORMAL_INF00); + type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >> + QM_FIFO_OVERFLOW_TYPE_SHIFT; + vf_num = reg_val & QM_FIFO_OVERFLOW_VF; + + if (type < ARRAY_SIZE(qm_fifo_overflow)) + dev_err(dev, "qm %s fifo overflow in function %u\n", + qm_fifo_overflow[type], + vf_num); + else + dev_err(dev, "unknown error type\n"); + } + } + err++; + } +} + +static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm) +{ + u32 error_status, tmp; + + /* read err sts */ + tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); + error_status = qm->error_mask & tmp; + + if (error_status) { + qm_log_hw_error(qm, error_status); + + /* clear err sts */ + writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); + + return PCI_ERS_RESULT_NEED_RESET; + } + + return PCI_ERS_RESULT_RECOVERED; +} + +static const struct hisi_qm_hw_ops qm_hw_ops_v1 = { + .qm_db = qm_db_v1, + .get_irq_num = qm_get_irq_num_v1, + .hw_error_init = qm_hw_error_init_v1, +}; + +static const struct hisi_qm_hw_ops qm_hw_ops_v2 = { + .qm_db = qm_db_v2, + .get_irq_num = qm_get_irq_num_v2, + .hw_error_init = qm_hw_error_init_v2, + .hw_error_handle = qm_hw_error_handle_v2, +}; + +static void *qm_get_avail_sqe(struct hisi_qp *qp) +{ + struct hisi_qp_status *qp_status = &qp->qp_status; + u16 sq_tail = qp_status->sq_tail; + + if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH)) + return NULL; + + return qp->sqe + sq_tail * qp->qm->sqe_size; +} + +/** + * hisi_qm_create_qp() - Create a queue pair from qm. + * @qm: The qm we create a qp from. + * @alg_type: Accelerator specific algorithm type in sqc. + * + * return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating + * qp memory fails. 
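+ *
+ * (Editor's note, not in the original patch: a typical caller pairs this
+ * with hisi_qm_start_qp() and hisi_qm_release_qp(), roughly:
+ *
+ *	qp = hisi_qm_create_qp(qm, alg_type);
+ *	if (IS_ERR(qp))
+ *		return PTR_ERR(qp);
+ *	ret = hisi_qm_start_qp(qp, 0);
+ *	if (ret < 0)
+ *		hisi_qm_release_qp(qp);
+ *
+ * which is exactly the shape of hisi_zip_create_qp() later in this
+ * series.)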
+ */ +struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) +{ + struct device *dev = &qm->pdev->dev; + struct hisi_qp *qp; + int qp_id, ret; + + qp = kzalloc(sizeof(*qp), GFP_KERNEL); + if (!qp) + return ERR_PTR(-ENOMEM); + + write_lock(&qm->qps_lock); + + qp_id = find_first_zero_bit(qm->qp_bitmap, qm->qp_num); + if (qp_id >= qm->qp_num) { + write_unlock(&qm->qps_lock); + dev_info(&qm->pdev->dev, "QM all queues are busy!\n"); + ret = -EBUSY; + goto err_free_qp; + } + set_bit(qp_id, qm->qp_bitmap); + qm->qp_array[qp_id] = qp; + + write_unlock(&qm->qps_lock); + + qp->qm = qm; + + if (qm->use_dma_api) { + qp->qdma.size = qm->sqe_size * QM_Q_DEPTH + + sizeof(struct qm_cqe) * QM_Q_DEPTH; + qp->qdma.va = dma_alloc_coherent(dev, qp->qdma.size, + &qp->qdma.dma, GFP_KERNEL); + if (!qp->qdma.va) { + ret = -ENOMEM; + goto err_clear_bit; + } + + dev_dbg(dev, "allocate qp dma buf(va=%pK, dma=%pad, size=%lx)\n", + qp->qdma.va, &qp->qdma.dma, qp->qdma.size); + } + + qp->qp_id = qp_id; + qp->alg_type = alg_type; + INIT_WORK(&qp->work, qm_qp_work_func); + qp->wq = alloc_workqueue("hisi_qm", WQ_UNBOUND | WQ_HIGHPRI | + WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0); + if (!qp->wq) { + ret = -EFAULT; + goto err_free_qp_mem; + } + + return qp; + +err_free_qp_mem: + if (qm->use_dma_api) + dma_free_coherent(dev, qp->qdma.size, qp->qdma.va, + qp->qdma.dma); +err_clear_bit: + write_lock(&qm->qps_lock); + qm->qp_array[qp_id] = NULL; + clear_bit(qp_id, qm->qp_bitmap); + write_unlock(&qm->qps_lock); +err_free_qp: + kfree(qp); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(hisi_qm_create_qp); + +/** + * hisi_qm_release_qp() - Release a qp back to its qm. + * @qp: The qp we want to release. + * + * This function releases the resource of a qp. + */ +void hisi_qm_release_qp(struct hisi_qp *qp) +{ + struct hisi_qm *qm = qp->qm; + struct qm_dma *qdma = &qp->qdma; + struct device *dev = &qm->pdev->dev; + + if (qm->use_dma_api && qdma->va) + dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); + + write_lock(&qm->qps_lock); + qm->qp_array[qp->qp_id] = NULL; + clear_bit(qp->qp_id, qm->qp_bitmap); + write_unlock(&qm->qps_lock); + + kfree(qp); +} +EXPORT_SYMBOL_GPL(hisi_qm_release_qp); + +static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid) +{ + struct hisi_qm *qm = qp->qm; + struct device *dev = &qm->pdev->dev; + enum qm_hw_ver ver = qm->ver; + struct qm_sqc *sqc; + struct qm_cqc *cqc; + dma_addr_t sqc_dma; + dma_addr_t cqc_dma; + int ret; + + qm_init_qp_status(qp); + + sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL); + if (!sqc) + return -ENOMEM; + sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc), + DMA_TO_DEVICE); + if (dma_mapping_error(dev, sqc_dma)) { + kfree(sqc); + return -ENOMEM; + } + + INIT_QC_COMMON(sqc, qp->sqe_dma, pasid); + if (ver == QM_HW_V1) { + sqc->dw3 = QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size); + sqc->w8 = QM_Q_DEPTH - 1; + } else if (ver == QM_HW_V2) { + sqc->dw3 = QM_MK_SQC_DW3_V2(qm->sqe_size); + sqc->w8 = 0; /* rand_qc */ + } + sqc->cq_num = qp_id; + sqc->w13 = QM_MK_SQC_W13(0, 1, qp->alg_type); + + ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0); + dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE); + kfree(sqc); + if (ret) + return ret; + + cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL); + if (!cqc) + return -ENOMEM; + cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc), + DMA_TO_DEVICE); + if (dma_mapping_error(dev, cqc_dma)) { + kfree(cqc); + return -ENOMEM; + } + + INIT_QC_COMMON(cqc, qp->cqe_dma, pasid); + if (ver == QM_HW_V1) { + cqc->dw3 = 
QM_MK_CQC_DW3_V1(0, 0, 0, 4); + cqc->w8 = QM_Q_DEPTH - 1; + } else if (ver == QM_HW_V2) { + cqc->dw3 = QM_MK_CQC_DW3_V2(4); + cqc->w8 = 0; + } + cqc->dw6 = 1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT; + + ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0); + dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE); + kfree(cqc); + + return ret; +} + +/** + * hisi_qm_start_qp() - Start a qp into running. + * @qp: The qp we want to start to run. + * @arg: Accelerator specific argument. + * + * After this function, qp can receive request from user. Return qp_id if + * successful, Return -EBUSY if failed. + */ +int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) +{ + struct hisi_qm *qm = qp->qm; + struct device *dev = &qm->pdev->dev; + enum qm_hw_ver ver = qm->ver; + int qp_id = qp->qp_id; + int pasid = arg; + size_t off = 0; + int ret; + +#define QP_INIT_BUF(qp, type, size) do { \ + (qp)->type = ((qp)->qdma.va + (off)); \ + (qp)->type##_dma = (qp)->qdma.dma + (off); \ + off += (size); \ +} while (0) + + if (!qp->qdma.dma) { + dev_err(dev, "cannot get qm dma buffer\n"); + return -EINVAL; + } + + /* sq need 128 bytes alignment */ + if (qp->qdma.dma & QM_SQE_DATA_ALIGN_MASK) { + dev_err(dev, "qm sq is not aligned to 128 byte\n"); + return -EINVAL; + } + + QP_INIT_BUF(qp, sqe, qm->sqe_size * QM_Q_DEPTH); + QP_INIT_BUF(qp, cqe, sizeof(struct qm_cqe) * QM_Q_DEPTH); + + dev_dbg(dev, "init qp buffer(v%d):\n" + " sqe (%pK, %lx)\n" + " cqe (%pK, %lx)\n", + ver, qp->sqe, (unsigned long)qp->sqe_dma, + qp->cqe, (unsigned long)qp->cqe_dma); + + ret = qm_qp_ctx_cfg(qp, qp_id, pasid); + if (ret) + return ret; + + dev_dbg(dev, "queue %d started\n", qp_id); + + return qp_id; +} +EXPORT_SYMBOL_GPL(hisi_qm_start_qp); + +/** + * hisi_qm_stop_qp() - Stop a qp in qm. + * @qp: The qp we want to stop. + * + * This function is reverse of hisi_qm_start_qp. Return 0 if successful. + */ +int hisi_qm_stop_qp(struct hisi_qp *qp) +{ + struct device *dev = &qp->qm->pdev->dev; + int i = 0; + + /* it is stopped */ + if (test_bit(QP_STOP, &qp->qp_status.flags)) + return 0; + + while (atomic_read(&qp->qp_status.used)) { + i++; + msleep(20); + if (i == 10) { + dev_err(dev, "Cannot drain out data for stopping, Force to stop!\n"); + return 0; + } + } + + set_bit(QP_STOP, &qp->qp_status.flags); + + dev_dbg(dev, "stop queue %u!", qp->qp_id); + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_qm_stop_qp); + +/** + * hisi_qp_send() - Queue up a task in the hardware queue. + * @qp: The qp in which to put the message. + * @msg: The message. + * + * This function will return -EBUSY if qp is currently full, and -EAGAIN + * if qp related qm is resetting. 
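+ *
+ * (Editor's note, a usage sketch that is not part of the patch: msg must
+ * point at a full hardware sqe of qm->sqe_size bytes, and completions are
+ * delivered through qp->req_cb, so a caller typically does:
+ *
+ *	qp->req_cb = my_done_cb;	[invoked once per completed sqe]
+ *	ret = hisi_qp_send(qp, &sqe);
+ *	if (ret == -EBUSY)
+ *		back off and resubmit later;
+ *
+ * my_done_cb and the retry policy are the caller's own.)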
+ */ +int hisi_qp_send(struct hisi_qp *qp, const void *msg) +{ + struct hisi_qp_status *qp_status = &qp->qp_status; + u16 sq_tail = qp_status->sq_tail; + u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH; + void *sqe = qm_get_avail_sqe(qp); + + if (unlikely(test_bit(QP_STOP, &qp->qp_status.flags))) { + dev_info(&qp->qm->pdev->dev, "QP is stopped or resetting\n"); + return -EAGAIN; + } + + if (!sqe) + return -EBUSY; + + memcpy(sqe, msg, qp->qm->sqe_size); + + qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0); + atomic_inc(&qp->qp_status.used); + qp_status->sq_tail = sq_tail_next; + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_qp_send); + +static void hisi_qm_cache_wb(struct hisi_qm *qm) +{ + unsigned int val; + + if (qm->ver == QM_HW_V2) { + writel(0x1, qm->io_base + QM_CACHE_WB_START); + if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, + val, val & BIT(0), 10, 1000)) + dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n"); + } +} + +/** + * hisi_qm_init() - Initialize configures about qm. + * @qm: The qm needing init. + * + * This function init qm, then we can call hisi_qm_start to put qm into work. + */ +int hisi_qm_init(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + struct device *dev = &pdev->dev; + unsigned int num_vec; + int ret; + + switch (qm->ver) { + case QM_HW_V1: + qm->ops = &qm_hw_ops_v1; + break; + case QM_HW_V2: + qm->ops = &qm_hw_ops_v2; + break; + default: + return -EINVAL; + } + + ret = pci_enable_device_mem(pdev); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to enable device mem!\n"); + return ret; + } + + ret = pci_request_mem_regions(pdev, qm->dev_name); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to request mem regions!\n"); + goto err_disable_pcidev; + } + + qm->io_base = ioremap(pci_resource_start(pdev, PCI_BAR_2), + pci_resource_len(qm->pdev, PCI_BAR_2)); + if (!qm->io_base) { + ret = -EIO; + goto err_release_mem_regions; + } + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (ret < 0) + goto err_iounmap; + pci_set_master(pdev); + + if (!qm->ops->get_irq_num) { + ret = -EOPNOTSUPP; + goto err_iounmap; + } + num_vec = qm->ops->get_irq_num(qm); + ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI); + if (ret < 0) { + dev_err(dev, "Failed to enable MSI vectors!\n"); + goto err_iounmap; + } + + ret = qm_irq_register(qm); + if (ret) + goto err_free_irq_vectors; + + mutex_init(&qm->mailbox_lock); + rwlock_init(&qm->qps_lock); + + return 0; + +err_free_irq_vectors: + pci_free_irq_vectors(pdev); +err_iounmap: + iounmap(qm->io_base); +err_release_mem_regions: + pci_release_mem_regions(pdev); +err_disable_pcidev: + pci_disable_device(pdev); + + return ret; +} +EXPORT_SYMBOL_GPL(hisi_qm_init); + +/** + * hisi_qm_uninit() - Uninitialize qm. + * @qm: The qm needed uninit. + * + * This function uninits qm related device resources. + */ +void hisi_qm_uninit(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + struct device *dev = &pdev->dev; + + if (qm->use_dma_api && qm->qdma.va) { + hisi_qm_cache_wb(qm); + dma_free_coherent(dev, qm->qdma.size, + qm->qdma.va, qm->qdma.dma); + memset(&qm->qdma, 0, sizeof(qm->qdma)); + } + + qm_irq_unregister(qm); + pci_free_irq_vectors(pdev); + iounmap(qm->io_base); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} +EXPORT_SYMBOL_GPL(hisi_qm_uninit); + +/** + * hisi_qm_set_vft() - Set "virtual function table" for a qm. + * @fun_num: Number of operated function. + * @qm: The qm in which to set vft, alway in a PF. + * @base: The base number of queue in vft. 
+ * @number: The number of queues in vft. 0 means invalid vft. + * + * This function is alway called in PF driver, it is used to assign queues + * among PF and VFs. + * + * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1) + * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1) + * (VF function number 0x2) + */ +int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, + u32 number) +{ + u32 max_q_num = qm->ctrl_qp_num; + + if (base >= max_q_num || number > max_q_num || + (base + number) > max_q_num) + return -EINVAL; + + return qm_set_sqc_cqc_vft(qm, fun_num, base, number); +} +EXPORT_SYMBOL_GPL(hisi_qm_set_vft); + +static void qm_init_eq_aeq_status(struct hisi_qm *qm) +{ + struct hisi_qm_status *status = &qm->status; + + status->eq_head = 0; + status->aeq_head = 0; + status->eqc_phase = 1; + status->aeqc_phase = 1; +} + +static int qm_eq_ctx_cfg(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + struct qm_eqc *eqc; + struct qm_aeqc *aeqc; + dma_addr_t eqc_dma; + dma_addr_t aeqc_dma; + int ret; + + qm_init_eq_aeq_status(qm); + + eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL); + if (!eqc) + return -ENOMEM; + eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc), + DMA_TO_DEVICE); + if (dma_mapping_error(dev, eqc_dma)) { + kfree(eqc); + return -ENOMEM; + } + + eqc->base_l = lower_32_bits(qm->eqe_dma); + eqc->base_h = upper_32_bits(qm->eqe_dma); + if (qm->ver == QM_HW_V1) + eqc->dw3 = QM_EQE_AEQE_SIZE; + eqc->dw6 = (QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT); + ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0); + dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE); + kfree(eqc); + if (ret) + return ret; + + aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL); + if (!aeqc) + return -ENOMEM; + aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc), + DMA_TO_DEVICE); + if (dma_mapping_error(dev, aeqc_dma)) { + kfree(aeqc); + return -ENOMEM; + } + + aeqc->base_l = lower_32_bits(qm->aeqe_dma); + aeqc->base_h = upper_32_bits(qm->aeqe_dma); + aeqc->dw6 = (QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT); + + ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0); + dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE); + kfree(aeqc); + + return ret; +} + +static int __hisi_qm_start(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + struct device *dev = &pdev->dev; + size_t off = 0; + int ret; + +#define QM_INIT_BUF(qm, type, num) do { \ + (qm)->type = ((qm)->qdma.va + (off)); \ + (qm)->type##_dma = (qm)->qdma.dma + (off); \ + off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \ +} while (0) + + WARN_ON(!qm->qdma.dma); + + if (qm->qp_num == 0) + return -EINVAL; + + ret = qm_dev_mem_reset(qm); + if (ret) + return ret; + + ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); + if (ret) + return ret; + + QM_INIT_BUF(qm, eqe, QM_Q_DEPTH); + QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH); + QM_INIT_BUF(qm, sqc, qm->qp_num); + QM_INIT_BUF(qm, cqc, qm->qp_num); + + dev_dbg(dev, "init qm buffer:\n" + " eqe (%pK, %lx)\n" + " aeqe (%pK, %lx)\n" + " sqc (%pK, %lx)\n" + " cqc (%pK, %lx)\n", + qm->eqe, (unsigned long)qm->eqe_dma, + qm->aeqe, (unsigned long)qm->aeqe_dma, + qm->sqc, (unsigned long)qm->sqc_dma, + qm->cqc, (unsigned long)qm->cqc_dma); + + ret = qm_eq_ctx_cfg(qm); + if (ret) + return ret; + + ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); + if (ret) + return ret; + + ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); + if (ret) + return ret; + + writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK); + writel(0x0, qm->io_base + 
QM_VF_AEQ_INT_MASK); + + return 0; +} + +/** + * hisi_qm_start() - start qm + * @qm: The qm to be started. + * + * This function starts a qm, then we can allocate qp from this qm. + */ +int hisi_qm_start(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + + dev_dbg(dev, "qm start with %d queue pairs\n", qm->qp_num); + + if (!qm->qp_num) { + dev_err(dev, "qp_num should not be 0\n"); + return -EINVAL; + } + + if (!qm->qp_bitmap) { + qm->qp_bitmap = devm_kcalloc(dev, BITS_TO_LONGS(qm->qp_num), + sizeof(long), GFP_KERNEL); + qm->qp_array = devm_kcalloc(dev, qm->qp_num, + sizeof(struct hisi_qp *), + GFP_KERNEL); + if (!qm->qp_bitmap || !qm->qp_array) + return -ENOMEM; + } + + if (!qm->use_dma_api) { + dev_dbg(&qm->pdev->dev, "qm delay start\n"); + return 0; + } else if (!qm->qdma.va) { + qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH) + + QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) + + QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + + QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); + qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, + &qm->qdma.dma, GFP_KERNEL); + dev_dbg(dev, "allocate qm dma buf(va=%pK, dma=%pad, size=%lx)\n", + qm->qdma.va, &qm->qdma.dma, qm->qdma.size); + if (!qm->qdma.va) + return -ENOMEM; + } + + return __hisi_qm_start(qm); +} +EXPORT_SYMBOL_GPL(hisi_qm_start); + +/** + * hisi_qm_stop() - Stop a qm. + * @qm: The qm which will be stopped. + * + * This function stops qm and its qps, then qm can not accept request. + * Related resources are not released at this state, we can use hisi_qm_start + * to let qm start again. + */ +int hisi_qm_stop(struct hisi_qm *qm) +{ + struct device *dev; + struct hisi_qp *qp; + int ret = 0, i; + + if (!qm || !qm->pdev) { + WARN_ON(1); + return -EINVAL; + } + + dev = &qm->pdev->dev; + + /* Mask eq and aeq irq */ + writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); + writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); + + /* Stop all qps belong to this qm */ + for (i = 0; i < qm->qp_num; i++) { + qp = qm->qp_array[i]; + if (qp) { + ret = hisi_qm_stop_qp(qp); + if (ret < 0) { + dev_err(dev, "Failed to stop qp%d!\n", i); + return -EBUSY; + } + } + } + + ret = hisi_qm_set_vft(qm, 0, 0, 0); + if (ret < 0) + dev_err(dev, "Failed to set vft!\n"); + + return ret; +} +EXPORT_SYMBOL_GPL(hisi_qm_stop); + +/** + * hisi_qm_hw_error_init() - Configure qm hardware error report method. + * @qm: The qm which we want to configure. + * @ce: Bit mask of correctable error configure. + * @nfe: Bit mask of non-fatal error configure. + * @fe: Bit mask of fatal error configure. + * @msi: Bit mask of error reported by message signal interrupt. + * + * Hardware errors of qm can be reported either by RAS interrupts which will + * be handled by UEFI and then PCIe AER or by device MSI. User can configure + * each error to use either of above two methods. For RAS interrupts, we can + * configure an error as one of correctable error, non-fatal error or + * fatal error. + * + * Bits indicating errors can be configured to ce, nfe, fe and msi to enable + * related report methods. Error report will be masked if related error bit + * does not configure. + */ +void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, + u32 msi) +{ + if (!qm->ops->hw_error_init) { + dev_err(&qm->pdev->dev, "QM version %d doesn't support hw error handling!\n", + qm->ver); + return; + } + + qm->ops->hw_error_init(qm, ce, nfe, fe, msi); +} +EXPORT_SYMBOL_GPL(hisi_qm_hw_error_init); + +/** + * hisi_qm_hw_error_handle() - Handle qm non-fatal hardware errors. 
+ * @qm: The qm which has non-fatal hardware errors. + * + * Accelerators use this function to handle qm non-fatal hardware errors. + */ +int hisi_qm_hw_error_handle(struct hisi_qm *qm) +{ + if (!qm->ops->hw_error_handle) { + dev_err(&qm->pdev->dev, "QM version %d doesn't support hw error report!\n", + qm->ver); + return PCI_ERS_RESULT_NONE; + } + + return qm->ops->hw_error_handle(qm); +} +EXPORT_SYMBOL_GPL(hisi_qm_hw_error_handle); + +/** + * hisi_qm_get_hw_version() - Get hardware version of a qm. + * @pdev: The device which hardware version we want to get. + * + * This function gets the hardware version of a qm. Return QM_HW_UNKNOWN + * if the hardware version is not supported. + */ +enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev) +{ + switch (pdev->revision) { + case QM_HW_V1: + case QM_HW_V2: + return pdev->revision; + default: + return QM_HW_UNKNOWN; + } +} +EXPORT_SYMBOL_GPL(hisi_qm_get_hw_version); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Zhou Wang "); +MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver"); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h new file mode 100644 index 000000000000..a5849db163cb --- /dev/null +++ b/drivers/crypto/hisilicon/qm.h @@ -0,0 +1,182 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019 HiSilicon Limited. */ +#ifndef HISI_ACC_QM_H +#define HISI_ACC_QM_H + +#include +#include +#include +#include + +/* qm user domain */ +#define QM_ARUSER_M_CFG_1 0x100088 +#define AXUSER_SNOOP_ENABLE BIT(30) +#define AXUSER_CMD_TYPE GENMASK(14, 12) +#define AXUSER_CMD_SMMU_NORMAL 1 +#define AXUSER_NS BIT(6) +#define AXUSER_NO BIT(5) +#define AXUSER_FP BIT(4) +#define AXUSER_SSV BIT(0) +#define AXUSER_BASE (AXUSER_SNOOP_ENABLE | \ + FIELD_PREP(AXUSER_CMD_TYPE, \ + AXUSER_CMD_SMMU_NORMAL) | \ + AXUSER_NS | AXUSER_NO | AXUSER_FP) +#define QM_ARUSER_M_CFG_ENABLE 0x100090 +#define ARUSER_M_CFG_ENABLE 0xfffffffe +#define QM_AWUSER_M_CFG_1 0x100098 +#define QM_AWUSER_M_CFG_ENABLE 0x1000a0 +#define AWUSER_M_CFG_ENABLE 0xfffffffe +#define QM_WUSER_M_CFG_ENABLE 0x1000a8 +#define WUSER_M_CFG_ENABLE 0xffffffff + +/* qm cache */ +#define QM_CACHE_CTL 0x100050 +#define SQC_CACHE_ENABLE BIT(0) +#define CQC_CACHE_ENABLE BIT(1) +#define SQC_CACHE_WB_ENABLE BIT(4) +#define SQC_CACHE_WB_THRD GENMASK(10, 5) +#define CQC_CACHE_WB_ENABLE BIT(11) +#define CQC_CACHE_WB_THRD GENMASK(17, 12) +#define QM_AXI_M_CFG 0x1000ac +#define AXI_M_CFG 0xffff +#define QM_AXI_M_CFG_ENABLE 0x1000b0 +#define AXI_M_CFG_ENABLE 0xffffffff +#define QM_PEH_AXUSER_CFG 0x1000cc +#define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0 +#define PEH_AXUSER_CFG 0x401001 +#define PEH_AXUSER_CFG_ENABLE 0xffffffff + + +#define QM_AXI_RRESP BIT(0) +#define QM_AXI_BRESP BIT(1) +#define QM_ECC_MBIT BIT(2) +#define QM_ECC_1BIT BIT(3) +#define QM_ACC_GET_TASK_TIMEOUT BIT(4) +#define QM_ACC_DO_TASK_TIMEOUT BIT(5) +#define QM_ACC_WB_NOT_READY_TIMEOUT BIT(6) +#define QM_SQ_CQ_VF_INVALID BIT(7) +#define QM_CQ_VF_INVALID BIT(8) +#define QM_SQ_VF_INVALID BIT(9) +#define QM_DB_TIMEOUT BIT(10) +#define QM_OF_FIFO_OF BIT(11) +#define QM_DB_RANDOM_INVALID BIT(12) + +#define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \ + QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \ + QM_OF_FIFO_OF) +#define QM_BASE_CE QM_ECC_1BIT + +#define QM_Q_DEPTH 1024 + +enum qp_state { + QP_STOP, +}; + +enum qm_hw_ver { + QM_HW_UNKNOWN = -1, + QM_HW_V1 = 0x20, + QM_HW_V2 = 0x21, +}; + +enum qm_fun_type { + QM_HW_PF, +}; + +struct qm_dma { + void *va; + dma_addr_t dma; + size_t size; +}; + +struct 
hisi_qm_status { + u32 eq_head; + bool eqc_phase; + u32 aeq_head; + bool aeqc_phase; + unsigned long flags; +}; + +struct hisi_qm { + enum qm_hw_ver ver; + const char *dev_name; + struct pci_dev *pdev; + void __iomem *io_base; + u32 sqe_size; + u32 qp_base; + u32 qp_num; + u32 ctrl_qp_num; + + struct qm_dma qdma; + struct qm_sqc *sqc; + struct qm_cqc *cqc; + struct qm_eqe *eqe; + struct qm_aeqe *aeqe; + dma_addr_t sqc_dma; + dma_addr_t cqc_dma; + dma_addr_t eqe_dma; + dma_addr_t aeqe_dma; + + struct hisi_qm_status status; + + rwlock_t qps_lock; + unsigned long *qp_bitmap; + struct hisi_qp **qp_array; + + struct mutex mailbox_lock; + + const struct hisi_qm_hw_ops *ops; + + u32 error_mask; + u32 msi_mask; + + bool use_dma_api; +}; + +struct hisi_qp_status { + atomic_t used; + u16 sq_tail; + u16 cq_head; + bool cqc_phase; + unsigned long flags; +}; + +struct hisi_qp_ops { + int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm); +}; + +struct hisi_qp { + u32 qp_id; + u8 alg_type; + u8 req_type; + + struct qm_dma qdma; + void *sqe; + struct qm_cqe *cqe; + dma_addr_t sqe_dma; + dma_addr_t cqe_dma; + + struct hisi_qp_status qp_status; + struct hisi_qp_ops *hw_ops; + void *qp_ctx; + void (*req_cb)(struct hisi_qp *qp, void *data); + struct work_struct work; + struct workqueue_struct *wq; + + struct hisi_qm *qm; +}; + +int hisi_qm_init(struct hisi_qm *qm); +void hisi_qm_uninit(struct hisi_qm *qm); +int hisi_qm_start(struct hisi_qm *qm); +int hisi_qm_stop(struct hisi_qm *qm); +struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type); +int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg); +int hisi_qm_stop_qp(struct hisi_qp *qp); +void hisi_qm_release_qp(struct hisi_qp *qp); +int hisi_qp_send(struct hisi_qp *qp, const void *msg); +int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number); +void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, + u32 msi); +int hisi_qm_hw_error_handle(struct hisi_qm *qm); +enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev); +#endif -- cgit From dfed0098ab91f647b5720ab6f1e03b5b55139408 Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Fri, 2 Aug 2019 15:57:51 +0800 Subject: crypto: hisilicon - add hardware SGL support HiSilicon accelerators in Hip08 use the same hardware scatterlist format for data. We support it in this module. Specific accelerator drivers can use hisi_acc_create_sgl_pool to allocate hardware SGLs in advance, then use hisi_acc_sg_buf_map_to_hw_sgl to get one hardware SGL and fill the related information into it. The DMA address of the mapped hardware SGL can be passed to the SGL src/dst fields in a QM SQE. Signed-off-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/Kconfig | 8 ++ drivers/crypto/hisilicon/Makefile | 1 + drivers/crypto/hisilicon/sgl.c | 214 ++++++++++++++++++++++++++++++++++++++ drivers/crypto/hisilicon/sgl.h | 24 +++++ 4 files changed, 247 insertions(+) create mode 100644 drivers/crypto/hisilicon/sgl.c create mode 100644 drivers/crypto/hisilicon/sgl.h (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index b79be8dc78e7..457d9bcb0d4e 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -19,3 +19,11 @@ config CRYPTO_DEV_HISI_QM help HiSilicon accelerator engines use a common queue management interface. Specific engine driver may use this module.
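[Editor's aside, not part of any patch: the expected calling sequence for the pool API added below, pieced together from the kernel-doc in sgl.c; dev, sgl and req_id stand in for a real engine driver's state, and error handling is elided:

	struct hisi_acc_sgl_pool pool;
	struct hisi_acc_hw_sgl *hw_sgl;
	dma_addr_t hw_sgl_dma;

	hisi_acc_create_sgl_pool(dev, &pool, QM_Q_DEPTH);
	hw_sgl = hisi_acc_sg_buf_map_to_hw_sgl(dev, sgl, &pool, req_id,
					       &hw_sgl_dma);
	/* hw_sgl_dma goes into the SQE source/dest address fields */
	hisi_acc_sg_buf_unmap(dev, sgl, hw_sgl);
	hisi_acc_free_sgl_pool(dev, &pool);
]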
+ +config CRYPTO_HISI_SGL + tristate + depends on ARM64 + help + HiSilicon accelerator engines use a common hardware scatterlist + interface for data format. Specific engine driver may use this + module. diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile index 05e9052e0f52..96cb9134194e 100644 --- a/drivers/crypto/hisilicon/Makefile +++ b/drivers/crypto/hisilicon/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/ obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += qm.o +obj-$(CONFIG_CRYPTO_HISI_SGL) += sgl.o diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c new file mode 100644 index 000000000000..8ef7679a365e --- /dev/null +++ b/drivers/crypto/hisilicon/sgl.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 HiSilicon Limited. */ +#include +#include +#include "./sgl.h" + +#define HISI_ACC_SGL_SGE_NR_MIN 1 +#define HISI_ACC_SGL_SGE_NR_MAX 255 +#define HISI_ACC_SGL_SGE_NR_DEF 10 +#define HISI_ACC_SGL_NR_MAX 256 +#define HISI_ACC_SGL_ALIGN_SIZE 64 + +static int acc_sgl_sge_set(const char *val, const struct kernel_param *kp) +{ + int ret; + u32 n; + + if (!val) + return -EINVAL; + + ret = kstrtou32(val, 10, &n); + if (ret != 0 || n > HISI_ACC_SGL_SGE_NR_MAX || n == 0) + return -EINVAL; + + return param_set_int(val, kp); +} + +static const struct kernel_param_ops acc_sgl_sge_ops = { + .set = acc_sgl_sge_set, + .get = param_get_int, +}; + +static u32 acc_sgl_sge_nr = HISI_ACC_SGL_SGE_NR_DEF; +module_param_cb(acc_sgl_sge_nr, &acc_sgl_sge_ops, &acc_sgl_sge_nr, 0444); +MODULE_PARM_DESC(acc_sgl_sge_nr, "Number of sge in sgl(1-255)"); + +struct acc_hw_sge { + dma_addr_t buf; + void *page_ctrl; + __le32 len; + __le32 pad; + __le32 pad0; + __le32 pad1; +}; + +/* use default sgl head size 64B */ +struct hisi_acc_hw_sgl { + dma_addr_t next_dma; + __le16 entry_sum_in_chain; + __le16 entry_sum_in_sgl; + __le16 entry_length_in_sgl; + __le16 pad0; + __le64 pad1[5]; + struct hisi_acc_hw_sgl *next; + struct acc_hw_sge sge_entries[]; +} __aligned(1); + +/** + * hisi_acc_create_sgl_pool() - Create a hw sgl pool. + * @dev: The device which hw sgl pool belongs to. + * @pool: Pointer of pool. + * @count: Count of hisi_acc_hw_sgl in pool. + * + * This function creates a hw sgl pool, after this user can get hw sgl memory + * from it. + */ +int hisi_acc_create_sgl_pool(struct device *dev, + struct hisi_acc_sgl_pool *pool, u32 count) +{ + u32 sgl_size; + u32 size; + + if (!dev || !pool || !count) + return -EINVAL; + + sgl_size = sizeof(struct acc_hw_sge) * acc_sgl_sge_nr + + sizeof(struct hisi_acc_hw_sgl); + size = sgl_size * count; + + pool->sgl = dma_alloc_coherent(dev, size, &pool->sgl_dma, GFP_KERNEL); + if (!pool->sgl) + return -ENOMEM; + + pool->size = size; + pool->count = count; + pool->sgl_size = sgl_size; + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool); + +/** + * hisi_acc_free_sgl_pool() - Free a hw sgl pool. + * @dev: The device which hw sgl pool belongs to. + * @pool: Pointer of pool. + * + * This function frees memory of a hw sgl pool. 
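+ *
+ * (Editor's note, not in the original patch: the pool is a single coherent
+ * DMA allocation, so every hw sgl handed out by acc_get_sgl() points into
+ * it and becomes invalid here; callers must unmap in-flight buffers with
+ * hisi_acc_sg_buf_unmap() before freeing the pool.)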
+ */ +void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool) +{ + dma_free_coherent(dev, pool->size, pool->sgl, pool->sgl_dma); + memset(pool, 0, sizeof(struct hisi_acc_sgl_pool)); +} +EXPORT_SYMBOL_GPL(hisi_acc_free_sgl_pool); + +struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool, u32 index, + dma_addr_t *hw_sgl_dma) +{ + if (!pool || !hw_sgl_dma || index >= pool->count || !pool->sgl) + return ERR_PTR(-EINVAL); + + *hw_sgl_dma = pool->sgl_dma + pool->sgl_size * index; + return (void *)pool->sgl + pool->sgl_size * index; +} + +void acc_put_sgl(struct hisi_acc_sgl_pool *pool, u32 index) {} + +static void sg_map_to_hw_sg(struct scatterlist *sgl, + struct acc_hw_sge *hw_sge) +{ + hw_sge->buf = sgl->dma_address; + hw_sge->len = sgl->dma_length; +} + +static void inc_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl) +{ + hw_sgl->entry_sum_in_sgl++; +} + +static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum) +{ + hw_sgl->entry_sum_in_chain = sum; +} + +/** + * hisi_acc_sg_buf_map_to_hw_sgl - Map a scatterlist to a hw sgl. + * @dev: The device which hw sgl belongs to. + * @sgl: Scatterlist which will be mapped to hw sgl. + * @pool: Pool which hw sgl memory will be allocated in. + * @index: Index of hisi_acc_hw_sgl in pool. + * @hw_sgl_dma: The dma address of allocated hw sgl. + * + * This function builds hw sgl according input sgl, user can use hw_sgl_dma + * as src/dst in its BD. Only support single hw sgl currently. + */ +struct hisi_acc_hw_sgl * +hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, + struct scatterlist *sgl, + struct hisi_acc_sgl_pool *pool, + u32 index, dma_addr_t *hw_sgl_dma) +{ + struct hisi_acc_hw_sgl *curr_hw_sgl; + dma_addr_t curr_sgl_dma; + struct acc_hw_sge *curr_hw_sge; + struct scatterlist *sg; + int sg_n = sg_nents(sgl); + int i, ret; + + if (!dev || !sgl || !pool || !hw_sgl_dma || sg_n > acc_sgl_sge_nr) + return ERR_PTR(-EINVAL); + + ret = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); + if (!ret) + return ERR_PTR(-EINVAL); + + curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma); + if (!curr_hw_sgl) { + ret = -ENOMEM; + goto err_unmap_sg; + } + curr_hw_sgl->entry_length_in_sgl = acc_sgl_sge_nr; + curr_hw_sge = curr_hw_sgl->sge_entries; + + for_each_sg(sgl, sg, sg_n, i) { + sg_map_to_hw_sg(sg, curr_hw_sge); + inc_hw_sgl_sge(curr_hw_sgl); + curr_hw_sge++; + } + + update_hw_sgl_sum_sge(curr_hw_sgl, acc_sgl_sge_nr); + *hw_sgl_dma = curr_sgl_dma; + + return curr_hw_sgl; + +err_unmap_sg: + dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl); + +/** + * hisi_acc_sg_buf_unmap() - Unmap allocated hw sgl. + * @dev: The device which hw sgl belongs to. + * @sgl: Related scatterlist. + * @hw_sgl: Virtual address of hw sgl. + * @hw_sgl_dma: DMA address of hw sgl. + * @pool: Pool which hw sgl is allocated in. + * + * This function unmaps allocated hw sgl. 
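+ *
+ * (Editor's note, a sketch that is not part of the patch: this is the
+ * inverse of hisi_acc_sg_buf_map_to_hw_sgl(), so a driver keeping both
+ * handles, like the ZIP driver's struct hisi_zip_req with hw_src and
+ * hw_dst, would call:
+ *
+ *	hisi_acc_sg_buf_unmap(dev, req->src, req->hw_src);
+ *	hisi_acc_sg_buf_unmap(dev, req->dst, req->hw_dst);
+ *
+ * once the hardware has completed the request.)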
+ */ +void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl, + struct hisi_acc_hw_sgl *hw_sgl) +{ + dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL); + + hw_sgl->entry_sum_in_chain = 0; + hw_sgl->entry_sum_in_sgl = 0; + hw_sgl->entry_length_in_sgl = 0; +} +EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_unmap); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Zhou Wang "); +MODULE_DESCRIPTION("HiSilicon Accelerator SGL support"); diff --git a/drivers/crypto/hisilicon/sgl.h b/drivers/crypto/hisilicon/sgl.h new file mode 100644 index 000000000000..3ac8871c7acf --- /dev/null +++ b/drivers/crypto/hisilicon/sgl.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019 HiSilicon Limited. */ +#ifndef HISI_ACC_SGL_H +#define HISI_ACC_SGL_H + +struct hisi_acc_sgl_pool { + struct hisi_acc_hw_sgl *sgl; + dma_addr_t sgl_dma; + size_t size; + u32 count; + size_t sgl_size; +}; + +struct hisi_acc_hw_sgl * +hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, + struct scatterlist *sgl, + struct hisi_acc_sgl_pool *pool, + u32 index, dma_addr_t *hw_sgl_dma); +void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl, + struct hisi_acc_hw_sgl *hw_sgl); +int hisi_acc_create_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool, + u32 count); +void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool); +#endif -- cgit From 62c455ca853e3e352e465d66a6cc39f1f88caa60 Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Fri, 2 Aug 2019 15:57:52 +0800 Subject: crypto: hisilicon - add HiSilicon ZIP accelerator support The HiSilicon ZIP accelerator implements the zlib and gzip algorithms. It uses the HiSilicon QM as the interface to the CPU. This patch provides a PCIe driver for the accelerator and registers it to the crypto acomp interface. It also uses sgl as the data input/output interface. Signed-off-by: Zhou Wang Signed-off-by: Shiju Jose Signed-off-by: Kenneth Lee Signed-off-by: Hao Fang Reviewed-by: Jonathan Cameron Reviewed-by: John Garry Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/Kconfig | 8 + drivers/crypto/hisilicon/Makefile | 1 + drivers/crypto/hisilicon/zip/Makefile | 2 + drivers/crypto/hisilicon/zip/zip.h | 71 ++++ drivers/crypto/hisilicon/zip/zip_crypto.c | 651 ++++++++++++++++++++++++++++++ drivers/crypto/hisilicon/zip/zip_main.c | 504 +++++++++++++++++++++++ 6 files changed, 1237 insertions(+) create mode 100644 drivers/crypto/hisilicon/zip/Makefile create mode 100644 drivers/crypto/hisilicon/zip/zip.h create mode 100644 drivers/crypto/hisilicon/zip/zip_crypto.c create mode 100644 drivers/crypto/hisilicon/zip/zip_main.c (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index 457d9bcb0d4e..19293172b408 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -27,3 +27,11 @@ config CRYPTO_HISI_SGL HiSilicon accelerator engines use a common hardware scatterlist interface for data format. Specific engine driver may use this module.
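[Editor's aside, not part of any patch: once the ZIP driver below is loaded, kernel users reach it through the generic acomp API; a minimal sketch with error handling elided, where src_sgl, dst_sgl, slen and dlen are the caller's own buffers and lengths:

	struct crypto_acomp *tfm;
	struct acomp_req *req;
	int ret;

	tfm = crypto_alloc_acomp("zlib-deflate", 0, 0);
	req = acomp_request_alloc(tfm);
	acomp_request_set_params(req, src_sgl, dst_sgl, slen, dlen);
	ret = crypto_acomp_compress(req);
	acomp_request_free(req);
	crypto_free_acomp(tfm);
]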
+ +config CRYPTO_DEV_HISI_ZIP + tristate "Support for HiSilicon ZIP accelerator" + select CRYPTO_DEV_HISI_QM + select CRYPTO_HISI_SGL + select SG_SPLIT + help + Support for HiSilicon ZIP Driver diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile index 96cb9134194e..45a279741126 100644 --- a/drivers/crypto/hisilicon/Makefile +++ b/drivers/crypto/hisilicon/Makefile @@ -2,3 +2,4 @@ obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/ obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += qm.o obj-$(CONFIG_CRYPTO_HISI_SGL) += sgl.o +obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/ diff --git a/drivers/crypto/hisilicon/zip/Makefile b/drivers/crypto/hisilicon/zip/Makefile new file mode 100644 index 000000000000..a936f099ee22 --- /dev/null +++ b/drivers/crypto/hisilicon/zip/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += hisi_zip.o +hisi_zip-objs = zip_main.o zip_crypto.o diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h new file mode 100644 index 000000000000..ffb00d987d02 --- /dev/null +++ b/drivers/crypto/hisilicon/zip/zip.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019 HiSilicon Limited. */ +#ifndef HISI_ZIP_H +#define HISI_ZIP_H + +#undef pr_fmt +#define pr_fmt(fmt) "hisi_zip: " fmt + +#include +#include "../qm.h" +#include "../sgl.h" + +/* hisi_zip_sqe dw3 */ +#define HZIP_BD_STATUS_M GENMASK(7, 0) +/* hisi_zip_sqe dw9 */ +#define HZIP_REQ_TYPE_M GENMASK(7, 0) +#define HZIP_ALG_TYPE_ZLIB 0x02 +#define HZIP_ALG_TYPE_GZIP 0x03 +#define HZIP_BUF_TYPE_M GENMASK(11, 8) +#define HZIP_PBUFFER 0x0 +#define HZIP_SGL 0x1 + +enum hisi_zip_error_type { + /* negative compression */ + HZIP_NC_ERR = 0x0d, +}; + +struct hisi_zip_ctrl; + +struct hisi_zip { + struct hisi_qm qm; + struct list_head list; + struct hisi_zip_ctrl *ctrl; +}; + +struct hisi_zip_sqe { + u32 consumed; + u32 produced; + u32 comp_data_length; + u32 dw3; + u32 input_data_length; + u32 lba_l; + u32 lba_h; + u32 dw7; + u32 dw8; + u32 dw9; + u32 dw10; + u32 priv_info; + u32 dw12; + u32 tag; + u32 dest_avail_out; + u32 rsvd0; + u32 comp_head_addr_l; + u32 comp_head_addr_h; + u32 source_addr_l; + u32 source_addr_h; + u32 dest_addr_l; + u32 dest_addr_h; + u32 stream_ctx_addr_l; + u32 stream_ctx_addr_h; + u32 cipher_key1_addr_l; + u32 cipher_key1_addr_h; + u32 cipher_key2_addr_l; + u32 cipher_key2_addr_h; + u32 rsvd1[4]; +}; + +struct hisi_zip *find_zip_device(int node); +int hisi_zip_register_to_crypto(void); +void hisi_zip_unregister_from_crypto(void); +#endif diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c new file mode 100644 index 000000000000..303351325a58 --- /dev/null +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -0,0 +1,651 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 HiSilicon Limited. 
*/ +#include <crypto/internal/acompress.h> +#include <linux/bitfield.h> +#include <linux/dma-mapping.h> +#include <linux/scatterlist.h> +#include "zip.h" + +#define HZIP_ZLIB_HEAD_SIZE 2 +#define HZIP_GZIP_HEAD_SIZE 10 + +#define GZIP_HEAD_FHCRC_BIT BIT(1) +#define GZIP_HEAD_FEXTRA_BIT BIT(2) +#define GZIP_HEAD_FNAME_BIT BIT(3) +#define GZIP_HEAD_FCOMMENT_BIT BIT(4) + +#define GZIP_HEAD_FLG_SHIFT 3 +#define GZIP_HEAD_FEXTRA_SHIFT 10 +#define GZIP_HEAD_FEXTRA_XLEN 2 +#define GZIP_HEAD_FHCRC_SIZE 2 + +#define HZIP_CTX_Q_NUM 2 +#define HZIP_GZIP_HEAD_BUF 256 +#define HZIP_ALG_PRIORITY 300 + +static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c}; +static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = {0x1f, 0x8b, 0x08, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x03}; +enum hisi_zip_alg_type { + HZIP_ALG_TYPE_COMP = 0, + HZIP_ALG_TYPE_DECOMP = 1, +}; + +#define COMP_NAME_TO_TYPE(alg_name) \ + (!strcmp((alg_name), "zlib-deflate") ? HZIP_ALG_TYPE_ZLIB : \ + !strcmp((alg_name), "gzip") ? HZIP_ALG_TYPE_GZIP : 0) \ + +#define TO_HEAD_SIZE(req_type) \ + (((req_type) == HZIP_ALG_TYPE_ZLIB) ? sizeof(zlib_head) : \ + ((req_type) == HZIP_ALG_TYPE_GZIP) ? sizeof(gzip_head) : 0) \ + +#define TO_HEAD(req_type) \ + (((req_type) == HZIP_ALG_TYPE_ZLIB) ? zlib_head : \ + ((req_type) == HZIP_ALG_TYPE_GZIP) ? gzip_head : 0) \ + +struct hisi_zip_req { + struct acomp_req *req; + struct scatterlist *src; + struct scatterlist *dst; + size_t slen; + size_t dlen; + struct hisi_acc_hw_sgl *hw_src; + struct hisi_acc_hw_sgl *hw_dst; + dma_addr_t dma_src; + dma_addr_t dma_dst; + int req_id; +}; + +struct hisi_zip_req_q { + struct hisi_zip_req *q; + unsigned long *req_bitmap; + rwlock_t req_lock; + u16 size; +}; + +struct hisi_zip_qp_ctx { + struct hisi_qp *qp; + struct hisi_zip_sqe zip_sqe; + struct hisi_zip_req_q req_q; + struct hisi_acc_sgl_pool sgl_pool; + struct hisi_zip *zip_dev; + struct hisi_zip_ctx *ctx; +}; + +struct hisi_zip_ctx { +#define QPC_COMP 0 +#define QPC_DECOMP 1 + struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM]; +}; + +static void hisi_zip_config_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type) +{ + u32 val; + + val = (sqe->dw9) & ~HZIP_BUF_TYPE_M; + val |= FIELD_PREP(HZIP_BUF_TYPE_M, buf_type); + sqe->dw9 = val; +} + +static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag) +{ + sqe->tag = tag; +} + +static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type, + dma_addr_t s_addr, dma_addr_t d_addr, u32 slen, + u32 dlen) +{ + memset(sqe, 0, sizeof(struct hisi_zip_sqe)); + + sqe->input_data_length = slen; + sqe->dw9 = FIELD_PREP(HZIP_REQ_TYPE_M, req_type); + sqe->dest_avail_out = dlen; + sqe->source_addr_l = lower_32_bits(s_addr); + sqe->source_addr_h = upper_32_bits(s_addr); + sqe->dest_addr_l = lower_32_bits(d_addr); + sqe->dest_addr_h = upper_32_bits(d_addr); +} + +static int hisi_zip_create_qp(struct hisi_qm *qm, struct hisi_zip_qp_ctx *ctx, + int alg_type, int req_type) +{ + struct hisi_qp *qp; + int ret; + + qp = hisi_qm_create_qp(qm, alg_type); + if (IS_ERR(qp)) + return PTR_ERR(qp); + + qp->req_type = req_type; + qp->qp_ctx = ctx; + ctx->qp = qp; + + ret = hisi_qm_start_qp(qp, 0); + if (ret < 0) + goto err_release_qp; + + return 0; + +err_release_qp: + hisi_qm_release_qp(qp); + return ret; +} + +static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx) +{ + hisi_qm_stop_qp(ctx->qp); + hisi_qm_release_qp(ctx->qp); +} + +static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type) +{ + struct hisi_zip *hisi_zip; + struct hisi_qm *qm; + int ret, i, j; + + /* find the proper zip device */ + hisi_zip = find_zip_device(cpu_to_node(smp_processor_id())); + if
(!hisi_zip) { + pr_err("Failed to find a proper ZIP device!\n"); + return -ENODEV; + } + qm = &hisi_zip->qm; + + for (i = 0; i < HZIP_CTX_Q_NUM; i++) { + /* alg_type = 0 for compress, 1 for decompress in hw sqe */ + ret = hisi_zip_create_qp(qm, &hisi_zip_ctx->qp_ctx[i], i, + req_type); + if (ret) + goto err; + + hisi_zip_ctx->qp_ctx[i].zip_dev = hisi_zip; + } + + return 0; +err: + for (j = i - 1; j >= 0; j--) + hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[j]); + + return ret; +} + +static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx) +{ + int i; + + for (i = 1; i >= 0; i--) + hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]); +} + +static u16 get_extra_field_size(const u8 *start) +{ + return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN; +} + +static u32 get_name_field_size(const u8 *start) +{ + return strlen(start) + 1; +} + +static u32 get_comment_field_size(const u8 *start) +{ + return strlen(start) + 1; +} + +static u32 __get_gzip_head_size(const u8 *src) +{ + u8 head_flg = *(src + GZIP_HEAD_FLG_SHIFT); + u32 size = GZIP_HEAD_FEXTRA_SHIFT; + + if (head_flg & GZIP_HEAD_FEXTRA_BIT) + size += get_extra_field_size(src + size); + if (head_flg & GZIP_HEAD_FNAME_BIT) + size += get_name_field_size(src + size); + if (head_flg & GZIP_HEAD_FCOMMENT_BIT) + size += get_comment_field_size(src + size); + if (head_flg & GZIP_HEAD_FHCRC_BIT) + size += GZIP_HEAD_FHCRC_SIZE; + + return size; +} + +static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx) +{ + struct hisi_zip_req_q *req_q; + int i, ret; + + for (i = 0; i < HZIP_CTX_Q_NUM; i++) { + req_q = &ctx->qp_ctx[i].req_q; + req_q->size = QM_Q_DEPTH; + + req_q->req_bitmap = kcalloc(BITS_TO_LONGS(req_q->size), + sizeof(long), GFP_KERNEL); + if (!req_q->req_bitmap) { + ret = -ENOMEM; + if (i == 1) + goto err_free_loop0; + return ret; + } + rwlock_init(&req_q->req_lock); + + req_q->q = kcalloc(req_q->size, sizeof(struct hisi_zip_req), + GFP_KERNEL); + if (!req_q->q) { + ret = -ENOMEM; + if (i == 0) + goto err_free_bitmap; + else + goto err_free_loop1; + } + } + + return 0; + +err_free_loop1: + kfree(ctx->qp_ctx[QPC_DECOMP].req_q.req_bitmap); +err_free_loop0: + kfree(ctx->qp_ctx[QPC_COMP].req_q.q); +err_free_bitmap: + kfree(ctx->qp_ctx[QPC_COMP].req_q.req_bitmap); + return ret; +} + +static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx) +{ + int i; + + for (i = 0; i < HZIP_CTX_Q_NUM; i++) { + kfree(ctx->qp_ctx[i].req_q.q); + kfree(ctx->qp_ctx[i].req_q.req_bitmap); + } +} + +static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx) +{ + struct hisi_zip_qp_ctx *tmp; + int i, ret; + + for (i = 0; i < HZIP_CTX_Q_NUM; i++) { + tmp = &ctx->qp_ctx[i]; + ret = hisi_acc_create_sgl_pool(&tmp->qp->qm->pdev->dev, + &tmp->sgl_pool, + QM_Q_DEPTH << 1); + if (ret < 0) { + if (i == 1) + goto err_free_sgl_pool0; + return -ENOMEM; + } + } + + return 0; + +err_free_sgl_pool0: + hisi_acc_free_sgl_pool(&ctx->qp_ctx[QPC_COMP].qp->qm->pdev->dev, + &ctx->qp_ctx[QPC_COMP].sgl_pool); + return -ENOMEM; +} + +static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx) +{ + int i; + + for (i = 0; i < HZIP_CTX_Q_NUM; i++) + hisi_acc_free_sgl_pool(&ctx->qp_ctx[i].qp->qm->pdev->dev, + &ctx->qp_ctx[i].sgl_pool); +} + +static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx, + struct hisi_zip_req *req) +{ + struct hisi_zip_req_q *req_q = &qp_ctx->req_q; + + if (qp_ctx->qp->alg_type == HZIP_ALG_TYPE_COMP) + kfree(req->dst); + else + kfree(req->src); + + write_lock(&req_q->req_lock); + clear_bit(req->req_id, req_q->req_bitmap); + memset(req, 0, sizeof(struct hisi_zip_req)); + 
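/* the slot was cleared while holding req_lock, so its id is safe to reuse */ +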
write_unlock(&req_q->req_lock); +} + +static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data) +{ + struct hisi_zip_sqe *sqe = data; + struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx; + struct hisi_zip_req_q *req_q = &qp_ctx->req_q; + struct hisi_zip_req *req = req_q->q + sqe->tag; + struct acomp_req *acomp_req = req->req; + struct device *dev = &qp->qm->pdev->dev; + u32 status, dlen, head_size; + int err = 0; + + status = sqe->dw3 & HZIP_BD_STATUS_M; + + if (status != 0 && status != HZIP_NC_ERR) { + dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n", + (qp->alg_type == 0) ? "" : "de", qp->qp_id, status, + sqe->produced); + err = -EIO; + } + dlen = sqe->produced; + + hisi_acc_sg_buf_unmap(dev, req->src, req->hw_src); + hisi_acc_sg_buf_unmap(dev, req->dst, req->hw_dst); + + head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0; + acomp_req->dlen = dlen + head_size; + + if (acomp_req->base.complete) + acomp_request_complete(acomp_req, err); + + hisi_zip_remove_req(qp_ctx, req); +} + +static void hisi_zip_set_acomp_cb(struct hisi_zip_ctx *ctx, + void (*fn)(struct hisi_qp *, void *)) +{ + int i; + + for (i = 0; i < HZIP_CTX_Q_NUM; i++) + ctx->qp_ctx[i].qp->req_cb = fn; +} + +static int hisi_zip_acomp_init(struct crypto_acomp *tfm) +{ + const char *alg_name = crypto_tfm_alg_name(&tfm->base); + struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base); + int ret; + + ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name)); + if (ret) + return ret; + + ret = hisi_zip_create_req_q(ctx); + if (ret) + goto err_ctx_exit; + + ret = hisi_zip_create_sgl_pool(ctx); + if (ret) + goto err_release_req_q; + + hisi_zip_set_acomp_cb(ctx, hisi_zip_acomp_cb); + + return 0; + +err_release_req_q: + hisi_zip_release_req_q(ctx); +err_ctx_exit: + hisi_zip_ctx_exit(ctx); + return ret; +} + +static void hisi_zip_acomp_exit(struct crypto_acomp *tfm) +{ + struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base); + + hisi_zip_set_acomp_cb(ctx, NULL); + hisi_zip_release_sgl_pool(ctx); + hisi_zip_release_req_q(ctx); + hisi_zip_ctx_exit(ctx); +} + +static int add_comp_head(struct scatterlist *dst, u8 req_type) +{ + int head_size = TO_HEAD_SIZE(req_type); + const u8 *head = TO_HEAD(req_type); + int ret; + + ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size); + if (ret != head_size) + return -ENOMEM; + + return head_size; +} + +static size_t get_gzip_head_size(struct scatterlist *sgl) +{ + char buf[HZIP_GZIP_HEAD_BUF]; + + sg_copy_to_buffer(sgl, sg_nents(sgl), buf, sizeof(buf)); + + return __get_gzip_head_size(buf); +} + +static size_t get_comp_head_size(struct scatterlist *src, u8 req_type) +{ + switch (req_type) { + case HZIP_ALG_TYPE_ZLIB: + return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB); + case HZIP_ALG_TYPE_GZIP: + return get_gzip_head_size(src); + default: + pr_err("request type does not support!\n"); + return -EINVAL; + } +} + +static int get_sg_skip_bytes(struct scatterlist *sgl, size_t bytes, + size_t remains, struct scatterlist **out) +{ +#define SPLIT_NUM 2 + size_t split_sizes[SPLIT_NUM]; + int out_mapped_nents[SPLIT_NUM]; + + split_sizes[0] = bytes; + split_sizes[1] = remains; + + return sg_split(sgl, 0, 0, SPLIT_NUM, split_sizes, out, + out_mapped_nents, GFP_KERNEL); +} + +static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, + struct hisi_zip_qp_ctx *qp_ctx, + size_t head_size, bool is_comp) +{ + struct hisi_zip_req_q *req_q = &qp_ctx->req_q; + struct hisi_zip_req *q = req_q->q; + struct hisi_zip_req *req_cache; + struct scatterlist *out[2]; + struct scatterlist *sgl; + size_t 
len; + int ret, req_id; + + /* + * The hardware neither consumes nor produces the zlib/gzip compression + * head, so handle it here: for decompression, split req->src to get an + * sgl without the head; for compression, the head has already been + * written to req->dst, so split req->dst and let the hardware place + * the compressed data after it. + */ + if (is_comp) { + sgl = req->dst; + len = req->dlen - head_size; + } else { + sgl = req->src; + len = req->slen - head_size; + } + + ret = get_sg_skip_bytes(sgl, head_size, len, out); + if (ret) + return ERR_PTR(ret); + + /* sgl for comp head is useless, so free it now */ + kfree(out[0]); + + write_lock(&req_q->req_lock); + + req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size); + if (req_id >= req_q->size) { + write_unlock(&req_q->req_lock); + dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n"); + kfree(out[1]); + return ERR_PTR(-EBUSY); + } + set_bit(req_id, req_q->req_bitmap); + + req_cache = q + req_id; + req_cache->req_id = req_id; + req_cache->req = req; + if (is_comp) { + req_cache->src = req->src; + req_cache->dst = out[1]; + req_cache->slen = req->slen; + req_cache->dlen = req->dlen - head_size; + } else { + req_cache->src = out[1]; + req_cache->dst = req->dst; + req_cache->slen = req->slen - head_size; + req_cache->dlen = req->dlen; + } + + write_unlock(&req_q->req_lock); + + return req_cache; +} + +static int hisi_zip_do_work(struct hisi_zip_req *req, + struct hisi_zip_qp_ctx *qp_ctx) +{ + struct hisi_zip_sqe *zip_sqe = &qp_ctx->zip_sqe; + struct hisi_qp *qp = qp_ctx->qp; + struct device *dev = &qp->qm->pdev->dev; + struct hisi_acc_sgl_pool *pool = &qp_ctx->sgl_pool; + dma_addr_t input; + dma_addr_t output; + int ret; + + if (!req->src || !req->slen || !req->dst || !req->dlen) + return -EINVAL; + + req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->src, pool, + req->req_id << 1, &input); + if (IS_ERR(req->hw_src)) + return PTR_ERR(req->hw_src); + req->dma_src = input; + + req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->dst, pool, + (req->req_id << 1) + 1, + &output); + if (IS_ERR(req->hw_dst)) { + ret = PTR_ERR(req->hw_dst); + goto err_unmap_input; + } + req->dma_dst = output; + + hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, req->slen, + req->dlen); + hisi_zip_config_buf_type(zip_sqe, HZIP_SGL); + hisi_zip_config_tag(zip_sqe, req->req_id); + + /* send command to start a task */ + ret = hisi_qp_send(qp, zip_sqe); + if (ret < 0) + goto err_unmap_output; + + return -EINPROGRESS; + +err_unmap_output: + hisi_acc_sg_buf_unmap(dev, req->dst, req->hw_dst); +err_unmap_input: + hisi_acc_sg_buf_unmap(dev, req->src, req->hw_src); + return ret; +} + +static int hisi_zip_acompress(struct acomp_req *acomp_req) +{ + struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm); + struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[QPC_COMP]; + struct hisi_zip_req *req; + int head_size; + int ret; + + /* let's output compression head now */ + head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type); + if (head_size < 0) + return -ENOMEM; + + req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true); + if (IS_ERR(req)) + return PTR_ERR(req); + + ret = hisi_zip_do_work(req, qp_ctx); + if (ret != -EINPROGRESS) + hisi_zip_remove_req(qp_ctx, req); + + return ret; +} + +static int hisi_zip_adecompress(struct acomp_req *acomp_req) +{ + struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm); + struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[QPC_DECOMP]; + struct hisi_zip_req *req; + size_t head_size; + int ret; + + head_size =
get_comp_head_size(acomp_req->src, qp_ctx->qp->req_type); + + req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false); + if (IS_ERR(req)) + return PTR_ERR(req); + + ret = hisi_zip_do_work(req, qp_ctx); + if (ret != -EINPROGRESS) + hisi_zip_remove_req(qp_ctx, req); + + return ret; +} + +static struct acomp_alg hisi_zip_acomp_zlib = { + .init = hisi_zip_acomp_init, + .exit = hisi_zip_acomp_exit, + .compress = hisi_zip_acompress, + .decompress = hisi_zip_adecompress, + .base = { + .cra_name = "zlib-deflate", + .cra_driver_name = "hisi-zlib-acomp", + .cra_module = THIS_MODULE, + .cra_priority = HZIP_ALG_PRIORITY, + .cra_ctxsize = sizeof(struct hisi_zip_ctx), + } +}; + +static struct acomp_alg hisi_zip_acomp_gzip = { + .init = hisi_zip_acomp_init, + .exit = hisi_zip_acomp_exit, + .compress = hisi_zip_acompress, + .decompress = hisi_zip_adecompress, + .base = { + .cra_name = "gzip", + .cra_driver_name = "hisi-gzip-acomp", + .cra_module = THIS_MODULE, + .cra_priority = HZIP_ALG_PRIORITY, + .cra_ctxsize = sizeof(struct hisi_zip_ctx), + } +}; + +int hisi_zip_register_to_crypto(void) +{ + int ret = 0; + + ret = crypto_register_acomp(&hisi_zip_acomp_zlib); + if (ret) { + pr_err("Zlib acomp algorithm registration failed\n"); + return ret; + } + + ret = crypto_register_acomp(&hisi_zip_acomp_gzip); + if (ret) { + pr_err("Gzip acomp algorithm registration failed\n"); + crypto_unregister_acomp(&hisi_zip_acomp_zlib); + } + + return ret; +} + +void hisi_zip_unregister_from_crypto(void) +{ + crypto_unregister_acomp(&hisi_zip_acomp_gzip); + crypto_unregister_acomp(&hisi_zip_acomp_zlib); +} diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c new file mode 100644 index 000000000000..ee4e20e0230e --- /dev/null +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -0,0 +1,504 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 HiSilicon Limited. 
*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "zip.h" + +#define PCI_DEVICE_ID_ZIP_PF 0xa250 + +#define HZIP_VF_NUM 63 +#define HZIP_QUEUE_NUM_V1 4096 +#define HZIP_QUEUE_NUM_V2 1024 + +#define HZIP_CLOCK_GATE_CTRL 0x301004 +#define COMP0_ENABLE BIT(0) +#define COMP1_ENABLE BIT(1) +#define DECOMP0_ENABLE BIT(2) +#define DECOMP1_ENABLE BIT(3) +#define DECOMP2_ENABLE BIT(4) +#define DECOMP3_ENABLE BIT(5) +#define DECOMP4_ENABLE BIT(6) +#define DECOMP5_ENABLE BIT(7) +#define ALL_COMP_DECOMP_EN (COMP0_ENABLE | COMP1_ENABLE | \ + DECOMP0_ENABLE | DECOMP1_ENABLE | \ + DECOMP2_ENABLE | DECOMP3_ENABLE | \ + DECOMP4_ENABLE | DECOMP5_ENABLE) +#define DECOMP_CHECK_ENABLE BIT(16) + +#define HZIP_PORT_ARCA_CHE_0 0x301040 +#define HZIP_PORT_ARCA_CHE_1 0x301044 +#define HZIP_PORT_AWCA_CHE_0 0x301060 +#define HZIP_PORT_AWCA_CHE_1 0x301064 +#define CACHE_ALL_EN 0xffffffff + +#define HZIP_BD_RUSER_32_63 0x301110 +#define HZIP_SGL_RUSER_32_63 0x30111c +#define HZIP_DATA_RUSER_32_63 0x301128 +#define HZIP_DATA_WUSER_32_63 0x301134 +#define HZIP_BD_WUSER_32_63 0x301140 + + + +#define HZIP_CORE_INT_SOURCE 0x3010A0 +#define HZIP_CORE_INT_MASK 0x3010A4 +#define HZIP_CORE_INT_STATUS 0x3010AC +#define HZIP_CORE_INT_STATUS_M_ECC BIT(1) +#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148 +#define SRAM_ECC_ERR_NUM_SHIFT 16 +#define SRAM_ECC_ERR_ADDR_SHIFT 24 +#define HZIP_CORE_INT_DISABLE 0x000007FF +#define HZIP_SQE_SIZE 128 +#define HZIP_PF_DEF_Q_NUM 64 +#define HZIP_PF_DEF_Q_BASE 0 + + +#define HZIP_NUMA_DISTANCE 100 + +static const char hisi_zip_name[] = "hisi_zip"; +LIST_HEAD(hisi_zip_list); +DEFINE_MUTEX(hisi_zip_list_lock); + +#ifdef CONFIG_NUMA +static struct hisi_zip *find_zip_device_numa(int node) +{ + struct hisi_zip *zip = NULL; + struct hisi_zip *hisi_zip; + int min_distance = HZIP_NUMA_DISTANCE; + struct device *dev; + + list_for_each_entry(hisi_zip, &hisi_zip_list, list) { + dev = &hisi_zip->qm.pdev->dev; + if (node_distance(dev->numa_node, node) < min_distance) { + zip = hisi_zip; + min_distance = node_distance(dev->numa_node, node); + } + } + + return zip; +} +#endif + +struct hisi_zip *find_zip_device(int node) +{ + struct hisi_zip *zip = NULL; + + mutex_lock(&hisi_zip_list_lock); +#ifdef CONFIG_NUMA + zip = find_zip_device_numa(node); +#else + zip = list_first_entry(&hisi_zip_list, struct hisi_zip, list); +#endif + mutex_unlock(&hisi_zip_list_lock); + + return zip; +} + +struct hisi_zip_hw_error { + u32 int_msk; + const char *msg; +}; + +static const struct hisi_zip_hw_error zip_hw_error[] = { + { .int_msk = BIT(0), .msg = "zip_ecc_1bitt_err" }, + { .int_msk = BIT(1), .msg = "zip_ecc_2bit_err" }, + { .int_msk = BIT(2), .msg = "zip_axi_rresp_err" }, + { .int_msk = BIT(3), .msg = "zip_axi_bresp_err" }, + { .int_msk = BIT(4), .msg = "zip_src_addr_parse_err" }, + { .int_msk = BIT(5), .msg = "zip_dst_addr_parse_err" }, + { .int_msk = BIT(6), .msg = "zip_pre_in_addr_err" }, + { .int_msk = BIT(7), .msg = "zip_pre_in_data_err" }, + { .int_msk = BIT(8), .msg = "zip_com_inf_err" }, + { .int_msk = BIT(9), .msg = "zip_enc_inf_err" }, + { .int_msk = BIT(10), .msg = "zip_pre_out_err" }, + { /* sentinel */ } +}; + +/* + * One ZIP controller has one PF and multiple VFs, some global configurations + * which PF has need this structure. + * + * Just relevant for PF. 
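+ * (Allocated in hisi_zip_pf_probe_init().)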
+ */ +struct hisi_zip_ctrl { + struct hisi_zip *hisi_zip; +}; + +static int pf_q_num_set(const char *val, const struct kernel_param *kp) +{ + struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, + PCI_DEVICE_ID_ZIP_PF, NULL); + u32 n, q_num; + u8 rev_id; + int ret; + + if (!val) + return -EINVAL; + + if (!pdev) { + q_num = min_t(u32, HZIP_QUEUE_NUM_V1, HZIP_QUEUE_NUM_V2); + pr_info("No device found currently, suppose queue number is %d\n", + q_num); + } else { + rev_id = pdev->revision; + switch (rev_id) { + case QM_HW_V1: + q_num = HZIP_QUEUE_NUM_V1; + break; + case QM_HW_V2: + q_num = HZIP_QUEUE_NUM_V2; + break; + default: + return -EINVAL; + } + } + + ret = kstrtou32(val, 10, &n); + if (ret != 0 || n > q_num || n == 0) + return -EINVAL; + + return param_set_int(val, kp); +} + +static const struct kernel_param_ops pf_q_num_ops = { + .set = pf_q_num_set, + .get = param_get_int, +}; + +static u32 pf_q_num = HZIP_PF_DEF_Q_NUM; +module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444); +MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)"); + +static int uacce_mode; +module_param(uacce_mode, int, 0); + +static const struct pci_device_id hisi_zip_dev_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids); + +static inline void hisi_zip_add_to_list(struct hisi_zip *hisi_zip) +{ + mutex_lock(&hisi_zip_list_lock); + list_add_tail(&hisi_zip->list, &hisi_zip_list); + mutex_unlock(&hisi_zip_list_lock); +} + +static inline void hisi_zip_remove_from_list(struct hisi_zip *hisi_zip) +{ + mutex_lock(&hisi_zip_list_lock); + list_del(&hisi_zip->list); + mutex_unlock(&hisi_zip_list_lock); +} + +static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip) +{ + void __iomem *base = hisi_zip->qm.io_base; + + /* qm user domain */ + writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1); + writel(ARUSER_M_CFG_ENABLE, base + QM_ARUSER_M_CFG_ENABLE); + writel(AXUSER_BASE, base + QM_AWUSER_M_CFG_1); + writel(AWUSER_M_CFG_ENABLE, base + QM_AWUSER_M_CFG_ENABLE); + writel(WUSER_M_CFG_ENABLE, base + QM_WUSER_M_CFG_ENABLE); + + /* qm cache */ + writel(AXI_M_CFG, base + QM_AXI_M_CFG); + writel(AXI_M_CFG_ENABLE, base + QM_AXI_M_CFG_ENABLE); + /* disable FLR triggered by BME(bus master enable) */ + writel(PEH_AXUSER_CFG, base + QM_PEH_AXUSER_CFG); + writel(PEH_AXUSER_CFG_ENABLE, base + QM_PEH_AXUSER_CFG_ENABLE); + + /* cache */ + writel(CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_0); + writel(CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_1); + writel(CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_0); + writel(CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_1); + + /* user domain configurations */ + writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63); + writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63); + writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63); + writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63); + writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63); + + /* let's open all compression/decompression cores */ + writel(DECOMP_CHECK_ENABLE | ALL_COMP_DECOMP_EN, + base + HZIP_CLOCK_GATE_CTRL); + + /* enable sqc writeback */ + writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE | + CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) | + FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL); +} + +static void hisi_zip_hw_error_set_state(struct hisi_zip *hisi_zip, bool state) +{ + struct hisi_qm *qm = &hisi_zip->qm; + + if (qm->ver == QM_HW_V1) { + writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_MASK); + 
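/* QM hw v1 can only mask ZIP core errors; it cannot report them */ +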
dev_info(&qm->pdev->dev, "ZIP v%d does not support hw error handle\n", + qm->ver); + return; + } + + if (state) { + /* clear ZIP hw error source if having */ + writel(HZIP_CORE_INT_DISABLE, hisi_zip->qm.io_base + + HZIP_CORE_INT_SOURCE); + /* enable ZIP hw error interrupts */ + writel(0, hisi_zip->qm.io_base + HZIP_CORE_INT_MASK); + } else { + /* disable ZIP hw error interrupts */ + writel(HZIP_CORE_INT_DISABLE, + hisi_zip->qm.io_base + HZIP_CORE_INT_MASK); + } +} + +static void hisi_zip_hw_error_init(struct hisi_zip *hisi_zip) +{ + hisi_qm_hw_error_init(&hisi_zip->qm, QM_BASE_CE, + QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT, 0, + QM_DB_RANDOM_INVALID); + hisi_zip_hw_error_set_state(hisi_zip, true); +} + +static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) +{ + struct hisi_qm *qm = &hisi_zip->qm; + struct hisi_zip_ctrl *ctrl; + + ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) + return -ENOMEM; + + hisi_zip->ctrl = ctrl; + ctrl->hisi_zip = hisi_zip; + + switch (qm->ver) { + case QM_HW_V1: + qm->ctrl_qp_num = HZIP_QUEUE_NUM_V1; + break; + + case QM_HW_V2: + qm->ctrl_qp_num = HZIP_QUEUE_NUM_V2; + break; + + default: + return -EINVAL; + } + + hisi_zip_set_user_domain_and_cache(hisi_zip); + hisi_zip_hw_error_init(hisi_zip); + + return 0; +} + +static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct hisi_zip *hisi_zip; + enum qm_hw_ver rev_id; + struct hisi_qm *qm; + int ret; + + rev_id = hisi_qm_get_hw_version(pdev); + if (rev_id == QM_HW_UNKNOWN) + return -EINVAL; + + hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL); + if (!hisi_zip) + return -ENOMEM; + pci_set_drvdata(pdev, hisi_zip); + + qm = &hisi_zip->qm; + qm->pdev = pdev; + qm->ver = rev_id; + + qm->sqe_size = HZIP_SQE_SIZE; + qm->dev_name = hisi_zip_name; + switch (uacce_mode) { + case 0: + qm->use_dma_api = true; + break; + case 1: + qm->use_dma_api = false; + break; + case 2: + qm->use_dma_api = true; + break; + default: + return -EINVAL; + } + + ret = hisi_qm_init(qm); + if (ret) { + dev_err(&pdev->dev, "Failed to init qm!\n"); + return ret; + } + + ret = hisi_zip_pf_probe_init(hisi_zip); + if (ret) + goto err_qm_uninit; + + qm->qp_base = HZIP_PF_DEF_Q_BASE; + qm->qp_num = pf_q_num; + + ret = hisi_qm_start(qm); + if (ret) + goto err_qm_uninit; + + hisi_zip_add_to_list(hisi_zip); + + return 0; + +err_qm_uninit: + hisi_qm_uninit(qm); + return ret; +} + +static void hisi_zip_remove(struct pci_dev *pdev) +{ + struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); + struct hisi_qm *qm = &hisi_zip->qm; + + hisi_qm_stop(qm); + hisi_zip_hw_error_set_state(hisi_zip, false); + hisi_qm_uninit(qm); + hisi_zip_remove_from_list(hisi_zip); +} + +static void hisi_zip_log_hw_error(struct hisi_zip *hisi_zip, u32 err_sts) +{ + const struct hisi_zip_hw_error *err = zip_hw_error; + struct device *dev = &hisi_zip->qm.pdev->dev; + u32 err_val; + + while (err->msg) { + if (err->int_msk & err_sts) { + dev_warn(dev, "%s [error status=0x%x] found\n", + err->msg, err->int_msk); + + if (HZIP_CORE_INT_STATUS_M_ECC & err->int_msk) { + err_val = readl(hisi_zip->qm.io_base + + HZIP_CORE_SRAM_ECC_ERR_INFO); + dev_warn(dev, "hisi-zip multi ecc sram num=0x%x\n", + ((err_val >> SRAM_ECC_ERR_NUM_SHIFT) & + 0xFF)); + dev_warn(dev, "hisi-zip multi ecc sram addr=0x%x\n", + (err_val >> SRAM_ECC_ERR_ADDR_SHIFT)); + } + } + err++; + } +} + +static pci_ers_result_t hisi_zip_hw_error_handle(struct hisi_zip *hisi_zip) +{ + u32 err_sts; + + /* read err sts */ + err_sts = 
readl(hisi_zip->qm.io_base + HZIP_CORE_INT_STATUS); + + if (err_sts) { + hisi_zip_log_hw_error(hisi_zip, err_sts); + /* clear error interrupts */ + writel(err_sts, hisi_zip->qm.io_base + HZIP_CORE_INT_SOURCE); + + return PCI_ERS_RESULT_NEED_RESET; + } + + return PCI_ERS_RESULT_RECOVERED; +} + +static pci_ers_result_t hisi_zip_process_hw_error(struct pci_dev *pdev) +{ + struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); + struct device *dev = &pdev->dev; + pci_ers_result_t qm_ret, zip_ret; + + if (!hisi_zip) { + dev_err(dev, + "Can't recover ZIP-error occurred during device init\n"); + return PCI_ERS_RESULT_NONE; + } + + qm_ret = hisi_qm_hw_error_handle(&hisi_zip->qm); + + zip_ret = hisi_zip_hw_error_handle(hisi_zip); + + return (qm_ret == PCI_ERS_RESULT_NEED_RESET || + zip_ret == PCI_ERS_RESULT_NEED_RESET) ? + PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED; +} + +static pci_ers_result_t hisi_zip_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + if (pdev->is_virtfn) + return PCI_ERS_RESULT_NONE; + + dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state); + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + return hisi_zip_process_hw_error(pdev); +} + +static const struct pci_error_handlers hisi_zip_err_handler = { + .error_detected = hisi_zip_error_detected, +}; + +static struct pci_driver hisi_zip_pci_driver = { + .name = "hisi_zip", + .id_table = hisi_zip_dev_ids, + .probe = hisi_zip_probe, + .remove = hisi_zip_remove, + .err_handler = &hisi_zip_err_handler, +}; + +static int __init hisi_zip_init(void) +{ + int ret; + + ret = pci_register_driver(&hisi_zip_pci_driver); + if (ret < 0) { + pr_err("Failed to register pci driver.\n"); + return ret; + } + + if (uacce_mode == 0 || uacce_mode == 2) { + ret = hisi_zip_register_to_crypto(); + if (ret < 0) { + pr_err("Failed to register driver to crypto.\n"); + goto err_crypto; + } + } + + return 0; + +err_crypto: + pci_unregister_driver(&hisi_zip_pci_driver); + return ret; +} + +static void __exit hisi_zip_exit(void) +{ + if (uacce_mode == 0 || uacce_mode == 2) + hisi_zip_unregister_from_crypto(); + pci_unregister_driver(&hisi_zip_pci_driver); +} + +module_init(hisi_zip_init); +module_exit(hisi_zip_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Zhou Wang "); +MODULE_DESCRIPTION("Driver for HiSilicon ZIP accelerator"); -- cgit From 79e09f30eeba857b09832209bfc66bd689c58328 Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Fri, 2 Aug 2019 15:57:53 +0800 Subject: crypto: hisilicon - add SRIOV support for ZIP HiSilicon ZIP engine supports PCI SRIOV. This patch enables this feature. Users can enable VFs and pass them through to a VM; the same ZIP driver then works in the VM to provide the zlib and gzip algorithms through the crypto acomp interface.
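As an illustration (the BDF below is hypothetical), VFs are brought up through the standard PCI sysfs knob, which lands in the hisi_zip_sriov_configure() callback added by this patch: echo 3 > /sys/bus/pci/devices/0000:75:00.0/sriov_numvfs. Writing 0 disables them again via hisi_zip_sriov_disable().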
Signed-off-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 97 +++++++++++++++++---- drivers/crypto/hisilicon/qm.h | 4 + drivers/crypto/hisilicon/zip/zip_main.c | 150 ++++++++++++++++++++++++++++++-- 3 files changed, 226 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index c095d4747812..4f6bbdd4a25b 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -16,6 +16,7 @@ #define QM_VF_EQ_INT_MASK 0xc #define QM_IRQ_NUM_V1 1 #define QM_IRQ_NUM_PF_V2 4 +#define QM_IRQ_NUM_VF_V2 2 #define QM_EQ_EVENT_IRQ_VECTOR 0 #define QM_AEQ_EVENT_IRQ_VECTOR 1 @@ -265,6 +266,7 @@ struct qm_doorbell { }; struct hisi_qm_hw_ops { + int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number); void (*qm_db)(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority); u32 (*get_irq_num)(struct hisi_qm *qm); @@ -422,7 +424,10 @@ static u32 qm_get_irq_num_v1(struct hisi_qm *qm) static u32 qm_get_irq_num_v2(struct hisi_qm *qm) { - return QM_IRQ_NUM_PF_V2; + if (qm->fun_type == QM_HW_PF) + return QM_IRQ_NUM_PF_V2; + else + return QM_IRQ_NUM_VF_V2; } static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe) @@ -591,12 +596,14 @@ static int qm_irq_register(struct hisi_qm *qm) if (ret) goto err_aeq_irq; - ret = request_irq(pci_irq_vector(pdev, - QM_ABNORMAL_EVENT_IRQ_VECTOR), - qm_abnormal_irq, IRQF_SHARED, - qm->dev_name, qm); - if (ret) - goto err_abonormal_irq; + if (qm->fun_type == QM_HW_PF) { + ret = request_irq(pci_irq_vector(pdev, + QM_ABNORMAL_EVENT_IRQ_VECTOR), + qm_abnormal_irq, IRQF_SHARED, + qm->dev_name, qm); + if (ret) + goto err_abonormal_irq; + } } return 0; @@ -616,8 +623,10 @@ static void qm_irq_unregister(struct hisi_qm *qm) if (qm->ver == QM_HW_V2) { free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); - free_irq(pci_irq_vector(pdev, - QM_ABNORMAL_EVENT_IRQ_VECTOR), qm); + + if (qm->fun_type == QM_HW_PF) + free_irq(pci_irq_vector(pdev, + QM_ABNORMAL_EVENT_IRQ_VECTOR), qm); } } @@ -717,6 +726,24 @@ static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base, return 0; } +static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) +{ + u64 sqc_vft; + int ret; + + ret = qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1); + if (ret) + return ret; + + sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | + ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); + *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2); + *number = (QM_SQC_VFT_NUM_MASK_v2 & + (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1; + + return 0; +} + static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, u32 msi) { @@ -815,6 +842,7 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v1 = { }; static const struct hisi_qm_hw_ops qm_hw_ops_v2 = { + .get_vft = qm_get_vft_v2, .qm_db = qm_db_v2, .get_irq_num = qm_get_irq_num_v2, .hw_error_init = qm_hw_error_init_v2, @@ -1195,6 +1223,9 @@ int hisi_qm_init(struct hisi_qm *qm) mutex_init(&qm->mailbox_lock); rwlock_init(&qm->qps_lock); + dev_dbg(dev, "init qm %s with %s\n", pdev->is_physfn ? "pf" : "vf", + qm->use_dma_api ? "dma api" : "iommu api"); + return 0; err_free_irq_vectors: @@ -1236,6 +1267,32 @@ void hisi_qm_uninit(struct hisi_qm *qm) } EXPORT_SYMBOL_GPL(hisi_qm_uninit); +/** + * hisi_qm_get_vft() - Get vft from a qm. + * @qm: The qm we want to get its vft. + * @base: The base number of queue in vft. + * @number: The number of queues in vft. 
+ * + * We can allocate multiple queues to a qm by configuring virtual function + * table. We get related configures by this function. Normally, we call this + * function in VF driver to get the queue information. + * + * qm hw v1 does not support this interface. + */ +int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) +{ + if (!base || !number) + return -EINVAL; + + if (!qm->ops->get_vft) { + dev_err(&qm->pdev->dev, "Don't support vft read!\n"); + return -EINVAL; + } + + return qm->ops->get_vft(qm, base, number); +} +EXPORT_SYMBOL_GPL(hisi_qm_get_vft); + /** * hisi_qm_set_vft() - Set "virtual function table" for a qm. * @fun_num: Number of operated function. @@ -1344,13 +1401,15 @@ static int __hisi_qm_start(struct hisi_qm *qm) if (qm->qp_num == 0) return -EINVAL; - ret = qm_dev_mem_reset(qm); - if (ret) - return ret; + if (qm->fun_type == QM_HW_PF) { + ret = qm_dev_mem_reset(qm); + if (ret) + return ret; - ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); - if (ret) - return ret; + ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); + if (ret) + return ret; + } QM_INIT_BUF(qm, eqe, QM_Q_DEPTH); QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH); @@ -1469,9 +1528,11 @@ int hisi_qm_stop(struct hisi_qm *qm) } } - ret = hisi_qm_set_vft(qm, 0, 0, 0); - if (ret < 0) - dev_err(dev, "Failed to set vft!\n"); + if (qm->fun_type == QM_HW_PF) { + ret = hisi_qm_set_vft(qm, 0, 0, 0); + if (ret < 0) + dev_err(dev, "Failed to set vft!\n"); + } return ret; } diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index a5849db163cb..8b3cb69dffca 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -80,6 +80,7 @@ enum qm_hw_ver { enum qm_fun_type { QM_HW_PF, + QM_HW_VF, }; struct qm_dma { @@ -98,6 +99,7 @@ struct hisi_qm_status { struct hisi_qm { enum qm_hw_ver ver; + enum qm_fun_type fun_type; const char *dev_name; struct pci_dev *pdev; void __iomem *io_base; @@ -174,7 +176,9 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg); int hisi_qm_stop_qp(struct hisi_qp *qp); void hisi_qm_release_qp(struct hisi_qp *qp); int hisi_qp_send(struct hisi_qp *qp, const void *msg); +int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number); int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number); +int hisi_qm_debug_init(struct hisi_qm *qm); void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, u32 msi); int hisi_qm_hw_error_handle(struct hisi_qm *qm); diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index ee4e20e0230e..b3e4f1ab9240 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -12,6 +12,7 @@ #include "zip.h" #define PCI_DEVICE_ID_ZIP_PF 0xa250 +#define PCI_DEVICE_ID_ZIP_VF 0xa251 #define HZIP_VF_NUM 63 #define HZIP_QUEUE_NUM_V1 4096 @@ -127,6 +128,7 @@ static const struct hisi_zip_hw_error zip_hw_error[] = { * Just relevant for PF. */ struct hisi_zip_ctrl { + u32 num_vfs; struct hisi_zip *hisi_zip; }; @@ -180,6 +182,7 @@ module_param(uacce_mode, int, 0); static const struct pci_device_id hisi_zip_dev_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) }, + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_VF) }, { 0, } }; MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids); @@ -324,6 +327,8 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) qm->sqe_size = HZIP_SQE_SIZE; qm->dev_name = hisi_zip_name; + qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? 
QM_HW_PF : + QM_HW_VF; switch (uacce_mode) { case 0: qm->use_dma_api = true; @@ -344,12 +349,28 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) return ret; } - ret = hisi_zip_pf_probe_init(hisi_zip); - if (ret) - goto err_qm_uninit; - - qm->qp_base = HZIP_PF_DEF_Q_BASE; - qm->qp_num = pf_q_num; + if (qm->fun_type == QM_HW_PF) { + ret = hisi_zip_pf_probe_init(hisi_zip); + if (ret) + return ret; + + qm->qp_base = HZIP_PF_DEF_Q_BASE; + qm->qp_num = pf_q_num; + } else if (qm->fun_type == QM_HW_VF) { + /* + * have no way to get qm configure in VM in v1 hardware, + * so currently force PF to uses HZIP_PF_DEF_Q_NUM, and force + * to trigger only one VF in v1 hardware. + * + * v2 hardware has no such problem. + */ + if (qm->ver == QM_HW_V1) { + qm->qp_base = HZIP_PF_DEF_Q_NUM; + qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM; + } else if (qm->ver == QM_HW_V2) + /* v2 starts to support get vft by mailbox */ + hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + } ret = hisi_qm_start(qm); if (ret) @@ -364,13 +385,127 @@ err_qm_uninit: return ret; } +/* Currently we only support equal assignment */ +static int hisi_zip_vf_q_assign(struct hisi_zip *hisi_zip, int num_vfs) +{ + struct hisi_qm *qm = &hisi_zip->qm; + u32 qp_num = qm->qp_num; + u32 q_base = qp_num; + u32 q_num, remain_q_num, i; + int ret; + + if (!num_vfs) + return -EINVAL; + + remain_q_num = qm->ctrl_qp_num - qp_num; + if (remain_q_num < num_vfs) + return -EINVAL; + + q_num = remain_q_num / num_vfs; + for (i = 1; i <= num_vfs; i++) { + if (i == num_vfs) + q_num += remain_q_num % num_vfs; + ret = hisi_qm_set_vft(qm, i, q_base, q_num); + if (ret) + return ret; + q_base += q_num; + } + + return 0; +} + +static int hisi_zip_clear_vft_config(struct hisi_zip *hisi_zip) +{ + struct hisi_zip_ctrl *ctrl = hisi_zip->ctrl; + struct hisi_qm *qm = &hisi_zip->qm; + u32 i, num_vfs = ctrl->num_vfs; + int ret; + + for (i = 1; i <= num_vfs; i++) { + ret = hisi_qm_set_vft(qm, i, 0, 0); + if (ret) + return ret; + } + + ctrl->num_vfs = 0; + + return 0; +} + +static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs) +{ +#ifdef CONFIG_PCI_IOV + struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); + int pre_existing_vfs, num_vfs, ret; + + pre_existing_vfs = pci_num_vf(pdev); + + if (pre_existing_vfs) { + dev_err(&pdev->dev, + "Can't enable VF. 
Please disable pre-enabled VFs!\n"); + return 0; + } + + num_vfs = min_t(int, max_vfs, HZIP_VF_NUM); + + ret = hisi_zip_vf_q_assign(hisi_zip, num_vfs); + if (ret) { + dev_err(&pdev->dev, "Can't assign queues for VF!\n"); + return ret; + } + + hisi_zip->ctrl->num_vfs = num_vfs; + + ret = pci_enable_sriov(pdev, num_vfs); + if (ret) { + dev_err(&pdev->dev, "Can't enable VF!\n"); + hisi_zip_clear_vft_config(hisi_zip); + return ret; + } + + return num_vfs; +#else + return 0; +#endif +} + +static int hisi_zip_sriov_disable(struct pci_dev *pdev) +{ + struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); + + if (pci_vfs_assigned(pdev)) { + dev_err(&pdev->dev, + "Can't disable VFs while VFs are assigned!\n"); + return -EPERM; + } + + /* remove in hisi_zip_pci_driver will be called to free VF resources */ + pci_disable_sriov(pdev); + + return hisi_zip_clear_vft_config(hisi_zip); +} + +static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + if (num_vfs == 0) + return hisi_zip_sriov_disable(pdev); + else + return hisi_zip_sriov_enable(pdev, num_vfs); +} + static void hisi_zip_remove(struct pci_dev *pdev) { struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); struct hisi_qm *qm = &hisi_zip->qm; + if (qm->fun_type == QM_HW_PF && hisi_zip->ctrl->num_vfs != 0) + hisi_zip_sriov_disable(pdev); + hisi_qm_stop(qm); - hisi_zip_hw_error_set_state(hisi_zip, false); + + if (qm->fun_type == QM_HW_PF) + hisi_zip_hw_error_set_state(hisi_zip, false); + hisi_qm_uninit(qm); hisi_zip_remove_from_list(hisi_zip); } @@ -461,6 +596,7 @@ static struct pci_driver hisi_zip_pci_driver = { .id_table = hisi_zip_dev_ids, .probe = hisi_zip_probe, .remove = hisi_zip_remove, + .sriov_configure = hisi_zip_sriov_configure, .err_handler = &hisi_zip_err_handler, }; -- cgit From 72c7a68d2ea34803e9c4ef948261ec6744fc72fc Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Fri, 2 Aug 2019 15:57:55 +0800 Subject: crypto: hisilicon - add debugfs for ZIP and QM HiSilicon ZIP engine driver uses debugfs to provide debug information, the usage can be found in /Documentation/ABI/testing/debugfs-hisi-zip. Signed-off-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 301 +++++++++++++++++++++++++ drivers/crypto/hisilicon/qm.h | 29 +++ drivers/crypto/hisilicon/zip/zip_main.c | 375 +++++++++++++++++++++++++++++++- 3 files changed, 704 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 4f6bbdd4a25b..363f71b77aed 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -2,10 +2,12 @@ /* Copyright (c) 2019 HiSilicon Limited. 
*/ #include #include +#include #include #include #include #include +#include #include #include "qm.h" @@ -114,6 +116,7 @@ #define QM_SQC_VFT_NUM_SHIFT_V2 45 #define QM_SQC_VFT_NUM_MASK_v2 GENMASK(9, 0) +#define QM_DFX_CNT_CLR_CE 0x100118 #define QM_ABNORMAL_INT_SOURCE 0x100000 #define QM_ABNORMAL_INT_MASK 0x100004 @@ -141,6 +144,7 @@ #define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0) #define QMC_ALIGN(sz) ALIGN(sz, 32) +#define QM_DBG_TMP_BUF_LEN 22 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \ (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \ @@ -270,11 +274,17 @@ struct hisi_qm_hw_ops { void (*qm_db)(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority); u32 (*get_irq_num)(struct hisi_qm *qm); + int (*debug_init)(struct hisi_qm *qm); void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, u32 msi); pci_ers_result_t (*hw_error_handle)(struct hisi_qm *qm); }; +static const char * const qm_debug_file_name[] = { + [CURRENT_Q] = "current_q", + [CLEAR_ENABLE] = "clear_enable", +}; + struct hisi_qm_hw_error { u32 int_msk; const char *msg; @@ -744,6 +754,229 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) return 0; } +static struct hisi_qm *file_to_qm(struct debugfs_file *file) +{ + struct qm_debug *debug = file->debug; + + return container_of(debug, struct hisi_qm, debug); +} + +static u32 current_q_read(struct debugfs_file *file) +{ + struct hisi_qm *qm = file_to_qm(file); + + return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT; +} + +static int current_q_write(struct debugfs_file *file, u32 val) +{ + struct hisi_qm *qm = file_to_qm(file); + u32 tmp; + + if (val >= qm->debug.curr_qm_qp_num) + return -EINVAL; + + tmp = val << QM_DFX_QN_SHIFT | + (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK); + writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); + + tmp = val << QM_DFX_QN_SHIFT | + (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK); + writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); + + return 0; +} + +static u32 clear_enable_read(struct debugfs_file *file) +{ + struct hisi_qm *qm = file_to_qm(file); + + return readl(qm->io_base + QM_DFX_CNT_CLR_CE); +} + +/* rd_clr_ctrl 1 enable read clear, otherwise 0 disable it */ +static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl) +{ + struct hisi_qm *qm = file_to_qm(file); + + if (rd_clr_ctrl > 1) + return -EINVAL; + + writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE); + + return 0; +} + +static ssize_t qm_debug_read(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + struct debugfs_file *file = filp->private_data; + enum qm_debug_file index = file->index; + char tbuf[QM_DBG_TMP_BUF_LEN]; + u32 val; + int ret; + + mutex_lock(&file->lock); + switch (index) { + case CURRENT_Q: + val = current_q_read(file); + break; + case CLEAR_ENABLE: + val = clear_enable_read(file); + break; + default: + mutex_unlock(&file->lock); + return -EINVAL; + } + mutex_unlock(&file->lock); + ret = sprintf(tbuf, "%u\n", val); + return simple_read_from_buffer(buf, count, pos, tbuf, ret); +} + +static ssize_t qm_debug_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct debugfs_file *file = filp->private_data; + enum qm_debug_file index = file->index; + unsigned long val; + char tbuf[QM_DBG_TMP_BUF_LEN]; + int len, ret; + + if (*pos != 0) + return 0; + + if (count >= QM_DBG_TMP_BUF_LEN) + return -ENOSPC; + + len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf, + count); + if (len < 0) + return len; + + 
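/* NUL-terminate the raw user input before parsing it as a number */ +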
tbuf[len] = '\0'; + if (kstrtoul(tbuf, 0, &val)) + return -EFAULT; + + mutex_lock(&file->lock); + switch (index) { + case CURRENT_Q: + ret = current_q_write(file, val); + if (ret) + goto err_input; + break; + case CLEAR_ENABLE: + ret = clear_enable_write(file, val); + if (ret) + goto err_input; + break; + default: + ret = -EINVAL; + goto err_input; + } + mutex_unlock(&file->lock); + + return count; + +err_input: + mutex_unlock(&file->lock); + return ret; +} + +static const struct file_operations qm_debug_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = qm_debug_read, + .write = qm_debug_write, +}; + +struct qm_dfx_registers { + char *reg_name; + u64 reg_offset; +}; + +#define CNT_CYC_REGS_NUM 10 +static struct qm_dfx_registers qm_dfx_regs[] = { + /* XXX_CNT are reading clear register */ + {"QM_ECC_1BIT_CNT ", 0x104000ull}, + {"QM_ECC_MBIT_CNT ", 0x104008ull}, + {"QM_DFX_MB_CNT ", 0x104018ull}, + {"QM_DFX_DB_CNT ", 0x104028ull}, + {"QM_DFX_SQE_CNT ", 0x104038ull}, + {"QM_DFX_CQE_CNT ", 0x104048ull}, + {"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050ull}, + {"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058ull}, + {"QM_DFX_ACC_FINISH_CNT ", 0x104060ull}, + {"QM_DFX_CQE_ERR_CNT ", 0x1040b4ull}, + {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull}, + {"QM_ECC_1BIT_INF ", 0x104004ull}, + {"QM_ECC_MBIT_INF ", 0x10400cull}, + {"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0ull}, + {"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4ull}, + {"QM_DFX_AXI_RDY_VLD ", 0x1040a8ull}, + {"QM_DFX_FF_ST0 ", 0x1040c8ull}, + {"QM_DFX_FF_ST1 ", 0x1040ccull}, + {"QM_DFX_FF_ST2 ", 0x1040d0ull}, + {"QM_DFX_FF_ST3 ", 0x1040d4ull}, + {"QM_DFX_FF_ST4 ", 0x1040d8ull}, + {"QM_DFX_FF_ST5 ", 0x1040dcull}, + {"QM_DFX_FF_ST6 ", 0x1040e0ull}, + {"QM_IN_IDLE_ST ", 0x1040e4ull}, + { NULL, 0} +}; + +static struct qm_dfx_registers qm_vf_dfx_regs[] = { + {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull}, + { NULL, 0} +}; + +static int qm_regs_show(struct seq_file *s, void *unused) +{ + struct hisi_qm *qm = s->private; + struct qm_dfx_registers *regs; + u32 val; + + if (qm->fun_type == QM_HW_PF) + regs = qm_dfx_regs; + else + regs = qm_vf_dfx_regs; + + while (regs->reg_name) { + val = readl(qm->io_base + regs->reg_offset); + seq_printf(s, "%s= 0x%08x\n", regs->reg_name, val); + regs++; + } + + return 0; +} + +static int qm_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, qm_regs_show, inode->i_private); +} + +static const struct file_operations qm_regs_fops = { + .owner = THIS_MODULE, + .open = qm_regs_open, + .read = seq_read, +}; + +static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index) +{ + struct dentry *qm_d = qm->debug.qm_d, *tmp; + struct debugfs_file *file = qm->debug.files + index; + + tmp = debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file, + &qm_debug_fops); + if (IS_ERR(tmp)) + return -ENOENT; + + file->index = index; + mutex_init(&file->lock); + file->debug = &qm->debug; + + return 0; +} + static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, u32 msi) { @@ -1538,6 +1771,74 @@ int hisi_qm_stop(struct hisi_qm *qm) } EXPORT_SYMBOL_GPL(hisi_qm_stop); +/** + * hisi_qm_debug_init() - Initialize qm related debugfs files. + * @qm: The qm for which we want to add debugfs files. + * + * Create qm related debugfs files. 
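+ * + * Returns 0 on success; on failure any entries already created under the + * qm directory are removed and -ENOENT is returned.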
+ */ +int hisi_qm_debug_init(struct hisi_qm *qm) +{ + struct dentry *qm_d, *qm_regs; + int i, ret; + + qm_d = debugfs_create_dir("qm", qm->debug.debug_root); + if (IS_ERR(qm_d)) + return -ENOENT; + qm->debug.qm_d = qm_d; + + /* only show this in PF */ + if (qm->fun_type == QM_HW_PF) + for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++) + if (qm_create_debugfs_file(qm, i)) { + ret = -ENOENT; + goto failed_to_create; + } + + qm_regs = debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm, + &qm_regs_fops); + if (IS_ERR(qm_regs)) { + ret = -ENOENT; + goto failed_to_create; + } + + return 0; + +failed_to_create: + debugfs_remove_recursive(qm_d); + return ret; +} +EXPORT_SYMBOL_GPL(hisi_qm_debug_init); + +/** + * hisi_qm_debug_regs_clear() - clear qm debug related registers. + * @qm: The qm for which we want to clear its debug registers. + */ +void hisi_qm_debug_regs_clear(struct hisi_qm *qm) +{ + struct qm_dfx_registers *regs; + int i; + + /* clear current_q */ + writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); + writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); + + /* + * these registers are reading and clearing, so clear them after + * reading them. + */ + writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE); + + regs = qm_dfx_regs; + for (i = 0; i < CNT_CYC_REGS_NUM; i++) { + readl(qm->io_base + regs->reg_offset); + regs++; + } + + writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE); +} +EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear); + /** * hisi_qm_hw_error_init() - Configure qm hardware error report method. * @qm: The qm which we want to configure. diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 8b3cb69dffca..70e672ae86bf 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -46,6 +46,13 @@ #define PEH_AXUSER_CFG 0x401001 #define PEH_AXUSER_CFG_ENABLE 0xffffffff +#define QM_DFX_MB_CNT_VF 0x104010 +#define QM_DFX_DB_CNT_VF 0x104020 +#define QM_DFX_SQE_CNT_VF_SQN 0x104030 +#define QM_DFX_CQE_CNT_VF_CQN 0x104040 +#define QM_DFX_QN_SHIFT 16 +#define CURRENT_FUN_MASK GENMASK(5, 0) +#define CURRENT_Q_MASK GENMASK(31, 16) #define QM_AXI_RRESP BIT(0) #define QM_AXI_BRESP BIT(1) @@ -83,6 +90,25 @@ enum qm_fun_type { QM_HW_VF, }; +enum qm_debug_file { + CURRENT_Q, + CLEAR_ENABLE, + DEBUG_FILE_NUM, +}; + +struct debugfs_file { + enum qm_debug_file index; + struct mutex lock; + struct qm_debug *debug; +}; + +struct qm_debug { + u32 curr_qm_qp_num; + struct dentry *debug_root; + struct dentry *qm_d; + struct debugfs_file files[DEBUG_FILE_NUM]; +}; + struct qm_dma { void *va; dma_addr_t dma; @@ -128,6 +154,8 @@ struct hisi_qm { const struct hisi_qm_hw_ops *ops; + struct qm_debug debug; + u32 error_mask; u32 msi_mask; @@ -183,4 +211,5 @@ void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, u32 msi); int hisi_qm_hw_error_handle(struct hisi_qm *qm); enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev); +void hisi_qm_debug_regs_clear(struct hisi_qm *qm); #endif diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index b3e4f1ab9240..6e0ca75585d4 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -3,11 +3,13 @@ #include #include #include +#include #include #include #include #include #include +#include #include #include "zip.h" @@ -32,6 +34,7 @@ DECOMP2_ENABLE | DECOMP3_ENABLE | \ DECOMP4_ENABLE | DECOMP5_ENABLE) #define DECOMP_CHECK_ENABLE BIT(16) +#define HZIP_FSM_MAX_CNT 0x301008 #define HZIP_PORT_ARCA_CHE_0 0x301040 #define HZIP_PORT_ARCA_CHE_1 
0x301044 @@ -45,7 +48,16 @@ #define HZIP_DATA_WUSER_32_63 0x301134 #define HZIP_BD_WUSER_32_63 0x301140 +#define HZIP_QM_IDEL_STATUS 0x3040e4 +#define HZIP_CORE_DEBUG_COMP_0 0x302000 +#define HZIP_CORE_DEBUG_COMP_1 0x303000 +#define HZIP_CORE_DEBUG_DECOMP_0 0x304000 +#define HZIP_CORE_DEBUG_DECOMP_1 0x305000 +#define HZIP_CORE_DEBUG_DECOMP_2 0x306000 +#define HZIP_CORE_DEBUG_DECOMP_3 0x307000 +#define HZIP_CORE_DEBUG_DECOMP_4 0x308000 +#define HZIP_CORE_DEBUG_DECOMP_5 0x309000 #define HZIP_CORE_INT_SOURCE 0x3010A0 #define HZIP_CORE_INT_MASK 0x3010A4 @@ -55,14 +67,23 @@ #define SRAM_ECC_ERR_NUM_SHIFT 16 #define SRAM_ECC_ERR_ADDR_SHIFT 24 #define HZIP_CORE_INT_DISABLE 0x000007FF +#define HZIP_COMP_CORE_NUM 2 +#define HZIP_DECOMP_CORE_NUM 6 +#define HZIP_CORE_NUM (HZIP_COMP_CORE_NUM + \ + HZIP_DECOMP_CORE_NUM) #define HZIP_SQE_SIZE 128 +#define HZIP_SQ_SIZE (HZIP_SQE_SIZE * QM_Q_DEPTH) #define HZIP_PF_DEF_Q_NUM 64 #define HZIP_PF_DEF_Q_BASE 0 +#define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000 +#define SOFT_CTRL_CNT_CLR_CE_BIT BIT(0) #define HZIP_NUMA_DISTANCE 100 +#define HZIP_BUF_SIZE 22 static const char hisi_zip_name[] = "hisi_zip"; +static struct dentry *hzip_debugfs_root; LIST_HEAD(hisi_zip_list); DEFINE_MUTEX(hisi_zip_list_lock); @@ -121,6 +142,23 @@ static const struct hisi_zip_hw_error zip_hw_error[] = { { /* sentinel */ } }; +enum ctrl_debug_file_index { + HZIP_CURRENT_QM, + HZIP_CLEAR_ENABLE, + HZIP_DEBUG_FILE_NUM, +}; + +static const char * const ctrl_debug_file_name[] = { + [HZIP_CURRENT_QM] = "current_qm", + [HZIP_CLEAR_ENABLE] = "clear_enable", +}; + +struct ctrl_debug_file { + enum ctrl_debug_file_index index; + spinlock_t lock; + struct hisi_zip_ctrl *ctrl; +}; + /* * One ZIP controller has one PF and multiple VFs, some global configurations * which PF has need this structure. 
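The hunks below expose per-core register dumps through debugfs_create_regset32(): each compression/decompression core gets a read-only regs file built from the hzip_dfx_regs offsets, readable with e.g. cat /sys/kernel/debug/hisi_zip/<pci-bdf>/comp_core0/regs (the exact layout is documented in the ABI file cited in the commit message).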
@@ -130,6 +168,55 @@ static const struct hisi_zip_hw_error zip_hw_error[] = { struct hisi_zip_ctrl { u32 num_vfs; struct hisi_zip *hisi_zip; + struct dentry *debug_root; + struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM]; +}; + +enum { + HZIP_COMP_CORE0, + HZIP_COMP_CORE1, + HZIP_DECOMP_CORE0, + HZIP_DECOMP_CORE1, + HZIP_DECOMP_CORE2, + HZIP_DECOMP_CORE3, + HZIP_DECOMP_CORE4, + HZIP_DECOMP_CORE5, +}; + +static const u64 core_offsets[] = { + [HZIP_COMP_CORE0] = 0x302000, + [HZIP_COMP_CORE1] = 0x303000, + [HZIP_DECOMP_CORE0] = 0x304000, + [HZIP_DECOMP_CORE1] = 0x305000, + [HZIP_DECOMP_CORE2] = 0x306000, + [HZIP_DECOMP_CORE3] = 0x307000, + [HZIP_DECOMP_CORE4] = 0x308000, + [HZIP_DECOMP_CORE5] = 0x309000, +}; + +static struct debugfs_reg32 hzip_dfx_regs[] = { + {"HZIP_GET_BD_NUM ", 0x00ull}, + {"HZIP_GET_RIGHT_BD ", 0x04ull}, + {"HZIP_GET_ERROR_BD ", 0x08ull}, + {"HZIP_DONE_BD_NUM ", 0x0cull}, + {"HZIP_WORK_CYCLE ", 0x10ull}, + {"HZIP_IDLE_CYCLE ", 0x18ull}, + {"HZIP_MAX_DELAY ", 0x20ull}, + {"HZIP_MIN_DELAY ", 0x24ull}, + {"HZIP_AVG_DELAY ", 0x28ull}, + {"HZIP_MEM_VISIBLE_DATA ", 0x30ull}, + {"HZIP_MEM_VISIBLE_ADDR ", 0x34ull}, + {"HZIP_COMSUMED_BYTE ", 0x38ull}, + {"HZIP_PRODUCED_BYTE ", 0x40ull}, + {"HZIP_COMP_INF ", 0x70ull}, + {"HZIP_PRE_OUT ", 0x78ull}, + {"HZIP_BD_RD ", 0x7cull}, + {"HZIP_BD_WR ", 0x80ull}, + {"HZIP_GET_BD_AXI_ERR_NUM ", 0x84ull}, + {"HZIP_GET_BD_PARSE_ERR_NUM ", 0x88ull}, + {"HZIP_ADD_BD_AXI_ERR_NUM ", 0x8cull}, + {"HZIP_DECOMP_STF_RELOAD_CURR_ST ", 0x94ull}, + {"HZIP_DECOMP_LZ77_CURR_ST ", 0x9cull}, }; static int pf_q_num_set(const char *val, const struct kernel_param *kp) @@ -266,6 +353,265 @@ static void hisi_zip_hw_error_set_state(struct hisi_zip *hisi_zip, bool state) } } +static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file) +{ + struct hisi_zip *hisi_zip = file->ctrl->hisi_zip; + + return &hisi_zip->qm; +} + +static u32 current_qm_read(struct ctrl_debug_file *file) +{ + struct hisi_qm *qm = file_to_qm(file); + + return readl(qm->io_base + QM_DFX_MB_CNT_VF); +} + +static int current_qm_write(struct ctrl_debug_file *file, u32 val) +{ + struct hisi_qm *qm = file_to_qm(file); + struct hisi_zip_ctrl *ctrl = file->ctrl; + u32 vfq_num; + u32 tmp; + + if (val > ctrl->num_vfs) + return -EINVAL; + + /* Calculate curr_qm_qp_num and store */ + if (val == 0) { + qm->debug.curr_qm_qp_num = qm->qp_num; + } else { + vfq_num = (qm->ctrl_qp_num - qm->qp_num) / ctrl->num_vfs; + if (val == ctrl->num_vfs) + qm->debug.curr_qm_qp_num = qm->ctrl_qp_num - + qm->qp_num - (ctrl->num_vfs - 1) * vfq_num; + else + qm->debug.curr_qm_qp_num = vfq_num; + } + + writel(val, qm->io_base + QM_DFX_MB_CNT_VF); + writel(val, qm->io_base + QM_DFX_DB_CNT_VF); + + tmp = val | + (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); + writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); + + tmp = val | + (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); + writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); + + return 0; +} + +static u32 clear_enable_read(struct ctrl_debug_file *file) +{ + struct hisi_qm *qm = file_to_qm(file); + + return readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) & + SOFT_CTRL_CNT_CLR_CE_BIT; +} + +static int clear_enable_write(struct ctrl_debug_file *file, u32 val) +{ + struct hisi_qm *qm = file_to_qm(file); + u32 tmp; + + if (val != 1 && val != 0) + return -EINVAL; + + tmp = (readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) & + ~SOFT_CTRL_CNT_CLR_CE_BIT) | val; + writel(tmp, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE); + + return 0; +} + +static ssize_t 
ctrl_debug_read(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + struct ctrl_debug_file *file = filp->private_data; + char tbuf[HZIP_BUF_SIZE]; + u32 val; + int ret; + + spin_lock_irq(&file->lock); + switch (file->index) { + case HZIP_CURRENT_QM: + val = current_qm_read(file); + break; + case HZIP_CLEAR_ENABLE: + val = clear_enable_read(file); + break; + default: + spin_unlock_irq(&file->lock); + return -EINVAL; + } + spin_unlock_irq(&file->lock); + ret = sprintf(tbuf, "%u\n", val); + return simple_read_from_buffer(buf, count, pos, tbuf, ret); +} + +static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct ctrl_debug_file *file = filp->private_data; + char tbuf[HZIP_BUF_SIZE]; + unsigned long val; + int len, ret; + + if (*pos != 0) + return 0; + + if (count >= HZIP_BUF_SIZE) + return -ENOSPC; + + len = simple_write_to_buffer(tbuf, HZIP_BUF_SIZE - 1, pos, buf, count); + if (len < 0) + return len; + + tbuf[len] = '\0'; + if (kstrtoul(tbuf, 0, &val)) + return -EFAULT; + + spin_lock_irq(&file->lock); + switch (file->index) { + case HZIP_CURRENT_QM: + ret = current_qm_write(file, val); + if (ret) + goto err_input; + break; + case HZIP_CLEAR_ENABLE: + ret = clear_enable_write(file, val); + if (ret) + goto err_input; + break; + default: + ret = -EINVAL; + goto err_input; + } + spin_unlock_irq(&file->lock); + + return count; + +err_input: + spin_unlock_irq(&file->lock); + return ret; +} + +static const struct file_operations ctrl_debug_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ctrl_debug_read, + .write = ctrl_debug_write, +}; + +static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl) +{ + struct hisi_zip *hisi_zip = ctrl->hisi_zip; + struct hisi_qm *qm = &hisi_zip->qm; + struct device *dev = &qm->pdev->dev; + struct debugfs_regset32 *regset; + struct dentry *tmp_d, *tmp; + char buf[HZIP_BUF_SIZE]; + int i; + + for (i = 0; i < HZIP_CORE_NUM; i++) { + if (i < HZIP_COMP_CORE_NUM) + sprintf(buf, "comp_core%d", i); + else + sprintf(buf, "decomp_core%d", i - HZIP_COMP_CORE_NUM); + + tmp_d = debugfs_create_dir(buf, ctrl->debug_root); + if (!tmp_d) + return -ENOENT; + + regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); + if (!regset) + return -ENOENT; + + regset->regs = hzip_dfx_regs; + regset->nregs = ARRAY_SIZE(hzip_dfx_regs); + regset->base = qm->io_base + core_offsets[i]; + + tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset); + if (!tmp) + return -ENOENT; + } + + return 0; +} + +static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl) +{ + struct dentry *tmp; + int i; + + for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) { + spin_lock_init(&ctrl->files[i].lock); + ctrl->files[i].ctrl = ctrl; + ctrl->files[i].index = i; + + tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600, + ctrl->debug_root, ctrl->files + i, + &ctrl_debug_fops); + if (!tmp) + return -ENOENT; + } + + return hisi_zip_core_debug_init(ctrl); +} + +static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip) +{ + struct hisi_qm *qm = &hisi_zip->qm; + struct device *dev = &qm->pdev->dev; + struct dentry *dev_d; + int ret; + + dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root); + if (!dev_d) + return -ENOENT; + + qm->debug.debug_root = dev_d; + ret = hisi_qm_debug_init(qm); + if (ret) + goto failed_to_create; + + if (qm->fun_type == QM_HW_PF) { + hisi_zip->ctrl->debug_root = dev_d; + ret = hisi_zip_ctrl_debug_init(hisi_zip->ctrl); + if (ret) + goto failed_to_create; + } + + return 0; + 
+failed_to_create: + debugfs_remove_recursive(hzip_debugfs_root); + return ret; +} + +static void hisi_zip_debug_regs_clear(struct hisi_zip *hisi_zip) +{ + struct hisi_qm *qm = &hisi_zip->qm; + + writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); + writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); + writel(0x0, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE); + + hisi_qm_debug_regs_clear(qm); +} + +static void hisi_zip_debugfs_exit(struct hisi_zip *hisi_zip) +{ + struct hisi_qm *qm = &hisi_zip->qm; + + debugfs_remove_recursive(qm->debug.debug_root); + + if (qm->fun_type == QM_HW_PF) + hisi_zip_debug_regs_clear(hisi_zip); +} + static void hisi_zip_hw_error_init(struct hisi_zip *hisi_zip) { hisi_qm_hw_error_init(&hisi_zip->qm, QM_BASE_CE, @@ -301,6 +647,7 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) hisi_zip_set_user_domain_and_cache(hisi_zip); hisi_zip_hw_error_init(hisi_zip); + hisi_zip_debug_regs_clear(hisi_zip); return 0; } @@ -376,6 +723,10 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto err_qm_uninit; + ret = hisi_zip_debugfs_init(hisi_zip); + if (ret) + dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret); + hisi_zip_add_to_list(hisi_zip); return 0; @@ -501,6 +852,7 @@ static void hisi_zip_remove(struct pci_dev *pdev) if (qm->fun_type == QM_HW_PF && hisi_zip->ctrl->num_vfs != 0) hisi_zip_sriov_disable(pdev); + hisi_zip_debugfs_exit(hisi_zip); hisi_qm_stop(qm); if (qm->fun_type == QM_HW_PF) @@ -600,14 +952,31 @@ static struct pci_driver hisi_zip_pci_driver = { .err_handler = &hisi_zip_err_handler, }; +static void hisi_zip_register_debugfs(void) +{ + if (!debugfs_initialized()) + return; + + hzip_debugfs_root = debugfs_create_dir("hisi_zip", NULL); + if (IS_ERR_OR_NULL(hzip_debugfs_root)) + hzip_debugfs_root = NULL; +} + +static void hisi_zip_unregister_debugfs(void) +{ + debugfs_remove_recursive(hzip_debugfs_root); +} + static int __init hisi_zip_init(void) { int ret; + hisi_zip_register_debugfs(); + ret = pci_register_driver(&hisi_zip_pci_driver); if (ret < 0) { pr_err("Failed to register pci driver.\n"); - return ret; + goto err_pci; } if (uacce_mode == 0 || uacce_mode == 2) { @@ -622,6 +991,9 @@ static int __init hisi_zip_init(void) err_crypto: pci_unregister_driver(&hisi_zip_pci_driver); +err_pci: + hisi_zip_unregister_debugfs(); + return ret; } @@ -630,6 +1002,7 @@ static void __exit hisi_zip_exit(void) if (uacce_mode == 0 || uacce_mode == 2) hisi_zip_unregister_from_crypto(); pci_unregister_driver(&hisi_zip_pci_driver); + hisi_zip_unregister_debugfs(); } module_init(hisi_zip_init); -- cgit From 0866ba23b7efcc6837d6b4231bf91b79647b81ea Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 2 Aug 2019 21:27:58 +0800 Subject: crypto: artpec6 - use devm_platform_ioremap_resource() to simplify code Use devm_platform_ioremap_resource() to simplify the code a bit. This is detected by coccinelle. 
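The same one-line conversion repeats across this whole run of cleanups, so it is worth spelling out once. Below is a minimal sketch of the before/after shape in a generic probe function; the "foo" names are placeholders and are not taken from any of the drivers touched here:

#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/*
	 * Before the cleanup this was two calls plus a local:
	 *
	 *	struct resource *res;
	 *
	 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	 *	base = devm_ioremap_resource(&pdev->dev, res);
	 *
	 * devm_platform_ioremap_resource() does the resource lookup and
	 * the mapping in one step, so the struct resource local goes away.
	 */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}

The error handling carries over unchanged, since devm_platform_ioremap_resource() returns the same ERR_PTR values on failure that devm_ioremap_resource() does.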
Reported-by: Hulk Robot Signed-off-by: YueHaibing Signed-off-by: Herbert Xu --- drivers/crypto/axis/artpec6_crypto.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index 80fa04ef215f..4b20606983a4 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -2854,7 +2854,6 @@ static int artpec6_crypto_probe(struct platform_device *pdev) struct artpec6_crypto *ac; struct device *dev = &pdev->dev; void __iomem *base; - struct resource *res; int irq; int err; @@ -2867,8 +2866,7 @@ static int artpec6_crypto_probe(struct platform_device *pdev) variant = (enum artpec6_crypto_variant)match->data; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(&pdev->dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); -- cgit From 17729e56f9b8f9f0a057ea16571cdea0457900f0 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 2 Aug 2019 21:27:59 +0800 Subject: crypto: ccp - use devm_platform_ioremap_resource() to simplify code Use devm_platform_ioremap_resource() to simplify the code a bit. This is detected by coccinelle. Reported-by: Hulk Robot Signed-off-by: YueHaibing Signed-off-by: Herbert Xu --- drivers/crypto/ccp/sp-platform.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c index 1b45236e3716..831aac1393a2 100644 --- a/drivers/crypto/ccp/sp-platform.c +++ b/drivers/crypto/ccp/sp-platform.c @@ -125,7 +125,6 @@ static int sp_platform_probe(struct platform_device *pdev) struct sp_platform *sp_platform; struct device *dev = &pdev->dev; enum dev_dma_attr attr; - struct resource *ior; int ret; ret = -ENOMEM; @@ -146,8 +145,7 @@ static int sp_platform_probe(struct platform_device *pdev) goto e_err; } - ior = platform_get_resource(pdev, IORESOURCE_MEM, 0); - sp->io_map = devm_ioremap_resource(dev, ior); + sp->io_map = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(sp->io_map)) { ret = PTR_ERR(sp->io_map); goto e_err; -- cgit From 6d1c0186f392e77b9e873490bed4d27b2fe580d7 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 2 Aug 2019 21:28:00 +0800 Subject: crypto: exynos - use devm_platform_ioremap_resource() to simplify code Use devm_platform_ioremap_resource() to simplify the code a bit. This is detected by coccinelle. 
Reported-by: Hulk Robot Signed-off-by: YueHaibing Reviewed-by: Krzysztof Kozlowski Signed-off-by: Herbert Xu --- drivers/crypto/exynos-rng.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c index 2cfabb99cb6e..cbd8ca6e52ee 100644 --- a/drivers/crypto/exynos-rng.c +++ b/drivers/crypto/exynos-rng.c @@ -268,7 +268,6 @@ static struct rng_alg exynos_rng_alg = { static int exynos_rng_probe(struct platform_device *pdev) { struct exynos_rng_dev *rng; - struct resource *res; int ret; if (exynos_rng_dev) @@ -289,8 +288,7 @@ static int exynos_rng_probe(struct platform_device *pdev) return PTR_ERR(rng->clk); } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - rng->mem = devm_ioremap_resource(&pdev->dev, res); + rng->mem = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(rng->mem)) return PTR_ERR(rng->mem); -- cgit From f78c7123ff4cfb90572633d79d85e20f29f87112 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 2 Aug 2019 21:28:01 +0800 Subject: crypto: img-hash - use devm_platform_ioremap_resource() to simplify code Use devm_platform_ioremap_resource() to simplify the code a bit. This is detected by coccinelle. Reported-by: Hulk Robot Signed-off-by: YueHaibing Signed-off-by: Herbert Xu --- drivers/crypto/img-hash.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index 6754eaafdc85..fe4cc8babe1c 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -958,9 +958,7 @@ static int img_hash_probe(struct platform_device *pdev) crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH); /* Register bank */ - hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - hdev->io_base = devm_ioremap_resource(dev, hash_res); + hdev->io_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(hdev->io_base)) { err = PTR_ERR(hdev->io_base); dev_err(dev, "can't ioremap, returned %d\n", err); -- cgit From f8dab5575b76ea01b300e4b3a145955763ed61a9 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 2 Aug 2019 21:28:02 +0800 Subject: crypto: inside-secure - use devm_platform_ioremap_resource() to simplify code Use devm_platform_ioremap_resource() to simplify the code a bit. This is detected by coccinelle. 
Reported-by: Hulk Robot Signed-off-by: YueHaibing Acked-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index d1f60fd7e91a..822744dc9c21 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -999,7 +999,6 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv) static int safexcel_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct resource *res; struct safexcel_crypto_priv *priv; int i, ret; @@ -1015,8 +1014,7 @@ static int safexcel_probe(struct platform_device *pdev) safexcel_init_register_offsets(priv); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(dev, res); + priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) { dev_err(dev, "failed to get resource\n"); return PTR_ERR(priv->base); -- cgit From b26120fdb9326052cc0bffe6a52c19a7145d3851 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 2 Aug 2019 21:28:03 +0800 Subject: crypto: mediatek - use devm_platform_ioremap_resource() to simplify code Use devm_platform_ioremap_resource() to simplify the code a bit. This is detected by coccinelle. Reported-by: Hulk Robot Signed-off-by: YueHaibing Signed-off-by: Herbert Xu --- drivers/crypto/mediatek/mtk-platform.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c index 4f43318ca14b..7e3ad085b5bd 100644 --- a/drivers/crypto/mediatek/mtk-platform.c +++ b/drivers/crypto/mediatek/mtk-platform.c @@ -481,7 +481,6 @@ err_cleanup: static int mtk_crypto_probe(struct platform_device *pdev) { - struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct mtk_cryp *cryp; int i, err; @@ -489,7 +488,7 @@ static int mtk_crypto_probe(struct platform_device *pdev) if (!cryp) return -ENOMEM; - cryp->base = devm_ioremap_resource(&pdev->dev, res); + cryp->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(cryp->base)) return PTR_ERR(cryp->base); -- cgit From 9a8e0a513b2b07aad90182cce102032437795778 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 2 Aug 2019 21:28:04 +0800 Subject: crypto: picoxcell - use devm_platform_ioremap_resource() to simplify code Use devm_platform_ioremap_resource() to simplify the code a bit. This is detected by coccinelle. 
Reported-by: Hulk Robot Signed-off-by: YueHaibing Acked-by: Jamie Iles Signed-off-by: Herbert Xu --- drivers/crypto/picoxcell_crypto.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index b985cb85c9bc..9a939b4fd32f 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -1624,7 +1624,7 @@ MODULE_DEVICE_TABLE(of, spacc_of_id_table); static int spacc_probe(struct platform_device *pdev) { int i, err, ret; - struct resource *mem, *irq; + struct resource *irq; struct device_node *np = pdev->dev.of_node; struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine), GFP_KERNEL); @@ -1653,8 +1653,7 @@ static int spacc_probe(struct platform_device *pdev) engine->name = dev_name(&pdev->dev); - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - engine->regs = devm_ioremap_resource(&pdev->dev, mem); + engine->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(engine->regs)) return PTR_ERR(engine->regs); -- cgit From cf68528f9aaae3e5022bf9a4c71769ad48636c89 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 2 Aug 2019 21:28:05 +0800 Subject: crypto: sunxi-ss - use devm_platform_ioremap_resource() to simplify code Use devm_platform_ioremap_resource() to simplify the code a bit. This is detected by coccinelle. Reported-by: Hulk Robot Signed-off-by: YueHaibing Signed-off-by: Herbert Xu --- drivers/crypto/sunxi-ss/sun4i-ss-core.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c index 2e8704271f45..9aa6fe081a27 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c @@ -225,7 +225,6 @@ static struct sun4i_ss_alg_template ss_algs[] = { static int sun4i_ss_probe(struct platform_device *pdev) { - struct resource *res; u32 v; int err, i; unsigned long cr; @@ -240,8 +239,7 @@ static int sun4i_ss_probe(struct platform_device *pdev) if (!ss) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ss->base = devm_ioremap_resource(&pdev->dev, res); + ss->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ss->base)) { dev_err(&pdev->dev, "Cannot request MMIO\n"); return PTR_ERR(ss->base); -- cgit From 721744730ab06079df8197a700dc8929dadfcebc Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 2 Aug 2019 21:28:06 +0800 Subject: crypto: rockchip - use devm_platform_ioremap_resource() to simplify code Use devm_platform_ioremap_resource() to simplify the code a bit. This is detected by coccinelle. 
Reported-by: Hulk Robot Signed-off-by: YueHaibing Reviewed-by: Heiko Stuebner Signed-off-by: Herbert Xu --- drivers/crypto/rockchip/rk3288_crypto.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c index 8d7e2545e65a..e5714ef24bf2 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.c +++ b/drivers/crypto/rockchip/rk3288_crypto.c @@ -311,7 +311,6 @@ MODULE_DEVICE_TABLE(of, crypto_of_id_table); static int rk_crypto_probe(struct platform_device *pdev) { - struct resource *res; struct device *dev = &pdev->dev; struct rk_crypto_info *crypto_info; int err = 0; @@ -339,8 +338,7 @@ static int rk_crypto_probe(struct platform_device *pdev) spin_lock_init(&crypto_info->lock); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - crypto_info->reg = devm_ioremap_resource(&pdev->dev, res); + crypto_info->reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(crypto_info->reg)) { err = PTR_ERR(crypto_info->reg); goto err_crypto; -- cgit From 473b4d995963b158f1713274c7992aed9240eb45 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 2 Aug 2019 21:28:07 +0800 Subject: crypto: stm32 - use devm_platform_ioremap_resource() to simplify code Use devm_platform_ioremap_resource() to simplify the code a bit. This is detected by coccinelle. Reported-by: Hulk Robot Signed-off-by: YueHaibing Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-crc32.c | 4 +--- drivers/crypto/stm32/stm32-cryp.c | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c index 440c9f1bd006..9e11c3480353 100644 --- a/drivers/crypto/stm32/stm32-crc32.c +++ b/drivers/crypto/stm32/stm32-crc32.c @@ -255,7 +255,6 @@ static int stm32_crc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct stm32_crc *crc; - struct resource *res; int ret; crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL); @@ -264,8 +263,7 @@ static int stm32_crc_probe(struct platform_device *pdev) crc->dev = dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - crc->regs = devm_ioremap_resource(dev, res); + crc->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(crc->regs)) { dev_err(dev, "Cannot map CRC IO\n"); return PTR_ERR(crc->regs); diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c index 72f86063b046..5cf6679da580 100644 --- a/drivers/crypto/stm32/stm32-cryp.c +++ b/drivers/crypto/stm32/stm32-cryp.c @@ -1955,7 +1955,6 @@ static int stm32_cryp_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct stm32_cryp *cryp; - struct resource *res; struct reset_control *rst; int irq, ret; @@ -1969,8 +1968,7 @@ static int stm32_cryp_probe(struct platform_device *pdev) cryp->dev = dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - cryp->regs = devm_ioremap_resource(dev, res); + cryp->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(cryp->regs)) return PTR_ERR(cryp->regs); -- cgit From a54d83d42890e986018d0957fdd683df536505aa Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 2 Aug 2019 21:28:08 +0800 Subject: crypto: qce - use devm_platform_ioremap_resource() to simplify code Use devm_platform_ioremap_resource() to simplify the code a bit. This is detected by coccinelle. 
Reported-by: Hulk Robot Signed-off-by: YueHaibing Signed-off-by: Herbert Xu --- drivers/crypto/qce/core.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c index ef1d74e8ddb2..08d4ce3bfddf 100644 --- a/drivers/crypto/qce/core.c +++ b/drivers/crypto/qce/core.c @@ -167,7 +167,6 @@ static int qce_crypto_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct qce_device *qce; - struct resource *res; int ret; qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL); @@ -177,8 +176,7 @@ static int qce_crypto_probe(struct platform_device *pdev) qce->dev = dev; platform_set_drvdata(pdev, qce); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - qce->base = devm_ioremap_resource(&pdev->dev, res); + qce->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(qce->base)) return PTR_ERR(qce->base); -- cgit From 2229c74079285c8f99754450e56d4d9814558e57 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 2 Aug 2019 21:28:09 +0800 Subject: crypto: qcom-rng - use devm_platform_ioremap_resource() to simplify code Use devm_platform_ioremap_resource() to simplify the code a bit. This is detected by coccinelle. Reported-by: Hulk Robot Signed-off-by: YueHaibing Signed-off-by: Herbert Xu --- drivers/crypto/qcom-rng.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c index e54249ccc009..4730f84b646d 100644 --- a/drivers/crypto/qcom-rng.c +++ b/drivers/crypto/qcom-rng.c @@ -153,7 +153,6 @@ static struct rng_alg qcom_rng_alg = { static int qcom_rng_probe(struct platform_device *pdev) { - struct resource *res; struct qcom_rng *rng; int ret; @@ -164,8 +163,7 @@ static int qcom_rng_probe(struct platform_device *pdev) platform_set_drvdata(pdev, rng); mutex_init(&rng->lock); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - rng->base = devm_ioremap_resource(&pdev->dev, res); + rng->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(rng->base)) return PTR_ERR(rng->base); -- cgit From f6b0b78a5c6c198b31967bfc2c7d17150980a0f6 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Fri, 2 Aug 2019 18:20:11 -0500 Subject: crypto: ccp - Include DMA declarations explicitly ccp-dev.h uses dma_direction, which is defined in linux/dma-direction.h. Include that explicitly instead of relying on it being included via linux/pci.h, since ccp-dev.h requires nothing else from linux/pci.h. Similarly, ccp-dmaengine.c uses dma_get_mask(), which is defined in linux/dma-mapping.h, so include that explicitly since it requires nothing else from linux/pci.h. A future patch will remove the includes of linux/pci.h where it is not needed. 
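The principle here is include-what-you-use: a header should pull in its own dependencies rather than lean on whatever another include happens to drag along. A hypothetical sketch; the file and struct names below are made up:

/* foo.h - hypothetical header that uses dma_data_direction directly. */
#ifndef __FOO_H__
#define __FOO_H__

#include <linux/dma-direction.h>	/* for enum dma_data_direction */

struct foo_buf_desc {
	enum dma_data_direction dir;	/* breaks if the include above goes away */
};

#endif /* __FOO_H__ */

If foo.h instead relied on linux/pci.h transitively providing dma_data_direction, dropping the pci.h include later (as the next patch does) would break the build in a place far from the actual change.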
Signed-off-by: Bjorn Helgaas Acked-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev.h | 1 + drivers/crypto/ccp/ccp-dmaengine.c | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index dd13468111cd..2fe5713fd40e 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -17,6 +17,7 @@ #include #include #include +#include <linux/dma-direction.h> #include #include #include diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index 9d077c42bcbe..a54f9367a580 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c @@ -9,6 +9,7 @@ #include #include +#include <linux/dma-mapping.h> #include #include #include -- cgit From 3a646b6ed49cc64f0dc0a1bba24e62f7b557e7d7 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Fri, 2 Aug 2019 18:20:12 -0500 Subject: crypto: ccp - Remove unnecessary linux/pci.h include Remove unused includes of linux/pci.h. Signed-off-by: Bjorn Helgaas Acked-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-crypto.h | 1 - drivers/crypto/ccp/ccp-dev-v3.c | 1 - drivers/crypto/ccp/ccp-dev-v5.c | 1 - drivers/crypto/ccp/ccp-dev.h | 1 - drivers/crypto/ccp/ccp-ops.c | 1 - drivers/crypto/ccp/psp-dev.h | 1 - drivers/crypto/ccp/sp-dev.h | 1 - 7 files changed, 7 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index 599f7f2820a0..9015b5da6ba3 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -12,7 +12,6 @@ #include #include -#include <linux/pci.h> #include #include #include diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c index 4005d438dff9..0186b3df4c87 100644 --- a/drivers/crypto/ccp/ccp-dev-v3.c +++ b/drivers/crypto/ccp/ccp-dev-v3.c @@ -10,7 +10,6 @@ #include #include -#include <linux/pci.h> #include #include #include diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 9ee72cf46a0f..57eb53b8ac21 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -8,7 +8,6 @@ */ #include -#include <linux/pci.h> #include #include #include diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 2fe5713fd40e..3f68262d9ab4 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -12,7 +12,6 @@ #define __CCP_DEV_H__ #include -#include <linux/pci.h> #include #include #include diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 42d167574131..591584a95028 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -10,7 +10,6 @@ #include #include -#include <linux/pci.h> #include #include #include diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h index c5e06c92d40e..82a084f02990 100644 --- a/drivers/crypto/ccp/psp-dev.h +++ b/drivers/crypto/ccp/psp-dev.h @@ -11,7 +11,6 @@ #define __PSP_DEV_H__ #include -#include <linux/pci.h> #include #include #include diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h index 8abe9ea7e76f..53c12562d31e 100644 --- a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h @@ -13,7 +13,6 @@ #define __SP_DEV_H__ #include -#include <linux/pci.h> #include #include #include -- cgit From 00ae05db583a7b5d5575bb601158139e25af3b02 Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Wed, 14 Aug 2019 17:28:35 +0800 Subject: crypto: hisilicon - fix kbuild warnings Use the proper printk format specifiers for dma_addr_t and size_t arguments: %pad for dma_addr_t and %zx for size_t.
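For reference, %pad and %zx are the specifiers documented in Documentation/core-api/printk-formats.rst for dma_addr_t and size_t. A minimal sketch with a made-up message; note that %pad consumes a pointer to the dma_addr_t, not the value itself:

#include <linux/device.h>
#include <linux/types.h>

static void foo_show_buf(struct device *dev, dma_addr_t dma, size_t size)
{
	/*
	 * %pad dereferences a dma_addr_t pointer, so it prints correctly
	 * whether dma_addr_t is 32 or 64 bits wide on the target arch;
	 * %zx matches size_t on all architectures.
	 */
	dev_dbg(dev, "buf dma=%pad size=%zx\n", &dma, size);
}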
Fixes: 263c9959c937 ("crypto: hisilicon - add queue management driver for HiSilicon QM module") Reported-by: kbuild test robot Signed-off-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 363f71b77aed..796fdbfacb19 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -347,7 +347,7 @@ static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, struct qm_mailbox mailbox; int ret = 0; - dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n", queue, + dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%pad\n", queue, cmd, dma_addr); mailbox.w0 = cmd | @@ -1137,7 +1137,7 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) goto err_clear_bit; } - dev_dbg(dev, "allocate qp dma buf(va=%pK, dma=%pad, size=%lx)\n", + dev_dbg(dev, "allocate qp dma buf(va=%pK, dma=%pad, size=%zx)\n", qp->qdma.va, &qp->qdma.dma, qp->qdma.size); } @@ -1714,7 +1714,7 @@ int hisi_qm_start(struct hisi_qm *qm) QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma, GFP_KERNEL); - dev_dbg(dev, "allocate qm dma buf(va=%pK, dma=%pad, size=%lx)\n", + dev_dbg(dev, "allocate qm dma buf(va=%pK, dma=%pad, size=%zx)\n", qm->qdma.va, &qm->qdma.dma, qm->qdma.size); if (!qm->qdma.va) return -ENOMEM; -- cgit From db01e4818bbdab626c78747dd8f494d586ac968d Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Wed, 14 Aug 2019 17:28:36 +0800 Subject: crypto: hisilicon - add dependency for CRYPTO_DEV_HISI_ZIP Add ARM64, PCI and PCI_MSI dependencies for CRYPTO_DEV_HISI_ZIP. Fixes: 62c455ca853e ("crypto: hisilicon - add HiSilicon ZIP accelerator support") Reported-by: kbuild test robot Signed-off-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index 19293172b408..fa8aa063bb65 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -30,6 +30,7 @@ config CRYPTO_HISI_SGL config CRYPTO_DEV_HISI_ZIP tristate "Support for HiSilicon ZIP accelerator" + depends on ARM64 && PCI && PCI_MSI select CRYPTO_DEV_HISI_QM select CRYPTO_HISI_SGL select SG_SPLIT -- cgit From 5c0861989cc197c29433915774f805299a497977 Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Wed, 14 Aug 2019 17:28:37 +0800 Subject: crypto: hisilicon - init curr_sgl_dma to fix compile warning Initialize curr_sgl_dma to 0 to avoid a compile warning about a possibly uninitialized variable.
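The warning being silenced is the compiler's may-be-used-uninitialized diagnostic: curr_sgl_dma is only assigned inside the scatterlist walk, and the compiler cannot prove the walk runs before the value is consumed. A stripped-down sketch of the pattern; the function below is hypothetical, not the driver code:

#include <linux/types.h>

static dma_addr_t foo_last_dma(const dma_addr_t *addrs, int n)
{
	dma_addr_t curr = 0;	/* without "= 0", gcc may warn when it cannot prove n > 0 */
	int i;

	for (i = 0; i < n; i++)
		curr = addrs[i];

	return curr;
}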
Fixes: dfed0098ab91 ("crypto: hisilicon - add hardware SGL support") Reported-by: kbuild test robot Signed-off-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sgl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c index 8ef7679a365e..e083d172b618 100644 --- a/drivers/crypto/hisilicon/sgl.c +++ b/drivers/crypto/hisilicon/sgl.c @@ -150,7 +150,7 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, u32 index, dma_addr_t *hw_sgl_dma) { struct hisi_acc_hw_sgl *curr_hw_sgl; - dma_addr_t curr_sgl_dma; + dma_addr_t curr_sgl_dma = 0; struct acc_hw_sge *curr_hw_sge; struct scatterlist *sg; int sg_n = sg_nents(sgl); -- cgit From 902f0babf5457578f6bfb9b6ac952c55c4482cb7 Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Wed, 14 Aug 2019 17:28:38 +0800 Subject: crypto: hisilicon - add missing single_release Add the missing single_release to qm_regs_fops so that the resources allocated by single_open are freed when the file is closed. Fixes: 263c9959c937 ("crypto: hisilicon - add queue management driver for HiSilicon QM module") Signed-off-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 796fdbfacb19..d72e062a3619 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -958,6 +958,7 @@ static const struct file_operations qm_regs_fops = { .owner = THIS_MODULE, .open = qm_regs_open, .read = seq_read, + .release = single_release, }; static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index) -- cgit From 1ed2002f891dc1082eb110eaafccfde96d327aa3 Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Wed, 14 Aug 2019 17:28:39 +0800 Subject: crypto: hisilicon - fix error handling in hisi_zip_create_req_q Return the error directly when the allocation fails in the first iteration of the loop in hisi_zip_create_req_q, since there is nothing to free at that point. Fixes: 62c455ca853e ("crypto: hisilicon - add HiSilicon ZIP accelerator support") Signed-off-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/zip/zip_crypto.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 303351325a58..5a3f84dcdcde 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -223,8 +223,10 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx) sizeof(long), GFP_KERNEL); if (!req_q->req_bitmap) { ret = -ENOMEM; - if (i == 1) - goto err_free_loop0; + if (i == 0) + return ret; + + goto err_free_loop0; } rwlock_init(&req_q->req_lock); -- cgit From c9fbcf6815ce24c8da828c2a70cd6a16ce242a1b Mon Sep 17 00:00:00 2001 From: Horia Geantă Date: Mon, 5 Aug 2019 15:49:55 +0300 Subject: crypto: caam/qi - execute library only on DPAA 1.x MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the process of turning caam/qi into a library, the check of the MCFGR[QI] bit has been inadvertently dropped. Fix the condition for DPAA 1.x QI detection, which should be: MCFGR[QI] && !MCFGR[DPAA2] A check in the library exit point is currently not needed, since the list of registered algorithms is empty. While here, silence the library initialization abort - since jr.c calls it unconditionally.
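The detection logic in the patch below reduces to a single predicate. Sketched as a standalone helper for clarity; the helper name is invented, but priv->qi_present and caam_dpaa2 are the driver symbols the patch actually tests (both come from the caam driver's intern.h):

#include "intern.h"	/* struct caam_drv_private, caam_dpaa2 */

/* Illustrative only: should the caam/qi library run on this part? */
static bool foo_is_dpaa1_qi(const struct caam_drv_private *priv)
{
	/* DPAA 1.x QI means MCFGR[QI] set and MCFGR[DPAA2] clear. */
	return priv->qi_present && !caam_dpaa2;
}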
Fixes: 1b46c90c8e00 ("crypto: caam - convert top level drivers to libraries") Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg_qi.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 59b59f5e9550..fb54b2c9d337 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -2576,10 +2576,9 @@ int caam_qi_algapi_init(struct device *ctrldev) unsigned int md_limit = SHA512_DIGEST_SIZE; bool registered = false; - if (caam_dpaa2) { - dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n"); - return -ENODEV; - } + /* Make sure this runs only on (DPAA 1.x) QI */ + if (!priv->qi_present || caam_dpaa2) + return 0; /* * Register crypto algorithms the device supports. -- cgit From 03a3bb7ae63150230c5de645dc95e673ebf17e1a Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Mon, 5 Aug 2019 16:32:41 -0700 Subject: hwrng: core - Freeze khwrng thread during suspend The hwrng_fill() function can run while devices are suspending and resuming. If the hwrng is behind a bus such as i2c or SPI and that bus is suspended, the hwrng may hang the bus while attempting to add some randomness. It's been observed on ChromeOS devices with suspend-to-idle (s2idle) and an i2c based hwrng that this kthread may run and ask the hwrng device for randomness before the i2c bus has been resumed. Let's make this kthread freezable so that we don't try to touch the hwrng during suspend/resume. This ensures that we can't cause the hwrng backing driver to get into a bad state because the device is guaranteed to be resumed before the hwrng kthread is thawed. Cc: Andrey Pronin Cc: Duncan Laurie Cc: Jason Gunthorpe Cc: Arnd Bergmann Cc: Greg Kroah-Hartman Cc: Guenter Roeck Cc: Alexander Steffen Signed-off-by: Stephen Boyd Signed-off-by: Herbert Xu --- drivers/char/hw_random/core.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 9044d31ab1a1..bdab5d9af8d2 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -421,7 +422,9 @@ static int hwrng_fillfn(void *unused) { long rc; - while (!kthread_should_stop()) { + set_freezable(); + + while (!kthread_freezable_should_stop(NULL)) { struct hwrng *rng; rng = get_current_rng(); -- cgit From b31c17c852736846c917bbd7f1263ea7258fbdce Mon Sep 17 00:00:00 2001 From: Phani Kiran Hemadri Date: Thu, 8 Aug 2019 12:17:37 +0000 Subject: crypto: cavium/nitrox - Allocate asymmetric crypto command queues This patch adds support to allocate CNN55XX device AQMQ command queues required for submitting asymmetric crypto requests. 
Signed-off-by: Phani Kiran Hemadri Reviewed-by: Srikanth Jampala Signed-off-by: Herbert Xu --- drivers/crypto/cavium/nitrox/nitrox_dev.h | 4 ++ drivers/crypto/cavium/nitrox/nitrox_lib.c | 66 ++++++++++++++++++++++++++++++- drivers/crypto/cavium/nitrox/nitrox_req.h | 30 ++++++++++++++ 3 files changed, 99 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h index 5ee98eca728c..2217a2736c8e 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_dev.h +++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h @@ -10,6 +10,8 @@ #define VERSION_LEN 32 /* Maximum queues in PF mode */ #define MAX_PF_QUEUES 64 +/* Maximum device queues */ +#define MAX_DEV_QUEUES (MAX_PF_QUEUES) /* Maximum UCD Blocks */ #define CNN55XX_MAX_UCD_BLOCKS 8 @@ -208,6 +210,7 @@ enum vf_mode { * @mode: Device mode PF/VF * @ctx_pool: DMA pool for crypto context * @pkt_inq: Packet input rings + * @aqmq: AQM command queues * @qvec: MSI-X queue vectors information * @iov: SR-IOV informatin * @num_vecs: number of MSI-X vectors @@ -234,6 +237,7 @@ struct nitrox_device { struct dma_pool *ctx_pool; struct nitrox_cmdq *pkt_inq; + struct nitrox_cmdq *aqmq[MAX_DEV_QUEUES] ____cacheline_aligned_in_smp; struct nitrox_q_vector *qvec; struct nitrox_iov iov; diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c index 4ace9bcd603a..5cbc64b851b9 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_lib.c +++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c @@ -19,6 +19,8 @@ /* packet inuput ring alignments */ #define PKTIN_Q_ALIGN_BYTES 16 +/* AQM Queue input alignments */ +#define AQM_Q_ALIGN_BYTES 32 static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes) { @@ -57,11 +59,15 @@ static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq) static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq) { - struct nitrox_device *ndev = cmdq->ndev; + struct nitrox_device *ndev; + + if (!cmdq) + return; if (!cmdq->unalign_base) return; + ndev = cmdq->ndev; cancel_work_sync(&cmdq->backlog_qflush); dma_free_coherent(DEV(ndev), cmdq->qsize, @@ -78,6 +84,57 @@ static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq) cmdq->instr_size = 0; } +static void nitrox_free_aqm_queues(struct nitrox_device *ndev) +{ + int i; + + for (i = 0; i < ndev->nr_queues; i++) { + nitrox_cmdq_cleanup(ndev->aqmq[i]); + kzfree(ndev->aqmq[i]); + ndev->aqmq[i] = NULL; + } +} + +static int nitrox_alloc_aqm_queues(struct nitrox_device *ndev) +{ + int i, err; + + for (i = 0; i < ndev->nr_queues; i++) { + struct nitrox_cmdq *cmdq; + u64 offset; + + cmdq = kzalloc_node(sizeof(*cmdq), GFP_KERNEL, ndev->node); + if (!cmdq) { + err = -ENOMEM; + goto aqmq_fail; + } + + cmdq->ndev = ndev; + cmdq->qno = i; + cmdq->instr_size = sizeof(struct aqmq_command_s); + + /* AQM Queue Doorbell Counter Register Address */ + offset = AQMQ_DRBLX(i); + cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset); + /* AQM Queue Commands Completed Count Register Address */ + offset = AQMQ_CMD_CNTX(i); + cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset); + + err = nitrox_cmdq_init(cmdq, AQM_Q_ALIGN_BYTES); + if (err) { + kzfree(cmdq); + goto aqmq_fail; + } + ndev->aqmq[i] = cmdq; + } + + return 0; + +aqmq_fail: + nitrox_free_aqm_queues(ndev); + return err; +} + static void nitrox_free_pktin_queues(struct nitrox_device *ndev) { int i; @@ -222,6 +279,12 @@ int nitrox_common_sw_init(struct nitrox_device *ndev) if (err) destroy_crypto_dma_pool(ndev); + err = 
nitrox_alloc_aqm_queues(ndev); + if (err) { + nitrox_free_pktin_queues(ndev); + destroy_crypto_dma_pool(ndev); + } + return err; } @@ -231,6 +294,7 @@ int nitrox_common_sw_init(struct nitrox_device *ndev) */ void nitrox_common_sw_cleanup(struct nitrox_device *ndev) { + nitrox_free_aqm_queues(ndev); nitrox_free_pktin_queues(ndev); destroy_crypto_dma_pool(ndev); } diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h index efdbd0fc3e3b..f69ba02c4d25 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_req.h +++ b/drivers/crypto/cavium/nitrox/nitrox_req.h @@ -399,6 +399,36 @@ struct nps_pkt_instr { u64 fdata[2]; }; +/** + * struct aqmq_command_s - The 32 byte command for AE processing. + * @opcode: Request opcode + * @param1: Request control parameter 1 + * @param2: Request control parameter 2 + * @dlen: Input length + * @dptr: Input pointer points to buffer in remote host + * @rptr: Result pointer points to buffer in remote host + * @grp: AQM Group (0..7) + * @cptr: Context pointer + */ +struct aqmq_command_s { + __be16 opcode; + __be16 param1; + __be16 param2; + __be16 dlen; + __be64 dptr; + __be64 rptr; + union { + __be64 word3; +#if defined(__BIG_ENDIAN_BITFIELD) + u64 grp : 3; + u64 cptr : 61; +#else + u64 cptr : 61; + u64 grp : 3; +#endif + }; +}; + /** * struct ctx_hdr - Book keeping data about the crypto context * @pool: Pool used to allocate crypto context -- cgit From 5f05cdca2727dd54a3ec84cb291663af6fb68672 Mon Sep 17 00:00:00 2001 From: Phani Kiran Hemadri Date: Thu, 8 Aug 2019 12:17:39 +0000 Subject: crypto: cavium/nitrox - Configure asymmetric queue manager Hardware unit This patch configures and initializes CNN55XX device AQM hardware unit. Signed-off-by: Phani Kiran Hemadri Reviewed-by: Srikanth Jampala Signed-off-by: Herbert Xu --- drivers/crypto/cavium/nitrox/nitrox_csr.h | 111 +++++++++++++++++++ drivers/crypto/cavium/nitrox/nitrox_hal.c | 158 +++++++++++++++++++++++++--- drivers/crypto/cavium/nitrox/nitrox_hal.h | 6 +- drivers/crypto/cavium/nitrox/nitrox_main.c | 4 +- drivers/crypto/cavium/nitrox/nitrox_sriov.c | 3 + 5 files changed, 265 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/cavium/nitrox/nitrox_csr.h b/drivers/crypto/cavium/nitrox/nitrox_csr.h index da1d73303780..1c8715ae0488 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_csr.h +++ b/drivers/crypto/cavium/nitrox/nitrox_csr.h @@ -256,6 +256,117 @@ union aqm_grp_execmsk_hi { }; }; +/** + * struct aqmq_drbl - AQM Queue Doorbell Counter Registers + * @dbell_count: Doorbell Counter + */ +union aqmq_drbl { + u64 value; + struct { +#if (defined(__BIG_ENDIAN_BITFIELD)) + u64 raz_32_63 : 32; + u64 dbell_count : 32; +#else + u64 dbell_count : 32; + u64 raz_32_63 : 32; +#endif + }; +}; + +/** + * struct aqmq_qsz - AQM Queue Host Queue Size Registers + * @host_queue_size: Size, in numbers of 'aqmq_command_s' command + * of the Host Ring. + */ +union aqmq_qsz { + u64 value; + struct { +#if (defined(__BIG_ENDIAN_BITFIELD)) + u64 raz_32_63 : 32; + u64 host_queue_size : 32; +#else + u64 host_queue_size : 32; + u64 raz_32_63 : 32; +#endif + }; +}; + +/** + * struct aqmq_cmp_thr - AQM Queue Commands Completed Threshold Registers + * @commands_completed_threshold: Count of 'aqmq_command_s' commands executed + * by AE engines for which completion interrupt is asserted. 
+ */ +union aqmq_cmp_thr { + u64 value; + struct { +#if (defined(__BIG_ENDIAN_BITFIELD)) + u64 raz_32_63 : 32; + u64 commands_completed_threshold : 32; +#else + u64 commands_completed_threshold : 32; + u64 raz_32_63 : 32; +#endif + }; +}; + +/** + * struct aqmq_cmp_cnt - AQM Queue Commands Completed Count Registers + * @resend: Bit to request completion interrupt Resend. + * @completion_status: Command completion status of the ring. + * @commands_completed_count: Count of 'aqmq_command_s' commands executed by + * AE engines. + */ +union aqmq_cmp_cnt { + u64 value; + struct { +#if (defined(__BIG_ENDIAN_BITFIELD)) + u64 raz_34_63 : 30; + u64 resend : 1; + u64 completion_status : 1; + u64 commands_completed_count : 32; +#else + u64 commands_completed_count : 32; + u64 completion_status : 1; + u64 resend : 1; + u64 raz_34_63 : 30; +#endif + }; +}; + +/** + * struct aqmq_en - AQM Queue Enable Registers + * @queue_status: 1 = AQMQ is enabled, 0 = AQMQ is disabled + */ +union aqmq_en { + u64 value; + struct { +#if (defined(__BIG_ENDIAN_BITFIELD)) + u64 raz_1_63 : 63; + u64 queue_enable : 1; +#else + u64 queue_enable : 1; + u64 raz_1_63 : 63; +#endif + }; +}; + +/** + * struct aqmq_activity_stat - AQM Queue Activity Status Registers + * @queue_active: 1 = AQMQ is active, 0 = AQMQ is quiescent + */ +union aqmq_activity_stat { + u64 value; + struct { +#if (defined(__BIG_ENDIAN_BITFIELD)) + u64 raz_1_63 : 63; + u64 queue_active : 1; +#else + u64 queue_active : 1; + u64 raz_1_63 : 63; +#endif + }; +}; + /** * struct emu_fuse_map - EMU Fuse Map Registers * @ae_fuse: Fuse settings for AE 19..0 diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c index 3f0df60267a9..34a2f4f30a7e 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_hal.c +++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c @@ -241,12 +241,12 @@ void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev) } /** - * enable_nps_interrupts - enable NPS interrutps + * enable_nps_core_interrupts - enable NPS core interrutps * @ndev: NITROX device. * - * This includes NPS core, packet in and slc interrupts. + * This includes NPS core interrupts. 
*/ -static void enable_nps_interrupts(struct nitrox_device *ndev) +static void enable_nps_core_interrupts(struct nitrox_device *ndev) { union nps_core_int_ena_w1s core_int; @@ -258,18 +258,9 @@ static void enable_nps_interrupts(struct nitrox_device *ndev) core_int.s.npco_dma_malform = 1; core_int.s.host_nps_wr_err = 1; nitrox_write_csr(ndev, NPS_CORE_INT_ENA_W1S, core_int.value); - - /* NPS packet in ring interrupts */ - nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL)); - nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL)); - nitrox_write_csr(ndev, NPS_PKT_IN_ERR_TYPE_ENA_W1S, (~0ULL)); - /* NPS packet slc port interrupts */ - nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_HI_ENA_W1S, (~0ULL)); - nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_LO_ENA_W1S, (~0ULL)); - nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0uLL)); } -void nitrox_config_nps_unit(struct nitrox_device *ndev) +void nitrox_config_nps_core_unit(struct nitrox_device *ndev) { union nps_core_gbl_vfcfg core_gbl_vfcfg; @@ -281,12 +272,149 @@ void nitrox_config_nps_unit(struct nitrox_device *ndev) core_gbl_vfcfg.s.ilk_disable = 1; core_gbl_vfcfg.s.cfg = __NDEV_MODE_PF; nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value); + + /* enable nps core interrupts */ + enable_nps_core_interrupts(ndev); +} + +/** + * enable_nps_pkt_interrupts - enable NPS packet interrutps + * @ndev: NITROX device. + * + * This includes NPS packet in and slc interrupts. + */ +static void enable_nps_pkt_interrupts(struct nitrox_device *ndev) +{ + /* NPS packet in ring interrupts */ + nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL)); + nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL)); + nitrox_write_csr(ndev, NPS_PKT_IN_ERR_TYPE_ENA_W1S, (~0ULL)); + /* NPS packet slc port interrupts */ + nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_HI_ENA_W1S, (~0ULL)); + nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_LO_ENA_W1S, (~0ULL)); + nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0uLL)); +} + +void nitrox_config_nps_pkt_unit(struct nitrox_device *ndev) +{ /* config input and solicit ports */ nitrox_config_pkt_input_rings(ndev); nitrox_config_pkt_solicit_ports(ndev); - /* enable interrupts */ - enable_nps_interrupts(ndev); + /* enable nps packet interrupts */ + enable_nps_pkt_interrupts(ndev); +} + +static void reset_aqm_ring(struct nitrox_device *ndev, int ring) +{ + union aqmq_en aqmq_en_reg; + union aqmq_activity_stat activity_stat; + union aqmq_cmp_cnt cmp_cnt; + int max_retries = MAX_CSR_RETRIES; + u64 offset; + + /* step 1: disable the queue */ + offset = AQMQ_ENX(ring); + aqmq_en_reg.value = 0; + aqmq_en_reg.queue_enable = 0; + nitrox_write_csr(ndev, offset, aqmq_en_reg.value); + + /* step 2: wait for AQMQ_ACTIVITY_STATX[QUEUE_ACTIVE] to clear */ + usleep_range(100, 150); + offset = AQMQ_ACTIVITY_STATX(ring); + do { + activity_stat.value = nitrox_read_csr(ndev, offset); + if (!activity_stat.queue_active) + break; + udelay(50); + } while (max_retries--); + + /* step 3: clear commands completed count */ + offset = AQMQ_CMP_CNTX(ring); + cmp_cnt.value = nitrox_read_csr(ndev, offset); + nitrox_write_csr(ndev, offset, cmp_cnt.value); + usleep_range(50, 100); +} + +void enable_aqm_ring(struct nitrox_device *ndev, int ring) +{ + union aqmq_en aqmq_en_reg; + u64 offset; + + offset = AQMQ_ENX(ring); + aqmq_en_reg.value = 0; + aqmq_en_reg.queue_enable = 1; + nitrox_write_csr(ndev, offset, aqmq_en_reg.value); + usleep_range(50, 100); +} + +void nitrox_config_aqm_rings(struct nitrox_device *ndev) +{ + int ring; + + for (ring = 0; 
ring < ndev->nr_queues; ring++) { + struct nitrox_cmdq *cmdq = ndev->aqmq[ring]; + union aqmq_drbl drbl; + union aqmq_qsz qsize; + union aqmq_cmp_thr cmp_thr; + u64 offset; + + /* steps 1 - 3 */ + reset_aqm_ring(ndev, ring); + + /* step 4: clear doorbell count of ring */ + offset = AQMQ_DRBLX(ring); + drbl.value = 0; + drbl.dbell_count = 0xFFFFFFFF; + nitrox_write_csr(ndev, offset, drbl.value); + + /* step 5: configure host ring details */ + + /* set host address for next command of ring */ + offset = AQMQ_NXT_CMDX(ring); + nitrox_write_csr(ndev, offset, 0ULL); + + /* set host address of ring base */ + offset = AQMQ_BADRX(ring); + nitrox_write_csr(ndev, offset, cmdq->dma); + + /* set ring size */ + offset = AQMQ_QSZX(ring); + qsize.value = 0; + qsize.host_queue_size = ndev->qlen; + nitrox_write_csr(ndev, offset, qsize.value); + + /* set command completion threshold */ + offset = AQMQ_CMP_THRX(ring); + cmp_thr.value = 0; + cmp_thr.commands_completed_threshold = 1; + nitrox_write_csr(ndev, offset, cmp_thr.value); + + /* step 6: enable the queue */ + enable_aqm_ring(ndev, ring); + } +} + +static void enable_aqm_interrupts(struct nitrox_device *ndev) +{ + /* clear interrupt enable bits */ + nitrox_write_csr(ndev, AQM_DBELL_OVF_LO_ENA_W1S, (~0ULL)); + nitrox_write_csr(ndev, AQM_DBELL_OVF_HI_ENA_W1S, (~0ULL)); + nitrox_write_csr(ndev, AQM_DMA_RD_ERR_LO_ENA_W1S, (~0ULL)); + nitrox_write_csr(ndev, AQM_DMA_RD_ERR_HI_ENA_W1S, (~0ULL)); + nitrox_write_csr(ndev, AQM_EXEC_NA_LO_ENA_W1S, (~0ULL)); + nitrox_write_csr(ndev, AQM_EXEC_NA_HI_ENA_W1S, (~0ULL)); + nitrox_write_csr(ndev, AQM_EXEC_ERR_LO_ENA_W1S, (~0ULL)); + nitrox_write_csr(ndev, AQM_EXEC_ERR_HI_ENA_W1S, (~0ULL)); +} + +void nitrox_config_aqm_unit(struct nitrox_device *ndev) +{ + /* config aqm command queues */ + nitrox_config_aqm_rings(ndev); + + /* enable aqm interrupts */ + enable_aqm_interrupts(ndev); } void nitrox_config_pom_unit(struct nitrox_device *ndev) diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.h b/drivers/crypto/cavium/nitrox/nitrox_hal.h index d6606418ba38..48b0af039099 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_hal.h +++ b/drivers/crypto/cavium/nitrox/nitrox_hal.h @@ -4,10 +4,13 @@ #include "nitrox_dev.h" +void nitrox_config_aqm_rings(struct nitrox_device *ndev); +void nitrox_config_aqm_unit(struct nitrox_device *ndev); void nitrox_config_emu_unit(struct nitrox_device *ndev); void nitrox_config_pkt_input_rings(struct nitrox_device *ndev); void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev); -void nitrox_config_nps_unit(struct nitrox_device *ndev); +void nitrox_config_nps_core_unit(struct nitrox_device *ndev); +void nitrox_config_nps_pkt_unit(struct nitrox_device *ndev); void nitrox_config_pom_unit(struct nitrox_device *ndev); void nitrox_config_rand_unit(struct nitrox_device *ndev); void nitrox_config_efl_unit(struct nitrox_device *ndev); @@ -15,6 +18,7 @@ void nitrox_config_bmi_unit(struct nitrox_device *ndev); void nitrox_config_bmo_unit(struct nitrox_device *ndev); void nitrox_config_lbc_unit(struct nitrox_device *ndev); void invalidate_lbc(struct nitrox_device *ndev); +void enable_aqm_ring(struct nitrox_device *ndev, int qno); void enable_pkt_input_ring(struct nitrox_device *ndev, int ring); void enable_pkt_solicit_port(struct nitrox_device *ndev, int port); void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode); diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c index 345d3ea10b1f..bc924980e10c 100644 --- 
a/drivers/crypto/cavium/nitrox/nitrox_main.c +++ b/drivers/crypto/cavium/nitrox/nitrox_main.c @@ -387,7 +387,9 @@ static int nitrox_pf_hw_init(struct nitrox_device *ndev) /* get cores information */ nitrox_get_hwinfo(ndev); - nitrox_config_nps_unit(ndev); + nitrox_config_nps_core_unit(ndev); + nitrox_config_aqm_unit(ndev); + nitrox_config_nps_pkt_unit(ndev); nitrox_config_pom_unit(ndev); nitrox_config_efl_unit(ndev); /* configure IO units */ diff --git a/drivers/crypto/cavium/nitrox/nitrox_sriov.c b/drivers/crypto/cavium/nitrox/nitrox_sriov.c index bf439d8256ba..43287f8471d1 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_sriov.c +++ b/drivers/crypto/cavium/nitrox/nitrox_sriov.c @@ -109,6 +109,9 @@ static int nitrox_pf_reinit(struct nitrox_device *ndev) return err; } + /* configure the AQM queues */ + nitrox_config_aqm_rings(ndev); + /* configure the packet queues */ nitrox_config_pkt_input_rings(ndev); nitrox_config_pkt_solicit_ports(ndev); -- cgit From ff296293b3538d19278a7f7cd1f3aa600ad9164c Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Mon, 19 Aug 2019 08:02:45 -0700 Subject: random: Support freezable kthreads in add_hwgenerator_randomness() The kthread calling this function is freezable after commit 03a3bb7ae631 ("hwrng: core - Freeze khwrng thread during suspend") is applied. Unfortunately, this function uses wait_event_interruptible() but doesn't check for the kthread being woken up by the fake freezer signal. When a user suspends the system, this kthread will wake up and if it fails the entropy size check it will immediately go back to sleep and not go into the freezer. Eventually, suspend will fail because the task never froze and a warning message like this may appear: PM: suspend entry (deep) Filesystems sync: 0.000 seconds Freezing user space processes ... (elapsed 0.001 seconds) done. OOM killer disabled. Freezing remaining freezable tasks ... Freezing of tasks failed after 20.003 seconds (1 tasks refusing to freeze, wq_busy=0): hwrng R running task 0 289 2 0x00000020 [] (__schedule) from [] (schedule+0x3c/0xc0) [] (schedule) from [] (add_hwgenerator_randomness+0xb0/0x100) [] (add_hwgenerator_randomness) from [] (hwrng_fillfn+0xc0/0x14c [rng_core]) [] (hwrng_fillfn [rng_core]) from [] (kthread+0x134/0x148) [] (kthread) from [] (ret_from_fork+0x14/0x2c) Check for a freezer signal here and skip adding any randomness if the task wakes up because it was frozen. This should make the kthread freeze properly and suspend work again. Fixes: 03a3bb7ae631 ("hwrng: core - Freeze khwrng thread during suspend") Reported-by: Keerthy Tested-by: Keerthy Signed-off-by: Stephen Boyd Signed-off-by: Herbert Xu --- drivers/char/random.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 5d5ea4ce1442..e2e85ca16410 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -2429,6 +2429,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy) { struct entropy_store *poolp = &input_pool; + bool frozen = false; if (unlikely(crng_init == 0)) { crng_fast_load(buffer, count); @@ -2439,9 +2440,12 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, * We'll be woken up again once below random_write_wakeup_thresh, * or when the calling thread is about to terminate. 
*/ - wait_event_interruptible(random_write_wait, kthread_should_stop() || + wait_event_interruptible(random_write_wait, + kthread_freezable_should_stop(&frozen) || ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits); - mix_pool_bytes(poolp, buffer, count); - credit_entropy_bits(poolp, entropy); + if (!frozen) { + mix_pool_bytes(poolp, buffer, count); + credit_entropy_bits(poolp, entropy); + } } EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); -- cgit From 92c203e2dca3c816c7001e429134f412d9d89389 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:00:46 +0300 Subject: crypto: atmel/des - switch to new verification routines Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/atmel-tdes.c | 28 +++++++--------------------- 1 file changed, 7 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index 6256883a89ed..1a6c86ae6148 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include #include #include @@ -773,22 +773,12 @@ static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd) static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { - u32 tmp[DES_EXPKEY_WORDS]; - int err; - struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm); - struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm); + int err; - if (keylen != DES_KEY_SIZE) { - crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - - err = des_ekey(tmp, key); - if (err == 0 && (ctfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { - ctfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; - return -EINVAL; - } + err = verify_ablkcipher_des_key(tfm, key); + if (err) + return err; memcpy(ctx->key, key, keylen); ctx->keylen = keylen; @@ -800,15 +790,11 @@ static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm); - u32 flags; int err; - flags = crypto_ablkcipher_get_flags(tfm); - err = __des3_verify_key(&flags, key); - if (unlikely(err)) { - crypto_ablkcipher_set_flags(tfm, flags); + err = verify_ablkcipher_des3_key(tfm, key); + if (err) return err; - } memcpy(ctx->key, key, keylen); ctx->keylen = keylen; -- cgit From 05a7238d085f6cfaff9ad41e56f74d9923afa091 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:00:47 +0300 Subject: crypto: bcm/des - switch to new verification routines Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/bcm/cipher.c | 79 ++++++++++----------------------------------- 1 file changed, 17 insertions(+), 62 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 1c23e452700b..f85356a48e7e 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include #include @@ -1802,24 +1802,13 @@ static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen) { struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher); - u32 tmp[DES_EXPKEY_WORDS]; - - if (keylen == DES_KEY_SIZE) { - if (des_ekey(tmp, key) == 0) { - if (crypto_ablkcipher_get_flags(cipher) & - CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) { - u32 flags = CRYPTO_TFM_RES_WEAK_KEY; + int err; - crypto_ablkcipher_set_flags(cipher, flags); - return -EINVAL; - } - } + err = verify_ablkcipher_des_key(cipher, 
key); + if (err) + return err; - ctx->cipher_type = CIPHER_TYPE_DES; - } else { - crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } + ctx->cipher_type = CIPHER_TYPE_DES; return 0; } @@ -1827,23 +1816,13 @@ static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen) { struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher); + int err; - if (keylen == (DES_KEY_SIZE * 3)) { - u32 flags; - int ret; - - flags = crypto_ablkcipher_get_flags(cipher); - ret = __des3_verify_key(&flags, key); - if (unlikely(ret)) { - crypto_ablkcipher_set_flags(cipher, flags); - return ret; - } + err = verify_ablkcipher_des3_key(cipher, key); + if (err) + return err; - ctx->cipher_type = CIPHER_TYPE_3DES; - } else { - crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } + ctx->cipher_type = CIPHER_TYPE_3DES; return 0; } @@ -2868,40 +2847,16 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, switch (ctx->alg->cipher_info.alg) { case CIPHER_ALG_DES: - if (ctx->enckeylen == DES_KEY_SIZE) { - u32 tmp[DES_EXPKEY_WORDS]; - u32 flags = CRYPTO_TFM_RES_WEAK_KEY; - - if (des_ekey(tmp, keys.enckey) == 0) { - if (crypto_aead_get_flags(cipher) & - CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) { - crypto_aead_set_flags(cipher, flags); - return -EINVAL; - } - } + if (verify_aead_des_key(cipher, keys.enckey, keys.enckeylen)) + return -EINVAL; - ctx->cipher_type = CIPHER_TYPE_DES; - } else { - goto badkey; - } + ctx->cipher_type = CIPHER_TYPE_DES; break; case CIPHER_ALG_3DES: - if (ctx->enckeylen == (DES_KEY_SIZE * 3)) { - u32 flags; - - flags = crypto_aead_get_flags(cipher); - ret = __des3_verify_key(&flags, keys.enckey); - if (unlikely(ret)) { - crypto_aead_set_flags(cipher, flags); - return ret; - } - - ctx->cipher_type = CIPHER_TYPE_3DES; - } else { - crypto_aead_set_flags(cipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (verify_aead_des3_key(cipher, keys.enckey, keys.enckeylen)) return -EINVAL; - } + + ctx->cipher_type = CIPHER_TYPE_3DES; break; case CIPHER_ALG_AES: switch (ctx->enckeylen) { -- cgit From a628c5a11d8b87a16dbc0d5bbc9b6ff233865622 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:00:48 +0300 Subject: crypto: caam/des - switch to new verification routines Cc: Horia Geanta Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 49 +++++++++++---------------------------- drivers/crypto/caam/caamalg_qi.c | 36 +++++----------------------- drivers/crypto/caam/caamalg_qi2.c | 36 +++++++++------------------- drivers/crypto/caam/compat.h | 2 +- 4 files changed, 31 insertions(+), 92 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 947ba8ef487a..3e2662cda9fd 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -628,33 +628,17 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct crypto_authenc_keys keys; - u32 flags; int err; err = crypto_authenc_extractkeys(&keys, key, keylen); if (unlikely(err)) - goto badkey; - - err = -EINVAL; - if (keys.enckeylen != DES3_EDE_KEY_SIZE) - goto badkey; - - flags = crypto_aead_get_flags(aead); - err = __des3_verify_key(&flags, keys.enckey); - if (unlikely(err)) { - crypto_aead_set_flags(aead, flags); - goto out; - } + return err; - err = aead_setkey(aead, key, keylen); + err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?: + aead_setkey(aead, key, keylen); -out: 
memzero_explicit(&keys, sizeof(keys)); return err; - -badkey: - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); - goto out; } static int gcm_setkey(struct crypto_aead *aead, @@ -843,22 +827,15 @@ static int arc4_skcipher_setkey(struct crypto_skcipher *skcipher, static int des_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { - u32 tmp[DES3_EDE_EXPKEY_WORDS]; - struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); - - if (keylen == DES3_EDE_KEY_SIZE && - __des3_ede_setkey(tmp, &tfm->crt_flags, key, DES3_EDE_KEY_SIZE)) { - return -EINVAL; - } - - if (!des_ekey(tmp, key) && (crypto_skcipher_get_flags(skcipher) & - CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_WEAK_KEY); - return -EINVAL; - } + return verify_skcipher_des_key(skcipher, key) ?: + skcipher_setkey(skcipher, key, keylen, 0); +} - return skcipher_setkey(skcipher, key, keylen, 0); +static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + return verify_skcipher_des3_key(skcipher, key) ?: + skcipher_setkey(skcipher, key, keylen, 0); } static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, @@ -1954,7 +1931,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = "cbc-3des-caam", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .setkey = des_skcipher_setkey, + .setkey = des3_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, @@ -2073,7 +2050,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = "ecb-des3-caam", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .setkey = des_skcipher_setkey, + .setkey = des3_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index fb54b2c9d337..932643c88a5d 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -278,33 +278,17 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct crypto_authenc_keys keys; - u32 flags; int err; err = crypto_authenc_extractkeys(&keys, key, keylen); if (unlikely(err)) - goto badkey; - - err = -EINVAL; - if (keys.enckeylen != DES3_EDE_KEY_SIZE) - goto badkey; - - flags = crypto_aead_get_flags(aead); - err = __des3_verify_key(&flags, keys.enckey); - if (unlikely(err)) { - crypto_aead_set_flags(aead, flags); - goto out; - } + return err; - err = aead_setkey(aead, key, keylen); + err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?: + aead_setkey(aead, key, keylen); -out: memzero_explicit(&keys, sizeof(keys)); return err; - -badkey: - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); - goto out; } static int gcm_set_sh_desc(struct crypto_aead *aead) @@ -745,23 +729,15 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { - return unlikely(des3_verify_key(skcipher, key)) ?: + return verify_skcipher_des3_key(skcipher, key) ?: skcipher_setkey(skcipher, key, keylen, 0); } static int des_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { - u32 tmp[DES_EXPKEY_WORDS]; - - if (!des_ekey(tmp, key) && (crypto_skcipher_get_flags(skcipher) & - CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_WEAK_KEY); - return 
-EINVAL; - } - - return skcipher_setkey(skcipher, key, keylen, 0); + return verify_skcipher_des_key(skcipher, key) ?: + skcipher_setkey(skcipher, key, keylen, 0); } static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index bd01bcd799e8..3443f6d6dd83 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -322,7 +322,6 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct crypto_authenc_keys keys; - u32 flags; int err; err = crypto_authenc_extractkeys(&keys, key, keylen); @@ -333,14 +332,8 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, if (keys.enckeylen != DES3_EDE_KEY_SIZE) goto badkey; - flags = crypto_aead_get_flags(aead); - err = __des3_verify_key(&flags, keys.enckey); - if (unlikely(err)) { - crypto_aead_set_flags(aead, flags); - goto out; - } - - err = aead_setkey(aead, key, keylen); + err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?: + aead_setkey(aead, key, keylen); out: memzero_explicit(&keys, sizeof(keys)); @@ -1070,22 +1063,15 @@ static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher, static int des_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { - u32 tmp[DES3_EDE_EXPKEY_WORDS]; - struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); - - if (keylen == DES3_EDE_KEY_SIZE && - __des3_ede_setkey(tmp, &tfm->crt_flags, key, DES3_EDE_KEY_SIZE)) { - return -EINVAL; - } - - if (!des_ekey(tmp, key) && (crypto_skcipher_get_flags(skcipher) & - CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_WEAK_KEY); - return -EINVAL; - } + return verify_skcipher_des_key(skcipher, key) ?: + skcipher_setkey(skcipher, key, keylen, 0); +} - return skcipher_setkey(skcipher, key, keylen, 0); +static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + return verify_skcipher_des3_key(skcipher, key) ?: + skcipher_setkey(skcipher, key, keylen, 0); } static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, @@ -1634,7 +1620,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = "cbc-3des-caam-qi2", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .setkey = des_skcipher_setkey, + .setkey = des3_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index 8639b2df0371..60e2a54c19f1 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include #include -- cgit From 0e1cbe9795ace1ea5e1621e52b9d918d9382fcfd Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:00:49 +0300 Subject: crypto: cpt/des - switch to new verification routines Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/cavium/cpt/cptvf_algs.c | 26 +++++--------------------- 1 file changed, 5 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c index ff3cb1f8f2b6..596ce28b957d 100644 --- a/drivers/crypto/cavium/cpt/cptvf_algs.c +++ b/drivers/crypto/cavium/cpt/cptvf_algs.c @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include #include @@ -322,31 +322,15 @@ 
static int cvm_cfb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, u32 keylen) { - u32 flags = crypto_ablkcipher_get_flags(cipher); - int err; - - err = __des3_verify_key(&flags, key); - if (unlikely(err)) { - crypto_ablkcipher_set_flags(cipher, flags); - return err; - } - - return cvm_setkey(cipher, key, keylen, DES3_CBC); + return verify_ablkcipher_des3_key(cipher, key) ?: + cvm_setkey(cipher, key, keylen, DES3_CBC); } static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, u32 keylen) { - u32 flags = crypto_ablkcipher_get_flags(cipher); - int err; - - err = __des3_verify_key(&flags, key); - if (unlikely(err)) { - crypto_ablkcipher_set_flags(cipher, flags); - return err; - } - - return cvm_setkey(cipher, key, keylen, DES3_ECB); + return verify_ablkcipher_des3_key(cipher, key) ?: + cvm_setkey(cipher, key, keylen, DES3_ECB); } static int cvm_enc_dec_init(struct crypto_tfm *tfm) -- cgit From 43d81a8b258e1ed894af0f5d81921365004b6d90 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:00:50 +0300 Subject: crypto: nitrox/des - switch to new verification routines Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/cavium/nitrox/nitrox_skcipher.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c index 7e4a5e69085e..3cdce1f0f257 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c +++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include "nitrox_dev.h" @@ -257,7 +257,7 @@ static int nitrox_aes_decrypt(struct skcipher_request *skreq) static int nitrox_3des_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { - return unlikely(des3_verify_key(cipher, key)) ?: + return verify_skcipher_des3_key(cipher, key) ?: nitrox_skcipher_setkey(cipher, 0, key, keylen); } -- cgit From b525041633145828bd6744a4d1b79dbc084315b3 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:00:51 +0300 Subject: crypto: ccp/des - switch to new verification routines Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-crypto-des3.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c index 5f05f834c7cd..d2c49b2f0323 100644 --- a/drivers/crypto/ccp/ccp-crypto-des3.c +++ b/drivers/crypto/ccp/ccp-crypto-des3.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include "ccp-crypto.h" @@ -39,11 +39,10 @@ static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key, struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm)); struct ccp_crypto_ablkcipher_alg *alg = ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm)); - u32 *flags = &tfm->base.crt_flags; int err; - err = __des3_verify_key(flags, key); - if (unlikely(err)) + err = verify_ablkcipher_des3_key(tfm, key); + if (err) return err; /* It's not clear that there is any support for a keysize of 112. 
-- cgit From 00cd6b233fc89463d4f4eddeb9abf1e009cc09c0 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:00:52 +0300 Subject: crypto: ccree/des - switch to new verification routines Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_aead.c | 24 ++++-------------------- drivers/crypto/ccree/cc_cipher.c | 15 ++++----------- 2 files changed, 8 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index a9779a212b18..d3e8faa03f15 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include "cc_driver.h" #include "cc_buffer_mgr.h" @@ -649,33 +649,17 @@ static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct crypto_authenc_keys keys; - u32 flags; int err; err = crypto_authenc_extractkeys(&keys, key, keylen); if (unlikely(err)) - goto badkey; - - err = -EINVAL; - if (keys.enckeylen != DES3_EDE_KEY_SIZE) - goto badkey; + return err; - flags = crypto_aead_get_flags(aead); - err = __des3_verify_key(&flags, keys.enckey); - if (unlikely(err)) { - crypto_aead_set_flags(aead, flags); - goto out; - } + err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?: + cc_aead_setkey(aead, key, keylen); - err = cc_aead_setkey(aead, key, keylen); - -out: memzero_explicit(&keys, sizeof(keys)); return err; - -badkey: - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); - goto out; } static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index 5b58226ea24d..c7ec20e90fc0 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -5,7 +5,7 @@ #include #include #include -#include +#include #include #include #include @@ -411,16 +411,9 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key, * HW does the expansion on its own. 
*/ if (ctx_p->flow_mode == S_DIN_to_DES) { - u32 tmp[DES3_EDE_EXPKEY_WORDS]; - if (keylen == DES3_EDE_KEY_SIZE && - __des3_ede_setkey(tmp, &tfm->crt_flags, key, - DES3_EDE_KEY_SIZE)) { - dev_dbg(dev, "weak 3DES key"); - return -EINVAL; - } else if (!des_ekey(tmp, key) && - (crypto_tfm_get_flags(tfm) & - CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { - tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; + if ((keylen == DES3_EDE_KEY_SIZE && + verify_skcipher_des3_key(sktfm, key)) || + verify_skcipher_des_key(sktfm, key)) { dev_dbg(dev, "weak DES key"); return -EINVAL; } -- cgit From f4ed68609798211f6503e66d7d371230ea18cf14 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:00:53 +0300 Subject: crypto: hifn/des - switch to new verification routines Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/hifn_795x.c | 32 ++++++++------------------------ 1 file changed, 8 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index 5c3f02e4aece..a18e62df68d9 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -21,7 +21,7 @@ #include #include -#include +#include static char hifn_pll_ref[sizeof("extNNN")] = "ext"; module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444); @@ -1939,25 +1939,13 @@ static void hifn_flush(struct hifn_device *dev) static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int len) { - struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); - struct hifn_context *ctx = crypto_tfm_ctx(tfm); + struct hifn_context *ctx = crypto_ablkcipher_ctx(cipher); struct hifn_device *dev = ctx->dev; + int err; - if (len > HIFN_MAX_CRYPT_KEY_LENGTH) { - crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -1; - } - - if (len == HIFN_DES_KEY_LENGTH) { - u32 tmp[DES_EXPKEY_WORDS]; - int ret = des_ekey(tmp, key); - - if (unlikely(ret == 0) && - (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { - tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; - return -EINVAL; - } - } + err = verify_ablkcipher_des_key(cipher, key); + if (err) + return err; dev->flags &= ~HIFN_FLAG_OLD_KEY; @@ -1972,15 +1960,11 @@ static int hifn_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, { struct hifn_context *ctx = crypto_ablkcipher_ctx(cipher); struct hifn_device *dev = ctx->dev; - u32 flags; int err; - flags = crypto_ablkcipher_get_flags(cipher); - err = __des3_verify_key(&flags, key); - if (unlikely(err)) { - crypto_ablkcipher_set_flags(cipher, flags); + err = verify_ablkcipher_des3_key(cipher, key); + if (err) return err; - } dev->flags &= ~HIFN_FLAG_OLD_KEY; -- cgit From 894b68d8be4b305615ab3be52667478fe5f6e0b5 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:00:54 +0300 Subject: crypto: hisilicon/des - switch to new verification routines Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec/sec_algs.c | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index 02768af0dccd..e0508ea160f1 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -9,7 +9,7 @@ #include #include -#include +#include #include #include #include @@ -347,25 +347,21 @@ static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm, static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm, const u8 
*key, unsigned int keylen) { - if (keylen != DES_KEY_SIZE) - return -EINVAL; - - return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64); + return verify_skcipher_des_key(tfm, key) ?: + sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64); } static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { - if (keylen != DES_KEY_SIZE) - return -EINVAL; - - return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64); + return verify_skcipher_des_key(tfm, key) ?: + sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64); } static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { - return unlikely(des3_verify_key(tfm, key)) ?: + return verify_skcipher_des3_key(tfm, key) ?: sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_3DES_ECB_192_3KEY); } @@ -373,7 +369,7 @@ static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm, static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { - return unlikely(des3_verify_key(tfm, key)) ?: + return verify_skcipher_des3_key(tfm, key) ?: sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_3DES_CBC_192_3KEY); } -- cgit From 21f5a15e0f26c7ea15bee9ed3c5fd1daa40b0b77 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:00:55 +0300 Subject: crypto: safexcel/des - switch to new verification routines Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel_cipher.c | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 5682fe8b606e..16c4d5460334 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -220,7 +220,6 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key, struct safexcel_crypto_priv *priv = ctx->priv; struct crypto_authenc_keys keys; struct crypto_aes_ctx aes; - u32 flags; int err = -EINVAL; if (crypto_authenc_extractkeys(&keys, key, len) != 0) @@ -241,12 +240,7 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key, /* Encryption key */ switch (ctx->alg) { case SAFEXCEL_3DES: - if (keys.enckeylen != DES3_EDE_KEY_SIZE) - goto badkey; - flags = crypto_aead_get_flags(ctfm); - err = __des3_verify_key(&flags, keys.enckey); - crypto_aead_set_flags(ctfm, flags); - + err = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen); if (unlikely(err)) goto badkey_expflags; break; @@ -1192,16 +1186,12 @@ static int safexcel_cbc_des_decrypt(struct skcipher_request *req) static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key, unsigned int len) { - struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); - struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); - u32 tmp[DES_EXPKEY_WORDS]; + struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm); int ret; - ret = des_ekey(tmp, key); - if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { - tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; - return -EINVAL; - } + ret = verify_skcipher_des_key(ctfm, key); + if (ret) + return ret; /* if context exits and key changed, need to invalidate it */ if (ctx->base.ctxr_dma) @@ -1299,8 +1289,8 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm, struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm); int err; - err = 
des3_verify_key(ctfm, key); - if (unlikely(err)) + err = verify_skcipher_des3_key(ctfm, key); + if (err) return err; /* if context exits and key changed, need to invalidate it */ -- cgit From 3ca20b65cbfd1acd733c2d12848261b00b2d55e1 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:00:56 +0300 Subject: crypto: ixp4xx/des - switch to new verification routines Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/ixp4xx_crypto.c | 27 ++++++--------------------- 1 file changed, 6 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index acedafe3fa98..9181523ba760 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -17,7 +17,7 @@ #include #include -#include +#include #include #include #include @@ -756,10 +756,7 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt, } cipher_cfg |= keylen_cfg; } else { - u32 tmp[DES_EXPKEY_WORDS]; - if (des_ekey(tmp, key) == 0) { - *flags |= CRYPTO_TFM_RES_WEAK_KEY; - } + crypto_des_verify_key(tfm, key); } /* write cfg word to cryptinfo */ *(u32*)cinfo = cpu_to_be32(cipher_cfg); @@ -851,14 +848,8 @@ out: static int ablk_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int key_len) { - u32 flags = crypto_ablkcipher_get_flags(tfm); - int err; - - err = __des3_verify_key(&flags, key); - if (unlikely(err)) - crypto_ablkcipher_set_flags(tfm, flags); - - return ablk_setkey(tfm, key, key_len); + return verify_ablkcipher_des3_key(tfm, key) ?: + ablk_setkey(tfm, key, key_len); } static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key, @@ -1181,7 +1172,6 @@ static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct ixp_ctx *ctx = crypto_aead_ctx(tfm); - u32 flags = CRYPTO_TFM_RES_BAD_KEY_LEN; struct crypto_authenc_keys keys; int err; @@ -1193,12 +1183,8 @@ static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key, if (keys.authkeylen > sizeof(ctx->authkey)) goto badkey; - if (keys.enckeylen != DES3_EDE_KEY_SIZE) - goto badkey; - - flags = crypto_aead_get_flags(tfm); - err = __des3_verify_key(&flags, keys.enckey); - if (unlikely(err)) + err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen); + if (err) goto badkey; memcpy(ctx->authkey, keys.authkey, keys.authkeylen); @@ -1209,7 +1195,6 @@ static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key, memzero_explicit(&keys, sizeof(keys)); return aead_setup(tfm, crypto_aead_authsize(tfm)); badkey: - crypto_aead_set_flags(tfm, flags); memzero_explicit(&keys, sizeof(keys)); return err; } -- cgit From 9bfa85ebd5eec6c357ab348b8fbca73529111bd7 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:00:57 +0300 Subject: crypto: cesa/des - switch to new verification routines Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/marvell/cipher.c | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c index fa1997e70b63..84ceddfee76b 100644 --- a/drivers/crypto/marvell/cipher.c +++ b/drivers/crypto/marvell/cipher.c @@ -10,7 +10,7 @@ */ #include -#include +#include #include "cesa.h" @@ -272,21 +272,12 @@ static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int len) { - struct crypto_tfm *tfm = 
crypto_skcipher_tfm(cipher); - struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm); - u32 tmp[DES_EXPKEY_WORDS]; - int ret; - - if (len != DES_KEY_SIZE) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } + struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher); + int err; - ret = des_ekey(tmp, key); - if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { - tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; - return -EINVAL; - } + err = verify_skcipher_des_key(cipher, key); + if (err) + return err; memcpy(ctx->key, key, DES_KEY_SIZE); @@ -299,8 +290,8 @@ static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher, struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher); int err; - err = des3_verify_key(cipher, key); - if (unlikely(err)) + err = verify_skcipher_des3_key(cipher, key); + if (err) return err; memcpy(ctx->key, key, DES3_EDE_KEY_SIZE); -- cgit From d4b90dbc8578c567827901e625018ead7561f741 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:00:58 +0300 Subject: crypto: n2/des - switch to new verification routines Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/n2_core.c | 32 +++++++++----------------------- 1 file changed, 9 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 760e72a5893b..4765163df6be 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -757,24 +757,15 @@ static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen) { - struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); - struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); + struct n2_cipher_context *ctx = crypto_ablkcipher_ctx(cipher); struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); - u32 tmp[DES_EXPKEY_WORDS]; int err; - ctx->enc_type = n2alg->enc_type; - - if (keylen != DES_KEY_SIZE) { - crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } + err = verify_ablkcipher_des_key(cipher, key); + if (err) + return err; - err = des_ekey(tmp, key); - if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { - tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; - return -EINVAL; - } + ctx->enc_type = n2alg->enc_type; ctx->key_len = keylen; memcpy(ctx->key.des, key, keylen); @@ -784,18 +775,13 @@ static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen) { - struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); - struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); + struct n2_cipher_context *ctx = crypto_ablkcipher_ctx(cipher); struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); - u32 flags; int err; - flags = crypto_ablkcipher_get_flags(cipher); - err = __des3_verify_key(&flags, key); - if (unlikely(err)) { - crypto_ablkcipher_set_flags(cipher, flags); + err = verify_ablkcipher_des3_key(cipher, key); + if (err) return err; - } ctx->enc_type = n2alg->enc_type; -- cgit From 304daa4f0ee54e42dea9e167e162caba17d48543 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:00:59 +0300 Subject: crypto: omap/des - switch to new verification routines Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/omap-des.c | 27 ++++++++------------------- 1 file 
changed, 8 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 484a693122af..b19d7e5d55ec 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include #include @@ -650,20 +650,13 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen) { struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher); - struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + int err; pr_debug("enter, keylen: %d\n", keylen); - /* Do we need to test against weak key? */ - if (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) { - u32 tmp[DES_EXPKEY_WORDS]; - int ret = des_ekey(tmp, key); - - if (!ret) { - tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; - return -EINVAL; - } - } + err = verify_ablkcipher_des_key(cipher, key); + if (err) + return err; memcpy(ctx->key, key, keylen); ctx->keylen = keylen; @@ -672,20 +665,16 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, } static int omap_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, - unsigned int keylen) + unsigned int keylen) { struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher); - u32 flags; int err; pr_debug("enter, keylen: %d\n", keylen); - flags = crypto_ablkcipher_get_flags(cipher); - err = __des3_verify_key(&flags, key); - if (unlikely(err)) { - crypto_ablkcipher_set_flags(cipher, flags); + err = verify_ablkcipher_des3_key(cipher, key); + if (err) return err; - } memcpy(ctx->key, key, keylen); ctx->keylen = keylen; -- cgit From 0157fb268893cddeb79c3cb6fc05d13a132aa66c Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:01:00 +0300 Subject: crypto: picoxcell/des - switch to new verification routines Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/picoxcell_crypto.c | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 9a939b4fd32f..3cbefb41b099 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include #include @@ -736,16 +736,12 @@ static void spacc_aead_cra_exit(struct crypto_aead *tfm) static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int len) { - struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); - struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); - u32 tmp[DES_EXPKEY_WORDS]; + struct spacc_ablk_ctx *ctx = crypto_ablkcipher_ctx(cipher); + int err; - if (unlikely(!des_ekey(tmp, key)) && - (crypto_ablkcipher_get_flags(cipher) & - CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { - tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; - return -EINVAL; - } + err = verify_ablkcipher_des_key(cipher, key); + if (err) + return err; memcpy(ctx->key, key, len); ctx->key_len = len; @@ -761,15 +757,11 @@ static int spacc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int len) { struct spacc_ablk_ctx *ctx = crypto_ablkcipher_ctx(cipher); - u32 flags; int err; - flags = crypto_ablkcipher_get_flags(cipher); - err = __des3_verify_key(&flags, key); - if (unlikely(err)) { - crypto_ablkcipher_set_flags(cipher, flags); + err = verify_ablkcipher_des3_key(cipher, key); + if (err) return err; - } memcpy(ctx->key, key, len); ctx->key_len = len; -- cgit From f96c897c855cfbde74d92f5cd65dc1108799bd8a Mon Sep 17 
00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:01:01 +0300 Subject: crypto: qce/des - switch to new verification routines Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/qce/ablkcipher.c | 55 ++++++++++++++++++++--------------------- 1 file changed, 27 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c index a976210ba41c..7a98bf5cc967 100644 --- a/drivers/crypto/qce/ablkcipher.c +++ b/drivers/crypto/qce/ablkcipher.c @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include "cipher.h" @@ -154,27 +154,17 @@ static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key, { struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk); struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); - unsigned long flags = to_cipher_tmpl(tfm)->alg_flags; int ret; if (!key || !keylen) return -EINVAL; - if (IS_AES(flags)) { - switch (keylen) { - case AES_KEYSIZE_128: - case AES_KEYSIZE_256: - break; - default: - goto fallback; - } - } else if (IS_DES(flags)) { - u32 tmp[DES_EXPKEY_WORDS]; - - ret = des_ekey(tmp, key); - if (!ret && (crypto_ablkcipher_get_flags(ablk) & - CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) - goto weakkey; + switch (keylen) { + case AES_KEYSIZE_128: + case AES_KEYSIZE_256: + break; + default: + goto fallback; } ctx->enc_keylen = keylen; @@ -185,24 +175,32 @@ fallback: if (!ret) ctx->enc_keylen = keylen; return ret; -weakkey: - crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY); - return -EINVAL; +} + +static int qce_des_setkey(struct crypto_ablkcipher *ablk, const u8 *key, + unsigned int keylen) +{ + struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk); + int err; + + err = verify_ablkcipher_des_key(ablk, key); + if (err) + return err; + + ctx->enc_keylen = keylen; + memcpy(ctx->enc_key, key, keylen); + return 0; } static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key, unsigned int keylen) { struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk); - u32 flags; int err; - flags = crypto_ablkcipher_get_flags(ablk); - err = __des3_verify_key(&flags, key); - if (unlikely(err)) { - crypto_ablkcipher_set_flags(ablk, flags); + err = verify_ablkcipher_des3_key(ablk, key); + if (err) return err; - } ctx->enc_keylen = keylen; memcpy(ctx->enc_key, key, keylen); @@ -374,8 +372,9 @@ static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def, alg->cra_ablkcipher.ivsize = def->ivsize; alg->cra_ablkcipher.min_keysize = def->min_keysize; alg->cra_ablkcipher.max_keysize = def->max_keysize; - alg->cra_ablkcipher.setkey = IS_3DES(def->flags) ? - qce_des3_setkey : qce_ablkcipher_setkey; + alg->cra_ablkcipher.setkey = IS_3DES(def->flags) ? qce_des3_setkey : + IS_DES(def->flags) ? 
qce_des_setkey : + qce_ablkcipher_setkey; alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt; alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt; -- cgit From 4eaef05210cbe7052adf53d030fd44b37b69c5a3 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:01:02 +0300 Subject: crypto: rk3288/des - switch to new verification routines Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/rockchip/rk3288_crypto.h | 2 +- drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c | 21 +++++++-------------- 2 files changed, 8 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h index 54ee5b3ed9db..18e2b3f29336 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.h +++ b/drivers/crypto/rockchip/rk3288_crypto.h @@ -3,7 +3,7 @@ #define __RK3288_CRYPTO_H__ #include -#include +#include #include #include #include diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c index 96078aaa2098..d0f4b2d18059 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c @@ -46,15 +46,12 @@ static int rk_aes_setkey(struct crypto_ablkcipher *cipher, static int rk_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen) { - struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); - struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm); - u32 tmp[DES_EXPKEY_WORDS]; + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher); + int err; - if (!des_ekey(tmp, key) && - (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { - tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; - return -EINVAL; - } + err = verify_ablkcipher_des_key(cipher, key); + if (err) + return err; ctx->keylen = keylen; memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); @@ -65,15 +62,11 @@ static int rk_tdes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen) { struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher); - u32 flags; int err; - flags = crypto_ablkcipher_get_flags(cipher); - err = __des3_verify_key(&flags, key); - if (unlikely(err)) { - crypto_ablkcipher_set_flags(cipher, flags); + err = verify_ablkcipher_des3_key(cipher, key); + if (err) return err; - } ctx->keylen = keylen; memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); -- cgit From b5d0ba83a0fb09b8dfd8bfdd9072d532da4f3091 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:01:03 +0300 Subject: crypto: stm32/des - switch to new verification routines Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-cryp.c | 30 +++++------------------------- 1 file changed, 5 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c index 5cf6679da580..ba5ea6434f9c 100644 --- a/drivers/crypto/stm32/stm32-cryp.c +++ b/drivers/crypto/stm32/stm32-cryp.c @@ -15,7 +15,7 @@ #include #include -#include +#include #include #include #include @@ -767,35 +767,15 @@ static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { - u32 tmp[DES_EXPKEY_WORDS]; - - if (keylen != DES_KEY_SIZE) - return -EINVAL; - - if ((crypto_ablkcipher_get_flags(tfm) & - CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) && - unlikely(!des_ekey(tmp, key))) { - crypto_ablkcipher_set_flags(tfm, 
CRYPTO_TFM_RES_WEAK_KEY); - return -EINVAL; - } - - return stm32_cryp_setkey(tfm, key, keylen); + return verify_ablkcipher_des_key(tfm, key) ?: + stm32_cryp_setkey(tfm, key, keylen); } static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { - u32 flags; - int err; - - flags = crypto_ablkcipher_get_flags(tfm); - err = __des3_verify_key(&flags, key); - if (unlikely(err)) { - crypto_ablkcipher_set_flags(tfm, flags); - return err; - } - - return stm32_cryp_setkey(tfm, key, keylen); + return verify_ablkcipher_des3_key(tfm, key) ?: + stm32_cryp_setkey(tfm, key, keylen); } static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key, -- cgit From 08d4b408e8f51562676fa47b728abd6ddf9828c2 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:01:04 +0300 Subject: crypto: sun4i/des - switch to new verification routines Cc: Corentin Labbe Acked-by: Corentin Labbe Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/sunxi-ss/sun4i-ss-cipher.c | 26 ++++++-------------------- drivers/crypto/sunxi-ss/sun4i-ss.h | 2 +- 2 files changed, 7 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c index 6f7cbf6c2b55..6536fd4bee65 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c @@ -542,25 +542,11 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); - struct sun4i_ss_ctx *ss = op->ss; - u32 flags; - u32 tmp[DES_EXPKEY_WORDS]; - int ret; - - if (unlikely(keylen != DES_KEY_SIZE)) { - dev_err(ss->dev, "Invalid keylen %u\n", keylen); - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - - flags = crypto_skcipher_get_flags(tfm); + int err; - ret = des_ekey(tmp, key); - if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); - dev_dbg(ss->dev, "Weak key %u\n", keylen); - return -EINVAL; - } + err = verify_skcipher_des_key(tfm, key); + if (err) + return err; op->keylen = keylen; memcpy(op->key, key, keylen); @@ -578,8 +564,8 @@ int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); int err; - err = des3_verify_key(tfm, key); - if (unlikely(err)) + err = verify_skcipher_des3_key(tfm, key); + if (err) return err; op->keylen = keylen; diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h index 8654d48aedc0..35a27a7145f8 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss.h +++ b/drivers/crypto/sunxi-ss/sun4i-ss.h @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include -- cgit From 9d574ae8ebc1e6e485ef949f086157601008a9d3 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:01:05 +0300 Subject: crypto: talitos/des - switch to new verification routines Cc: Christophe Leroy Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 37 +++++++------------------------------ 1 file changed, 7 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index c9d686a0e805..117c831b5ede 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -30,7 +30,7 @@ #include #include -#include +#include #include #include #include @@ -936,15 +936,9 @@ static int 
aead_des3_setkey(struct crypto_aead *authenc, if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE) goto badkey; - if (keys.enckeylen != DES3_EDE_KEY_SIZE) - goto badkey; - - flags = crypto_aead_get_flags(authenc); - err = __des3_verify_key(&flags, keys.enckey); - if (unlikely(err)) { - crypto_aead_set_flags(authenc, flags); + err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen); + if (err) goto out; - } if (ctx->keylen) dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE); @@ -1517,32 +1511,15 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen) { - u32 tmp[DES_EXPKEY_WORDS]; - - if (unlikely(crypto_ablkcipher_get_flags(cipher) & - CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) && - !des_ekey(tmp, key)) { - crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY); - return -EINVAL; - } - - return ablkcipher_setkey(cipher, key, keylen); + return verify_ablkcipher_des_key(cipher, key) ?: + ablkcipher_setkey(cipher, key, keylen); } static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen) { - u32 flags; - int err; - - flags = crypto_ablkcipher_get_flags(cipher); - err = __des3_verify_key(&flags, key); - if (unlikely(err)) { - crypto_ablkcipher_set_flags(cipher, flags); - return err; - } - - return ablkcipher_setkey(cipher, key, keylen); + return verify_ablkcipher_des3_key(cipher, key) ?: + ablkcipher_setkey(cipher, key, keylen); } static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher, -- cgit From dcb15794ec571c10243c24eaff8512b789dfef2a Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:01:06 +0300 Subject: crypto: ux500/des - switch to new verification routines Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/ux500/cryp/cryp_core.c | 31 +++++++------------------------ 1 file changed, 7 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index b4beb54c0dbe..e966e9a64501 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include @@ -987,26 +987,13 @@ static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen) { struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); - u32 *flags = &cipher->base.crt_flags; - u32 tmp[DES_EXPKEY_WORDS]; - int ret; + int err; pr_debug(DEV_DBG_NAME " [%s]", __func__); - if (keylen != DES_KEY_SIZE) { - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; - pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN", - __func__); - return -EINVAL; - } - ret = des_ekey(tmp, key); - if (unlikely(ret == 0) && - (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { - *flags |= CRYPTO_TFM_RES_WEAK_KEY; - pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY", - __func__); - return -EINVAL; - } + err = verify_ablkcipher_des_key(cipher, key); + if (err) + return err; memcpy(ctx->key, key, keylen); ctx->keylen = keylen; @@ -1019,17 +1006,13 @@ static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen) { struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); - u32 flags; int err; pr_debug(DEV_DBG_NAME " [%s]", __func__); - flags = crypto_ablkcipher_get_flags(cipher); - err = __des3_verify_key(&flags, key); - if (unlikely(err)) { - crypto_ablkcipher_set_flags(cipher, 
flags); + err = verify_ablkcipher_des3_key(cipher, key); + if (err) return err; - } memcpy(ctx->key, key, keylen); ctx->keylen = keylen; -- cgit From 04007b0e6cbbab5836ac891626e91edf10d46341 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:01:09 +0300 Subject: crypto: des - split off DES library from generic DES cipher driver Another one for the cipher museum: split off DES core processing into a separate module so other drivers (mostly for crypto accelerators) can reuse the code without pulling in the generic DES cipher itself. This will also permit the cipher interface to be made private to the crypto API itself once we move the only user in the kernel (CIFS) to this library interface. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 28 +++++++++++++------------- drivers/crypto/caam/Kconfig | 2 +- drivers/crypto/cavium/nitrox/Kconfig | 2 +- drivers/crypto/inside-secure/safexcel_cipher.c | 2 +- drivers/crypto/stm32/Kconfig | 2 +- drivers/crypto/ux500/Kconfig | 2 +- 6 files changed, 19 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index b8c50871f11b..5cd6e3d12bac 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -150,7 +150,7 @@ config CRYPTO_DES_S390 depends on S390 select CRYPTO_ALGAPI select CRYPTO_BLKCIPHER - select CRYPTO_DES + select CRYPTO_LIB_DES help This is the s390 hardware accelerated implementation of the DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). @@ -215,7 +215,7 @@ config CRYPTO_DEV_MARVELL_CESA tristate "Marvell's Cryptographic Engine driver" depends on PLAT_ORION || ARCH_MVEBU select CRYPTO_LIB_AES - select CRYPTO_DES + select CRYPTO_LIB_DES select CRYPTO_BLKCIPHER select CRYPTO_HASH select SRAM @@ -227,7 +227,7 @@ config CRYPTO_DEV_MARVELL_CESA config CRYPTO_DEV_NIAGARA2 tristate "Niagara2 Stream Processing Unit driver" - select CRYPTO_DES + select CRYPTO_LIB_DES select CRYPTO_BLKCIPHER select CRYPTO_HASH select CRYPTO_MD5 @@ -244,7 +244,7 @@ config CRYPTO_DEV_NIAGARA2 config CRYPTO_DEV_HIFN_795X tristate "Driver HIFN 795x crypto accelerator chips" - select CRYPTO_DES + select CRYPTO_LIB_DES select CRYPTO_BLKCIPHER select HW_RANDOM if CRYPTO_DEV_HIFN_795X_RNG depends on PCI @@ -300,7 +300,7 @@ config CRYPTO_DEV_TALITOS2 config CRYPTO_DEV_IXP4XX tristate "Driver for IXP4xx crypto hardware acceleration" depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE - select CRYPTO_DES + select CRYPTO_LIB_DES select CRYPTO_AEAD select CRYPTO_AUTHENC select CRYPTO_BLKCIPHER @@ -366,7 +366,7 @@ config CRYPTO_DEV_OMAP_AES config CRYPTO_DEV_OMAP_DES tristate "Support for OMAP DES/3DES hw engine" depends on ARCH_OMAP2PLUS - select CRYPTO_DES + select CRYPTO_LIB_DES select CRYPTO_BLKCIPHER select CRYPTO_ENGINE help @@ -384,7 +384,7 @@ config CRYPTO_DEV_PICOXCELL select CRYPTO_AES select CRYPTO_AUTHENC select CRYPTO_BLKCIPHER - select CRYPTO_DES + select CRYPTO_LIB_DES select CRYPTO_CBC select CRYPTO_ECB select CRYPTO_SEQIV @@ -497,7 +497,7 @@ config CRYPTO_DEV_ATMEL_AES config CRYPTO_DEV_ATMEL_TDES tristate "Support for Atmel DES/TDES hw accelerator" depends on ARCH_AT91 || COMPILE_TEST - select CRYPTO_DES + select CRYPTO_LIB_DES select CRYPTO_BLKCIPHER help Some Atmel processors have DES/TDES hw accelerator. 
@@ -595,7 +595,7 @@ config CRYPTO_DEV_QCE depends on ARCH_QCOM || COMPILE_TEST depends on HAS_IOMEM select CRYPTO_AES - select CRYPTO_DES + select CRYPTO_LIB_DES select CRYPTO_ECB select CRYPTO_CBC select CRYPTO_XTS @@ -643,7 +643,7 @@ config CRYPTO_DEV_SUN4I_SS select CRYPTO_MD5 select CRYPTO_SHA1 select CRYPTO_AES - select CRYPTO_DES + select CRYPTO_LIB_DES select CRYPTO_BLKCIPHER help Some Allwinner SoC have a crypto accelerator named @@ -666,7 +666,7 @@ config CRYPTO_DEV_ROCKCHIP tristate "Rockchip's Cryptographic Engine driver" depends on OF && ARCH_ROCKCHIP select CRYPTO_AES - select CRYPTO_DES + select CRYPTO_LIB_DES select CRYPTO_MD5 select CRYPTO_SHA1 select CRYPTO_SHA256 @@ -703,7 +703,7 @@ config CRYPTO_DEV_BCM_SPU depends on MAILBOX default m select CRYPTO_AUTHENC - select CRYPTO_DES + select CRYPTO_LIB_DES select CRYPTO_MD5 select CRYPTO_SHA1 select CRYPTO_SHA256 @@ -722,7 +722,7 @@ config CRYPTO_DEV_SAFEXCEL select CRYPTO_LIB_AES select CRYPTO_AUTHENC select CRYPTO_BLKCIPHER - select CRYPTO_DES + select CRYPTO_LIB_DES select CRYPTO_HASH select CRYPTO_HMAC select CRYPTO_MD5 @@ -760,7 +760,7 @@ config CRYPTO_DEV_CCREE default n select CRYPTO_HASH select CRYPTO_BLKCIPHER - select CRYPTO_DES + select CRYPTO_LIB_DES select CRYPTO_AEAD select CRYPTO_AUTHENC select CRYPTO_SHA1 diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index e4fdf545ac90..137ed3df0c74 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig @@ -98,7 +98,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API select CRYPTO_AEAD select CRYPTO_AUTHENC select CRYPTO_BLKCIPHER - select CRYPTO_DES + select CRYPTO_LIB_DES help Selecting this will offload crypto for users of the scatterlist crypto API (such as the linux native IPSec diff --git a/drivers/crypto/cavium/nitrox/Kconfig b/drivers/crypto/cavium/nitrox/Kconfig index dab162af41b8..7b1e751bb9cd 100644 --- a/drivers/crypto/cavium/nitrox/Kconfig +++ b/drivers/crypto/cavium/nitrox/Kconfig @@ -6,7 +6,7 @@ config CRYPTO_DEV_NITROX tristate select CRYPTO_BLKCIPHER select CRYPTO_AES - select CRYPTO_DES + select CRYPTO_LIB_DES select FW_LOADER config CRYPTO_DEV_NITROX_CNN55XX diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 16c4d5460334..b68b6a7c0a32 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig index d6576280fc9b..1aba9372cd23 100644 --- a/drivers/crypto/stm32/Kconfig +++ b/drivers/crypto/stm32/Kconfig @@ -25,7 +25,7 @@ config CRYPTO_DEV_STM32_CRYP depends on ARCH_STM32 select CRYPTO_HASH select CRYPTO_ENGINE - select CRYPTO_DES + select CRYPTO_LIB_DES help This enables support for the CRYP (AES/DES/TDES) hw accelerator which can be found on STMicroelectronics STM32 SOC. diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig index 349d34eaac13..b1c6f739f77b 100644 --- a/drivers/crypto/ux500/Kconfig +++ b/drivers/crypto/ux500/Kconfig @@ -9,7 +9,7 @@ config CRYPTO_DEV_UX500_CRYP depends on CRYPTO_DEV_UX500 select CRYPTO_ALGAPI select CRYPTO_BLKCIPHER - select CRYPTO_DES + select CRYPTO_LIB_DES help This selects the crypto driver for the UX500_CRYP hardware. It supports AES-ECB, CBC and CTR with keys sizes of 128, 192 and 256 bit sizes. 
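With the DES core split off into a library like this, the per-driver setkey conversions earlier in this series all collapse into the same shape. The sketch below shows that common pattern; the context struct and function names are hypothetical stand-ins, but verify_skcipher_des_key() is the real helper the conversions above call:

#include <linux/string.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>

/* Hypothetical per-tfm context, standing in for the various driver
 * context structs (atmel_tdes_ctx, spacc_ablk_ctx, ...) touched above.
 */
struct example_des_ctx {
	u8 key[DES_KEY_SIZE];
	unsigned int keylen;
};

static int example_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct example_des_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	/* Rejects bad key lengths, and weak keys whenever the user set
	 * CRYPTO_TFM_REQ_FORBID_WEAK_KEYS on the tfm.
	 */
	err = verify_skcipher_des_key(tfm, key);
	if (err)
		return err;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

Drivers that hand off to an existing setkey helper shorten this further, as many of the conversions above do: return verify_skcipher_des_key(tfm, key) ?: example_setkey(tfm, key, keylen); (example_setkey being a hypothetical driver-local helper).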
-- cgit From b395ed4f948a21c27391ed7981e7e210d19c9366 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 16 Aug 2019 08:47:43 +1000 Subject: crypto: hisilicon - Fix warning on printing %p with dma_addr_t This patch fixes a printk format warning by replacing %p with %#llx for dma_addr_t. Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index d72e062a3619..f975c393a603 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -347,8 +347,8 @@ static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, struct qm_mailbox mailbox; int ret = 0; - dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%pad\n", queue, - cmd, dma_addr); + dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n", + queue, cmd, (unsigned long long)dma_addr); mailbox.w0 = cmd | (op ? 0x1 << QM_MB_OP_SHIFT : 0) | -- cgit From 23966841934908ad4ef997231f1fdd1f9a9d0f42 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 16 Aug 2019 17:06:24 +0300 Subject: crypto: vmx/xts - use fallback for ciphertext stealing For correctness and compliance with the XTS-AES specification, we are adding support for ciphertext stealing to XTS implementations, even though no use cases are known that will be enabled by this. Since the Power8 implementation already has a fallback skcipher standby for other purposes, let's use it for this purpose as well. If ciphertext stealing use cases ever become a bottleneck, we can always revisit this. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/vmx/aes_xts.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index 49f7258045fa..d59e736882f6 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c @@ -84,7 +84,7 @@ static int p8_aes_xts_crypt(struct skcipher_request *req, int enc) u8 tweak[AES_BLOCK_SIZE]; int ret; - if (!crypto_simd_usable()) { + if (!crypto_simd_usable() || (req->cryptlen % XTS_BLOCK_SIZE) != 0) { struct skcipher_request *subreq = skcipher_request_ctx(req); *subreq = *req; -- cgit From c963050259f2daccb7141ee58bc0fc0b760836ba Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 22 Aug 2019 14:49:15 +0300 Subject: crypto: n2/des - fix build breakage after DES updates Fix build breakage caused by the DES library refactor. 
Fixes: d4b90dbc8578 ("crypto: n2/des - switch to new verification routines") Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/n2_core.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 4765163df6be..63923cc33727 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -757,7 +757,8 @@ static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen) { - struct n2_cipher_context *ctx = crypto_ablkcipher_ctx(cipher); + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); int err; @@ -775,7 +776,8 @@ static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen) { - struct n2_cipher_context *ctx = crypto_ablkcipher_ctx(cipher); + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); int err; -- cgit From e31b791fae32d01e8d0ee2e731b74d5471293cba Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 22 Aug 2019 22:09:15 +1000 Subject: crypto: talitos - Fix build warning in aead_des3_setkey This patch removes the variable flags which is now unused thanks to the new DES helpers. Fixes: 9d574ae8ebc1 ("crypto: talitos/des - switch to new...") Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 117c831b5ede..cb6c10b1bf36 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -925,7 +925,6 @@ static int aead_des3_setkey(struct crypto_aead *authenc, struct talitos_ctx *ctx = crypto_aead_ctx(authenc); struct device *dev = ctx->dev; struct crypto_authenc_keys keys; - u32 flags; int err; err = crypto_authenc_extractkeys(&keys, key, keylen); -- cgit From 309b77e0f8bfa3126ff12949173e6d45801a968b Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 19 Aug 2019 07:18:33 +0200 Subject: crypto: picoxcell - Fix the name of the module in the description of CRYPTO_DEV_PICOXCELL The help section says that the module will be called 'pipcoxcell_crypto'. This is likely a typo. Use 'picoxcell_crypto' instead Signed-off-by: Christophe JAILLET Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 5cd6e3d12bac..18b48ed675b8 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -393,7 +393,7 @@ config CRYPTO_DEV_PICOXCELL Picochip picoXcell SoC devices. Select this for IPSEC ESP offload and for 3gpp Layer 2 ciphering support. - Saying m here will build a module named pipcoxcell_crypto. + Saying m here will build a module named picoxcell_crypto. 
config CRYPTO_DEV_SAHARA tristate "Support for SAHARA crypto accelerator" -- cgit From 84a0b00aa486c0ab0216771c5cb988e2277b7498 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 19 Aug 2019 17:22:25 +0300 Subject: crypto: s5p - deal gracefully with bogus input sizes The s5p skcipher driver returns -EINVAL for zero length inputs, which deviates from the behavior of the generic ECB template, and causes fuzz tests to fail. In cases where the input is not a multiple of the AES block size (and the chaining mode is not CTR), it prints an error to the kernel log, which is a thing we usually try to avoid in response to situations that can be triggered by unprivileged users. Signed-off-by: Ard Biesheuvel Acked-by: Kamil Konieczny Reviewed-by: Krzysztof Kozlowski Signed-off-by: Herbert Xu --- drivers/crypto/s5p-sss.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index 9ef25230c199..ef90c58edb1f 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -2056,9 +2056,12 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode) struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); struct s5p_aes_dev *dev = ctx->dev; + if (!req->nbytes) + return 0; + if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE) && ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) { - dev_err(dev->dev, "request size is not exact amount of AES blocks\n"); + dev_dbg(dev->dev, "request size is not exact amount of AES blocks\n"); return -EINVAL; } -- cgit From c4624488665fff8edddb00ed0fd00ea4c985d591 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 19 Aug 2019 17:22:26 +0300 Subject: crypto: s5p - use correct block size of 1 for ctr(aes) Align the s5p ctr(aes) implementation with other implementations of the same mode, by setting the block size to 1. Signed-off-by: Ard Biesheuvel Acked-by: Kamil Konieczny Reviewed-by: Krzysztof Kozlowski Signed-off-by: Herbert Xu --- drivers/crypto/s5p-sss.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index ef90c58edb1f..010f1bb20dad 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -2173,7 +2173,7 @@ static struct crypto_alg algs[] = { .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = AES_BLOCK_SIZE, + .cra_blocksize = 1, .cra_ctxsize = sizeof(struct s5p_aes_ctx), .cra_alignmask = 0x0f, .cra_type = &crypto_ablkcipher_type, -- cgit From 0f6e5c8234778f7dadf5789ce85ac1e062ebed9f Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Mon, 19 Aug 2019 16:40:23 +0200 Subject: crypto: inside-secure - make driver selectable for non-Marvell hardware While being a generic EIP97/EIP197 driver, the driver was only selectable for Marvell Armada hardware. This fix makes the driver selectable for any Device Tree supporting kernel configuration, allowing it to be used for other compatible hardware by just adding the correct device tree entry. It also allows the driver to be selected for PCI(E) supporting kernel configurations, to be able to use it with PCIE based FPGA development boards for pre-silicon driver development by both Inside Secure and its IP customers.
Signed-off-by: Pascal van Leeuwen Acked-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 18b48ed675b8..83271d944a96 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -717,8 +717,7 @@ source "drivers/crypto/stm32/Kconfig" config CRYPTO_DEV_SAFEXCEL tristate "Inside Secure's SafeXcel cryptographic engine driver" - depends on OF - depends on (ARM64 && ARCH_MVEBU) || (COMPILE_TEST && 64BIT) + depends on OF || PCI || COMPILE_TEST select CRYPTO_LIB_AES select CRYPTO_AUTHENC select CRYPTO_BLKCIPHER @@ -730,10 +729,11 @@ config CRYPTO_DEV_SAFEXCEL select CRYPTO_SHA256 select CRYPTO_SHA512 help - This driver interfaces with the SafeXcel EIP-197 cryptographic engine - designed by Inside Secure. Select this if you want to use CBC/ECB - chain mode, AES cipher mode and SHA1/SHA224/SHA256/SHA512 hash - algorithms. + This driver interfaces with the SafeXcel EIP-97 and EIP-197 cryptographic + engines designed by Inside Secure. It currently accelerates DES, 3DES and + AES block ciphers in ECB and CBC mode, as well as SHA1, SHA224, SHA256, + SHA384 and SHA512 hash algorithms for both basic hash and HMAC. + Additionally, it accelerates combined AES-CBC/HMAC-SHA AEAD operations. config CRYPTO_DEV_ARTPEC6 tristate "Support for Axis ARTPEC-6/7 hardware crypto acceleration." -- cgit From 70e39e2259a716cce1fecd9974942ee1ed58a9a5 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Mon, 19 Aug 2019 16:40:24 +0200 Subject: crypto: inside-secure - Remove redundant algo to engine mapping code This removes some code to determine which engine has which algorithms, which was effectively redundant (may have been forward-looking?) due to always enabling all algorithms for all currently supported engines. A future patch will use a different, more scalable approach to achieve this. This is removed now because the next patch will add new hardware which would otherwise have to be added to all algorithms, so now is a convenient time to just get rid of this.
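With every currently supported engine enabling the full algorithm set, the per-template engines bitmask and its checks reduce to dead code. As a rough sketch, condensed from the safexcel.c removals below (error handling trimmed; the ahash branch mirrors the two branches visible in the hunks), the registration loop that remains is just:

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

		/* No per-engine bitmask filter any more: every engine
		 * registers every algorithm template.
		 */
		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}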
Signed-off-by: Pascal van Leeuwen Acked-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 9 --------- drivers/crypto/inside-secure/safexcel.h | 1 - drivers/crypto/inside-secure/safexcel_cipher.c | 11 ----------- drivers/crypto/inside-secure/safexcel_hash.c | 12 ------------ 4 files changed, 33 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 822744dc9c21..a5365f299b8c 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -884,9 +884,6 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) { safexcel_algs[i]->priv = priv; - if (!(safexcel_algs[i]->engines & priv->version)) - continue; - if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher); else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD) @@ -902,9 +899,6 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) fail: for (j = 0; j < i; j++) { - if (!(safexcel_algs[j]->engines & priv->version)) - continue; - if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher); else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD) @@ -921,9 +915,6 @@ static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv) int i; for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) { - if (!(safexcel_algs[i]->engines & priv->version)) - continue; - if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher); else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD) diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 6f9875e7a0c0..ea9369c231e2 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -630,7 +630,6 @@ struct safexcel_ahash_export_state { struct safexcel_alg_template { struct safexcel_crypto_priv *priv; enum safexcel_alg_type type; - u32 engines; union { struct skcipher_alg skcipher; struct aead_alg aead; diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index b68b6a7c0a32..8f0fecc0dafa 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -1028,7 +1028,6 @@ static void safexcel_aead_cra_exit(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_ecb_aes = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, - .engines = EIP97IES | EIP197B | EIP197D, .alg.skcipher = { .setkey = safexcel_skcipher_aes_setkey, .encrypt = safexcel_ecb_aes_encrypt, @@ -1067,7 +1066,6 @@ static int safexcel_cbc_aes_decrypt(struct skcipher_request *req) struct safexcel_alg_template safexcel_alg_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, - .engines = EIP97IES | EIP197B | EIP197D, .alg.skcipher = { .setkey = safexcel_skcipher_aes_setkey, .encrypt = safexcel_cbc_aes_encrypt, @@ -1206,7 +1204,6 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key, struct safexcel_alg_template safexcel_alg_cbc_des = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, - .engines = EIP97IES | EIP197B | EIP197D, .alg.skcipher = { .setkey = safexcel_des_setkey, .encrypt = safexcel_cbc_des_encrypt, @@ -1246,7 +1243,6 @@ static int safexcel_ecb_des_decrypt(struct skcipher_request *req) struct 
safexcel_alg_template safexcel_alg_ecb_des = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, - .engines = EIP97IES | EIP197B | EIP197D, .alg.skcipher = { .setkey = safexcel_des_setkey, .encrypt = safexcel_ecb_des_encrypt, @@ -1308,7 +1304,6 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm, struct safexcel_alg_template safexcel_alg_cbc_des3_ede = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, - .engines = EIP97IES | EIP197B | EIP197D, .alg.skcipher = { .setkey = safexcel_des3_ede_setkey, .encrypt = safexcel_cbc_des3_ede_encrypt, @@ -1348,7 +1343,6 @@ static int safexcel_ecb_des3_ede_decrypt(struct skcipher_request *req) struct safexcel_alg_template safexcel_alg_ecb_des3_ede = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, - .engines = EIP97IES | EIP197B | EIP197D, .alg.skcipher = { .setkey = safexcel_des3_ede_setkey, .encrypt = safexcel_ecb_des3_ede_encrypt, @@ -1418,7 +1412,6 @@ static int safexcel_aead_sha1_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, - .engines = EIP97IES | EIP197B | EIP197D, .alg.aead = { .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt_aes, @@ -1453,7 +1446,6 @@ static int safexcel_aead_sha256_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, - .engines = EIP97IES | EIP197B | EIP197D, .alg.aead = { .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt_aes, @@ -1488,7 +1480,6 @@ static int safexcel_aead_sha224_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, - .engines = EIP97IES | EIP197B | EIP197D, .alg.aead = { .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt_aes, @@ -1523,7 +1514,6 @@ static int safexcel_aead_sha512_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, - .engines = EIP97IES | EIP197B | EIP197D, .alg.aead = { .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt_aes, @@ -1558,7 +1548,6 @@ static int safexcel_aead_sha384_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, - .engines = EIP97IES | EIP197B | EIP197D, .alg.aead = { .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt_aes, diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index c1776b6690fc..626dd82e583f 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -845,7 +845,6 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_sha1 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, .alg.ahash = { .init = safexcel_sha1_init, .update = safexcel_ahash_update, @@ -1086,7 +1085,6 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, struct safexcel_alg_template safexcel_alg_hmac_sha1 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, .alg.ahash = { .init = safexcel_hmac_sha1_init, .update = safexcel_ahash_update, @@ -1142,7 +1140,6 @@ static int safexcel_sha256_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_sha256 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, .alg.ahash = { .init = 
safexcel_sha256_init, .update = safexcel_ahash_update, @@ -1197,7 +1194,6 @@ static int safexcel_sha224_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_sha224 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, .alg.ahash = { .init = safexcel_sha224_init, .update = safexcel_ahash_update, @@ -1266,7 +1262,6 @@ static int safexcel_hmac_sha224_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_hmac_sha224 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, .alg.ahash = { .init = safexcel_hmac_sha224_init, .update = safexcel_ahash_update, @@ -1336,7 +1331,6 @@ static int safexcel_hmac_sha256_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_hmac_sha256 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, .alg.ahash = { .init = safexcel_hmac_sha256_init, .update = safexcel_ahash_update, @@ -1392,7 +1386,6 @@ static int safexcel_sha512_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_sha512 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, .alg.ahash = { .init = safexcel_sha512_init, .update = safexcel_ahash_update, @@ -1447,7 +1440,6 @@ static int safexcel_sha384_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_sha384 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, .alg.ahash = { .init = safexcel_sha384_init, .update = safexcel_ahash_update, @@ -1516,7 +1508,6 @@ static int safexcel_hmac_sha512_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_hmac_sha512 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, .alg.ahash = { .init = safexcel_hmac_sha512_init, .update = safexcel_ahash_update, @@ -1586,7 +1577,6 @@ static int safexcel_hmac_sha384_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_hmac_sha384 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, .alg.ahash = { .init = safexcel_hmac_sha384_init, .update = safexcel_ahash_update, @@ -1642,7 +1632,6 @@ static int safexcel_md5_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_md5 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, .alg.ahash = { .init = safexcel_md5_init, .update = safexcel_ahash_update, @@ -1712,7 +1701,6 @@ static int safexcel_hmac_md5_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_hmac_md5 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, .alg.ahash = { .init = safexcel_hmac_md5_init, .update = safexcel_ahash_update, -- cgit From 625f269a5a7a3643771320387e474bd0a61d9654 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Mon, 19 Aug 2019 16:40:25 +0200 Subject: crypto: inside-secure - add support for PCI based FPGA development board This patch adds support for a PCIE development board with FPGA from Xilinx, to facilitate pre-silicon driver development by both Inside Secure and its IP customers. Since Inside Secure neither produces nor has access to actual silicon, this is required functionality to allow us to contribute. 
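Structurally, the patch splits device probing into thin bus-specific front ends plus one shared core that handles the context pool, ring setup, IRQ registration, hardware init and algorithm registration. A minimal sketch of that shape, condensed from the diff below (all setup detail elided; the last argument tells the core whether it was entered from the PCI path):

	static int safexcel_probe_generic(void *pdev,
					  struct safexcel_crypto_priv *priv,
					  int is_pci_dev);

	/* Device Tree front end: map MMIO, grab optional clocks, set DMA mask */
	static int safexcel_probe(struct platform_device *pdev)
	{
		/* ... platform-specific setup ... */
		return safexcel_probe_generic(pdev, priv, 0);
	}

	/* PCI front end: enable the device, map BARs, enable bus mastering */
	static int safexcel_pci_probe(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
	{
		/* ... PCI-specific setup ... */
		return safexcel_probe_generic(pdev, priv, 1);
	}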
Signed-off-by: Pascal van Leeuwen Acked-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 539 +++++++++++++++++++-------- drivers/crypto/inside-secure/safexcel.h | 30 +- drivers/crypto/inside-secure/safexcel_ring.c | 3 +- 3 files changed, 405 insertions(+), 167 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index a5365f299b8c..dfe22d2d7161 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -32,16 +33,17 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv) u32 val, htable_offset; int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc; - if (priv->version == EIP197B) { - cs_rc_max = EIP197B_CS_RC_MAX; - cs_ht_wc = EIP197B_CS_HT_WC; - cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC; - cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC; - } else { + if (priv->version == EIP197D_MRVL) { cs_rc_max = EIP197D_CS_RC_MAX; cs_ht_wc = EIP197D_CS_HT_WC; cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC; cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC; + } else { + /* Default to minimum "safe" settings */ + cs_rc_max = EIP197B_CS_RC_MAX; + cs_ht_wc = EIP197B_CS_HT_WC; + cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC; + cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC; } /* Enable the record cache memory access */ @@ -145,23 +147,19 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv) int i, j, ret = 0, pe; u32 val; - switch (priv->version) { - case EIP197B: - dir = "eip197b"; - break; - case EIP197D: + if (priv->version == EIP197D_MRVL) dir = "eip197d"; - break; - default: - /* No firmware is required */ - return 0; - } + else if (priv->version == EIP197B_MRVL || + priv->version == EIP197_DEVBRD) + dir = "eip197b"; + else + return -ENODEV; for (i = 0; i < FW_NB; i++) { snprintf(fw_path, 31, "inside-secure/%s/%s", dir, fw_name[i]); ret = request_firmware(&fw[i], fw_path, priv->dev); if (ret) { - if (priv->version != EIP197B) + if (priv->version != EIP197B_MRVL) goto release_fw; /* Fallback to the old firmware location for the @@ -294,6 +292,9 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) u32 version, val; int i, ret, pe; + dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n", + priv->config.pes, priv->config.rings); + /* Determine endianess and configure byte swap */ version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION); val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); @@ -303,8 +304,11 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE) val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24); - /* For EIP197 set maximum number of TX commands to 2^5 = 32 */ - if (priv->version == EIP197B || priv->version == EIP197D) + /* + * For EIP197's only set maximum number of TX commands to 2^5 = 32 + * Skip for the EIP97 as it does not have this field. 
+ */ + if (priv->version != EIP97IES_MRVL) val |= EIP197_MST_CTRL_TX_MAX_CMD(5); writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); @@ -330,11 +334,10 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) writel(EIP197_DxE_THR_CTRL_RESET_PE, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe)); - if (priv->version == EIP197B || priv->version == EIP197D) { - /* Reset HIA input interface arbiter */ + if (priv->version != EIP97IES_MRVL) + /* Reset HIA input interface arbiter (EIP197 only) */ writel(EIP197_HIA_RA_PE_CTRL_RESET, EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe)); - } /* DMA transfer size to use */ val = EIP197_HIA_DFE_CFG_DIS_DEBUG; @@ -357,12 +360,11 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) EIP197_PE_IN_xBUF_THRES_MAX(7), EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe)); - if (priv->version == EIP197B || priv->version == EIP197D) { + if (priv->version != EIP97IES_MRVL) /* enable HIA input interface arbiter and rings */ writel(EIP197_HIA_RA_PE_CTRL_EN | GENMASK(priv->config.rings - 1, 0), EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe)); - } /* Data Store Engine configuration */ @@ -381,10 +383,10 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8); val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS); val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE; - /* FIXME: instability issues can occur for EIP97 but disabling it impact - * performances. + /* FIXME: instability issues can occur for EIP97 but disabling + * it impacts performance. */ - if (priv->version == EIP197B || priv->version == EIP197D) + if (priv->version != EIP97IES_MRVL) val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR; writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe)); @@ -471,7 +473,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) /* Clear any HIA interrupt */ writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK); - if (priv->version == EIP197B || priv->version == EIP197D) { + if (priv->version != EIP97IES_MRVL) { eip197_trc_cache_init(priv); ret = eip197_load_firmwares(priv); @@ -719,7 +721,8 @@ handle_results: ndesc = ctx->handle_result(priv, ring, req, &should_complete, &ret); if (ndesc < 0) { - dev_err(priv->dev, "failed to handle result (%d)", ndesc); + dev_err(priv->dev, "failed to handle result (%d)\n", + ndesc); goto acknowledge; } @@ -791,7 +794,7 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data) * reinitialized. This should not happen under * normal circumstances. 
*/ - dev_err(priv->dev, "RDR: fatal error."); + dev_err(priv->dev, "RDR: fatal error.\n"); } else if (likely(stat & EIP197_xDR_THRESH)) { rc = IRQ_WAKE_THREAD; } @@ -821,23 +824,45 @@ static irqreturn_t safexcel_irq_ring_thread(int irq, void *data) return IRQ_HANDLED; } -static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name, +static int safexcel_request_ring_irq(void *pdev, int irqid, + int is_pci_dev, irq_handler_t handler, irq_handler_t threaded_handler, struct safexcel_ring_irq_data *ring_irq_priv) { - int ret, irq = platform_get_irq_byname(pdev, name); + int ret, irq; + struct device *dev; + + if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) { + struct pci_dev *pci_pdev = pdev; - if (irq < 0) { - dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name); - return irq; + dev = &pci_pdev->dev; + irq = pci_irq_vector(pci_pdev, irqid); + if (irq < 0) { + dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n", + irqid, irq); + return irq; + } + } else if (IS_ENABLED(CONFIG_OF)) { + struct platform_device *plf_pdev = pdev; + char irq_name[6] = {0}; /* "ringX\0" */ + + snprintf(irq_name, 6, "ring%d", irqid); + dev = &plf_pdev->dev; + irq = platform_get_irq_byname(plf_pdev, irq_name); + + if (irq < 0) { + dev_err(dev, "unable to get IRQ '%s' (err %d)\n", + irq_name, irq); + return irq; + } } - ret = devm_request_threaded_irq(&pdev->dev, irq, handler, + ret = devm_request_threaded_irq(dev, irq, handler, threaded_handler, IRQF_ONESHOT, - dev_name(&pdev->dev), ring_irq_priv); + dev_name(dev), ring_irq_priv); if (ret) { - dev_err(&pdev->dev, "unable to request IRQ %d\n", irq); + dev_err(dev, "unable to request IRQ %d\n", irq); return ret; } @@ -931,22 +956,20 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv) val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); /* Read number of PEs from the engine */ - switch (priv->version) { - case EIP197B: - case EIP197D: - mask = EIP197_N_PES_MASK; - break; - default: + if (priv->version == EIP97IES_MRVL) + /* Narrow field width for EIP97 type engine */ mask = EIP97_N_PES_MASK; - } + else + /* Wider field width for all EIP197 type engines */ + mask = EIP197_N_PES_MASK; + priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask; + priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings); + val = (val & GENMASK(27, 25)) >> 25; mask = BIT(val) - 1; - val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); - priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings); - priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32)); priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask; @@ -958,21 +981,7 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv) { struct safexcel_register_offsets *offsets = &priv->offsets; - switch (priv->version) { - case EIP197B: - case EIP197D: - offsets->hia_aic = EIP197_HIA_AIC_BASE; - offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE; - offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE; - offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE; - offsets->hia_dfe = EIP197_HIA_DFE_BASE; - offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE; - offsets->hia_dse = EIP197_HIA_DSE_BASE; - offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE; - offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE; - offsets->pe = EIP197_PE_BASE; - break; - case EIP97IES: + if (priv->version == EIP97IES_MRVL) { offsets->hia_aic = EIP97_HIA_AIC_BASE; offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE; offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE; @@ -983,133 +992,119 @@ static void 
safexcel_init_register_offsets(struct safexcel_crypto_priv *priv) offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE; offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE; offsets->pe = EIP97_PE_BASE; - break; + } else { + offsets->hia_aic = EIP197_HIA_AIC_BASE; + offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE; + offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE; + offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE; + offsets->hia_dfe = EIP197_HIA_DFE_BASE; + offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE; + offsets->hia_dse = EIP197_HIA_DSE_BASE; + offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE; + offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE; + offsets->pe = EIP197_PE_BASE; } } -static int safexcel_probe(struct platform_device *pdev) +/* + * Generic part of probe routine, shared by platform and PCI driver + * + * Assumes IO resources have been mapped, private data mem has been allocated, + * clocks have been enabled, device pointer has been assigned etc. + * + */ +static int safexcel_probe_generic(void *pdev, + struct safexcel_crypto_priv *priv, + int is_pci_dev) { - struct device *dev = &pdev->dev; - struct safexcel_crypto_priv *priv; + struct device *dev = priv->dev; int i, ret; - priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); - if (!priv) + priv->context_pool = dmam_pool_create("safexcel-context", dev, + sizeof(struct safexcel_context_record), + 1, 0); + if (!priv->context_pool) return -ENOMEM; - priv->dev = dev; - priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev); - - if (priv->version == EIP197B || priv->version == EIP197D) - priv->flags |= EIP197_TRC_CACHE; - safexcel_init_register_offsets(priv); - priv->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(priv->base)) { - dev_err(dev, "failed to get resource\n"); - return PTR_ERR(priv->base); - } + if (priv->version != EIP97IES_MRVL) + priv->flags |= EIP197_TRC_CACHE; - priv->clk = devm_clk_get(&pdev->dev, NULL); - ret = PTR_ERR_OR_ZERO(priv->clk); - /* The clock isn't mandatory */ - if (ret != -ENOENT) { - if (ret) - return ret; + safexcel_configure(priv); - ret = clk_prepare_enable(priv->clk); - if (ret) { - dev_err(dev, "unable to enable clk (%d)\n", ret); + if (IS_ENABLED(CONFIG_PCI) && priv->version == EIP197_DEVBRD) { + /* + * Request MSI vectors for global + 1 per ring - + * or just 1 for older dev images + */ + struct pci_dev *pci_pdev = pdev; + + ret = pci_alloc_irq_vectors(pci_pdev, + priv->config.rings + 1, + priv->config.rings + 1, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (ret < 0) { + dev_err(dev, "Failed to allocate PCI MSI interrupts\n"); return ret; } } - priv->reg_clk = devm_clk_get(&pdev->dev, "reg"); - ret = PTR_ERR_OR_ZERO(priv->reg_clk); - /* The clock isn't mandatory */ - if (ret != -ENOENT) { - if (ret) - goto err_core_clk; - - ret = clk_prepare_enable(priv->reg_clk); - if (ret) { - dev_err(dev, "unable to enable reg clk (%d)\n", ret); - goto err_core_clk; - } - } - - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); - if (ret) - goto err_reg_clk; - - priv->context_pool = dmam_pool_create("safexcel-context", dev, - sizeof(struct safexcel_context_record), - 1, 0); - if (!priv->context_pool) { - ret = -ENOMEM; - goto err_reg_clk; - } - - safexcel_configure(priv); - + /* Register the ring IRQ handlers and configure the rings */ priv->ring = devm_kcalloc(dev, priv->config.rings, sizeof(*priv->ring), GFP_KERNEL); - if (!priv->ring) { - ret = -ENOMEM; - goto err_reg_clk; - } + if (!priv->ring) + return -ENOMEM; for (i = 0; i < priv->config.rings; i++) { - char irq_name[6] = {0}; /* "ringX\0" */ - char 
wq_name[9] = {0}; /* "wq_ringX\0" */ + char wq_name[9] = {0}; int irq; struct safexcel_ring_irq_data *ring_irq; ret = safexcel_init_ring_descriptors(priv, &priv->ring[i].cdr, &priv->ring[i].rdr); - if (ret) - goto err_reg_clk; + if (ret) { + dev_err(dev, "Failed to initialize rings\n"); + return ret; + } priv->ring[i].rdr_req = devm_kcalloc(dev, EIP197_DEFAULT_RING_SIZE, sizeof(priv->ring[i].rdr_req), GFP_KERNEL); - if (!priv->ring[i].rdr_req) { - ret = -ENOMEM; - goto err_reg_clk; - } + if (!priv->ring[i].rdr_req) + return -ENOMEM; ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL); - if (!ring_irq) { - ret = -ENOMEM; - goto err_reg_clk; - } + if (!ring_irq) + return -ENOMEM; ring_irq->priv = priv; ring_irq->ring = i; - snprintf(irq_name, 6, "ring%d", i); - irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring, + irq = safexcel_request_ring_irq(pdev, + EIP197_IRQ_NUMBER(i, is_pci_dev), + is_pci_dev, + safexcel_irq_ring, safexcel_irq_ring_thread, ring_irq); if (irq < 0) { - ret = irq; - goto err_reg_clk; + dev_err(dev, "Failed to get IRQ ID for ring %d\n", i); + return irq; } priv->ring[i].work_data.priv = priv; priv->ring[i].work_data.ring = i; - INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work); + INIT_WORK(&priv->ring[i].work_data.work, + safexcel_dequeue_work); snprintf(wq_name, 9, "wq_ring%d", i); - priv->ring[i].workqueue = create_singlethread_workqueue(wq_name); - if (!priv->ring[i].workqueue) { - ret = -ENOMEM; - goto err_reg_clk; - } + priv->ring[i].workqueue = + create_singlethread_workqueue(wq_name); + if (!priv->ring[i].workqueue) + return -ENOMEM; priv->ring[i].requests = 0; priv->ring[i].busy = false; @@ -1121,28 +1116,21 @@ static int safexcel_probe(struct platform_device *pdev) spin_lock_init(&priv->ring[i].queue_lock); } - platform_set_drvdata(pdev, priv); atomic_set(&priv->ring_used, 0); ret = safexcel_hw_init(priv); if (ret) { - dev_err(dev, "EIP h/w init failed (%d)\n", ret); - goto err_reg_clk; + dev_err(dev, "HW init failed (%d)\n", ret); + return ret; } ret = safexcel_register_algorithms(priv); if (ret) { dev_err(dev, "Failed to register algorithms (%d)\n", ret); - goto err_reg_clk; + return ret; } return 0; - -err_reg_clk: - clk_disable_unprepare(priv->reg_clk); -err_core_clk: - clk_disable_unprepare(priv->clk); - return ret; } static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv) @@ -1164,6 +1152,76 @@ static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv) } } +#if IS_ENABLED(CONFIG_OF) +/* for Device Tree platform driver */ + +static int safexcel_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct safexcel_crypto_priv *priv; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = dev; + priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev); + + platform_set_drvdata(pdev, priv); + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) { + dev_err(dev, "failed to get resource\n"); + return PTR_ERR(priv->base); + } + + priv->clk = devm_clk_get(&pdev->dev, NULL); + ret = PTR_ERR_OR_ZERO(priv->clk); + /* The clock isn't mandatory */ + if (ret != -ENOENT) { + if (ret) + return ret; + + ret = clk_prepare_enable(priv->clk); + if (ret) { + dev_err(dev, "unable to enable clk (%d)\n", ret); + return ret; + } + } + + priv->reg_clk = devm_clk_get(&pdev->dev, "reg"); + ret = PTR_ERR_OR_ZERO(priv->reg_clk); + /* The clock isn't mandatory */ + if (ret != -ENOENT) { + if (ret) + 
goto err_core_clk; + + ret = clk_prepare_enable(priv->reg_clk); + if (ret) { + dev_err(dev, "unable to enable reg clk (%d)\n", ret); + goto err_core_clk; + } + } + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (ret) + goto err_reg_clk; + + /* Generic EIP97/EIP197 device probing */ + ret = safexcel_probe_generic(pdev, priv, 0); + if (ret) + goto err_reg_clk; + + return 0; + +err_reg_clk: + clk_disable_unprepare(priv->reg_clk); +err_core_clk: + clk_disable_unprepare(priv->clk); + return ret; +} + static int safexcel_remove(struct platform_device *pdev) { struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev); @@ -1183,30 +1241,28 @@ static int safexcel_remove(struct platform_device *pdev) static const struct of_device_id safexcel_of_match_table[] = { { .compatible = "inside-secure,safexcel-eip97ies", - .data = (void *)EIP97IES, + .data = (void *)EIP97IES_MRVL, }, { .compatible = "inside-secure,safexcel-eip197b", - .data = (void *)EIP197B, + .data = (void *)EIP197B_MRVL, }, { .compatible = "inside-secure,safexcel-eip197d", - .data = (void *)EIP197D, + .data = (void *)EIP197D_MRVL, }, + /* For backward compatibility and intended for generic use */ { - /* Deprecated. Kept for backward compatibility. */ .compatible = "inside-secure,safexcel-eip97", - .data = (void *)EIP97IES, + .data = (void *)EIP97IES_MRVL, }, { - /* Deprecated. Kept for backward compatibility. */ .compatible = "inside-secure,safexcel-eip197", - .data = (void *)EIP197B, + .data = (void *)EIP197B_MRVL, }, {}, }; - static struct platform_driver crypto_safexcel = { .probe = safexcel_probe, .remove = safexcel_remove, @@ -1215,10 +1271,167 @@ static struct platform_driver crypto_safexcel = { .of_match_table = safexcel_of_match_table, }, }; -module_platform_driver(crypto_safexcel); +#endif + +#if IS_ENABLED(CONFIG_PCI) +/* PCIE devices - i.e. 
Inside Secure development boards */ + +static int safexcel_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct device *dev = &pdev->dev; + struct safexcel_crypto_priv *priv; + void __iomem *pciebase; + int rc; + u32 val; + + dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n", + ent->vendor, ent->device, ent->subvendor, + ent->subdevice, ent->driver_data); + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = dev; + priv->version = (enum safexcel_eip_version)ent->driver_data; + + pci_set_drvdata(pdev, priv); + + /* enable the device */ + rc = pcim_enable_device(pdev); + if (rc) { + dev_err(dev, "Failed to enable PCI device\n"); + return rc; + } + + /* take ownership of PCI BAR0 */ + rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel"); + if (rc) { + dev_err(dev, "Failed to map IO region for BAR0\n"); + return rc; + } + priv->base = pcim_iomap_table(pdev)[0]; + + if (priv->version == EIP197_DEVBRD) { + dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n"); + + rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel"); + if (rc) { + dev_err(dev, "Failed to map IO region for BAR4\n"); + return rc; + } + + pciebase = pcim_iomap_table(pdev)[2]; + val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR); + if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) { + dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n", + (val & 0xff)); + + /* Setup MSI identity map mapping */ + writel(EIP197_XLX_USER_VECT_LUT0_IDENT, + pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR); + writel(EIP197_XLX_USER_VECT_LUT1_IDENT, + pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR); + writel(EIP197_XLX_USER_VECT_LUT2_IDENT, + pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR); + writel(EIP197_XLX_USER_VECT_LUT3_IDENT, + pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR); + + /* Enable all device interrupts */ + writel(GENMASK(31, 0), + pciebase + EIP197_XLX_USER_INT_ENB_MSK); + } else { + dev_err(dev, "Unrecognised IRQ block identifier %x\n", + val); + return -ENODEV; + } + + /* HW reset FPGA dev board */ + /* assert reset */ + writel(1, priv->base + EIP197_XLX_GPIO_BASE); + wmb(); /* maintain strict ordering for accesses here */ + /* deassert reset */ + writel(0, priv->base + EIP197_XLX_GPIO_BASE); + wmb(); /* maintain strict ordering for accesses here */ + } + + /* enable bus mastering */ + pci_set_master(pdev); + + /* Generic EIP97/EIP197 device probing */ + rc = safexcel_probe_generic(pdev, priv, 1); + return rc; +} + +void safexcel_pci_remove(struct pci_dev *pdev) +{ + struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev); + int i; + + safexcel_unregister_algorithms(priv); + + for (i = 0; i < priv->config.rings; i++) + destroy_workqueue(priv->ring[i].workqueue); + + safexcel_hw_reset_rings(priv); +} + +static const struct pci_device_id safexcel_pci_ids[] = { + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038, + 0x16ae, 0xc522), + /* assume EIP197B for now */ + .driver_data = EIP197_DEVBRD, + }, + {}, +}; + +MODULE_DEVICE_TABLE(pci, safexcel_pci_ids); + +static struct pci_driver safexcel_pci_driver = { + .name = "crypto-safexcel", + .id_table = safexcel_pci_ids, + .probe = safexcel_pci_probe, + .remove = safexcel_pci_remove, +}; +#endif + +static int __init safexcel_init(void) +{ + int rc; + +#if IS_ENABLED(CONFIG_OF) + /* Register platform driver */ + platform_driver_register(&crypto_safexcel); +#endif + +#if IS_ENABLED(CONFIG_PCI) + /* Register PCI driver */ + rc = 
pci_register_driver(&safexcel_pci_driver); +#endif + + return 0; +} + +static void __exit safexcel_exit(void) +{ +#if IS_ENABLED(CONFIG_OF) + /* Unregister platform driver */ + platform_driver_unregister(&crypto_safexcel); +#endif + +#if IS_ENABLED(CONFIG_PCI) + /* Unregister PCI driver if successfully registered before */ + pci_unregister_driver(&safexcel_pci_driver); +#endif +} + +module_init(safexcel_init); +module_exit(safexcel_exit); MODULE_AUTHOR("Antoine Tenart "); MODULE_AUTHOR("Ofer Heifetz "); MODULE_AUTHOR("Igal Liberman "); -MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197"); +MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index ea9369c231e2..2db2a9636eb7 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -38,6 +38,27 @@ char __##name##_desc[size] CRYPTO_MINALIGN_ATTR; \ struct type##_request *name = (void *)__##name##_desc +/* Xilinx dev board base offsets */ +#define EIP197_XLX_GPIO_BASE 0x200000 +#define EIP197_XLX_IRQ_BLOCK_ID_ADDR 0x2000 +#define EIP197_XLX_IRQ_BLOCK_ID_VALUE 0x1fc2 +#define EIP197_XLX_USER_INT_ENB_MSK 0x2004 +#define EIP197_XLX_USER_INT_ENB_SET 0x2008 +#define EIP197_XLX_USER_INT_ENB_CLEAR 0x200c +#define EIP197_XLX_USER_INT_BLOCK 0x2040 +#define EIP197_XLX_USER_INT_PEND 0x2048 +#define EIP197_XLX_USER_VECT_LUT0_ADDR 0x2080 +#define EIP197_XLX_USER_VECT_LUT0_IDENT 0x03020100 +#define EIP197_XLX_USER_VECT_LUT1_ADDR 0x2084 +#define EIP197_XLX_USER_VECT_LUT1_IDENT 0x07060504 +#define EIP197_XLX_USER_VECT_LUT2_ADDR 0x2088 +#define EIP197_XLX_USER_VECT_LUT2_IDENT 0x0b0a0908 +#define EIP197_XLX_USER_VECT_LUT3_ADDR 0x208c +#define EIP197_XLX_USER_VECT_LUT3_IDENT 0x0f0e0d0c + +/* Helper defines for probe function */ +#define EIP197_IRQ_NUMBER(i, is_pci) (i + is_pci) + /* Register base offsets */ #define EIP197_HIA_AIC(priv) ((priv)->base + (priv)->offsets.hia_aic) #define EIP197_HIA_AIC_G(priv) ((priv)->base + (priv)->offsets.hia_aic_g) @@ -554,10 +575,13 @@ struct safexcel_ring { struct crypto_async_request *backlog; }; +/* EIP integration context flags */ enum safexcel_eip_version { - EIP97IES = BIT(0), - EIP197B = BIT(1), - EIP197D = BIT(2), + /* Platform (EIP integration context) specifier */ + EIP97IES_MRVL, + EIP197B_MRVL, + EIP197D_MRVL, + EIP197_DEVBRD }; struct safexcel_register_offsets { diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c index 142bc3f5c45c..2402a623759a 100644 --- a/drivers/crypto/inside-secure/safexcel_ring.c +++ b/drivers/crypto/inside-secure/safexcel_ring.c @@ -145,7 +145,8 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr (lower_32_bits(context) & GENMASK(31, 2)) >> 2; cdesc->control_data.context_hi = upper_32_bits(context); - if (priv->version == EIP197B || priv->version == EIP197D) + if (priv->version == EIP197B_MRVL || + priv->version == EIP197D_MRVL) cdesc->control_data.options |= EIP197_OPTION_RC_AUTO; /* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */ -- cgit From f6cc45c854da0c964d1541712bb3326c72020a1c Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Mon, 19 Aug 2019 16:40:26 +0200 Subject: crypto: inside-secure - add support for using the EIP197 without vendor firmware Until now, the inside-secure driver required a set of firmware images supplied by the silicon vendor, typically under NDA, to be 
present in /lib/firmware/inside-secure in order to be able to function. This patch removes the dependence on this official vendor firmware by falling back to generic "mini" FW - developed specifically for this driver - that can be provided under GPL 2.0 through linux-firmwares. Signed-off-by: Pascal van Leeuwen Acked-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 194 ++++++++++++++++++++++++-------- drivers/crypto/inside-secure/safexcel.h | 12 ++ 2 files changed, 161 insertions(+), 45 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index dfe22d2d7161..e12a2a3a5422 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -108,44 +108,143 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv) writel(val, priv->base + EIP197_TRC_PARAMS); } -static void eip197_write_firmware(struct safexcel_crypto_priv *priv, - const struct firmware *fw, int pe, u32 ctrl, - u32 prog_en) +static void eip197_init_firmware(struct safexcel_crypto_priv *priv) { - const u32 *data = (const u32 *)fw->data; + int pe, i; u32 val; - int i; - /* Reset the engine to make its program memory accessible */ - writel(EIP197_PE_ICE_x_CTRL_SW_RESET | - EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR | - EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR, - EIP197_PE(priv) + ctrl); + for (pe = 0; pe < priv->config.pes; pe++) { + /* Configure the token FIFO's */ + writel(3, EIP197_PE(priv) + EIP197_PE_ICE_PUTF_CTRL(pe)); + writel(0, EIP197_PE(priv) + EIP197_PE_ICE_PPTF_CTRL(pe)); + + /* Clear the ICE scratchpad memory */ + val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe)); + val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER | + EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN | + EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS | + EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS; + writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe)); + + /* clear the scratchpad RAM using 32 bit writes only */ + for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++) + writel(0, EIP197_PE(priv) + + EIP197_PE_ICE_SCRATCH_RAM(pe) + (i<<2)); + + /* Reset the IFPP engine to make its program mem accessible */ + writel(EIP197_PE_ICE_x_CTRL_SW_RESET | + EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR | + EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR, + EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe)); + + /* Reset the IPUE engine to make its program mem accessible */ + writel(EIP197_PE_ICE_x_CTRL_SW_RESET | + EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR | + EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR, + EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe)); + + /* Enable access to all IFPP program memories */ + writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN, + EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe)); + } + +} - /* Enable access to the program memory */ - writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe)); +static int eip197_write_firmware(struct safexcel_crypto_priv *priv, + const struct firmware *fw) +{ + const u32 *data = (const u32 *)fw->data; + int i; /* Write the firmware */ for (i = 0; i < fw->size / sizeof(u32); i++) writel(be32_to_cpu(data[i]), priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32)); - /* Disable access to the program memory */ - writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe)); + /* Exclude final 2 NOPs from size */ + return i - EIP197_FW_TERMINAL_NOPS; +} + +/* + * If FW is actual production firmware, then poll for its initialization + * to complete and check if it is good for the HW, otherwise just return OK. 
+ */ +static bool poll_fw_ready(struct safexcel_crypto_priv *priv, int fpp) +{ + int pe, pollcnt; + u32 base, pollofs; + + if (fpp) + pollofs = EIP197_FW_FPP_READY; + else + pollofs = EIP197_FW_PUE_READY; - /* Release engine from reset */ - val = readl(EIP197_PE(priv) + ctrl); - val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET; - writel(val, EIP197_PE(priv) + ctrl); + for (pe = 0; pe < priv->config.pes; pe++) { + base = EIP197_PE_ICE_SCRATCH_RAM(pe); + pollcnt = EIP197_FW_START_POLLCNT; + while (pollcnt && + (readl_relaxed(EIP197_PE(priv) + base + + pollofs) != 1)) { + pollcnt--; + } + if (!pollcnt) { + dev_err(priv->dev, "FW(%d) for PE %d failed to start\n", + fpp, pe); + return false; + } + } + return true; +} + +static bool eip197_start_firmware(struct safexcel_crypto_priv *priv, + int ipuesz, int ifppsz, int minifw) +{ + int pe; + u32 val; + + for (pe = 0; pe < priv->config.pes; pe++) { + /* Disable access to all program memory */ + writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe)); + + /* Start IFPP microengines */ + if (minifw) + val = 0; + else + val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) & + EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) | + EIP197_PE_ICE_UENG_DEBUG_RESET; + writel(val, EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe)); + + /* Start IPUE microengines */ + if (minifw) + val = 0; + else + val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) & + EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) | + EIP197_PE_ICE_UENG_DEBUG_RESET; + writel(val, EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe)); + } + + /* For miniFW startup, there is no initialization, so always succeed */ + if (minifw) + return true; + + /* Wait until all the firmwares have properly started up */ + if (!poll_fw_ready(priv, 1)) + return false; + if (!poll_fw_ready(priv, 0)) + return false; + + return true; } static int eip197_load_firmwares(struct safexcel_crypto_priv *priv) { const char *fw_name[] = {"ifpp.bin", "ipue.bin"}; const struct firmware *fw[FW_NB]; - char fw_path[31], *dir = NULL; + char fw_path[37], *dir = NULL; int i, j, ret = 0, pe; - u32 val; + int ipuesz, ifppsz, minifw = 0; if (priv->version == EIP197D_MRVL) dir = "eip197d"; @@ -155,51 +254,56 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv) else return -ENODEV; +retry_fw: for (i = 0; i < FW_NB; i++) { - snprintf(fw_path, 31, "inside-secure/%s/%s", dir, fw_name[i]); - ret = request_firmware(&fw[i], fw_path, priv->dev); + snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]); + ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev); if (ret) { - if (priv->version != EIP197B_MRVL) + if (minifw || priv->version != EIP197B_MRVL) goto release_fw; /* Fallback to the old firmware location for the * EIP197b. 
*/ - ret = request_firmware(&fw[i], fw_name[i], priv->dev); - if (ret) { - dev_err(priv->dev, - "Failed to request firmware %s (%d)\n", - fw_name[i], ret); + ret = firmware_request_nowarn(&fw[i], fw_name[i], + priv->dev); + if (ret) goto release_fw; - } } } - for (pe = 0; pe < priv->config.pes; pe++) { - /* Clear the scratchpad memory */ - val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe)); - val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER | - EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN | - EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS | - EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS; - writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe)); + eip197_init_firmware(priv); - memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0, - EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32)); + ifppsz = eip197_write_firmware(priv, fw[FW_IFPP]); - eip197_write_firmware(priv, fw[FW_IFPP], pe, - EIP197_PE_ICE_FPP_CTRL(pe), - EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN); + /* Enable access to IPUE program memories */ + for (pe = 0; pe < priv->config.pes; pe++) + writel(EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN, + EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe)); - eip197_write_firmware(priv, fw[FW_IPUE], pe, - EIP197_PE_ICE_PUE_CTRL(pe), - EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN); + ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]); + + if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) { + dev_dbg(priv->dev, "Firmware loaded successfully"); + return 0; } + ret = -ENODEV; + release_fw: for (j = 0; j < i; j++) release_firmware(fw[j]); + if (!minifw) { + /* Retry with minifw path */ + dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n"); + dir = "eip197_minifw"; + minifw = 1; + goto retry_fw; + } + + dev_dbg(priv->dev, "Firmware load failed.\n"); + return ret; } diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 2db2a9636eb7..33e5f663c249 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -136,8 +136,10 @@ #define EIP197_PE_IN_TBUF_THRES(n) (0x0100 + (0x2000 * (n))) #define EIP197_PE_ICE_SCRATCH_RAM(n) (0x0800 + (0x2000 * (n))) #define EIP197_PE_ICE_PUE_CTRL(n) (0x0c80 + (0x2000 * (n))) +#define EIP197_PE_ICE_PUTF_CTRL(n) (0x0d00 + (0x2000 * (n))) #define EIP197_PE_ICE_SCRATCH_CTRL(n) (0x0d04 + (0x2000 * (n))) #define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n))) +#define EIP197_PE_ICE_PPTF_CTRL(n) (0x0e00 + (0x2000 * (n))) #define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n))) #define EIP197_PE_EIP96_TOKEN_CTRL(n) (0x1000 + (0x2000 * (n))) #define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n))) @@ -228,6 +230,11 @@ #define EIP197_DxE_THR_CTRL_EN BIT(30) #define EIP197_DxE_THR_CTRL_RESET_PE BIT(31) +/* EIP197_PE_ICE_PUE/FPP_CTRL */ +#define EIP197_PE_ICE_UENG_START_OFFSET(n) ((n) << 16) +#define EIP197_PE_ICE_UENG_INIT_ALIGN_MASK 0x7ff0 +#define EIP197_PE_ICE_UENG_DEBUG_RESET BIT(3) + /* EIP197_HIA_AIC_G_ENABLED_STAT */ #define EIP197_G_IRQ_DFE(n) BIT((n) << 1) #define EIP197_G_IRQ_DSE(n) BIT(((n) << 1) + 1) @@ -503,6 +510,11 @@ struct safexcel_command_desc { * Internal structures & functions */ +#define EIP197_FW_TERMINAL_NOPS 2 +#define EIP197_FW_START_POLLCNT 16 +#define EIP197_FW_PUE_READY 0x14 +#define EIP197_FW_FPP_READY 0x18 + enum eip197_fw { FW_IFPP = 0, FW_IPUE, -- cgit From 671e50384ee6267052c5d613dc84a72b7fe9ef4b Mon Sep 17 00:00:00 2001 From: Iuliana Prodan Date: Tue, 20 Aug 2019 14:26:39 +0300 Subject: crypto: caam/qi - use print_hex_dump_debug 
function to print debug messages Use print_hex_dump_debug function to print debug messages, instead of print_hex_dump inside #ifdef DEBUG. Fixes: 6e005503199b ("crypto: caam - print debug messages at debug level") Signed-off-by: Iuliana Prodan Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg_qi.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 932643c88a5d..8e3449670d2f 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -234,11 +234,10 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key, dma_sync_single_for_device(jrdev->parent, ctx->key_dma, ctx->adata.keylen_pad + keys.enckeylen, ctx->dir); -#ifdef DEBUG - print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, - ctx->adata.keylen_pad + keys.enckeylen, 1); -#endif + + print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, + ctx->adata.keylen_pad + keys.enckeylen, 1); skip_split_key: ctx->cdata.keylen = keys.enckeylen; -- cgit From 70c0cda27a10df44e81887bc4cb10f587236a941 Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Tue, 20 Aug 2019 13:23:47 -0700 Subject: crypto: caam - move DMA mask selection into a function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Exactly the same code to figure out DMA mask is repeated twice in the driver code. To avoid repetition, move that logic into a standalone subroutine in intern.h. While at it re-shuffle the code to make it more readable with early returns. Signed-off-by: Andrey Smirnov Reviewed-by: Horia Geantă Cc: Chris Spencer Cc: Cory Tusar Cc: Chris Healy Cc: Lucas Stach Cc: Horia Geantă Cc: Aymen Sghaier Cc: Leonard Crestez Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 11 +---------- drivers/crypto/caam/intern.h | 20 ++++++++++++++++++++ drivers/crypto/caam/jr.c | 15 +-------------- 3 files changed, 22 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index e0590beae240..50336494f285 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -711,16 +711,7 @@ static int caam_probe(struct platform_device *pdev) JRSTART_JR1_START | JRSTART_JR2_START | JRSTART_JR3_START); - if (sizeof(dma_addr_t) == sizeof(u64)) { - if (caam_dpaa2) - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49)); - else if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); - else - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36)); - } else { - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); - } + ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev)); if (ret) { dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); goto disable_caam_emi_slow; diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index 6af84bbc612c..ec25d260fa40 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h @@ -10,6 +10,8 @@ #ifndef INTERN_H #define INTERN_H +#include "ctrl.h" + /* Currently comes from Kconfig param as a ^2 (driver-required) */ #define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE) @@ -215,4 +217,22 @@ DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n"); 
DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n"); #endif +static inline u64 caam_get_dma_mask(struct device *dev) +{ + struct device_node *nprop = dev->of_node; + + if (sizeof(dma_addr_t) != sizeof(u64)) + return DMA_BIT_MASK(32); + + if (caam_dpaa2) + return DMA_BIT_MASK(49); + + if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring") || + of_device_is_compatible(nprop, "fsl,sec-v5.0")) + return DMA_BIT_MASK(40); + + return DMA_BIT_MASK(36); +} + + #endif /* INTERN_H */ diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index cea811fed320..4b25b2fa3d02 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -543,20 +543,7 @@ static int caam_jr_probe(struct platform_device *pdev) jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl; - if (sizeof(dma_addr_t) == sizeof(u64)) { - if (caam_dpaa2) - error = dma_set_mask_and_coherent(jrdev, - DMA_BIT_MASK(49)); - else if (of_device_is_compatible(nprop, - "fsl,sec-v5.0-job-ring")) - error = dma_set_mask_and_coherent(jrdev, - DMA_BIT_MASK(40)); - else - error = dma_set_mask_and_coherent(jrdev, - DMA_BIT_MASK(36)); - } else { - error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32)); - } + error = dma_set_mask_and_coherent(jrdev, caam_get_dma_mask(jrdev)); if (error) { dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n", error); -- cgit From 51e002e949d45b1d645681c4c2b8612174537221 Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Tue, 20 Aug 2019 13:23:48 -0700 Subject: crypto: caam - simplify clock initialization MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Simplify clock initialization code by converting it to use clk-bulk, devres and a soc_device_match() match table. No functional change intended. Signed-off-by: Andrey Smirnov Reviewed-by: Leonard Crestez Tested-by: Iuliana Prodan Cc: Chris Spencer Cc: Cory Tusar Cc: Chris Healy Cc: Lucas Stach Cc: Horia Geantă Cc: Aymen Sghaier Cc: Leonard Crestez Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 198 ++++++++++++++++++++----------------- drivers/crypto/caam/intern.h | 7 +- 2 files changed, 95 insertions(+), 110 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 50336494f285..0b4007068c31 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -25,16 +25,6 @@ EXPORT_SYMBOL(caam_dpaa2); #include "qi.h" #endif -/* - * i.MX targets tend to have clock control subsystems that can - * enable/disable clocking to our device. - */ -static inline struct clk *caam_drv_identify_clk(struct device *dev, - char *clk_name) -{ - return caam_imx ?
devm_clk_get(dev, clk_name) : NULL; -} - /* * Descriptor to instantiate RNG State Handle 0 in normal mode and * load the JDKEK, TDKEK and TDSK registers @@ -342,13 +332,6 @@ static int caam_remove(struct platform_device *pdev) /* Unmap controller region */ iounmap(ctrl); - /* shut clocks off before finalizing shutdown */ - clk_disable_unprepare(ctrlpriv->caam_ipg); - if (ctrlpriv->caam_mem) - clk_disable_unprepare(ctrlpriv->caam_mem); - clk_disable_unprepare(ctrlpriv->caam_aclk); - if (ctrlpriv->caam_emi_slow) - clk_disable_unprepare(ctrlpriv->caam_emi_slow); return 0; } @@ -497,20 +480,98 @@ static const struct of_device_id caam_match[] = { }; MODULE_DEVICE_TABLE(of, caam_match); +struct caam_imx_data { + const struct clk_bulk_data *clks; + int num_clks; +}; + +static const struct clk_bulk_data caam_imx6_clks[] = { + { .id = "ipg" }, + { .id = "mem" }, + { .id = "aclk" }, + { .id = "emi_slow" }, +}; + +static const struct caam_imx_data caam_imx6_data = { + .clks = caam_imx6_clks, + .num_clks = ARRAY_SIZE(caam_imx6_clks), +}; + +static const struct clk_bulk_data caam_imx7_clks[] = { + { .id = "ipg" }, + { .id = "aclk" }, +}; + +static const struct caam_imx_data caam_imx7_data = { + .clks = caam_imx7_clks, + .num_clks = ARRAY_SIZE(caam_imx7_clks), +}; + +static const struct clk_bulk_data caam_imx6ul_clks[] = { + { .id = "ipg" }, + { .id = "mem" }, + { .id = "aclk" }, +}; + +static const struct caam_imx_data caam_imx6ul_data = { + .clks = caam_imx6ul_clks, + .num_clks = ARRAY_SIZE(caam_imx6ul_clks), +}; + +static const struct soc_device_attribute caam_imx_soc_table[] = { + { .soc_id = "i.MX6UL", .data = &caam_imx6ul_data }, + { .soc_id = "i.MX6*", .data = &caam_imx6_data }, + { .soc_id = "i.MX7*", .data = &caam_imx7_data }, + { .family = "Freescale i.MX" }, + { /* sentinel */ } +}; + +static void disable_clocks(void *data) +{ + struct caam_drv_private *ctrlpriv = data; + + clk_bulk_disable_unprepare(ctrlpriv->num_clks, ctrlpriv->clks); +} + +static int init_clocks(struct device *dev, const struct caam_imx_data *data) +{ + struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev); + int ret; + + ctrlpriv->num_clks = data->num_clks; + ctrlpriv->clks = devm_kmemdup(dev, data->clks, + data->num_clks * sizeof(data->clks[0]), + GFP_KERNEL); + if (!ctrlpriv->clks) + return -ENOMEM; + + ret = devm_clk_bulk_get(dev, ctrlpriv->num_clks, ctrlpriv->clks); + if (ret) { + dev_err(dev, + "Failed to request all necessary clocks\n"); + return ret; + } + + ret = clk_bulk_prepare_enable(ctrlpriv->num_clks, ctrlpriv->clks); + if (ret) { + dev_err(dev, + "Failed to prepare/enable all necessary clocks\n"); + return ret; + } + + return devm_add_action_or_reset(dev, disable_clocks, ctrlpriv); +} + /* Probe routine for CAAM top (controller) level */ static int caam_probe(struct platform_device *pdev) { int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; u64 caam_id; - static const struct soc_device_attribute imx_soc[] = { - {.family = "Freescale i.MX"}, - {}, - }; + const struct soc_device_attribute *imx_soc_match; struct device *dev; struct device_node *nprop, *np; struct caam_ctrl __iomem *ctrl; struct caam_drv_private *ctrlpriv; - struct clk *clk; #ifdef CONFIG_DEBUG_FS struct caam_perfmon *perfmon; #endif @@ -537,7 +598,8 @@ static int caam_probe(struct platform_device *pdev) caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) & (CSTA_PLEND | CSTA_ALT_PLEND)); - caam_imx = (bool)soc_device_match(imx_soc); + imx_soc_match = soc_device_match(caam_imx_soc_table); + caam_imx = (bool)imx_soc_match; 
comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms); caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2); @@ -568,81 +630,17 @@ static int caam_probe(struct platform_device *pdev) } #endif - /* Enable clocking */ - clk = caam_drv_identify_clk(&pdev->dev, "ipg"); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); - dev_err(&pdev->dev, - "can't identify CAAM ipg clk: %d\n", ret); - goto iounmap_ctrl; - } - ctrlpriv->caam_ipg = clk; - - if (!of_machine_is_compatible("fsl,imx7d") && - !of_machine_is_compatible("fsl,imx7s") && - !of_machine_is_compatible("fsl,imx7ulp")) { - clk = caam_drv_identify_clk(&pdev->dev, "mem"); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); - dev_err(&pdev->dev, - "can't identify CAAM mem clk: %d\n", ret); - goto iounmap_ctrl; + if (imx_soc_match) { + if (!imx_soc_match->data) { + dev_err(dev, "No clock data provided for i.MX SoC"); + return -EINVAL; } - ctrlpriv->caam_mem = clk; - } - clk = caam_drv_identify_clk(&pdev->dev, "aclk"); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); - dev_err(&pdev->dev, - "can't identify CAAM aclk clk: %d\n", ret); - goto iounmap_ctrl; - } - ctrlpriv->caam_aclk = clk; - - if (!of_machine_is_compatible("fsl,imx6ul") && - !of_machine_is_compatible("fsl,imx7d") && - !of_machine_is_compatible("fsl,imx7s") && - !of_machine_is_compatible("fsl,imx7ulp")) { - clk = caam_drv_identify_clk(&pdev->dev, "emi_slow"); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); - dev_err(&pdev->dev, - "can't identify CAAM emi_slow clk: %d\n", ret); - goto iounmap_ctrl; - } - ctrlpriv->caam_emi_slow = clk; - } - - ret = clk_prepare_enable(ctrlpriv->caam_ipg); - if (ret < 0) { - dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret); - goto iounmap_ctrl; - } - - if (ctrlpriv->caam_mem) { - ret = clk_prepare_enable(ctrlpriv->caam_mem); - if (ret < 0) { - dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n", - ret); - goto disable_caam_ipg; - } + ret = init_clocks(dev, imx_soc_match->data); + if (ret) + return ret; } - ret = clk_prepare_enable(ctrlpriv->caam_aclk); - if (ret < 0) { - dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret); - goto disable_caam_mem; - } - - if (ctrlpriv->caam_emi_slow) { - ret = clk_prepare_enable(ctrlpriv->caam_emi_slow); - if (ret < 0) { - dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n", - ret); - goto disable_caam_aclk; - } - } /* Allocating the BLOCK_OFFSET based on the supported page size on * the platform @@ -714,7 +712,7 @@ static int caam_probe(struct platform_device *pdev) ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev)); if (ret) { dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); - goto disable_caam_emi_slow; + goto iounmap_ctrl; } ctrlpriv->era = caam_get_era(ctrl); @@ -919,16 +917,6 @@ shutdown_qi: if (ctrlpriv->qi_init) caam_qi_shutdown(dev); #endif -disable_caam_emi_slow: - if (ctrlpriv->caam_emi_slow) - clk_disable_unprepare(ctrlpriv->caam_emi_slow); -disable_caam_aclk: - clk_disable_unprepare(ctrlpriv->caam_aclk); -disable_caam_mem: - if (ctrlpriv->caam_mem) - clk_disable_unprepare(ctrlpriv->caam_mem); -disable_caam_ipg: - clk_disable_unprepare(ctrlpriv->caam_ipg); iounmap_ctrl: iounmap(ctrl); return ret; diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index ec25d260fa40..1f01703f510a 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h @@ -94,11 +94,8 @@ struct caam_drv_private { Handles of the RNG4 block are initialized by this driver */ - struct clk *caam_ipg; - struct clk *caam_mem; - struct clk *caam_aclk; - struct clk 
*caam_emi_slow; - + struct clk_bulk_data *clks; + int num_clks; /* * debugfs entries for developer view into driver/device * variables at runtime. -- cgit From a6c4194ead005e83ca49226b9ac5fdcba7ff0a04 Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Tue, 20 Aug 2019 13:23:49 -0700 Subject: crypto: caam - convert caam_jr_init() to use devres MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use devres to allocate all of the resources in caam_jr_init() (DMA coherent and regular memory, IRQs) and drop calls to the corresponding deallocation routines. No functional change intended. Signed-off-by: Andrey Smirnov Reviewed-by: Horia Geantă Cc: Cory Tusar Cc: Chris Healy Cc: Lucas Stach Cc: Horia Geantă Cc: Aymen Sghaier Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- drivers/crypto/caam/jr.c | 48 ++++++++++++++---------------------------------- 1 file changed, 14 insertions(+), 34 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 4b25b2fa3d02..ea02f7774f7c 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -108,25 +108,12 @@ static int caam_reset_hw_jr(struct device *dev) static int caam_jr_shutdown(struct device *dev) { struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); - dma_addr_t inpbusaddr, outbusaddr; int ret; ret = caam_reset_hw_jr(dev); tasklet_kill(&jrp->irqtask); - /* Release interrupt */ - free_irq(jrp->irq, dev); - - /* Free rings */ - inpbusaddr = rd_reg64(&jrp->rregs->inpring_base); - outbusaddr = rd_reg64(&jrp->rregs->outring_base); - dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, - jrp->inpring, inpbusaddr); - dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, - jrp->outring, outbusaddr); - kfree(jrp->entinfo); - return ret; } @@ -444,8 +431,8 @@ static int caam_jr_init(struct device *dev) tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev); /* Connect job ring interrupt handler.
*/ - error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, - dev_name(dev), dev); + error = devm_request_irq(dev, jrp->irq, caam_jr_interrupt, IRQF_SHARED, + dev_name(dev), dev); if (error) { dev_err(dev, "can't connect JobR %d interrupt (%d)\n", jrp->ridx, jrp->irq); @@ -454,22 +441,25 @@ static int caam_jr_init(struct device *dev) error = caam_reset_hw_jr(dev); if (error) - goto out_free_irq; + goto out_kill_deq; error = -ENOMEM; - jrp->inpring = dma_alloc_coherent(dev, sizeof(*jrp->inpring) * - JOBR_DEPTH, &inpbusaddr, GFP_KERNEL); + jrp->inpring = dmam_alloc_coherent(dev, sizeof(*jrp->inpring) * + JOBR_DEPTH, &inpbusaddr, + GFP_KERNEL); if (!jrp->inpring) - goto out_free_irq; + goto out_kill_deq; - jrp->outring = dma_alloc_coherent(dev, sizeof(*jrp->outring) * - JOBR_DEPTH, &outbusaddr, GFP_KERNEL); + jrp->outring = dmam_alloc_coherent(dev, sizeof(*jrp->outring) * + JOBR_DEPTH, &outbusaddr, + GFP_KERNEL); if (!jrp->outring) - goto out_free_inpring; + goto out_kill_deq; - jrp->entinfo = kcalloc(JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL); + jrp->entinfo = devm_kcalloc(dev, JOBR_DEPTH, sizeof(*jrp->entinfo), + GFP_KERNEL); if (!jrp->entinfo) - goto out_free_outring; + goto out_kill_deq; for (i = 0; i < JOBR_DEPTH; i++) jrp->entinfo[i].desc_addr_dma = !0; @@ -494,16 +484,6 @@ static int caam_jr_init(struct device *dev) (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); return 0; - -out_free_outring: - dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, - jrp->outring, outbusaddr); -out_free_inpring: - dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, - jrp->inpring, inpbusaddr); - dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx); -out_free_irq: - free_irq(jrp->irq, dev); out_kill_deq: tasklet_kill(&jrp->irqtask); return error; -- cgit From d488dfd9b210e6d393627b418b35c2e8e851b9d6 Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Tue, 20 Aug 2019 13:23:50 -0700 Subject: crypto: caam - request JR IRQ as the last step MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In order to avoid any risk of the JR IRQ request being handled while some of the resources used for that are not yet allocated, move the code requesting said IRQ to the end of caam_jr_init(). Signed-off-by: Andrey Smirnov Cc: Chris Spencer Cc: Cory Tusar Cc: Chris Healy Cc: Lucas Stach Cc: Horia Geantă Cc: Aymen Sghaier Cc: Leonard Crestez Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- drivers/crypto/caam/jr.c | 34 +++++++++++++++------------------- 1 file changed, 15 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index ea02f7774f7c..98b308de42c0 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -428,38 +428,26 @@ static int caam_jr_init(struct device *dev) jrp = dev_get_drvdata(dev); - tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev); - - /* Connect job ring interrupt handler.
*/ - error = devm_request_irq(dev, jrp->irq, caam_jr_interrupt, IRQF_SHARED, - dev_name(dev), dev); - if (error) { - dev_err(dev, "can't connect JobR %d interrupt (%d)\n", - jrp->ridx, jrp->irq); - goto out_kill_deq; - } - error = caam_reset_hw_jr(dev); if (error) - goto out_kill_deq; + return error; - error = -ENOMEM; jrp->inpring = dmam_alloc_coherent(dev, sizeof(*jrp->inpring) * JOBR_DEPTH, &inpbusaddr, GFP_KERNEL); if (!jrp->inpring) - goto out_kill_deq; + return -ENOMEM; jrp->outring = dmam_alloc_coherent(dev, sizeof(*jrp->outring) * JOBR_DEPTH, &outbusaddr, GFP_KERNEL); if (!jrp->outring) - goto out_kill_deq; + return -ENOMEM; jrp->entinfo = devm_kcalloc(dev, JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL); if (!jrp->entinfo) - goto out_kill_deq; + return -ENOMEM; for (i = 0; i < JOBR_DEPTH; i++) jrp->entinfo[i].desc_addr_dma = !0; @@ -483,9 +471,17 @@ static int caam_jr_init(struct device *dev) (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) | (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); - return 0; -out_kill_deq: - tasklet_kill(&jrp->irqtask); + tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev); + + /* Connect job ring interrupt handler. */ + error = devm_request_irq(dev, jrp->irq, caam_jr_interrupt, IRQF_SHARED, + dev_name(dev), dev); + if (error) { + dev_err(dev, "can't connect JobR %d interrupt (%d)\n", + jrp->ridx, jrp->irq); + tasklet_kill(&jrp->irqtask); + } + return error; } -- cgit From 9f5db8b5ea275fe22613a08bc6e3d9dedcbbf40f Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Tue, 20 Aug 2019 13:23:51 -0700 Subject: crypto: caam - make use of iowrite64*_hi_lo in wr_reg64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In order to be able to unify 64 and 32 bit implementations of wr_reg64, let's convert it to use helpers from <linux/io-64-nonatomic-hi-lo.h> first. Here are the steps of the transformation: 1. Inline wr_reg32 helpers: if (!caam_imx && caam_little_end) { if (caam_little_end) { iowrite32(data >> 32, (u32 __iomem *)(reg) + 1); iowrite32(data, (u32 __iomem *)(reg)); } else { iowrite32be(data >> 32, (u32 __iomem *)(reg) + 1); iowrite32be(data, (u32 __iomem *)(reg)); } } else { if (caam_little_end) { iowrite32(data >> 32, (u32 __iomem *)(reg)); iowrite32(data, (u32 __iomem *)(reg) + 1); } else { iowrite32be(data >> 32, (u32 __iomem *)(reg)); iowrite32be(data, (u32 __iomem *)(reg) + 1); } } 2. Transform the conditionals such that the check for 'caam_little_end' is at the top level: if (caam_little_end) { if (!caam_imx) { iowrite32(data >> 32, (u32 __iomem *)(reg) + 1); iowrite32(data, (u32 __iomem *)(reg)); } else { iowrite32(data >> 32, (u32 __iomem *)(reg)); iowrite32(data, (u32 __iomem *)(reg) + 1); } } else { iowrite32be(data >> 32, (u32 __iomem *)(reg)); iowrite32be(data, (u32 __iomem *)(reg) + 1); } 3. Invert the check for !caam_imx: if (caam_little_end) { if (caam_imx) { iowrite32(data >> 32, (u32 __iomem *)(reg)); iowrite32(data, (u32 __iomem *)(reg) + 1); } else { iowrite32(data >> 32, (u32 __iomem *)(reg) + 1); iowrite32(data, (u32 __iomem *)(reg)); } } else { iowrite32be(data >> 32, (u32 __iomem *)(reg)); iowrite32be(data, (u32 __iomem *)(reg) + 1); } 4. Make use of iowrite64* helpers from <linux/io-64-nonatomic-hi-lo.h>: if (caam_little_end) { if (caam_imx) { iowrite32(data >> 32, (u32 __iomem *)(reg)); iowrite32(data, (u32 __iomem *)(reg) + 1); } else { iowrite64(data, reg); } } else { iowrite64be(data, reg); } No functional change intended.
Signed-off-by: Andrey Smirnov Reviewed-by: Horia Geantă Cc: Chris Spencer Cc: Cory Tusar Cc: Chris Healy Cc: Lucas Stach Cc: Horia Geantă Cc: Aymen Sghaier Cc: Leonard Crestez Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- drivers/crypto/caam/regs.h | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 7c7ea8af6a48..6acfef30a90c 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -12,6 +12,7 @@ #include <linux/types.h> #include <linux/bitops.h> #include <linux/io.h> +#include <linux/io-64-nonatomic-hi-lo.h> /* * Architecture-specific register access methods @@ -157,12 +158,15 @@ static inline u64 rd_reg64(void __iomem *reg) #else /* CONFIG_64BIT */ static inline void wr_reg64(void __iomem *reg, u64 data) { - if (!caam_imx && caam_little_end) { - wr_reg32((u32 __iomem *)(reg) + 1, data >> 32); - wr_reg32((u32 __iomem *)(reg), data); + if (caam_little_end) { + if (caam_imx) { + iowrite32(data >> 32, (u32 __iomem *)(reg)); + iowrite32(data, (u32 __iomem *)(reg) + 1); + } else { + iowrite64(data, reg); + } } else { - wr_reg32((u32 __iomem *)(reg), data >> 32); - wr_reg32((u32 __iomem *)(reg) + 1, data); + iowrite64be(data, reg); } } -- cgit From 393d2d0fe8e738521a73486e345531a715ad58b7 Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Tue, 20 Aug 2019 13:23:52 -0700 Subject: crypto: caam - use ioread64*_hi_lo in rd_reg64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Following the same transformation logic as outlined in the previous commit converting wr_reg64, convert rd_reg64 to use helpers from <linux/io-64-nonatomic-hi-lo.h> first. No functional change intended. Signed-off-by: Andrey Smirnov Reviewed-by: Horia Geantă Cc: Chris Spencer Cc: Cory Tusar Cc: Chris Healy Cc: Lucas Stach Cc: Horia Geantă Cc: Aymen Sghaier Cc: Leonard Crestez Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- drivers/crypto/caam/regs.h | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 6acfef30a90c..4efc10534873 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -172,12 +172,20 @@ static inline void wr_reg64(void __iomem *reg, u64 data) static inline u64 rd_reg64(void __iomem *reg) { - if (!caam_imx && caam_little_end) - return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 | - (u64)rd_reg32((u32 __iomem *)(reg))); + if (caam_little_end) { + if (caam_imx) { + u32 low, high; - return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 | - (u64)rd_reg32((u32 __iomem *)(reg) + 1)); + high = ioread32(reg); + low = ioread32(reg + sizeof(u32)); + + return low + ((u64)high << 32); + } else { + return ioread64(reg); + } + } else { + return ioread64be(reg); + } } #endif /* CONFIG_64BIT */ -- cgit From 6e05542fd6061d3862c85fd049cb953f1cf5b264 Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Tue, 20 Aug 2019 13:23:53 -0700 Subject: crypto: caam - drop 64-bit only wr/rd_reg64() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since the 32-bit variants of both wr_reg64 and rd_reg64 now use 64-bit IO helpers, these functions should no longer be necessary. No functional change intended.
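For reference, on !CONFIG_64BIT builds <linux/io-64-nonatomic-hi-lo.h> supplies the iowrite64/ioread64 used above as two 32-bit accesses, most-significant half first; on 64-bit builds the same names map to the native writeq/readq, which is what makes the CONFIG_64BIT-only copies redundant. A condensed sketch of the 32-bit fallbacks (illustrative; minor details differ from the kernel header):

static inline void iowrite64_hi_lo(u64 val, void __iomem *addr)
{
	/* most-significant half sits at the higher address */
	iowrite32(val >> 32, addr + sizeof(u32));
	iowrite32(val, addr);
}

static inline u64 ioread64_hi_lo(void __iomem *addr)
{
	u32 low, high;

	high = ioread32(addr + sizeof(u32));
	low = ioread32(addr);

	return low + ((u64)high << 32);
}

This is exactly the word order the non-i.MX branches of wr_reg64/rd_reg64 want, leaving only the i.MX swapped-halves layout to handle by hand.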
Signed-off-by: Andrey Smirnov Reviewed-by: Horia Geantă Cc: Chris Spencer Cc: Cory Tusar Cc: Chris Healy Cc: Lucas Stach Cc: Horia Geantă Cc: Aymen Sghaier Cc: Leonard Crestez Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- drivers/crypto/caam/regs.h | 19 ------------------- 1 file changed, 19 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 4efc10534873..489d6c1eec7d 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -138,24 +138,6 @@ static inline void clrsetbits_32(void __iomem *reg, u32 clear, u32 set) * base + 0x0000 : least-significant 32 bits * base + 0x0004 : most-significant 32 bits */ -#ifdef CONFIG_64BIT -static inline void wr_reg64(void __iomem *reg, u64 data) -{ - if (caam_little_end) - iowrite64(data, reg); - else - iowrite64be(data, reg); -} - -static inline u64 rd_reg64(void __iomem *reg) -{ - if (caam_little_end) - return ioread64(reg); - else - return ioread64be(reg); -} - -#else /* CONFIG_64BIT */ static inline void wr_reg64(void __iomem *reg, u64 data) { if (caam_little_end) { @@ -187,7 +169,6 @@ static inline u64 rd_reg64(void __iomem *reg) return ioread64be(reg); } } -#endif /* CONFIG_64BIT */ static inline u64 cpu_to_caam_dma64(dma_addr_t value) { -- cgit From 3a0944c5362654e13cde558ef81309a0fcf9112b Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Tue, 20 Aug 2019 13:23:54 -0700 Subject: crypto: caam - share definition for MAX_SDLEN MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Both qi.h and caamalg_qi2.h seem to define identical versions of MAX_SDLEN. Move it to desc_constr.h to avoid duplication. Signed-off-by: Andrey Smirnov Cc: Chris Spencer Cc: Cory Tusar Cc: Chris Healy Cc: Lucas Stach Cc: Horia Geantă Cc: Aymen Sghaier Cc: Leonard Crestez Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg_qi2.h | 27 --------------------------- drivers/crypto/caam/desc_constr.h | 27 +++++++++++++++++++++++++++ drivers/crypto/caam/qi.h | 26 -------------------------- 3 files changed, 27 insertions(+), 53 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h index b450e2a25c1f..706736776b47 100644 --- a/drivers/crypto/caam/caamalg_qi2.h +++ b/drivers/crypto/caam/caamalg_qi2.h @@ -92,33 +92,6 @@ struct dpaa2_caam_priv_per_cpu { struct dpaa2_io *dpio; }; -/* - * The CAAM QI hardware constructs a job descriptor which points - * to shared descriptor (as pointed by context_a of FQ to CAAM). - * When the job descriptor is executed by deco, the whole job - * descriptor together with shared descriptor gets loaded in - * deco buffer which is 64 words long (each 32-bit). - * - * The job descriptor constructed by QI hardware has layout: - * - * HEADER (1 word) - * Shdesc ptr (1 or 2 words) - * SEQ_OUT_PTR (1 word) - * Out ptr (1 or 2 words) - * Out length (1 word) - * SEQ_IN_PTR (1 word) - * In ptr (1 or 2 words) - * In length (1 word) - * - * The shdesc ptr is used to fetch shared descriptor contents - * into deco buffer. - * - * Apart from shdesc contents, the total number of words that - * get loaded in deco buffer are '8' or '11'. The remaining words - * in deco buffer can be used for storing shared descriptor.
- */ -#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ) - /* Length of a single buffer in the QI driver memory cache */ #define CAAM_QI_MEMCACHE_SIZE 512 diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index 536f360bf131..1fe50a4fefaa 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h @@ -18,6 +18,33 @@ #define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE) #define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3) +/* + * The CAAM QI hardware constructs a job descriptor which points + * to shared descriptor (as pointed by context_a of FQ to CAAM). + * When the job descriptor is executed by deco, the whole job + * descriptor together with shared descriptor gets loaded in + * deco buffer which is 64 words long (each 32-bit). + * + * The job descriptor constructed by QI hardware has layout: + * + * HEADER (1 word) + * Shdesc ptr (1 or 2 words) + * SEQ_OUT_PTR (1 word) + * Out ptr (1 or 2 words) + * Out length (1 word) + * SEQ_IN_PTR (1 word) + * In ptr (1 or 2 words) + * In length (1 word) + * + * The shdesc ptr is used to fetch shared descriptor contents + * into deco buffer. + * + * Apart from shdesc contents, the total number of words that + * get loaded in deco buffer are '8' or '11'. The remaining words + * in deco buffer can be used for storing shared descriptor. + */ +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ) + #ifdef DEBUG #define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\ &__func__[sizeof("append")]); } while (0) diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h index f93c9c7ed430..db0549549e3b 100644 --- a/drivers/crypto/caam/qi.h +++ b/drivers/crypto/caam/qi.h @@ -14,32 +14,6 @@ #include "desc.h" #include "desc_constr.h" -/* - * CAAM hardware constructs a job descriptor which points to a shared descriptor - * (as pointed by context_a of to-CAAM FQ). - * When the job descriptor is executed by DECO, the whole job descriptor - * together with shared descriptor gets loaded in DECO buffer, which is - * 64 words (each 32-bit) long. - * - * The job descriptor constructed by CAAM hardware has the following layout: - * - * HEADER (1 word) - * Shdesc ptr (1 or 2 words) - * SEQ_OUT_PTR (1 word) - * Out ptr (1 or 2 words) - * Out length (1 word) - * SEQ_IN_PTR (1 word) - * In ptr (1 or 2 words) - * In length (1 word) - * - * The shdesc ptr is used to fetch shared descriptor contents into DECO buffer. - * - * Apart from shdesc contents, the total number of words that get loaded in DECO - * buffer are '8' or '11'. The remaining words in DECO buffer can be used for - * storing shared descriptor. - */ -#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ) - /* Length of a single buffer in the QI driver memory cache */ #define CAAM_QI_MEMCACHE_SIZE 768 -- cgit From 1a3daadce955530df92b1bb22093618dd26a1717 Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Tue, 20 Aug 2019 13:23:55 -0700 Subject: crypto: caam - make CAAM_PTR_SZ dynamic MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In order to be able to configure the CAAM pointer size at run-time, which is needed to support i.MX8MQ, a 64-bit SoC with a 32-bit pointer size, convert CAAM_PTR_SZ to refer to a global variable of the same name ("caam_ptr_sz") and adjust the rest of the code accordingly. No functional change intended.
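Since caam_ptr_sz is only known once the device has been probed, DESC_JOB_IO_LEN stops being a compile-time constant, so the places that need constants get _MIN/_MAX variants, as the hunks below show: static buffers are sized for the worst case (caamhash's hw_desc[] uses DESC_JOB_IO_LEN_MAX), while capacity bounds such as MAX_SDLEN use DESC_JOB_IO_LEN_MIN. The arithmetic, spelled out as a standalone sketch with the CAAM constants inlined for illustration:

#include <stdio.h>

#define CAAM_CMD_SZ 4 /* one 32-bit descriptor command word */
/* five command words plus three pointers of size n */
#define DESC_JOB_IO_LEN(n) (CAAM_CMD_SZ * 5 + (n) * 3)

int main(void)
{
	int caam_ptr_sz = 4; /* probed at run time: 4 on i.MX8MQ, 8 on other 64-bit parts */

	printf("runtime: %d bytes\n", DESC_JOB_IO_LEN(caam_ptr_sz));
	printf("min: %d bytes (8 words)\n", DESC_JOB_IO_LEN(4)); /* 32 */
	printf("max: %d bytes (11 words)\n", DESC_JOB_IO_LEN(8)); /* 44 */
	return 0;
}

The 8- and 11-word results are the same figures quoted in the MAX_SDLEN comment moved around in the previous patch.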
Signed-off-by: Andrey Smirnov Cc: Chris Spencer Cc: Cory Tusar Cc: Chris Healy Cc: Lucas Stach Cc: Horia Geantă Cc: Aymen Sghaier Cc: Leonard Crestez Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 2 +- drivers/crypto/caam/caamhash.c | 2 +- drivers/crypto/caam/caamrng.c | 2 +- drivers/crypto/caam/ctrl.c | 1 + drivers/crypto/caam/desc_constr.h | 12 +++++++++--- drivers/crypto/caam/error.c | 3 +++ 6 files changed, 16 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 3e2662cda9fd..2912006b946b 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -74,7 +74,7 @@ #define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6) -#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) +#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN) #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) struct caam_alg_entry { diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 8a07edb45dad..65399cb2a770 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -560,7 +560,7 @@ struct ahash_edesc { dma_addr_t sec4_sg_dma; int src_nents; int sec4_sg_bytes; - u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned; + u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned; struct sec4_sg_entry sec4_sg[0]; }; diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index 7fbda1b08360..e8baacaabe07 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c @@ -53,7 +53,7 @@ L1_CACHE_BYTES) /* length of descriptors */ -#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2) +#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ_MAX * 2) #define DESC_RNG_LEN (3 * CAAM_CMD_SZ) /* Buffer, its dma address and lock */ diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 0b4007068c31..47b92451756f 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -602,6 +602,7 @@ static int caam_probe(struct platform_device *pdev) caam_imx = (bool)imx_soc_match; comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms); + caam_ptr_sz = sizeof(dma_addr_t); caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2); ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK); diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index 1fe50a4fefaa..89187831d74f 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h @@ -14,9 +14,14 @@ #define IMMEDIATE (1 << 23) #define CAAM_CMD_SZ sizeof(u32) -#define CAAM_PTR_SZ sizeof(dma_addr_t) +#define CAAM_PTR_SZ caam_ptr_sz +#define CAAM_PTR_SZ_MAX sizeof(dma_addr_t) +#define CAAM_PTR_SZ_MIN sizeof(u32) #define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE) -#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3) +#define __DESC_JOB_IO_LEN(n) (CAAM_CMD_SZ * 5 + (n) * 3) +#define DESC_JOB_IO_LEN __DESC_JOB_IO_LEN(CAAM_PTR_SZ) +#define DESC_JOB_IO_LEN_MAX __DESC_JOB_IO_LEN(CAAM_PTR_SZ_MAX) +#define DESC_JOB_IO_LEN_MIN __DESC_JOB_IO_LEN(CAAM_PTR_SZ_MIN) /* * The CAAM QI hardware constructs a job descriptor which points @@ -43,7 +48,7 @@ * get loaded in deco buffer are '8' or '11'. The remaining words * in deco buffer can be used for storing shared descriptor. 
*/ -#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ) +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN) / CAAM_CMD_SZ) #ifdef DEBUG #define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\ @@ -64,6 +69,7 @@ (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) extern bool caam_little_end; +extern size_t caam_ptr_sz; /* * HW fetches 4 S/G table entries at a time, irrespective of how many entries diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c index b7fbf1be37a4..17c6108b6d41 100644 --- a/drivers/crypto/caam/error.c +++ b/drivers/crypto/caam/error.c @@ -56,6 +56,9 @@ EXPORT_SYMBOL(caam_little_end); bool caam_imx; EXPORT_SYMBOL(caam_imx); +size_t caam_ptr_sz; +EXPORT_SYMBOL(caam_ptr_sz); + static const struct { u8 value; const char *error_text; -- cgit From e27d96298bb893aacfeced1045c757b51abf301b Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Tue, 20 Aug 2019 13:23:56 -0700 Subject: crypto: caam - move cpu_to_caam_dma() selection to runtime MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of selecting the implementation of cpu_to_caam_dma()/caam_dma_to_cpu() at build time using the preprocessor, convert the code to do that at run-time using the IS_ENABLED() macro. This is needed to add support for i.MX8MQ. No functional change intended. Signed-off-by: Andrey Smirnov Cc: Chris Spencer Cc: Cory Tusar Cc: Chris Healy Cc: Lucas Stach Cc: Horia Geantă Cc: Aymen Sghaier Cc: Leonard Crestez Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- drivers/crypto/caam/regs.h | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 489d6c1eec7d..0df4cf32fe78 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -188,13 +188,21 @@ static inline u64 caam_dma64_to_cpu(u64 value) return caam64_to_cpu(value); } -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT -#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value) -#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value) -#else -#define cpu_to_caam_dma(value) cpu_to_caam32(value) -#define caam_dma_to_cpu(value) caam32_to_cpu(value) -#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */ +static inline u64 cpu_to_caam_dma(u64 value) +{ + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) + return cpu_to_caam_dma64(value); + else + return cpu_to_caam32(value); +} + +static inline u64 caam_dma_to_cpu(u64 value) +{ + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) + return caam_dma64_to_cpu(value); + else + return caam32_to_cpu(value); +} /* * jr_outentry -- cgit From 6c5f898f12a3d65f04a928ffd7e38ae397c9a0d4 Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Tue, 20 Aug 2019 13:23:57 -0700 Subject: crypto: caam - drop explicit usage of struct jr_outentry MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Using struct jr_outentry to specify the layout of the JobR output ring is not appropriate for all 64-bit SoCs, since some of them, like i.MX8MQ, use 32-bit pointers there, which doesn't match a 64-bit dma_addr_t. Convert existing code to use explicit helper functions to access any of the JobR output ring elements, so that the support for i.MX8MQ can be added later. No functional change intended.
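The shape of the conversion: the ring base becomes a plain void * and every reader goes through an accessor, so the entry layout can be chosen at run time later. A minimal user-space model of the pattern (hypothetical types and names, not the regs.h code):

#include <stdint.h>
#include <stddef.h>

/* entry layout when the device consumes 32-bit pointers */
struct outentry32 { uint32_t desc; uint32_t jrstatus; } __attribute__((packed));
/* entry layout when the device consumes 64-bit pointers */
struct outentry64 { uint64_t desc; uint32_t jrstatus; } __attribute__((packed));

static size_t ptr_sz = sizeof(uint64_t); /* probed once at init */

#define SIZEOF_OUTENTRY (ptr_sz + sizeof(uint32_t))

static uint32_t outentry_jrstatus(const void *outring, int hw_idx)
{
	if (ptr_sz == sizeof(uint32_t))
		return ((const struct outentry32 *)outring)[hw_idx].jrstatus;
	return ((const struct outentry64 *)outring)[hw_idx].jrstatus;
}

Callers never touch the layout directly, which is what lets a later patch add the 32-bit case without churning every user.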
Signed-off-by: Andrey Smirnov Cc: Chris Spencer Cc: Cory Tusar Cc: Chris Healy Cc: Lucas Stach Cc: Horia Geantă Cc: Aymen Sghaier Cc: Leonard Crestez Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- drivers/crypto/caam/intern.h | 2 +- drivers/crypto/caam/jr.c | 10 ++++++---- drivers/crypto/caam/regs.h | 40 ++++++++++++++++++++++++++++++++++++---- 3 files changed, 43 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index 1f01703f510a..081805c0f88b 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h @@ -58,7 +58,7 @@ struct caam_drv_private_jr { dma_addr_t *inpring; /* Base of input ring, alloc DMA-safe */ int out_ring_read_index; /* Output index "tail" */ int tail; /* entinfo (s/w ring) tail index */ - struct jr_outentry *outring; /* Base of output ring, DMA-safe */ + void *outring; /* Base of output ring, DMA-safe */ }; /* diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 98b308de42c0..6c91f38862e4 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -211,7 +211,7 @@ static void caam_jr_dequeue(unsigned long devarg) for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) { sw_idx = (tail + i) & (JOBR_DEPTH - 1); - if (jrp->outring[hw_idx].desc == + if (jr_outentry_desc(jrp->outring, hw_idx) == caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma)) break; /* found */ } @@ -220,7 +220,8 @@ static void caam_jr_dequeue(unsigned long devarg) /* Unmap just-run descriptor so we can post-process */ dma_unmap_single(dev, - caam_dma_to_cpu(jrp->outring[hw_idx].desc), + caam_dma_to_cpu(jr_outentry_desc(jrp->outring, + hw_idx)), jrp->entinfo[sw_idx].desc_size, DMA_TO_DEVICE); @@ -231,7 +232,8 @@ static void caam_jr_dequeue(unsigned long devarg) usercall = jrp->entinfo[sw_idx].callbk; userarg = jrp->entinfo[sw_idx].cbkarg; userdesc = jrp->entinfo[sw_idx].desc_addr_virt; - userstatus = caam32_to_cpu(jrp->outring[hw_idx].jrstatus); + userstatus = caam32_to_cpu(jr_outentry_jrstatus(jrp->outring, + hw_idx)); /* * Make sure all information from the job has been obtained @@ -438,7 +440,7 @@ static int caam_jr_init(struct device *dev) if (!jrp->inpring) return -ENOMEM; - jrp->outring = dmam_alloc_coherent(dev, sizeof(*jrp->outring) * + jrp->outring = dmam_alloc_coherent(dev, SIZEOF_JR_OUTENTRY * JOBR_DEPTH, &outbusaddr, GFP_KERNEL); if (!jrp->outring) diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 0df4cf32fe78..cf73015b3be0 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -71,6 +71,7 @@ extern bool caam_little_end; extern bool caam_imx; +extern size_t caam_ptr_sz; #define caam_to_cpu(len) \ static inline u##len caam##len ## _to_cpu(u##len val) \ @@ -208,10 +209,41 @@ static inline u64 caam_dma_to_cpu(u64 value) * jr_outentry * Represents each entry in a JobR output ring */ -struct jr_outentry { - dma_addr_t desc;/* Pointer to completed descriptor */ - u32 jrstatus; /* Status for completed descriptor */ -} __packed; + +static inline void jr_outentry_get(void *outring, int hw_idx, dma_addr_t *desc, + u32 *jrstatus) +{ + struct { + dma_addr_t desc;/* Pointer to completed descriptor */ + u32 jrstatus; /* Status for completed descriptor */ + } __packed *outentry = outring; + + *desc = outentry[hw_idx].desc; + *jrstatus = outentry[hw_idx].jrstatus; +} + +#define SIZEOF_JR_OUTENTRY (caam_ptr_sz + sizeof(u32)) + +static inline dma_addr_t jr_outentry_desc(void *outring, int 
hw_idx) +{ + dma_addr_t desc; + u32 unused; + + jr_outentry_get(outring, hw_idx, &desc, &unused); + + return desc; +} + +static inline u32 jr_outentry_jrstatus(void *outring, int hw_idx) +{ + dma_addr_t unused; + u32 jrstatus; + + jr_outentry_get(outring, hw_idx, &unused, &jrstatus); + + return jrstatus; +} + /* Version registers (Era 10+) e80-eff */ struct version_regs { -- cgit From dff36801a9f0cca4f8b074b4c8229dcc0400acd1 Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Tue, 20 Aug 2019 13:23:58 -0700 Subject: crypto: caam - don't hardcode inpentry size MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Using dma_addr_t for elements of JobR input ring is not appropriate on all 64-bit SoCs, some of which, like i.MX8MQ, use only 32-bit wide pointers there. Convert all of the code to use explicit helper function that can be later extended to support i.MX8MQ. No functional change intended. Signed-off-by: Andrey Smirnov Cc: Chris Spencer Cc: Cory Tusar Cc: Chris Healy Cc: Lucas Stach Cc: Horia Geantă Cc: Aymen Sghaier Cc: Leonard Crestez Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- drivers/crypto/caam/intern.h | 3 ++- drivers/crypto/caam/jr.c | 4 ++-- drivers/crypto/caam/regs.h | 9 +++++++++ 3 files changed, 13 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index 081805c0f88b..c00c7c84ec84 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h @@ -55,7 +55,8 @@ struct caam_drv_private_jr { spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */ u32 inpring_avail; /* Number of free entries in input ring */ int head; /* entinfo (s/w ring) head index */ - dma_addr_t *inpring; /* Base of input ring, alloc DMA-safe */ + void *inpring; /* Base of input ring, alloc + * DMA-safe */ int out_ring_read_index; /* Output index "tail" */ int tail; /* entinfo (s/w ring) tail index */ void *outring; /* Base of output ring, DMA-safe */ diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 6c91f38862e4..417ad52615c6 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -388,7 +388,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, head_entry->cbkarg = areq; head_entry->desc_addr_dma = desc_dma; - jrp->inpring[head] = cpu_to_caam_dma(desc_dma); + jr_inpentry_set(jrp->inpring, head, cpu_to_caam_dma(desc_dma)); /* * Guarantee that the descriptor's DMA address has been written to @@ -434,7 +434,7 @@ static int caam_jr_init(struct device *dev) if (error) return error; - jrp->inpring = dmam_alloc_coherent(dev, sizeof(*jrp->inpring) * + jrp->inpring = dmam_alloc_coherent(dev, SIZEOF_JR_INPENTRY * JOBR_DEPTH, &inpbusaddr, GFP_KERNEL); if (!jrp->inpring) diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index cf73015b3be0..6dbb269a3e7e 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -244,6 +244,15 @@ static inline u32 jr_outentry_jrstatus(void *outring, int hw_idx) return jrstatus; } +static inline void jr_inpentry_set(void *inpring, int hw_idx, dma_addr_t val) +{ + dma_addr_t *inpentry = inpring; + + inpentry[hw_idx] = val; +} + +#define SIZEOF_JR_INPENTRY caam_ptr_sz + /* Version registers (Era 10+) e80-eff */ struct version_regs { -- cgit From a1cf573ee95d5a15bdd1d33310d179d92b229dd1 Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Tue, 20 Aug 2019 13:23:59 -0700 Subject: crypto: caam - select DMA address size at runtime 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The i.MX8 mScale SoC still uses 32-bit addresses in its CAAM implementation, so we can't rely on sizeof(dma_addr_t) to determine the CAAM pointer size. Convert the code to query CTPR and MCFGR for that during driver probing. Signed-off-by: Andrey Smirnov Cc: Chris Spencer Cc: Cory Tusar Cc: Chris Healy Cc: Lucas Stach Cc: Horia Geantă Cc: Aymen Sghaier Cc: Leonard Crestez Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- drivers/crypto/caam/caampkc.c | 8 ++++---- drivers/crypto/caam/ctrl.c | 5 ++++- drivers/crypto/caam/desc_constr.h | 10 ++++++++-- drivers/crypto/caam/intern.h | 2 +- drivers/crypto/caam/pdb.h | 16 ++++++++++++---- drivers/crypto/caam/pkc_desc.c | 8 ++++---- drivers/crypto/caam/regs.h | 40 +++++++++++++++++++++++++++++---------- 7 files changed, 63 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 5b12b232ee5e..83f96d4f86e0 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -17,13 +17,13 @@ #include "sg_sw_sec4.h" #include "caampkc.h" -#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb)) +#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB) #define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \ - sizeof(struct rsa_priv_f1_pdb)) + SIZEOF_RSA_PRIV_F1_PDB) #define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \ - sizeof(struct rsa_priv_f2_pdb)) + SIZEOF_RSA_PRIV_F2_PDB) #define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \ - sizeof(struct rsa_priv_f3_pdb)) + SIZEOF_RSA_PRIV_F3_PDB) #define CAAM_RSA_MAX_INPUT_SIZE 512 /* for a 4096-bit modulus */ /* buffer filled with zeros, used for padding */ diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 47b92451756f..4b7f95f64e34 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -602,7 +602,10 @@ static int caam_probe(struct platform_device *pdev) caam_imx = (bool)imx_soc_match; comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms); - caam_ptr_sz = sizeof(dma_addr_t); + if (comp_params & CTPR_MS_PS && rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR) + caam_ptr_sz = sizeof(u64); + else + caam_ptr_sz = sizeof(u32); caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2); ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK); diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index 89187831d74f..62ce6421bb3f 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h @@ -136,9 +136,15 @@ static inline void init_job_desc_pdb(u32 * const desc, u32 options, static inline void append_ptr(u32 * const desc, dma_addr_t ptr) { - dma_addr_t *offset = (dma_addr_t *)desc_end(desc); + if (caam_ptr_sz == sizeof(dma_addr_t)) { + dma_addr_t *offset = (dma_addr_t *)desc_end(desc); - *offset = cpu_to_caam_dma(ptr); + *offset = cpu_to_caam_dma(ptr); + } else { + u32 *offset = (u32 *)desc_end(desc); + + *offset = cpu_to_caam_dma(ptr); + } (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + CAAM_PTR_SZ / CAAM_CMD_SZ); diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index c00c7c84ec84..731b06becd9c 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h @@ -219,7 +219,7 @@ static inline u64 caam_get_dma_mask(struct device *dev) { struct device_node *nprop = dev->of_node; - if (sizeof(dma_addr_t) != sizeof(u64)) + if (caam_ptr_sz != sizeof(u64)) return DMA_BIT_MASK(32); if
(caam_dpaa2) diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h index 810f0bef0652..68c1fd5dee5d 100644 --- a/drivers/crypto/caam/pdb.h +++ b/drivers/crypto/caam/pdb.h @@ -512,7 +512,9 @@ struct rsa_pub_pdb { dma_addr_t n_dma; dma_addr_t e_dma; u32 f_len; -} __packed; +}; + +#define SIZEOF_RSA_PUB_PDB (2 * sizeof(u32) + 4 * caam_ptr_sz) /** * RSA Decrypt PDB - Private Key Form #1 @@ -528,7 +530,9 @@ struct rsa_priv_f1_pdb { dma_addr_t f_dma; dma_addr_t n_dma; dma_addr_t d_dma; -} __packed; +}; + +#define SIZEOF_RSA_PRIV_F1_PDB (sizeof(u32) + 4 * caam_ptr_sz) /** * RSA Decrypt PDB - Private Key Form #2 @@ -554,7 +558,9 @@ struct rsa_priv_f2_pdb { dma_addr_t tmp1_dma; dma_addr_t tmp2_dma; u32 p_q_len; -} __packed; +}; + +#define SIZEOF_RSA_PRIV_F2_PDB (2 * sizeof(u32) + 7 * caam_ptr_sz) /** * RSA Decrypt PDB - Private Key Form #3 @@ -586,6 +592,8 @@ struct rsa_priv_f3_pdb { dma_addr_t tmp1_dma; dma_addr_t tmp2_dma; u32 p_q_len; -} __packed; +}; + +#define SIZEOF_RSA_PRIV_F3_PDB (2 * sizeof(u32) + 9 * caam_ptr_sz) #endif diff --git a/drivers/crypto/caam/pkc_desc.c b/drivers/crypto/caam/pkc_desc.c index 2a8d87ea94bf..0d5ee762e036 100644 --- a/drivers/crypto/caam/pkc_desc.c +++ b/drivers/crypto/caam/pkc_desc.c @@ -13,7 +13,7 @@ /* Descriptor for RSA Public operation */ void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb) { - init_job_desc_pdb(desc, 0, sizeof(*pdb)); + init_job_desc_pdb(desc, 0, SIZEOF_RSA_PUB_PDB); append_cmd(desc, pdb->sgf); append_ptr(desc, pdb->f_dma); append_ptr(desc, pdb->g_dma); @@ -26,7 +26,7 @@ void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb) /* Descriptor for RSA Private operation - Private Key Form #1 */ void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb) { - init_job_desc_pdb(desc, 0, sizeof(*pdb)); + init_job_desc_pdb(desc, 0, SIZEOF_RSA_PRIV_F1_PDB); append_cmd(desc, pdb->sgf); append_ptr(desc, pdb->g_dma); append_ptr(desc, pdb->f_dma); @@ -39,7 +39,7 @@ void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb) /* Descriptor for RSA Private operation - Private Key Form #2 */ void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb) { - init_job_desc_pdb(desc, 0, sizeof(*pdb)); + init_job_desc_pdb(desc, 0, SIZEOF_RSA_PRIV_F2_PDB); append_cmd(desc, pdb->sgf); append_ptr(desc, pdb->g_dma); append_ptr(desc, pdb->f_dma); @@ -56,7 +56,7 @@ void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb) /* Descriptor for RSA Private operation - Private Key Form #3 */ void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb) { - init_job_desc_pdb(desc, 0, sizeof(*pdb)); + init_job_desc_pdb(desc, 0, SIZEOF_RSA_PRIV_F3_PDB); append_cmd(desc, pdb->sgf); append_ptr(desc, pdb->g_dma); append_ptr(desc, pdb->f_dma); diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 6dbb269a3e7e..05127b70527d 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -191,7 +191,8 @@ static inline u64 caam_dma64_to_cpu(u64 value) static inline u64 cpu_to_caam_dma(u64 value) { - if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && + caam_ptr_sz == sizeof(u64)) return cpu_to_caam_dma64(value); else return cpu_to_caam32(value); @@ -199,7 +200,8 @@ static inline u64 cpu_to_caam_dma(u64 value) static inline u64 caam_dma_to_cpu(u64 value) { - if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && + caam_ptr_sz == sizeof(u64)) return caam_dma64_to_cpu(value); else return caam32_to_cpu(value); @@ -213,13 
+215,24 @@ static inline u64 caam_dma_to_cpu(u64 value) static inline void jr_outentry_get(void *outring, int hw_idx, dma_addr_t *desc, u32 *jrstatus) { - struct { - dma_addr_t desc;/* Pointer to completed descriptor */ - u32 jrstatus; /* Status for completed descriptor */ - } __packed *outentry = outring; - *desc = outentry[hw_idx].desc; - *jrstatus = outentry[hw_idx].jrstatus; + if (caam_ptr_sz == sizeof(u32)) { + struct { + u32 desc; + u32 jrstatus; + } __packed *outentry = outring; + + *desc = outentry[hw_idx].desc; + *jrstatus = outentry[hw_idx].jrstatus; + } else { + struct { + dma_addr_t desc;/* Pointer to completed descriptor */ + u32 jrstatus; /* Status for completed descriptor */ + } __packed *outentry = outring; + + *desc = outentry[hw_idx].desc; + *jrstatus = outentry[hw_idx].jrstatus; + } } #define SIZEOF_JR_OUTENTRY (caam_ptr_sz + sizeof(u32)) @@ -246,9 +259,15 @@ static inline u32 jr_outentry_jrstatus(void *outring, int hw_idx) static inline void jr_inpentry_set(void *inpring, int hw_idx, dma_addr_t val) { - dma_addr_t *inpentry = inpring; + if (caam_ptr_sz == sizeof(u32)) { + u32 *inpentry = inpring; - inpentry[hw_idx] = val; + inpentry[hw_idx] = val; + } else { + dma_addr_t *inpentry = inpring; + + inpentry[hw_idx] = val; + } } #define SIZEOF_JR_INPENTRY caam_ptr_sz @@ -380,6 +399,7 @@ struct caam_perfmon { u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/ #define CTPR_MS_QI_SHIFT 25 #define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT) +#define CTPR_MS_PS BIT(17) #define CTPR_MS_DPAA2 BIT(13) #define CTPR_MS_VIRT_EN_INCL 0x00000001 #define CTPR_MS_VIRT_EN_POR 0x00000002 -- cgit From a6727055971b4fa8d07db08088ff618c57f7d296 Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Tue, 20 Aug 2019 13:24:00 -0700 Subject: crypto: caam - always select job ring via RSR on i.MX8MQ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Per feedback from NXP tech support the way to use register based service interface on i.MX8MQ is to follow the same set of steps outlined for the case when virtualization is enabled, regardless if it is. 
Current version of SRM for i.MX8MQ speaks of DECO DID_MS and DECO DID_LS registers, but apparently those are not implemented, so the case when SCFGR[VIRT_EN]=0 should be handled the same as the case when SCFGR[VIRT_EN]=1 Signed-off-by: Andrey Smirnov Cc: Chris Spencer Cc: Cory Tusar Cc: Chris Healy Cc: Lucas Stach Cc: Horia Geantă Cc: Aymen Sghaier Cc: Leonard Crestez Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 4b7f95f64e34..3b18e7e8da1f 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -97,7 +97,12 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, int i; - if (ctrlpriv->virt_en == 1) { + if (ctrlpriv->virt_en == 1 || + /* + * Apparently on i.MX8MQ it doesn't matter if virt_en == 1 + * and the following steps should be performed regardless + */ + of_machine_is_compatible("fsl,imx8mq")) { clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0); while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) && -- cgit From 6796c02a4b7ef3761df040db4905227ddf8dac77 Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Tue, 20 Aug 2019 13:24:01 -0700 Subject: crypto: caam - add clock entry for i.MX8MQ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add clock entry needed to support i.MX8MQ. Signed-off-by: Andrey Smirnov Cc: Chris Spencer Cc: Cory Tusar Cc: Chris Healy Cc: Lucas Stach Cc: Horia Geantă Cc: Aymen Sghaier Cc: Leonard Crestez Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 3b18e7e8da1f..3c059d0e4207 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -527,6 +527,7 @@ static const struct soc_device_attribute caam_imx_soc_table[] = { { .soc_id = "i.MX6UL", .data = &caam_imx6ul_data }, { .soc_id = "i.MX6*", .data = &caam_imx6_data }, { .soc_id = "i.MX7*", .data = &caam_imx7_data }, + { .soc_id = "i.MX8MQ", .data = &caam_imx7_data }, { .family = "Freescale i.MX" }, { /* sentinel */ } }; -- cgit From 25e9960c370b23d6627a3424d0d9820e72152096 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 22 Aug 2019 22:44:44 +0800 Subject: crypto: atmel - Fix -Wunused-const-variable warning drivers/crypto/atmel-i2c.h:68:3: warning: error_list defined but not used [-Wunused-const-variable=] error_list is only used in atmel-i2c.c, so just move the definition over there. 
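The mechanics behind the warning: a static const object defined in a header gives every translation unit that includes it a private copy, and each copy that is never referenced trips -Wunused-const-variable. A two-file sketch that reproduces it (hypothetical file and symbol names):

/* err_table.h */
static const struct {
	unsigned char value;
	const char *error_text;
} error_list[] = {
	{ 0x01, "miscompare" },
};

/* consumer.c: includes err_table.h but never reads error_list, so this
 * translation unit's copy is dead and gcc warns */
#include "err_table.h"

int consumer_ready(void)
{
	return 1;
}

Moving the table into the one .c file that actually indexes it, as done here, is the standard cure.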
Reported-by: Hulk Robot Signed-off-by: YueHaibing Signed-off-by: Herbert Xu --- drivers/crypto/atmel-i2c.c | 12 ++++++++++++ drivers/crypto/atmel-i2c.h | 12 ------------ 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c index dc876fab2882..1d3355913b40 100644 --- a/drivers/crypto/atmel-i2c.c +++ b/drivers/crypto/atmel-i2c.c @@ -21,6 +21,18 @@ #include #include "atmel-i2c.h" +static const struct { + u8 value; + const char *error_text; +} error_list[] = { + { 0x01, "CheckMac or Verify miscompare" }, + { 0x03, "Parse Error" }, + { 0x05, "ECC Fault" }, + { 0x0F, "Execution Error" }, + { 0xEE, "Watchdog about to expire" }, + { 0xFF, "CRC or other communication error" }, +}; + /** * atmel_i2c_checksum() - Generate 16-bit CRC as required by ATMEL ECC. * CRC16 verification of the count, opcode, param1, param2 and data bytes. diff --git a/drivers/crypto/atmel-i2c.h b/drivers/crypto/atmel-i2c.h index 21860b99c3e3..63b97b104f16 100644 --- a/drivers/crypto/atmel-i2c.h +++ b/drivers/crypto/atmel-i2c.h @@ -62,18 +62,6 @@ struct atmel_i2c_cmd { #define STATUS_NOERR 0x00 #define STATUS_WAKE_SUCCESSFUL 0x11 -static const struct { - u8 value; - const char *error_text; -} error_list[] = { - { 0x01, "CheckMac or Verify miscompare" }, - { 0x03, "Parse Error" }, - { 0x05, "ECC Fault" }, - { 0x0F, "Execution Error" }, - { 0xEE, "Watchdog about to expire" }, - { 0xFF, "CRC or other communication error" }, -}; - /* Definitions for eeprom organization */ #define CONFIG_ZONE 0 -- cgit From 2be7f901610ca0d8588865b13ca9046ec32fb9bb Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 22 Aug 2019 22:46:49 +0800 Subject: crypto: nx - remove unused variables 'nx_driver_string' and 'nx_driver_version' drivers/crypto/nx/nx.h:12:19: warning: nx_driver_string defined but not used [-Wunused-const-variable=] drivers/crypto/nx/nx.h:13:19: warning: nx_driver_version defined but not used [-Wunused-const-variable=] They are never used, so just remove them. Reported-by: Hulk Robot Signed-off-by: YueHaibing Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h index c6b5a3be02be..7ecca168f8c4 100644 --- a/drivers/crypto/nx/nx.h +++ b/drivers/crypto/nx/nx.h @@ -9,9 +9,6 @@ #define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver" #define NX_VERSION "1.0" -static const char nx_driver_string[] = NX_STRING; -static const char nx_driver_version[] = NX_VERSION; - /* a scatterlist in the format PHYP is expecting */ struct nx_sg { u64 addr; -- cgit From c03a509304954c5ed58ac9c607e20f1b55f88a28 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 22 Aug 2019 18:47:31 +0300 Subject: crypto: ccp - invoke fallback for XTS ciphertext stealing For correctness and compliance with the XTS-AES specification, we are adding support for ciphertext stealing to XTS implementations, even though no use cases are known that will be enabled by this. Since the ccp driver already has a fallback skcipher on standby for dealing with input sizes other than [16, 512, 1024, 2048, 4096], just drop the check against the block size.
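The dispatch logic this change leans on, in rough outline (a simplified model, not the literal ccp code):

#include <stdbool.h>
#include <stddef.h>

/* request sizes the CCP engine handles natively */
static const size_t ccp_unit_sizes[] = { 16, 512, 1024, 2048, 4096 };

static bool ccp_native(size_t nbytes)
{
	size_t i;

	for (i = 0; i < sizeof(ccp_unit_sizes) / sizeof(ccp_unit_sizes[0]); i++)
		if (nbytes == ccp_unit_sizes[i])
			return true;
	return false;
}

/* hypothetical request path: hardware for exact unit sizes, software
 * fallback for everything else, including the partial trailing block
 * that ciphertext stealing produces */
static int xts_crypt(size_t nbytes, int (*hw)(size_t), int (*sw)(size_t))
{
	return ccp_native(nbytes) ? hw(nbytes) : sw(nbytes);
}

With that routing in place, the early block-size check was not just redundant, it actively prevented the fallback from ever seeing a ciphertext-stealing request.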
Cc: Tom Lendacky Cc: Gary Hook Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-crypto-aes-xts.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c index 783ba75e0618..8e4a531f4f70 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c @@ -116,9 +116,6 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, if (!ctx->u.aes.key_len) return -EINVAL; - if (req->nbytes & (AES_BLOCK_SIZE - 1)) - return -EINVAL; - if (!req->info) return -EINVAL; -- cgit From 1bbbbcfdc0f0fa7a98ba0d551fd03d2c45d5a318 Mon Sep 17 00:00:00 2001 From: Mao Wenan Date: Wed, 28 Aug 2019 16:07:40 +0800 Subject: crypto: hisilicon - select CRYPTO_LIB_DES while compiling SEC driver When CRYPTO_DEV_HISI_SEC=y, the compilation error below is found after 'commit 894b68d8be4b ("crypto: hisilicon/des - switch to new verification routines")': drivers/crypto/hisilicon/sec/sec_algs.o: In function `sec_alg_skcipher_setkey_des_cbc': sec_algs.c:(.text+0x11f0): undefined reference to `des_expand_key' drivers/crypto/hisilicon/sec/sec_algs.o: In function `sec_alg_skcipher_setkey_des_ecb': sec_algs.c:(.text+0x1390): undefined reference to `des_expand_key' make: *** [vmlinux] Error 1 This is because the DES library has been moved to lib/crypto in commit '04007b0e6cbb ("crypto: des - split off DES library from generic DES cipher driver")'. Fix this by selecting CRYPTO_LIB_DES in CRYPTO_DEV_HISI_SEC. Fixes: 04007b0e6cbb ("crypto: des - split off DES library from generic DES cipher driver") Fixes: 894b68d8be4b ("crypto: hisilicon/des - switch to new verification routines") Signed-off-by: Mao Wenan Acked-by: Jonathan Cameron Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index fa8aa063bb65..ebaf91e0146d 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -4,6 +4,7 @@ config CRYPTO_DEV_HISI_SEC tristate "Support for Hisilicon SEC crypto block cipher accelerator" select CRYPTO_BLKCIPHER select CRYPTO_ALGAPI + select CRYPTO_LIB_DES select SG_SPLIT depends on ARM64 || COMPILE_TEST depends on HAS_IOMEM -- cgit From 4797f6cab5096aa4e97cee2d0044dcf8e6bcd7b8 Mon Sep 17 00:00:00 2001 From: Ryder Lee Date: Wed, 28 Aug 2019 14:37:12 +0800 Subject: crypto: mediatek - move mtk_aes_find_dev() to the right place Move mtk_aes_find_dev() to the right functions, as nobody uses the 'cryp' under the current flows. We can also avoid duplicate checks here and there in this way.
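Distilled, the before/after pattern looks like this (schematic names, not the mtk code):

struct dev;
struct ctx { struct dev *dev; };

struct dev *find_dev(struct ctx *ctx);
int do_crypt(struct dev *dev, struct ctx *ctx);

/* before: looked up in the tfm-init hook, cached, then never consumed there */
static int init_old(struct ctx *ctx)
{
	ctx->dev = find_dev(ctx);
	return ctx->dev ? 0 : -ENODEV;
}

/* after: looked up on the request path, the one place it is consumed */
static int crypt_new(struct ctx *ctx)
{
	struct dev *dev = find_dev(ctx);

	if (!dev)
		return -ENODEV;
	return do_crypt(dev, ctx);
}

One check at the point of use replaces the per-algorithm copies scattered over the init hooks, as the diff below shows.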
Signed-off-by: Ryder Lee Signed-off-by: Vic Wu Signed-off-by: Herbert Xu --- drivers/crypto/mediatek/mtk-aes.c | 39 ++++++++++++++------------------------- 1 file changed, 14 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c index b7477ee32ca0..0360658e43d5 100644 --- a/drivers/crypto/mediatek/mtk-aes.c +++ b/drivers/crypto/mediatek/mtk-aes.c @@ -651,14 +651,19 @@ static int mtk_aes_setkey(struct crypto_ablkcipher *tfm, static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode) { - struct mtk_aes_base_ctx *ctx; + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); struct mtk_aes_reqctx *rctx; + struct mtk_cryp *cryp; + + cryp = mtk_aes_find_dev(ctx); + if (!cryp) + return -ENODEV; - ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); rctx = ablkcipher_request_ctx(req); rctx->mode = mode; - return mtk_aes_handle_queue(ctx->cryp, !(mode & AES_FLAGS_ENCRYPT), + return mtk_aes_handle_queue(cryp, !(mode & AES_FLAGS_ENCRYPT), &req->base); } @@ -695,13 +700,6 @@ static int mtk_aes_ctr_decrypt(struct ablkcipher_request *req) static int mtk_aes_cra_init(struct crypto_tfm *tfm) { struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm); - struct mtk_cryp *cryp = NULL; - - cryp = mtk_aes_find_dev(&ctx->base); - if (!cryp) { - pr_err("can't find crypto device\n"); - return -ENODEV; - } tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx); ctx->base.start = mtk_aes_start; @@ -711,13 +709,6 @@ static int mtk_aes_cra_init(struct crypto_tfm *tfm) static int mtk_aes_ctr_cra_init(struct crypto_tfm *tfm) { struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm); - struct mtk_cryp *cryp = NULL; - - cryp = mtk_aes_find_dev(&ctx->base); - if (!cryp) { - pr_err("can't find crypto device\n"); - return -ENODEV; - } tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx); ctx->base.start = mtk_aes_ctr_start; @@ -923,6 +914,11 @@ static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode) struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx); struct mtk_aes_reqctx *rctx = aead_request_ctx(req); + struct mtk_cryp *cryp; + + cryp = mtk_aes_find_dev(ctx); + if (!cryp) + return -ENODEV; /* Empty messages are not supported yet */ if (!gctx->textlen && !req->assoclen) @@ -930,7 +926,7 @@ static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode) rctx->mode = AES_FLAGS_GCM | mode; - return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT), + return mtk_aes_handle_queue(cryp, !!(mode & AES_FLAGS_ENCRYPT), &req->base); } @@ -1046,13 +1042,6 @@ static int mtk_aes_gcm_decrypt(struct aead_request *req) static int mtk_aes_gcm_init(struct crypto_aead *aead) { struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead); - struct mtk_cryp *cryp = NULL; - - cryp = mtk_aes_find_dev(&ctx->base); - if (!cryp) { - pr_err("can't find crypto device\n"); - return -ENODEV; - } ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC); -- cgit From e049ff5af04b18bfb4105787be198d58c841fc99 Mon Sep 17 00:00:00 2001 From: Ryder Lee Date: Wed, 28 Aug 2019 14:37:13 +0800 Subject: crypto: mediatek - fix uninitialized value of gctx->textlen Add a pre-computed text length to avoid using an uninitialized value in the check.
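The resulting computation, condensed from the diff below; a single expression covers both directions, since the authentication tag is part of req->cryptlen only on decrypt:

    /* Computed before the empty-message check, so the check no
     * longer reads a value that used to be set only later, in
     * mtk_aes_gcm_start().
     */
    gctx->textlen = req->cryptlen - (enc ? 0 : gctx->authsize);
    if (!gctx->textlen && !req->assoclen)
    	return -EINVAL;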
Fixes: e47270665b5f ("crypto: mediatek - Add empty messages check in GCM mode") Signed-off-by: Ryder Lee Signed-off-by: Vic Wu Signed-off-by: Herbert Xu --- drivers/crypto/mediatek/mtk-aes.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c index 0360658e43d5..787b3dd03fc3 100644 --- a/drivers/crypto/mediatek/mtk-aes.c +++ b/drivers/crypto/mediatek/mtk-aes.c @@ -896,14 +896,11 @@ static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) aes->resume = mtk_aes_transfer_complete; /* Compute total process length. */ aes->total = len + gctx->authsize; - /* Compute text length. */ - gctx->textlen = req->cryptlen; /* Hardware will append authenticated tag to output buffer */ scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1); } else { aes->resume = mtk_aes_gcm_tag_verify; aes->total = len; - gctx->textlen = req->cryptlen - gctx->authsize; } return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len); @@ -915,19 +912,22 @@ static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode) struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx); struct mtk_aes_reqctx *rctx = aead_request_ctx(req); struct mtk_cryp *cryp; + bool enc = !!(mode & AES_FLAGS_ENCRYPT); cryp = mtk_aes_find_dev(ctx); if (!cryp) return -ENODEV; + /* Compute text length. */ + gctx->textlen = req->cryptlen - (enc ? 0 : gctx->authsize); + /* Empty messages are not supported yet */ if (!gctx->textlen && !req->assoclen) return -EINVAL; rctx->mode = AES_FLAGS_GCM | mode; - return mtk_aes_handle_queue(cryp, !!(mode & AES_FLAGS_ENCRYPT), - &req->base); + return mtk_aes_handle_queue(cryp, enc, &req->base); } -- cgit From f271ea9fe1de6455a6855ef320c6946a7f7bf6e7 Mon Sep 17 00:00:00 2001 From: Ryder Lee Date: Wed, 28 Aug 2019 14:37:14 +0800 Subject: crypto: mediatek - only treat EBUSY as transient if backlog The driver was treating -EBUSY as an indication of queueing to the backlog without checking that the backlog is enabled for the request. Fix it by checking request flags. Signed-off-by: Ryder Lee Signed-off-by: Vic Wu Signed-off-by: Herbert Xu --- drivers/crypto/mediatek/mtk-sha.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c index f03b0f06fb2f..9e9f48bb7f85 100644 --- a/drivers/crypto/mediatek/mtk-sha.c +++ b/drivers/crypto/mediatek/mtk-sha.c @@ -778,7 +778,9 @@ static int mtk_sha_finup(struct ahash_request *req) ctx->flags |= SHA_FLAGS_FINUP; err1 = mtk_sha_update(req); - if (err1 == -EINPROGRESS || err1 == -EBUSY) + if (err1 == -EINPROGRESS || + (err1 == -EBUSY && (ahash_request_flags(req) & + CRYPTO_TFM_REQ_MAY_BACKLOG))) return err1; /* * final() has to be always called to cleanup resources -- cgit From 069ec891823a5211c48c68c9b9f1571d9647606d Mon Sep 17 00:00:00 2001 From: Ryder Lee Date: Wed, 28 Aug 2019 14:37:15 +0800 Subject: crypto: mediatek - add support to OFB/CFB mode This patch adds support for the OFB and CFB modes.
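For context, the textbook definitions of the two stream modes being added (standard mode equations, not driver code); both XOR the data with a keystream, which is why the algorithm templates below use a block size of 1:

    /*
     * OFB:    O_0 = IV,  O_i = E_K(O_{i-1}),    C_i = P_i ^ O_i
     * CFB128: C_0 = IV,  C_i = E_K(C_{i-1}) ^ P_i
     */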
Signed-off-by: Ryder Lee Signed-off-by: Vic Wu Signed-off-by: Herbert Xu --- drivers/crypto/mediatek/mtk-aes.c | 85 +++++++++++++++++++++++++++++++++++---- 1 file changed, 78 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c index 787b3dd03fc3..378899b640b4 100644 --- a/drivers/crypto/mediatek/mtk-aes.c +++ b/drivers/crypto/mediatek/mtk-aes.c @@ -23,7 +23,7 @@ #define AES_CT_CTRL_HDR cpu_to_le32(0x00220000) -/* AES-CBC/ECB/CTR command token */ +/* AES-CBC/ECB/CTR/OFB/CFB command token */ #define AES_CMD0 cpu_to_le32(0x05000000) #define AES_CMD1 cpu_to_le32(0x2d060000) #define AES_CMD2 cpu_to_le32(0xe4a63806) @@ -50,6 +50,8 @@ /* AES transform information word 1 fields */ #define AES_TFM_ECB cpu_to_le32(0x0 << 0) #define AES_TFM_CBC cpu_to_le32(0x1 << 0) +#define AES_TFM_OFB cpu_to_le32(0x4 << 0) +#define AES_TFM_CFB128 cpu_to_le32(0x5 << 0) #define AES_TFM_CTR_INIT cpu_to_le32(0x2 << 0) /* init counter to 1 */ #define AES_TFM_CTR_LOAD cpu_to_le32(0x6 << 0) /* load/reuse counter */ #define AES_TFM_3IV cpu_to_le32(0x7 << 5) /* using IV 0-2 */ @@ -58,13 +60,15 @@ #define AES_TFM_ENC_HASH cpu_to_le32(0x1 << 17) /* AES flags */ -#define AES_FLAGS_CIPHER_MSK GENMASK(2, 0) +#define AES_FLAGS_CIPHER_MSK GENMASK(4, 0) #define AES_FLAGS_ECB BIT(0) #define AES_FLAGS_CBC BIT(1) #define AES_FLAGS_CTR BIT(2) -#define AES_FLAGS_GCM BIT(3) -#define AES_FLAGS_ENCRYPT BIT(4) -#define AES_FLAGS_BUSY BIT(5) +#define AES_FLAGS_OFB BIT(3) +#define AES_FLAGS_CFB128 BIT(4) +#define AES_FLAGS_GCM BIT(5) +#define AES_FLAGS_ENCRYPT BIT(6) +#define AES_FLAGS_BUSY BIT(7) #define AES_AUTH_TAG_ERR cpu_to_le32(BIT(26)) @@ -405,7 +409,7 @@ exit: return mtk_aes_complete(cryp, aes, -EINVAL); } -/* Initialize transform information of CBC/ECB/CTR mode */ +/* Initialize transform information of CBC/ECB/CTR/OFB/CFB mode */ static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes, size_t len) { @@ -434,7 +438,12 @@ static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes, case AES_FLAGS_CTR: info->tfm[1] = AES_TFM_CTR_LOAD; goto ctr; - + case AES_FLAGS_OFB: + info->tfm[1] = AES_TFM_OFB; + break; + case AES_FLAGS_CFB128: + info->tfm[1] = AES_TFM_CFB128; + break; default: /* Should not happen... 
*/ return; @@ -697,6 +706,26 @@ static int mtk_aes_ctr_decrypt(struct ablkcipher_request *req) return mtk_aes_crypt(req, AES_FLAGS_CTR); } +static int mtk_aes_ofb_encrypt(struct ablkcipher_request *req) +{ + return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB); +} + +static int mtk_aes_ofb_decrypt(struct ablkcipher_request *req) +{ + return mtk_aes_crypt(req, AES_FLAGS_OFB); +} + +static int mtk_aes_cfb_encrypt(struct ablkcipher_request *req) +{ + return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB128); +} + +static int mtk_aes_cfb_decrypt(struct ablkcipher_request *req) +{ + return mtk_aes_crypt(req, AES_FLAGS_CFB128); +} + static int mtk_aes_cra_init(struct crypto_tfm *tfm) { struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm); @@ -778,6 +807,48 @@ static struct crypto_alg aes_algs[] = { .decrypt = mtk_aes_ctr_decrypt, } }, +{ + .cra_name = "ofb(aes)", + .cra_driver_name = "ofb-aes-mtk", + .cra_priority = 400, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_init = mtk_aes_cra_init, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct mtk_aes_ctx), + .cra_alignmask = 0xf, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = mtk_aes_setkey, + .encrypt = mtk_aes_ofb_encrypt, + .decrypt = mtk_aes_ofb_decrypt, + } +}, +{ + .cra_name = "cfb(aes)", + .cra_driver_name = "cfb-aes-mtk", + .cra_priority = 400, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_init = mtk_aes_cra_init, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct mtk_aes_ctx), + .cra_alignmask = 0xf, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = mtk_aes_setkey, + .encrypt = mtk_aes_cfb_encrypt, + .decrypt = mtk_aes_cfb_decrypt, + } +}, }; static inline struct mtk_aes_gcm_ctx * -- cgit From a3d7c50c416c378c91272e6b2905f91fde682319 Mon Sep 17 00:00:00 2001 From: Vic Wu Date: Wed, 28 Aug 2019 14:37:16 +0800 Subject: crypto: mediatek - fix incorrect crypto key setting Record crypto key to context during setkey and set the key to transform state buffer in encrypt/decrypt process. 
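The resulting split, condensed from the diff below (field names as in the driver):

    /* setkey: only record the key in the tfm context */
    mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);

    /* handle_queue: load the owning context's key into the shared
     * hardware state buffer just before the request is dispatched
     */
    memcpy(ctx->info.state, ctx->key, sizeof(ctx->key));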
Signed-off-by: Vic Wu Tested-by: John Crispin Signed-off-by: Herbert Xu --- drivers/crypto/mediatek/mtk-aes.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c index 378899b640b4..90c9644fb8a8 100644 --- a/drivers/crypto/mediatek/mtk-aes.c +++ b/drivers/crypto/mediatek/mtk-aes.c @@ -105,6 +105,7 @@ struct mtk_aes_reqctx { struct mtk_aes_base_ctx { struct mtk_cryp *cryp; u32 keylen; + __le32 key[12]; __le32 keymode; mtk_aes_fn start; @@ -534,6 +535,8 @@ static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id, backlog->complete(backlog, -EINPROGRESS); ctx = crypto_tfm_ctx(areq->tfm); + /* Write key into state buffer */ + memcpy(ctx->info.state, ctx->key, sizeof(ctx->key)); aes->areq = areq; aes->ctx = ctx; @@ -653,7 +656,7 @@ static int mtk_aes_setkey(struct crypto_ablkcipher *tfm, } ctx->keylen = SIZE_IN_WORDS(keylen); - mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen); + mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen); return 0; } @@ -1070,10 +1073,8 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, if (err) goto out; - /* Write key into state buffer */ - mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen); - /* Write key(H) into state buffer */ - mtk_aes_write_state_be(ctx->info.state + ctx->keylen, data->hash, + mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen); + mtk_aes_write_state_be(ctx->key + ctx->keylen, data->hash, AES_BLOCK_SIZE); out: kzfree(data); -- cgit From 93369b5d06c7c45f2c9c62106c7a030f92c0eb9e Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Fri, 30 Aug 2019 09:40:52 +0200 Subject: crypto: inside-secure - Move static cipher alg & mode settings to init ctx->alg and ctx->mode were set from safexcel_send_req through the various safexcel_encrypt and _decrypt routines, but this makes little sense as these are static per ciphersuite. So they are now set in _init instead, in preparation for adding more ciphersuites.
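The resulting pattern, condensed from the diff below: one shared encrypt/decrypt pair, with a per-ciphersuite cra_init setting the static parameters exactly once:

    static int safexcel_skcipher_aes_ecb_cra_init(struct crypto_tfm *tfm)
    {
    	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

    	safexcel_skcipher_cra_init(tfm);
    	ctx->alg  = SAFEXCEL_AES;	/* static per ciphersuite */
    	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
    	return 0;
    }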
Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel_cipher.c | 258 +++++++++++-------------- 1 file changed, 111 insertions(+), 147 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 8f0fecc0dafa..ba40166897f9 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -903,8 +903,7 @@ static int safexcel_aead_exit_inv(struct crypto_tfm *tfm) static int safexcel_queue_req(struct crypto_async_request *base, struct safexcel_cipher_req *sreq, - enum safexcel_cipher_direction dir, u32 mode, - enum safexcel_cipher_alg alg) + enum safexcel_cipher_direction dir) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); struct safexcel_crypto_priv *priv = ctx->priv; @@ -912,8 +911,6 @@ static int safexcel_queue_req(struct crypto_async_request *base, sreq->needs_inv = false; sreq->direction = dir; - ctx->alg = alg; - ctx->mode = mode; if (ctx->base.ctxr) { if (priv->flags & EIP197_TRC_CACHE && ctx->base.needs_inv) { @@ -941,18 +938,16 @@ static int safexcel_queue_req(struct crypto_async_request *base, return ret; } -static int safexcel_ecb_aes_encrypt(struct skcipher_request *req) +static int safexcel_encrypt(struct skcipher_request *req) { return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, - SAFEXCEL_AES); + SAFEXCEL_ENCRYPT); } -static int safexcel_ecb_aes_decrypt(struct skcipher_request *req) +static int safexcel_decrypt(struct skcipher_request *req) { return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, - SAFEXCEL_AES); + SAFEXCEL_DECRYPT); } static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm) @@ -1026,12 +1021,22 @@ static void safexcel_aead_cra_exit(struct crypto_tfm *tfm) } } +static int safexcel_skcipher_aes_ecb_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_AES; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB; + return 0; +} + struct safexcel_alg_template safexcel_alg_ecb_aes = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, .alg.skcipher = { .setkey = safexcel_skcipher_aes_setkey, - .encrypt = safexcel_ecb_aes_encrypt, - .decrypt = safexcel_ecb_aes_decrypt, + .encrypt = safexcel_encrypt, + .decrypt = safexcel_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .base = { @@ -1043,33 +1048,29 @@ struct safexcel_alg_template safexcel_alg_ecb_aes = { .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), .cra_alignmask = 0, - .cra_init = safexcel_skcipher_cra_init, + .cra_init = safexcel_skcipher_aes_ecb_cra_init, .cra_exit = safexcel_skcipher_cra_exit, .cra_module = THIS_MODULE, }, }, }; -static int safexcel_cbc_aes_encrypt(struct skcipher_request *req) +static int safexcel_skcipher_aes_cbc_cra_init(struct crypto_tfm *tfm) { - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, - SAFEXCEL_AES); -} + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); -static int safexcel_cbc_aes_decrypt(struct skcipher_request *req) -{ - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, - SAFEXCEL_AES); + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_AES; + ctx->mode = 
CONTEXT_CONTROL_CRYPTO_MODE_CBC; + return 0; } struct safexcel_alg_template safexcel_alg_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, .alg.skcipher = { .setkey = safexcel_skcipher_aes_setkey, - .encrypt = safexcel_cbc_aes_encrypt, - .decrypt = safexcel_cbc_aes_decrypt, + .encrypt = safexcel_encrypt, + .decrypt = safexcel_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, @@ -1082,27 +1083,13 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = { .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), .cra_alignmask = 0, - .cra_init = safexcel_skcipher_cra_init, + .cra_init = safexcel_skcipher_aes_cbc_cra_init, .cra_exit = safexcel_skcipher_cra_exit, .cra_module = THIS_MODULE, }, }, }; -static int safexcel_ctr_aes_encrypt(struct skcipher_request *req) -{ - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD, - SAFEXCEL_AES); -} - -static int safexcel_ctr_aes_decrypt(struct skcipher_request *req) -{ - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD, - SAFEXCEL_AES); -} - static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm, const u8 *key, unsigned int len) { @@ -1141,12 +1128,22 @@ static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm, return 0; } +static int safexcel_skcipher_aes_ctr_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_AES; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; + return 0; +} + struct safexcel_alg_template safexcel_alg_ctr_aes = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, .alg.skcipher = { .setkey = safexcel_skcipher_aesctr_setkey, - .encrypt = safexcel_ctr_aes_encrypt, - .decrypt = safexcel_ctr_aes_decrypt, + .encrypt = safexcel_encrypt, + .decrypt = safexcel_decrypt, /* Add nonce size */ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, @@ -1160,27 +1157,13 @@ struct safexcel_alg_template safexcel_alg_ctr_aes = { .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), .cra_alignmask = 0, - .cra_init = safexcel_skcipher_cra_init, + .cra_init = safexcel_skcipher_aes_ctr_cra_init, .cra_exit = safexcel_skcipher_cra_exit, .cra_module = THIS_MODULE, }, }, }; -static int safexcel_cbc_des_encrypt(struct skcipher_request *req) -{ - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, - SAFEXCEL_DES); -} - -static int safexcel_cbc_des_decrypt(struct skcipher_request *req) -{ - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, - SAFEXCEL_DES); -} - static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key, unsigned int len) { @@ -1202,12 +1185,22 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key, return 0; } +static int safexcel_skcipher_des_cbc_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_DES; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; + return 0; +} + struct safexcel_alg_template safexcel_alg_cbc_des = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, .alg.skcipher = { .setkey = safexcel_des_setkey, - .encrypt = safexcel_cbc_des_encrypt, - .decrypt = 
safexcel_cbc_des_decrypt, + .encrypt = safexcel_encrypt, + .decrypt = safexcel_decrypt, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, @@ -1220,33 +1213,29 @@ struct safexcel_alg_template safexcel_alg_cbc_des = { .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), .cra_alignmask = 0, - .cra_init = safexcel_skcipher_cra_init, + .cra_init = safexcel_skcipher_des_cbc_cra_init, .cra_exit = safexcel_skcipher_cra_exit, .cra_module = THIS_MODULE, }, }, }; -static int safexcel_ecb_des_encrypt(struct skcipher_request *req) +static int safexcel_skcipher_des_ecb_cra_init(struct crypto_tfm *tfm) { - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, - SAFEXCEL_DES); -} + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); -static int safexcel_ecb_des_decrypt(struct skcipher_request *req) -{ - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, - SAFEXCEL_DES); + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_DES; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB; + return 0; } struct safexcel_alg_template safexcel_alg_ecb_des = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, .alg.skcipher = { .setkey = safexcel_des_setkey, - .encrypt = safexcel_ecb_des_encrypt, - .decrypt = safexcel_ecb_des_decrypt, + .encrypt = safexcel_encrypt, + .decrypt = safexcel_decrypt, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .base = { @@ -1258,27 +1247,13 @@ struct safexcel_alg_template safexcel_alg_ecb_des = { .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), .cra_alignmask = 0, - .cra_init = safexcel_skcipher_cra_init, + .cra_init = safexcel_skcipher_des_ecb_cra_init, .cra_exit = safexcel_skcipher_cra_exit, .cra_module = THIS_MODULE, }, }, }; -static int safexcel_cbc_des3_ede_encrypt(struct skcipher_request *req) -{ - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, - SAFEXCEL_3DES); -} - -static int safexcel_cbc_des3_ede_decrypt(struct skcipher_request *req) -{ - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, - SAFEXCEL_3DES); -} - static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm, const u8 *key, unsigned int len) { @@ -1302,12 +1277,22 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm, return 0; } +static int safexcel_skcipher_des3_cbc_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_3DES; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; + return 0; +} + struct safexcel_alg_template safexcel_alg_cbc_des3_ede = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, .alg.skcipher = { .setkey = safexcel_des3_ede_setkey, - .encrypt = safexcel_cbc_des3_ede_encrypt, - .decrypt = safexcel_cbc_des3_ede_decrypt, + .encrypt = safexcel_encrypt, + .decrypt = safexcel_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, @@ -1320,33 +1305,29 @@ struct safexcel_alg_template safexcel_alg_cbc_des3_ede = { .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), .cra_alignmask = 0, - .cra_init = safexcel_skcipher_cra_init, + .cra_init = safexcel_skcipher_des3_cbc_cra_init, .cra_exit = safexcel_skcipher_cra_exit, .cra_module = THIS_MODULE, }, }, }; -static 
int safexcel_ecb_des3_ede_encrypt(struct skcipher_request *req) +static int safexcel_skcipher_des3_ecb_cra_init(struct crypto_tfm *tfm) { - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, - SAFEXCEL_3DES); -} + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); -static int safexcel_ecb_des3_ede_decrypt(struct skcipher_request *req) -{ - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, - SAFEXCEL_3DES); + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_3DES; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB; + return 0; } struct safexcel_alg_template safexcel_alg_ecb_des3_ede = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, .alg.skcipher = { .setkey = safexcel_des3_ede_setkey, - .encrypt = safexcel_ecb_des3_ede_encrypt, - .decrypt = safexcel_ecb_des3_ede_decrypt, + .encrypt = safexcel_encrypt, + .decrypt = safexcel_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .base = { @@ -1358,27 +1339,25 @@ struct safexcel_alg_template safexcel_alg_ecb_des3_ede = { .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), .cra_alignmask = 0, - .cra_init = safexcel_skcipher_cra_init, + .cra_init = safexcel_skcipher_des3_ecb_cra_init, .cra_exit = safexcel_skcipher_cra_exit, .cra_module = THIS_MODULE, }, }, }; -static int safexcel_aead_encrypt_aes(struct aead_request *req) +static int safexcel_aead_encrypt(struct aead_request *req) { struct safexcel_cipher_req *creq = aead_request_ctx(req); - return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT, - CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES); + return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT); } -static int safexcel_aead_decrypt_aes(struct aead_request *req) +static int safexcel_aead_decrypt(struct aead_request *req) { struct safexcel_cipher_req *creq = aead_request_ctx(req); - return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT, - CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES); + return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT); } static int safexcel_aead_cra_init(struct crypto_tfm *tfm) @@ -1394,6 +1373,7 @@ static int safexcel_aead_cra_init(struct crypto_tfm *tfm) ctx->priv = tmpl->priv; ctx->alg = SAFEXCEL_AES; /* default */ + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; /* default */ ctx->aead = true; ctx->base.send = safexcel_aead_send; ctx->base.handle_result = safexcel_aead_handle_result; @@ -1414,8 +1394,8 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, .alg.aead = { .setkey = safexcel_aead_setkey, - .encrypt = safexcel_aead_encrypt_aes, - .decrypt = safexcel_aead_decrypt_aes, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, .base = { @@ -1448,8 +1428,8 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, .alg.aead = { .setkey = safexcel_aead_setkey, - .encrypt = safexcel_aead_encrypt_aes, - .decrypt = safexcel_aead_decrypt_aes, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, .base = { @@ -1482,8 +1462,8 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, .alg.aead = { .setkey = safexcel_aead_setkey, - .encrypt = safexcel_aead_encrypt_aes, - .decrypt = 
safexcel_aead_decrypt_aes, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, .base = { @@ -1516,8 +1496,8 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, .alg.aead = { .setkey = safexcel_aead_setkey, - .encrypt = safexcel_aead_encrypt_aes, - .decrypt = safexcel_aead_decrypt_aes, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, .base = { @@ -1550,8 +1530,8 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, .alg.aead = { .setkey = safexcel_aead_setkey, - .encrypt = safexcel_aead_encrypt_aes, - .decrypt = safexcel_aead_decrypt_aes, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, .base = { @@ -1579,28 +1559,12 @@ static int safexcel_aead_sha1_des3_cra_init(struct crypto_tfm *tfm) return 0; } -static int safexcel_aead_encrypt_3des(struct aead_request *req) -{ - struct safexcel_cipher_req *creq = aead_request_ctx(req); - - return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT, - CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_3DES); -} - -static int safexcel_aead_decrypt_3des(struct aead_request *req) -{ - struct safexcel_cipher_req *creq = aead_request_ctx(req); - - return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT, - CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_3DES); -} - struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede = { .type = SAFEXCEL_ALG_TYPE_AEAD, .alg.aead = { .setkey = safexcel_aead_setkey, - .encrypt = safexcel_aead_encrypt_3des, - .decrypt = safexcel_aead_decrypt_3des, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, .base = { @@ -1632,8 +1596,8 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, .alg.aead = { .setkey = safexcel_aead_setkey, - .encrypt = safexcel_aead_encrypt_aes, - .decrypt = safexcel_aead_decrypt_aes, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, .base = { @@ -1665,8 +1629,8 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, .alg.aead = { .setkey = safexcel_aead_setkey, - .encrypt = safexcel_aead_encrypt_aes, - .decrypt = safexcel_aead_decrypt_aes, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, .base = { @@ -1698,8 +1662,8 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, .alg.aead = { .setkey = safexcel_aead_setkey, - .encrypt = safexcel_aead_encrypt_aes, - .decrypt = safexcel_aead_decrypt_aes, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, .base = { @@ -1731,8 +1695,8 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, .alg.aead = { .setkey = safexcel_aead_setkey, - .encrypt = safexcel_aead_encrypt_aes, - .decrypt = safexcel_aead_decrypt_aes, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, .base = { 
@@ -1764,8 +1728,8 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, .alg.aead = { .setkey = safexcel_aead_setkey, - .encrypt = safexcel_aead_encrypt_aes, - .decrypt = safexcel_aead_decrypt_aes, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, .base = { -- cgit From c7da38a71cfbb5c0f20f84864a290b9450ad78e9 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Fri, 30 Aug 2019 09:40:53 +0200 Subject: crypto: inside-secure - Add support for the AES-XTS algorithm This patch adds support for the AES-XTS skcipher algorithm. Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 1 + drivers/crypto/inside-secure/safexcel.h | 2 + drivers/crypto/inside-secure/safexcel_cipher.c | 124 ++++++++++++++++++++++++- 3 files changed, 123 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index e12a2a3a5422..9941861ae1d8 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -1004,6 +1004,7 @@ static struct safexcel_alg_template *safexcel_algs[] = { &safexcel_alg_authenc_hmac_sha256_ctr_aes, &safexcel_alg_authenc_hmac_sha384_ctr_aes, &safexcel_alg_authenc_hmac_sha512_ctr_aes, + &safexcel_alg_xts_aes, }; static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 33e5f663c249..0a30a7bf4fe0 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -334,6 +334,7 @@ struct safexcel_context_record { #define CONTEXT_CONTROL_CRYPTO_MODE_ECB (0 << 0) #define CONTEXT_CONTROL_CRYPTO_MODE_CBC (1 << 0) #define CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD (6 << 0) +#define CONTEXT_CONTROL_CRYPTO_MODE_XTS (7 << 0) #define CONTEXT_CONTROL_IV0 BIT(5) #define CONTEXT_CONTROL_IV1 BIT(6) #define CONTEXT_CONTROL_IV2 BIT(7) @@ -750,5 +751,6 @@ extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes; +extern struct safexcel_alg_template safexcel_alg_xts_aes; #endif diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index ba40166897f9..05e34c62945c 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -40,9 +41,9 @@ struct safexcel_cipher_ctx { enum safexcel_cipher_alg alg; bool aead; - __le32 key[8]; + __le32 key[16]; u32 nonce; - unsigned int key_len; + unsigned int key_len, xts; /* All the below is AEAD specific */ u32 hash_alg; @@ -351,7 +352,7 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, } else if (ctx->alg == SAFEXCEL_3DES) { cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_3DES; } else if (ctx->alg == SAFEXCEL_AES) { - switch (ctx->key_len) { + switch (ctx->key_len >> ctx->xts) { case AES_KEYSIZE_128: cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128; break; @@ -363,7 +364,7 @@ static int safexcel_context_control(struct 
safexcel_cipher_ctx *ctx, break; default: dev_err(priv->dev, "aes keysize not supported: %u\n", - ctx->key_len); + ctx->key_len >> ctx->xts); return -EINVAL; } } @@ -1747,3 +1748,118 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes = { }, }, }; + +static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm, + const u8 *key, unsigned int len) +{ + struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct safexcel_crypto_priv *priv = ctx->priv; + struct crypto_aes_ctx aes; + int ret, i; + unsigned int keylen; + + /* Check for illegal XTS keys */ + ret = xts_verify_key(ctfm, key, len); + if (ret) + return ret; + + /* Only half of the key data is cipher key */ + keylen = (len >> 1); + ret = aes_expandkey(&aes, key, keylen); + if (ret) { + crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return ret; + } + + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { + for (i = 0; i < keylen / sizeof(u32); i++) { + if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) { + ctx->base.needs_inv = true; + break; + } + } + } + + for (i = 0; i < keylen / sizeof(u32); i++) + ctx->key[i] = cpu_to_le32(aes.key_enc[i]); + + /* The other half is the tweak key */ + ret = aes_expandkey(&aes, (u8 *)(key + keylen), keylen); + if (ret) { + crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return ret; + } + + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { + for (i = 0; i < keylen / sizeof(u32); i++) { + if (ctx->key[i + keylen / sizeof(u32)] != + cpu_to_le32(aes.key_enc[i])) { + ctx->base.needs_inv = true; + break; + } + } + } + + for (i = 0; i < keylen / sizeof(u32); i++) + ctx->key[i + keylen / sizeof(u32)] = + cpu_to_le32(aes.key_enc[i]); + + ctx->key_len = keylen << 1; + + memzero_explicit(&aes, sizeof(aes)); + return 0; +} + +static int safexcel_skcipher_aes_xts_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_AES; + ctx->xts = 1; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XTS; + return 0; +} + +static int safexcel_encrypt_xts(struct skcipher_request *req) +{ + if (req->cryptlen < XTS_BLOCK_SIZE) + return -EINVAL; + return safexcel_queue_req(&req->base, skcipher_request_ctx(req), + SAFEXCEL_ENCRYPT); +} + +static int safexcel_decrypt_xts(struct skcipher_request *req) +{ + if (req->cryptlen < XTS_BLOCK_SIZE) + return -EINVAL; + return safexcel_queue_req(&req->base, skcipher_request_ctx(req), + SAFEXCEL_DECRYPT); +} + +struct safexcel_alg_template safexcel_alg_xts_aes = { + .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .alg.skcipher = { + .setkey = safexcel_skcipher_aesxts_setkey, + .encrypt = safexcel_encrypt_xts, + .decrypt = safexcel_decrypt_xts, + /* XTS actually uses 2 AES keys glued together */ + .min_keysize = AES_MIN_KEY_SIZE * 2, + .max_keysize = AES_MAX_KEY_SIZE * 2, + .ivsize = XTS_BLOCK_SIZE, + .base = { + .cra_name = "xts(aes)", + .cra_driver_name = "safexcel-xts-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = XTS_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_skcipher_aes_xts_cra_init, + .cra_exit = safexcel_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; -- cgit From 062b64ca6db409fd6e102aed0fa59716b5cdfd78 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Fri, 30 Aug 2019 09:40:54 +0200 Subject: crypto: inside-secure - Only 
enable algorithms advertised by the hardware This patch probes the supported algorithms from the hardware and only registers the ones that the hardware actually supports. This is necessary because this is a generic driver supposed to run on a wide variety of engines, which may or may not implement certain algorithms. Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 36 ++++++++++++++++++++++++-- drivers/crypto/inside-secure/safexcel.h | 33 +++++++++++++++++++++++ drivers/crypto/inside-secure/safexcel_cipher.c | 19 ++++++++++++++ drivers/crypto/inside-secure/safexcel_hash.c | 12 +++++++++ 4 files changed, 98 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 9941861ae1d8..25285d664581 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -284,7 +284,7 @@ retry_fw: ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]); if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) { - dev_dbg(priv->dev, "Firmware loaded successfully"); + dev_dbg(priv->dev, "Firmware loaded successfully\n"); return 0; } @@ -1014,6 +1014,12 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) { safexcel_algs[i]->priv = priv; + /* Do we have all required base algorithms available? */ + if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) != + safexcel_algs[i]->algo_mask) + /* No, so don't register this ciphersuite */ + continue; + if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher); else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD) @@ -1029,6 +1035,12 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) fail: for (j = 0; j < i; j++) { + /* Do we have all required base algorithms available? */ + if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) != + safexcel_algs[j]->algo_mask) + /* No, so don't unregister this ciphersuite */ + continue; + if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher); else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD) @@ -1045,6 +1057,12 @@ static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv) int i; for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) { + /* Do we have all required base algorithms available? 
*/ + if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) != + safexcel_algs[i]->algo_mask) + /* No, so don't unregister this ciphersuite */ + continue; + if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher); else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD) @@ -1123,6 +1141,7 @@ static int safexcel_probe_generic(void *pdev, int is_pci_dev) { struct device *dev = priv->dev; + u32 peid; int i, ret; priv->context_pool = dmam_pool_create("safexcel-context", dev, @@ -1133,8 +1152,21 @@ static int safexcel_probe_generic(void *pdev, safexcel_init_register_offsets(priv); - if (priv->version != EIP97IES_MRVL) + /* Get supported algorithms from EIP96 transform engine */ + priv->hwconfig.algo_flags = readl(EIP197_PE(priv) + + EIP197_PE_EIP96_OPTIONS(0)); + + if (priv->version == EIP97IES_MRVL) { + peid = 97; + } else { priv->flags |= EIP197_TRC_CACHE; + peid = 197; + } + + /* Dump some debug information important during development */ + dev_dbg(priv->dev, "Inside Secure EIP%d packetengine\n", peid); + dev_dbg(priv->dev, "Supported algorithms: %08x\n", + priv->hwconfig.algo_flags); safexcel_configure(priv); diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 0a30a7bf4fe0..b5ff62fa3044 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -145,6 +145,7 @@ #define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n))) #define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n))) #define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n))) +#define EIP197_PE_EIP96_OPTIONS(n) (0x13f8 + (0x2000 * (n))) #define EIP197_PE_OUT_DBUF_THRES(n) (0x1c00 + (0x2000 * (n))) #define EIP197_PE_OUT_TBUF_THRES(n) (0x1d00 + (0x2000 * (n))) #define EIP197_MST_CTRL 0xfff4 @@ -597,6 +598,32 @@ enum safexcel_eip_version { EIP197_DEVBRD }; +/* EIP algorithm presence flags */ +enum safexcel_eip_algorithms { + SAFEXCEL_ALG_BC0 = BIT(5), + SAFEXCEL_ALG_SM4 = BIT(6), + SAFEXCEL_ALG_SM3 = BIT(7), + SAFEXCEL_ALG_CHACHA20 = BIT(8), + SAFEXCEL_ALG_POLY1305 = BIT(9), + SAFEXCEL_SEQMASK_256 = BIT(10), + SAFEXCEL_SEQMASK_384 = BIT(11), + SAFEXCEL_ALG_AES = BIT(12), + SAFEXCEL_ALG_AES_XFB = BIT(13), + SAFEXCEL_ALG_DES = BIT(15), + SAFEXCEL_ALG_DES_XFB = BIT(16), + SAFEXCEL_ALG_ARC4 = BIT(18), + SAFEXCEL_ALG_AES_XTS = BIT(20), + SAFEXCEL_ALG_WIRELESS = BIT(21), + SAFEXCEL_ALG_MD5 = BIT(22), + SAFEXCEL_ALG_SHA1 = BIT(23), + SAFEXCEL_ALG_SHA2_256 = BIT(25), + SAFEXCEL_ALG_SHA2_512 = BIT(26), + SAFEXCEL_ALG_XCBC_MAC = BIT(27), + SAFEXCEL_ALG_CBC_MAC_ALL = BIT(29), + SAFEXCEL_ALG_GHASH = BIT(30), + SAFEXCEL_ALG_SHA3 = BIT(31), +}; + struct safexcel_register_offsets { u32 hia_aic; u32 hia_aic_g; @@ -614,6 +641,10 @@ enum safexcel_flags { EIP197_TRC_CACHE = BIT(0), }; +struct safexcel_hwconfig { + enum safexcel_eip_algorithms algo_flags; +}; + struct safexcel_crypto_priv { void __iomem *base; struct device *dev; @@ -623,6 +654,7 @@ struct safexcel_crypto_priv { enum safexcel_eip_version version; struct safexcel_register_offsets offsets; + struct safexcel_hwconfig hwconfig; u32 flags; /* context DMA pool */ @@ -667,6 +699,7 @@ struct safexcel_ahash_export_state { struct safexcel_alg_template { struct safexcel_crypto_priv *priv; enum safexcel_alg_type type; + enum safexcel_eip_algorithms algo_mask; union { struct skcipher_alg skcipher; struct aead_alg aead; diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c 
b/drivers/crypto/inside-secure/safexcel_cipher.c index 05e34c62945c..e60e797b5719 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -1034,6 +1034,7 @@ static int safexcel_skcipher_aes_ecb_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_ecb_aes = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .algo_mask = SAFEXCEL_ALG_AES, .alg.skcipher = { .setkey = safexcel_skcipher_aes_setkey, .encrypt = safexcel_encrypt, @@ -1068,6 +1069,7 @@ static int safexcel_skcipher_aes_cbc_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .algo_mask = SAFEXCEL_ALG_AES, .alg.skcipher = { .setkey = safexcel_skcipher_aes_setkey, .encrypt = safexcel_encrypt, @@ -1141,6 +1143,7 @@ static int safexcel_skcipher_aes_ctr_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_ctr_aes = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .algo_mask = SAFEXCEL_ALG_AES, .alg.skcipher = { .setkey = safexcel_skcipher_aesctr_setkey, .encrypt = safexcel_encrypt, @@ -1198,6 +1201,7 @@ static int safexcel_skcipher_des_cbc_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_cbc_des = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .algo_mask = SAFEXCEL_ALG_DES, .alg.skcipher = { .setkey = safexcel_des_setkey, .encrypt = safexcel_encrypt, @@ -1233,6 +1237,7 @@ static int safexcel_skcipher_des_ecb_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_ecb_des = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .algo_mask = SAFEXCEL_ALG_DES, .alg.skcipher = { .setkey = safexcel_des_setkey, .encrypt = safexcel_encrypt, @@ -1290,6 +1295,7 @@ static int safexcel_skcipher_des3_cbc_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_cbc_des3_ede = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .algo_mask = SAFEXCEL_ALG_DES, .alg.skcipher = { .setkey = safexcel_des3_ede_setkey, .encrypt = safexcel_encrypt, @@ -1325,6 +1331,7 @@ static int safexcel_skcipher_des3_ecb_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_ecb_des3_ede = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .algo_mask = SAFEXCEL_ALG_DES, .alg.skcipher = { .setkey = safexcel_des3_ede_setkey, .encrypt = safexcel_encrypt, @@ -1393,6 +1400,7 @@ static int safexcel_aead_sha1_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA1, .alg.aead = { .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt, @@ -1427,6 +1435,7 @@ static int safexcel_aead_sha256_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256, .alg.aead = { .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt, @@ -1461,6 +1470,7 @@ static int safexcel_aead_sha224_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256, .alg.aead = { .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt, @@ -1495,6 +1505,7 @@ static int safexcel_aead_sha512_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512, .alg.aead = { .setkey = 
safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt, @@ -1529,6 +1540,7 @@ static int safexcel_aead_sha384_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512, .alg.aead = { .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt, @@ -1562,6 +1574,7 @@ static int safexcel_aead_sha1_des3_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede = { .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA1, .alg.aead = { .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt, @@ -1595,6 +1608,7 @@ static int safexcel_aead_sha1_ctr_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA1, .alg.aead = { .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt, @@ -1628,6 +1642,7 @@ static int safexcel_aead_sha256_ctr_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256, .alg.aead = { .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt, @@ -1661,6 +1676,7 @@ static int safexcel_aead_sha224_ctr_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256, .alg.aead = { .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt, @@ -1694,6 +1710,7 @@ static int safexcel_aead_sha512_ctr_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512, .alg.aead = { .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt, @@ -1727,6 +1744,7 @@ static int safexcel_aead_sha384_ctr_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512, .alg.aead = { .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt, @@ -1840,6 +1858,7 @@ static int safexcel_decrypt_xts(struct skcipher_request *req) struct safexcel_alg_template safexcel_alg_xts_aes = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_AES_XTS, .alg.skcipher = { .setkey = safexcel_skcipher_aesxts_setkey, .encrypt = safexcel_encrypt_xts, diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 626dd82e583f..e60838f2fa96 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -845,6 +845,7 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_sha1 = { .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_SHA1, .alg.ahash = { .init = safexcel_sha1_init, .update = safexcel_ahash_update, @@ -1085,6 +1086,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, struct safexcel_alg_template safexcel_alg_hmac_sha1 = { .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_SHA1, .alg.ahash = { .init = safexcel_hmac_sha1_init, .update = safexcel_ahash_update, @@ -1140,6 +1142,7 @@ 
static int safexcel_sha256_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_sha256 = { .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_SHA2_256, .alg.ahash = { .init = safexcel_sha256_init, .update = safexcel_ahash_update, @@ -1194,6 +1197,7 @@ static int safexcel_sha224_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_sha224 = { .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_SHA2_256, .alg.ahash = { .init = safexcel_sha224_init, .update = safexcel_ahash_update, @@ -1262,6 +1266,7 @@ static int safexcel_hmac_sha224_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_hmac_sha224 = { .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_SHA2_256, .alg.ahash = { .init = safexcel_hmac_sha224_init, .update = safexcel_ahash_update, @@ -1331,6 +1336,7 @@ static int safexcel_hmac_sha256_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_hmac_sha256 = { .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_SHA2_256, .alg.ahash = { .init = safexcel_hmac_sha256_init, .update = safexcel_ahash_update, @@ -1386,6 +1392,7 @@ static int safexcel_sha512_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_sha512 = { .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_SHA2_512, .alg.ahash = { .init = safexcel_sha512_init, .update = safexcel_ahash_update, @@ -1440,6 +1447,7 @@ static int safexcel_sha384_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_sha384 = { .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_SHA2_512, .alg.ahash = { .init = safexcel_sha384_init, .update = safexcel_ahash_update, @@ -1508,6 +1516,7 @@ static int safexcel_hmac_sha512_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_hmac_sha512 = { .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_SHA2_512, .alg.ahash = { .init = safexcel_hmac_sha512_init, .update = safexcel_ahash_update, @@ -1577,6 +1586,7 @@ static int safexcel_hmac_sha384_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_hmac_sha384 = { .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_SHA2_512, .alg.ahash = { .init = safexcel_hmac_sha384_init, .update = safexcel_ahash_update, @@ -1632,6 +1642,7 @@ static int safexcel_md5_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_md5 = { .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_MD5, .alg.ahash = { .init = safexcel_md5_init, .update = safexcel_ahash_update, @@ -1701,6 +1712,7 @@ static int safexcel_hmac_md5_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_hmac_md5 = { .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_MD5, .alg.ahash = { .init = safexcel_hmac_md5_init, .update = safexcel_ahash_update, -- cgit From aa88f331c8ff40ef3643dba9b5ac11b5702e5fc4 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Fri, 30 Aug 2019 09:41:47 +0200 Subject: crypto: inside-secure - Made .cra_priority value a define Instead of having a fixed value (of 300) all over the place, the value for .cra_priority is now made into a define (SAFEXCEL_CRA_PRIORITY). This makes it easier to play with, e.g. during development.
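For context: when several implementations of the same .cra_name are registered, the crypto core picks the one with the highest .cra_priority, so a single define lets the whole driver's precedence be tuned in one place. Condensed from the diff below:

    #define SAFEXCEL_CRA_PRIORITY	300

    .cra_priority = SAFEXCEL_CRA_PRIORITY,	/* previously a literal 300 */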
Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.h | 3 ++ drivers/crypto/inside-secure/safexcel_cipher.c | 38 +++++++++++++------------- drivers/crypto/inside-secure/safexcel_hash.c | 24 ++++++++-------- 3 files changed, 34 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index b5ff62fa3044..1e575a1bb0ea 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -598,6 +598,9 @@ enum safexcel_eip_version { EIP197_DEVBRD }; +/* Priority we use for advertising our algorithms */ +#define SAFEXCEL_CRA_PRIORITY 300 + /* EIP algorithm presence flags */ enum safexcel_eip_algorithms { SAFEXCEL_ALG_BC0 = BIT(5), diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index e60e797b5719..6e2027ec167f 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -1044,7 +1044,7 @@ struct safexcel_alg_template safexcel_alg_ecb_aes = { .base = { .cra_name = "ecb(aes)", .cra_driver_name = "safexcel-ecb-aes", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, @@ -1080,7 +1080,7 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = { .base = { .cra_name = "cbc(aes)", .cra_driver_name = "safexcel-cbc-aes", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, @@ -1155,7 +1155,7 @@ struct safexcel_alg_template safexcel_alg_ctr_aes = { .base = { .cra_name = "rfc3686(ctr(aes))", .cra_driver_name = "safexcel-ctr-aes", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, @@ -1212,7 +1212,7 @@ struct safexcel_alg_template safexcel_alg_cbc_des = { .base = { .cra_name = "cbc(des)", .cra_driver_name = "safexcel-cbc-des", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, @@ -1247,7 +1247,7 @@ struct safexcel_alg_template safexcel_alg_ecb_des = { .base = { .cra_name = "ecb(des)", .cra_driver_name = "safexcel-ecb-des", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, @@ -1306,7 +1306,7 @@ struct safexcel_alg_template safexcel_alg_cbc_des3_ede = { .base = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "safexcel-cbc-des3_ede", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, @@ -1341,7 +1341,7 @@ struct safexcel_alg_template safexcel_alg_ecb_des3_ede = { .base = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "safexcel-ecb-des3_ede", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, @@ -1410,7 +1410,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = { .base = { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | 
CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, @@ -1445,7 +1445,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = { .base = { .cra_name = "authenc(hmac(sha256),cbc(aes))", .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, @@ -1480,7 +1480,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = { .base = { .cra_name = "authenc(hmac(sha224),cbc(aes))", .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, @@ -1515,7 +1515,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = { .base = { .cra_name = "authenc(hmac(sha512),cbc(aes))", .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, @@ -1550,7 +1550,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = { .base = { .cra_name = "authenc(hmac(sha384),cbc(aes))", .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, @@ -1584,7 +1584,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede = { .base = { .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des3_ede", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, @@ -1618,7 +1618,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes = { .base = { .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))", .cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-aes", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, @@ -1652,7 +1652,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes = { .base = { .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))", .cra_driver_name = "safexcel-authenc-hmac-sha256-ctr-aes", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, @@ -1686,7 +1686,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes = { .base = { .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))", .cra_driver_name = "safexcel-authenc-hmac-sha224-ctr-aes", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, @@ -1720,7 +1720,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes = { .base = { .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))", .cra_driver_name = "safexcel-authenc-hmac-sha512-ctr-aes", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, @@ -1754,7 +1754,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes = { .base = { .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))", .cra_driver_name = 
"safexcel-authenc-hmac-sha384-ctr-aes", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, @@ -1870,7 +1870,7 @@ struct safexcel_alg_template safexcel_alg_xts_aes = { .base = { .cra_name = "xts(aes)", .cra_driver_name = "safexcel-xts-aes", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = XTS_BLOCK_SIZE, diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index e60838f2fa96..2effb6d21e8b 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -860,7 +860,7 @@ struct safexcel_alg_template safexcel_alg_sha1 = { .base = { .cra_name = "sha1", .cra_driver_name = "safexcel-sha1", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, @@ -1102,7 +1102,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha1 = { .base = { .cra_name = "hmac(sha1)", .cra_driver_name = "safexcel-hmac-sha1", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, @@ -1157,7 +1157,7 @@ struct safexcel_alg_template safexcel_alg_sha256 = { .base = { .cra_name = "sha256", .cra_driver_name = "safexcel-sha256", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, @@ -1212,7 +1212,7 @@ struct safexcel_alg_template safexcel_alg_sha224 = { .base = { .cra_name = "sha224", .cra_driver_name = "safexcel-sha224", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA224_BLOCK_SIZE, @@ -1282,7 +1282,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha224 = { .base = { .cra_name = "hmac(sha224)", .cra_driver_name = "safexcel-hmac-sha224", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA224_BLOCK_SIZE, @@ -1352,7 +1352,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha256 = { .base = { .cra_name = "hmac(sha256)", .cra_driver_name = "safexcel-hmac-sha256", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, @@ -1407,7 +1407,7 @@ struct safexcel_alg_template safexcel_alg_sha512 = { .base = { .cra_name = "sha512", .cra_driver_name = "safexcel-sha512", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA512_BLOCK_SIZE, @@ -1462,7 +1462,7 @@ struct safexcel_alg_template safexcel_alg_sha384 = { .base = { .cra_name = "sha384", .cra_driver_name = "safexcel-sha384", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA384_BLOCK_SIZE, @@ -1532,7 +1532,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha512 = { .base = { .cra_name = "hmac(sha512)", .cra_driver_name = "safexcel-hmac-sha512", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA512_BLOCK_SIZE, @@ -1602,7 
+1602,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha384 = { .base = { .cra_name = "hmac(sha384)", .cra_driver_name = "safexcel-hmac-sha384", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA384_BLOCK_SIZE, @@ -1657,7 +1657,7 @@ struct safexcel_alg_template safexcel_alg_md5 = { .base = { .cra_name = "md5", .cra_driver_name = "safexcel-md5", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, @@ -1728,7 +1728,7 @@ struct safexcel_alg_template safexcel_alg_hmac_md5 = { .base = { .cra_name = "hmac(md5)", .cra_driver_name = "safexcel-hmac-md5", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, -- cgit From 493e289ca8aab8f9408376d6388ac8f819de6b73 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Fri, 30 Aug 2019 09:42:29 +0200 Subject: crypto: inside-secure - Minor optimization recognizing CTR is always AES Moved counter mode handling code in front as it doesn't depend on the rest of the code to be executed, it can just do its thing and exit. Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel_cipher.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 6e2027ec167f..18115dac9d01 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -65,6 +65,19 @@ static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, { u32 block_sz = 0; + if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) { + cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; + + /* 32 bit nonce */ + cdesc->control_data.token[0] = ctx->nonce; + /* 64 bit IV part */ + memcpy(&cdesc->control_data.token[1], iv, 8); + /* 32 bit counter, start at 1 (big endian!) */ + cdesc->control_data.token[3] = cpu_to_be32(1); + + return; + } + if (ctx->mode != CONTEXT_CONTROL_CRYPTO_MODE_ECB) { switch (ctx->alg) { case SAFEXCEL_DES: @@ -80,17 +93,7 @@ static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; break; } - - if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) { - /* 32 bit nonce */ - cdesc->control_data.token[0] = ctx->nonce; - /* 64 bit IV part */ - memcpy(&cdesc->control_data.token[1], iv, 8); - /* 32 bit counter, start at 1 (big endian!) */ - cdesc->control_data.token[3] = cpu_to_be32(1); - } else { - memcpy(cdesc->control_data.token, iv, block_sz); - } + memcpy(cdesc->control_data.token, iv, block_sz); } } -- cgit From d2d9e6fd6d1cd9bd0654c9f7ed2317a7fd220aef Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Fri, 30 Aug 2019 09:43:01 +0200 Subject: crypto: inside-secure - Minor code cleanup and optimizations Some minor cleanup changing e.g. "if (!x) A else B" to "if (x) B else A", merging some back-to-back if's with the same condition, collapsing some back-to-back assignments to the same variable and replacing some weird assignments with proper symbolics. 
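As a generic before/after sketch of the first rewrite mentioned above (do_a()/do_b() are placeholder names, not driver functions):

    /* before: negated condition tested first */
    if (!x)
        do_a();
    else
        do_b();

    /* after: positive condition tested first */
    if (x)
        do_b();
    else
        do_a();

The behaviour is identical; the positive form simply reads more naturally and matches how the merged if-blocks in the hunks below are structured.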
Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel_cipher.c | 86 ++++++++++++++------------ 1 file changed, 47 insertions(+), 39 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 18115dac9d01..3c2b1f759dad 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -126,9 +126,6 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, safexcel_cipher_token(ctx, iv, cdesc); - if (direction == SAFEXCEL_DECRYPT) - cryptlen -= digestsize; - if (direction == SAFEXCEL_ENCRYPT) { /* align end of instruction sequence to end of token */ token = (struct safexcel_token *)(cdesc->control_data.token + @@ -141,6 +138,8 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | EIP197_TOKEN_INS_INSERT_HASH_DIGEST; } else { + cryptlen -= digestsize; + /* align end of instruction sequence to end of token */ token = (struct safexcel_token *)(cdesc->control_data.token + EIP197_MAX_TOKENS - 4); @@ -159,13 +158,7 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, token[3].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT; } - if (unlikely(!cryptlen)) { - token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION; - token[1].packet_length = assoclen; - token[1].stat = EIP197_TOKEN_STAT_LAST_HASH; - token[1].instructions = EIP197_TOKEN_INS_LAST | - EIP197_TOKEN_INS_TYPE_HASH; - } else { + if (likely(cryptlen)) { if (likely(assoclen)) { token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; token[0].packet_length = assoclen; @@ -179,6 +172,12 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, EIP197_TOKEN_INS_TYPE_CRYPTO | EIP197_TOKEN_INS_TYPE_HASH | EIP197_TOKEN_INS_TYPE_OUTPUT; + } else { + token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION; + token[1].packet_length = assoclen; + token[1].stat = EIP197_TOKEN_STAT_LAST_HASH; + token[1].instructions = EIP197_TOKEN_INS_LAST | + EIP197_TOKEN_INS_TYPE_HASH; } } @@ -325,45 +324,60 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, struct safexcel_command_desc *cdesc) { struct safexcel_crypto_priv *priv = ctx->priv; - int ctrl_size; + int ctrl_size = ctx->key_len / sizeof(u32); + + cdesc->control_data.control1 = ctx->mode; if (ctx->aead) { + /* Take in account the ipad+opad digests */ + ctrl_size += ctx->state_sz / sizeof(u32) * 2; + if (sreq->direction == SAFEXCEL_ENCRYPT) - cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT; + cdesc->control_data.control0 = + CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT | + CONTEXT_CONTROL_DIGEST_HMAC | + CONTEXT_CONTROL_KEY_EN | + ctx->hash_alg | + CONTEXT_CONTROL_SIZE(ctrl_size); else - cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN; + cdesc->control_data.control0 = + CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN | + CONTEXT_CONTROL_DIGEST_HMAC | + CONTEXT_CONTROL_KEY_EN | + ctx->hash_alg | + CONTEXT_CONTROL_SIZE(ctrl_size); } else { - cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT; - - /* The decryption control type is a combination of the - * encryption type and CONTEXT_CONTROL_TYPE_NULL_IN, for all - * types. 
- */ - if (sreq->direction == SAFEXCEL_DECRYPT) - cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_NULL_IN; + if (sreq->direction == SAFEXCEL_ENCRYPT) + cdesc->control_data.control0 = + CONTEXT_CONTROL_TYPE_CRYPTO_OUT | + CONTEXT_CONTROL_KEY_EN | + CONTEXT_CONTROL_SIZE(ctrl_size); + else + cdesc->control_data.control0 = + CONTEXT_CONTROL_TYPE_CRYPTO_IN | + CONTEXT_CONTROL_KEY_EN | + CONTEXT_CONTROL_SIZE(ctrl_size); } - cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN; - cdesc->control_data.control1 |= ctx->mode; - - if (ctx->aead) - cdesc->control_data.control0 |= CONTEXT_CONTROL_DIGEST_HMAC | - ctx->hash_alg; - if (ctx->alg == SAFEXCEL_DES) { - cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_DES; + cdesc->control_data.control0 |= + CONTEXT_CONTROL_CRYPTO_ALG_DES; } else if (ctx->alg == SAFEXCEL_3DES) { - cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_3DES; + cdesc->control_data.control0 |= + CONTEXT_CONTROL_CRYPTO_ALG_3DES; } else if (ctx->alg == SAFEXCEL_AES) { switch (ctx->key_len >> ctx->xts) { case AES_KEYSIZE_128: - cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128; + cdesc->control_data.control0 |= + CONTEXT_CONTROL_CRYPTO_ALG_AES128; break; case AES_KEYSIZE_192: - cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192; + cdesc->control_data.control0 |= + CONTEXT_CONTROL_CRYPTO_ALG_AES192; break; case AES_KEYSIZE_256: - cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256; + cdesc->control_data.control0 |= + CONTEXT_CONTROL_CRYPTO_ALG_AES256; break; default: dev_err(priv->dev, "aes keysize not supported: %u\n", @@ -372,12 +386,6 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, } } - ctrl_size = ctx->key_len / sizeof(u32); - if (ctx->aead) - /* Take in account the ipad+opad digests */ - ctrl_size += ctx->state_sz / sizeof(u32) * 2; - cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size); - return 0; } -- cgit From 3e450886ec573cb9d7cb1758317b5e4e0f308b52 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Fri, 30 Aug 2019 09:52:30 +0200 Subject: crypto: inside-secure - Added support for basic AES-GCM This patch adds support for the basic AES-GCM AEAD cipher suite. 
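For background, GCM derives its GHASH hash key H by encrypting an all-zero 128-bit block under the AES key, H = E_K(0^128). A simplified sketch of that step, assuming hkaes is an already-allocated and keyed software "aes" crypto_cipher (the setkey hunk below does exactly this):

    u8 hashkey[AES_BLOCK_SIZE];

    /* H = E_K(0^128): encrypt a block of zeroes in place */
    memset(hashkey, 0, AES_BLOCK_SIZE);
    crypto_cipher_encrypt_one(hkaes, hashkey, hashkey);

The resulting H is then stored big-endian in the engine context via the context's ipad slot.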
Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 4 +- drivers/crypto/inside-secure/safexcel.h | 7 +- drivers/crypto/inside-secure/safexcel_cipher.c | 226 ++++++++++++++++++++----- drivers/crypto/inside-secure/safexcel_ring.c | 8 +- 4 files changed, 204 insertions(+), 41 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 25285d664581..46cdcbeb122b 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -715,7 +715,8 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv, } else if (rdesc->result_data.error_code & BIT(9)) { /* Authentication failed */ return -EBADMSG; - } + } else if (!rdesc->result_data.error_code) + return 0; /* All other non-fatal errors */ return -EINVAL; @@ -1005,6 +1006,7 @@ static struct safexcel_alg_template *safexcel_algs[] = { &safexcel_alg_authenc_hmac_sha384_ctr_aes, &safexcel_alg_authenc_hmac_sha512_ctr_aes, &safexcel_alg_xts_aes, + &safexcel_alg_gcm, }; static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 1e575a1bb0ea..c6f93ec88c84 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -19,7 +19,7 @@ /* Static configuration */ #define EIP197_DEFAULT_RING_SIZE 400 -#define EIP197_MAX_TOKENS 8 +#define EIP197_MAX_TOKENS 10 #define EIP197_MAX_RINGS 4 #define EIP197_FETCH_COUNT 1 #define EIP197_MAX_BATCH_SZ 64 @@ -321,6 +321,7 @@ struct safexcel_context_record { #define CONTEXT_CONTROL_CRYPTO_ALG_AES192 (0x6 << 17) #define CONTEXT_CONTROL_CRYPTO_ALG_AES256 (0x7 << 17) #define CONTEXT_CONTROL_DIGEST_PRECOMPUTED (0x1 << 21) +#define CONTEXT_CONTROL_DIGEST_XCM (0x2 << 21) #define CONTEXT_CONTROL_DIGEST_HMAC (0x3 << 21) #define CONTEXT_CONTROL_CRYPTO_ALG_MD5 (0x0 << 23) #define CONTEXT_CONTROL_CRYPTO_ALG_SHA1 (0x2 << 23) @@ -328,6 +329,7 @@ struct safexcel_context_record { #define CONTEXT_CONTROL_CRYPTO_ALG_SHA256 (0x3 << 23) #define CONTEXT_CONTROL_CRYPTO_ALG_SHA384 (0x6 << 23) #define CONTEXT_CONTROL_CRYPTO_ALG_SHA512 (0x5 << 23) +#define CONTEXT_CONTROL_CRYPTO_ALG_GHASH (0x4 << 23) #define CONTEXT_CONTROL_INV_FR (0x5 << 24) #define CONTEXT_CONTROL_INV_TR (0x6 << 24) @@ -336,6 +338,7 @@ struct safexcel_context_record { #define CONTEXT_CONTROL_CRYPTO_MODE_CBC (1 << 0) #define CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD (6 << 0) #define CONTEXT_CONTROL_CRYPTO_MODE_XTS (7 << 0) +#define CONTEXT_CONTROL_CRYPTO_MODE_XCM ((6 << 0) | BIT(17)) #define CONTEXT_CONTROL_IV0 BIT(5) #define CONTEXT_CONTROL_IV1 BIT(6) #define CONTEXT_CONTROL_IV2 BIT(7) @@ -445,6 +448,7 @@ struct safexcel_token { #define EIP197_TOKEN_OPCODE_INSERT 0x2 #define EIP197_TOKEN_OPCODE_NOOP EIP197_TOKEN_OPCODE_INSERT #define EIP197_TOKEN_OPCODE_RETRIEVE 0x4 +#define EIP197_TOKEN_OPCODE_INSERT_REMRES 0xa #define EIP197_TOKEN_OPCODE_VERIFY 0xd #define EIP197_TOKEN_OPCODE_CTX_ACCESS 0xe #define EIP197_TOKEN_OPCODE_BYPASS GENMASK(3, 0) @@ -788,5 +792,6 @@ extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes; extern struct safexcel_alg_template safexcel_alg_xts_aes; +extern struct safexcel_alg_template safexcel_alg_gcm; #endif diff --git 
a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 3c2b1f759dad..7091b6086e2e 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -14,6 +14,8 @@ #include #include #include +#include +#include #include #include #include @@ -40,6 +42,7 @@ struct safexcel_cipher_ctx { u32 mode; enum safexcel_cipher_alg alg; bool aead; + int xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */ __le32 key[16]; u32 nonce; @@ -50,6 +53,8 @@ struct safexcel_cipher_ctx { u32 state_sz; u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)]; u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)]; + + struct crypto_cipher *hkaes; }; struct safexcel_cipher_req { @@ -75,6 +80,15 @@ static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, /* 32 bit counter, start at 1 (big endian!) */ cdesc->control_data.token[3] = cpu_to_be32(1); + return; + } else if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_XCM) { + cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; + + /* 96 bit IV part */ + memcpy(&cdesc->control_data.token[0], iv, 12); + /* 32 bit counter, start at 1 (big endian!) */ + cdesc->control_data.token[3] = cpu_to_be32(1); + return; } @@ -129,56 +143,68 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, if (direction == SAFEXCEL_ENCRYPT) { /* align end of instruction sequence to end of token */ token = (struct safexcel_token *)(cdesc->control_data.token + - EIP197_MAX_TOKENS - 3); + EIP197_MAX_TOKENS - 5); - token[2].opcode = EIP197_TOKEN_OPCODE_INSERT; - token[2].packet_length = digestsize; - token[2].stat = EIP197_TOKEN_STAT_LAST_HASH | + token[4].opcode = EIP197_TOKEN_OPCODE_INSERT; + token[4].packet_length = digestsize; + token[4].stat = EIP197_TOKEN_STAT_LAST_HASH | EIP197_TOKEN_STAT_LAST_PACKET; - token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | + token[4].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | EIP197_TOKEN_INS_INSERT_HASH_DIGEST; } else { cryptlen -= digestsize; /* align end of instruction sequence to end of token */ token = (struct safexcel_token *)(cdesc->control_data.token + - EIP197_MAX_TOKENS - 4); + EIP197_MAX_TOKENS - 6); - token[2].opcode = EIP197_TOKEN_OPCODE_RETRIEVE; - token[2].packet_length = digestsize; - token[2].stat = EIP197_TOKEN_STAT_LAST_HASH | + token[4].opcode = EIP197_TOKEN_OPCODE_RETRIEVE; + token[4].packet_length = digestsize; + token[4].stat = EIP197_TOKEN_STAT_LAST_HASH | EIP197_TOKEN_STAT_LAST_PACKET; - token[2].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST; + token[4].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST; - token[3].opcode = EIP197_TOKEN_OPCODE_VERIFY; - token[3].packet_length = digestsize | + token[5].opcode = EIP197_TOKEN_OPCODE_VERIFY; + token[5].packet_length = digestsize | EIP197_TOKEN_HASH_RESULT_VERIFY; - token[3].stat = EIP197_TOKEN_STAT_LAST_HASH | + token[5].stat = EIP197_TOKEN_STAT_LAST_HASH | EIP197_TOKEN_STAT_LAST_PACKET; - token[3].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT; + token[5].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT; } + token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; + token[0].packet_length = assoclen; + if (likely(cryptlen)) { - if (likely(assoclen)) { - token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; - token[0].packet_length = assoclen; - token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH; - } + token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH; - token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION; - token[1].packet_length = cryptlen; - token[1].stat = EIP197_TOKEN_STAT_LAST_HASH; - 
token[1].instructions = EIP197_TOKEN_INS_LAST | + token[3].opcode = EIP197_TOKEN_OPCODE_DIRECTION; + token[3].packet_length = cryptlen; + token[3].stat = EIP197_TOKEN_STAT_LAST_HASH; + token[3].instructions = EIP197_TOKEN_INS_LAST | EIP197_TOKEN_INS_TYPE_CRYPTO | EIP197_TOKEN_INS_TYPE_HASH | EIP197_TOKEN_INS_TYPE_OUTPUT; } else { - token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION; - token[1].packet_length = assoclen; - token[1].stat = EIP197_TOKEN_STAT_LAST_HASH; - token[1].instructions = EIP197_TOKEN_INS_LAST | + token[0].stat = EIP197_TOKEN_STAT_LAST_HASH; + token[0].instructions = EIP197_TOKEN_INS_LAST | EIP197_TOKEN_INS_TYPE_HASH; } + + if (ctx->xcm) { + token[0].instructions = EIP197_TOKEN_INS_LAST | + EIP197_TOKEN_INS_TYPE_HASH; + + token[1].opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES; + token[1].packet_length = 0; + token[1].stat = EIP197_TOKEN_STAT_LAST_HASH; + token[1].instructions = AES_BLOCK_SIZE; + + token[2].opcode = EIP197_TOKEN_OPCODE_INSERT; + token[2].packet_length = AES_BLOCK_SIZE; + token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | + EIP197_TOKEN_INS_TYPE_CRYPTO; + } } static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm, @@ -330,22 +356,27 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, if (ctx->aead) { /* Take in account the ipad+opad digests */ - ctrl_size += ctx->state_sz / sizeof(u32) * 2; - - if (sreq->direction == SAFEXCEL_ENCRYPT) + if (ctx->xcm) { + ctrl_size += ctx->state_sz / sizeof(u32); cdesc->control_data.control0 = - CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT | - CONTEXT_CONTROL_DIGEST_HMAC | CONTEXT_CONTROL_KEY_EN | + CONTEXT_CONTROL_DIGEST_XCM | ctx->hash_alg | CONTEXT_CONTROL_SIZE(ctrl_size); - else + } else { + ctrl_size += ctx->state_sz / sizeof(u32) * 2; cdesc->control_data.control0 = - CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN | - CONTEXT_CONTROL_DIGEST_HMAC | CONTEXT_CONTROL_KEY_EN | + CONTEXT_CONTROL_DIGEST_HMAC | ctx->hash_alg | CONTEXT_CONTROL_SIZE(ctrl_size); + } + if (sreq->direction == SAFEXCEL_ENCRYPT) + cdesc->control_data.control0 |= + CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT; + else + cdesc->control_data.control0 |= + CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN; } else { if (sreq->direction == SAFEXCEL_ENCRYPT) cdesc->control_data.control0 = @@ -485,9 +516,10 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring, memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32), ctx->ipad, ctx->state_sz); - memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) / - sizeof(u32), - ctx->opad, ctx->state_sz); + if (!ctx->xcm) + memcpy(ctx->base.ctxr->data + (ctx->key_len + + ctx->state_sz) / sizeof(u32), ctx->opad, + ctx->state_sz); } else if ((ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) && (sreq->direction == SAFEXCEL_DECRYPT)) { /* @@ -1893,3 +1925,121 @@ struct safexcel_alg_template safexcel_alg_xts_aes = { }, }, }; + +static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct safexcel_crypto_priv *priv = ctx->priv; + struct crypto_aes_ctx aes; + u32 hashkey[AES_BLOCK_SIZE >> 2]; + int ret, i; + + ret = aes_expandkey(&aes, key, len); + if (ret) { + crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + memzero_explicit(&aes, sizeof(aes)); + return ret; + } + + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { + for (i = 0; i < len / sizeof(u32); i++) { + if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) { + ctx->base.needs_inv = 
true; + break; + } + } + } + + for (i = 0; i < len / sizeof(u32); i++) + ctx->key[i] = cpu_to_le32(aes.key_enc[i]); + + ctx->key_len = len; + + /* Compute hash key by encrypting zeroes with cipher key */ + crypto_cipher_clear_flags(ctx->hkaes, CRYPTO_TFM_REQ_MASK); + crypto_cipher_set_flags(ctx->hkaes, crypto_aead_get_flags(ctfm) & + CRYPTO_TFM_REQ_MASK); + ret = crypto_cipher_setkey(ctx->hkaes, key, len); + crypto_aead_set_flags(ctfm, crypto_cipher_get_flags(ctx->hkaes) & + CRYPTO_TFM_RES_MASK); + if (ret) + return ret; + + memset(hashkey, 0, AES_BLOCK_SIZE); + crypto_cipher_encrypt_one(ctx->hkaes, (u8 *)hashkey, (u8 *)hashkey); + + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { + for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) { + if (ctx->ipad[i] != cpu_to_be32(hashkey[i])) { + ctx->base.needs_inv = true; + break; + } + } + } + + for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) + ctx->ipad[i] = cpu_to_be32(hashkey[i]); + + memzero_explicit(hashkey, AES_BLOCK_SIZE); + memzero_explicit(&aes, sizeof(aes)); + return 0; +} + +static int safexcel_aead_gcm_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_cra_init(tfm); + ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_GHASH; + ctx->state_sz = GHASH_BLOCK_SIZE; + ctx->xcm = 1; /* GCM */ + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */ + + ctx->hkaes = crypto_alloc_cipher("aes", 0, 0); + if (IS_ERR(ctx->hkaes)) + return PTR_ERR(ctx->hkaes); + + return 0; +} + +static void safexcel_aead_gcm_cra_exit(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_cipher(ctx->hkaes); + safexcel_aead_cra_exit(tfm); +} + +static int safexcel_aead_gcm_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + return crypto_gcm_check_authsize(authsize); +} + +struct safexcel_alg_template safexcel_alg_gcm = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_GHASH, + .alg.aead = { + .setkey = safexcel_aead_gcm_setkey, + .setauthsize = safexcel_aead_gcm_setauthsize, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = GCM_AES_IV_SIZE, + .maxauthsize = GHASH_DIGEST_SIZE, + .base = { + .cra_name = "gcm(aes)", + .cra_driver_name = "safexcel-gcm-aes", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_gcm_cra_init, + .cra_exit = safexcel_aead_gcm_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c index 2402a623759a..0f269b89cfd4 100644 --- a/drivers/crypto/inside-secure/safexcel_ring.c +++ b/drivers/crypto/inside-secure/safexcel_ring.c @@ -137,7 +137,13 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr struct safexcel_token *token = (struct safexcel_token *)cdesc->control_data.token; - cdesc->control_data.packet_length = full_data_len; + /* + * Note that the length here MUST be >0 or else the EIP(1)97 + * may hang. Newer EIP197 firmware actually incorporates this + * fix already, but that doesn't help the EIP97 and we may + * also be running older firmware. 
+ */ + cdesc->control_data.packet_length = full_data_len ?: 1; cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE | EIP197_OPTION_64BIT_CTX | EIP197_OPTION_CTX_CTRL_IN_CMD; -- cgit From 48e97afa41784c4b94e01f8605179bd49e759d03 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Fri, 30 Aug 2019 09:52:31 +0200 Subject: crypto: inside-secure - Added AES-CFB support This patch adds support for AES in 128 bit cipher feedback mode (AES-CFB). Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 1 + drivers/crypto/inside-secure/safexcel.h | 2 ++ drivers/crypto/inside-secure/safexcel_cipher.c | 36 ++++++++++++++++++++++++++ 3 files changed, 39 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 46cdcbeb122b..3196cb30e1b5 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -981,6 +981,7 @@ static struct safexcel_alg_template *safexcel_algs[] = { &safexcel_alg_cbc_des3_ede, &safexcel_alg_ecb_aes, &safexcel_alg_cbc_aes, + &safexcel_alg_cfb_aes, &safexcel_alg_ctr_aes, &safexcel_alg_md5, &safexcel_alg_sha1, diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index c6f93ec88c84..6f781ed1f0a5 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -336,6 +336,7 @@ struct safexcel_context_record { /* control1 */ #define CONTEXT_CONTROL_CRYPTO_MODE_ECB (0 << 0) #define CONTEXT_CONTROL_CRYPTO_MODE_CBC (1 << 0) +#define CONTEXT_CONTROL_CRYPTO_MODE_CFB (5 << 0) #define CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD (6 << 0) #define CONTEXT_CONTROL_CRYPTO_MODE_XTS (7 << 0) #define CONTEXT_CONTROL_CRYPTO_MODE_XCM ((6 << 0) | BIT(17)) @@ -767,6 +768,7 @@ extern struct safexcel_alg_template safexcel_alg_ecb_des3_ede; extern struct safexcel_alg_template safexcel_alg_cbc_des3_ede; extern struct safexcel_alg_template safexcel_alg_ecb_aes; extern struct safexcel_alg_template safexcel_alg_cbc_aes; +extern struct safexcel_alg_template safexcel_alg_cfb_aes; extern struct safexcel_alg_template safexcel_alg_ctr_aes; extern struct safexcel_alg_template safexcel_alg_md5; extern struct safexcel_alg_template safexcel_alg_sha1; diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 7091b6086e2e..4e06c61f9ff9 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -1136,6 +1136,42 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = { }, }; +static int safexcel_skcipher_aes_cfb_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_AES; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB; + return 0; +} + +struct safexcel_alg_template safexcel_alg_cfb_aes = { + .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_AES_XFB, + .alg.skcipher = { + .setkey = safexcel_skcipher_aes_setkey, + .encrypt = safexcel_encrypt, + .decrypt = safexcel_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "cfb(aes)", + .cra_driver_name = "safexcel-cfb-aes", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + 
.cra_init = safexcel_skcipher_aes_cfb_cra_init, + .cra_exit = safexcel_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm, const u8 *key, unsigned int len) { -- cgit From 50485dfb6c60f63e1a42943a910c0ab670c92a25 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Fri, 30 Aug 2019 09:52:32 +0200 Subject: crypto: inside-secure - Added AES-OFB support This patch adds support for AES in output feedback mode (AES-OFB). Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 1 + drivers/crypto/inside-secure/safexcel.h | 2 ++ drivers/crypto/inside-secure/safexcel_cipher.c | 36 ++++++++++++++++++++++++++ 3 files changed, 39 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 3196cb30e1b5..5ad4feb07b6f 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -982,6 +982,7 @@ static struct safexcel_alg_template *safexcel_algs[] = { &safexcel_alg_ecb_aes, &safexcel_alg_cbc_aes, &safexcel_alg_cfb_aes, + &safexcel_alg_ofb_aes, &safexcel_alg_ctr_aes, &safexcel_alg_md5, &safexcel_alg_sha1, diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 6f781ed1f0a5..0eb344534f36 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -336,6 +336,7 @@ struct safexcel_context_record { /* control1 */ #define CONTEXT_CONTROL_CRYPTO_MODE_ECB (0 << 0) #define CONTEXT_CONTROL_CRYPTO_MODE_CBC (1 << 0) +#define CONTEXT_CONTROL_CRYPTO_MODE_OFB (4 << 0) #define CONTEXT_CONTROL_CRYPTO_MODE_CFB (5 << 0) #define CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD (6 << 0) #define CONTEXT_CONTROL_CRYPTO_MODE_XTS (7 << 0) @@ -769,6 +770,7 @@ extern struct safexcel_alg_template safexcel_alg_cbc_des3_ede; extern struct safexcel_alg_template safexcel_alg_ecb_aes; extern struct safexcel_alg_template safexcel_alg_cbc_aes; extern struct safexcel_alg_template safexcel_alg_cfb_aes; +extern struct safexcel_alg_template safexcel_alg_ofb_aes; extern struct safexcel_alg_template safexcel_alg_ctr_aes; extern struct safexcel_alg_template safexcel_alg_md5; extern struct safexcel_alg_template safexcel_alg_sha1; diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 4e06c61f9ff9..7a1e78518871 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -1172,6 +1172,42 @@ struct safexcel_alg_template safexcel_alg_cfb_aes = { }, }; +static int safexcel_skcipher_aes_ofb_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_AES; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB; + return 0; +} + +struct safexcel_alg_template safexcel_alg_ofb_aes = { + .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_AES_XFB, + .alg.skcipher = { + .setkey = safexcel_skcipher_aes_setkey, + .encrypt = safexcel_encrypt, + .decrypt = safexcel_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "ofb(aes)", + .cra_driver_name = "safexcel-ofb-aes", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct 
safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_skcipher_aes_ofb_cra_init, + .cra_exit = safexcel_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm, const u8 *key, unsigned int len) { -- cgit From 4eb76faff89f29d3b729953007b963b39b2048be Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Fri, 30 Aug 2019 09:52:33 +0200 Subject: crypto: inside-secure - Added support for basic AES-CCM This patch adds support for the basic AES-CCM AEAD cipher suite. Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 4 +- drivers/crypto/inside-secure/safexcel.h | 10 +- drivers/crypto/inside-secure/safexcel_cipher.c | 288 ++++++++++++++++++++----- 3 files changed, 249 insertions(+), 53 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 5ad4feb07b6f..5d648ee5f0b0 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -715,8 +715,7 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv, } else if (rdesc->result_data.error_code & BIT(9)) { /* Authentication failed */ return -EBADMSG; - } else if (!rdesc->result_data.error_code) - return 0; + } /* All other non-fatal errors */ return -EINVAL; @@ -1009,6 +1008,7 @@ static struct safexcel_alg_template *safexcel_algs[] = { &safexcel_alg_authenc_hmac_sha512_ctr_aes, &safexcel_alg_xts_aes, &safexcel_alg_gcm, + &safexcel_alg_ccm, }; static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 0eb344534f36..1407804b66b7 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -19,7 +19,7 @@ /* Static configuration */ #define EIP197_DEFAULT_RING_SIZE 400 -#define EIP197_MAX_TOKENS 10 +#define EIP197_MAX_TOKENS 18 #define EIP197_MAX_RINGS 4 #define EIP197_FETCH_COUNT 1 #define EIP197_MAX_BATCH_SZ 64 @@ -330,6 +330,9 @@ struct safexcel_context_record { #define CONTEXT_CONTROL_CRYPTO_ALG_SHA384 (0x6 << 23) #define CONTEXT_CONTROL_CRYPTO_ALG_SHA512 (0x5 << 23) #define CONTEXT_CONTROL_CRYPTO_ALG_GHASH (0x4 << 23) +#define CONTEXT_CONTROL_CRYPTO_ALG_XCBC128 (0x1 << 23) +#define CONTEXT_CONTROL_CRYPTO_ALG_XCBC192 (0x2 << 23) +#define CONTEXT_CONTROL_CRYPTO_ALG_XCBC256 (0x3 << 23) #define CONTEXT_CONTROL_INV_FR (0x5 << 24) #define CONTEXT_CONTROL_INV_TR (0x6 << 24) @@ -350,6 +353,9 @@ struct safexcel_context_record { #define CONTEXT_CONTROL_CRYPTO_STORE BIT(12) #define CONTEXT_CONTROL_HASH_STORE BIT(19) +#define EIP197_XCM_MODE_GCM 1 +#define EIP197_XCM_MODE_CCM 2 + /* The hash counter given to the engine in the context has a granularity of * 64 bits. 
*/ @@ -464,6 +470,7 @@ static inline void eip197_noop_token(struct safexcel_token *token) /* Instructions */ #define EIP197_TOKEN_INS_INSERT_HASH_DIGEST 0x1c #define EIP197_TOKEN_INS_ORIGIN_IV0 0x14 +#define EIP197_TOKEN_INS_ORIGIN_TOKEN 0x1b #define EIP197_TOKEN_INS_ORIGIN_LEN(x) ((x) << 5) #define EIP197_TOKEN_INS_TYPE_OUTPUT BIT(5) #define EIP197_TOKEN_INS_TYPE_HASH BIT(6) @@ -797,5 +804,6 @@ extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes; extern struct safexcel_alg_template safexcel_alg_xts_aes; extern struct safexcel_alg_template safexcel_alg_gcm; +extern struct safexcel_alg_template safexcel_alg_ccm; #endif diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 7a1e78518871..ef51f8c2b473 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -81,7 +81,7 @@ static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, cdesc->control_data.token[3] = cpu_to_be32(1); return; - } else if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_XCM) { + } else if (ctx->xcm == EIP197_XCM_MODE_GCM) { cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; /* 96 bit IV part */ @@ -89,6 +89,16 @@ static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, /* 32 bit counter, start at 1 (big endian!) */ cdesc->control_data.token[3] = cpu_to_be32(1); + return; + } else if (ctx->xcm == EIP197_XCM_MODE_CCM) { + cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; + + /* Variable length IV part */ + memcpy(&cdesc->control_data.token[0], iv, 15 - iv[0]); + /* Start variable length counter at 0 */ + memset((u8 *)&cdesc->control_data.token[0] + 15 - iv[0], + 0, iv[0] + 1); + return; } @@ -143,67 +153,117 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, if (direction == SAFEXCEL_ENCRYPT) { /* align end of instruction sequence to end of token */ token = (struct safexcel_token *)(cdesc->control_data.token + - EIP197_MAX_TOKENS - 5); - - token[4].opcode = EIP197_TOKEN_OPCODE_INSERT; - token[4].packet_length = digestsize; - token[4].stat = EIP197_TOKEN_STAT_LAST_HASH | - EIP197_TOKEN_STAT_LAST_PACKET; - token[4].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | - EIP197_TOKEN_INS_INSERT_HASH_DIGEST; + EIP197_MAX_TOKENS - 13); + + token[12].opcode = EIP197_TOKEN_OPCODE_INSERT; + token[12].packet_length = digestsize; + token[12].stat = EIP197_TOKEN_STAT_LAST_HASH | + EIP197_TOKEN_STAT_LAST_PACKET; + token[12].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | + EIP197_TOKEN_INS_INSERT_HASH_DIGEST; } else { cryptlen -= digestsize; /* align end of instruction sequence to end of token */ token = (struct safexcel_token *)(cdesc->control_data.token + - EIP197_MAX_TOKENS - 6); - - token[4].opcode = EIP197_TOKEN_OPCODE_RETRIEVE; - token[4].packet_length = digestsize; - token[4].stat = EIP197_TOKEN_STAT_LAST_HASH | - EIP197_TOKEN_STAT_LAST_PACKET; - token[4].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST; - - token[5].opcode = EIP197_TOKEN_OPCODE_VERIFY; - token[5].packet_length = digestsize | - EIP197_TOKEN_HASH_RESULT_VERIFY; - token[5].stat = EIP197_TOKEN_STAT_LAST_HASH | - EIP197_TOKEN_STAT_LAST_PACKET; - token[5].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT; + EIP197_MAX_TOKENS - 14); + + token[12].opcode = EIP197_TOKEN_OPCODE_RETRIEVE; + token[12].packet_length = digestsize; + token[12].stat = EIP197_TOKEN_STAT_LAST_HASH | + 
EIP197_TOKEN_STAT_LAST_PACKET; + token[12].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST; + + token[13].opcode = EIP197_TOKEN_OPCODE_VERIFY; + token[13].packet_length = digestsize | + EIP197_TOKEN_HASH_RESULT_VERIFY; + token[13].stat = EIP197_TOKEN_STAT_LAST_HASH | + EIP197_TOKEN_STAT_LAST_PACKET; + token[13].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT; } - token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; - token[0].packet_length = assoclen; + token[6].opcode = EIP197_TOKEN_OPCODE_DIRECTION; + token[6].packet_length = assoclen; if (likely(cryptlen)) { - token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH; - - token[3].opcode = EIP197_TOKEN_OPCODE_DIRECTION; - token[3].packet_length = cryptlen; - token[3].stat = EIP197_TOKEN_STAT_LAST_HASH; - token[3].instructions = EIP197_TOKEN_INS_LAST | - EIP197_TOKEN_INS_TYPE_CRYPTO | - EIP197_TOKEN_INS_TYPE_HASH | - EIP197_TOKEN_INS_TYPE_OUTPUT; - } else { - token[0].stat = EIP197_TOKEN_STAT_LAST_HASH; - token[0].instructions = EIP197_TOKEN_INS_LAST | + token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH; + + token[10].opcode = EIP197_TOKEN_OPCODE_DIRECTION; + token[10].packet_length = cryptlen; + token[10].stat = EIP197_TOKEN_STAT_LAST_HASH; + token[10].instructions = EIP197_TOKEN_INS_LAST | + EIP197_TOKEN_INS_TYPE_CRYPTO | + EIP197_TOKEN_INS_TYPE_HASH | + EIP197_TOKEN_INS_TYPE_OUTPUT; + } else if (ctx->xcm != EIP197_XCM_MODE_CCM) { + token[6].stat = EIP197_TOKEN_STAT_LAST_HASH; + token[6].instructions = EIP197_TOKEN_INS_LAST | EIP197_TOKEN_INS_TYPE_HASH; } - if (ctx->xcm) { - token[0].instructions = EIP197_TOKEN_INS_LAST | + if (!ctx->xcm) + return; + + token[8].opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES; + token[8].packet_length = 0; + token[8].instructions = AES_BLOCK_SIZE; + + token[9].opcode = EIP197_TOKEN_OPCODE_INSERT; + token[9].packet_length = AES_BLOCK_SIZE; + token[9].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | + EIP197_TOKEN_INS_TYPE_CRYPTO; + + if (ctx->xcm == EIP197_XCM_MODE_GCM) { + token[6].instructions = EIP197_TOKEN_INS_LAST | EIP197_TOKEN_INS_TYPE_HASH; + } else { + u8 *cbcmaciv = (u8 *)&token[1]; + u32 *aadlen = (u32 *)&token[5]; + + /* Construct IV block B0 for the CBC-MAC */ + token[0].opcode = EIP197_TOKEN_OPCODE_INSERT; + token[0].packet_length = AES_BLOCK_SIZE + + ((assoclen > 0) << 1); + token[0].instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN | + EIP197_TOKEN_INS_TYPE_HASH; + /* Variable length IV part */ + memcpy(cbcmaciv, iv, 15 - iv[0]); + /* fixup flags byte */ + cbcmaciv[0] |= ((assoclen > 0) << 6) | ((digestsize - 2) << 2); + /* Clear upper bytes of variable message length to 0 */ + memset(cbcmaciv + 15 - iv[0], 0, iv[0] - 1); + /* insert lower 2 bytes of message length */ + cbcmaciv[14] = cryptlen >> 8; + cbcmaciv[15] = cryptlen & 255; - token[1].opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES; - token[1].packet_length = 0; - token[1].stat = EIP197_TOKEN_STAT_LAST_HASH; - token[1].instructions = AES_BLOCK_SIZE; + if (assoclen) { + *aadlen = cpu_to_le32(cpu_to_be16(assoclen)); + assoclen += 2; + } - token[2].opcode = EIP197_TOKEN_OPCODE_INSERT; - token[2].packet_length = AES_BLOCK_SIZE; - token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | - EIP197_TOKEN_INS_TYPE_CRYPTO; + token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH; + + /* Align AAD data towards hash engine */ + token[7].opcode = EIP197_TOKEN_OPCODE_INSERT; + assoclen &= 15; + token[7].packet_length = assoclen ? 
16 - assoclen : 0; + + if (likely(cryptlen)) { + token[7].instructions = EIP197_TOKEN_INS_TYPE_HASH; + + /* Align crypto data towards hash engine */ + token[10].stat = 0; + + token[11].opcode = EIP197_TOKEN_OPCODE_INSERT; + cryptlen &= 15; + token[11].packet_length = cryptlen ? 16 - cryptlen : 0; + token[11].stat = EIP197_TOKEN_STAT_LAST_HASH; + token[11].instructions = EIP197_TOKEN_INS_TYPE_HASH; + } else { + token[7].stat = EIP197_TOKEN_STAT_LAST_HASH; + token[7].instructions = EIP197_TOKEN_INS_LAST | + EIP197_TOKEN_INS_TYPE_HASH; + } } } @@ -373,10 +433,15 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, } if (sreq->direction == SAFEXCEL_ENCRYPT) cdesc->control_data.control0 |= - CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT; + (ctx->xcm == EIP197_XCM_MODE_CCM) ? + CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT : + CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT; + else cdesc->control_data.control0 |= - CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN; + (ctx->xcm == EIP197_XCM_MODE_CCM) ? + CONTEXT_CONTROL_TYPE_DECRYPT_HASH_IN : + CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN; } else { if (sreq->direction == SAFEXCEL_ENCRYPT) cdesc->control_data.control0 = @@ -2066,7 +2131,7 @@ static int safexcel_aead_gcm_cra_init(struct crypto_tfm *tfm) safexcel_aead_cra_init(tfm); ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_GHASH; ctx->state_sz = GHASH_BLOCK_SIZE; - ctx->xcm = 1; /* GCM */ + ctx->xcm = EIP197_XCM_MODE_GCM; ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */ ctx->hkaes = crypto_alloc_cipher("aes", 0, 0); @@ -2115,3 +2180,126 @@ struct safexcel_alg_template safexcel_alg_gcm = { }, }, }; + +static int safexcel_aead_ccm_setkey(struct crypto_aead *ctfm, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct safexcel_crypto_priv *priv = ctx->priv; + struct crypto_aes_ctx aes; + int ret, i; + + ret = aes_expandkey(&aes, key, len); + if (ret) { + crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + memzero_explicit(&aes, sizeof(aes)); + return ret; + } + + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { + for (i = 0; i < len / sizeof(u32); i++) { + if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) { + ctx->base.needs_inv = true; + break; + } + } + } + + for (i = 0; i < len / sizeof(u32); i++) { + ctx->key[i] = cpu_to_le32(aes.key_enc[i]); + ctx->ipad[i + 2 * AES_BLOCK_SIZE / sizeof(u32)] = + cpu_to_be32(aes.key_enc[i]); + } + + ctx->key_len = len; + ctx->state_sz = 2 * AES_BLOCK_SIZE + len; + + if (len == AES_KEYSIZE_192) + ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192; + else if (len == AES_KEYSIZE_256) + ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC256; + else + ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128; + + memzero_explicit(&aes, sizeof(aes)); + return 0; +} + +static int safexcel_aead_ccm_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_cra_init(tfm); + ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128; + ctx->state_sz = 3 * AES_BLOCK_SIZE; + ctx->xcm = EIP197_XCM_MODE_CCM; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */ + return 0; +} + +static int safexcel_aead_ccm_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + /* Borrowed from crypto/ccm.c */ + switch (authsize) { + case 4: + case 6: + case 8: + case 10: + case 12: + case 14: + case 16: + break; + default: + return -EINVAL; + } + + return 0; +} + +static int safexcel_ccm_encrypt(struct aead_request *req) +{ + 
struct safexcel_cipher_req *creq = aead_request_ctx(req); + + if (req->iv[0] < 1 || req->iv[0] > 7) + return -EINVAL; + + return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT); +} + +static int safexcel_ccm_decrypt(struct aead_request *req) +{ + struct safexcel_cipher_req *creq = aead_request_ctx(req); + + if (req->iv[0] < 1 || req->iv[0] > 7) + return -EINVAL; + + return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT); +} + +struct safexcel_alg_template safexcel_alg_ccm = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_CBC_MAC_ALL, + .alg.aead = { + .setkey = safexcel_aead_ccm_setkey, + .setauthsize = safexcel_aead_ccm_setauthsize, + .encrypt = safexcel_ccm_encrypt, + .decrypt = safexcel_ccm_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "ccm(aes)", + .cra_driver_name = "safexcel-ccm-aes", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_ccm_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; -- cgit From 2a4bfd023fa97c70006ab368c30c0bf84d0e905d Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Sat, 31 Aug 2019 13:55:55 +0200 Subject: hwrng: timeriomem - relax check on memory resource size The timeriomem_rng driver only accesses the first 4 bytes of the given memory area and currently, it also forces that memory resource to be exactly 4 bytes in size. This, however, is problematic when used with device-trees that are generated from things like FPGA toolchains, where the minimum size of an exposed memory block may be something like 4k. Hence, let's only check for what's needed for the driver to operate properly; namely that we have enough memory available to read the random data from. Signed-off-by: Daniel Mack Signed-off-by: Herbert Xu --- drivers/char/hw_random/timeriomem-rng.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c index ccd1f6e0696b..e262445fed5f 100644 --- a/drivers/char/hw_random/timeriomem-rng.c +++ b/drivers/char/hw_random/timeriomem-rng.c @@ -117,9 +117,9 @@ static int timeriomem_rng_probe(struct platform_device *pdev) if (!res) return -ENXIO; - if (res->start % 4 != 0 || resource_size(res) != 4) { + if (res->start % 4 != 0 || resource_size(res) < 4) { dev_err(&pdev->dev, - "address must be four bytes wide and aligned\n"); + "address must be at least four bytes wide and 32-bit aligned\n"); return -EINVAL; } -- cgit From e55d8a75c60207e3c261c847d03f832344544712 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sun, 1 Sep 2019 22:35:28 +0200 Subject: crypto: ccree - Rename arrays to avoid conflict with crypto/sha256.h Rename the algo_init arrays to cc_algo_init so that they do not conflict with the functions declared in crypto/sha256.h. This is a preparation patch for folding crypto/sha256.h into crypto/sha.h. 
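To illustrate the clash this avoids (simplified; the header contents are paraphrased here): crypto/sha256.h declares generic helpers whose names matched the driver's static larval-digest arrays, e.g.

    /* include/crypto/sha256.h (generic helper, roughly) */
    static inline int sha256_init(struct sha256_state *sctx) { ... }

    /* drivers/crypto/ccree/cc_hash.c, before this patch */
    static const u32 sha256_init[] = { ... };	/* same identifier */

Once crypto/sha256.h is folded into crypto/sha.h, that identifier becomes visible in this file, so prefixing the arrays with cc_ sidesteps the redefinition.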
Signed-off-by: Hans de Goede Acked-by: Gilad Ben-Yossef Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_hash.c | 153 +++++++++++++++++++++-------------------- 1 file changed, 77 insertions(+), 76 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c index a6abe4e3bb0e..bc71bdf44a9f 100644 --- a/drivers/crypto/ccree/cc_hash.c +++ b/drivers/crypto/ccree/cc_hash.c @@ -25,27 +25,27 @@ struct cc_hash_handle { struct list_head hash_list; }; -static const u32 digest_len_init[] = { +static const u32 cc_digest_len_init[] = { 0x00000040, 0x00000000, 0x00000000, 0x00000000 }; -static const u32 md5_init[] = { +static const u32 cc_md5_init[] = { SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 }; -static const u32 sha1_init[] = { +static const u32 cc_sha1_init[] = { SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 }; -static const u32 sha224_init[] = { +static const u32 cc_sha224_init[] = { SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4, SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 }; -static const u32 sha256_init[] = { +static const u32 cc_sha256_init[] = { SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4, SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 }; -static const u32 digest_len_sha512_init[] = { +static const u32 cc_digest_len_sha512_init[] = { 0x00000080, 0x00000000, 0x00000000, 0x00000000 }; -static u64 sha384_init[] = { +static u64 cc_sha384_init[] = { SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4, SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 }; -static u64 sha512_init[] = { +static u64 cc_sha512_init[] = { SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4, SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 }; -static const u32 sm3_init[] = { +static const u32 cc_sm3_init[] = { SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE, SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA }; @@ -144,10 +144,11 @@ static void cc_init_req(struct device *dev, struct ahash_req_ctx *state, if (ctx->hash_mode == DRV_HASH_SHA512 || ctx->hash_mode == DRV_HASH_SHA384) memcpy(state->digest_bytes_len, - digest_len_sha512_init, + cc_digest_len_sha512_init, ctx->hash_len); else - memcpy(state->digest_bytes_len, digest_len_init, + memcpy(state->digest_bytes_len, + cc_digest_len_init, ctx->hash_len); } @@ -1873,26 +1874,26 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata) int rc = 0; /* Copy-to-sram digest-len */ - cc_set_sram_desc(digest_len_init, sram_buff_ofs, - ARRAY_SIZE(digest_len_init), larval_seq, + cc_set_sram_desc(cc_digest_len_init, sram_buff_ofs, + ARRAY_SIZE(cc_digest_len_init), larval_seq, &larval_seq_len); rc = send_request_init(drvdata, larval_seq, larval_seq_len); if (rc) goto init_digest_const_err; - sram_buff_ofs += sizeof(digest_len_init); + sram_buff_ofs += sizeof(cc_digest_len_init); larval_seq_len = 0; if (large_sha_supported) { /* Copy-to-sram digest-len for sha384/512 */ - cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs, - ARRAY_SIZE(digest_len_sha512_init), + cc_set_sram_desc(cc_digest_len_sha512_init, sram_buff_ofs, + ARRAY_SIZE(cc_digest_len_sha512_init), larval_seq, &larval_seq_len); rc = send_request_init(drvdata, larval_seq, larval_seq_len); if (rc) goto init_digest_const_err; - sram_buff_ofs += sizeof(digest_len_sha512_init); + sram_buff_ofs += sizeof(cc_digest_len_sha512_init); larval_seq_len = 0; } @@ -1900,64 +1901,64 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata) hash_handle->larval_digest_sram_addr = sram_buff_ofs; /* Copy-to-sram initial SHA* digests */ - cc_set_sram_desc(md5_init, sram_buff_ofs, ARRAY_SIZE(md5_init), + cc_set_sram_desc(cc_md5_init, sram_buff_ofs, 
ARRAY_SIZE(cc_md5_init), larval_seq, &larval_seq_len); rc = send_request_init(drvdata, larval_seq, larval_seq_len); if (rc) goto init_digest_const_err; - sram_buff_ofs += sizeof(md5_init); + sram_buff_ofs += sizeof(cc_md5_init); larval_seq_len = 0; - cc_set_sram_desc(sha1_init, sram_buff_ofs, - ARRAY_SIZE(sha1_init), larval_seq, + cc_set_sram_desc(cc_sha1_init, sram_buff_ofs, + ARRAY_SIZE(cc_sha1_init), larval_seq, &larval_seq_len); rc = send_request_init(drvdata, larval_seq, larval_seq_len); if (rc) goto init_digest_const_err; - sram_buff_ofs += sizeof(sha1_init); + sram_buff_ofs += sizeof(cc_sha1_init); larval_seq_len = 0; - cc_set_sram_desc(sha224_init, sram_buff_ofs, - ARRAY_SIZE(sha224_init), larval_seq, + cc_set_sram_desc(cc_sha224_init, sram_buff_ofs, + ARRAY_SIZE(cc_sha224_init), larval_seq, &larval_seq_len); rc = send_request_init(drvdata, larval_seq, larval_seq_len); if (rc) goto init_digest_const_err; - sram_buff_ofs += sizeof(sha224_init); + sram_buff_ofs += sizeof(cc_sha224_init); larval_seq_len = 0; - cc_set_sram_desc(sha256_init, sram_buff_ofs, - ARRAY_SIZE(sha256_init), larval_seq, + cc_set_sram_desc(cc_sha256_init, sram_buff_ofs, + ARRAY_SIZE(cc_sha256_init), larval_seq, &larval_seq_len); rc = send_request_init(drvdata, larval_seq, larval_seq_len); if (rc) goto init_digest_const_err; - sram_buff_ofs += sizeof(sha256_init); + sram_buff_ofs += sizeof(cc_sha256_init); larval_seq_len = 0; if (sm3_supported) { - cc_set_sram_desc(sm3_init, sram_buff_ofs, - ARRAY_SIZE(sm3_init), larval_seq, + cc_set_sram_desc(cc_sm3_init, sram_buff_ofs, + ARRAY_SIZE(cc_sm3_init), larval_seq, &larval_seq_len); rc = send_request_init(drvdata, larval_seq, larval_seq_len); if (rc) goto init_digest_const_err; - sram_buff_ofs += sizeof(sm3_init); + sram_buff_ofs += sizeof(cc_sm3_init); larval_seq_len = 0; } if (large_sha_supported) { - cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs, - (ARRAY_SIZE(sha384_init) * 2), larval_seq, + cc_set_sram_desc((u32 *)cc_sha384_init, sram_buff_ofs, + (ARRAY_SIZE(cc_sha384_init) * 2), larval_seq, &larval_seq_len); rc = send_request_init(drvdata, larval_seq, larval_seq_len); if (rc) goto init_digest_const_err; - sram_buff_ofs += sizeof(sha384_init); + sram_buff_ofs += sizeof(cc_sha384_init); larval_seq_len = 0; - cc_set_sram_desc((u32 *)sha512_init, sram_buff_ofs, - (ARRAY_SIZE(sha512_init) * 2), larval_seq, + cc_set_sram_desc((u32 *)cc_sha512_init, sram_buff_ofs, + (ARRAY_SIZE(cc_sha512_init) * 2), larval_seq, &larval_seq_len); rc = send_request_init(drvdata, larval_seq, larval_seq_len); if (rc) @@ -1986,8 +1987,8 @@ static void __init cc_swap_dwords(u32 *buf, unsigned long size) */ void __init cc_hash_global_init(void) { - cc_swap_dwords((u32 *)&sha384_init, (ARRAY_SIZE(sha384_init) * 2)); - cc_swap_dwords((u32 *)&sha512_init, (ARRAY_SIZE(sha512_init) * 2)); + cc_swap_dwords((u32 *)&cc_sha384_init, (ARRAY_SIZE(cc_sha384_init) * 2)); + cc_swap_dwords((u32 *)&cc_sha512_init, (ARRAY_SIZE(cc_sha512_init) * 2)); } int cc_hash_alloc(struct cc_drvdata *drvdata) @@ -2006,18 +2007,18 @@ int cc_hash_alloc(struct cc_drvdata *drvdata) INIT_LIST_HEAD(&hash_handle->hash_list); drvdata->hash_handle = hash_handle; - sram_size_to_alloc = sizeof(digest_len_init) + - sizeof(md5_init) + - sizeof(sha1_init) + - sizeof(sha224_init) + - sizeof(sha256_init); + sram_size_to_alloc = sizeof(cc_digest_len_init) + + sizeof(cc_md5_init) + + sizeof(cc_sha1_init) + + sizeof(cc_sha224_init) + + sizeof(cc_sha256_init); if (drvdata->hw_rev >= CC_HW_REV_713) - sram_size_to_alloc += sizeof(sm3_init); 
+ sram_size_to_alloc += sizeof(cc_sm3_init); if (drvdata->hw_rev >= CC_HW_REV_712) - sram_size_to_alloc += sizeof(digest_len_sha512_init) + - sizeof(sha384_init) + sizeof(sha512_init); + sram_size_to_alloc += sizeof(cc_digest_len_sha512_init) + + sizeof(cc_sha384_init) + sizeof(cc_sha512_init); sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc); if (sram_buff == NULL_SRAM_ADDR) { @@ -2258,22 +2259,22 @@ static const void *cc_larval_digest(struct device *dev, u32 mode) { switch (mode) { case DRV_HASH_MD5: - return md5_init; + return cc_md5_init; case DRV_HASH_SHA1: - return sha1_init; + return cc_sha1_init; case DRV_HASH_SHA224: - return sha224_init; + return cc_sha224_init; case DRV_HASH_SHA256: - return sha256_init; + return cc_sha256_init; case DRV_HASH_SHA384: - return sha384_init; + return cc_sha384_init; case DRV_HASH_SHA512: - return sha512_init; + return cc_sha512_init; case DRV_HASH_SM3: - return sm3_init; + return cc_sm3_init; default: dev_err(dev, "Invalid hash mode (%d)\n", mode); - return md5_init; + return cc_md5_init; } } @@ -2301,40 +2302,40 @@ cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode) return (hash_handle->larval_digest_sram_addr); case DRV_HASH_SHA1: return (hash_handle->larval_digest_sram_addr + - sizeof(md5_init)); + sizeof(cc_md5_init)); case DRV_HASH_SHA224: return (hash_handle->larval_digest_sram_addr + - sizeof(md5_init) + - sizeof(sha1_init)); + sizeof(cc_md5_init) + + sizeof(cc_sha1_init)); case DRV_HASH_SHA256: return (hash_handle->larval_digest_sram_addr + - sizeof(md5_init) + - sizeof(sha1_init) + - sizeof(sha224_init)); + sizeof(cc_md5_init) + + sizeof(cc_sha1_init) + + sizeof(cc_sha224_init)); case DRV_HASH_SM3: return (hash_handle->larval_digest_sram_addr + - sizeof(md5_init) + - sizeof(sha1_init) + - sizeof(sha224_init) + - sizeof(sha256_init)); + sizeof(cc_md5_init) + + sizeof(cc_sha1_init) + + sizeof(cc_sha224_init) + + sizeof(cc_sha256_init)); case DRV_HASH_SHA384: addr = (hash_handle->larval_digest_sram_addr + - sizeof(md5_init) + - sizeof(sha1_init) + - sizeof(sha224_init) + - sizeof(sha256_init)); + sizeof(cc_md5_init) + + sizeof(cc_sha1_init) + + sizeof(cc_sha224_init) + + sizeof(cc_sha256_init)); if (sm3_supported) - addr += sizeof(sm3_init); + addr += sizeof(cc_sm3_init); return addr; case DRV_HASH_SHA512: addr = (hash_handle->larval_digest_sram_addr + - sizeof(md5_init) + - sizeof(sha1_init) + - sizeof(sha224_init) + - sizeof(sha256_init) + - sizeof(sha384_init)); + sizeof(cc_md5_init) + + sizeof(cc_sha1_init) + + sizeof(cc_sha224_init) + + sizeof(cc_sha256_init) + + sizeof(cc_sha384_init)); if (sm3_supported) - addr += sizeof(sm3_init); + addr += sizeof(cc_sm3_init); return addr; default: dev_err(dev, "Invalid hash mode (%d)\n", mode); @@ -2360,7 +2361,7 @@ cc_digest_len_addr(void *drvdata, u32 mode) #if (CC_DEV_SHA_MAX > 256) case DRV_HASH_SHA384: case DRV_HASH_SHA512: - return digest_len_addr + sizeof(digest_len_init); + return digest_len_addr + sizeof(cc_digest_len_init); #endif default: return digest_len_addr; /*to avoid kernel crash*/ -- cgit From 6e4655e20d92c902932dc65909c31c414b4aba2d Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sun, 1 Sep 2019 22:35:29 +0200 Subject: crypto: chelsio - Rename arrays to avoid conflict with crypto/sha256.h Rename the sha*_init arrays to chcr_sha*_init so that they do not conflict with the functions declared in crypto/sha256.h. This is a preparation patch for folding crypto/sha256.h into crypto/sha.h. 
Signed-off-by: Hans de Goede Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chcr_algo.h | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h index ee20dd899e83..d1e6b51df0ce 100644 --- a/drivers/crypto/chelsio/chcr_algo.h +++ b/drivers/crypto/chelsio/chcr_algo.h @@ -333,26 +333,26 @@ struct phys_sge_pairs { }; -static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { +static const u32 chcr_sha1_init[SHA1_DIGEST_SIZE / 4] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, }; -static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { +static const u32 chcr_sha224_init[SHA256_DIGEST_SIZE / 4] = { SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, }; -static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { +static const u32 chcr_sha256_init[SHA256_DIGEST_SIZE / 4] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, }; -static const u64 sha384_init[SHA512_DIGEST_SIZE / 8] = { +static const u64 chcr_sha384_init[SHA512_DIGEST_SIZE / 8] = { SHA384_H0, SHA384_H1, SHA384_H2, SHA384_H3, SHA384_H4, SHA384_H5, SHA384_H6, SHA384_H7, }; -static const u64 sha512_init[SHA512_DIGEST_SIZE / 8] = { +static const u64 chcr_sha512_init[SHA512_DIGEST_SIZE / 8] = { SHA512_H0, SHA512_H1, SHA512_H2, SHA512_H3, SHA512_H4, SHA512_H5, SHA512_H6, SHA512_H7, }; @@ -362,21 +362,21 @@ static inline void copy_hash_init_values(char *key, int digestsize) u8 i; __be32 *dkey = (__be32 *)key; u64 *ldkey = (u64 *)key; - __be64 *sha384 = (__be64 *)sha384_init; - __be64 *sha512 = (__be64 *)sha512_init; + __be64 *sha384 = (__be64 *)chcr_sha384_init; + __be64 *sha512 = (__be64 *)chcr_sha512_init; switch (digestsize) { case SHA1_DIGEST_SIZE: for (i = 0; i < SHA1_INIT_STATE; i++) - dkey[i] = cpu_to_be32(sha1_init[i]); + dkey[i] = cpu_to_be32(chcr_sha1_init[i]); break; case SHA224_DIGEST_SIZE: for (i = 0; i < SHA224_INIT_STATE; i++) - dkey[i] = cpu_to_be32(sha224_init[i]); + dkey[i] = cpu_to_be32(chcr_sha224_init[i]); break; case SHA256_DIGEST_SIZE: for (i = 0; i < SHA256_INIT_STATE; i++) - dkey[i] = cpu_to_be32(sha256_init[i]); + dkey[i] = cpu_to_be32(chcr_sha256_init[i]); break; case SHA384_DIGEST_SIZE: for (i = 0; i < SHA384_INIT_STATE; i++) -- cgit From 527aa8958f6e112e10479a8a8989c94164e090c0 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sun, 1 Sep 2019 22:35:30 +0200 Subject: crypto: n2 - Rename arrays to avoid conflict with crypto/sha256.h Rename the sha*_init arrays to n2_sha*_init so that they do not conflict with the functions declared in crypto/sha256.h. Also rename md5_init to n2_md5_init for consistency. This is a preparation patch for folding crypto/sha256.h into crypto/sha.h. 
Signed-off-by: Hans de Goede Signed-off-by: Herbert Xu --- drivers/crypto/n2_core.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 63923cc33727..dc15b06e96ab 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1283,20 +1283,20 @@ struct n2_hash_tmpl { u8 hmac_type; }; -static const u32 md5_init[MD5_HASH_WORDS] = { +static const u32 n2_md5_init[MD5_HASH_WORDS] = { cpu_to_le32(MD5_H0), cpu_to_le32(MD5_H1), cpu_to_le32(MD5_H2), cpu_to_le32(MD5_H3), }; -static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { +static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, }; -static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { +static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, }; -static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { +static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = { SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, }; @@ -1304,7 +1304,7 @@ static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { static const struct n2_hash_tmpl hash_tmpls[] = { { .name = "md5", .hash_zero = md5_zero_message_hash, - .hash_init = md5_init, + .hash_init = n2_md5_init, .auth_type = AUTH_TYPE_MD5, .hmac_type = AUTH_TYPE_HMAC_MD5, .hw_op_hashsz = MD5_DIGEST_SIZE, @@ -1312,7 +1312,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { { .name = "sha1", .hash_zero = sha1_zero_message_hash, - .hash_init = sha1_init, + .hash_init = n2_sha1_init, .auth_type = AUTH_TYPE_SHA1, .hmac_type = AUTH_TYPE_HMAC_SHA1, .hw_op_hashsz = SHA1_DIGEST_SIZE, @@ -1320,7 +1320,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { { .name = "sha256", .hash_zero = sha256_zero_message_hash, - .hash_init = sha256_init, + .hash_init = n2_sha256_init, .auth_type = AUTH_TYPE_SHA256, .hmac_type = AUTH_TYPE_HMAC_SHA256, .hw_op_hashsz = SHA256_DIGEST_SIZE, @@ -1328,7 +1328,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { { .name = "sha224", .hash_zero = sha224_zero_message_hash, - .hash_init = sha224_init, + .hash_init = n2_sha224_init, .auth_type = AUTH_TYPE_SHA256, .hmac_type = AUTH_TYPE_RESERVED, .hw_op_hashsz = SHA256_DIGEST_SIZE, -- cgit From 796114f5c5224c210127e5e62f915a76caad05cd Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Tue, 3 Sep 2019 19:35:04 -0700 Subject: crypto: caam - make sure clocks are enabled first MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In order to access an IP block's registers, we need to enable the appropriate clocks first; otherwise we risk hanging the CPU. The problem becomes very apparent when trying to use the CAAM driver built as a kernel module. In that case, caam_probe() gets called after clk_disable_unused(), which means all of the necessary clocks are guaranteed to be disabled.
Coincidentally, this change also fixes an iomap leak introduced by an early return (instead of "goto iounmap_ctrl") in commit 41fc54afae70 ("crypto: caam - simplfy clock initialization") Tested on ZII i.MX6Q+ RDU2 Fixes: 176435ad2ac7 ("crypto: caam - defer probing until QMan is available") Fixes: 41fc54afae70 ("crypto: caam - simplfy clock initialization") Signed-off-by: Andrey Smirnov Cc: Chris Healy Cc: Lucas Stach Cc: Horia Geantă Cc: Herbert Xu Cc: Iuliana Prodan Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Tested-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 3c059d0e4207..db22777d59b4 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -594,6 +594,21 @@ static int caam_probe(struct platform_device *pdev) dev_set_drvdata(dev, ctrlpriv); nprop = pdev->dev.of_node; + imx_soc_match = soc_device_match(caam_imx_soc_table); + caam_imx = (bool)imx_soc_match; + + if (imx_soc_match) { + if (!imx_soc_match->data) { + dev_err(dev, "No clock data provided for i.MX SoC"); + return -EINVAL; + } + + ret = init_clocks(dev, imx_soc_match->data); + if (ret) + return ret; + } + + /* Get configuration properties from device tree */ /* First, get register page */ ctrl = of_iomap(nprop, 0); @@ -604,9 +619,6 @@ static int caam_probe(struct platform_device *pdev) caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) & (CSTA_PLEND | CSTA_ALT_PLEND)); - imx_soc_match = soc_device_match(caam_imx_soc_table); - caam_imx = (bool)imx_soc_match; - comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms); if (comp_params & CTPR_MS_PS && rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR) caam_ptr_sz = sizeof(u64); @@ -640,18 +652,6 @@ static int caam_probe(struct platform_device *pdev) } #endif - if (imx_soc_match) { - if (!imx_soc_match->data) { - dev_err(dev, "No clock data provided for i.MX SoC"); - return -EINVAL; - } - - ret = init_clocks(dev, imx_soc_match->data); - if (ret) - return ret; - } - - /* Allocating the BLOCK_OFFSET based on the supported page size on * the platform */ -- cgit From 05d2a75441d44a11c6337f5b731fd04d96e1ee9f Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Tue, 3 Sep 2019 19:35:05 -0700 Subject: crypto: caam - use devres to unmap JR's registers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use devres to unmap memory and drop explicit de-initialization code. NOTE: There's no corresponding unmapping code in caam_jr_remove(), which seems like a resource leak.
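The devres pattern being adopted, in miniature: map through devm_ioremap() and let the device core unmap when the device is unbound, so error paths (and remove()) need no explicit iounmap(). A sketch with hypothetical names (my_probe), not the actual jr.c code:

	#include <linux/platform_device.h>
	#include <linux/io.h>

	static int my_probe(struct platform_device *pdev)
	{
		struct resource *r;
		void __iomem *regs;

		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!r)
			return -ENODEV;

		/* unmapped automatically when the device is unbound */
		regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
		if (!regs)
			return -ENOMEM;

		/* any early return below this point leaks no mapping */
		return 0;
	}

This is also why the NOTE above matters: the old code only unmapped on probe failure, never on remove, and the devres conversion covers both cases for free.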
Signed-off-by: Andrey Smirnov Cc: Chris Healy Cc: Lucas Stach Cc: Horia Geantă Cc: Herbert Xu Cc: Iuliana Prodan Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Reviewed-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/jr.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 417ad52615c6..7947d61a25cf 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -498,6 +498,7 @@ static int caam_jr_probe(struct platform_device *pdev) struct caam_job_ring __iomem *ctrl; struct caam_drv_private_jr *jrpriv; static int total_jobrs; + struct resource *r; int error; jrdev = &pdev->dev; @@ -513,9 +514,15 @@ static int caam_jr_probe(struct platform_device *pdev) nprop = pdev->dev.of_node; /* Get configuration properties from device tree */ /* First, get register page */ - ctrl = of_iomap(nprop, 0); + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(jrdev, "platform_get_resource() failed\n"); + return -ENOMEM; + } + + ctrl = devm_ioremap(jrdev, r->start, resource_size(r)); if (!ctrl) { - dev_err(jrdev, "of_iomap() failed\n"); + dev_err(jrdev, "devm_ioremap() failed\n"); return -ENOMEM; } @@ -525,7 +532,6 @@ static int caam_jr_probe(struct platform_device *pdev) if (error) { dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n", error); - iounmap(ctrl); return error; } @@ -536,7 +542,6 @@ static int caam_jr_probe(struct platform_device *pdev) error = caam_jr_init(jrdev); /* now turn on hardware */ if (error) { irq_dispose_mapping(jrpriv->irq); - iounmap(ctrl); return error; } -- cgit From 549077d7d86a1a2b8db4d131b260db9c9e206b66 Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Tue, 3 Sep 2019 19:35:06 -0700 Subject: crypto: caam - check irq_of_parse_and_map for errors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit irq_of_parse_and_map() will return zero in case of error, so add an error check for that. Signed-off-by: Andrey Smirnov Cc: Chris Healy Cc: Lucas Stach Cc: Horia Geantă Cc: Herbert Xu Cc: Iuliana Prodan Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Reviewed-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/jr.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 7947d61a25cf..2732f3a0725a 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -537,6 +537,10 @@ static int caam_jr_probe(struct platform_device *pdev) /* Identify the interrupt */ jrpriv->irq = irq_of_parse_and_map(nprop, 0); + if (!jrpriv->irq) { + dev_err(jrdev, "irq_of_parse_and_map failed\n"); + return -EINVAL; + } /* Now do the platform independent part */ error = caam_jr_init(jrdev); /* now turn on hardware */ -- cgit From f2ef960231d77b72685f81f92b49bfaa22f9973e Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Tue, 3 Sep 2019 19:35:07 -0700 Subject: crypto: caam - dispose of IRQ mapping only after IRQ is freed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With IRQ requesting being managed by devres, we need to make sure that we dispose of the IRQ mapping after, and not before, it is freed (otherwise we'll end up with a warning from the kernel). To achieve that, simply convert the IRQ mapping to rely on devres as well.
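The reason ordering matters: devres actions run in reverse registration order, so a mapping disposal registered before devm_request_irq() runs after the IRQ itself has been freed. A sketch of the conversion, with hypothetical names (my_irq_dispose, my_probe_irq), not the caam code itself:

	#include <linux/device.h>
	#include <linux/of_irq.h>
	#include <linux/irqdomain.h>

	static void my_irq_dispose(void *data)
	{
		irq_dispose_mapping((unsigned long)data);
	}

	static int my_probe_irq(struct device *dev, struct device_node *np)
	{
		unsigned int irq;
		int ret;

		irq = irq_of_parse_and_map(np, 0);
		if (!irq)	/* zero means the mapping failed */
			return -EINVAL;

		/* registered first, so it runs *last* on teardown */
		ret = devm_add_action_or_reset(dev, my_irq_dispose,
					       (void *)(unsigned long)irq);
		if (ret)
			return ret;

		/* a devm_request_irq() made after this point is now
		 * guaranteed to be freed before the mapping is disposed */
		return 0;
	}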
Fixes: f314f12db65c ("crypto: caam - convert caam_jr_init() to use devres") Signed-off-by: Andrey Smirnov Cc: Chris Healy Cc: Lucas Stach Cc: Horia Geantă Cc: Herbert Xu Cc: Iuliana Prodan Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Reviewed-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/jr.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 2732f3a0725a..d11956bc358f 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -146,7 +146,6 @@ static int caam_jr_remove(struct platform_device *pdev) ret = caam_jr_shutdown(jrdev); if (ret) dev_err(jrdev, "Failed to shut down job ring\n"); - irq_dispose_mapping(jrpriv->irq); return ret; } @@ -487,6 +486,10 @@ static int caam_jr_init(struct device *dev) return error; } +static void caam_jr_irq_dispose_mapping(void *data) +{ + irq_dispose_mapping((int)data); +} /* * Probe routine for each detected JobR subsystem. @@ -542,12 +545,15 @@ static int caam_jr_probe(struct platform_device *pdev) return -EINVAL; } + error = devm_add_action_or_reset(jrdev, caam_jr_irq_dispose_mapping, + (void *)jrpriv->irq); + if (error) + return error; + /* Now do the platform independent part */ error = caam_jr_init(jrdev); /* now turn on hardware */ - if (error) { - irq_dispose_mapping(jrpriv->irq); + if (error) return error; - } jrpriv->dev = jrdev; spin_lock(&driver_data.jr_alloc_lock); -- cgit From 18a0bb4aca81839578fef79c9bdef6710ed1c4dc Mon Sep 17 00:00:00 2001 From: zhong jiang Date: Wed, 4 Sep 2019 11:01:17 +0800 Subject: crypto: marvell - Use kzfree rather than its implementation Use kzfree instead of memset() + kfree(). Signed-off-by: zhong jiang Signed-off-by: Herbert Xu --- drivers/crypto/marvell/hash.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index 0f0ac851f4eb..a2b35fb0fb89 100644 --- a/drivers/crypto/marvell/hash.c +++ b/drivers/crypto/marvell/hash.c @@ -1148,8 +1148,7 @@ static int mv_cesa_ahmac_pad_init(struct ahash_request *req, } /* Set the memory region to 0 to avoid any leak. */ - memset(keydup, 0, keylen); - kfree(keydup); + kzfree(keydup); if (ret) return ret; -- cgit From c552ffb5c93d9d65aaf34f5f001c4e7e8484ced1 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 4 Sep 2019 14:18:09 +0000 Subject: crypto: cavium/zip - Add missing single_release() When using single_open() for opening, single_release() should be used instead of seq_release(), otherwise there is a memory leak. 
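For context, the single_open()/single_release() pairing looks like this; a generic sketch (the my_* names are placeholders) mirroring the fops fixed below:

	#include <linux/seq_file.h>
	#include <linux/fs.h>
	#include <linux/module.h>

	static int my_show(struct seq_file *s, void *unused)
	{
		seq_puts(s, "stats go here\n");
		return 0;
	}

	static int my_open(struct inode *inode, struct file *file)
	{
		/* allocates a struct seq_file behind file->private_data */
		return single_open(file, my_show, inode->i_private);
	}

	static const struct file_operations my_fops = {
		.owner   = THIS_MODULE,
		.open    = my_open,
		.read    = seq_read,
		/* must be single_release(), not seq_release() or nothing:
		 * it frees what single_open() allocated */
		.release = single_release,
	};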
Fixes: 09ae5d37e093 ("crypto: zip - Add Compression/Decompression statistics") Cc: Signed-off-by: Wei Yongjun Signed-off-by: Herbert Xu --- drivers/crypto/cavium/zip/zip_main.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c index a8447a3cf366..194624b4855b 100644 --- a/drivers/crypto/cavium/zip/zip_main.c +++ b/drivers/crypto/cavium/zip/zip_main.c @@ -593,6 +593,7 @@ static const struct file_operations zip_stats_fops = { .owner = THIS_MODULE, .open = zip_stats_open, .read = seq_read, + .release = single_release, }; static int zip_clear_open(struct inode *inode, struct file *file) @@ -604,6 +605,7 @@ static const struct file_operations zip_clear_fops = { .owner = THIS_MODULE, .open = zip_clear_open, .read = seq_read, + .release = single_release, }; static int zip_regs_open(struct inode *inode, struct file *file) @@ -615,6 +617,7 @@ static const struct file_operations zip_regs_fops = { .owner = THIS_MODULE, .open = zip_regs_open, .read = seq_read, + .release = single_release, }; /* Root directory for thunderx_zip debugfs entry */ -- cgit From 347bce3eab76b2e07147f8d4b4436d59264f8132 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 5 Sep 2019 14:51:37 +1000 Subject: crypto: ux500 - Fix COMPILE_TEST warnings This patch fixes a number of warnings encountered when this driver is built on a 64-bit platform with COMPILE_TEST. Signed-off-by: Herbert Xu --- drivers/crypto/ux500/cryp/cryp_core.c | 8 ++++---- drivers/crypto/ux500/hash/hash_core.c | 12 +++++++----- 2 files changed, 11 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index e966e9a64501..1628ae7a1467 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -528,9 +528,9 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx, dev_dbg(ctx->device->dev, "[%s]: ", __func__); - if (unlikely(!IS_ALIGNED((u32)sg, 4))) { + if (unlikely(!IS_ALIGNED((unsigned long)sg, 4))) { dev_err(ctx->device->dev, "[%s]: Data in sg list isn't " - "aligned! Addr: 0x%08x", __func__, (u32)sg); + "aligned! Addr: 0x%08lx", __func__, (unsigned long)sg); return -EFAULT; } @@ -763,9 +763,9 @@ static int hw_crypt_noxts(struct cryp_ctx *ctx, ctx->outlen = ctx->datalen; - if (unlikely(!IS_ALIGNED((u32)indata, 4))) { + if (unlikely(!IS_ALIGNED((unsigned long)indata, 4))) { pr_debug(DEV_DBG_NAME " [%s]: Data isn't aligned! 
Addr: " - "0x%08x", __func__, (u32)indata); + "0x%08lx", __func__, (unsigned long)indata); return -EINVAL; } diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index f1ebc3dfa21e..c172a6953477 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -806,7 +806,7 @@ static int hash_process_data(struct hash_device_data *device_data, * HW peripheral, otherwise we first copy data * to a local buffer */ - if ((0 == (((u32)data_buffer) % 4)) && + if (IS_ALIGNED((unsigned long)data_buffer, 4) && (0 == *index)) hash_processblock(device_data, (const u32 *)data_buffer, @@ -864,7 +864,8 @@ static int hash_dma_final(struct ahash_request *req) if (ret) return ret; - dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx); + dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__, + (unsigned long)ctx); if (req_ctx->updated) { ret = hash_resume_state(device_data, &device_data->state); @@ -969,7 +970,8 @@ static int hash_hw_final(struct ahash_request *req) if (ret) return ret; - dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx); + dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__, + (unsigned long)ctx); if (req_ctx->updated) { ret = hash_resume_state(device_data, &device_data->state); @@ -1272,8 +1274,8 @@ void hash_get_digest(struct hash_device_data *device_data, else loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32); - dev_dbg(device_data->dev, "%s: digest array:(0x%x)\n", - __func__, (u32) digest); + dev_dbg(device_data->dev, "%s: digest array:(0x%lx)\n", + __func__, (unsigned long)digest); /* Copy result into digest array */ for (count = 0; count < loop_ctr; count++) { -- cgit From 59b569480dc8bb9dce57cdff133853a842dfd805 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Thu, 5 Sep 2019 09:41:12 -0700 Subject: random: Use wait_event_freezable() in add_hwgenerator_randomness() Sebastian reports that after commit ff296293b353 ("random: Support freezable kthreads in add_hwgenerator_randomness()") we can call might_sleep() when the task state is TASK_INTERRUPTIBLE (state=1). This leads to the following warning. do not call blocking ops when !TASK_RUNNING; state=1 set at [<00000000349d1489>] prepare_to_wait_event+0x5a/0x180 WARNING: CPU: 0 PID: 828 at kernel/sched/core.c:6741 __might_sleep+0x6f/0x80 Modules linked in: CPU: 0 PID: 828 Comm: hwrng Not tainted 5.3.0-rc7-next-20190903+ #46 RIP: 0010:__might_sleep+0x6f/0x80 Call Trace: kthread_freezable_should_stop+0x1b/0x60 add_hwgenerator_randomness+0xdd/0x130 hwrng_fillfn+0xbf/0x120 kthread+0x10c/0x140 ret_from_fork+0x27/0x50 We shouldn't call kthread_freezable_should_stop() from deep within the wait_event code because the task state is still set as TASK_INTERRUPTIBLE instead of TASK_RUNNING and kthread_freezable_should_stop() will try to call into the freezer with the task in the wrong state. Use wait_event_freezable() instead so that it calls schedule() in the right place and tries to enter the freezer when the task state is TASK_RUNNING instead. 
Reported-by: Sebastian Andrzej Siewior Tested-by: Sebastian Andrzej Siewior Cc: Keerthy Fixes: ff296293b353 ("random: Support freezable kthreads in add_hwgenerator_randomness()") Signed-off-by: Stephen Boyd Signed-off-by: Herbert Xu --- drivers/char/random.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index e2e85ca16410..5b799aa973a3 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -327,6 +327,7 @@ #include #include #include +#include <linux/freezer.h> #include #include #include @@ -2429,7 +2430,6 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy) { struct entropy_store *poolp = &input_pool; - bool frozen = false; if (unlikely(crng_init == 0)) { crng_fast_load(buffer, count); @@ -2440,12 +2440,10 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, * We'll be woken up again once below random_write_wakeup_thresh, * or when the calling thread is about to terminate. */ - wait_event_interruptible(random_write_wait, - kthread_freezable_should_stop(&frozen) || + wait_event_freezable(random_write_wait, + kthread_should_stop() || ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits); - if (!frozen) { - mix_pool_bytes(poolp, buffer, count); - credit_entropy_bits(poolp, entropy); - } + mix_pool_bytes(poolp, buffer, count); + credit_entropy_bits(poolp, entropy); } EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); -- cgit From 118db42deeeff1cac3fe5f23f5c3ac25d6a77b7d Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Fri, 6 Sep 2019 16:31:48 +0200 Subject: crypto: inside-secure - Add EIP97/EIP197 and endianness detection This patch adds automatic EIP97/EIP197 detection, so it does not need to rely on any static value from the device table anymore. In particular, the static value from the table won't work for PCI devboards that cannot be further identified save from this direct hardware probing. The patch also adds automatic host endianness detection & correction. Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 154 ++++++++++++++++++++++---------- drivers/crypto/inside-secure/safexcel.h | 26 +++++- 2 files changed, 130 insertions(+), 50 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 5d648ee5f0b0..98a42f9c2fe9 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -393,29 +393,21 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv) static int safexcel_hw_init(struct safexcel_crypto_priv *priv) { - u32 version, val; + u32 val; int i, ret, pe; dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n", priv->config.pes, priv->config.rings); - /* Determine endianess and configure byte swap */ - version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION); - val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); - - if ((version & 0xffff) == EIP197_HIA_VERSION_BE) - val |= EIP197_MST_CTRL_BYTE_SWAP; - else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE) - val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24); - /* * For EIP197's only set maximum number of TX commands to 2^5 = 32 * Skip for the EIP97 as it does not have this field.
*/ - if (priv->version != EIP97IES_MRVL) + if (priv->flags & SAFEXCEL_HW_EIP197) { + val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); val |= EIP197_MST_CTRL_TX_MAX_CMD(5); - - writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); + writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); + } /* Configure wr/rd cache values */ writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) | @@ -438,7 +430,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) writel(EIP197_DxE_THR_CTRL_RESET_PE, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe)); - if (priv->version != EIP97IES_MRVL) + if (priv->flags & SAFEXCEL_HW_EIP197) /* Reset HIA input interface arbiter (EIP197 only) */ writel(EIP197_HIA_RA_PE_CTRL_RESET, EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe)); @@ -464,7 +456,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) EIP197_PE_IN_xBUF_THRES_MAX(7), EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe)); - if (priv->version != EIP97IES_MRVL) + if (priv->flags & SAFEXCEL_HW_EIP197) /* enable HIA input interface arbiter and rings */ writel(EIP197_HIA_RA_PE_CTRL_EN | GENMASK(priv->config.rings - 1, 0), @@ -490,7 +482,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) /* FIXME: instability issues can occur for EIP97 but disabling * it impacts performance. */ - if (priv->version != EIP97IES_MRVL) + if (priv->flags & SAFEXCEL_HW_EIP197) val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR; writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe)); @@ -577,8 +569,9 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) /* Clear any HIA interrupt */ writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK); - if (priv->version != EIP97IES_MRVL) { + if (priv->flags & SAFEXCEL_HW_EIP197) { eip197_trc_cache_init(priv); + priv->flags |= EIP197_TRC_CACHE; ret = eip197_load_firmwares(priv); if (ret) @@ -1083,12 +1076,12 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv) val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); /* Read number of PEs from the engine */ - if (priv->version == EIP97IES_MRVL) - /* Narrow field width for EIP97 type engine */ - mask = EIP97_N_PES_MASK; - else + if (priv->flags & SAFEXCEL_HW_EIP197) /* Wider field width for all EIP197 type engines */ mask = EIP197_N_PES_MASK; + else + /* Narrow field width for EIP97 type engine */ + mask = EIP97_N_PES_MASK; priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask; @@ -1108,18 +1101,7 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv) { struct safexcel_register_offsets *offsets = &priv->offsets; - if (priv->version == EIP97IES_MRVL) { - offsets->hia_aic = EIP97_HIA_AIC_BASE; - offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE; - offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE; - offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE; - offsets->hia_dfe = EIP97_HIA_DFE_BASE; - offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE; - offsets->hia_dse = EIP97_HIA_DSE_BASE; - offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE; - offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE; - offsets->pe = EIP97_PE_BASE; - } else { + if (priv->flags & SAFEXCEL_HW_EIP197) { offsets->hia_aic = EIP197_HIA_AIC_BASE; offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE; offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE; @@ -1130,6 +1112,19 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv) offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE; offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE; offsets->pe = EIP197_PE_BASE; + offsets->global = EIP197_GLOBAL_BASE; + } else { + 
offsets->hia_aic = EIP97_HIA_AIC_BASE; + offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE; + offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE; + offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE; + offsets->hia_dfe = EIP97_HIA_DFE_BASE; + offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE; + offsets->hia_dse = EIP97_HIA_DSE_BASE; + offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE; + offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE; + offsets->pe = EIP97_PE_BASE; + offsets->global = EIP97_GLOBAL_BASE; } } @@ -1145,8 +1140,8 @@ static int safexcel_probe_generic(void *pdev, int is_pci_dev) { struct device *dev = priv->dev; - u32 peid; - int i, ret; + u32 peid, version, mask, val; + int i, ret, hwctg; priv->context_pool = dmam_pool_create("safexcel-context", dev, sizeof(struct safexcel_context_record), @@ -1154,23 +1149,89 @@ static int safexcel_probe_generic(void *pdev, if (!priv->context_pool) return -ENOMEM; + /* + * First try the EIP97 HIA version regs + * For the EIP197, this is guaranteed to NOT return any of the test + * values + */ + version = readl(priv->base + EIP97_HIA_AIC_BASE + EIP197_HIA_VERSION); + + mask = 0; /* do not swap */ + if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) { + priv->hwconfig.hiaver = EIP197_VERSION_MASK(version); + } else if (EIP197_REG_HI16(version) == EIP197_HIA_VERSION_BE) { + /* read back byte-swapped, so complement byte swap bits */ + mask = EIP197_MST_CTRL_BYTE_SWAP_BITS; + priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version); + } else { + /* So it wasn't an EIP97 ... maybe it's an EIP197? */ + version = readl(priv->base + EIP197_HIA_AIC_BASE + + EIP197_HIA_VERSION); + if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) { + priv->hwconfig.hiaver = EIP197_VERSION_MASK(version); + priv->flags |= SAFEXCEL_HW_EIP197; + } else if (EIP197_REG_HI16(version) == + EIP197_HIA_VERSION_BE) { + /* read back byte-swapped, so complement swap bits */ + mask = EIP197_MST_CTRL_BYTE_SWAP_BITS; + priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version); + priv->flags |= SAFEXCEL_HW_EIP197; + } else { + return -ENODEV; + } + } + + /* Now initialize the reg offsets based on the probing info so far */ safexcel_init_register_offsets(priv); + /* + * If the version was read byte-swapped, we need to flip the device + * swapping Keep in mind here, though, that what we write will also be + * byte-swapped ... + */ + if (mask) { + val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); + val = val ^ (mask >> 24); /* toggle byte swap bits */ + writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); + } + + /* + * We're not done probing yet! We may fall through to here if no HIA + * was found at all. So, with the endianness presumably correct now and + * the offsets setup, *really* probe for the EIP97/EIP197. + */ + version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION); + if (((priv->flags & SAFEXCEL_HW_EIP197) && + (EIP197_REG_LO16(version) != EIP197_VERSION_LE)) || + ((!(priv->flags & SAFEXCEL_HW_EIP197) && + (EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) { + /* + * We did not find the device that matched our initial probing + * (or our initial probing failed) Report appropriate error. 
+ */ + return -ENODEV; + } + + priv->hwconfig.hwver = EIP197_VERSION_MASK(version); + hwctg = version >> 28; + peid = version & 255; + + /* Detect EIP96 packet engine and version */ + version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0)); + if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) { + dev_err(dev, "EIP%d: EIP96 not detected.\n", peid); + return -ENODEV; + } + priv->hwconfig.pever = EIP197_VERSION_MASK(version); + /* Get supported algorithms from EIP96 transform engine */ priv->hwconfig.algo_flags = readl(EIP197_PE(priv) + EIP197_PE_EIP96_OPTIONS(0)); - if (priv->version == EIP97IES_MRVL) { - peid = 97; - } else { - priv->flags |= EIP197_TRC_CACHE; - peid = 197; - } - - /* Dump some debug information important during development */ - dev_dbg(priv->dev, "Inside Secure EIP%d packetengine\n", peid); - dev_dbg(priv->dev, "Supported algorithms: %08x\n", - priv->hwconfig.algo_flags); + /* Print single info line describing what we just detected */ + dev_info(priv->dev, "EIP%d:%x(%d)-HIA:%x,PE:%x,alg:%08x\n", peid, + priv->hwconfig.hwver, hwctg, priv->hwconfig.hiaver, + priv->hwconfig.pever, priv->hwconfig.algo_flags); safexcel_configure(priv); @@ -1522,7 +1583,6 @@ static const struct pci_device_id safexcel_pci_ids[] = { { PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038, 0x16ae, 0xc522), - /* assume EIP197B for now */ .driver_data = EIP197_DEVBRD, }, {}, diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 1407804b66b7..feb0a9c32b75 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -14,8 +14,17 @@ #include #include -#define EIP197_HIA_VERSION_LE 0xca35 -#define EIP197_HIA_VERSION_BE 0x35ca +#define EIP197_HIA_VERSION_BE 0xca35 +#define EIP197_HIA_VERSION_LE 0x35ca +#define EIP97_VERSION_LE 0x9e61 +#define EIP197_VERSION_LE 0x3ac5 +#define EIP96_VERSION_LE 0x9f60 +#define EIP197_REG_LO16(reg) (reg & 0xffff) +#define EIP197_REG_HI16(reg) ((reg >> 16) & 0xffff) +#define EIP197_VERSION_MASK(reg) ((reg >> 16) & 0xfff) +#define EIP197_VERSION_SWAP(reg) (((reg & 0xf0) << 4) | \ + ((reg >> 4) & 0xf0) | \ + ((reg >> 12) & 0xf)) /* Static configuration */ #define EIP197_DEFAULT_RING_SIZE 400 @@ -70,6 +79,7 @@ #define EIP197_HIA_DSE_THR(priv) ((priv)->base + (priv)->offsets.hia_dse_thr) #define EIP197_HIA_GEN_CFG(priv) ((priv)->base + (priv)->offsets.hia_gen_cfg) #define EIP197_PE(priv) ((priv)->base + (priv)->offsets.pe) +#define EIP197_GLOBAL(priv) ((priv)->base + (priv)->offsets.global) /* EIP197 base offsets */ #define EIP197_HIA_AIC_BASE 0x90000 @@ -82,6 +92,7 @@ #define EIP197_HIA_DSE_THR_BASE 0x8d040 #define EIP197_HIA_GEN_CFG_BASE 0xf0000 #define EIP197_PE_BASE 0xa0000 +#define EIP197_GLOBAL_BASE 0xf0000 /* EIP97 base offsets */ #define EIP97_HIA_AIC_BASE 0x0 @@ -94,6 +105,7 @@ #define EIP97_HIA_DSE_THR_BASE 0xf600 #define EIP97_HIA_GEN_CFG_BASE 0x10000 #define EIP97_PE_BASE 0x10000 +#define EIP97_GLOBAL_BASE 0x10000 /* CDR/RDR register offsets */ #define EIP197_HIA_xDR_OFF(priv, r) (EIP197_HIA_AIC_xDR(priv) + (r) * 0x1000) @@ -146,9 +158,11 @@ #define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n))) #define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n))) #define EIP197_PE_EIP96_OPTIONS(n) (0x13f8 + (0x2000 * (n))) +#define EIP197_PE_EIP96_VERSION(n) (0x13fc + (0x2000 * (n))) #define EIP197_PE_OUT_DBUF_THRES(n) (0x1c00 + (0x2000 * (n))) #define EIP197_PE_OUT_TBUF_THRES(n) (0x1d00 + (0x2000 * (n))) #define EIP197_MST_CTRL 0xfff4 +#define EIP197_VERSION 0xfffc /* 
EIP197-specific registers, no indirection */ #define EIP197_CLASSIFICATION_RAMS 0xe0000 @@ -252,6 +266,7 @@ #define EIP197_MST_CTRL_TX_MAX_CMD(n) (((n) & 0xf) << 20) #define EIP197_MST_CTRL_BYTE_SWAP BIT(24) #define EIP197_MST_CTRL_NO_BYTE_SWAP BIT(25) +#define EIP197_MST_CTRL_BYTE_SWAP_BITS GENMASK(25, 24) /* EIP197_PE_IN_DBUF/TBUF_THRES */ #define EIP197_PE_IN_xBUF_THRES_MIN(n) ((n) << 8) @@ -651,14 +666,19 @@ struct safexcel_register_offsets { u32 hia_dse_thr; u32 hia_gen_cfg; u32 pe; + u32 global; }; enum safexcel_flags { - EIP197_TRC_CACHE = BIT(0), + EIP197_TRC_CACHE = BIT(0), + SAFEXCEL_HW_EIP197 = BIT(1), }; struct safexcel_hwconfig { enum safexcel_eip_algorithms algo_flags; + int hwver; + int hiaver; + int pever; }; struct safexcel_crypto_priv { -- cgit From f9d131d983673e7d2e79dea695c6cd129e929d09 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Fri, 6 Sep 2019 16:31:49 +0200 Subject: crypto: inside-secure: Corrected configuration of EIP96_TOKEN_CTRL This patch corrects the configuration of the EIP197_PE_EIP96_TOKEN_CTRL register. The previous value was wrong and potentially dangerous. Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 4 ++-- drivers/crypto/inside-secure/safexcel.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 98a42f9c2fe9..322d9e379876 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -498,8 +498,8 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) /* Token & context configuration */ val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES | - EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX | - EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX; + EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT | + EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT; writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe)); /* H/W capabilities selection: just enable everything */ diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index feb0a9c32b75..ecf068c293bb 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -296,8 +296,8 @@ /* EIP197_PE_EIP96_TOKEN_CTRL */ #define EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES BIT(16) -#define EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX BIT(19) -#define EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX BIT(20) +#define EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT BIT(17) +#define EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT BIT(22) /* EIP197_PE_EIP96_FUNCTION_EN */ #define EIP197_FUNCTION_ALL 0xffffffff -- cgit From 4bdf712c3049ea10e2a01031b364b85bad2cb6d4 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Fri, 6 Sep 2019 16:31:50 +0200 Subject: crypto: inside-secure - Enable extended algorithms on newer HW This patch enables, via the FUNCTION2_EN extension register, those algorithms that no longer fit in the original 32-bit FUNCTION_EN register.
Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 2 ++ drivers/crypto/inside-secure/safexcel.h | 1 + 2 files changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 322d9e379876..1f563e0b4158 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -505,6 +505,8 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) /* H/W capabilities selection: just enable everything */ writel(EIP197_FUNCTION_ALL, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe)); + writel(EIP197_FUNCTION_ALL, + EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION2_EN(pe)); } /* Command Descriptor Rings prepare */ diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index ecf068c293bb..d06dee21e7d1 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -157,6 +157,7 @@ #define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n))) #define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n))) #define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n))) +#define EIP197_PE_EIP96_FUNCTION2_EN(n) (0x1030 + (0x2000 * (n))) #define EIP197_PE_EIP96_OPTIONS(n) (0x13f8 + (0x2000 * (n))) #define EIP197_PE_EIP96_VERSION(n) (0x13fc + (0x2000 * (n))) #define EIP197_PE_OUT_DBUF_THRES(n) (0x1c00 + (0x2000 * (n))) -- cgit From 35c0e6c375ac7a09d1b099ceea101a2258857afa Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Fri, 6 Sep 2019 16:31:51 +0200 Subject: crypto: inside-secure - Base CD fetchcount on actual CD FIFO size This patch derives the command descriptor fetch count from the actual FIFO size advertised by the hardware. Fetching command descriptors one at a time is a performance bottleneck for small blocks, especially on hardware with multiple pipes. Even more so if the HW has few rings.
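To make the arithmetic concrete (all values assumed for illustration only, not read from real hardware): with an EIP197 advertising hwdataw = 2 (a 2^2 = 4 word bus) and hwcfsize = 5 (a 2^5 = 32 bus-word CD FIFO), and a command descriptor of 8 words:

	int cd_size      = 8;			      /* words per CD     */
	int hwdataw      = 2, hwcfsize = 5, pes = 2;  /* assumed values  */

	int cd_size_rnd  = (cd_size + (1 << hwdataw) - 1) >> hwdataw; /* 2 */
	int cd_fetch_cnt = (1 << hwcfsize) / cd_size_rnd;	     /* 16 */

	/* EIP197: bound the burst to pes * EIP197_FETCH_DEPTH = 2 * 2 */
	if (cd_fetch_cnt > pes * 2)
		cd_fetch_cnt = pes * 2;			      /* final: 4 */

so instead of the old fixed EIP197_FETCH_COUNT of 1, four descriptors are pulled per fetch, enough to keep both pipes busy with one descriptor queued behind each.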
Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 47 ++++++++++++++++++++++++++------- drivers/crypto/inside-secure/safexcel.h | 11 ++++++++ 2 files changed, 48 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 1f563e0b4158..32366f703332 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -310,13 +310,22 @@ release_fw: static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv) { u32 hdw, cd_size_rnd, val; - int i; + int i, cd_fetch_cnt; - hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); - hdw &= GENMASK(27, 25); - hdw >>= 25; - - cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw; + cd_size_rnd = (priv->config.cd_size + + (BIT(priv->hwconfig.hwdataw) - 1)) >> + priv->hwconfig.hwdataw; + /* determine number of CD's we can fetch into the CD FIFO as 1 block */ + if (priv->flags & SAFEXCEL_HW_EIP197) { + /* EIP197: try to fetch enough in 1 go to keep all pipes busy */ + cd_fetch_cnt = (1 << priv->hwconfig.hwcfsize) / cd_size_rnd; + cd_fetch_cnt = min_t(uint, cd_fetch_cnt, + (priv->config.pes * EIP197_FETCH_DEPTH)); + } else { + /* for the EIP97, just fetch all that fits minus 1 */ + cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) / + cd_size_rnd) - 1; + } for (i = 0; i < priv->config.rings; i++) { /* ring base address */ @@ -328,8 +337,8 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv) writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) | priv->config.cd_size, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE); - writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) | - (EIP197_FETCH_COUNT * priv->config.cd_offset), + writel(((cd_fetch_cnt * (cd_size_rnd << hdw)) << 16) | + (cd_fetch_cnt * priv->config.cd_offset), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG); /* Configure DMA tx control */ @@ -1142,7 +1151,7 @@ static int safexcel_probe_generic(void *pdev, int is_pci_dev) { struct device *dev = priv->dev; - u32 peid, version, mask, val; + u32 peid, version, mask, val, hiaopt; int i, ret, hwctg; priv->context_pool = dmam_pool_create("safexcel-context", dev, @@ -1226,13 +1235,31 @@ static int safexcel_probe_generic(void *pdev, } priv->hwconfig.pever = EIP197_VERSION_MASK(version); + hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS); + + if (priv->flags & SAFEXCEL_HW_EIP197) { + /* EIP197 */ + priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) & + EIP197_HWDATAW_MASK; + priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) & + EIP197_CFSIZE_MASK) + + EIP197_CFSIZE_ADJUST; + } else { + /* EIP97 */ + priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) & + EIP97_HWDATAW_MASK; + priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) & + EIP97_CFSIZE_MASK; + } + /* Get supported algorithms from EIP96 transform engine */ priv->hwconfig.algo_flags = readl(EIP197_PE(priv) + EIP197_PE_EIP96_OPTIONS(0)); /* Print single info line describing what we just detected */ - dev_info(priv->dev, "EIP%d:%x(%d)-HIA:%x,PE:%x,alg:%08x\n", peid, + dev_info(priv->dev, "EIP%d:%x(%d)-HIA:%x(%d,%d),PE:%x,alg:%08x\n", peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hiaver, + priv->hwconfig.hwdataw, priv->hwconfig.hwcfsize, priv->hwconfig.pever, priv->hwconfig.algo_flags); safexcel_configure(priv); diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 
d06dee21e7d1..2da757d5b404 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -31,6 +31,7 @@ #define EIP197_MAX_TOKENS 18 #define EIP197_MAX_RINGS 4 #define EIP197_FETCH_COUNT 1 +#define EIP197_FETCH_DEPTH 2 #define EIP197_MAX_BATCH_SZ 64 #define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \ @@ -225,6 +226,14 @@ #define EIP197_N_PES_OFFSET 4 #define EIP197_N_PES_MASK GENMASK(4, 0) #define EIP97_N_PES_MASK GENMASK(2, 0) +#define EIP197_HWDATAW_OFFSET 25 +#define EIP197_HWDATAW_MASK GENMASK(3, 0) +#define EIP97_HWDATAW_MASK GENMASK(2, 0) +#define EIP197_CFSIZE_OFFSET 9 +#define EIP197_CFSIZE_ADJUST 4 +#define EIP97_CFSIZE_OFFSET 8 +#define EIP197_CFSIZE_MASK GENMASK(3, 0) +#define EIP97_CFSIZE_MASK GENMASK(4, 0) /* EIP197_HIA_AIC_R_ENABLE_CTRL */ #define EIP197_CDR_IRQ(n) BIT((n) * 2) @@ -680,6 +689,8 @@ struct safexcel_hwconfig { int hwver; int hiaver; int pever; + int hwdataw; + int hwcfsize; }; struct safexcel_crypto_priv { -- cgit From b2d92ac1c5eebcb0510939edfdfc2e87330f6679 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Fri, 6 Sep 2019 16:31:52 +0200 Subject: crypto: inside-secure - Base RD fetchcount on actual RD FIFO size This patch derives the result descriptor fetch count from the actual FIFO size advertised by the hardware. Fetching result descriptors one at a time is a performance bottleneck for small blocks, especially on hardware with multiple pipes. Even more so if the HW has few rings. Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 37 +++++++++++++++++++++++---------- drivers/crypto/inside-secure/safexcel.h | 15 ++++++++++++- 2 files changed, 40 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 32366f703332..acf26dfa7e94 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -357,13 +357,22 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv) static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv) { u32 hdw, rd_size_rnd, val; - int i; - - hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); - hdw &= GENMASK(27, 25); - hdw >>= 25; + int i, rd_fetch_cnt; - rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw; + /* determine number of RD's we can fetch into the FIFO as one block */ + rd_size_rnd = (EIP197_RD64_FETCH_SIZE + + BIT(priv->hwconfig.hwdataw) - 1) >> + priv->hwconfig.hwdataw; + if (priv->flags & SAFEXCEL_HW_EIP197) { + /* EIP197: try to fetch enough in 1 go to keep all pipes busy */ + rd_fetch_cnt = (1 << priv->hwconfig.hwrfsize) / rd_size_rnd; + rd_fetch_cnt = min_t(uint, rd_fetch_cnt, + (priv->config.pes * EIP197_FETCH_DEPTH)); + } else { + /* for the EIP97, just fetch all that fits minus 1 */ + rd_fetch_cnt = ((1 << priv->hwconfig.hwrfsize) / + rd_size_rnd) - 1; + } for (i = 0; i < priv->config.rings; i++) { /* ring base address */ @@ -376,8 +385,8 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv) priv->config.rd_size, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE); - writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) | - (EIP197_FETCH_COUNT * priv->config.rd_offset), + writel(((rd_fetch_cnt * (rd_size_rnd << hdw)) << 16) | + (rd_fetch_cnt * priv->config.rd_offset), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG); /* Configure DMA tx control */ @@ -1244,12 +1253,17 @@ static int
safexcel_probe_generic(void *pdev, priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) & EIP197_CFSIZE_MASK) + EIP197_CFSIZE_ADJUST; + priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) & + EIP197_RFSIZE_MASK) + + EIP197_RFSIZE_ADJUST; } else { /* EIP97 */ priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) & EIP97_HWDATAW_MASK; priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) & EIP97_CFSIZE_MASK; + priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) & + EIP97_RFSIZE_MASK; } /* Get supported algorithms from EIP96 transform engine */ @@ -1257,10 +1271,11 @@ static int safexcel_probe_generic(void *pdev, EIP197_PE_EIP96_OPTIONS(0)); /* Print single info line describing what we just detected */ - dev_info(priv->dev, "EIP%d:%x(%d)-HIA:%x(%d,%d),PE:%x,alg:%08x\n", peid, - priv->hwconfig.hwver, hwctg, priv->hwconfig.hiaver, + dev_info(priv->dev, "EIP%d:%x(%d)-HIA:%x(%d,%d,%d),PE:%x,alg:%08x\n", + peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hiaver, priv->hwconfig.hwdataw, priv->hwconfig.hwcfsize, - priv->hwconfig.pever, priv->hwconfig.algo_flags); + priv->hwconfig.hwrfsize, priv->hwconfig.pever, + priv->hwconfig.algo_flags); safexcel_configure(priv); diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 2da757d5b404..7d740b31c056 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -30,7 +30,6 @@ #define EIP197_DEFAULT_RING_SIZE 400 #define EIP197_MAX_TOKENS 18 #define EIP197_MAX_RINGS 4 -#define EIP197_FETCH_COUNT 1 #define EIP197_FETCH_DEPTH 2 #define EIP197_MAX_BATCH_SZ 64 @@ -234,6 +233,11 @@ #define EIP97_CFSIZE_OFFSET 8 #define EIP197_CFSIZE_MASK GENMASK(3, 0) #define EIP97_CFSIZE_MASK GENMASK(4, 0) +#define EIP197_RFSIZE_OFFSET 12 +#define EIP197_RFSIZE_ADJUST 4 +#define EIP97_RFSIZE_OFFSET 12 +#define EIP197_RFSIZE_MASK GENMASK(3, 0) +#define EIP97_RFSIZE_MASK GENMASK(4, 0) /* EIP197_HIA_AIC_R_ENABLE_CTRL */ #define EIP197_CDR_IRQ(n) BIT((n) * 2) @@ -462,6 +466,14 @@ struct safexcel_result_desc { struct result_data_desc result_data; } __packed; +/* + * The EIP(1)97 only needs to fetch the descriptor part of + * the result descriptor, not the result token part! + */ +#define EIP197_RD64_FETCH_SIZE ((sizeof(struct safexcel_result_desc) -\ + sizeof(struct result_data_desc)) /\ + sizeof(u32)) + struct safexcel_token { u32 packet_length:17; u8 stat:2; @@ -691,6 +703,7 @@ struct safexcel_hwconfig { int pever; int hwdataw; int hwcfsize; + int hwrfsize; }; struct safexcel_crypto_priv { -- cgit From 465527bcaebc1dcecc77b78ff77936658384152e Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Fri, 6 Sep 2019 16:31:53 +0200 Subject: crypto: inside-secure - Probe transform record cache RAM sizes This patch actually probes the transform record cache data and administration RAM sizes, instead of making assumptions, and then configures the TRC based on the actually probed values. This allows the driver to work with EIP197 HW that has TRC RAM sizes different from those of the Marvell EIP197B/D variants. 
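The probing itself is a binary search over the address space: write a marker near the middle of the remaining window, write a second marker at the bottom (which aliases over the first one if the physical RAM wraps at a smaller size), then read the middle back. A condensed sketch of the idea; ram_write(), ram_read() and MARKER() are hypothetical helpers standing in for the banked CLASSIFICATION_RAMS accesses of the real driver:

	#include <linux/types.h>

	/* hypothetical RAM accessors */
	void ram_write(u32 addr, u32 val);
	u32 ram_read(u32 addr);

	/* derive a marker value from the address being probed */
	#define MARKER(a) (0x5a5a0000u | ((a) & 0xffffu))

	static u32 probe_ram_size(u32 max_size)
	{
		u32 lo = 0, hi = max_size, mid;

		while ((hi - lo) > 32) {	/* 32-byte granularity */
			mid = (hi + lo) >> 1;

			ram_write(mid, MARKER(mid)); /* top-half marker    */
			ram_write(lo, MARKER(lo));   /* clobbers it on wrap */

			if (ram_read(mid) == MARKER(mid))
				lo = mid;  /* intact: RAM reaches past mid */
			else
				hi = mid;  /* clobbered: RAM ends below mid */
		}
		return hi;	/* probed size */
	}

The real code below additionally flips bank-select bits to reach above 64 KB and uses the address pair itself as the marker value.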
Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 229 ++++++++++++++++++++++++++------ drivers/crypto/inside-secure/safexcel.h | 21 +-- 2 files changed, 200 insertions(+), 50 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index acf26dfa7e94..b456b85f46d3 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -28,63 +28,205 @@ static u32 max_rings = EIP197_MAX_RINGS; module_param(max_rings, uint, 0644); MODULE_PARM_DESC(max_rings, "Maximum number of rings to use."); -static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv) +static void eip197_trc_cache_setupvirt(struct safexcel_crypto_priv *priv) { - u32 val, htable_offset; - int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc; - - if (priv->version == EIP197D_MRVL) { - cs_rc_max = EIP197D_CS_RC_MAX; - cs_ht_wc = EIP197D_CS_HT_WC; - cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC; - cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC; - } else { - /* Default to minimum "safe" settings */ - cs_rc_max = EIP197B_CS_RC_MAX; - cs_ht_wc = EIP197B_CS_HT_WC; - cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC; - cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC; + int i; + + /* + * Map all interfaces/rings to register index 0 + * so they can share contexts. Without this, the EIP197 will + * assume each interface/ring to be in its own memory domain + * i.e. have its own subset of UNIQUE memory addresses. + * Which would cause records with the SAME memory address to + * use DIFFERENT cache buffers, causing both poor cache utilization + * AND serious coherence/invalidation issues. + */ + for (i = 0; i < 4; i++) + writel(0, priv->base + EIP197_FLUE_IFC_LUT(i)); + + /* + * Initialize other virtualization regs for cache + * These may not be in their reset state ... + */ + for (i = 0; i < priv->config.rings; i++) { + writel(0, priv->base + EIP197_FLUE_CACHEBASE_LO(i)); + writel(0, priv->base + EIP197_FLUE_CACHEBASE_HI(i)); + writel(EIP197_FLUE_CONFIG_MAGIC, + priv->base + EIP197_FLUE_CONFIG(i)); } + writel(0, priv->base + EIP197_FLUE_OFFSETS); + writel(0, priv->base + EIP197_FLUE_ARC4_OFFSET); +} - /* Enable the record cache memory access */ - val = readl(priv->base + EIP197_CS_RAM_CTRL); - val &= ~EIP197_TRC_ENABLE_MASK; - val |= EIP197_TRC_ENABLE_0; - writel(val, priv->base + EIP197_CS_RAM_CTRL); +static void eip197_trc_cache_banksel(struct safexcel_crypto_priv *priv, + u32 addrmid, int *actbank) +{ + u32 val; + int curbank; + + curbank = addrmid >> 16; + if (curbank != *actbank) { + val = readl(priv->base + EIP197_CS_RAM_CTRL); + val = (val & ~EIP197_CS_BANKSEL_MASK) | + (curbank << EIP197_CS_BANKSEL_OFS); + writel(val, priv->base + EIP197_CS_RAM_CTRL); + *actbank = curbank; + } +} - /* Clear all ECC errors */ - writel(0, priv->base + EIP197_TRC_ECCCTRL); +static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv, + int maxbanks, u32 probemask) +{ + u32 val, addrhi, addrlo, addrmid; + int actbank; /* - * Make sure the cache memory is accessible by taking record cache into - * reset. + * And probe the actual size of the physically attached cache data RAM + * Using a binary subdivision algorithm downto 32 byte cache lines. 
*/ - val = readl(priv->base + EIP197_TRC_PARAMS); - val |= EIP197_TRC_PARAMS_SW_RESET; - val &= ~EIP197_TRC_PARAMS_DATA_ACCESS; - writel(val, priv->base + EIP197_TRC_PARAMS); + addrhi = 1 << (16 + maxbanks); + addrlo = 0; + actbank = min(maxbanks - 1, 0); + while ((addrhi - addrlo) > 32) { + /* write marker to lowest address in top half */ + addrmid = (addrhi + addrlo) >> 1; + eip197_trc_cache_banksel(priv, addrmid, &actbank); + writel((addrmid | (addrlo << 16)) & probemask, + priv->base + EIP197_CLASSIFICATION_RAMS + + (addrmid & 0xffff)); + + /* write marker to lowest address in bottom half */ + eip197_trc_cache_banksel(priv, addrlo, &actbank); + writel((addrlo | (addrhi << 16)) & probemask, + priv->base + EIP197_CLASSIFICATION_RAMS + + (addrlo & 0xffff)); + + /* read back marker from top half */ + eip197_trc_cache_banksel(priv, addrmid, &actbank); + val = readl(priv->base + EIP197_CLASSIFICATION_RAMS + + (addrmid & 0xffff)); + + if (val == ((addrmid | (addrlo << 16)) & probemask)) { + /* read back correct, continue with top half */ + addrlo = addrmid; + } else { + /* not read back correct, continue with bottom half */ + addrhi = addrmid; + } + } + return addrhi; +} + +static void eip197_trc_cache_clear(struct safexcel_crypto_priv *priv, + int cs_rc_max, int cs_ht_wc) +{ + int i; + u32 htable_offset, val, offset; - /* Clear all records */ + /* Clear all records in administration RAM */ for (i = 0; i < cs_rc_max; i++) { - u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE; + offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE; writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) | EIP197_CS_RC_PREV(EIP197_RC_NULL), priv->base + offset); - val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1); + val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1); if (i == 0) val |= EIP197_CS_RC_PREV(EIP197_RC_NULL); else if (i == cs_rc_max - 1) val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL); - writel(val, priv->base + offset + sizeof(u32)); + writel(val, priv->base + offset + 4); + /* must also initialize the address key due to ECC! */ + writel(0, priv->base + offset + 8); + writel(0, priv->base + offset + 12); } /* Clear the hash table entries */ htable_offset = cs_rc_max * EIP197_CS_RC_SIZE; for (i = 0; i < cs_ht_wc; i++) writel(GENMASK(29, 0), - priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32)); + priv->base + EIP197_CLASSIFICATION_RAMS + + htable_offset + i * sizeof(u32)); +} + +static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv) +{ + u32 val, dsize, asize; + int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc; + int cs_rc_abs_max, cs_ht_sz; + int maxbanks; + + /* Setup (dummy) virtualization for cache */ + eip197_trc_cache_setupvirt(priv); + + /* + * Enable the record cache memory access and + * probe the bank select width + */ + val = readl(priv->base + EIP197_CS_RAM_CTRL); + val &= ~EIP197_TRC_ENABLE_MASK; + val |= EIP197_TRC_ENABLE_0 | EIP197_CS_BANKSEL_MASK; + writel(val, priv->base + EIP197_CS_RAM_CTRL); + val = readl(priv->base + EIP197_CS_RAM_CTRL); + maxbanks = ((val&EIP197_CS_BANKSEL_MASK)>>EIP197_CS_BANKSEL_OFS) + 1; + + /* Clear all ECC errors */ + writel(0, priv->base + EIP197_TRC_ECCCTRL); + + /* + * Make sure the cache memory is accessible by taking record cache into + * reset. Need data memory access here, not admin access. 
+ */ + val = readl(priv->base + EIP197_TRC_PARAMS); + val |= EIP197_TRC_PARAMS_SW_RESET | EIP197_TRC_PARAMS_DATA_ACCESS; + writel(val, priv->base + EIP197_TRC_PARAMS); + + /* Probed data RAM size in bytes */ + dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff); + + /* + * Now probe the administration RAM size pretty much the same way + * Except that only the lower 30 bits are writable and we don't need + * bank selects + */ + val = readl(priv->base + EIP197_TRC_PARAMS); + /* admin access now */ + val &= ~(EIP197_TRC_PARAMS_DATA_ACCESS | EIP197_CS_BANKSEL_MASK); + writel(val, priv->base + EIP197_TRC_PARAMS); + + /* Probed admin RAM size in admin words */ + asize = eip197_trc_cache_probe(priv, 0, 0xbfffffff) >> 4; + + /* Clear any ECC errors detected while probing! */ + writel(0, priv->base + EIP197_TRC_ECCCTRL); + + /* + * Determine optimal configuration from RAM sizes + * Note that we assume that the physical RAM configuration is sane + * Therefore, we don't do any parameter error checking here ... + */ + + /* For now, just use a single record format covering everything */ + cs_trc_rec_wc = EIP197_CS_TRC_REC_WC; + cs_trc_lg_rec_wc = EIP197_CS_TRC_REC_WC; + + /* + * Step #1: How many records will physically fit? + * Hard upper limit is 1023! + */ + cs_rc_abs_max = min_t(uint, ((dsize >> 2) / cs_trc_lg_rec_wc), 1023); + /* Step #2: Need at least 2 words in the admin RAM per record */ + cs_rc_max = min_t(uint, cs_rc_abs_max, (asize >> 1)); + /* Step #3: Determine log2 of hash table size */ + cs_ht_sz = __fls(asize - cs_rc_max) - 2; + /* Step #4: determine current size of hash table in dwords */ + cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */ + /* Step #5: add sizes to get total size and convert to admin words */ + asize = min_t(uint, asize, 2 * cs_rc_max + (cs_ht_wc >> 4)); + + /* Clear the cache RAMs */ + eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc); /* Disable the record cache memory access */ val = readl(priv->base + EIP197_CS_RAM_CTRL); @@ -104,8 +246,11 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv) /* Configure the record cache #2 */ val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) | EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) | - EIP197_TRC_PARAMS_HTABLE_SZ(2); + EIP197_TRC_PARAMS_HTABLE_SZ(cs_ht_sz); writel(val, priv->base + EIP197_TRC_PARAMS); + + dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n", + dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc); } static void eip197_init_firmware(struct safexcel_crypto_priv *priv) @@ -129,7 +274,7 @@ static void eip197_init_firmware(struct safexcel_crypto_priv *priv) /* clear the scratchpad RAM using 32 bit writes only */ for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++) writel(0, EIP197_PE(priv) + - EIP197_PE_ICE_SCRATCH_RAM(pe) + (i<<2)); + EIP197_PE_ICE_SCRATCH_RAM(pe) + (i << 2)); /* Reset the IFPP engine to make its program mem accessible */ writel(EIP197_PE_ICE_x_CTRL_SW_RESET | @@ -309,7 +454,7 @@ release_fw: static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv) { - u32 hdw, cd_size_rnd, val; + u32 cd_size_rnd, val; int i, cd_fetch_cnt; cd_size_rnd = (priv->config.cd_size + @@ -337,7 +482,8 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv) writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) | priv->config.cd_size, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE); - writel(((cd_fetch_cnt * (cd_size_rnd << hdw)) << 16) | + writel(((cd_fetch_cnt * + (cd_size_rnd << priv->hwconfig.hwdataw)) << 16) | (cd_fetch_cnt * priv->config.cd_offset), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG); @@ -356,12 +502,12 @@ static int
safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv) { - u32 hdw, rd_size_rnd, val; + u32 rd_size_rnd, val; int i, rd_fetch_cnt; /* determine number of RD's we can fetch into the FIFO as one block */ rd_size_rnd = (EIP197_RD64_FETCH_SIZE + - BIT(priv->hwconfig.hwdataw) - 1) >> + (BIT(priv->hwconfig.hwdataw) - 1)) >> priv->hwconfig.hwdataw; if (priv->flags & SAFEXCEL_HW_EIP197) { /* EIP197: try to fetch enough in 1 go to keep all pipes busy */ @@ -371,7 +517,7 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv) } else { /* for the EIP97, just fetch all that fits minus 1 */ rd_fetch_cnt = ((1 << priv->hwconfig.hwrfsize) / - rd_size_rnd) - 1; + rd_size_rnd) - 1; } for (i = 0; i < priv->config.rings; i++) { @@ -385,7 +531,8 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv) priv->config.rd_size, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE); - writel(((rd_fetch_cnt * (rd_size_rnd << hdw)) << 16) | + writel(((rd_fetch_cnt * + (rd_size_rnd << priv->hwconfig.hwdataw)) << 16) | (rd_fetch_cnt * priv->config.rd_offset), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG); diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 7d740b31c056..930cc48a6f85 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -178,6 +178,12 @@ #define EIP197_TRC_ECCADMINSTAT 0xf0838 #define EIP197_TRC_ECCDATASTAT 0xf083c #define EIP197_TRC_ECCDATA 0xf0840 +#define EIP197_FLUE_CACHEBASE_LO(n) (0xf6000 + (32 * (n))) +#define EIP197_FLUE_CACHEBASE_HI(n) (0xf6004 + (32 * (n))) +#define EIP197_FLUE_CONFIG(n) (0xf6010 + (32 * (n))) +#define EIP197_FLUE_OFFSETS 0xf6808 +#define EIP197_FLUE_ARC4_OFFSET 0xf680c +#define EIP197_FLUE_IFC_LUT(n) (0xf6820 + (4 * (n))) #define EIP197_CS_RAM_CTRL 0xf7ff0 /* EIP197_HIA_xDR_DESC_SIZE */ @@ -321,6 +327,9 @@ #define EIP197_ADDRESS_MODE BIT(8) #define EIP197_CONTROL_MODE BIT(9) +/* EIP197_FLUE_CONFIG */ +#define EIP197_FLUE_CONFIG_MAGIC 0xc7000004 + /* Context Control */ struct safexcel_context_record { u32 control0; @@ -395,6 +404,8 @@ struct safexcel_context_record { #define EIP197_TRC_ENABLE_1 BIT(5) #define EIP197_TRC_ENABLE_2 BIT(6) #define EIP197_TRC_ENABLE_MASK GENMASK(6, 4) +#define EIP197_CS_BANKSEL_MASK GENMASK(14, 12) +#define EIP197_CS_BANKSEL_OFS 12 /* EIP197_TRC_PARAMS */ #define EIP197_TRC_PARAMS_SW_RESET BIT(0) @@ -412,19 +423,11 @@ struct safexcel_context_record { #define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n) ((n) << 18) /* Cache helpers */ -#define EIP197B_CS_RC_MAX 52 -#define EIP197D_CS_RC_MAX 96 +#define EIP197_CS_TRC_REC_WC 64 #define EIP197_CS_RC_SIZE (4 * sizeof(u32)) #define EIP197_CS_RC_NEXT(x) (x) #define EIP197_CS_RC_PREV(x) ((x) << 10) #define EIP197_RC_NULL 0x3ff -#define EIP197B_CS_TRC_REC_WC 59 -#define EIP197D_CS_TRC_REC_WC 64 -#define EIP197B_CS_TRC_LG_REC_WC 73 -#define EIP197D_CS_TRC_LG_REC_WC 80 -#define EIP197B_CS_HT_WC 64 -#define EIP197D_CS_HT_WC 256 - /* Result data */ struct result_data_desc { -- cgit From 67caef08a71fa6506fc16be58cd594c49b0605de Mon Sep 17 00:00:00 2001 From: Uri Shir Date: Sun, 8 Sep 2019 11:04:26 +0300 Subject: crypto: ccree - enable CTS support in AES-XTS In XTS encryption/decryption the plaintext byte size can be any value >= AES_BLOCK_SIZE; it need not be a multiple of the block size. This patch enables the AES-XTS ciphertext stealing implementation in the ccree driver.
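In practical terms, this means an xts(aes) skcipher request on ccree no longer has to span a whole number of AES blocks. The following is a hypothetical kernel-side sketch of such a request, using only the standard crypto API; the function name, the zeroed key material, and the 23-byte length are illustrative, not taken from the patch:

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int xts_cts_demo(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 key[64] = { 0 };		/* two AES-256 halves; use real keys */
	u8 iv[AES_BLOCK_SIZE] = { 0 };	/* the XTS tweak (sector number) */
	u8 *buf;
	int err;

	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_tfm;

	/* 23 bytes: not block-aligned, valid for XTS only with CTS */
	buf = kzalloc(23, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto out_tfm;
	}

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_buf;
	}

	sg_init_one(&sg, buf, 23);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 23, iv);

	/* Synchronously wait for the (possibly async) HW operation */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_buf:
	kfree(buf);
out_tfm:
	crypto_free_skcipher(tfm);
	return err;
}

Before this patch, the removed IS_ALIGNED() check in validate_data_size() would have rejected the 23-byte request with -EINVAL; with CTS enabled the block size advertised to the crypto API drops to 1 accordingly.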
Signed-off-by: Uri Shir Acked-by: Gilad Ben-Yossef Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_cipher.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index c7ec20e90fc0..254b48797799 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -116,10 +116,6 @@ static int validate_data_size(struct cc_cipher_ctx *ctx_p, case S_DIN_to_AES: switch (ctx_p->cipher_mode) { case DRV_CIPHER_XTS: - if (size >= AES_BLOCK_SIZE && - IS_ALIGNED(size, AES_BLOCK_SIZE)) - return 0; - break; case DRV_CIPHER_CBC_CTS: if (size >= AES_BLOCK_SIZE) return 0; @@ -938,7 +934,7 @@ static const struct cc_alg_template skcipher_algs[] = { { .name = "xts(paes)", .driver_name = "xts-paes-ccree", - .blocksize = AES_BLOCK_SIZE, + .blocksize = 1, .template_skcipher = { .setkey = cc_cipher_sethkey, .encrypt = cc_cipher_encrypt, @@ -956,7 +952,7 @@ static const struct cc_alg_template skcipher_algs[] = { { .name = "xts512(paes)", .driver_name = "xts-paes-du512-ccree", - .blocksize = AES_BLOCK_SIZE, + .blocksize = 1, .template_skcipher = { .setkey = cc_cipher_sethkey, .encrypt = cc_cipher_encrypt, @@ -975,7 +971,7 @@ static const struct cc_alg_template skcipher_algs[] = { { .name = "xts4096(paes)", .driver_name = "xts-paes-du4096-ccree", - .blocksize = AES_BLOCK_SIZE, + .blocksize = 1, .template_skcipher = { .setkey = cc_cipher_sethkey, .encrypt = cc_cipher_encrypt, @@ -1196,7 +1192,7 @@ static const struct cc_alg_template skcipher_algs[] = { { .name = "xts(aes)", .driver_name = "xts-aes-ccree", - .blocksize = AES_BLOCK_SIZE, + .blocksize = 1, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, @@ -1213,7 +1209,7 @@ static const struct cc_alg_template skcipher_algs[] = { { .name = "xts512(aes)", .driver_name = "xts-aes-du512-ccree", - .blocksize = AES_BLOCK_SIZE, + .blocksize = 1, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, @@ -1231,7 +1227,7 @@ static const struct cc_alg_template skcipher_algs[] = { { .name = "xts4096(aes)", .driver_name = "xts-aes-du4096-ccree", - .blocksize = AES_BLOCK_SIZE, + .blocksize = 1, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, -- cgit From 9575d1a5c0780ea26ff8dd29c94a32be32ce3c85 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 9 Sep 2019 23:55:29 +1000 Subject: crypto: caam - Cast to long first before pointer conversion MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While storing an int in a pointer is safe, the compiler is not happy about it. So we need some extra casting in order to make this warning-free.
Fixes: 1d3f75bce123 ("crypto: caam - dispose of IRQ mapping only...") Signed-off-by: Herbert Xu Reviewed-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/jr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index d11956bc358f..fc97cde27059 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -488,7 +488,7 @@ static int caam_jr_init(struct device *dev) static void caam_jr_irq_dispose_mapping(void *data) { - irq_dispose_mapping((int)data); + irq_dispose_mapping((unsigned long)data); } /* @@ -546,7 +546,7 @@ static int caam_jr_probe(struct platform_device *pdev) } error = devm_add_action_or_reset(jrdev, caam_jr_irq_dispose_mapping, - (void *)jrpriv->irq); + (void *)(unsigned long)jrpriv->irq); if (error) return error; -- cgit
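The idiom applied by the caam fix above is worth noting in isolation: when a small integer must ride through a void * cookie, widen it to unsigned long (which matches pointer width on all Linux targets) before the pointer cast, and narrow it back the same way on the other side. A standalone, hypothetical illustration follows; the names are made up, only the cast idiom mirrors the patch:

#include <stdio.h>

/* Callback receiving a small integer smuggled through a void * */
static void dispose(void *data)
{
	/* Narrow back through unsigned long, the pointer-sized type */
	unsigned int irq = (unsigned long)data;

	printf("disposing IRQ %u\n", irq);
}

int main(void)
{
	unsigned int irq = 42;

	/*
	 * Widening to unsigned long first keeps the conversion
	 * value-preserving and silences -Wint-to-pointer-cast,
	 * which fires when an int is cast straight to a 64-bit
	 * pointer.
	 */
	dispose((void *)(unsigned long)irq);
	return 0;
}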