Diffstat (limited to 'drivers/crypto/hisilicon/sec/sec_algs.c')
-rw-r--r--	drivers/crypto/hisilicon/sec/sec_algs.c	80
1 file changed, 45 insertions(+), 35 deletions(-)
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index c27e7160d2df..1189effcdad0 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2016-2017 Hisilicon Limited. */
+/* Copyright (c) 2016-2017 HiSilicon Limited. */
 #include <linux/crypto.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmapool.h>
@@ -175,7 +175,8 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
 				     dma_addr_t *psec_sgl,
 				     struct scatterlist *sgl,
 				     int count,
-				     struct sec_dev_info *info)
+				     struct sec_dev_info *info,
+				     gfp_t gfp)
 {
 	struct sec_hw_sgl *sgl_current = NULL;
 	struct sec_hw_sgl *sgl_next;
@@ -190,7 +191,7 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
 		sge_index = i % SEC_MAX_SGE_NUM;
 		if (sge_index == 0) {
 			sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
-						   GFP_KERNEL, &sgl_next_dma);
+						   gfp, &sgl_next_dma);
 			if (!sgl_next) {
 				ret = -ENOMEM;
 				goto err_free_hw_sgls;
@@ -448,7 +449,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
 		 */
 	}

-	mutex_lock(&ctx->queue->queuelock);
+	spin_lock_bh(&ctx->queue->queuelock);
 	/* Put the IV in place for chained cases */
 	switch (ctx->cipher_alg) {
 	case SEC_C_AES_CBC_128:
@@ -503,12 +504,12 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
 			     kfifo_avail(&ctx->queue->softqueue) >
 			     backlog_req->num_elements)) {
 				sec_send_request(backlog_req, ctx->queue);
-				backlog_req->req_base->complete(backlog_req->req_base,
-								-EINPROGRESS);
+				crypto_request_complete(backlog_req->req_base,
+							-EINPROGRESS);
 				list_del(&backlog_req->backlog_head);
 			}
 		}
-	mutex_unlock(&ctx->queue->queuelock);
+	spin_unlock_bh(&ctx->queue->queuelock);

 	mutex_lock(&sec_req->lock);
 	list_del(&sec_req_el->head);
@@ -533,7 +534,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
 		if (skreq->src != skreq->dst)
 			dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
 				     DMA_BIDIRECTIONAL);

-		skreq->base.complete(&skreq->base, sec_req->err);
+		skcipher_request_complete(skreq, sec_req->err);
 	}
 }
@@ -545,14 +546,14 @@ void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
 }

 static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
-					      int *steps)
+					      int *steps, gfp_t gfp)
 {
 	size_t *sizes;
 	int i;

 	/* Split into suitable sized blocks */
 	*steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
-	sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
+	sizes = kcalloc(*steps, sizeof(*sizes), gfp);
 	if (!sizes)
 		return -ENOMEM;

@@ -568,7 +569,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
 				int steps, struct scatterlist ***splits,
 				int **splits_nents,
 				int sgl_len_in,
-				struct device *dev)
+				struct device *dev, gfp_t gfp)
 {
 	int ret, count;

@@ -576,12 +577,12 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
 	if (!count)
 		return -EINVAL;
-	*splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
+	*splits = kcalloc(steps, sizeof(struct scatterlist *), gfp);
 	if (!*splits) {
 		ret = -ENOMEM;
 		goto err_unmap_sg;
 	}
-	*splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
+	*splits_nents = kcalloc(steps, sizeof(int), gfp);
 	if (!*splits_nents) {
 		ret = -ENOMEM;
 		goto err_free_splits;
 	}
@@ -589,7 +590,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
 	/* output the scatter list before and after this */
 	ret = sg_split(sgl, count, 0, steps, split_sizes,
-		       *splits, *splits_nents, GFP_KERNEL);
+		       *splits, *splits_nents, gfp);
 	if (ret) {
 		ret = -ENOMEM;
 		goto err_free_splits_nents;
 	}
@@ -630,13 +631,13 @@ static struct sec_request_el
 			   int el_size, bool different_dest,
 			   struct scatterlist *sgl_in, int n_ents_in,
 			   struct scatterlist *sgl_out, int n_ents_out,
-			   struct sec_dev_info *info)
+			   struct sec_dev_info *info, gfp_t gfp)
 {
 	struct sec_request_el *el;
 	struct sec_bd_info *req;
 	int ret;

-	el = kzalloc(sizeof(*el), GFP_KERNEL);
+	el = kzalloc(sizeof(*el), gfp);
 	if (!el)
 		return ERR_PTR(-ENOMEM);
 	el->el_length = el_size;
@@ -668,7 +669,7 @@ static struct sec_request_el
 	el->sgl_in = sgl_in;

 	ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
-					n_ents_in, info);
+					n_ents_in, info, gfp);
 	if (ret)
 		goto err_free_el;

@@ -679,7 +680,7 @@ static struct sec_request_el
 		el->sgl_out = sgl_out;
 		ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
 						el->sgl_out,
-						n_ents_out, info);
+						n_ents_out, info, gfp);
 		if (ret)
 			goto err_free_hw_sgl_in;

@@ -720,6 +721,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 	int *splits_out_nents = NULL;
 	struct sec_request_el *el, *temp;
 	bool split = skreq->src != skreq->dst;
+	gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

 	mutex_init(&sec_req->lock);
 	sec_req->req_base = &skreq->base;
@@ -728,13 +730,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 	sec_req->len_in = sg_nents(skreq->src);

 	ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
-						 &steps);
+						 &steps, gfp);
 	if (ret)
 		return ret;
 	sec_req->num_elements = steps;

 	ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
 				   &splits_in_nents, sec_req->len_in,
-				   info->dev);
+				   info->dev, gfp);
 	if (ret)
 		goto err_free_split_sizes;
@@ -742,7 +744,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 		sec_req->len_out = sg_nents(skreq->dst);
 		ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
 					   &splits_out, &splits_out_nents,
-					   sec_req->len_out, info->dev);
+					   sec_req->len_out, info->dev, gfp);
 		if (ret)
 			goto err_unmap_in_sg;
 	}
@@ -775,7 +777,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 					       splits_in[i], splits_in_nents[i],
 					       split ? splits_out[i] : NULL,
 					       split ? splits_out_nents[i] : 0,
-					       info);
+					       info, gfp);
 		if (IS_ERR(el)) {
 			ret = PTR_ERR(el);
 			goto err_free_elements;
 		}
@@ -796,7 +798,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 	 */

 	/* Grab a big lock for a long time to avoid concurrency issues */
-	mutex_lock(&queue->queuelock);
+	spin_lock_bh(&queue->queuelock);

 	/*
 	 * Can go on to queue if we have space in either:
@@ -812,15 +814,15 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 		ret = -EBUSY;
 		if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
 			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
-			mutex_unlock(&queue->queuelock);
+			spin_unlock_bh(&queue->queuelock);
 			goto out;
 		}

-		mutex_unlock(&queue->queuelock);
+		spin_unlock_bh(&queue->queuelock);
 		goto err_free_elements;
 	}

 	ret = sec_send_request(sec_req, queue);
-	mutex_unlock(&queue->queuelock);
+	spin_unlock_bh(&queue->queuelock);
 	if (ret)
 		goto err_free_elements;
@@ -879,7 +881,7 @@ static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
 	if (IS_ERR(ctx->queue))
 		return PTR_ERR(ctx->queue);

-	mutex_init(&ctx->queue->queuelock);
+	spin_lock_init(&ctx->queue->queuelock);
 	ctx->queue->havesoftqueue = false;

 	return 0;
@@ -932,7 +934,8 @@ static struct skcipher_alg sec_algs[] = {
 		.cra_name = "ecb(aes)",
 		.cra_driver_name = "hisi_sec_aes_ecb",
 		.cra_priority = 4001,
-		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_flags = CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_ALLOCATES_MEMORY,
 		.cra_blocksize = AES_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
 		.cra_alignmask = 0,
@@ -951,7 +954,8 @@ static struct skcipher_alg sec_algs[] = {
 		.cra_name = "cbc(aes)",
 		.cra_driver_name = "hisi_sec_aes_cbc",
 		.cra_priority = 4001,
-		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_flags = CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_ALLOCATES_MEMORY,
 		.cra_blocksize = AES_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
 		.cra_alignmask = 0,
@@ -970,7 +974,8 @@ static struct skcipher_alg sec_algs[] = {
 		.cra_name = "ctr(aes)",
 		.cra_driver_name = "hisi_sec_aes_ctr",
 		.cra_priority = 4001,
-		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_flags = CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_ALLOCATES_MEMORY,
 		.cra_blocksize = AES_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
 		.cra_alignmask = 0,
@@ -989,7 +994,8 @@ static struct skcipher_alg sec_algs[] = {
 		.cra_name = "xts(aes)",
 		.cra_driver_name = "hisi_sec_aes_xts",
 		.cra_priority = 4001,
-		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_flags = CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_ALLOCATES_MEMORY,
 		.cra_blocksize = AES_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
 		.cra_alignmask = 0,
@@ -1009,7 +1015,8 @@ static struct skcipher_alg sec_algs[] = {
 		.cra_name = "ecb(des)",
 		.cra_driver_name = "hisi_sec_des_ecb",
 		.cra_priority = 4001,
-		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_flags = CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_ALLOCATES_MEMORY,
 		.cra_blocksize = DES_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
 		.cra_alignmask = 0,
@@ -1028,7 +1035,8 @@ static struct skcipher_alg sec_algs[] = {
 		.cra_name = "cbc(des)",
 		.cra_driver_name = "hisi_sec_des_cbc",
 		.cra_priority = 4001,
-		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_flags = CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_ALLOCATES_MEMORY,
 		.cra_blocksize = DES_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
 		.cra_alignmask = 0,
@@ -1047,7 +1055,8 @@ static struct skcipher_alg sec_algs[] = {
 		.cra_name = "cbc(des3_ede)",
 		.cra_driver_name = "hisi_sec_3des_cbc",
 		.cra_priority = 4001,
-		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_flags = CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_ALLOCATES_MEMORY,
 		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
 		.cra_alignmask = 0,
@@ -1066,7 +1075,8 @@ static struct skcipher_alg sec_algs[] = {
 		.cra_name = "ecb(des3_ede)",
 		.cra_driver_name = "hisi_sec_3des_ecb",
 		.cra_priority = 4001,
-		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_flags = CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_ALLOCATES_MEMORY,
 		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
 		.cra_alignmask = 0,
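
The recurring change in the diff is gfp plumbing: the request path derives a gfp_t from CRYPTO_TFM_REQ_MAY_SLEEP and passes it down to every allocation instead of hard-coding GFP_KERNEL, and each algorithm is additionally marked CRYPTO_ALG_ALLOCATES_MEMORY so callers that cannot tolerate request-path allocation can avoid the driver. A minimal standalone sketch of the pattern follows; the sketch_* names are illustrative and not part of the driver:

	/* Sketch only: the gfp selection the patch applies throughout. */
	#include <linux/crypto.h>
	#include <linux/slab.h>

	static void *sketch_alloc_for_req(struct crypto_async_request *base,
					  size_t len)
	{
		/*
		 * A request flagged CRYPTO_TFM_REQ_MAY_SLEEP may block, so
		 * GFP_KERNEL is safe; otherwise the caller may be running
		 * in atomic context and only GFP_ATOMIC is allowed.
		 */
		gfp_t gfp = (base->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			    GFP_KERNEL : GFP_ATOMIC;

		return kzalloc(len, gfp);
	}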

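The queuelock conversion follows the same logic: the completion callback runs in softirq context, where a mutex (a sleeping lock) must not be taken, so the lock becomes a spinlock and the process-context side uses the _bh variants to keep softirqs from deadlocking against it. A sketch of the conversion in miniature, again with hypothetical sketch_* names:

	/* Sketch only: mutex -> spinlock for a lock shared with softirq. */
	#include <linux/spinlock.h>

	struct sketch_queue {
		spinlock_t lock;	/* was a struct mutex before the change */
	};

	static void sketch_init(struct sketch_queue *q)
	{
		spin_lock_init(&q->lock);	/* replaces mutex_init() */
	}

	/* Process-context submission path; the callback takes the same lock. */
	static void sketch_submit(struct sketch_queue *q)
	{
		spin_lock_bh(&q->lock);		/* replaces mutex_lock() */
		/* ... queue the request or park it on the backlog ... */
		spin_unlock_bh(&q->lock);	/* replaces mutex_unlock() */
	}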