Diffstat (limited to 'drivers/crypto/hisilicon/sec/sec_algs.c')
 drivers/crypto/hisilicon/sec/sec_algs.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index 0a3c8f019b02..1189effcdad0 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -449,7 +449,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
 		 */
 	}
 
-	mutex_lock(&ctx->queue->queuelock);
+	spin_lock_bh(&ctx->queue->queuelock);
 	/* Put the IV in place for chained cases */
 	switch (ctx->cipher_alg) {
 	case SEC_C_AES_CBC_128:
@@ -504,12 +504,12 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
 		     kfifo_avail(&ctx->queue->softqueue) >
 		     backlog_req->num_elements)) {
 			sec_send_request(backlog_req, ctx->queue);
-			backlog_req->req_base->complete(backlog_req->req_base,
-							-EINPROGRESS);
+			crypto_request_complete(backlog_req->req_base,
+						-EINPROGRESS);
 			list_del(&backlog_req->backlog_head);
 		}
 	}
-	mutex_unlock(&ctx->queue->queuelock);
+	spin_unlock_bh(&ctx->queue->queuelock);
 
 	mutex_lock(&sec_req->lock);
 	list_del(&sec_req_el->head);
@@ -534,7 +534,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
 		if (skreq->src != skreq->dst)
 			dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
 				     DMA_BIDIRECTIONAL);
-		skreq->base.complete(&skreq->base, sec_req->err);
+		skcipher_request_complete(skreq, sec_req->err);
 	}
 }
 
@@ -798,7 +798,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 	 */
 
 	/* Grab a big lock for a long time to avoid concurrency issues */
-	mutex_lock(&queue->queuelock);
+	spin_lock_bh(&queue->queuelock);
 
 	/*
 	 * Can go on to queue if we have space in either:
@@ -814,15 +814,15 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 		ret = -EBUSY;
 		if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
 			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
-			mutex_unlock(&queue->queuelock);
+			spin_unlock_bh(&queue->queuelock);
 			goto out;
 		}
 
-		mutex_unlock(&queue->queuelock);
+		spin_unlock_bh(&queue->queuelock);
 		goto err_free_elements;
 	}
 	ret = sec_send_request(sec_req, queue);
-	mutex_unlock(&queue->queuelock);
+	spin_unlock_bh(&queue->queuelock);
 	if (ret)
 		goto err_free_elements;
 
@@ -881,7 +881,7 @@ static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
 	if (IS_ERR(ctx->queue))
 		return PTR_ERR(ctx->queue);
 
-	mutex_init(&ctx->queue->queuelock);
+	spin_lock_init(&ctx->queue->queuelock);
 	ctx->queue->havesoftqueue = false;
 
 	return 0;