author    Ayush Sawal <ayush.sawal@chelsio.com>    2020-02-05 10:48:41 +0530
committer Herbert Xu <herbert@gondor.apana.org.au>  2020-02-13 17:05:25 +0800
commit    1c502e2e2d79b6a4c800c3806c70b58ad6ae784d (patch)
tree      f65794f0b89018094a69bd252ddc73aa2e2698ba /drivers/crypto/chelsio/chcr_algo.c
parent    4fb3d8ba2824db0b8553602daef82fe27f50bba2 (diff)
crypto: chelsio - Fix the libkcapi cbc(aes) AIO failing test cases
The libkcapi "cbc(aes)" tests that fail are:

- symmetric asynchronous cipher one shot multiple test
- symmetric asynchronous cipher stream multiple test
- symmetric asynchronous cipher vmsplice multiple test

This patch adds a wait_for_completion() to chcr_aes_encrypt(), which is completed when the response comes back from the hardware. This serializes encryption for the cbc(aes) AIO case.

Signed-off-by: Ayush Sawal <ayush.sawal@chelsio.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
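For context, the mechanism this fix relies on is the standard kernel completion API from <linux/completion.h>. The sketch below is a minimal, driver-agnostic illustration of that pattern only, not the chelsio driver code itself; the names my_ctx, my_ctx_init, response_handler and submit_request are hypothetical.

#include <linux/completion.h>

struct my_ctx {
	struct completion aio_done;	/* signaled when the async response arrives */
};

/* One-time setup, analogous to the init_completion() added in chcr_init_tfm(). */
static void my_ctx_init(struct my_ctx *ctx)
{
	init_completion(&ctx->aio_done);
}

/* Asynchronous response path, analogous to chcr_handle_cipher_resp(). */
static void response_handler(struct my_ctx *ctx)
{
	complete(&ctx->aio_done);
}

/*
 * Submit path, analogous to chcr_aes_encrypt(): post the request, then block
 * until the response handler signals completion, so back-to-back AIO
 * submissions are serialized.
 */
static int submit_request(struct my_ctx *ctx)
{
	/* ... build and post the work request to the hardware here ... */
	wait_for_completion(&ctx->aio_done);
	return -EINPROGRESS;
}

The effect is that each cbc(aes) AIO encrypt request waits for its hardware response before the next one is submitted, which is the serialization the commit message describes.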
Diffstat (limited to 'drivers/crypto/chelsio/chcr_algo.c')
 drivers/crypto/chelsio/chcr_algo.c | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index b4b9b22125d1..699e3053895a 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -1102,6 +1102,7 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
unsigned char *input, int err)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chcr_context *ctx = c_ctx(tfm);
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct sk_buff *skb;
@@ -1166,10 +1167,20 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
chcr_send_wr(skb);
reqctx->last_req_len = bytes;
reqctx->processed += bytes;
+ if (get_cryptoalg_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
+ CRYPTO_TFM_REQ_MAY_SLEEP ) {
+ complete(&ctx->cbc_aes_aio_done);
+ }
return 0;
unmap:
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
+ if (get_cryptoalg_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
+ CRYPTO_TFM_REQ_MAY_SLEEP ) {
+ complete(&ctx->cbc_aes_aio_done);
+ }
chcr_dec_wrcount(dev);
req->base.complete(&req->base, err);
return err;
@@ -1289,6 +1300,7 @@ error:
static int chcr_aes_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chcr_context *ctx;
struct chcr_dev *dev = c_ctx(tfm)->dev;
struct sk_buff *skb = NULL;
int err, isfull = 0;
@@ -1313,6 +1325,12 @@ static int chcr_aes_encrypt(struct skcipher_request *req)
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
+ if (get_cryptoalg_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
+ CRYPTO_TFM_REQ_MAY_SLEEP ) {
+ ctx=c_ctx(tfm);
+ wait_for_completion(&ctx->cbc_aes_aio_done);
+ }
return isfull ? -EBUSY : -EINPROGRESS;
error:
chcr_dec_wrcount(dev);
@@ -1401,7 +1419,7 @@ static int chcr_init_tfm(struct crypto_skcipher *tfm)
pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
return PTR_ERR(ablkctx->sw_cipher);
}
-
+ init_completion(&ctx->cbc_aes_aio_done);
crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
return chcr_device_init(ctx);