From c1abe6f570aff4b6d396dc551e60570d2f50bd79 Mon Sep 17 00:00:00 2001
From: David Howells
Date: Tue, 6 Jun 2023 14:08:52 +0100
Subject: crypto: af_alg: Use extract_iter_to_sg() to create scatterlists

Use extract_iter_to_sg() to decant the destination iterator into a
scatterlist in af_alg_get_rsgl().  af_alg_make_sg() can then be removed.

Signed-off-by: David Howells
cc: Herbert Xu
cc: "David S. Miller"
cc: Eric Dumazet
cc: Jakub Kicinski
cc: Paolo Abeni
cc: Jens Axboe
cc: Matthew Wilcox
cc: linux-crypto@vger.kernel.org
cc: netdev@vger.kernel.org
Acked-by: Herbert Xu
Signed-off-by: Paolo Abeni
---
 crypto/af_alg.c         | 57 +++++++++++++------------------------------------
 crypto/algif_aead.c     | 16 ++++++++------
 crypto/algif_hash.c     | 18 +++++++++++-----
 crypto/algif_skcipher.c |  2 +-
 4 files changed, 38 insertions(+), 55 deletions(-)

(limited to 'crypto')

diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 7caff10df643..b8bf6d8525ba 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -531,45 +531,11 @@ static const struct net_proto_family alg_family = {
 	.owner	=	THIS_MODULE,
 };
 
-int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
-{
-	struct page **pages = sgl->pages;
-	size_t off;
-	ssize_t n;
-	int npages, i;
-
-	n = iov_iter_extract_pages(iter, &pages, len, ALG_MAX_PAGES, 0, &off);
-	if (n < 0)
-		return n;
-
-	sgl->need_unpin = iov_iter_extract_will_pin(iter);
-
-	npages = DIV_ROUND_UP(off + n, PAGE_SIZE);
-	if (WARN_ON(npages == 0))
-		return -EINVAL;
-	/* Add one extra for linking */
-	sg_init_table(sgl->sg, npages + 1);
-
-	for (i = 0, len = n; i < npages; i++) {
-		int plen = min_t(int, len, PAGE_SIZE - off);
-
-		sg_set_page(sgl->sg + i, sgl->pages[i], plen, off);
-
-		off = 0;
-		len -= plen;
-	}
-	sg_mark_end(sgl->sg + npages - 1);
-	sgl->npages = npages;
-
-	return n;
-}
-EXPORT_SYMBOL_GPL(af_alg_make_sg);
-
 static void af_alg_link_sg(struct af_alg_sgl *sgl_prev,
 			   struct af_alg_sgl *sgl_new)
 {
-	sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
-	sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg);
+	sg_unmark_end(sgl_prev->sgt.sgl + sgl_prev->sgt.nents - 1);
+	sg_chain(sgl_prev->sgt.sgl, sgl_prev->sgt.nents + 1, sgl_new->sgt.sgl);
 }
 
 void af_alg_free_sg(struct af_alg_sgl *sgl)
@@ -577,8 +543,8 @@ void af_alg_free_sg(struct af_alg_sgl *sgl)
 	int i;
 
 	if (sgl->need_unpin)
-		for (i = 0; i < sgl->npages; i++)
-			unpin_user_page(sgl->pages[i]);
+		for (i = 0; i < sgl->sgt.nents; i++)
+			unpin_user_page(sg_page(&sgl->sgt.sgl[i]));
 }
 EXPORT_SYMBOL_GPL(af_alg_free_sg);
 
@@ -1292,8 +1258,8 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
 
 	while (maxsize > len && msg_data_left(msg)) {
 		struct af_alg_rsgl *rsgl;
+		ssize_t err;
 		size_t seglen;
-		int err;
 
 		/* limit the amount of readable buffers */
 		if (!af_alg_readable(sk))
@@ -1310,16 +1276,23 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
 			return -ENOMEM;
 		}
 
-		rsgl->sgl.npages = 0;
+		rsgl->sgl.sgt.sgl = rsgl->sgl.sgl;
+		rsgl->sgl.sgt.nents = 0;
+		rsgl->sgl.sgt.orig_nents = 0;
 		list_add_tail(&rsgl->list, &areq->rsgl_list);
 
-		/* make one iovec available as scatterlist */
-		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
+		sg_init_table(rsgl->sgl.sgt.sgl, ALG_MAX_PAGES);
+		err = extract_iter_to_sg(&msg->msg_iter, seglen, &rsgl->sgl.sgt,
+					 ALG_MAX_PAGES, 0);
 		if (err < 0) {
 			rsgl->sg_num_bytes = 0;
 			return err;
 		}
 
+		sg_mark_end(rsgl->sgl.sgt.sgl + rsgl->sgl.sgt.nents - 1);
+		rsgl->sgl.need_unpin =
+			iov_iter_extract_will_pin(&msg->msg_iter);
+
 		/* chain the new scatterlist with previous one */
 		if (areq->last_rsgl)
 			af_alg_link_sg(&areq->last_rsgl->sgl, &rsgl->sgl);
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 42493b4d8ce4..829878025dba 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -210,7 +210,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 	 */
 
 	/* Use the RX SGL as source (and destination) for crypto op. */
-	rsgl_src = areq->first_rsgl.sgl.sg;
+	rsgl_src = areq->first_rsgl.sgl.sgt.sgl;
 
 	if (ctx->enc) {
 		/*
@@ -224,7 +224,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 		 * RX SGL: AAD || PT || Tag
 		 */
 		err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
-					   areq->first_rsgl.sgl.sg, processed);
+					   areq->first_rsgl.sgl.sgt.sgl,
+					   processed);
 		if (err)
 			goto free;
 		af_alg_pull_tsgl(sk, processed, NULL, 0);
@@ -242,7 +243,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 
 		/* Copy AAD || CT to RX SGL buffer for in-place operation. */
 		err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
-					   areq->first_rsgl.sgl.sg, outlen);
+					   areq->first_rsgl.sgl.sgt.sgl,
+					   outlen);
 		if (err)
 			goto free;
 
@@ -267,10 +269,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 	if (usedpages) {
 		/* RX SGL present */
 		struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
+		struct scatterlist *sg = sgl_prev->sgt.sgl;
 
-		sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
-		sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
-			 areq->tsgl);
+		sg_unmark_end(sg + sgl_prev->sgt.nents - 1);
+		sg_chain(sg, sgl_prev->sgt.nents + 1, areq->tsgl);
 	} else
 		/* no RX SGL present (e.g. authentication only) */
 		rsgl_src = areq->tsgl;
@@ -278,7 +280,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 
 	/* Initialize the crypto operation */
 	aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
-			       areq->first_rsgl.sgl.sg, used, ctx->iv);
+			       areq->first_rsgl.sgl.sgt.sgl, used, ctx->iv);
 	aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
 	aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
 
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 63af72e19fa8..16c69c4b9c62 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -91,13 +91,21 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
 		if (len > limit)
 			len = limit;
 
-		len = af_alg_make_sg(&ctx->sgl, &msg->msg_iter, len);
+		ctx->sgl.sgt.sgl = ctx->sgl.sgl;
+		ctx->sgl.sgt.nents = 0;
+		ctx->sgl.sgt.orig_nents = 0;
+
+		len = extract_iter_to_sg(&msg->msg_iter, len, &ctx->sgl.sgt,
+					 ALG_MAX_PAGES, 0);
 		if (len < 0) {
 			err = copied ? 0 : len;
 			goto unlock;
 		}
+		sg_mark_end(ctx->sgl.sgt.sgl + ctx->sgl.sgt.nents);
+
+		ctx->sgl.need_unpin = iov_iter_extract_will_pin(&msg->msg_iter);
 
-		ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);
+		ahash_request_set_crypt(&ctx->req, ctx->sgl.sgt.sgl, NULL, len);
 
 		err = crypto_wait_req(crypto_ahash_update(&ctx->req),
 				      &ctx->wait);
@@ -141,8 +149,8 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
 		flags |= MSG_MORE;
 
 	lock_sock(sk);
-	sg_init_table(ctx->sgl.sg, 1);
-	sg_set_page(ctx->sgl.sg, page, size, offset);
+	sg_init_table(ctx->sgl.sgl, 1);
+	sg_set_page(ctx->sgl.sgl, page, size, offset);
 
 	if (!(flags & MSG_MORE)) {
 		err = hash_alloc_result(sk, ctx);
@@ -151,7 +159,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
 	} else if (!ctx->more)
 		hash_free_result(sk, ctx);
 
-	ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);
+	ahash_request_set_crypt(&ctx->req, ctx->sgl.sgl, ctx->result, size);
 
 	if (!(flags & MSG_MORE)) {
 		if (ctx->more)
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index ee8890ee8f33..a251cd6bd5b9 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -105,7 +105,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 	/* Initialize the crypto operation */
 	skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
 	skcipher_request_set_crypt(&areq->cra_u.skcipher_req, areq->tsgl,
-				   areq->first_rsgl.sgl.sg, len, ctx->iv);
+				   areq->first_rsgl.sgl.sgt.sgl, len, ctx->iv);
 
 	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
 		/* AIO operation */
--
cgit
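
For readers following the conversion, the hunks above all apply one pattern:
point a struct sg_table at a caller-owned scatterlist array, let
extract_iter_to_sg() fill it straight from the iov_iter, mark the final entry,
and remember whether the extracted pages were pinned so af_alg_free_sg() can
unpin them later.  The sketch below restates that pattern outside af_alg; it
is illustrative only, assumes a kernel that already provides
extract_iter_to_sg() and iov_iter_extract_will_pin(), and the names
example_sgl, example_fill_sgl, example_free_sgl and EXAMPLE_MAX_PAGES are
stand-ins rather than anything defined by this patch.

	/* Illustrative sketch only -- mirrors the af_alg_get_rsgl() hunk above. */
	#include <linux/mm.h>
	#include <linux/scatterlist.h>
	#include <linux/uio.h>

	#define EXAMPLE_MAX_PAGES 16		/* stand-in for ALG_MAX_PAGES */

	struct example_sgl {
		struct sg_table sgt;
		/* One extra slot so a following list can be chained on. */
		struct scatterlist sgl[EXAMPLE_MAX_PAGES + 1];
		bool need_unpin;
	};

	static ssize_t example_fill_sgl(struct example_sgl *e,
					struct iov_iter *iter, size_t maxlen)
	{
		ssize_t n;

		/* Back the sg_table with the embedded array before extraction. */
		e->sgt.sgl = e->sgl;
		e->sgt.nents = 0;
		e->sgt.orig_nents = 0;
		sg_init_table(e->sgt.sgl, EXAMPLE_MAX_PAGES);

		/* Decant up to maxlen bytes of the iterator into the scatterlist. */
		n = extract_iter_to_sg(iter, maxlen, &e->sgt, EXAMPLE_MAX_PAGES, 0);
		if (n < 0)
			return n;

		/* Terminate the list at the last populated entry. */
		if (e->sgt.nents)
			sg_mark_end(e->sgt.sgl + e->sgt.nents - 1);

		/* Only user-backed iterators have their pages pinned. */
		e->need_unpin = iov_iter_extract_will_pin(iter);
		return n;
	}

	static void example_free_sgl(struct example_sgl *e)
	{
		unsigned int i;

		/* Release the pins taken by extraction, as af_alg_free_sg() now does. */
		if (e->need_unpin)
			for (i = 0; i < e->sgt.nents; i++)
				unpin_user_page(sg_page(&e->sgt.sgl[i]));
	}

Chaining a later list onto the reserved extra slot would then follow
af_alg_link_sg(): sg_unmark_end() on the last populated entry and sg_chain()
across nents + 1 entries.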