author		Herbert Xu <herbert@gondor.apana.org.au>	2025-03-09 10:43:21 +0800
committer	Herbert Xu <herbert@gondor.apana.org.au>	2025-03-15 16:21:23 +0800
commit		b67a026003725a5d2496eba691c293694ab4847a (patch)
tree		f26a8ae2284ef02c2e6eb9638e8a30bff99cc577 /crypto/acompress.c
parent		cff12830e2cb2044067167f95a537074240347b7 (diff)
crypto: acomp - Add request chaining and virtual addresses
This adds request chaining and virtual address support to the
acomp interface.

It is identical to the ahash interface, except that a new flag
CRYPTO_ACOMP_REQ_NONDMA has been added to indicate that the
virtual addresses are not suitable for DMA.  This is because all
existing and potential acomp users can provide memory that is
suitable for DMA so there is no need for a fall-back copy path.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
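For context, a minimal caller-side sketch of the new virtual-address
entry points (not part of this patch; the "deflate" algorithm name,
buffer arguments and label names are illustrative only):

    #include <crypto/acompress.h>

    static int example_compress(const u8 *src, unsigned int slen,
                                u8 *dst, unsigned int dlen)
    {
            struct crypto_acomp *tfm;
            struct acomp_req *req;
            DECLARE_CRYPTO_WAIT(wait);
            int err;

            tfm = crypto_alloc_acomp("deflate", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            req = acomp_request_alloc(tfm);
            if (!req) {
                    err = -ENOMEM;
                    goto out_free_tfm;
            }

            acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                       crypto_req_done, &wait);
            /* DMA-capable virtual addresses instead of scatterlists. */
            acomp_request_set_src_dma(req, src, slen);
            acomp_request_set_dst_dma(req, dst, dlen);

            /* Synchronous wait; req->dlen holds the output length. */
            err = crypto_wait_req(crypto_acomp_compress(req), &wait);

            acomp_request_free(req);
    out_free_tfm:
            crypto_free_acomp(tfm);
            return err;
    }

Buffers that are not DMA-capable would instead be marked with
CRYPTO_ACOMP_REQ_NONDMA via the non-DMA variants of these setters in
the companion header change and, as noted above, are currently
rejected rather than copied.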
Diffstat (limited to 'crypto/acompress.c')
-rw-r--r--	crypto/acompress.c	197
1 file changed, 197 insertions(+), 0 deletions(-)
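The per-request chain state that the code below manipulates lives in
the request itself (&req->chain).  Its definition is not part of this
file; the following sketch is inferred purely from the fields used
here (the authoritative layout is in the companion
include/crypto/acompress.h change):

    struct acomp_req_chain {
            struct list_head head;     /* queued follow-on requests */
            struct acomp_req *req0;    /* head request of the chain */
            struct acomp_req *cur;     /* request currently running */
            int (*op)(struct acomp_req *req);
            crypto_completion_t compl; /* saved caller completion */
            void *data;                /* saved caller callback data */
            struct scatterlist ssg;    /* one-entry SG for src virt addr */
            struct scatterlist dsg;    /* one-entry SG for dst virt addr */
            const u8 *src;             /* saved src virtual address */
            u8 *dst;                   /* saved dst virtual address */
    };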
diff --git a/crypto/acompress.c b/crypto/acompress.c
index ef36ec31d73d..45444e99a9db 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -23,6 +23,8 @@ struct crypto_scomp;
static const struct crypto_type crypto_acomp_type;
+static void acomp_reqchain_done(void *data, int err);
+
static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
{
	return container_of(alg, struct acomp_alg, calg.base);
@@ -123,6 +125,201 @@ struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);
+static bool acomp_request_has_nondma(struct acomp_req *req)
+{
+	struct acomp_req *r2;
+
+	if (acomp_request_isnondma(req))
+		return true;
+
+	list_for_each_entry(r2, &req->base.list, base.list)
+		if (acomp_request_isnondma(r2))
+			return true;
+
+	return false;
+}
+
+static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt)
+{
+	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+	struct acomp_req_chain *state = &req->chain;
+
+	if (!acomp_is_async(tfm))
+		return;
+
+	state->compl = req->base.complete;
+	state->data = req->base.data;
+	req->base.complete = cplt;
+	req->base.data = state;
+	state->req0 = req;
+}
+
+static void acomp_restore_req(struct acomp_req_chain *state)
+{
+	struct acomp_req *req = state->req0;
+	struct crypto_acomp *tfm;
+
+	tfm = crypto_acomp_reqtfm(req);
+	if (!acomp_is_async(tfm))
+		return;
+
+	req->base.complete = state->compl;
+	req->base.data = state->data;
+}
+
+static void acomp_reqchain_virt(struct acomp_req_chain *state, int err)
+{
+	struct acomp_req *req = state->cur;
+	unsigned int slen = req->slen;
+	unsigned int dlen = req->dlen;
+
+	req->base.err = err;
+	state = &req->chain;
+
+	if (state->src)
+		acomp_request_set_src_dma(req, state->src, slen);
+	if (state->dst)
+		acomp_request_set_dst_dma(req, state->dst, dlen);
+	state->src = NULL;
+	state->dst = NULL;
+}
+
+static void acomp_virt_to_sg(struct acomp_req *req)
+{
+	struct acomp_req_chain *state = &req->chain;
+
+	if (acomp_request_src_isvirt(req)) {
+		unsigned int slen = req->slen;
+		const u8 *svirt = req->svirt;
+
+		state->src = svirt;
+		sg_init_one(&state->ssg, svirt, slen);
+		acomp_request_set_src_sg(req, &state->ssg, slen);
+	}
+
+	if (acomp_request_dst_isvirt(req)) {
+		unsigned int dlen = req->dlen;
+		u8 *dvirt = req->dvirt;
+
+		state->dst = dvirt;
+		sg_init_one(&state->dsg, dvirt, dlen);
+		acomp_request_set_dst_sg(req, &state->dsg, dlen);
+	}
+}
+
+static int acomp_reqchain_finish(struct acomp_req_chain *state,
+				 int err, u32 mask)
+{
+	struct acomp_req *req0 = state->req0;
+	struct acomp_req *req = state->cur;
+	struct acomp_req *n;
+
+	acomp_reqchain_virt(state, err);
+
+	if (req != req0)
+		list_add_tail(&req->base.list, &req0->base.list);
+
+	list_for_each_entry_safe(req, n, &state->head, base.list) {
+		list_del_init(&req->base.list);
+
+		req->base.flags &= mask;
+		req->base.complete = acomp_reqchain_done;
+		req->base.data = state;
+		state->cur = req;
+
+		acomp_virt_to_sg(req);
+		err = state->op(req);
+
+		if (err == -EINPROGRESS) {
+			if (!list_empty(&state->head))
+				err = -EBUSY;
+			goto out;
+		}
+
+		if (err == -EBUSY)
+			goto out;
+
+		acomp_reqchain_virt(state, err);
+		list_add_tail(&req->base.list, &req0->base.list);
+	}
+
+	acomp_restore_req(state);
+
+out:
+	return err;
+}
+
+static void acomp_reqchain_done(void *data, int err)
+{
+	struct acomp_req_chain *state = data;
+	crypto_completion_t compl = state->compl;
+
+	data = state->data;
+
+	if (err == -EINPROGRESS) {
+		if (!list_empty(&state->head))
+			return;
+		goto notify;
+	}
+
+	err = acomp_reqchain_finish(state, err, CRYPTO_TFM_REQ_MAY_BACKLOG);
+	if (err == -EBUSY)
+		return;
+
+notify:
+	compl(data, err);
+}
+
+static int acomp_do_req_chain(struct acomp_req *req,
+			      int (*op)(struct acomp_req *req))
+{
+	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+	struct acomp_req_chain *state = &req->chain;
+	int err;
+
+	if (crypto_acomp_req_chain(tfm) ||
+	    (!acomp_request_chained(req) && !acomp_request_isvirt(req)))
+		return op(req);
+
+	/*
+	 * There are no in-kernel users that do this.  If such users
+	 * ever come into being then we could add a fall-back path.
+	 */
+	if (acomp_request_has_nondma(req))
+		return -EINVAL;
+
+	if (acomp_is_async(tfm)) {
+		acomp_save_req(req, acomp_reqchain_done);
+		state = req->base.data;
+	}
+
+	state->op = op;
+	state->cur = req;
+	state->src = NULL;
+	INIT_LIST_HEAD(&state->head);
+	list_splice_init(&req->base.list, &state->head);
+
+	acomp_virt_to_sg(req);
+	err = op(req);
+	if (err == -EBUSY || err == -EINPROGRESS)
+		return -EBUSY;
+
+	return acomp_reqchain_finish(state, err, ~0);
+}
+
+int crypto_acomp_compress(struct acomp_req *req)
+{
+	return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->compress);
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_compress);
+
+int crypto_acomp_decompress(struct acomp_req *req)
+{
+	return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->decompress);
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_decompress);
+
void comp_prepare_alg(struct comp_alg_common *alg)
{
	struct crypto_alg *base = &alg->base;
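A hedged sketch of the chaining side, assuming the
acomp_request_chain() helper added by the companion header change in
this series (req, req2 and the wait object set up as in the earlier
example; all other names illustrative):

    /*
     * Queue req2 behind req, then submit only the head request.
     * On a tfm whose driver lacks native chaining support,
     * acomp_do_req_chain() above walks the list and runs each
     * request in turn; the caller's completion fires once for
     * the whole chain, with per-request status in req->base.err.
     */
    acomp_request_set_src_dma(req2, src2, slen2);
    acomp_request_set_dst_dma(req2, dst2, dlen2);
    acomp_request_chain(req2, req);

    err = crypto_wait_req(crypto_acomp_compress(req), &wait);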