Diffstat (limited to 'crypto/acompress.c')
-rw-r--r--  crypto/acompress.c  463
1 file changed, 403 insertions(+), 60 deletions(-)
diff --git a/crypto/acompress.c b/crypto/acompress.c
index 1c682810a484..be28cbfd22e3 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -8,21 +8,36 @@
*/
#include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
-#include <linux/errno.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include <net/netlink.h>
#include "compress.h"
struct crypto_scomp;
+enum {
+ ACOMP_WALK_SLEEP = 1 << 0,
+ ACOMP_WALK_SRC_LINEAR = 1 << 1,
+ ACOMP_WALK_DST_LINEAR = 1 << 2,
+};
+
static const struct crypto_type crypto_acomp_type;
+static void acomp_reqchain_done(void *data, int err);
+
static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
{
return container_of(alg, struct acomp_alg, calg.base);
@@ -58,29 +73,54 @@ static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
struct acomp_alg *alg = crypto_acomp_alg(acomp);
- alg->exit(acomp);
+ if (alg->exit)
+ alg->exit(acomp);
+
+ if (acomp_is_async(acomp))
+ crypto_free_acomp(crypto_acomp_fb(acomp));
}
static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
struct acomp_alg *alg = crypto_acomp_alg(acomp);
+ struct crypto_acomp *fb = NULL;
+ int err;
if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
return crypto_init_scomp_ops_async(tfm);
+ if (acomp_is_async(acomp)) {
+ fb = crypto_alloc_acomp(crypto_acomp_alg_name(acomp), 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(fb))
+ return PTR_ERR(fb);
+
+ err = -EINVAL;
+ if (crypto_acomp_reqsize(fb) > MAX_SYNC_COMP_REQSIZE)
+ goto out_free_fb;
+
+ tfm->fb = crypto_acomp_tfm(fb);
+ }
+
acomp->compress = alg->compress;
acomp->decompress = alg->decompress;
- acomp->dst_free = alg->dst_free;
- acomp->reqsize = alg->reqsize;
+ acomp->reqsize = alg->base.cra_reqsize;
- if (alg->exit)
- acomp->base.exit = crypto_acomp_exit_tfm;
+ acomp->base.exit = crypto_acomp_exit_tfm;
- if (alg->init)
- return alg->init(acomp);
+ if (!alg->init)
+ return 0;
+
+ err = alg->init(acomp);
+ if (err)
+ goto out_free_fb;
return 0;
+
+out_free_fb:
+ crypto_free_acomp(fb);
+ return err;
}
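
With this hunk, every async acomp transform transparently allocates a synchronous fallback of the same algorithm at init time, and init fails if the fallback's request size exceeds MAX_SYNC_COMP_REQSIZE. Nothing changes at the call site; a minimal sketch, with an illustrative algorithm name:

	struct crypto_acomp *tfm;

	/* The sync fallback created in crypto_acomp_init_tfm() is
	 * internal; callers never see or manage it.
	 */
	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* ... use tfm ... */
	crypto_free_acomp(tfm);
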
static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
@@ -93,32 +133,6 @@ static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
return extsize;
}
-static inline int __crypto_acomp_report_stat(struct sk_buff *skb,
- struct crypto_alg *alg)
-{
- struct comp_alg_common *calg = __crypto_comp_alg_common(alg);
- struct crypto_istat_compress *istat = comp_get_stat(calg);
- struct crypto_stat_compress racomp;
-
- memset(&racomp, 0, sizeof(racomp));
-
- strscpy(racomp.type, "acomp", sizeof(racomp.type));
- racomp.stat_compress_cnt = atomic64_read(&istat->compress_cnt);
- racomp.stat_compress_tlen = atomic64_read(&istat->compress_tlen);
- racomp.stat_decompress_cnt = atomic64_read(&istat->decompress_cnt);
- racomp.stat_decompress_tlen = atomic64_read(&istat->decompress_tlen);
- racomp.stat_err_cnt = atomic64_read(&istat->err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp);
-}
-
-#ifdef CONFIG_CRYPTO_STATS
-int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return __crypto_acomp_report_stat(skb, alg);
-}
-#endif
-
static const struct crypto_type crypto_acomp_type = {
.extsize = crypto_acomp_extsize,
.init_tfm = crypto_acomp_init_tfm,
@@ -128,13 +142,11 @@ static const struct crypto_type crypto_acomp_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_acomp_report,
#endif
-#ifdef CONFIG_CRYPTO_STATS
- .report_stat = crypto_acomp_report_stat,
-#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
.type = CRYPTO_ALG_TYPE_ACOMPRESS,
.tfmsize = offsetof(struct crypto_acomp, base),
+ .algsize = offsetof(struct acomp_alg, base),
};
struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
@@ -152,45 +164,152 @@ struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);
-struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
+static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt)
+{
+ struct acomp_req_chain *state = &req->chain;
+
+ state->compl = req->base.complete;
+ state->data = req->base.data;
+ req->base.complete = cplt;
+ req->base.data = state;
+}
+
+static void acomp_restore_req(struct acomp_req *req)
{
- struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
- struct acomp_req *req;
+ struct acomp_req_chain *state = req->base.data;
- req = __acomp_request_alloc(acomp);
- if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
- return crypto_acomp_scomp_alloc_ctx(req);
+ req->base.complete = state->compl;
+ req->base.data = state->data;
+}
- return req;
+static void acomp_reqchain_virt(struct acomp_req *req)
+{
+ struct acomp_req_chain *state = &req->chain;
+ unsigned int slen = req->slen;
+ unsigned int dlen = req->dlen;
+
+ if (state->flags & CRYPTO_ACOMP_REQ_SRC_VIRT)
+ acomp_request_set_src_dma(req, state->src, slen);
+ if (state->flags & CRYPTO_ACOMP_REQ_DST_VIRT)
+ acomp_request_set_dst_dma(req, state->dst, dlen);
}
-EXPORT_SYMBOL_GPL(acomp_request_alloc);
-void acomp_request_free(struct acomp_req *req)
+static void acomp_virt_to_sg(struct acomp_req *req)
{
- struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
- struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+ struct acomp_req_chain *state = &req->chain;
- if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
- crypto_acomp_scomp_free_ctx(req);
+ state->flags = req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
+ CRYPTO_ACOMP_REQ_DST_VIRT);
- if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
- acomp->dst_free(req->dst);
- req->dst = NULL;
+ if (acomp_request_src_isvirt(req)) {
+ unsigned int slen = req->slen;
+ const u8 *svirt = req->svirt;
+
+ state->src = svirt;
+ sg_init_one(&state->ssg, svirt, slen);
+ acomp_request_set_src_sg(req, &state->ssg, slen);
+ }
+
+ if (acomp_request_dst_isvirt(req)) {
+ unsigned int dlen = req->dlen;
+ u8 *dvirt = req->dvirt;
+
+ state->dst = dvirt;
+ sg_init_one(&state->dsg, dvirt, dlen);
+ acomp_request_set_dst_sg(req, &state->dsg, dlen);
}
+}
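
acomp_virt_to_sg() is what lets callers pass plain virtual addresses while the driver still sees scatterlists: the single-entry ssg/dsg lists live in the request's chain state. The caller side, sketched with the same setters referenced above (src_buf/dst_buf are illustrative, assumed DMA-capable buffers):

	acomp_request_set_src_dma(req, src_buf, src_len);
	acomp_request_set_dst_dma(req, dst_buf, dst_len);
	err = crypto_acomp_compress(req);
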
+
+static int acomp_do_nondma(struct acomp_req *req, bool comp)
+{
+ ACOMP_FBREQ_ON_STACK(fbreq, req);
+ int err;
+
+ if (comp)
+ err = crypto_acomp_compress(fbreq);
+ else
+ err = crypto_acomp_decompress(fbreq);
+
+ req->dlen = fbreq->dlen;
+ return err;
+}
+
+static int acomp_do_one_req(struct acomp_req *req, bool comp)
+{
+ if (acomp_request_isnondma(req))
+ return acomp_do_nondma(req, comp);
+
+ acomp_virt_to_sg(req);
+ return comp ? crypto_acomp_reqtfm(req)->compress(req) :
+ crypto_acomp_reqtfm(req)->decompress(req);
+}
+
+static int acomp_reqchain_finish(struct acomp_req *req, int err)
+{
+ acomp_reqchain_virt(req);
+ acomp_restore_req(req);
+ return err;
+}
+
+static void acomp_reqchain_done(void *data, int err)
+{
+ struct acomp_req *req = data;
+ crypto_completion_t compl;
+
+ compl = req->chain.compl;
+ data = req->chain.data;
+
+ if (err == -EINPROGRESS)
+ goto notify;
+
+ err = acomp_reqchain_finish(req, err);
+
+notify:
+ compl(data, err);
+}
+
+static int acomp_do_req_chain(struct acomp_req *req, bool comp)
+{
+ int err;
+
+ acomp_save_req(req, acomp_reqchain_done);
+
+ err = acomp_do_one_req(req, comp);
+ if (err == -EBUSY || err == -EINPROGRESS)
+ return err;
+
+ return acomp_reqchain_finish(req, err);
+}
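
In short, acomp_do_req_chain() swaps the caller's completion callback for acomp_reqchain_done() via acomp_save_req(), runs the request once, and either finishes synchronously (restoring the caller's callback and virtual-address parameters before returning) or, when the driver goes asynchronous, finishes from acomp_reqchain_done() and only then invokes the original callback with its original data.
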
+
+int crypto_acomp_compress(struct acomp_req *req)
+{
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+ if (acomp_req_on_stack(req) && acomp_is_async(tfm))
+ return -EAGAIN;
+ if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req))
+ return crypto_acomp_reqtfm(req)->compress(req);
+ return acomp_do_req_chain(req, true);
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_compress);
+
+int crypto_acomp_decompress(struct acomp_req *req)
+{
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
- __acomp_request_free(req);
+ if (acomp_req_on_stack(req) && acomp_is_async(tfm))
+ return -EAGAIN;
+ if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req))
+ return crypto_acomp_reqtfm(req)->decompress(req);
+ return acomp_do_req_chain(req, false);
}
-EXPORT_SYMBOL_GPL(acomp_request_free);
+EXPORT_SYMBOL_GPL(crypto_acomp_decompress);
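
crypto_acomp_compress() and crypto_acomp_decompress() are now the single entry points: a request goes straight to the driver when it carries scatterlists or the driver accepts virtual addresses, and through the chaining path above otherwise. A minimal waiting caller, sketched with illustrative names (tfm, src, dst and the lengths are assumptions of this example):

	DECLARE_CRYPTO_WAIT(wait);
	struct acomp_req *req;
	int err;

	req = acomp_request_alloc(tfm);
	if (!req)
		return -ENOMEM;

	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	acomp_request_set_params(req, src, dst, src_len, dst_len);

	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
	acomp_request_free(req);
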
void comp_prepare_alg(struct comp_alg_common *alg)
{
- struct crypto_istat_compress *istat = comp_get_stat(alg);
struct crypto_alg *base = &alg->base;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- memset(istat, 0, sizeof(*istat));
}
int crypto_register_acomp(struct acomp_alg *alg)
@@ -241,5 +360,229 @@ void crypto_unregister_acomps(struct acomp_alg *algs, int count)
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomps);
+static void acomp_stream_workfn(struct work_struct *work)
+{
+ struct crypto_acomp_streams *s =
+ container_of(work, struct crypto_acomp_streams, stream_work);
+ struct crypto_acomp_stream __percpu *streams = s->streams;
+ int cpu;
+
+ for_each_cpu(cpu, &s->stream_want) {
+ struct crypto_acomp_stream *ps;
+ void *ctx;
+
+ ps = per_cpu_ptr(streams, cpu);
+ if (ps->ctx)
+ continue;
+
+ ctx = s->alloc_ctx();
+ if (IS_ERR(ctx))
+ break;
+
+ spin_lock_bh(&ps->lock);
+ ps->ctx = ctx;
+ spin_unlock_bh(&ps->lock);
+
+ cpumask_clear_cpu(cpu, &s->stream_want);
+ }
+}
+
+void crypto_acomp_free_streams(struct crypto_acomp_streams *s)
+{
+ struct crypto_acomp_stream __percpu *streams = s->streams;
+ void (*free_ctx)(void *);
+ int i;
+
+ s->streams = NULL;
+ if (!streams)
+ return;
+
+ cancel_work_sync(&s->stream_work);
+ free_ctx = s->free_ctx;
+
+ for_each_possible_cpu(i) {
+ struct crypto_acomp_stream *ps = per_cpu_ptr(streams, i);
+
+ if (!ps->ctx)
+ continue;
+
+ free_ctx(ps->ctx);
+ }
+
+ free_percpu(streams);
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_free_streams);
+
+int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s)
+{
+ struct crypto_acomp_stream __percpu *streams;
+ struct crypto_acomp_stream *ps;
+ unsigned int i;
+ void *ctx;
+
+ if (s->streams)
+ return 0;
+
+ streams = alloc_percpu(struct crypto_acomp_stream);
+ if (!streams)
+ return -ENOMEM;
+
+ ctx = s->alloc_ctx();
+ if (IS_ERR(ctx)) {
+ free_percpu(streams);
+ return PTR_ERR(ctx);
+ }
+
+ i = cpumask_first(cpu_possible_mask);
+ ps = per_cpu_ptr(streams, i);
+ ps->ctx = ctx;
+
+ for_each_possible_cpu(i) {
+ ps = per_cpu_ptr(streams, i);
+ spin_lock_init(&ps->lock);
+ }
+
+ s->streams = streams;
+
+ INIT_WORK(&s->stream_work, acomp_stream_workfn);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_alloc_streams);
+
+struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
+ struct crypto_acomp_streams *s) __acquires(stream)
+{
+ struct crypto_acomp_stream __percpu *streams = s->streams;
+ int cpu = raw_smp_processor_id();
+ struct crypto_acomp_stream *ps;
+
+ ps = per_cpu_ptr(streams, cpu);
+ spin_lock_bh(&ps->lock);
+ if (likely(ps->ctx))
+ return ps;
+ spin_unlock(&ps->lock);
+
+ cpumask_set_cpu(cpu, &s->stream_want);
+ schedule_work(&s->stream_work);
+
+ ps = per_cpu_ptr(streams, cpumask_first(cpu_possible_mask));
+ spin_lock(&ps->lock);
+ return ps;
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_lock_stream_bh);
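
The streams API gives drivers a lazily allocated per-CPU context: crypto_acomp_lock_stream_bh() hands back the local CPU's stream when its ctx is ready, and otherwise queues allocation on the workqueue and borrows the first possible CPU's stream, which crypto_acomp_alloc_streams() populated up front so it always exists. The inner unlock is deliberately not _bh: softirqs stay disabled until the caller releases the stream. A hedged driver-side sketch, where my_alloc_ctx/my_free_ctx/my_deflate are hypothetical driver hooks:

	static struct crypto_acomp_streams my_streams = {
		.alloc_ctx = my_alloc_ctx,
		.free_ctx  = my_free_ctx,
	};

	static int my_compress_one(struct acomp_req *req)
	{
		struct crypto_acomp_stream *s;
		int err;

		/* BH stays disabled from lock to unlock */
		s = crypto_acomp_lock_stream_bh(&my_streams);
		err = my_deflate(s->ctx, req);
		crypto_acomp_unlock_stream_bh(s);
		return err;
	}
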
+
+void acomp_walk_done_src(struct acomp_walk *walk, int used)
+{
+ walk->slen -= used;
+ if ((walk->flags & ACOMP_WALK_SRC_LINEAR))
+ scatterwalk_advance(&walk->in, used);
+ else
+ scatterwalk_done_src(&walk->in, used);
+
+ if ((walk->flags & ACOMP_WALK_SLEEP))
+ cond_resched();
+}
+EXPORT_SYMBOL_GPL(acomp_walk_done_src);
+
+void acomp_walk_done_dst(struct acomp_walk *walk, int used)
+{
+ walk->dlen -= used;
+ if ((walk->flags & ACOMP_WALK_DST_LINEAR))
+ scatterwalk_advance(&walk->out, used);
+ else
+ scatterwalk_done_dst(&walk->out, used);
+
+ if ((walk->flags & ACOMP_WALK_SLEEP))
+ cond_resched();
+}
+EXPORT_SYMBOL_GPL(acomp_walk_done_dst);
+
+int acomp_walk_next_src(struct acomp_walk *walk)
+{
+ unsigned int slen = walk->slen;
+ unsigned int max = UINT_MAX;
+
+ if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
+ max = PAGE_SIZE;
+ if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
+ walk->in.__addr = (void *)(((u8 *)walk->in.sg) +
+ walk->in.offset);
+ return min(slen, max);
+ }
+
+ return slen ? scatterwalk_next(&walk->in, slen) : 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_next_src);
+
+int acomp_walk_next_dst(struct acomp_walk *walk)
+{
+ unsigned int dlen = walk->dlen;
+ unsigned int max = UINT_MAX;
+
+ if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
+ max = PAGE_SIZE;
+ if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
+ walk->out.__addr = (void *)(((u8 *)walk->out.sg) +
+ walk->out.offset);
+ return min(dlen, max);
+ }
+
+ return dlen ? scatterwalk_next(&walk->out, dlen) : 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_next_dst);
+
+int acomp_walk_virt(struct acomp_walk *__restrict walk,
+ struct acomp_req *__restrict req, bool atomic)
+{
+ struct scatterlist *src = req->src;
+ struct scatterlist *dst = req->dst;
+
+ walk->slen = req->slen;
+ walk->dlen = req->dlen;
+
+ if (!walk->slen || !walk->dlen)
+ return -EINVAL;
+
+ walk->flags = 0;
+ if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
+ walk->flags |= ACOMP_WALK_SLEEP;
+ if ((req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT))
+ walk->flags |= ACOMP_WALK_SRC_LINEAR;
+ if ((req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT))
+ walk->flags |= ACOMP_WALK_DST_LINEAR;
+
+ if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
+ walk->in.sg = (void *)req->svirt;
+ walk->in.offset = 0;
+ } else
+ scatterwalk_start(&walk->in, src);
+ if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
+ walk->out.sg = (void *)req->dvirt;
+ walk->out.offset = 0;
+ } else
+ scatterwalk_start(&walk->out, dst);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_virt);
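
The walk API ties the pieces together: acomp_walk_virt() primes the in/out cursors (linear buffers are smuggled through the sg pointer, hence the casts above), acomp_walk_next_src()/acomp_walk_next_dst() map the next contiguous chunk, and acomp_walk_done_src()/acomp_walk_done_dst() advance past what was consumed. A sketch of the loop, with my_engine_step() as a hypothetical stand-in for the actual (de)compression step; it is assumed to consume all of scur and return the bytes written, while real drivers drain src and dst at different rates:

	struct acomp_walk walk;
	int err, scur, dcur, n;

	err = acomp_walk_virt(&walk, req, false);
	if (err)
		return err;

	do {
		dcur = acomp_walk_next_dst(&walk);
		if (!dcur)
			return -ENOSPC;
		scur = acomp_walk_next_src(&walk);

		/* mapped chunk addresses, per the scatterwalk API */
		n = my_engine_step(walk.in.addr, scur, walk.out.addr, dcur);

		acomp_walk_done_src(&walk, scur);
		acomp_walk_done_dst(&walk, n);
	} while (scur);
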
+
+struct acomp_req *acomp_request_clone(struct acomp_req *req,
+ size_t total, gfp_t gfp)
+{
+ struct acomp_req *nreq;
+
+ nreq = container_of(crypto_request_clone(&req->base, total, gfp),
+ struct acomp_req, base);
+ if (nreq == req)
+ return req;
+
+ if (req->src == &req->chain.ssg)
+ nreq->src = &nreq->chain.ssg;
+ if (req->dst == &req->chain.dsg)
+ nreq->dst = &nreq->chain.dsg;
+ return nreq;
+}
+EXPORT_SYMBOL_GPL(acomp_request_clone);
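
acomp_request_clone() has to fix up src/dst when the original request pointed at its own embedded chain.ssg/dsg scatterlists, since a byte-for-byte copy would leave the clone referencing the old request's memory. If allocation fails, crypto_request_clone() hands back the original request, which the nreq == req check passes straight through. A sketch, where the size arithmetic is an assumption of this example:

	struct acomp_req *nreq;

	nreq = acomp_request_clone(req,
				   sizeof(*req) + crypto_acomp_reqsize(tfm),
				   GFP_ATOMIC);
	if (nreq == req) {
		/* no memory: still operating on the original request */
	}
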
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");