Diffstat (limited to 'crypto')
-rw-r--r-- | crypto/ahash.c               |  39
-rw-r--r-- | crypto/cryptd.c              |   6
-rw-r--r-- | crypto/crypto_engine.c       |  55
-rw-r--r-- | crypto/deflate.c             |   7
-rw-r--r-- | crypto/jitterentropy-kcapi.c |   9
-rw-r--r-- | crypto/jitterentropy.c       |   2
-rw-r--r-- | crypto/krb5/selftest.c       |   1
-rw-r--r-- | crypto/pcrypt.c              |   7
-rw-r--r-- | crypto/testmgr.c             |  39
-rw-r--r-- | crypto/zstd.c                | 356
10 files changed, 310 insertions, 211 deletions
diff --git a/crypto/ahash.c b/crypto/ahash.c
index bc84a07c924c..a227793d2c5b 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -29,19 +29,6 @@
 
 #define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e
 
-struct crypto_hash_walk {
-	const char *data;
-
-	unsigned int offset;
-	unsigned int flags;
-
-	struct page *pg;
-	unsigned int entrylen;
-
-	unsigned int total;
-	struct scatterlist *sg;
-};
-
 static int ahash_def_finup(struct ahash_request *req);
 
 static inline bool crypto_ahash_block_only(struct crypto_ahash *tfm)
@@ -112,8 +99,8 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
 	return hash_walk_next(walk);
 }
 
-static int crypto_hash_walk_first(struct ahash_request *req,
-				  struct crypto_hash_walk *walk)
+int crypto_hash_walk_first(struct ahash_request *req,
+			   struct crypto_hash_walk *walk)
 {
 	walk->total = req->nbytes;
 	walk->entrylen = 0;
@@ -133,8 +120,9 @@ static int crypto_hash_walk_first(struct ahash_request *req,
 
 	return hash_walk_new_entry(walk);
 }
+EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
 
-static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
+int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 {
 	if ((walk->flags & CRYPTO_AHASH_REQ_VIRT))
 		return err;
@@ -160,11 +148,7 @@ static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 
 	return hash_walk_new_entry(walk);
 }
-
-static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
-{
-	return !(walk->entrylen | walk->total);
-}
+EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
 
 /*
  * For an ahash tfm that is using an shash algorithm (instead of an ahash
@@ -347,6 +331,12 @@ static int ahash_do_req_chain(struct ahash_request *req,
 	if (crypto_ahash_statesize(tfm) > HASH_MAX_STATESIZE)
 		return -ENOSYS;
 
+	if (!crypto_ahash_need_fallback(tfm))
+		return -ENOSYS;
+
+	if (crypto_hash_no_export_core(tfm))
+		return -ENOSYS;
+
 	{
 		u8 state[HASH_MAX_STATESIZE];
 
@@ -954,6 +944,10 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
 	    base->cra_reqsize > MAX_SYNC_HASH_REQSIZE)
 		return -EINVAL;
 
+	if (base->cra_flags & CRYPTO_ALG_NEED_FALLBACK &&
+	    base->cra_flags & CRYPTO_ALG_NO_FALLBACK)
+		return -EINVAL;
+
 	err = hash_prepare_alg(&alg->halg);
 	if (err)
 		return err;
@@ -962,7 +956,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
 	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
 
 	if ((base->cra_flags ^ CRYPTO_ALG_REQ_VIRT) &
-	    (CRYPTO_ALG_ASYNC | CRYPTO_ALG_REQ_VIRT))
+	    (CRYPTO_ALG_ASYNC | CRYPTO_ALG_REQ_VIRT) &&
+	    !(base->cra_flags & CRYPTO_ALG_NO_FALLBACK))
 		base->cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
 
 	if (!alg->setkey)
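The two walk helpers lose their static and are exported again, so ahash drivers can consume scatterlist input through the common walker (struct crypto_hash_walk itself leaves this file, presumably for the internal hash header, and presumably on behalf of the new s390 phmac driver exercised in testmgr below). A minimal sketch of the driver-side pattern this enables; mydrv_ahash_update() and my_update_block() are hypothetical names:

	/* Sketch: iterate an ahash_request chunk by chunk via the walk API.
	 * Assumes only crypto_hash_walk_first()/_done() as exported above. */
	static int mydrv_ahash_update(struct ahash_request *req)
	{
		struct crypto_hash_walk walk;
		int nbytes;

		for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
		     nbytes = crypto_hash_walk_done(&walk, 0))
			my_update_block(req, walk.data, nbytes); /* hypothetical */

		return nbytes; /* 0 once the walk completed, negative on error */
	}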
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 5bb6f8d88cc2..efff54e707cb 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -34,6 +34,7 @@ MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
 static struct workqueue_struct *cryptd_wq;
 
 struct cryptd_cpu_queue {
+	local_lock_t bh_lock;
 	struct crypto_queue queue;
 	struct work_struct work;
 };
@@ -110,6 +111,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue,
 		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
 		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
 		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
+		local_lock_init(&cpu_queue->bh_lock);
 	}
 	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
 	return 0;
@@ -135,6 +137,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
 	refcount_t *refcnt;
 
 	local_bh_disable();
+	local_lock_nested_bh(&queue->cpu_queue->bh_lock);
 	cpu_queue = this_cpu_ptr(queue->cpu_queue);
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 
@@ -151,6 +154,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
 		refcount_inc(refcnt);
 
 out:
+	local_unlock_nested_bh(&queue->cpu_queue->bh_lock);
 	local_bh_enable();
 
 	return err;
@@ -169,8 +173,10 @@ static void cryptd_queue_worker(struct work_struct *work)
 	 * Only handle one request at a time to avoid hogging crypto workqueue.
 	 */
 	local_bh_disable();
+	__local_lock_nested_bh(&cpu_queue->bh_lock);
 	backlog = crypto_get_backlog(&cpu_queue->queue);
 	req = crypto_dequeue_request(&cpu_queue->queue);
+	__local_unlock_nested_bh(&cpu_queue->bh_lock);
 	local_bh_enable();
 
 	if (!req)
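The new bh_lock accounts for PREEMPT_RT, where local_bh_disable() no longer implies exclusive access to per-CPU data. A minimal sketch of the same nested-BH local-lock pattern on a hypothetical per-CPU structure:

	#include <linux/local_lock.h>
	#include <linux/percpu.h>

	struct my_cpu_queue {			/* hypothetical */
		local_lock_t bh_lock;
		unsigned int pending;
	};

	static DEFINE_PER_CPU(struct my_cpu_queue, my_queues) = {
		.bh_lock = INIT_LOCAL_LOCK(bh_lock),
	};

	static void my_enqueue(void)
	{
		local_bh_disable();
		/* Lockdep annotation on !PREEMPT_RT; a real per-CPU lock on RT. */
		local_lock_nested_bh(&my_queues.bh_lock);
		this_cpu_ptr(&my_queues)->pending++;
		local_unlock_nested_bh(&my_queues.bh_lock);
		local_bh_enable();
	}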
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index 445d3c113ee1..18e1689efe12 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -74,7 +74,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 	struct crypto_engine_alg *alg;
 	struct crypto_engine_op *op;
 	unsigned long flags;
-	bool was_busy = false;
 	int ret;
 
 	spin_lock_irqsave(&engine->queue_lock, flags);
@@ -83,12 +82,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 	if (!engine->retry_support && engine->cur_req)
 		goto out;
 
-	/* If another context is idling then defer */
-	if (engine->idling) {
-		kthread_queue_work(engine->kworker, &engine->pump_requests);
-		goto out;
-	}
-
 	/* Check if the engine queue is idle */
 	if (!crypto_queue_len(&engine->queue) || !engine->running) {
 		if (!engine->busy)
@@ -102,15 +95,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 		}
 
 		engine->busy = false;
-		engine->idling = true;
-		spin_unlock_irqrestore(&engine->queue_lock, flags);
-
-		if (engine->unprepare_crypt_hardware &&
-		    engine->unprepare_crypt_hardware(engine))
-			dev_err(engine->dev, "failed to unprepare crypt hardware\n");
-
-		spin_lock_irqsave(&engine->queue_lock, flags);
-		engine->idling = false;
 		goto out;
 	}
 
@@ -129,22 +113,11 @@ start_request:
 	if (!engine->retry_support)
 		engine->cur_req = async_req;
 
-	if (engine->busy)
-		was_busy = true;
-	else
+	if (!engine->busy)
 		engine->busy = true;
 
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 
-	/* Until here we get the request need to be encrypted successfully */
-	if (!was_busy && engine->prepare_crypt_hardware) {
-		ret = engine->prepare_crypt_hardware(engine);
-		if (ret) {
-			dev_err(engine->dev, "failed to prepare crypt hardware\n");
-			goto req_err_1;
-		}
-	}
-
 	alg = container_of(async_req->tfm->__crt_alg,
 			   struct crypto_engine_alg, base);
 	op = &alg->op;
@@ -195,17 +168,6 @@ retry:
 
 out:
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
-
-	/*
-	 * Batch requests is possible only if
-	 * hardware can enqueue multiple requests
-	 */
-	if (engine->do_batch_requests) {
-		ret = engine->do_batch_requests(engine);
-		if (ret)
-			dev_err(engine->dev, "failed to do batch requests: %d\n",
-				ret);
-	}
-
 	return;
 }
 
@@ -462,12 +424,6 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop);
 * crypto-engine queue.
 * @dev: the device attached with one hardware engine
 * @retry_support: whether hardware has support for retry mechanism
-* @cbk_do_batch: pointer to a callback function to be invoked when executing
-*                a batch of requests.
-*                This has the form:
-*                callback(struct crypto_engine *engine)
-*                where:
-*                engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
@@ -476,7 +432,6 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop);
 */
 struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
-						       int (*cbk_do_batch)(struct crypto_engine *engine),
						       bool rt, int qlen)
 {
 	struct crypto_engine *engine;
@@ -492,14 +447,8 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
 	engine->rt = rt;
 	engine->running = false;
 	engine->busy = false;
-	engine->idling = false;
 	engine->retry_support = retry_support;
 	engine->priv_data = dev;
-	/*
-	 * Batch requests is possible only if
-	 * hardware has support for retry mechanism.
-	 */
-	engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;
 
 	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));
@@ -534,7 +483,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
 */
 struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
 {
-	return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
+	return crypto_engine_alloc_init_and_set(dev, false, rt,
						CRYPTO_ENGINE_MAX_QLEN);
 }
 EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
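Dropping the batching hook changes the crypto_engine_alloc_init_and_set() signature, so driver call sites shrink by one argument. A sketch of the update (the surrounding driver code is hypothetical):

	/* Before: a do-batch callback sat between retry_support and rt. */
	engine = crypto_engine_alloc_init_and_set(dev, true, NULL, true,
						  CRYPTO_ENGINE_MAX_QLEN);

	/* After: only retry_support, rt and qlen remain. */
	engine = crypto_engine_alloc_init_and_set(dev, true, true,
						  CRYPTO_ENGINE_MAX_QLEN);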
diff --git a/crypto/deflate.c b/crypto/deflate.c
index fe8e4ad0fee1..21404515dc77 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -48,9 +48,14 @@ static void *deflate_alloc_stream(void)
 	return ctx;
 }
 
+static void deflate_free_stream(void *ctx)
+{
+	kvfree(ctx);
+}
+
 static struct crypto_acomp_streams deflate_streams = {
 	.alloc_ctx = deflate_alloc_stream,
-	.free_ctx = kvfree,
+	.free_ctx = deflate_free_stream,
 };
 
 static int deflate_compress_one(struct acomp_req *req,
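deflate (and zstd further down) keeps its workspaces in a crypto_acomp_streams object, which hands out per-CPU stream contexts. A sketch of the consumer side, assuming the deflate_streams object above; the exact deflate_compress_one() signature is an assumption:

	static int deflate_compress(struct acomp_req *req)
	{
		struct crypto_acomp_stream *s;
		int ret;

		/* Take this CPU's stream; BH stays disabled while it is held. */
		s = crypto_acomp_lock_stream_bh(&deflate_streams);
		ret = deflate_compress_one(req, s->ctx); /* signature assumed */
		crypto_acomp_unlock_stream_bh(s);

		return ret;
	}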
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index c24d4ff2b4a8..1266eb790708 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -144,7 +144,7 @@ int jent_hash_time(void *hash_state, __u64 time, u8 *addtl,
 	 * Inject the data from the previous loop into the pool. This data is
 	 * not considered to contain any entropy, but it stirs the pool a bit.
 	 */
-	ret = crypto_shash_update(desc, intermediary, sizeof(intermediary));
+	ret = crypto_shash_update(hash_state_desc, intermediary, sizeof(intermediary));
 	if (ret)
 		goto err;
 
@@ -157,11 +157,12 @@ int jent_hash_time(void *hash_state, __u64 time, u8 *addtl,
 	 * conditioning operation to have an identical amount of input data
 	 * according to section 3.1.5.
 	 */
-	if (!stuck) {
-		ret = crypto_shash_update(hash_state_desc, (u8 *)&time,
-					  sizeof(__u64));
+	if (stuck) {
+		time = 0;
 	}
+	ret = crypto_shash_update(hash_state_desc, (u8 *)&time, sizeof(__u64));
+
 err:
 	shash_desc_zero(desc);
 	memzero_explicit(intermediary, sizeof(intermediary));
diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c
index 3b390bd6c119..3f93cdc9a7af 100644
--- a/crypto/jitterentropy.c
+++ b/crypto/jitterentropy.c
@@ -145,6 +145,7 @@ struct rand_data {
 */
 #define JENT_ENTROPY_SAFETY_FACTOR	64
 
+#include <linux/array_size.h>
 #include <linux/fips.h>
 #include <linux/minmax.h>
 #include "jitterentropy.h"
@@ -178,7 +179,6 @@ static const unsigned int jent_apt_cutoff_lookup[15] = {
 static const unsigned int jent_apt_cutoff_permanent_lookup[15] = {
	355, 447, 479, 494, 502, 507, 510, 512, 512, 512, 512, 512, 512, 512, 512 };
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 
 static void jent_apt_init(struct rand_data *ec, unsigned int osr)
 {
diff --git a/crypto/krb5/selftest.c b/crypto/krb5/selftest.c
index 2a81a6315a0d..4519c572d37e 100644
--- a/crypto/krb5/selftest.c
+++ b/crypto/krb5/selftest.c
@@ -152,6 +152,7 @@ static int krb5_test_one_prf(const struct krb5_prf_test *test)
 
 out:
 	clear_buf(&result);
+	clear_buf(&prf);
 	clear_buf(&octet);
 	clear_buf(&key);
 	return ret;
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index c33d29a523e0..c3a9d4f2995c 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -178,7 +178,7 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
 
 static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
 {
-	int cpu, cpu_index;
+	int cpu_index;
 	struct aead_instance *inst = aead_alg_instance(tfm);
 	struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst);
 	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -187,10 +187,7 @@ static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
 	cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) %
		    cpumask_weight(cpu_online_mask);
 
-	ctx->cb_cpu = cpumask_first(cpu_online_mask);
-	for (cpu = 0; cpu < cpu_index; cpu++)
-		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);
-
+	ctx->cb_cpu = cpumask_nth(cpu_index, cpu_online_mask);
 
 	cipher = crypto_spawn_aead(&ictx->spawn);
 	if (IS_ERR(cipher))
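cpumask_nth() collapses pcrypt's open-coded walk into one call. The equivalence, as a standalone sketch (n must stay below cpumask_weight(cpu_online_mask)):

	/* Open-coded: start at the first online CPU and advance n times... */
	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < n; i++)
		cpu = cpumask_next(cpu, cpu_online_mask);

	/* ...which is exactly the n-th set bit that cpumask_nth() returns: */
	cpu = cpumask_nth(n, cpu_online_mask);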
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index d636e04b55d5..ee33ba21ae2b 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -4186,7 +4186,6 @@ static const struct alg_test_desc alg_test_descs[] = {
 		.alg = "authenc(hmac(sha1),cbc(aes))",
 		.generic_driver = "authenc(hmac-sha1-lib,cbc(aes-generic))",
 		.test = alg_test_aead,
-		.fips_allowed = 1,
 		.suite = {
 			.aead = __VECS(hmac_sha1_aes_cbc_tv_temp)
 		}
@@ -4207,7 +4206,6 @@ static const struct alg_test_desc alg_test_descs[] = {
 	}, {
 		.alg = "authenc(hmac(sha1),ctr(aes))",
 		.test = alg_test_null,
-		.fips_allowed = 1,
 	}, {
 		.alg = "authenc(hmac(sha1),ecb(cipher_null))",
 		.generic_driver = "authenc(hmac-sha1-lib,ecb-cipher_null)",
@@ -4218,7 +4216,6 @@ static const struct alg_test_desc alg_test_descs[] = {
 	}, {
 		.alg = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
 		.test = alg_test_null,
-		.fips_allowed = 1,
 	}, {
 		.alg = "authenc(hmac(sha224),cbc(des))",
 		.generic_driver = "authenc(hmac-sha224-lib,cbc(des-generic))",
@@ -4712,6 +4709,7 @@ static const struct alg_test_desc alg_test_descs[] = {
 		 */
 		.alg = "drbg_nopr_hmac_sha384",
 		.test = alg_test_null,
+		.fips_allowed = 1
 	}, {
 		.alg = "drbg_nopr_hmac_sha512",
 		.test = alg_test_drbg,
@@ -4730,6 +4728,7 @@ static const struct alg_test_desc alg_test_descs[] = {
 		/* covered by drbg_nopr_sha256 test */
 		.alg = "drbg_nopr_sha384",
 		.test = alg_test_null,
+		.fips_allowed = 1
 	}, {
 		.alg = "drbg_nopr_sha512",
 		.fips_allowed = 1,
@@ -4761,6 +4760,7 @@ static const struct alg_test_desc alg_test_descs[] = {
 		/* covered by drbg_pr_hmac_sha256 test */
 		.alg = "drbg_pr_hmac_sha384",
 		.test = alg_test_null,
+		.fips_allowed = 1
 	}, {
 		.alg = "drbg_pr_hmac_sha512",
 		.test = alg_test_null,
@@ -4776,6 +4776,7 @@ static const struct alg_test_desc alg_test_descs[] = {
 		/* covered by drbg_pr_sha256 test */
 		.alg = "drbg_pr_sha384",
 		.test = alg_test_null,
+		.fips_allowed = 1
 	}, {
 		.alg = "drbg_pr_sha512",
 		.fips_allowed = 1,
@@ -5077,7 +5078,6 @@ static const struct alg_test_desc alg_test_descs[] = {
 		.alg = "hmac(sha1)",
 		.generic_driver = "hmac-sha1-lib",
 		.test = alg_test_hash,
-		.fips_allowed = 1,
 		.suite = {
 			.hash = __VECS(hmac_sha1_tv_template)
 		}
@@ -5291,6 +5291,36 @@ static const struct alg_test_desc alg_test_descs[] = {
 			.cipher = __VECS(fcrypt_pcbc_tv_template)
 		}
 	}, {
+#if IS_ENABLED(CONFIG_CRYPTO_PHMAC_S390)
+		.alg = "phmac(sha224)",
+		.test = alg_test_hash,
+		.fips_allowed = 1,
+		.suite = {
+			.hash = __VECS(hmac_sha224_tv_template)
+		}
+	}, {
+		.alg = "phmac(sha256)",
+		.test = alg_test_hash,
+		.fips_allowed = 1,
+		.suite = {
+			.hash = __VECS(hmac_sha256_tv_template)
+		}
+	}, {
+		.alg = "phmac(sha384)",
+		.test = alg_test_hash,
+		.fips_allowed = 1,
+		.suite = {
+			.hash = __VECS(hmac_sha384_tv_template)
+		}
+	}, {
+		.alg = "phmac(sha512)",
+		.test = alg_test_hash,
+		.fips_allowed = 1,
+		.suite = {
+			.hash = __VECS(hmac_sha512_tv_template)
+		}
+	}, {
+#endif
 		.alg = "pkcs1(rsa,none)",
 		.test = alg_test_sig,
 		.suite = {
@@ -5418,7 +5448,6 @@ static const struct alg_test_desc alg_test_descs[] = {
 		.alg = "sha1",
 		.generic_driver = "sha1-lib",
 		.test = alg_test_hash,
-		.fips_allowed = 1,
 		.suite = {
 			.hash = __VECS(sha1_tv_template)
 		}
diff --git a/crypto/zstd.c b/crypto/zstd.c
index 7570e11b4ee6..c2a19cb0879d 100644
--- a/crypto/zstd.c
+++ b/crypto/zstd.c
@@ -12,188 +12,304 @@
 #include <linux/net.h>
 #include <linux/vmalloc.h>
 #include <linux/zstd.h>
-#include <crypto/internal/scompress.h>
+#include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
 
-#define ZSTD_DEF_LEVEL	3
+#define ZSTD_DEF_LEVEL		3
+#define ZSTD_MAX_WINDOWLOG	18
+#define ZSTD_MAX_SIZE		BIT(ZSTD_MAX_WINDOWLOG)
 
 struct zstd_ctx {
 	zstd_cctx *cctx;
 	zstd_dctx *dctx;
-	void *cwksp;
-	void *dwksp;
+	size_t wksp_size;
+	zstd_parameters params;
+	u8 wksp[] __aligned(8);
 };
 
-static zstd_parameters zstd_params(void)
-{
-	return zstd_get_params(ZSTD_DEF_LEVEL, 0);
-}
+static DEFINE_MUTEX(zstd_stream_lock);
 
-static int zstd_comp_init(struct zstd_ctx *ctx)
+static void *zstd_alloc_stream(void)
 {
-	int ret = 0;
-	const zstd_parameters params = zstd_params();
-	const size_t wksp_size = zstd_cctx_workspace_bound(&params.cParams);
+	zstd_parameters params;
+	struct zstd_ctx *ctx;
+	size_t wksp_size;
 
-	ctx->cwksp = vzalloc(wksp_size);
-	if (!ctx->cwksp) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	params = zstd_get_params(ZSTD_DEF_LEVEL, ZSTD_MAX_SIZE);
 
-	ctx->cctx = zstd_init_cctx(ctx->cwksp, wksp_size);
-	if (!ctx->cctx) {
-		ret = -EINVAL;
-		goto out_free;
-	}
-out:
-	return ret;
-out_free:
-	vfree(ctx->cwksp);
-	goto out;
+	wksp_size = max_t(size_t,
+			  zstd_cstream_workspace_bound(&params.cParams),
+			  zstd_dstream_workspace_bound(ZSTD_MAX_SIZE));
+	if (!wksp_size)
+		return ERR_PTR(-EINVAL);
+
+	ctx = kvmalloc(sizeof(*ctx) + wksp_size, GFP_KERNEL);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	ctx->params = params;
+	ctx->wksp_size = wksp_size;
+
+	return ctx;
 }
 
-static int zstd_decomp_init(struct zstd_ctx *ctx)
+static void zstd_free_stream(void *ctx)
+{
+	kvfree(ctx);
+}
+
+static struct crypto_acomp_streams zstd_streams = {
+	.alloc_ctx = zstd_alloc_stream,
+	.free_ctx = zstd_free_stream,
+};
+
+static int zstd_init(struct crypto_acomp *acomp_tfm)
 {
 	int ret = 0;
-	const size_t wksp_size = zstd_dctx_workspace_bound();
 
-	ctx->dwksp = vzalloc(wksp_size);
-	if (!ctx->dwksp) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	mutex_lock(&zstd_stream_lock);
+	ret = crypto_acomp_alloc_streams(&zstd_streams);
+	mutex_unlock(&zstd_stream_lock);
 
-	ctx->dctx = zstd_init_dctx(ctx->dwksp, wksp_size);
-	if (!ctx->dctx) {
-		ret = -EINVAL;
-		goto out_free;
-	}
-out:
 	return ret;
-out_free:
-	vfree(ctx->dwksp);
-	goto out;
 }
 
-static void zstd_comp_exit(struct zstd_ctx *ctx)
+static void zstd_exit(struct crypto_acomp *acomp_tfm)
 {
-	vfree(ctx->cwksp);
-	ctx->cwksp = NULL;
-	ctx->cctx = NULL;
+	crypto_acomp_free_streams(&zstd_streams);
 }
 
-static void zstd_decomp_exit(struct zstd_ctx *ctx)
+static int zstd_compress_one(struct acomp_req *req, struct zstd_ctx *ctx,
+			     const void *src, void *dst, unsigned int *dlen)
 {
-	vfree(ctx->dwksp);
-	ctx->dwksp = NULL;
-	ctx->dctx = NULL;
-}
+	unsigned int out_len;
 
-static int __zstd_init(void *ctx)
-{
-	int ret;
+	ctx->cctx = zstd_init_cctx(ctx->wksp, ctx->wksp_size);
+	if (!ctx->cctx)
+		return -EINVAL;
 
-	ret = zstd_comp_init(ctx);
-	if (ret)
-		return ret;
-	ret = zstd_decomp_init(ctx);
-	if (ret)
-		zstd_comp_exit(ctx);
-	return ret;
+	out_len = zstd_compress_cctx(ctx->cctx, dst, req->dlen, src, req->slen,
+				     &ctx->params);
+	if (zstd_is_error(out_len))
+		return -EINVAL;
+
+	*dlen = out_len;
+
+	return 0;
 }
 
-static void *zstd_alloc_ctx(void)
+static int zstd_compress(struct acomp_req *req)
 {
-	int ret;
+	struct crypto_acomp_stream *s;
+	unsigned int pos, scur, dcur;
+	unsigned int total_out = 0;
+	bool data_available = true;
+	zstd_out_buffer outbuf;
+	struct acomp_walk walk;
+	zstd_in_buffer inbuf;
 	struct zstd_ctx *ctx;
+	size_t pending_bytes;
+	size_t num_bytes;
+	int ret;
 
-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-	if (!ctx)
-		return ERR_PTR(-ENOMEM);
+	s = crypto_acomp_lock_stream_bh(&zstd_streams);
+	ctx = s->ctx;
 
-	ret = __zstd_init(ctx);
-	if (ret) {
-		kfree(ctx);
-		return ERR_PTR(ret);
+	ret = acomp_walk_virt(&walk, req, true);
+	if (ret)
+		goto out;
+
+	ctx->cctx = zstd_init_cstream(&ctx->params, 0, ctx->wksp, ctx->wksp_size);
+	if (!ctx->cctx) {
+		ret = -EINVAL;
+		goto out;
 	}
 
-	return ctx;
-}
+	do {
+		dcur = acomp_walk_next_dst(&walk);
+		if (!dcur) {
+			ret = -ENOSPC;
+			goto out;
+		}
 
-static void __zstd_exit(void *ctx)
-{
-	zstd_comp_exit(ctx);
-	zstd_decomp_exit(ctx);
-}
+		outbuf.pos = 0;
+		outbuf.dst = (u8 *)walk.dst.virt.addr;
+		outbuf.size = dcur;
 
-static void zstd_free_ctx(void *ctx)
-{
-	__zstd_exit(ctx);
-	kfree_sensitive(ctx);
-}
+		do {
+			scur = acomp_walk_next_src(&walk);
+			if (dcur == req->dlen && scur == req->slen) {
+				ret = zstd_compress_one(req, ctx, walk.src.virt.addr,
+							walk.dst.virt.addr, &total_out);
+				acomp_walk_done_src(&walk, scur);
+				acomp_walk_done_dst(&walk, dcur);
+				goto out;
+			}
 
-static int __zstd_compress(const u8 *src, unsigned int slen,
-			   u8 *dst, unsigned int *dlen, void *ctx)
-{
-	size_t out_len;
-	struct zstd_ctx *zctx = ctx;
-	const zstd_parameters params = zstd_params();
+			if (scur) {
+				inbuf.pos = 0;
+				inbuf.src = walk.src.virt.addr;
+				inbuf.size = scur;
+			} else {
+				data_available = false;
+				break;
+			}
 
-	out_len = zstd_compress_cctx(zctx->cctx, dst, *dlen, src, slen, &params);
-	if (zstd_is_error(out_len))
-		return -EINVAL;
-	*dlen = out_len;
-	return 0;
-}
+			num_bytes = zstd_compress_stream(ctx->cctx, &outbuf, &inbuf);
+			if (ZSTD_isError(num_bytes)) {
+				ret = -EIO;
+				goto out;
+			}
 
-static int zstd_scompress(struct crypto_scomp *tfm, const u8 *src,
-			  unsigned int slen, u8 *dst, unsigned int *dlen,
-			  void *ctx)
-{
-	return __zstd_compress(src, slen, dst, dlen, ctx);
+			pending_bytes = zstd_flush_stream(ctx->cctx, &outbuf);
+			if (ZSTD_isError(pending_bytes)) {
+				ret = -EIO;
+				goto out;
+			}
+			acomp_walk_done_src(&walk, inbuf.pos);
+		} while (dcur != outbuf.pos);
+
+		total_out += outbuf.pos;
+		acomp_walk_done_dst(&walk, dcur);
+	} while (data_available);
+
+	pos = outbuf.pos;
+	num_bytes = zstd_end_stream(ctx->cctx, &outbuf);
+	if (ZSTD_isError(num_bytes))
+		ret = -EIO;
+	else
+		total_out += (outbuf.pos - pos);
+
+out:
+	if (ret)
+		req->dlen = 0;
+	else
+		req->dlen = total_out;
+
+	crypto_acomp_unlock_stream_bh(s);
+
+	return ret;
 }
 
-static int __zstd_decompress(const u8 *src, unsigned int slen,
-			     u8 *dst, unsigned int *dlen, void *ctx)
+static int zstd_decompress_one(struct acomp_req *req, struct zstd_ctx *ctx,
+			       const void *src, void *dst, unsigned int *dlen)
 {
 	size_t out_len;
-	struct zstd_ctx *zctx = ctx;
 
-	out_len = zstd_decompress_dctx(zctx->dctx, dst, *dlen, src, slen);
+	ctx->dctx = zstd_init_dctx(ctx->wksp, ctx->wksp_size);
+	if (!ctx->dctx)
+		return -EINVAL;
+
+	out_len = zstd_decompress_dctx(ctx->dctx, dst, req->dlen, src, req->slen);
 	if (zstd_is_error(out_len))
 		return -EINVAL;
+
 	*dlen = out_len;
+
 	return 0;
 }
 
-static int zstd_sdecompress(struct crypto_scomp *tfm, const u8 *src,
-			    unsigned int slen, u8 *dst, unsigned int *dlen,
-			    void *ctx)
+static int zstd_decompress(struct acomp_req *req)
 {
-	return __zstd_decompress(src, slen, dst, dlen, ctx);
-}
+	struct crypto_acomp_stream *s;
+	unsigned int total_out = 0;
+	unsigned int scur, dcur;
+	zstd_out_buffer outbuf;
+	struct acomp_walk walk;
+	zstd_in_buffer inbuf;
+	struct zstd_ctx *ctx;
+	size_t pending_bytes;
+	int ret;
 
-static struct scomp_alg scomp = {
-	.alloc_ctx = zstd_alloc_ctx,
-	.free_ctx = zstd_free_ctx,
-	.compress = zstd_scompress,
-	.decompress = zstd_sdecompress,
-	.base = {
-		.cra_name = "zstd",
-		.cra_driver_name = "zstd-scomp",
-		.cra_module = THIS_MODULE,
+	s = crypto_acomp_lock_stream_bh(&zstd_streams);
+	ctx = s->ctx;
+
+	ret = acomp_walk_virt(&walk, req, true);
+	if (ret)
+		goto out;
+
+	ctx->dctx = zstd_init_dstream(ZSTD_MAX_SIZE, ctx->wksp, ctx->wksp_size);
+	if (!ctx->dctx) {
+		ret = -EINVAL;
+		goto out;
 	}
+
+	do {
+		scur = acomp_walk_next_src(&walk);
+		if (scur) {
+			inbuf.pos = 0;
+			inbuf.size = scur;
+			inbuf.src = walk.src.virt.addr;
+		} else {
+			break;
+		}
+
+		do {
+			dcur = acomp_walk_next_dst(&walk);
+			if (dcur == req->dlen && scur == req->slen) {
+				ret = zstd_decompress_one(req, ctx, walk.src.virt.addr,
+							  walk.dst.virt.addr, &total_out);
+				acomp_walk_done_dst(&walk, dcur);
+				acomp_walk_done_src(&walk, scur);
+				goto out;
+			}
+
+			if (!dcur) {
+				ret = -ENOSPC;
+				goto out;
+			}
+
+			outbuf.pos = 0;
+			outbuf.dst = (u8 *)walk.dst.virt.addr;
+			outbuf.size = dcur;
+
+			pending_bytes = zstd_decompress_stream(ctx->dctx, &outbuf, &inbuf);
+			if (ZSTD_isError(pending_bytes)) {
+				ret = -EIO;
+				goto out;
+			}
+
+			total_out += outbuf.pos;
+
+			acomp_walk_done_dst(&walk, outbuf.pos);
+		} while (inbuf.pos != scur);
+
+		acomp_walk_done_src(&walk, scur);
+	} while (ret == 0);
+
+out:
+	if (ret)
+		req->dlen = 0;
+	else
+		req->dlen = total_out;
+
+	crypto_acomp_unlock_stream_bh(s);
+
+	return ret;
+}
+
+static struct acomp_alg zstd_acomp = {
+	.base = {
+		.cra_name = "zstd",
+		.cra_driver_name = "zstd-generic",
+		.cra_flags = CRYPTO_ALG_REQ_VIRT,
+		.cra_module = THIS_MODULE,
+	},
+	.init = zstd_init,
+	.exit = zstd_exit,
+	.compress = zstd_compress,
+	.decompress = zstd_decompress,
 };
 
 static int __init zstd_mod_init(void)
 {
-	return crypto_register_scomp(&scomp);
+	return crypto_register_acomp(&zstd_acomp);
 }
 
 static void __exit zstd_mod_fini(void)
 {
-	crypto_unregister_scomp(&scomp);
+	crypto_unregister_acomp(&zstd_acomp);
 }
 
 module_init(zstd_mod_init);
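With zstd registered as a native acomp (virtual addresses supported via CRYPTO_ALG_REQ_VIRT), callers go through the usual acompress request API. A minimal synchronous user as a sketch; try_zstd() is hypothetical and error handling is trimmed to the essentials:

	#include <crypto/acompress.h>

	static int try_zstd(struct scatterlist *src, unsigned int slen,
			    struct scatterlist *dst, unsigned int dlen)
	{
		struct crypto_acomp *tfm;
		struct acomp_req *req;
		DECLARE_CRYPTO_WAIT(wait);
		int ret;

		tfm = crypto_alloc_acomp("zstd", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = acomp_request_alloc(tfm);
		if (!req) {
			crypto_free_acomp(tfm);
			return -ENOMEM;
		}

		acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   crypto_req_done, &wait);
		acomp_request_set_params(req, src, dst, slen, dlen);

		/* On success, req->dlen holds the compressed length. */
		ret = crypto_wait_req(crypto_acomp_compress(req), &wait);

		acomp_request_free(req);
		crypto_free_acomp(tfm);
		return ret;
	}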