Diffstat (limited to 'crypto/skcipher.c')
-rw-r--r--  crypto/skcipher.c | 588
1 file changed, 36 insertions(+), 552 deletions(-)
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index bc70e159d27d..de5fc91bba26 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -17,462 +17,40 @@
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <net/netlink.h>
#include "skcipher.h"
#define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e
-enum {
- SKCIPHER_WALK_PHYS = 1 << 0,
- SKCIPHER_WALK_SLOW = 1 << 1,
- SKCIPHER_WALK_COPY = 1 << 2,
- SKCIPHER_WALK_DIFF = 1 << 3,
- SKCIPHER_WALK_SLEEP = 1 << 4,
-};
-
-struct skcipher_walk_buffer {
- struct list_head entry;
- struct scatter_walk dst;
- unsigned int len;
- u8 *data;
- u8 buffer[];
-};
-
static const struct crypto_type crypto_skcipher_type;
-static int skcipher_walk_next(struct skcipher_walk *walk);
-
-static inline void skcipher_map_src(struct skcipher_walk *walk)
-{
- walk->src.virt.addr = scatterwalk_map(&walk->in);
-}
-
-static inline void skcipher_map_dst(struct skcipher_walk *walk)
-{
- walk->dst.virt.addr = scatterwalk_map(&walk->out);
-}
-
-static inline void skcipher_unmap_src(struct skcipher_walk *walk)
-{
- scatterwalk_unmap(walk->src.virt.addr);
-}
-
-static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
-{
- scatterwalk_unmap(walk->dst.virt.addr);
-}
-
-static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
-{
- return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
-}
-
-/* Get a spot of the specified length that does not straddle a page.
- * The caller needs to ensure that there is enough space for this operation.
- */
-static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
-{
- u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
-
- return max(start, end_page);
-}
-
static inline struct skcipher_alg *__crypto_skcipher_alg(
struct crypto_alg *alg)
{
return container_of(alg, struct skcipher_alg, base);
}
-static inline struct crypto_istat_cipher *skcipher_get_stat(
- struct skcipher_alg *alg)
-{
- return skcipher_get_stat_common(&alg->co);
-}
-
-static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err)
-{
- struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
-
- if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
- return err;
-
- if (err && err != -EINPROGRESS && err != -EBUSY)
- atomic64_inc(&istat->err_cnt);
-
- return err;
-}
-
-static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
-{
- u8 *addr;
-
- addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
- addr = skcipher_get_spot(addr, bsize);
- scatterwalk_copychunks(addr, &walk->out, bsize,
- (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
- return 0;
-}
-
-int skcipher_walk_done(struct skcipher_walk *walk, int err)
-{
- unsigned int n = walk->nbytes;
- unsigned int nbytes = 0;
-
- if (!n)
- goto finish;
-
- if (likely(err >= 0)) {
- n -= err;
- nbytes = walk->total - n;
- }
-
- if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
- SKCIPHER_WALK_SLOW |
- SKCIPHER_WALK_COPY |
- SKCIPHER_WALK_DIFF)))) {
-unmap_src:
- skcipher_unmap_src(walk);
- } else if (walk->flags & SKCIPHER_WALK_DIFF) {
- skcipher_unmap_dst(walk);
- goto unmap_src;
- } else if (walk->flags & SKCIPHER_WALK_COPY) {
- skcipher_map_dst(walk);
- memcpy(walk->dst.virt.addr, walk->page, n);
- skcipher_unmap_dst(walk);
- } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
- if (err > 0) {
- /*
- * Didn't process all bytes. Either the algorithm is
- * broken, or this was the last step and it turned out
- * the message wasn't evenly divisible into blocks but
- * the algorithm requires it.
- */
- err = -EINVAL;
- nbytes = 0;
- } else
- n = skcipher_done_slow(walk, n);
- }
-
- if (err > 0)
- err = 0;
-
- walk->total = nbytes;
- walk->nbytes = 0;
-
- scatterwalk_advance(&walk->in, n);
- scatterwalk_advance(&walk->out, n);
- scatterwalk_done(&walk->in, 0, nbytes);
- scatterwalk_done(&walk->out, 1, nbytes);
-
- if (nbytes) {
- crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
- CRYPTO_TFM_REQ_MAY_SLEEP : 0);
- return skcipher_walk_next(walk);
- }
-
-finish:
- /* Short-circuit for the common/fast path. */
- if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
- goto out;
-
- if (walk->flags & SKCIPHER_WALK_PHYS)
- goto out;
-
- if (walk->iv != walk->oiv)
- memcpy(walk->oiv, walk->iv, walk->ivsize);
- if (walk->buffer != walk->page)
- kfree(walk->buffer);
- if (walk->page)
- free_page((unsigned long)walk->page);
-
-out:
- return err;
-}
-EXPORT_SYMBOL_GPL(skcipher_walk_done);
-
-void skcipher_walk_complete(struct skcipher_walk *walk, int err)
-{
- struct skcipher_walk_buffer *p, *tmp;
-
- list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
- u8 *data;
-
- if (err)
- goto done;
-
- data = p->data;
- if (!data) {
- data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
- data = skcipher_get_spot(data, walk->stride);
- }
-
- scatterwalk_copychunks(data, &p->dst, p->len, 1);
-
- if (offset_in_page(p->data) + p->len + walk->stride >
- PAGE_SIZE)
- free_page((unsigned long)p->data);
-
-done:
- list_del(&p->entry);
- kfree(p);
- }
-
- if (!err && walk->iv != walk->oiv)
- memcpy(walk->oiv, walk->iv, walk->ivsize);
- if (walk->buffer != walk->page)
- kfree(walk->buffer);
- if (walk->page)
- free_page((unsigned long)walk->page);
-}
-EXPORT_SYMBOL_GPL(skcipher_walk_complete);
-
-static void skcipher_queue_write(struct skcipher_walk *walk,
- struct skcipher_walk_buffer *p)
-{
- p->dst = walk->out;
- list_add_tail(&p->entry, &walk->buffers);
-}
-
-static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
-{
- bool phys = walk->flags & SKCIPHER_WALK_PHYS;
- unsigned alignmask = walk->alignmask;
- struct skcipher_walk_buffer *p;
- unsigned a;
- unsigned n;
- u8 *buffer;
- void *v;
-
- if (!phys) {
- if (!walk->buffer)
- walk->buffer = walk->page;
- buffer = walk->buffer;
- if (buffer)
- goto ok;
- }
-
- /* Start with the minimum alignment of kmalloc. */
- a = crypto_tfm_ctx_alignment() - 1;
- n = bsize;
-
- if (phys) {
- /* Calculate the minimum alignment of p->buffer. */
- a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
- n += sizeof(*p);
- }
-
- /* Minimum size to align p->buffer by alignmask. */
- n += alignmask & ~a;
-
- /* Minimum size to ensure p->buffer does not straddle a page. */
- n += (bsize - 1) & ~(alignmask | a);
-
- v = kzalloc(n, skcipher_walk_gfp(walk));
- if (!v)
- return skcipher_walk_done(walk, -ENOMEM);
-
- if (phys) {
- p = v;
- p->len = bsize;
- skcipher_queue_write(walk, p);
- buffer = p->buffer;
- } else {
- walk->buffer = v;
- buffer = v;
- }
-
-ok:
- walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
- walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
- walk->src.virt.addr = walk->dst.virt.addr;
-
- scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
-
- walk->nbytes = bsize;
- walk->flags |= SKCIPHER_WALK_SLOW;
-
- return 0;
-}
-
-static int skcipher_next_copy(struct skcipher_walk *walk)
-{
- struct skcipher_walk_buffer *p;
- u8 *tmp = walk->page;
-
- skcipher_map_src(walk);
- memcpy(tmp, walk->src.virt.addr, walk->nbytes);
- skcipher_unmap_src(walk);
-
- walk->src.virt.addr = tmp;
- walk->dst.virt.addr = tmp;
-
- if (!(walk->flags & SKCIPHER_WALK_PHYS))
- return 0;
-
- p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
- if (!p)
- return -ENOMEM;
-
- p->data = walk->page;
- p->len = walk->nbytes;
- skcipher_queue_write(walk, p);
-
- if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
- PAGE_SIZE)
- walk->page = NULL;
- else
- walk->page += walk->nbytes;
-
- return 0;
-}
-
-static int skcipher_next_fast(struct skcipher_walk *walk)
-{
- unsigned long diff;
-
- walk->src.phys.page = scatterwalk_page(&walk->in);
- walk->src.phys.offset = offset_in_page(walk->in.offset);
- walk->dst.phys.page = scatterwalk_page(&walk->out);
- walk->dst.phys.offset = offset_in_page(walk->out.offset);
-
- if (walk->flags & SKCIPHER_WALK_PHYS)
- return 0;
-
- diff = walk->src.phys.offset - walk->dst.phys.offset;
- diff |= walk->src.virt.page - walk->dst.virt.page;
-
- skcipher_map_src(walk);
- walk->dst.virt.addr = walk->src.virt.addr;
-
- if (diff) {
- walk->flags |= SKCIPHER_WALK_DIFF;
- skcipher_map_dst(walk);
- }
-
- return 0;
-}
-
-static int skcipher_walk_next(struct skcipher_walk *walk)
+int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
+ struct skcipher_request *__restrict req, bool atomic)
{
- unsigned int bsize;
- unsigned int n;
- int err;
-
- walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
- SKCIPHER_WALK_DIFF);
-
- n = walk->total;
- bsize = min(walk->stride, max(n, walk->blocksize));
- n = scatterwalk_clamp(&walk->in, n);
- n = scatterwalk_clamp(&walk->out, n);
-
- if (unlikely(n < bsize)) {
- if (unlikely(walk->total < walk->blocksize))
- return skcipher_walk_done(walk, -EINVAL);
-
-slow_path:
- err = skcipher_next_slow(walk, bsize);
- goto set_phys_lowmem;
- }
-
- if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
- if (!walk->page) {
- gfp_t gfp = skcipher_walk_gfp(walk);
-
- walk->page = (void *)__get_free_page(gfp);
- if (!walk->page)
- goto slow_path;
- }
-
- walk->nbytes = min_t(unsigned, n,
- PAGE_SIZE - offset_in_page(walk->page));
- walk->flags |= SKCIPHER_WALK_COPY;
- err = skcipher_next_copy(walk);
- goto set_phys_lowmem;
- }
-
- walk->nbytes = n;
-
- return skcipher_next_fast(walk);
-
-set_phys_lowmem:
- if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
- walk->src.phys.page = virt_to_page(walk->src.virt.addr);
- walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
- walk->src.phys.offset &= PAGE_SIZE - 1;
- walk->dst.phys.offset &= PAGE_SIZE - 1;
- }
- return err;
-}
-
-static int skcipher_copy_iv(struct skcipher_walk *walk)
-{
- unsigned a = crypto_tfm_ctx_alignment() - 1;
- unsigned alignmask = walk->alignmask;
- unsigned ivsize = walk->ivsize;
- unsigned bs = walk->stride;
- unsigned aligned_bs;
- unsigned size;
- u8 *iv;
-
- aligned_bs = ALIGN(bs, alignmask + 1);
-
- /* Minimum size to align buffer by alignmask. */
- size = alignmask & ~a;
-
- if (walk->flags & SKCIPHER_WALK_PHYS)
- size += ivsize;
- else {
- size += aligned_bs + ivsize;
-
- /* Minimum size to ensure buffer does not straddle a page. */
- size += (bs - 1) & ~(alignmask | a);
- }
-
- walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
- if (!walk->buffer)
- return -ENOMEM;
-
- iv = PTR_ALIGN(walk->buffer, alignmask + 1);
- iv = skcipher_get_spot(iv, bs) + aligned_bs;
-
- walk->iv = memcpy(iv, walk->iv, walk->ivsize);
- return 0;
-}
-
-static int skcipher_walk_first(struct skcipher_walk *walk)
-{
- if (WARN_ON_ONCE(in_hardirq()))
- return -EDEADLK;
-
- walk->buffer = NULL;
- if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
- int err = skcipher_copy_iv(walk);
- if (err)
- return err;
- }
-
- walk->page = NULL;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_alg *alg;
- return skcipher_walk_next(walk);
-}
+ might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
-static int skcipher_walk_skcipher(struct skcipher_walk *walk,
- struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ alg = crypto_skcipher_alg(tfm);
walk->total = req->cryptlen;
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP))
+ atomic = true;
if (unlikely(!walk->total))
return 0;
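With the SKCIPHER_WALK_SLEEP flag gone, the MAY_SLEEP check above folds the sleep/atomic decision into the `atomic` argument that skcipher_walk_first() now receives. The allocation-context consequence is roughly this (a sketch of the idea only; the walk_gfp() helper name is hypothetical, not part of the patch):

	static inline gfp_t walk_gfp(bool atomic)
	{
		/* Sleeping allocations only when the request permits it. */
		return atomic ? GFP_ATOMIC : GFP_KERNEL;
	}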
@@ -480,10 +58,6 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
scatterwalk_start(&walk->in, req->src);
scatterwalk_start(&walk->out, req->dst);
- walk->flags &= ~SKCIPHER_WALK_SLEEP;
- walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
- SKCIPHER_WALK_SLEEP : 0;
-
walk->blocksize = crypto_skcipher_blocksize(tfm);
walk->ivsize = crypto_skcipher_ivsize(tfm);
walk->alignmask = crypto_skcipher_alignmask(tfm);
@@ -493,81 +67,39 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
else
walk->stride = alg->walksize;
- return skcipher_walk_first(walk);
-}
-
-int skcipher_walk_virt(struct skcipher_walk *walk,
- struct skcipher_request *req, bool atomic)
-{
- int err;
-
- might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
-
- walk->flags &= ~SKCIPHER_WALK_PHYS;
-
- err = skcipher_walk_skcipher(walk, req);
-
- walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;
-
- return err;
+ return skcipher_walk_first(walk, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
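For context, callers drive the reshaped skcipher_walk_virt() the same way as before. A minimal sketch of the standard driver loop (my_cipher_blocks() is a placeholder for a real block-cipher routine):

	static int my_encrypt(struct skcipher_request *req)
	{
		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
		unsigned int bsize = crypto_skcipher_blocksize(tfm);
		struct skcipher_walk walk;
		int err;

		err = skcipher_walk_virt(&walk, req, false);
		while (walk.nbytes) {
			/* Process whole blocks; hand any tail back to the walk. */
			unsigned int n = round_down(walk.nbytes, bsize);

			my_cipher_blocks(walk.dst.virt.addr, walk.src.virt.addr, n);
			err = skcipher_walk_done(&walk, walk.nbytes - n);
		}
		return err;
	}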
-int skcipher_walk_async(struct skcipher_walk *walk,
- struct skcipher_request *req)
-{
- walk->flags |= SKCIPHER_WALK_PHYS;
-
- INIT_LIST_HEAD(&walk->buffers);
-
- return skcipher_walk_skcipher(walk, req);
-}
-EXPORT_SYMBOL_GPL(skcipher_walk_async);
-
-static int skcipher_walk_aead_common(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic)
+static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk,
+ struct aead_request *__restrict req,
+ bool atomic)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- int err;
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP))
+ atomic = true;
if (unlikely(!walk->total))
return 0;
- walk->flags &= ~SKCIPHER_WALK_PHYS;
-
- scatterwalk_start(&walk->in, req->src);
- scatterwalk_start(&walk->out, req->dst);
-
- scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
- scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
-
- scatterwalk_done(&walk->in, 0, walk->total);
- scatterwalk_done(&walk->out, 0, walk->total);
-
- if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
- walk->flags |= SKCIPHER_WALK_SLEEP;
- else
- walk->flags &= ~SKCIPHER_WALK_SLEEP;
+ scatterwalk_start_at_pos(&walk->in, req->src, req->assoclen);
+ scatterwalk_start_at_pos(&walk->out, req->dst, req->assoclen);
walk->blocksize = crypto_aead_blocksize(tfm);
walk->stride = crypto_aead_chunksize(tfm);
walk->ivsize = crypto_aead_ivsize(tfm);
walk->alignmask = crypto_aead_alignmask(tfm);
- err = skcipher_walk_first(walk);
-
- if (atomic)
- walk->flags &= ~SKCIPHER_WALK_SLEEP;
-
- return err;
+ return skcipher_walk_first(walk, atomic);
}
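scatterwalk_start_at_pos() replaces the old start/copychunks/done sequence for skipping the AAD. Roughly, it positions the walk `pos` bytes into the scatterlist (a sketch of the behaviour under that assumption, not the kernel's exact definition; see include/crypto/scatterwalk.h):

	static inline void start_at_pos_sketch(struct scatter_walk *walk,
					       struct scatterlist *sg,
					       unsigned int pos)
	{
		/* Skip scatterlist entries wholly covered by pos... */
		while (pos > sg->length) {
			pos -= sg->length;
			sg = sg_next(sg);
		}
		/* ...then begin the walk partway into the remaining entry. */
		scatterwalk_start(walk, sg);
		walk->offset += pos;
	}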
-int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic)
+int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk,
+ struct aead_request *__restrict req,
+ bool atomic)
{
walk->total = req->cryptlen;
@@ -575,8 +107,9 @@ int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);
-int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic)
+int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk,
+ struct aead_request *__restrict req,
+ bool atomic)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -654,23 +187,12 @@ int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- int ret;
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
-
- atomic64_inc(&istat->encrypt_cnt);
- atomic64_add(req->cryptlen, &istat->encrypt_tlen);
- }
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- ret = -ENOKEY;
- else if (alg->co.base.cra_type != &crypto_skcipher_type)
- ret = crypto_lskcipher_encrypt_sg(req);
- else
- ret = alg->encrypt(req);
-
- return crypto_skcipher_errstat(alg, ret);
+ return -ENOKEY;
+ if (alg->co.base.cra_type != &crypto_skcipher_type)
+ return crypto_lskcipher_encrypt_sg(req);
+ return alg->encrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);
@@ -678,23 +200,12 @@ int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- int ret;
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
-
- atomic64_inc(&istat->decrypt_cnt);
- atomic64_add(req->cryptlen, &istat->decrypt_tlen);
- }
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- ret = -ENOKEY;
- else if (alg->co.base.cra_type != &crypto_skcipher_type)
- ret = crypto_lskcipher_decrypt_sg(req);
- else
- ret = alg->decrypt(req);
-
- return crypto_skcipher_errstat(alg, ret);
+ return -ENOKEY;
+ if (alg->co.base.cra_type != &crypto_skcipher_type)
+ return crypto_lskcipher_decrypt_sg(req);
+ return alg->decrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
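Both entry points are now thin dispatchers: unkeyed tfms get -ENOKEY, lskcipher-backed algorithms route to the *_sg() helpers, and everything else calls the algorithm directly. A caller still goes through the usual request API; a minimal sketch (error handling trimmed, "cbc(aes)" and the 64-byte buffer chosen arbitrarily):

	static int demo_encrypt(void)
	{
		static const u8 key[16];	/* all-zero demo key */
		u8 iv[16] = {}, buf[64] = {};
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		struct scatterlist sg;
		int err;

		tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);
		err = crypto_skcipher_setkey(tfm, key, sizeof(key));

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		sg_init_one(&sg, buf, sizeof(buf));
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      NULL, NULL);
		skcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);

		err = crypto_skcipher_encrypt(req);	/* -ENOKEY without setkey */

		skcipher_request_free(req);
		crypto_free_skcipher(tfm);
		return err;
	}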
@@ -816,7 +327,7 @@ static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "type : skcipher\n");
seq_printf(m, "async : %s\n",
- alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
+ str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC));
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "min keysize : %u\n", skcipher->min_keysize);
seq_printf(m, "max keysize : %u\n", skcipher->max_keysize);
@@ -846,28 +357,6 @@ static int __maybe_unused crypto_skcipher_report(
sizeof(rblkcipher), &rblkcipher);
}
-static int __maybe_unused crypto_skcipher_report_stat(
- struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
- struct crypto_istat_cipher *istat;
- struct crypto_stat_cipher rcipher;
-
- istat = skcipher_get_stat(skcipher);
-
- memset(&rcipher, 0, sizeof(rcipher));
-
- strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
-
- rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
- rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
- rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
- rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
- rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
-
- return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
-}
-
static const struct crypto_type crypto_skcipher_type = {
.extsize = crypto_skcipher_extsize,
.init_tfm = crypto_skcipher_init_tfm,
@@ -878,13 +367,11 @@ static const struct crypto_type crypto_skcipher_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_skcipher_report,
#endif
-#ifdef CONFIG_CRYPTO_STATS
- .report_stat = crypto_skcipher_report_stat,
-#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.tfmsize = offsetof(struct crypto_skcipher, base),
+ .algsize = offsetof(struct skcipher_alg, base),
};
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
@@ -910,6 +397,7 @@ struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
/* Only sync algorithms allowed. */
mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;
+ type &= ~(CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE);
tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
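The added line matters because the tfm lookup matches an algorithm only when its flags agree with `type` on every bit set in `mask`; clearing ASYNC and REQSIZE_LARGE from `type`, while keeping them set in `mask`, therefore guarantees that only synchronous, small-request-size algorithms can match, whatever the caller passed in. Roughly the test in crypto/api.c:

	/* Sketch of the lookup's flag check, not a verbatim quote. */
	bool matches = !((alg->cra_flags ^ type) & mask);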
@@ -935,7 +423,6 @@ EXPORT_SYMBOL_GPL(crypto_has_skcipher);
int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
{
- struct crypto_istat_cipher *istat = skcipher_get_stat_common(alg);
struct crypto_alg *base = &alg->base;
if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
@@ -948,9 +435,6 @@ int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- memset(istat, 0, sizeof(*istat));
-
return 0;
}
@@ -1155,4 +639,4 @@ EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");