Diffstat (limited to 'crypto/skcipher.c')
 crypto/skcipher.c | 649 ++++++++++++++++++++++++-----------------------------
 1 file changed, 279 insertions(+), 370 deletions(-)
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 0ecab31cfe79..14a820cb06c7 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -15,147 +15,111 @@
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
-#include <linux/compiler.h>
-#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/string_choices.h>
#include <net/netlink.h>
+#include "skcipher.h"
-#include "internal.h"
+#define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e
enum {
- SKCIPHER_WALK_PHYS = 1 << 0,
- SKCIPHER_WALK_SLOW = 1 << 1,
- SKCIPHER_WALK_COPY = 1 << 2,
- SKCIPHER_WALK_DIFF = 1 << 3,
- SKCIPHER_WALK_SLEEP = 1 << 4,
+ SKCIPHER_WALK_SLOW = 1 << 0,
+ SKCIPHER_WALK_COPY = 1 << 1,
+ SKCIPHER_WALK_DIFF = 1 << 2,
+ SKCIPHER_WALK_SLEEP = 1 << 3,
};
-struct skcipher_walk_buffer {
- struct list_head entry;
- struct scatter_walk dst;
- unsigned int len;
- u8 *data;
- u8 buffer[];
-};
+static const struct crypto_type crypto_skcipher_type;
static int skcipher_walk_next(struct skcipher_walk *walk);
-static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
-{
- if (PageHighMem(scatterwalk_page(walk)))
- kunmap_atomic(vaddr);
-}
-
-static inline void *skcipher_map(struct scatter_walk *walk)
-{
- struct page *page = scatterwalk_page(walk);
-
- return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
- offset_in_page(walk->offset);
-}
-
-static inline void skcipher_map_src(struct skcipher_walk *walk)
-{
- walk->src.virt.addr = skcipher_map(&walk->in);
-}
-
-static inline void skcipher_map_dst(struct skcipher_walk *walk)
-{
- walk->dst.virt.addr = skcipher_map(&walk->out);
-}
-
-static inline void skcipher_unmap_src(struct skcipher_walk *walk)
-{
- skcipher_unmap(&walk->in, walk->src.virt.addr);
-}
-
-static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
-{
- skcipher_unmap(&walk->out, walk->dst.virt.addr);
-}
-
static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}
-/* Get a spot of the specified length that does not straddle a page.
- * The caller needs to ensure that there is enough space for this operation.
- */
-static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
+static inline struct skcipher_alg *__crypto_skcipher_alg(
+ struct crypto_alg *alg)
{
- u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
-
- return max(start, end_page);
+ return container_of(alg, struct skcipher_alg, base);
}
-static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
-{
- u8 *addr;
-
- addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
- addr = skcipher_get_spot(addr, bsize);
- scatterwalk_copychunks(addr, &walk->out, bsize,
- (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
- return 0;
-}
-
-int skcipher_walk_done(struct skcipher_walk *walk, int err)
+/**
+ * skcipher_walk_done() - finish one step of a skcipher_walk
+ * @walk: the skcipher_walk
+ * @res: number of bytes *not* processed (>= 0) from walk->nbytes,
+ * or a -errno value to terminate the walk due to an error
+ *
+ * This function cleans up after one step of walking through the source and
+ * destination scatterlists, and advances to the next step if applicable.
+ * walk->nbytes is set to the number of bytes available in the next step,
+ * walk->total is set to the new total number of bytes remaining, and
+ * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there
+ * is no more data, or if an error occurred (i.e. -errno return), then
+ * walk->nbytes and walk->total are set to 0 and all resources owned by the
+ * skcipher_walk are freed.
+ *
+ * Return: 0 or a -errno value. If @res was a -errno value then it will be
+ * returned, but other errors may occur too.
+ */
+int skcipher_walk_done(struct skcipher_walk *walk, int res)
{
- unsigned int n = walk->nbytes;
- unsigned int nbytes = 0;
+ unsigned int n = walk->nbytes; /* num bytes processed this step */
+ unsigned int total = 0; /* new total remaining */
if (!n)
goto finish;
- if (likely(err >= 0)) {
- n -= err;
- nbytes = walk->total - n;
+ if (likely(res >= 0)) {
+ n -= res; /* subtract num bytes *not* processed */
+ total = walk->total - n;
}
- if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
- SKCIPHER_WALK_SLOW |
+ if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
SKCIPHER_WALK_COPY |
SKCIPHER_WALK_DIFF)))) {
-unmap_src:
- skcipher_unmap_src(walk);
+ scatterwalk_advance(&walk->in, n);
} else if (walk->flags & SKCIPHER_WALK_DIFF) {
- skcipher_unmap_dst(walk);
- goto unmap_src;
+ scatterwalk_done_src(&walk->in, n);
} else if (walk->flags & SKCIPHER_WALK_COPY) {
- skcipher_map_dst(walk);
- memcpy(walk->dst.virt.addr, walk->page, n);
- skcipher_unmap_dst(walk);
- } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
- if (err > 0) {
+ scatterwalk_advance(&walk->in, n);
+ scatterwalk_map(&walk->out);
+ memcpy(walk->out.addr, walk->page, n);
+ } else { /* SKCIPHER_WALK_SLOW */
+ if (res > 0) {
/*
* Didn't process all bytes. Either the algorithm is
* broken, or this was the last step and it turned out
* the message wasn't evenly divisible into blocks but
* the algorithm requires it.
*/
- err = -EINVAL;
- nbytes = 0;
+ res = -EINVAL;
+ total = 0;
} else
- n = skcipher_done_slow(walk, n);
+ memcpy_to_scatterwalk(&walk->out, walk->out.addr, n);
+ goto dst_done;
}
- if (err > 0)
- err = 0;
+ scatterwalk_done_dst(&walk->out, n);
+dst_done:
- walk->total = nbytes;
- walk->nbytes = 0;
+ if (res > 0)
+ res = 0;
- scatterwalk_advance(&walk->in, n);
- scatterwalk_advance(&walk->out, n);
- scatterwalk_done(&walk->in, 0, nbytes);
- scatterwalk_done(&walk->out, 1, nbytes);
+ walk->total = total;
+ walk->nbytes = 0;
- if (nbytes) {
- crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
- CRYPTO_TFM_REQ_MAY_SLEEP : 0);
+ if (total) {
+ if (walk->flags & SKCIPHER_WALK_SLEEP)
+ cond_resched();
+ walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF);
return skcipher_walk_next(walk);
}
@@ -164,9 +128,6 @@ finish:
if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
goto out;
- if (walk->flags & SKCIPHER_WALK_PHYS)
- goto out;
-
if (walk->iv != walk->oiv)
memcpy(walk->oiv, walk->iv, walk->ivsize);
if (walk->buffer != walk->page)
@@ -175,107 +136,33 @@ finish:
free_page((unsigned long)walk->page);
out:
- return err;
+ return res;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);
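
[Editor's note] For context, a minimal sketch of how a driver typically
consumes this walk API; my_cipher_blocks() and MY_BLOCK_SIZE (a power of
two) are hypothetical, and error handling is trimmed:

	static int my_encrypt(struct skcipher_request *req)
	{
		struct skcipher_walk walk;
		int err;

		err = skcipher_walk_virt(&walk, req, false);
		while (walk.nbytes) {
			unsigned int n = walk.nbytes;

			/* All but the final step must be whole blocks. */
			if (walk.nbytes < walk.total)
				n = round_down(n, MY_BLOCK_SIZE);
			my_cipher_blocks(walk.src.virt.addr,
					 walk.dst.virt.addr, n);
			/* Report the number of bytes *not* processed. */
			err = skcipher_walk_done(&walk, walk.nbytes - n);
		}
		return err;
	}
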
-void skcipher_walk_complete(struct skcipher_walk *walk, int err)
-{
- struct skcipher_walk_buffer *p, *tmp;
-
- list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
- u8 *data;
-
- if (err)
- goto done;
-
- data = p->data;
- if (!data) {
- data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
- data = skcipher_get_spot(data, walk->stride);
- }
-
- scatterwalk_copychunks(data, &p->dst, p->len, 1);
-
- if (offset_in_page(p->data) + p->len + walk->stride >
- PAGE_SIZE)
- free_page((unsigned long)p->data);
-
-done:
- list_del(&p->entry);
- kfree(p);
- }
-
- if (!err && walk->iv != walk->oiv)
- memcpy(walk->oiv, walk->iv, walk->ivsize);
- if (walk->buffer != walk->page)
- kfree(walk->buffer);
- if (walk->page)
- free_page((unsigned long)walk->page);
-}
-EXPORT_SYMBOL_GPL(skcipher_walk_complete);
-
-static void skcipher_queue_write(struct skcipher_walk *walk,
- struct skcipher_walk_buffer *p)
-{
- p->dst = walk->out;
- list_add_tail(&p->entry, &walk->buffers);
-}
-
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
- bool phys = walk->flags & SKCIPHER_WALK_PHYS;
unsigned alignmask = walk->alignmask;
- struct skcipher_walk_buffer *p;
- unsigned a;
unsigned n;
- u8 *buffer;
- void *v;
-
- if (!phys) {
- if (!walk->buffer)
- walk->buffer = walk->page;
- buffer = walk->buffer;
- if (buffer)
- goto ok;
- }
-
- /* Start with the minimum alignment of kmalloc. */
- a = crypto_tfm_ctx_alignment() - 1;
- n = bsize;
+ void *buffer;
- if (phys) {
- /* Calculate the minimum alignment of p->buffer. */
- a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
- n += sizeof(*p);
- }
-
- /* Minimum size to align p->buffer by alignmask. */
- n += alignmask & ~a;
-
- /* Minimum size to ensure p->buffer does not straddle a page. */
- n += (bsize - 1) & ~(alignmask | a);
-
- v = kzalloc(n, skcipher_walk_gfp(walk));
- if (!v)
- return skcipher_walk_done(walk, -ENOMEM);
-
- if (phys) {
- p = v;
- p->len = bsize;
- skcipher_queue_write(walk, p);
- buffer = p->buffer;
- } else {
- walk->buffer = v;
- buffer = v;
+ if (!walk->buffer)
+ walk->buffer = walk->page;
+ buffer = walk->buffer;
+ if (!buffer) {
+ /* Min size for a buffer of bsize bytes aligned to alignmask */
+ n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+
+ buffer = kzalloc(n, skcipher_walk_gfp(walk));
+ if (!buffer)
+ return skcipher_walk_done(walk, -ENOMEM);
+ walk->buffer = buffer;
}
-ok:
- walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
- walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
- walk->src.virt.addr = walk->dst.virt.addr;
-
- scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
+ buffer = PTR_ALIGN(buffer, alignmask + 1);
+ memcpy_from_scatterwalk(buffer, &walk->in, bsize);
+ walk->out.__addr = buffer;
+ walk->in.__addr = walk->out.addr;
walk->nbytes = bsize;
walk->flags |= SKCIPHER_WALK_SLOW;
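
[Editor's note] A worked example of the bounce-buffer sizing above,
assuming alignmask = 15 (a cipher wanting 16-byte-aligned data) and
crypto_tfm_ctx_alignment() = 8: n = bsize + (15 & ~7) = bsize + 8. Since
kzalloc() returns memory aligned to at least the context alignment
(assumed 8 here), PTR_ALIGN(buffer, 16) skips at most 8 bytes, so at
least bsize usable bytes always remain in the allocation.
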
@@ -285,33 +172,18 @@ ok:
static int skcipher_next_copy(struct skcipher_walk *walk)
{
- struct skcipher_walk_buffer *p;
- u8 *tmp = walk->page;
-
- skcipher_map_src(walk);
- memcpy(tmp, walk->src.virt.addr, walk->nbytes);
- skcipher_unmap_src(walk);
-
- walk->src.virt.addr = tmp;
- walk->dst.virt.addr = tmp;
-
- if (!(walk->flags & SKCIPHER_WALK_PHYS))
- return 0;
-
- p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
- if (!p)
- return -ENOMEM;
-
- p->data = walk->page;
- p->len = walk->nbytes;
- skcipher_queue_write(walk, p);
+ void *tmp = walk->page;
- if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
- PAGE_SIZE)
- walk->page = NULL;
- else
- walk->page += walk->nbytes;
+ scatterwalk_map(&walk->in);
+ memcpy(tmp, walk->in.addr, walk->nbytes);
+ scatterwalk_unmap(&walk->in);
+ /*
+ * walk->in is advanced later when the number of bytes actually
+ * processed (which might be less than walk->nbytes) is known.
+ */
+ walk->in.__addr = tmp;
+ walk->out.__addr = tmp;
return 0;
}
@@ -319,23 +191,17 @@ static int skcipher_next_fast(struct skcipher_walk *walk)
{
unsigned long diff;
- walk->src.phys.page = scatterwalk_page(&walk->in);
- walk->src.phys.offset = offset_in_page(walk->in.offset);
- walk->dst.phys.page = scatterwalk_page(&walk->out);
- walk->dst.phys.offset = offset_in_page(walk->out.offset);
-
- if (walk->flags & SKCIPHER_WALK_PHYS)
- return 0;
-
- diff = walk->src.phys.offset - walk->dst.phys.offset;
- diff |= walk->src.virt.page - walk->dst.virt.page;
+ diff = offset_in_page(walk->in.offset) -
+ offset_in_page(walk->out.offset);
+ diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) -
+ (u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT));
- skcipher_map_src(walk);
- walk->dst.virt.addr = walk->src.virt.addr;
+ scatterwalk_map(&walk->out);
+ walk->in.__addr = walk->out.__addr;
if (diff) {
walk->flags |= SKCIPHER_WALK_DIFF;
- skcipher_map_dst(walk);
+ scatterwalk_map(&walk->in);
}
return 0;
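
[Editor's note] The diff computation above is an in-place test: diff is
zero only when source and destination start at the same offset within the
same page, e.g. when req->src == req->dst. In that case mapping the
destination once serves both pointers; any nonzero bit in either term
sets SKCIPHER_WALK_DIFF instead, and the source is mapped separately.
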
@@ -345,10 +211,6 @@ static int skcipher_walk_next(struct skcipher_walk *walk)
{
unsigned int bsize;
unsigned int n;
- int err;
-
- walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
- SKCIPHER_WALK_DIFF);
n = walk->total;
bsize = min(walk->stride, max(n, walk->blocksize));
@@ -360,9 +222,9 @@ static int skcipher_walk_next(struct skcipher_walk *walk)
return skcipher_walk_done(walk, -EINVAL);
slow_path:
- err = skcipher_next_slow(walk, bsize);
- goto set_phys_lowmem;
+ return skcipher_next_slow(walk, bsize);
}
+ walk->nbytes = n;
if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
if (!walk->page) {
@@ -372,58 +234,30 @@ slow_path:
if (!walk->page)
goto slow_path;
}
-
- walk->nbytes = min_t(unsigned, n,
- PAGE_SIZE - offset_in_page(walk->page));
walk->flags |= SKCIPHER_WALK_COPY;
- err = skcipher_next_copy(walk);
- goto set_phys_lowmem;
+ return skcipher_next_copy(walk);
}
- walk->nbytes = n;
-
return skcipher_next_fast(walk);
-
-set_phys_lowmem:
- if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
- walk->src.phys.page = virt_to_page(walk->src.virt.addr);
- walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
- walk->src.phys.offset &= PAGE_SIZE - 1;
- walk->dst.phys.offset &= PAGE_SIZE - 1;
- }
- return err;
}
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
- unsigned a = crypto_tfm_ctx_alignment() - 1;
unsigned alignmask = walk->alignmask;
unsigned ivsize = walk->ivsize;
- unsigned bs = walk->stride;
- unsigned aligned_bs;
+ unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1);
unsigned size;
u8 *iv;
- aligned_bs = ALIGN(bs, alignmask + 1);
-
- /* Minimum size to align buffer by alignmask. */
- size = alignmask & ~a;
-
- if (walk->flags & SKCIPHER_WALK_PHYS)
- size += ivsize;
- else {
- size += aligned_bs + ivsize;
-
- /* Minimum size to ensure buffer does not straddle a page. */
- size += (bs - 1) & ~(alignmask | a);
- }
+ /* Min size for a buffer of stride + ivsize, aligned to alignmask */
+ size = aligned_stride + ivsize +
+ (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
if (!walk->buffer)
return -ENOMEM;
- iv = PTR_ALIGN(walk->buffer, alignmask + 1);
- iv = skcipher_get_spot(iv, bs) + aligned_bs;
+ iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride;
walk->iv = memcpy(iv, walk->iv, walk->ivsize);
return 0;
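
[Editor's note] Worked layout of the combined buffer, assuming
alignmask = 15, stride = 16, ivsize = 16 and crypto_tfm_ctx_alignment() = 8:
size = ALIGN(16, 16) + 16 + (15 & ~7) = 40 bytes. After PTR_ALIGN(), the
first aligned_stride bytes double as the bounce area that
skcipher_next_slow() reuses via walk->buffer, and the IV copy sits
immediately after it, itself (alignmask + 1)-aligned because
aligned_stride is a multiple of alignmask + 1.
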
@@ -446,15 +280,24 @@ static int skcipher_walk_first(struct skcipher_walk *walk)
return skcipher_walk_next(walk);
}
-static int skcipher_walk_skcipher(struct skcipher_walk *walk,
- struct skcipher_request *req)
+int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
+ struct skcipher_request *__restrict req, bool atomic)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_alg *alg;
+
+ might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+
+ alg = crypto_skcipher_alg(tfm);
walk->total = req->cryptlen;
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
+ if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
+ walk->flags = SKCIPHER_WALK_SLEEP;
+ else
+ walk->flags = 0;
if (unlikely(!walk->total))
return 0;
@@ -462,90 +305,50 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
scatterwalk_start(&walk->in, req->src);
scatterwalk_start(&walk->out, req->dst);
- walk->flags &= ~SKCIPHER_WALK_SLEEP;
- walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
- SKCIPHER_WALK_SLEEP : 0;
-
walk->blocksize = crypto_skcipher_blocksize(tfm);
- walk->stride = crypto_skcipher_walksize(tfm);
walk->ivsize = crypto_skcipher_ivsize(tfm);
walk->alignmask = crypto_skcipher_alignmask(tfm);
- return skcipher_walk_first(walk);
-}
-
-int skcipher_walk_virt(struct skcipher_walk *walk,
- struct skcipher_request *req, bool atomic)
-{
- int err;
-
- might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
-
- walk->flags &= ~SKCIPHER_WALK_PHYS;
-
- err = skcipher_walk_skcipher(walk, req);
-
- walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;
+ if (alg->co.base.cra_type != &crypto_skcipher_type)
+ walk->stride = alg->co.chunksize;
+ else
+ walk->stride = alg->walksize;
- return err;
+ return skcipher_walk_first(walk);
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
-int skcipher_walk_async(struct skcipher_walk *walk,
- struct skcipher_request *req)
-{
- walk->flags |= SKCIPHER_WALK_PHYS;
-
- INIT_LIST_HEAD(&walk->buffers);
-
- return skcipher_walk_skcipher(walk, req);
-}
-EXPORT_SYMBOL_GPL(skcipher_walk_async);
-
-static int skcipher_walk_aead_common(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic)
+static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk,
+ struct aead_request *__restrict req,
+ bool atomic)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- int err;
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
+ if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
+ walk->flags = SKCIPHER_WALK_SLEEP;
+ else
+ walk->flags = 0;
if (unlikely(!walk->total))
return 0;
- walk->flags &= ~SKCIPHER_WALK_PHYS;
-
- scatterwalk_start(&walk->in, req->src);
- scatterwalk_start(&walk->out, req->dst);
-
- scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
- scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
-
- scatterwalk_done(&walk->in, 0, walk->total);
- scatterwalk_done(&walk->out, 0, walk->total);
-
- if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
- walk->flags |= SKCIPHER_WALK_SLEEP;
- else
- walk->flags &= ~SKCIPHER_WALK_SLEEP;
+ scatterwalk_start_at_pos(&walk->in, req->src, req->assoclen);
+ scatterwalk_start_at_pos(&walk->out, req->dst, req->assoclen);
walk->blocksize = crypto_aead_blocksize(tfm);
walk->stride = crypto_aead_chunksize(tfm);
walk->ivsize = crypto_aead_ivsize(tfm);
walk->alignmask = crypto_aead_alignmask(tfm);
- err = skcipher_walk_first(walk);
-
- if (atomic)
- walk->flags &= ~SKCIPHER_WALK_SLEEP;
-
- return err;
+ return skcipher_walk_first(walk);
}
-int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic)
+int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk,
+ struct aead_request *__restrict req,
+ bool atomic)
{
walk->total = req->cryptlen;
@@ -553,8 +356,9 @@ int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);
-int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic)
+int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk,
+ struct aead_request *__restrict req,
+ bool atomic)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -598,6 +402,17 @@ int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned long alignmask = crypto_skcipher_alignmask(tfm);
int err;
+ if (cipher->co.base.cra_type != &crypto_skcipher_type) {
+ struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_lskcipher_clear_flags(*ctx, CRYPTO_TFM_REQ_MASK);
+ crypto_lskcipher_set_flags(*ctx,
+ crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_lskcipher_setkey(*ctx, key, keylen);
+ goto out;
+ }
+
if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
return -EINVAL;
@@ -606,6 +421,7 @@ int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
else
err = cipher->setkey(tfm, key, keylen);
+out:
if (unlikely(err)) {
skcipher_set_needkey(tfm);
return err;
@@ -619,37 +435,87 @@ EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);
int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int cryptlen = req->cryptlen;
- int ret;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- crypto_stats_get(alg);
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- ret = -ENOKEY;
- else
- ret = crypto_skcipher_alg(tfm)->encrypt(req);
- crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
- return ret;
+ return -ENOKEY;
+ if (alg->co.base.cra_type != &crypto_skcipher_type)
+ return crypto_lskcipher_encrypt_sg(req);
+ return alg->encrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);
int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int cryptlen = req->cryptlen;
- int ret;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- crypto_stats_get(alg);
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- ret = -ENOKEY;
- else
- ret = crypto_skcipher_alg(tfm)->decrypt(req);
- crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
- return ret;
+ return -ENOKEY;
+ if (alg->co.base.cra_type != &crypto_skcipher_type)
+ return crypto_lskcipher_decrypt_sg(req);
+ return alg->decrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
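
[Editor's note] For reference, a hedged sketch of the caller-side
convention these entry points implement (synchronous wrapper shown;
key, sg_src, sg_dst, len and iv are placeholders, error checks trimmed):

	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	err = crypto_skcipher_setkey(tfm, key, 16);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, sg_src, sg_dst, len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
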
+static int crypto_lskcipher_export(struct skcipher_request *req, void *out)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ u8 *ivs = skcipher_request_ctx(req);
+
+ ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);
+
+ memcpy(out, ivs + crypto_skcipher_ivsize(tfm),
+ crypto_skcipher_statesize(tfm));
+
+ return 0;
+}
+
+static int crypto_lskcipher_import(struct skcipher_request *req, const void *in)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ u8 *ivs = skcipher_request_ctx(req);
+
+ ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);
+
+ memcpy(ivs + crypto_skcipher_ivsize(tfm), in,
+ crypto_skcipher_statesize(tfm));
+
+ return 0;
+}
+
+static int skcipher_noexport(struct skcipher_request *req, void *out)
+{
+ return 0;
+}
+
+static int skcipher_noimport(struct skcipher_request *req, const void *in)
+{
+ return 0;
+}
+
+int crypto_skcipher_export(struct skcipher_request *req, void *out)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+
+ if (alg->co.base.cra_type != &crypto_skcipher_type)
+ return crypto_lskcipher_export(req, out);
+ return alg->export(req, out);
+}
+EXPORT_SYMBOL_GPL(crypto_skcipher_export);
+
+int crypto_skcipher_import(struct skcipher_request *req, const void *in)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+
+ if (alg->co.base.cra_type != &crypto_skcipher_type)
+ return crypto_lskcipher_import(req, in);
+ return alg->import(req, in);
+}
+EXPORT_SYMBOL_GPL(crypto_skcipher_import);
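
[Editor's note] A rough sketch of the continuation flow these helpers
enable, assuming a state buffer sized by crypto_skcipher_statesize() and
that the request carries the appropriate flags for non-final processing
(omitted here); MY_STATESIZE is a placeholder:

	u8 state[MY_STATESIZE];	/* >= crypto_skcipher_statesize(tfm) */

	err = crypto_skcipher_encrypt(req);	/* first chunk */
	err = crypto_skcipher_export(req, state);
	/* ... later, resume on the same tfm ... */
	err = crypto_skcipher_import(req, state);
	err = crypto_skcipher_encrypt(req);	/* next chunk */
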
+
static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
@@ -665,6 +531,20 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
skcipher_set_needkey(skcipher);
+ if (tfm->__crt_alg->cra_type != &crypto_skcipher_type) {
+ unsigned am = crypto_skcipher_alignmask(skcipher);
+ unsigned reqsize;
+
+ reqsize = am & ~(crypto_tfm_ctx_alignment() - 1);
+ reqsize += crypto_skcipher_ivsize(skcipher);
+ reqsize += crypto_skcipher_statesize(skcipher);
+ crypto_skcipher_set_reqsize(skcipher, reqsize);
+
+ return crypto_init_lskcipher_ops_sg(tfm);
+ }
+
+ crypto_skcipher_set_reqsize(skcipher, crypto_tfm_alg_reqsize(tfm));
+
if (alg->exit)
skcipher->base.exit = crypto_skcipher_exit_tfm;
@@ -674,6 +554,14 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
return 0;
}
+static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
+{
+ if (alg->cra_type != &crypto_skcipher_type)
+ return sizeof(struct crypto_lskcipher *);
+
+ return crypto_alg_extsize(alg);
+}
+
static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
struct skcipher_instance *skcipher =
@@ -686,26 +574,25 @@ static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
- struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
- base);
+ struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
seq_printf(m, "type : skcipher\n");
seq_printf(m, "async : %s\n",
- alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
+ str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC));
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "min keysize : %u\n", skcipher->min_keysize);
seq_printf(m, "max keysize : %u\n", skcipher->max_keysize);
seq_printf(m, "ivsize : %u\n", skcipher->ivsize);
seq_printf(m, "chunksize : %u\n", skcipher->chunksize);
seq_printf(m, "walksize : %u\n", skcipher->walksize);
+ seq_printf(m, "statesize : %u\n", skcipher->statesize);
}
-#ifdef CONFIG_NET
-static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_skcipher_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
+ struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
struct crypto_report_blkcipher rblkcipher;
- struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
- base);
memset(&rblkcipher, 0, sizeof(rblkcipher));
@@ -720,25 +607,22 @@ static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
sizeof(rblkcipher), &rblkcipher);
}
-#else
-static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static const struct crypto_type crypto_skcipher_type = {
- .extsize = crypto_alg_extsize,
+ .extsize = crypto_skcipher_extsize,
.init_tfm = crypto_skcipher_init_tfm,
.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
.show = crypto_skcipher_show,
#endif
+#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_skcipher_report,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
- .maskset = CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.tfmsize = offsetof(struct crypto_skcipher, base),
+ .algsize = offsetof(struct skcipher_alg, base),
};
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
@@ -764,6 +648,7 @@ struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
/* Only sync algorithms allowed. */
mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;
+ type &= ~(CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE);
tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
@@ -787,21 +672,45 @@ int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);
-static int skcipher_prepare_alg(struct skcipher_alg *alg)
+int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
{
struct crypto_alg *base = &alg->base;
if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
- alg->walksize > PAGE_SIZE / 8)
+ alg->statesize > PAGE_SIZE / 2 ||
+ (alg->ivsize + alg->statesize) > PAGE_SIZE / 2)
return -EINVAL;
if (!alg->chunksize)
alg->chunksize = base->cra_blocksize;
+
+ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+
+ return 0;
+}
+
+static int skcipher_prepare_alg(struct skcipher_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+ int err;
+
+ err = skcipher_prepare_alg_common(&alg->co);
+ if (err)
+ return err;
+
+ if (alg->walksize > PAGE_SIZE / 8)
+ return -EINVAL;
+
if (!alg->walksize)
alg->walksize = alg->chunksize;
+ if (!alg->statesize) {
+ alg->import = skcipher_noimport;
+ alg->export = skcipher_noexport;
+ } else if (!(alg->import && alg->export))
+ return -EINVAL;
+
base->cra_type = &crypto_skcipher_type;
- base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
return 0;
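
[Editor's note] For orientation, a hypothetical minimal registration that
would flow through skcipher_prepare_alg() above; struct my_ctx and the
my_setkey/my_encrypt/my_decrypt callbacks are placeholders:

	static struct skcipher_alg my_alg = {
		.base.cra_name		= "cbc(myblock)",
		.base.cra_driver_name	= "cbc-myblock-generic",
		.base.cra_priority	= 100,
		.base.cra_blocksize	= 16,
		.base.cra_ctxsize	= sizeof(struct my_ctx),
		.base.cra_module	= THIS_MODULE,
		.min_keysize		= 16,
		.max_keysize		= 32,
		.ivsize			= 16,
		.setkey			= my_setkey,
		.encrypt		= my_encrypt,
		.decrypt		= my_decrypt,
	};

	/* crypto_register_skcipher(&my_alg) lands here, which fills in the
	 * chunksize/walksize defaults and the noexport/noimport stubs. */
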
@@ -981,4 +890,4 @@ EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");