Diffstat (limited to 'crypto/ahash.c')
-rw-r--r-- | crypto/ahash.c | 773
1 file changed, 545 insertions, 228 deletions
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 0ac83f7f701d..bc84a07c924c 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -16,31 +16,154 @@
 #include <linux/cryptouser.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
+#include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/sched.h>
+#include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 #include <linux/string.h>
+#include <linux/string_choices.h>
 #include <net/netlink.h>
 
 #include "hash.h"
 
 #define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e
 
-static inline struct crypto_istat_hash *ahash_get_stat(struct ahash_alg *alg)
+struct crypto_hash_walk {
+	const char *data;
+
+	unsigned int offset;
+	unsigned int flags;
+
+	struct page *pg;
+	unsigned int entrylen;
+
+	unsigned int total;
+	struct scatterlist *sg;
+};
+
+static int ahash_def_finup(struct ahash_request *req);
+
+static inline bool crypto_ahash_block_only(struct crypto_ahash *tfm)
+{
+	return crypto_ahash_alg(tfm)->halg.base.cra_flags &
+	       CRYPTO_AHASH_ALG_BLOCK_ONLY;
+}
+
+static inline bool crypto_ahash_final_nonzero(struct crypto_ahash *tfm)
+{
+	return crypto_ahash_alg(tfm)->halg.base.cra_flags &
+	       CRYPTO_AHASH_ALG_FINAL_NONZERO;
+}
+
+static inline bool crypto_ahash_need_fallback(struct crypto_ahash *tfm)
+{
+	return crypto_ahash_alg(tfm)->halg.base.cra_flags &
+	       CRYPTO_ALG_NEED_FALLBACK;
+}
+
+static inline void ahash_op_done(void *data, int err,
+				 int (*finish)(struct ahash_request *, int))
+{
+	struct ahash_request *areq = data;
+	crypto_completion_t compl;
+
+	compl = areq->saved_complete;
+	data = areq->saved_data;
+	if (err == -EINPROGRESS)
+		goto out;
+
+	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	err = finish(areq, err);
+	if (err == -EINPROGRESS || err == -EBUSY)
+		return;
+
+out:
+	compl(data, err);
+}
+
+static int hash_walk_next(struct crypto_hash_walk *walk)
 {
-	return hash_get_stat(&alg->halg);
+	unsigned int offset = walk->offset;
+	unsigned int nbytes = min(walk->entrylen,
+				  ((unsigned int)(PAGE_SIZE)) - offset);
+
+	walk->data = kmap_local_page(walk->pg);
+	walk->data += offset;
+	walk->entrylen -= nbytes;
+	return nbytes;
 }
 
-static inline int crypto_ahash_errstat(struct ahash_alg *alg, int err)
+static int hash_walk_new_entry(struct crypto_hash_walk *walk)
 {
-	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+	struct scatterlist *sg;
+
+	sg = walk->sg;
+	walk->offset = sg->offset;
+	walk->pg = nth_page(sg_page(walk->sg), (walk->offset >> PAGE_SHIFT));
+	walk->offset = offset_in_page(walk->offset);
+	walk->entrylen = sg->length;
+
+	if (walk->entrylen > walk->total)
+		walk->entrylen = walk->total;
+	walk->total -= walk->entrylen;
+
+	return hash_walk_next(walk);
+}
+
+static int crypto_hash_walk_first(struct ahash_request *req,
+				  struct crypto_hash_walk *walk)
+{
+	walk->total = req->nbytes;
+	walk->entrylen = 0;
+
+	if (!walk->total)
+		return 0;
+
+	walk->flags = req->base.flags;
+
+	if (ahash_request_isvirt(req)) {
+		walk->data = req->svirt;
+		walk->total = 0;
+		return req->nbytes;
+	}
+
+	walk->sg = req->src;
+
+	return hash_walk_new_entry(walk);
+}
+
+static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
+{
+	if ((walk->flags & CRYPTO_AHASH_REQ_VIRT))
 		return err;
 
-	if (err && err != -EINPROGRESS && err != -EBUSY)
-		atomic64_inc(&ahash_get_stat(alg)->err_cnt);
+	walk->data -= walk->offset;
 
-	return err;
+	kunmap_local(walk->data);
+	crypto_yield(walk->flags);
+
+	if (err)
+		return err;
+
+	if (walk->entrylen) {
+		walk->offset = 0;
+		walk->pg++;
+		return hash_walk_next(walk);
+	}
+
+	if (!walk->total)
+		return 0;
+
+	walk->sg = sg_next(walk->sg);
+
+	return hash_walk_new_entry(walk);
+}
+
+static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
+{
+	return !(walk->entrylen | walk->total);
 }
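Note: the walk helpers above are now private to crypto/ahash.c. For orientation, their canonical consumer loop — in essence shash_ahash_update() from later in this file — looks like the following sketch (illustration only, not part of the diff):

	static int example_walk_update(struct ahash_request *req,
				       struct shash_desc *desc)
	{
		struct crypto_hash_walk walk;
		int nbytes;

		/* Each pass maps one scatterlist segment and hashes it;
		 * crypto_hash_walk_done() unmaps it and advances to the
		 * next page or scatterlist entry. */
		for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
		     nbytes = crypto_hash_walk_done(&walk, nbytes))
			nbytes = crypto_shash_update(desc, walk.data, nbytes);

		return nbytes;
	}

For virtual-address requests, crypto_hash_walk_first() hands back the whole buffer in one step and crypto_hash_walk_done() skips the unmap, so the same loop covers both cases.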
 
 /*
@@ -100,21 +223,36 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
 	unsigned int nbytes = req->nbytes;
 	struct scatterlist *sg;
 	unsigned int offset;
+	struct page *page;
+	const u8 *data;
 	int err;
 
-	if (nbytes &&
-	    (sg = req->src, offset = sg->offset,
-	     nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
-		void *data;
+	data = req->svirt;
+	if (!nbytes || ahash_request_isvirt(req))
+		return crypto_shash_digest(desc, data, nbytes, req->result);
+
+	sg = req->src;
+	if (nbytes > sg->length)
+		return crypto_shash_init(desc) ?:
+		       shash_ahash_finup(req, desc);
 
-		data = kmap_local_page(sg_page(sg));
-		err = crypto_shash_digest(desc, data + offset, nbytes,
-					  req->result);
-		kunmap_local(data);
-	} else
-		err = crypto_shash_init(desc) ?:
-		      shash_ahash_finup(req, desc);
+	page = sg_page(sg);
+	offset = sg->offset;
+	data = lowmem_page_address(page) + offset;
+	if (!IS_ENABLED(CONFIG_HIGHMEM))
+		return crypto_shash_digest(desc, data, nbytes, req->result);
+
+	page = nth_page(page, offset >> PAGE_SHIFT);
+	offset = offset_in_page(offset);
+
+	if (nbytes > (unsigned int)PAGE_SIZE - offset)
+		return crypto_shash_init(desc) ?:
+		       shash_ahash_finup(req, desc);
+
+	data = kmap_local_page(page);
+	err = crypto_shash_digest(desc, data + offset, nbytes,
+				  req->result);
+	kunmap_local(data);
 	return err;
 }
 EXPORT_SYMBOL_GPL(shash_ahash_digest);
@@ -148,82 +286,10 @@ static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm)
 	crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
 				    CRYPTO_TFM_NEED_KEY);
-	crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
 
 	return 0;
 }
 
-static int hash_walk_next(struct crypto_hash_walk *walk)
-{
-	unsigned int offset = walk->offset;
-	unsigned int nbytes = min(walk->entrylen,
-				  ((unsigned int)(PAGE_SIZE)) - offset);
-
-	walk->data = kmap_local_page(walk->pg);
-	walk->data += offset;
-	walk->entrylen -= nbytes;
-	return nbytes;
-}
-
-static int hash_walk_new_entry(struct crypto_hash_walk *walk)
-{
-	struct scatterlist *sg;
-
-	sg = walk->sg;
-	walk->offset = sg->offset;
-	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
-	walk->offset = offset_in_page(walk->offset);
-	walk->entrylen = sg->length;
-
-	if (walk->entrylen > walk->total)
-		walk->entrylen = walk->total;
-	walk->total -= walk->entrylen;
-
-	return hash_walk_next(walk);
-}
-
-int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
-{
-	walk->data -= walk->offset;
-
-	kunmap_local(walk->data);
-	crypto_yield(walk->flags);
-
-	if (err)
-		return err;
-
-	if (walk->entrylen) {
-		walk->offset = 0;
-		walk->pg++;
-		return hash_walk_next(walk);
-	}
-
-	if (!walk->total)
-		return 0;
-
-	walk->sg = sg_next(walk->sg);
-
-	return hash_walk_new_entry(walk);
-}
-EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
-
-int crypto_hash_walk_first(struct ahash_request *req,
-			   struct crypto_hash_walk *walk)
-{
-	walk->total = req->nbytes;
-
-	if (!walk->total) {
-		walk->entrylen = 0;
-		return 0;
-	}
-
-	walk->sg = req->src;
-	walk->flags = req->base.flags;
-
-	return hash_walk_new_entry(walk);
-}
-EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
-
 static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
 			  unsigned int keylen)
 {
@@ -256,6 +322,9 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 	int err;
 
 	err = alg->setkey(tfm, key, keylen);
+	if (!err && crypto_ahash_need_fallback(tfm))
+		err = crypto_ahash_setkey(crypto_ahash_fb(tfm),
+					  key, keylen);
 	if (unlikely(err)) {
 		ahash_set_needkey(tfm, alg);
 		return err;
@@ -266,6 +335,49 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
 
+static int ahash_do_req_chain(struct ahash_request *req,
+			      int (*const *op)(struct ahash_request *req))
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	int err;
+
+	if (crypto_ahash_req_virt(tfm) || !ahash_request_isvirt(req))
+		return (*op)(req);
+
+	if (crypto_ahash_statesize(tfm) > HASH_MAX_STATESIZE)
+		return -ENOSYS;
+
+	{
+		u8 state[HASH_MAX_STATESIZE];
+
+		if (op == &crypto_ahash_alg(tfm)->digest) {
+			ahash_request_set_tfm(req, crypto_ahash_fb(tfm));
+			err = crypto_ahash_digest(req);
+			goto out_no_state;
+		}
+
+		err = crypto_ahash_export(req, state);
+		ahash_request_set_tfm(req, crypto_ahash_fb(tfm));
+		err = err ?: crypto_ahash_import(req, state);
+
+		if (op == &crypto_ahash_alg(tfm)->finup) {
+			err = err ?: crypto_ahash_finup(req);
+			goto out_no_state;
+		}
+
+		err = err ?:
+		      crypto_ahash_update(req) ?:
+		      crypto_ahash_export(req, state);
+
+		ahash_request_set_tfm(req, tfm);
+		return err ?: crypto_ahash_import(req, state);
+
+out_no_state:
+		ahash_request_set_tfm(req, tfm);
+		return err;
+	}
+}
+
 int crypto_ahash_init(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -274,145 +386,204 @@ int crypto_ahash_init(struct ahash_request *req)
 	if (likely(tfm->using_shash))
 		return crypto_shash_init(prepare_shash_desc(req, tfm));
 	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
 		return -ENOKEY;
+	if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+		return -EAGAIN;
+	if (crypto_ahash_block_only(tfm)) {
+		u8 *buf = ahash_request_ctx(req);
+
+		buf += crypto_ahash_reqsize(tfm) - 1;
+		*buf = 0;
+	}
 	return crypto_ahash_alg(tfm)->init(req);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_init);
 
-static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
-			  bool has_state)
+static void ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	unsigned int ds = crypto_ahash_digestsize(tfm);
-	struct ahash_request *subreq;
-	unsigned int subreq_size;
-	unsigned int reqsize;
-	u8 *result;
-	gfp_t gfp;
-	u32 flags;
-
-	subreq_size = sizeof(*subreq);
-	reqsize = crypto_ahash_reqsize(tfm);
-	reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
-	subreq_size += reqsize;
-	subreq_size += ds;
-
-	flags = ahash_request_flags(req);
-	gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-	      GFP_KERNEL : GFP_ATOMIC;
-	subreq = kmalloc(subreq_size, gfp);
-	if (!subreq)
-		return -ENOMEM;
-
-	ahash_request_set_tfm(subreq, tfm);
-	ahash_request_set_callback(subreq, flags, cplt, req);
-
-	result = (u8 *)(subreq + 1) + reqsize;
-
-	ahash_request_set_crypt(subreq, req->src, result, req->nbytes);
-
-	if (has_state) {
-		void *state;
-
-		state = kmalloc(crypto_ahash_statesize(tfm), gfp);
-		if (!state) {
-			kfree(subreq);
-			return -ENOMEM;
-		}
-
-		crypto_ahash_export(req, state);
-		crypto_ahash_import(subreq, state);
-		kfree_sensitive(state);
-	}
-
-	req->priv = subreq;
+	req->saved_complete = req->base.complete;
+	req->saved_data = req->base.data;
+	req->base.complete = cplt;
+	req->base.data = req;
+}
 
-	return 0;
+static void ahash_restore_req(struct ahash_request *req)
+{
+	req->base.complete = req->saved_complete;
+	req->base.data = req->saved_data;
 }
 
-static void ahash_restore_req(struct ahash_request *req, int err)
+static int ahash_update_finish(struct ahash_request *req, int err)
 {
-	struct ahash_request *subreq = req->priv;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	bool nonzero = crypto_ahash_final_nonzero(tfm);
+	int bs = crypto_ahash_blocksize(tfm);
+	u8 *blenp = ahash_request_ctx(req);
+	int blen;
+	u8 *buf;
+
+	blenp += crypto_ahash_reqsize(tfm) - 1;
+	blen = *blenp;
+	buf = blenp - bs;
+
+	if (blen) {
+		req->src = req->sg_head + 1;
+		if (sg_is_chain(req->src))
+			req->src = sg_chain_ptr(req->src);
+	}
+
+	req->nbytes += nonzero - blen;
 
-	if (!err)
-		memcpy(req->result, subreq->result,
-		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+	blen = err < 0 ? 0 : err + nonzero;
+	if (ahash_request_isvirt(req))
+		memcpy(buf, req->svirt + req->nbytes - blen, blen);
+	else
+		memcpy_from_sglist(buf, req->src, req->nbytes - blen, blen);
+	*blenp = blen;
 
-	req->priv = NULL;
+	ahash_restore_req(req);
 
-	kfree_sensitive(subreq);
+	return err;
+}
+
+static void ahash_update_done(void *data, int err)
+{
+	ahash_op_done(data, err, ahash_update_finish);
 }
 
 int crypto_ahash_update(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct ahash_alg *alg;
+	bool nonzero = crypto_ahash_final_nonzero(tfm);
+	int bs = crypto_ahash_blocksize(tfm);
+	u8 *blenp = ahash_request_ctx(req);
+	int blen, err;
+	u8 *buf;
 
 	if (likely(tfm->using_shash))
 		return shash_ahash_update(req, ahash_request_ctx(req));
+	if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+		return -EAGAIN;
+	if (!crypto_ahash_block_only(tfm))
+		return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->update);
+
+	blenp += crypto_ahash_reqsize(tfm) - 1;
+	blen = *blenp;
+	buf = blenp - bs;
+
+	if (blen + req->nbytes < bs + nonzero) {
+		if (ahash_request_isvirt(req))
+			memcpy(buf + blen, req->svirt, req->nbytes);
+		else
+			memcpy_from_sglist(buf + blen, req->src, 0,
+					   req->nbytes);
+
+		*blenp += req->nbytes;
+		return 0;
+	}
+
+	if (blen) {
+		memset(req->sg_head, 0, sizeof(req->sg_head[0]));
+		sg_set_buf(req->sg_head, buf, blen);
+		if (req->src != req->sg_head + 1)
+			sg_chain(req->sg_head, 2, req->src);
+		req->src = req->sg_head;
+		req->nbytes += blen;
+	}
+	req->nbytes -= nonzero;
 
-	alg = crypto_ahash_alg(tfm);
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		atomic64_add(req->nbytes, &ahash_get_stat(alg)->hash_tlen);
-	return crypto_ahash_errstat(alg, alg->update(req));
+	ahash_save_req(req, ahash_update_done);
+
+	err = ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->update);
+	if (err == -EINPROGRESS || err == -EBUSY)
+		return err;
+
+	return ahash_update_finish(req, err);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_update);
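To follow the arithmetic above: for CRYPTO_AHASH_ALG_BLOCK_ONLY algorithms the core buffers any partial trailing block in the request context (the last byte of the context holds the buffered length), and CRYPTO_AHASH_ALG_FINAL_NONZERO additionally holds one byte back so the final block is never empty. The following standalone model is one reading of that bookkeeping — an illustration under stated assumptions, not kernel code; BS and HOLD_BACK stand in for crypto_ahash_blocksize() and the FINAL_NONZERO byte:

	#include <assert.h>
	#include <stdio.h>
	#include <string.h>

	#define BS		64	/* crypto_ahash_blocksize() stand-in */
	#define HOLD_BACK	1	/* 1 iff CRYPTO_AHASH_ALG_FINAL_NONZERO */

	struct model {
		unsigned char buf[BS + HOLD_BACK];	/* tail buffer in the ctx */
		unsigned int blen;			/* the *blenp byte */
	};

	/* Mirrors the blen + req->nbytes < bs + nonzero test: short updates
	 * are only copied into the buffer; otherwise whole blocks are
	 * "processed" and the remainder (at least HOLD_BACK bytes) is
	 * buffered again for the next update or final. */
	static void model_update(struct model *m, const unsigned char *data,
				 unsigned int len)
	{
		unsigned int total = m->blen + len;
		unsigned int keep;

		if (total < BS + HOLD_BACK) {
			memcpy(m->buf + m->blen, data, len);
			m->blen = total;
			return;
		}

		keep = (total - HOLD_BACK) % BS + HOLD_BACK;
		printf("hashing %u block bytes, buffering %u tail bytes\n",
		       total - keep, keep);

		/* The kept tail always lies inside the new data here, since
		 * blen <= BS + HOLD_BACK - 1 and a full block was consumed. */
		assert(keep <= len);
		memcpy(m->buf, data + len - keep, keep);
		m->blen = keep;
	}

	int main(void)
	{
		static const unsigned char data[150];
		struct model m = { .blen = 0 };

		model_update(&m, data, 10);	/* buffered only */
		model_update(&m, data, 140);	/* 128 bytes hashed, 22 kept */
		return 0;
	}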
 
-int crypto_ahash_final(struct ahash_request *req)
+static int ahash_finup_finish(struct ahash_request *req, int err)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct ahash_alg *alg;
+	u8 *blenp = ahash_request_ctx(req);
+	int blen;
+
+	blenp += crypto_ahash_reqsize(tfm) - 1;
+	blen = *blenp;
+
+	if (blen) {
+		if (sg_is_last(req->src))
+			req->src = NULL;
+		else {
+			req->src = req->sg_head + 1;
+			if (sg_is_chain(req->src))
+				req->src = sg_chain_ptr(req->src);
+		}
+		req->nbytes -= blen;
+	}
 
-	if (likely(tfm->using_shash))
-		return crypto_shash_final(ahash_request_ctx(req), req->result);
+	ahash_restore_req(req);
 
-	alg = crypto_ahash_alg(tfm);
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
-		atomic64_inc(&ahash_get_stat(alg)->hash_cnt);
-	return crypto_ahash_errstat(alg, alg->final(req));
+	return err;
+}
+
+static void ahash_finup_done(void *data, int err)
+{
+	ahash_op_done(data, err, ahash_finup_finish);
 }
-EXPORT_SYMBOL_GPL(crypto_ahash_final);
 
 int crypto_ahash_finup(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct ahash_alg *alg;
+	int bs = crypto_ahash_blocksize(tfm);
+	u8 *blenp = ahash_request_ctx(req);
+	int blen, err;
+	u8 *buf;
 
 	if (likely(tfm->using_shash))
 		return shash_ahash_finup(req, ahash_request_ctx(req));
+	if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+		return -EAGAIN;
+	if (!crypto_ahash_alg(tfm)->finup)
+		return ahash_def_finup(req);
+	if (!crypto_ahash_block_only(tfm))
+		return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->finup);
+
+	blenp += crypto_ahash_reqsize(tfm) - 1;
+	blen = *blenp;
+	buf = blenp - bs;
+
+	if (blen) {
+		memset(req->sg_head, 0, sizeof(req->sg_head[0]));
+		sg_set_buf(req->sg_head, buf, blen);
+		if (!req->src)
+			sg_mark_end(req->sg_head);
+		else if (req->src != req->sg_head + 1)
+			sg_chain(req->sg_head, 2, req->src);
+		req->src = req->sg_head;
+		req->nbytes += blen;
+	}
 
-	alg = crypto_ahash_alg(tfm);
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
-		struct crypto_istat_hash *istat = ahash_get_stat(alg);
+	ahash_save_req(req, ahash_finup_done);
 
-		atomic64_inc(&istat->hash_cnt);
-		atomic64_add(req->nbytes, &istat->hash_tlen);
-	}
-	return crypto_ahash_errstat(alg, alg->finup(req));
+	err = ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->finup);
+	if (err == -EINPROGRESS || err == -EBUSY)
+		return err;
+
+	return ahash_finup_finish(req, err);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_finup);
 
 int crypto_ahash_digest(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct ahash_alg *alg;
-	int err;
 
 	if (likely(tfm->using_shash))
 		return shash_ahash_digest(req, prepare_shash_desc(req, tfm));
-
-	alg = crypto_ahash_alg(tfm);
-	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
-		struct crypto_istat_hash *istat = ahash_get_stat(alg);
-
-		atomic64_inc(&istat->hash_cnt);
-		atomic64_add(req->nbytes, &istat->hash_tlen);
-	}
-
+	if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+		return -EAGAIN;
 	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-		err = -ENOKEY;
-	else
-		err = alg->digest(req);
-
-	return crypto_ahash_errstat(alg, err);
+		return -ENOKEY;
+	return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->digest);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
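The three entry points above are what kernel users call. As a usage sketch (not from this patch; it assumes a "sha256" provider is registered), this is the long-standing scatterlist pattern, with crypto_wait_req() absorbing the asynchronous -EINPROGRESS/-EBUSY completions:

	#include <crypto/hash.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	static int example_digest_sg(const void *buf, unsigned int len, u8 *out)
	{
		struct crypto_ahash *tfm;
		struct ahash_request *req;
		struct scatterlist sg;
		DECLARE_CRYPTO_WAIT(wait);
		int err;

		tfm = crypto_alloc_ahash("sha256", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_free_tfm;
		}

		sg_init_one(&sg, buf, len);	/* buf must be lowmem, not vmalloc */
		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
						CRYPTO_TFM_REQ_MAY_SLEEP,
					   crypto_req_done, &wait);
		ahash_request_set_crypt(req, &sg, out, len);

		/* crypto_wait_req() sleeps until the completion fires. */
		err = crypto_wait_req(crypto_ahash_digest(req), &wait);

		ahash_request_free(req);
	out_free_tfm:
		crypto_free_ahash(tfm);
		return err;
	}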
@@ -423,74 +594,87 @@ static void ahash_def_finup_done2(void *data, int err)
 	if (err == -EINPROGRESS)
 		return;
 
-	ahash_restore_req(areq, err);
-
+	ahash_restore_req(areq);
 	ahash_request_complete(areq, err);
 }
 
 static int ahash_def_finup_finish1(struct ahash_request *req, int err)
 {
-	struct ahash_request *subreq = req->priv;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 
 	if (err)
 		goto out;
 
-	subreq->base.complete = ahash_def_finup_done2;
+	req->base.complete = ahash_def_finup_done2;
 
-	err = crypto_ahash_alg(crypto_ahash_reqtfm(req))->final(subreq);
+	err = crypto_ahash_alg(tfm)->final(req);
 	if (err == -EINPROGRESS || err == -EBUSY)
 		return err;
 
out:
-	ahash_restore_req(req, err);
+	ahash_restore_req(req);
 	return err;
 }
 
 static void ahash_def_finup_done1(void *data, int err)
 {
-	struct ahash_request *areq = data;
-	struct ahash_request *subreq;
-
-	if (err == -EINPROGRESS)
-		goto out;
-
-	subreq = areq->priv;
-	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
-
-	err = ahash_def_finup_finish1(areq, err);
-	if (err == -EINPROGRESS || err == -EBUSY)
-		return;
-
-out:
-	ahash_request_complete(areq, err);
+	ahash_op_done(data, err, ahash_def_finup_finish1);
 }
 
 static int ahash_def_finup(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	int err;
 
-	err = ahash_save_req(req, ahash_def_finup_done1, true);
-	if (err)
-		return err;
+	ahash_save_req(req, ahash_def_finup_done1);
 
-	err = crypto_ahash_alg(tfm)->update(req->priv);
+	err = crypto_ahash_update(req);
 	if (err == -EINPROGRESS || err == -EBUSY)
 		return err;
 
 	return ahash_def_finup_finish1(req, err);
 }
 
+int crypto_ahash_export_core(struct ahash_request *req, void *out)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+	if (likely(tfm->using_shash))
+		return crypto_shash_export_core(ahash_request_ctx(req), out);
+	return crypto_ahash_alg(tfm)->export_core(req, out);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_export_core);
+
 int crypto_ahash_export(struct ahash_request *req, void *out)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 
 	if (likely(tfm->using_shash))
 		return crypto_shash_export(ahash_request_ctx(req), out);
+	if (crypto_ahash_block_only(tfm)) {
+		unsigned int plen = crypto_ahash_blocksize(tfm) + 1;
+		unsigned int reqsize = crypto_ahash_reqsize(tfm);
+		unsigned int ss = crypto_ahash_statesize(tfm);
+		u8 *buf = ahash_request_ctx(req);
+
+		memcpy(out + ss - plen, buf + reqsize - plen, plen);
+	}
 	return crypto_ahash_alg(tfm)->export(req, out);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_export);
 
+int crypto_ahash_import_core(struct ahash_request *req, const void *in)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+	if (likely(tfm->using_shash))
+		return crypto_shash_import_core(prepare_shash_desc(req, tfm),
+						in);
+	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+		return -ENOKEY;
+	return crypto_ahash_alg(tfm)->import_core(req, in);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_import_core);
+
 int crypto_ahash_import(struct ahash_request *req, const void *in)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -499,6 +683,12 @@ int crypto_ahash_import(struct ahash_request *req, const void *in)
 	if (likely(tfm->using_shash))
 		return crypto_shash_import(prepare_shash_desc(req, tfm), in);
 	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
 		return -ENOKEY;
+	if (crypto_ahash_block_only(tfm)) {
+		unsigned int reqsize = crypto_ahash_reqsize(tfm);
+		u8 *buf = ahash_request_ctx(req);
+
+		buf[reqsize - 1] = 0;
+	}
 	return crypto_ahash_alg(tfm)->import(req, in);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_import);
@@ -508,25 +698,73 @@ static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
 	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
 	struct ahash_alg *alg = crypto_ahash_alg(hash);
 
-	alg->exit_tfm(hash);
+	if (alg->exit_tfm)
+		alg->exit_tfm(hash);
+	else if (tfm->__crt_alg->cra_exit)
+		tfm->__crt_alg->cra_exit(tfm);
+
+	if (crypto_ahash_need_fallback(hash))
+		crypto_free_ahash(crypto_ahash_fb(hash));
 }
 
 static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
 {
 	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
 	struct ahash_alg *alg = crypto_ahash_alg(hash);
+	struct crypto_ahash *fb = NULL;
+	int err;
 
 	crypto_ahash_set_statesize(hash, alg->halg.statesize);
+	crypto_ahash_set_reqsize(hash, crypto_tfm_alg_reqsize(tfm));
 
 	if (tfm->__crt_alg->cra_type == &crypto_shash_type)
 		return crypto_init_ahash_using_shash(tfm);
 
+	if (crypto_ahash_need_fallback(hash)) {
+		fb = crypto_alloc_ahash(crypto_ahash_alg_name(hash),
+					CRYPTO_ALG_REQ_VIRT,
+					CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_REQ_VIRT |
+					CRYPTO_AHASH_ALG_NO_EXPORT_CORE);
+		if (IS_ERR(fb))
+			return PTR_ERR(fb);
+
+		tfm->fb = crypto_ahash_tfm(fb);
+	}
+
 	ahash_set_needkey(hash, alg);
 
-	if (alg->exit_tfm)
-		tfm->exit = crypto_ahash_exit_tfm;
+	tfm->exit = crypto_ahash_exit_tfm;
+
+	if (alg->init_tfm)
+		err = alg->init_tfm(hash);
+	else if (tfm->__crt_alg->cra_init)
+		err = tfm->__crt_alg->cra_init(tfm);
+	else
+		return 0;
+
+	if (err)
+		goto out_free_sync_hash;
+
+	if (!ahash_is_async(hash) && crypto_ahash_reqsize(hash) >
+				     MAX_SYNC_HASH_REQSIZE)
+		goto out_exit_tfm;
+
+	BUILD_BUG_ON(HASH_MAX_DESCSIZE > MAX_SYNC_HASH_REQSIZE);
+	if (crypto_ahash_reqsize(hash) < HASH_MAX_DESCSIZE)
+		crypto_ahash_set_reqsize(hash, HASH_MAX_DESCSIZE);
+
+	return 0;
 
-	return alg->init_tfm ? alg->init_tfm(hash) : 0;
+out_exit_tfm:
+	if (alg->exit_tfm)
+		alg->exit_tfm(hash);
+	else if (tfm->__crt_alg->cra_exit)
+		tfm->__crt_alg->cra_exit(tfm);
+	err = -EINVAL;
+out_free_sync_hash:
+	crypto_free_ahash(fb);
+	return err;
 }
 
 static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
@@ -564,19 +802,13 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
 static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
 {
 	seq_printf(m, "type         : ahash\n");
-	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
- "yes" : "no"); + seq_printf(m, "async : %s\n", + str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC)); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "digestsize : %u\n", __crypto_hash_alg_common(alg)->digestsize); } -static int __maybe_unused crypto_ahash_report_stat( - struct sk_buff *skb, struct crypto_alg *alg) -{ - return crypto_hash_report_stat(skb, alg, "ahash"); -} - static const struct crypto_type crypto_ahash_type = { .extsize = crypto_ahash_extsize, .init_tfm = crypto_ahash_init_tfm, @@ -587,13 +819,11 @@ static const struct crypto_type crypto_ahash_type = { #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_ahash_report, #endif -#ifdef CONFIG_CRYPTO_STATS - .report_stat = crypto_ahash_report_stat, -#endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_AHASH_MASK, .type = CRYPTO_ALG_TYPE_AHASH, .tfmsize = offsetof(struct crypto_ahash, base), + .algsize = offsetof(struct ahash_alg, halg.base), }; int crypto_grab_ahash(struct crypto_ahash_spawn *spawn, @@ -618,7 +848,7 @@ int crypto_has_ahash(const char *alg_name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_has_ahash); -static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) { struct crypto_alg *alg = &halg->base; @@ -627,11 +857,13 @@ static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey; } +EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey); struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash) { struct hash_alg_common *halg = crypto_hash_alg_common(hash); struct crypto_tfm *tfm = crypto_ahash_tfm(hash); + struct crypto_ahash *fb = NULL; struct crypto_ahash *nhash; struct ahash_alg *alg; int err; @@ -661,28 +893,52 @@ struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash) err = PTR_ERR(shash); goto out_free_nhash; } + crypto_ahash_tfm(nhash)->exit = crypto_exit_ahash_using_shash; nhash->using_shash = true; *nctx = shash; return nhash; } + if (crypto_ahash_need_fallback(hash)) { + fb = crypto_clone_ahash(crypto_ahash_fb(hash)); + err = PTR_ERR(fb); + if (IS_ERR(fb)) + goto out_free_nhash; + + crypto_ahash_tfm(nhash)->fb = crypto_ahash_tfm(fb); + } + err = -ENOSYS; alg = crypto_ahash_alg(hash); if (!alg->clone_tfm) - goto out_free_nhash; + goto out_free_fb; err = alg->clone_tfm(nhash, hash); if (err) - goto out_free_nhash; + goto out_free_fb; + + crypto_ahash_tfm(nhash)->exit = crypto_ahash_exit_tfm; return nhash; +out_free_fb: + crypto_free_ahash(fb); out_free_nhash: crypto_free_ahash(nhash); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(crypto_clone_ahash); +static int ahash_default_export_core(struct ahash_request *req, void *out) +{ + return -ENOSYS; +} + +static int ahash_default_import_core(struct ahash_request *req, const void *in) +{ + return -ENOSYS; +} + static int ahash_prepare_alg(struct ahash_alg *alg) { struct crypto_alg *base = &alg->halg.base; @@ -691,6 +947,13 @@ static int ahash_prepare_alg(struct ahash_alg *alg) if (alg->halg.statesize == 0) return -EINVAL; + if (base->cra_reqsize && base->cra_reqsize < alg->halg.statesize) + return -EINVAL; + + if (!(base->cra_flags & CRYPTO_ALG_ASYNC) && + base->cra_reqsize > MAX_SYNC_HASH_REQSIZE) + return -EINVAL; + err = hash_prepare_alg(&alg->halg); if (err) return err; @@ -698,11 +961,28 @@ static int ahash_prepare_alg(struct ahash_alg *alg) base->cra_type = &crypto_ahash_type; base->cra_flags |= CRYPTO_ALG_TYPE_AHASH; - if (!alg->finup) - alg->finup = ahash_def_finup; + if 
+	if ((base->cra_flags ^ CRYPTO_ALG_REQ_VIRT) &
+	    (CRYPTO_ALG_ASYNC | CRYPTO_ALG_REQ_VIRT))
+		base->cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
+
 	if (!alg->setkey)
 		alg->setkey = ahash_nosetkey;
 
+	if (base->cra_flags & CRYPTO_AHASH_ALG_BLOCK_ONLY) {
+		BUILD_BUG_ON(MAX_ALGAPI_BLOCKSIZE >= 256);
+		if (!alg->finup)
+			return -EINVAL;
+
+		base->cra_reqsize += base->cra_blocksize + 1;
+		alg->halg.statesize += base->cra_blocksize + 1;
+		alg->export_core = alg->export;
+		alg->import_core = alg->import;
+	} else if (!alg->export_core || !alg->import_core) {
+		alg->export_core = ahash_default_export_core;
+		alg->import_core = ahash_default_import_core;
+		base->cra_flags |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
+	}
+
 	return 0;
 }
 
@@ -770,5 +1050,42 @@ int ahash_register_instance(struct crypto_template *tmpl,
 }
 EXPORT_SYMBOL_GPL(ahash_register_instance);
 
+void ahash_request_free(struct ahash_request *req)
+{
+	if (unlikely(!req))
+		return;
+
+	if (!ahash_req_on_stack(req)) {
+		kfree(req);
+		return;
+	}
+
+	ahash_request_zero(req);
+}
+EXPORT_SYMBOL_GPL(ahash_request_free);
+
+int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data,
+		       unsigned int len, u8 *out)
+{
+	HASH_REQUEST_ON_STACK(req, crypto_ahash_fb(tfm));
+	int err;
+
+	ahash_request_set_callback(req, 0, NULL, NULL);
+	ahash_request_set_virt(req, data, out, len);
+	err = crypto_ahash_digest(req);
+
+	ahash_request_zero(req);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(crypto_hash_digest);
+
+void ahash_free_singlespawn_instance(struct ahash_instance *inst)
+{
+	crypto_drop_spawn(ahash_instance_ctx(inst));
+	kfree(inst);
+}
+EXPORT_SYMBOL_GPL(ahash_free_singlespawn_instance);
+
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
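For completeness, a usage sketch of the on-stack request path that ahash_request_free() and crypto_hash_digest() above support (illustration only; "sha256" is an assumed algorithm name). On-stack requests require a synchronous tfm — crypto_ahash_digest() returns -EAGAIN otherwise — which the caller guarantees by masking CRYPTO_ALG_ASYNC at allocation:

	#include <crypto/hash.h>
	#include <linux/err.h>

	/* Hash a kernel-virtual buffer with an on-stack request; mirrors
	 * crypto_hash_digest() above. @tfm must be synchronous. */
	static int example_stack_digest(struct crypto_ahash *tfm,
					const u8 *data, unsigned int len,
					u8 *out)
	{
		HASH_REQUEST_ON_STACK(req, tfm);
		int err;

		ahash_request_set_callback(req, 0, NULL, NULL);
		ahash_request_set_virt(req, data, out, len);
		err = crypto_ahash_digest(req);	/* -EAGAIN if tfm is async */
		ahash_request_zero(req);	/* wipe hash state off the stack */
		return err;
	}

	static int example_sha256(const u8 *data, unsigned int len, u8 *out)
	{
		struct crypto_ahash *tfm;
		int err;

		/* Masking CRYPTO_ALG_ASYNC selects a synchronous provider. */
		tfm = crypto_alloc_ahash("sha256", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = example_stack_digest(tfm, data, len, out);
		crypto_free_ahash(tfm);
		return err;
	}

No heap allocation and no completion plumbing are needed on this path, which is what makes it attractive for small one-shot digests.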