Diffstat (limited to 'crypto/algapi.c')
-rw-r--r--   crypto/algapi.c   968
1 file changed, 381 insertions(+), 587 deletions(-)
diff --git a/crypto/algapi.c b/crypto/algapi.c index 8b65ada33e5d..e604d0d8b7b4 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -1,13 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Cryptographic API for algorithms (i.e., low-level API). * * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * */ #include <crypto/algapi.h> @@ -21,28 +16,12 @@ #include <linux/rtnetlink.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/workqueue.h> #include "internal.h" static LIST_HEAD(crypto_template_list); -static inline int crypto_set_driver_name(struct crypto_alg *alg) -{ - static const char suffix[] = "-generic"; - char *driver_name = alg->cra_driver_name; - int len; - - if (*driver_name) - return 0; - - len = strlcpy(driver_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); - if (len + sizeof(suffix) > CRYPTO_MAX_ALG_NAME) - return -ENAMETOOLONG; - - memcpy(driver_name + len, suffix, sizeof(suffix)); - return 0; -} - static inline void crypto_check_module_sig(struct module *mod) { if (fips_enabled && mod && !module_sig_ok(mod)) @@ -54,6 +33,9 @@ static int crypto_check_alg(struct crypto_alg *alg) { crypto_check_module_sig(alg->cra_module); + if (!alg->cra_name[0] || !alg->cra_driver_name[0]) + return -EINVAL; + if (alg->cra_alignmask & (alg->cra_alignmask + 1)) return -EINVAL; @@ -79,28 +61,55 @@ static int crypto_check_alg(struct crypto_alg *alg) refcount_set(&alg->cra_refcnt, 1); - return crypto_set_driver_name(alg); + return 0; } static void crypto_free_instance(struct crypto_instance *inst) { - if (!inst->alg.cra_type->free) { - inst->tmpl->free(inst); - return; + inst->alg.cra_type->free(inst); +} + +static void crypto_destroy_instance_workfn(struct work_struct *w) +{ + struct crypto_template *tmpl = container_of(w, struct crypto_template, + free_work); + struct crypto_instance *inst; + struct hlist_node *n; + HLIST_HEAD(list); + + down_write(&crypto_alg_sem); + hlist_for_each_entry_safe(inst, n, &tmpl->dead, list) { + if (refcount_read(&inst->alg.cra_refcnt) != -1) + continue; + hlist_del(&inst->list); + hlist_add_head(&inst->list, &list); } + up_write(&crypto_alg_sem); - inst->alg.cra_type->free(inst); + hlist_for_each_entry_safe(inst, n, &list, list) + crypto_free_instance(inst); } static void crypto_destroy_instance(struct crypto_alg *alg) { - struct crypto_instance *inst = (void *)alg; + struct crypto_instance *inst = container_of(alg, + struct crypto_instance, + alg); struct crypto_template *tmpl = inst->tmpl; - crypto_free_instance(inst); - crypto_tmpl_put(tmpl); + refcount_set(&alg->cra_refcnt, -1); + schedule_work(&tmpl->free_work); } +/* + * This function adds a spawn to the list secondary_spawns which + * will be used at the end of crypto_remove_spawns to unregister + * instances, unless the spawn happens to be one that is depended + * on by the new algorithm (nalg in crypto_remove_spawns). + * + * This function is also responsible for resurrecting any algorithms + * in the dependency chain of nalg by unsetting n->dead. 
+ */ static struct list_head *crypto_more_spawns(struct crypto_alg *alg, struct list_head *stack, struct list_head *top, @@ -112,15 +121,17 @@ static struct list_head *crypto_more_spawns(struct crypto_alg *alg, if (!spawn) return NULL; - n = list_next_entry(spawn, list); + n = list_prev_entry(spawn, list); + list_move(&spawn->list, secondary_spawns); - if (spawn->alg && &n->list != stack && !n->alg) - n->alg = (n->list.next == stack) ? alg : - &list_next_entry(n, list)->inst->alg; + if (list_is_last(&n->list, stack)) + return top; - list_move(&spawn->list, secondary_spawns); + n = list_next_entry(n, list); + if (!spawn->dead) + n->dead = false; - return &n->list == stack ? top : &n->inst->alg.cra_users; + return &n->inst->alg.cra_users; } static void crypto_remove_instance(struct crypto_instance *inst, @@ -132,19 +143,25 @@ static void crypto_remove_instance(struct crypto_instance *inst, return; inst->alg.cra_flags |= CRYPTO_ALG_DEAD; - if (hlist_unhashed(&inst->list)) - return; - if (!tmpl || !crypto_tmpl_get(tmpl)) + if (!tmpl) return; - list_move(&inst->alg.cra_list, list); + list_del_init(&inst->alg.cra_list); hlist_del(&inst->list); - inst->alg.cra_destroy = crypto_destroy_instance; + hlist_add_head(&inst->list, &tmpl->dead); BUG_ON(!list_empty(&inst->alg.cra_users)); + + crypto_alg_put(&inst->alg); } +/* + * Given an algorithm alg, remove all algorithms that depend on it + * through spawns. If nalg is not null, then exempt any algorithms + * that is depended on by nalg. This is useful when nalg itself + * depends on alg. + */ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, struct crypto_alg *nalg) { @@ -163,6 +180,11 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, list_move(&spawn->list, &top); } + /* + * Perform a depth-first walk starting from alg through + * the cra_users tree. The list stack records the path + * from alg to the current spawn. + */ spawns = ⊤ do { while (!list_empty(spawns)) { @@ -172,17 +194,26 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, list); inst = spawn->inst; - BUG_ON(&inst->alg == alg); - list_move(&spawn->list, &stack); + spawn->dead = !spawn->registered || &inst->alg != nalg; + + if (!spawn->registered) + break; + + BUG_ON(&inst->alg == alg); if (&inst->alg == nalg) break; - spawn->alg = NULL; spawns = &inst->alg.cra_users; /* + * Even if spawn->registered is true, the + * instance itself may still be unregistered. + * This is because it may have failed during + * registration. Therefore we still need to + * make the following test. + * * We may encounter an unregistered instance here, since * an instance's spawns are set up prior to the instance * being registered. An unregistered instance will have @@ -197,16 +228,77 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, } while ((spawns = crypto_more_spawns(alg, &stack, &top, &secondary_spawns))); + /* + * Remove all instances that are marked as dead. Also + * complete the resurrection of the others by moving them + * back to the cra_users list. 
+ */ list_for_each_entry_safe(spawn, n, &secondary_spawns, list) { - if (spawn->alg) + if (!spawn->dead) list_move(&spawn->list, &spawn->alg->cra_users); - else + else if (spawn->registered) crypto_remove_instance(spawn->inst, list); } } EXPORT_SYMBOL_GPL(crypto_remove_spawns); -static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg) +static void crypto_alg_finish_registration(struct crypto_alg *alg, + struct list_head *algs_to_put) +{ + struct crypto_alg *q; + + list_for_each_entry(q, &crypto_alg_list, cra_list) { + if (q == alg) + continue; + + if (crypto_is_moribund(q)) + continue; + + if (crypto_is_larval(q)) + continue; + + if (strcmp(alg->cra_name, q->cra_name)) + continue; + + if (strcmp(alg->cra_driver_name, q->cra_driver_name) && + q->cra_priority > alg->cra_priority) + continue; + + crypto_remove_spawns(q, algs_to_put, alg); + } + + crypto_notify(CRYPTO_MSG_ALG_LOADED, alg); +} + +static struct crypto_larval *crypto_alloc_test_larval(struct crypto_alg *alg) +{ + struct crypto_larval *larval; + + if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) || + (alg->cra_flags & CRYPTO_ALG_INTERNAL)) + return NULL; /* No self-test needed */ + + larval = crypto_larval_alloc(alg->cra_name, + alg->cra_flags | CRYPTO_ALG_TESTED, 0); + if (IS_ERR(larval)) + return larval; + + larval->adult = crypto_mod_get(alg); + if (!larval->adult) { + kfree(larval); + return ERR_PTR(-ENOENT); + } + + refcount_set(&larval->alg.cra_refcnt, 1); + memcpy(larval->alg.cra_driver_name, alg->cra_driver_name, + CRYPTO_MAX_ALG_NAME); + larval->alg.cra_priority = alg->cra_priority; + + return larval; +} + +static struct crypto_larval * +__crypto_register_alg(struct crypto_alg *alg, struct list_head *algs_to_put) { struct crypto_alg *q; struct crypto_larval *larval; @@ -217,9 +309,6 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg) INIT_LIST_HEAD(&alg->cra_users); - /* No cheating! */ - alg->cra_flags &= ~CRYPTO_ALG_TESTED; - ret = -EEXIST; list_for_each_entry(q, &crypto_alg_list, cra_list) { @@ -236,35 +325,30 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg) } if (!strcmp(q->cra_driver_name, alg->cra_name) || + !strcmp(q->cra_driver_name, alg->cra_driver_name) || !strcmp(q->cra_name, alg->cra_driver_name)) goto err; } - larval = crypto_larval_alloc(alg->cra_name, - alg->cra_flags | CRYPTO_ALG_TESTED, 0); + larval = crypto_alloc_test_larval(alg); if (IS_ERR(larval)) goto out; - ret = -ENOENT; - larval->adult = crypto_mod_get(alg); - if (!larval->adult) - goto free_larval; - - refcount_set(&larval->alg.cra_refcnt, 1); - memcpy(larval->alg.cra_driver_name, alg->cra_driver_name, - CRYPTO_MAX_ALG_NAME); - larval->alg.cra_priority = alg->cra_priority; - list_add(&alg->cra_list, &crypto_alg_list); - list_add(&larval->alg.cra_list, &crypto_alg_list); - crypto_stats_init(alg); + if (larval) { + /* No cheating! 
*/ + alg->cra_flags &= ~CRYPTO_ALG_TESTED; + + list_add(&larval->alg.cra_list, &crypto_alg_list); + } else { + alg->cra_flags |= CRYPTO_ALG_TESTED; + crypto_alg_finish_registration(alg, algs_to_put); + } out: return larval; -free_larval: - kfree(larval); err: larval = ERR_PTR(ret); goto out; @@ -289,62 +373,34 @@ void crypto_alg_tested(const char *name, int err) } pr_err("alg: Unexpected test result for %s: %d\n", name, err); - goto unlock; + up_write(&crypto_alg_sem); + return; found: q->cra_flags |= CRYPTO_ALG_DEAD; alg = test->adult; - if (err || list_empty(&alg->cra_list)) - goto complete; - - alg->cra_flags |= CRYPTO_ALG_TESTED; - - list_for_each_entry(q, &crypto_alg_list, cra_list) { - if (q == alg) - continue; - - if (crypto_is_moribund(q)) - continue; - - if (crypto_is_larval(q)) { - struct crypto_larval *larval = (void *)q; - /* - * Check to see if either our generic name or - * specific name can satisfy the name requested - * by the larval entry q. - */ - if (strcmp(alg->cra_name, q->cra_name) && - strcmp(alg->cra_driver_name, q->cra_name)) - continue; - - if (larval->adult) - continue; - if ((q->cra_flags ^ alg->cra_flags) & larval->mask) - continue; - if (!crypto_mod_get(alg)) - continue; - - larval->adult = alg; - continue; - } + if (crypto_is_dead(alg)) + goto complete; - if (strcmp(alg->cra_name, q->cra_name)) - continue; + if (err == -ECANCELED) + alg->cra_flags |= CRYPTO_ALG_FIPS_INTERNAL; + else if (err) + goto complete; + else + alg->cra_flags &= ~CRYPTO_ALG_FIPS_INTERNAL; - if (strcmp(alg->cra_driver_name, q->cra_driver_name) && - q->cra_priority > alg->cra_priority) - continue; + alg->cra_flags |= CRYPTO_ALG_TESTED; - crypto_remove_spawns(q, &list, alg); - } + crypto_alg_finish_registration(alg, &list); complete: + list_del_init(&test->alg.cra_list); complete_all(&test->completion); -unlock: up_write(&crypto_alg_sem); + crypto_alg_put(&test->alg); crypto_remove_final(&list); } EXPORT_SYMBOL_GPL(crypto_alg_tested); @@ -361,29 +417,20 @@ void crypto_remove_final(struct list_head *list) } EXPORT_SYMBOL_GPL(crypto_remove_final); -static void crypto_wait_for_test(struct crypto_larval *larval) +static void crypto_free_alg(struct crypto_alg *alg) { - int err; - - err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult); - if (err != NOTIFY_STOP) { - if (WARN_ON(err != NOTIFY_DONE)) - goto out; - crypto_alg_tested(larval->alg.cra_driver_name, 0); - } - - err = wait_for_completion_killable(&larval->completion); - WARN_ON(err); - if (!err) - crypto_probing_notify(CRYPTO_MSG_ALG_LOADED, larval); + unsigned int algsize = alg->cra_type->algsize; + u8 *p = (u8 *)alg - algsize; -out: - crypto_larval_kill(&larval->alg); + crypto_destroy_alg(alg); + kfree(p); } int crypto_register_alg(struct crypto_alg *alg) { struct crypto_larval *larval; + bool test_started = false; + LIST_HEAD(algs_to_put); int err; alg->cra_flags &= ~CRYPTO_ALG_DEAD; @@ -391,14 +438,37 @@ int crypto_register_alg(struct crypto_alg *alg) if (err) return err; + if (alg->cra_flags & CRYPTO_ALG_DUP_FIRST && + !WARN_ON_ONCE(alg->cra_destroy)) { + unsigned int algsize = alg->cra_type->algsize; + u8 *p = (u8 *)alg - algsize; + + p = kmemdup(p, algsize + sizeof(*alg), GFP_KERNEL); + if (!p) + return -ENOMEM; + + alg = (void *)(p + algsize); + alg->cra_destroy = crypto_free_alg; + } + down_write(&crypto_alg_sem); - larval = __crypto_register_alg(alg); + larval = __crypto_register_alg(alg, &algs_to_put); + if (!IS_ERR_OR_NULL(larval)) { + test_started = crypto_boot_test_finished(); + larval->test_started = 
test_started; + } up_write(&crypto_alg_sem); - if (IS_ERR(larval)) + if (IS_ERR(larval)) { + crypto_alg_put(alg); return PTR_ERR(larval); + } + + if (test_started) + crypto_schedule_test(larval); + else + crypto_remove_final(&algs_to_put); - crypto_wait_for_test(larval); return 0; } EXPORT_SYMBOL_GPL(crypto_register_alg); @@ -416,7 +486,7 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list) return 0; } -int crypto_unregister_alg(struct crypto_alg *alg) +void crypto_unregister_alg(struct crypto_alg *alg) { int ret; LIST_HEAD(list); @@ -425,15 +495,13 @@ int crypto_unregister_alg(struct crypto_alg *alg) ret = crypto_remove_alg(alg, &list); up_write(&crypto_alg_sem); - if (ret) - return ret; + if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name)) + return; - BUG_ON(refcount_read(&alg->cra_refcnt) != 1); - if (alg->cra_destroy) - alg->cra_destroy(alg); + WARN_ON(!alg->cra_destroy && refcount_read(&alg->cra_refcnt) != 1); + list_add(&alg->cra_list, &list); crypto_remove_final(&list); - return 0; } EXPORT_SYMBOL_GPL(crypto_unregister_alg); @@ -457,18 +525,12 @@ err: } EXPORT_SYMBOL_GPL(crypto_register_algs); -int crypto_unregister_algs(struct crypto_alg *algs, int count) +void crypto_unregister_algs(struct crypto_alg *algs, int count) { - int i, ret; + int i; - for (i = 0; i < count; i++) { - ret = crypto_unregister_alg(&algs[i]); - if (ret) - pr_err("Failed to unregister %s %s: %d\n", - algs[i].cra_driver_name, algs[i].cra_name, ret); - } - - return 0; + for (i = 0; i < count; i++) + crypto_unregister_alg(&algs[i]); } EXPORT_SYMBOL_GPL(crypto_unregister_algs); @@ -477,6 +539,8 @@ int crypto_register_template(struct crypto_template *tmpl) struct crypto_template *q; int err = -EEXIST; + INIT_WORK(&tmpl->free_work, crypto_destroy_instance_workfn); + down_write(&crypto_alg_sem); crypto_check_module_sig(tmpl->module); @@ -494,6 +558,24 @@ out: } EXPORT_SYMBOL_GPL(crypto_register_template); +int crypto_register_templates(struct crypto_template *tmpls, int count) +{ + int i, err; + + for (i = 0; i < count; i++) { + err = crypto_register_template(&tmpls[i]); + if (err) + goto out; + } + return 0; + +out: + for (--i; i >= 0; --i) + crypto_unregister_template(&tmpls[i]); + return err; +} +EXPORT_SYMBOL_GPL(crypto_register_templates); + void crypto_unregister_template(struct crypto_template *tmpl) { struct crypto_instance *inst; @@ -520,9 +602,20 @@ void crypto_unregister_template(struct crypto_template *tmpl) crypto_free_instance(inst); } crypto_remove_final(&users); + + flush_work(&tmpl->free_work); } EXPORT_SYMBOL_GPL(crypto_unregister_template); +void crypto_unregister_templates(struct crypto_template *tmpls, int count) +{ + int i; + + for (i = count - 1; i >= 0; --i) + crypto_unregister_template(&tmpls[i]); +} +EXPORT_SYMBOL_GPL(crypto_unregister_templates); + static struct crypto_template *__crypto_lookup_template(const char *name) { struct crypto_template *q, *tmpl = NULL; @@ -553,6 +646,9 @@ int crypto_register_instance(struct crypto_template *tmpl, struct crypto_instance *inst) { struct crypto_larval *larval; + struct crypto_spawn *spawn; + u32 fips_internal = 0; + LIST_HEAD(algs_to_put); int err; err = crypto_check_alg(&inst->alg); @@ -561,12 +657,35 @@ int crypto_register_instance(struct crypto_template *tmpl, inst->alg.cra_module = tmpl->module; inst->alg.cra_flags |= CRYPTO_ALG_INSTANCE; + inst->alg.cra_destroy = crypto_destroy_instance; down_write(&crypto_alg_sem); - larval = __crypto_register_alg(&inst->alg); + larval = ERR_PTR(-EAGAIN); + for 
(spawn = inst->spawns; spawn;) { + struct crypto_spawn *next; + + if (spawn->dead) + goto unlock; + + next = spawn->next; + spawn->inst = inst; + spawn->registered = true; + + fips_internal |= spawn->alg->cra_flags; + + crypto_mod_put(spawn->alg); + + spawn = next; + } + + inst->alg.cra_flags |= (fips_internal & CRYPTO_ALG_FIPS_INTERNAL); + + larval = __crypto_register_alg(&inst->alg, &algs_to_put); if (IS_ERR(larval)) goto unlock; + else if (larval) + larval->test_started = true; hlist_add_head(&inst->list, &tmpl->instances); inst->tmpl = tmpl; @@ -574,19 +693,19 @@ int crypto_register_instance(struct crypto_template *tmpl, unlock: up_write(&crypto_alg_sem); - err = PTR_ERR(larval); if (IS_ERR(larval)) - goto err; + return PTR_ERR(larval); - crypto_wait_for_test(larval); - err = 0; + if (larval) + crypto_schedule_test(larval); + else + crypto_remove_final(&algs_to_put); -err: - return err; + return 0; } EXPORT_SYMBOL_GPL(crypto_register_instance); -int crypto_unregister_instance(struct crypto_instance *inst) +void crypto_unregister_instance(struct crypto_instance *inst) { LIST_HEAD(list); @@ -598,91 +717,80 @@ int crypto_unregister_instance(struct crypto_instance *inst) up_write(&crypto_alg_sem); crypto_remove_final(&list); - - return 0; } EXPORT_SYMBOL_GPL(crypto_unregister_instance); -int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, - struct crypto_instance *inst, u32 mask) +int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst, + const char *name, u32 type, u32 mask) { + struct crypto_alg *alg; int err = -EAGAIN; - spawn->inst = inst; - spawn->mask = mask; + if (WARN_ON_ONCE(inst == NULL)) + return -EINVAL; + + /* Allow the result of crypto_attr_alg_name() to be passed directly */ + if (IS_ERR(name)) + return PTR_ERR(name); + + alg = crypto_find_alg(name, spawn->frontend, + type | CRYPTO_ALG_FIPS_INTERNAL, mask); + if (IS_ERR(alg)) + return PTR_ERR(alg); down_write(&crypto_alg_sem); if (!crypto_is_moribund(alg)) { list_add(&spawn->list, &alg->cra_users); spawn->alg = alg; + spawn->mask = mask; + spawn->next = inst->spawns; + inst->spawns = spawn; + inst->alg.cra_flags |= + (alg->cra_flags & CRYPTO_ALG_INHERITED_FLAGS); err = 0; } up_write(&crypto_alg_sem); - - return err; -} -EXPORT_SYMBOL_GPL(crypto_init_spawn); - -int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, - struct crypto_instance *inst, - const struct crypto_type *frontend) -{ - int err = -EINVAL; - - if ((alg->cra_flags ^ frontend->type) & frontend->maskset) - goto out; - - spawn->frontend = frontend; - err = crypto_init_spawn(spawn, alg, inst, frontend->maskset); - -out: - return err; -} -EXPORT_SYMBOL_GPL(crypto_init_spawn2); - -int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name, - u32 type, u32 mask) -{ - struct crypto_alg *alg; - int err; - - alg = crypto_find_alg(name, spawn->frontend, type, mask); - if (IS_ERR(alg)) - return PTR_ERR(alg); - - err = crypto_init_spawn(spawn, alg, spawn->inst, mask); - crypto_mod_put(alg); + if (err) + crypto_mod_put(alg); return err; } EXPORT_SYMBOL_GPL(crypto_grab_spawn); void crypto_drop_spawn(struct crypto_spawn *spawn) { - if (!spawn->alg) + if (!spawn->alg) /* not yet initialized? 
*/ return; down_write(&crypto_alg_sem); - list_del(&spawn->list); + if (!spawn->dead) + list_del(&spawn->list); up_write(&crypto_alg_sem); + + if (!spawn->registered) + crypto_mod_put(spawn->alg); } EXPORT_SYMBOL_GPL(crypto_drop_spawn); static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn) { - struct crypto_alg *alg; - struct crypto_alg *alg2; + struct crypto_alg *alg = ERR_PTR(-EAGAIN); + struct crypto_alg *target; + bool shoot = false; down_read(&crypto_alg_sem); - alg = spawn->alg; - alg2 = alg; - if (alg2) - alg2 = crypto_mod_get(alg2); + if (!spawn->dead) { + alg = spawn->alg; + if (!crypto_mod_get(alg)) { + target = crypto_alg_get(alg); + shoot = true; + alg = ERR_PTR(-EAGAIN); + } + } up_read(&crypto_alg_sem); - if (!alg2) { - if (alg) - crypto_shoot_alg(alg); - return ERR_PTR(-EAGAIN); + if (shoot) { + crypto_shoot_alg(target); + crypto_alg_put(target); } return alg; @@ -765,7 +873,23 @@ struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb) } EXPORT_SYMBOL_GPL(crypto_get_attr_type); -int crypto_check_attr_type(struct rtattr **tb, u32 type) +/** + * crypto_check_attr_type() - check algorithm type and compute inherited mask + * @tb: the template parameters + * @type: the algorithm type the template would be instantiated as + * @mask_ret: (output) the mask that should be passed to crypto_grab_*() + * to restrict the flags of any inner algorithms + * + * Validate that the algorithm type the user requested is compatible with the + * one the template would actually be instantiated as. E.g., if the user is + * doing crypto_alloc_shash("cbc(aes)", ...), this would return an error because + * the "cbc" template creates an "skcipher" algorithm, not an "shash" algorithm. + * + * Also compute the mask to use to restrict the flags of any inner algorithms. 
+ * + * Return: 0 on success; -errno on failure + */ +int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret) { struct crypto_attr_type *algt; @@ -776,6 +900,7 @@ int crypto_check_attr_type(struct rtattr **tb, u32 type) if ((algt->type ^ type) & algt->mask) return -EINVAL; + *mask_ret = crypto_algt_inherited_mask(algt); return 0; } EXPORT_SYMBOL_GPL(crypto_check_attr_type); @@ -798,107 +923,20 @@ const char *crypto_attr_alg_name(struct rtattr *rta) } EXPORT_SYMBOL_GPL(crypto_attr_alg_name); -struct crypto_alg *crypto_attr_alg2(struct rtattr *rta, - const struct crypto_type *frontend, - u32 type, u32 mask) -{ - const char *name; - - name = crypto_attr_alg_name(rta); - if (IS_ERR(name)) - return ERR_CAST(name); - - return crypto_find_alg(name, frontend, type, mask); -} -EXPORT_SYMBOL_GPL(crypto_attr_alg2); - -int crypto_attr_u32(struct rtattr *rta, u32 *num) -{ - struct crypto_attr_u32 *nu32; - - if (!rta) - return -ENOENT; - if (RTA_PAYLOAD(rta) < sizeof(*nu32)) - return -EINVAL; - if (rta->rta_type != CRYPTOA_U32) - return -EINVAL; - - nu32 = RTA_DATA(rta); - *num = nu32->num; - - return 0; -} -EXPORT_SYMBOL_GPL(crypto_attr_u32); - -int crypto_inst_setname(struct crypto_instance *inst, const char *name, - struct crypto_alg *alg) +int __crypto_inst_setname(struct crypto_instance *inst, const char *name, + const char *driver, struct crypto_alg *alg) { if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name, alg->cra_name) >= CRYPTO_MAX_ALG_NAME) return -ENAMETOOLONG; if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", - name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + driver, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) return -ENAMETOOLONG; return 0; } -EXPORT_SYMBOL_GPL(crypto_inst_setname); - -void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, - unsigned int head) -{ - struct crypto_instance *inst; - char *p; - int err; - - p = kzalloc(head + sizeof(*inst) + sizeof(struct crypto_spawn), - GFP_KERNEL); - if (!p) - return ERR_PTR(-ENOMEM); - - inst = (void *)(p + head); - - err = crypto_inst_setname(inst, name, alg); - if (err) - goto err_free_inst; - - return p; - -err_free_inst: - kfree(p); - return ERR_PTR(err); -} -EXPORT_SYMBOL_GPL(crypto_alloc_instance2); - -struct crypto_instance *crypto_alloc_instance(const char *name, - struct crypto_alg *alg) -{ - struct crypto_instance *inst; - struct crypto_spawn *spawn; - int err; - - inst = crypto_alloc_instance2(name, alg, 0); - if (IS_ERR(inst)) - goto out; - - spawn = crypto_instance_ctx(inst); - err = crypto_init_spawn(spawn, alg, inst, - CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); - - if (err) - goto err_free_inst; - - return inst; - -err_free_inst: - kfree(inst); - inst = ERR_PTR(err); - -out: - return inst; -} -EXPORT_SYMBOL_GPL(crypto_alloc_instance); +EXPORT_SYMBOL_GPL(__crypto_inst_setname); void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen) { @@ -932,6 +970,17 @@ out: } EXPORT_SYMBOL_GPL(crypto_enqueue_request); +void crypto_enqueue_request_head(struct crypto_queue *queue, + struct crypto_async_request *request) +{ + if (unlikely(queue->qlen >= queue->max_qlen)) + queue->backlog = queue->backlog->prev; + + queue->qlen++; + list_add(&request->list, &queue->list); +} +EXPORT_SYMBOL_GPL(crypto_enqueue_request_head); + struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue) { struct list_head *request; @@ -945,25 +994,12 @@ struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue) queue->backlog = 
queue->backlog->next; request = queue->list.next; - list_del(request); + list_del_init(request); return list_entry(request, struct crypto_async_request, list); } EXPORT_SYMBOL_GPL(crypto_dequeue_request); -int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm) -{ - struct crypto_async_request *req; - - list_for_each_entry(req, &queue->list, list) { - if (req->tfm == tfm) - return 1; - } - - return 0; -} -EXPORT_SYMBOL_GPL(crypto_tfm_in_queue); - static inline void crypto_inc_byte(u8 *a, unsigned int size) { u8 *b = (a + size); @@ -995,59 +1031,6 @@ void crypto_inc(u8 *a, unsigned int size) } EXPORT_SYMBOL_GPL(crypto_inc); -void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len) -{ - int relalign = 0; - - if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { - int size = sizeof(unsigned long); - int d = (((unsigned long)dst ^ (unsigned long)src1) | - ((unsigned long)dst ^ (unsigned long)src2)) & - (size - 1); - - relalign = d ? 1 << __ffs(d) : size; - - /* - * If we care about alignment, process as many bytes as - * needed to advance dst and src to values whose alignments - * equal their relative alignment. This will allow us to - * process the remainder of the input using optimal strides. - */ - while (((unsigned long)dst & (relalign - 1)) && len > 0) { - *dst++ = *src1++ ^ *src2++; - len--; - } - } - - while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) { - *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2; - dst += 8; - src1 += 8; - src2 += 8; - len -= 8; - } - - while (len >= 4 && !(relalign & 3)) { - *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2; - dst += 4; - src1 += 4; - src2 += 4; - len -= 4; - } - - while (len >= 2 && !(relalign & 1)) { - *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2; - dst += 2; - src1 += 2; - src2 += 2; - len -= 2; - } - - while (len--) - *dst++ = *src1++ ^ *src2++; -} -EXPORT_SYMBOL_GPL(__crypto_xor); - unsigned int crypto_alg_extsize(struct crypto_alg *alg) { return alg->cra_ctxsize + @@ -1070,248 +1053,54 @@ int crypto_type_has_alg(const char *name, const struct crypto_type *frontend, } EXPORT_SYMBOL_GPL(crypto_type_has_alg); -#ifdef CONFIG_CRYPTO_STATS -void crypto_stats_init(struct crypto_alg *alg) -{ - memset(&alg->stats, 0, sizeof(alg->stats)); -} -EXPORT_SYMBOL_GPL(crypto_stats_init); - -void crypto_stats_get(struct crypto_alg *alg) +static void __init crypto_start_tests(void) { - crypto_alg_get(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_get); - -void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, - struct crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.cipher.err_cnt); - } else { - atomic64_inc(&alg->stats.cipher.encrypt_cnt); - atomic64_add(nbytes, &alg->stats.cipher.encrypt_tlen); - } - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_ablkcipher_encrypt); - -void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, - struct crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.cipher.err_cnt); - } else { - atomic64_inc(&alg->stats.cipher.decrypt_cnt); - atomic64_add(nbytes, &alg->stats.cipher.decrypt_tlen); - } - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_ablkcipher_decrypt); - -void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, - int ret) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.aead.err_cnt); - } else { - atomic64_inc(&alg->stats.aead.encrypt_cnt); - atomic64_add(cryptlen, 
&alg->stats.aead.encrypt_tlen); - } - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_aead_encrypt); + if (!IS_BUILTIN(CONFIG_CRYPTO_ALGAPI)) + return; -void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, - int ret) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.aead.err_cnt); - } else { - atomic64_inc(&alg->stats.aead.decrypt_cnt); - atomic64_add(cryptlen, &alg->stats.aead.decrypt_tlen); - } - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_aead_decrypt); + if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)) + return; -void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, - struct crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.akcipher.err_cnt); - } else { - atomic64_inc(&alg->stats.akcipher.encrypt_cnt); - atomic64_add(src_len, &alg->stats.akcipher.encrypt_tlen); - } - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_akcipher_encrypt); + set_crypto_boot_test_finished(); -void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, - struct crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.akcipher.err_cnt); - } else { - atomic64_inc(&alg->stats.akcipher.decrypt_cnt); - atomic64_add(src_len, &alg->stats.akcipher.decrypt_tlen); - } - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_akcipher_decrypt); + for (;;) { + struct crypto_larval *larval = NULL; + struct crypto_alg *q; -void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic64_inc(&alg->stats.akcipher.err_cnt); - else - atomic64_inc(&alg->stats.akcipher.sign_cnt); - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_akcipher_sign); + down_write(&crypto_alg_sem); -void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic64_inc(&alg->stats.akcipher.err_cnt); - else - atomic64_inc(&alg->stats.akcipher.verify_cnt); - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_akcipher_verify); + list_for_each_entry(q, &crypto_alg_list, cra_list) { + struct crypto_larval *l; -void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.compress.err_cnt); - } else { - atomic64_inc(&alg->stats.compress.compress_cnt); - atomic64_add(slen, &alg->stats.compress.compress_tlen); - } - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_compress); - -void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.compress.err_cnt); - } else { - atomic64_inc(&alg->stats.compress.decompress_cnt); - atomic64_add(slen, &alg->stats.compress.decompress_tlen); - } - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_decompress); - -void crypto_stats_ahash_update(unsigned int nbytes, int ret, - struct crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic64_inc(&alg->stats.hash.err_cnt); - else - atomic64_add(nbytes, &alg->stats.hash.hash_tlen); - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_ahash_update); - -void crypto_stats_ahash_final(unsigned int nbytes, int ret, - struct crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.hash.err_cnt); - } else { - atomic64_inc(&alg->stats.hash.hash_cnt); - atomic64_add(nbytes, 
&alg->stats.hash.hash_tlen); - } - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_ahash_final); + if (!crypto_is_larval(q)) + continue; -void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret) -{ - if (ret) - atomic64_inc(&alg->stats.kpp.err_cnt); - else - atomic64_inc(&alg->stats.kpp.setsecret_cnt); - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_kpp_set_secret); + l = (void *)q; -void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret) -{ - if (ret) - atomic64_inc(&alg->stats.kpp.err_cnt); - else - atomic64_inc(&alg->stats.kpp.generate_public_key_cnt); - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_kpp_generate_public_key); + if (!crypto_is_test_larval(l)) + continue; -void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret) -{ - if (ret) - atomic64_inc(&alg->stats.kpp.err_cnt); - else - atomic64_inc(&alg->stats.kpp.compute_shared_secret_cnt); - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_kpp_compute_shared_secret); + if (l->test_started) + continue; -void crypto_stats_rng_seed(struct crypto_alg *alg, int ret) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic64_inc(&alg->stats.rng.err_cnt); - else - atomic64_inc(&alg->stats.rng.seed_cnt); - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_rng_seed); + l->test_started = true; + larval = l; + break; + } -void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, - int ret) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.rng.err_cnt); - } else { - atomic64_inc(&alg->stats.rng.generate_cnt); - atomic64_add(dlen, &alg->stats.rng.generate_tlen); - } - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_rng_generate); + up_write(&crypto_alg_sem); -void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, - struct crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.cipher.err_cnt); - } else { - atomic64_inc(&alg->stats.cipher.encrypt_cnt); - atomic64_add(cryptlen, &alg->stats.cipher.encrypt_tlen); - } - crypto_alg_put(alg); -} -EXPORT_SYMBOL_GPL(crypto_stats_skcipher_encrypt); + if (!larval) + break; -void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, - struct crypto_alg *alg) -{ - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.cipher.err_cnt); - } else { - atomic64_inc(&alg->stats.cipher.decrypt_cnt); - atomic64_add(cryptlen, &alg->stats.cipher.decrypt_tlen); + crypto_schedule_test(larval); } - crypto_alg_put(alg); } -EXPORT_SYMBOL_GPL(crypto_stats_skcipher_decrypt); -#endif static int __init crypto_algapi_init(void) { crypto_init_proc(); + crypto_start_tests(); return 0; } @@ -1320,8 +1109,13 @@ static void __exit crypto_algapi_exit(void) crypto_exit_proc(); } -module_init(crypto_algapi_init); +/* + * We run this at late_initcall so that all the built-in algorithms + * have had a chance to register themselves first. + */ +late_initcall(crypto_algapi_init); module_exit(crypto_algapi_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cryptographic algorithms API"); +MODULE_SOFTDEP("pre: cryptomgr"); |
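
Two of the changes above affect every driver that registers a crypto_alg directly: crypto_set_driver_name() is gone, so crypto_check_alg() now rejects algorithms that do not set cra_driver_name explicitly, and crypto_unregister_alg() returns void and only WARNs on a bogus unregister. A minimal registration sketch under those rules; the "example"/"example-generic" algorithm, example_ctx, and the toy XOR transform are hypothetical illustrations, not code from this commit.

```c
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/string.h>

struct example_ctx {
	u8 key[16];
};

static int example_setkey(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct example_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != sizeof(ctx->key))
		return -EINVAL;
	memcpy(ctx->key, key, keylen);
	return 0;
}

/* Toy transform (XOR with the key), purely to make the sketch complete. */
static void example_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct example_ctx *ctx = crypto_tfm_ctx(tfm);
	int i;

	for (i = 0; i < 16; i++)
		dst[i] = src[i] ^ ctx->key[i];
}

static struct crypto_alg example_alg = {
	.cra_name		= "example",		/* name users request */
	.cra_driver_name	= "example-generic",	/* now mandatory */
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= 16,
	.cra_ctxsize		= sizeof(struct example_ctx),
	.cra_module		= THIS_MODULE,
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= 16,
			.cia_max_keysize	= 16,
			.cia_setkey		= example_setkey,
			.cia_encrypt		= example_crypt,
			.cia_decrypt		= example_crypt,
		}
	}
};

static int __init example_mod_init(void)
{
	return crypto_register_alg(&example_alg);
}

static void __exit example_mod_exit(void)
{
	/* Void now: unregistering something unknown triggers a WARN. */
	crypto_unregister_alg(&example_alg);
}

module_init(example_mod_init);
module_exit(example_mod_exit);
MODULE_LICENSE("GPL");
```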
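
The new teardown path above, where crypto_destroy_instance() parks the instance and kicks tmpl->free_work and crypto_destroy_instance_workfn() does the sweep, is an instance of the usual defer-to-workqueue pattern: the final put may run in a context that cannot take crypto_alg_sem or free the object, so it only queues the object and schedules a work item. Below is a stripped-down, self-contained sketch of that pattern, assuming a hypothetical dead_obj type; it is not the kernel's actual implementation.

```c
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct dead_obj {
	struct list_head list;
	/* ... payload that must not be torn down in atomic context ... */
};

static LIST_HEAD(dead_list);
static DEFINE_SPINLOCK(dead_lock);

static void reap_workfn(struct work_struct *work)
{
	struct dead_obj *obj, *n;
	LIST_HEAD(reap);

	/* Grab everything queued so far; the real code instead walks
	 * tmpl->dead under crypto_alg_sem. */
	spin_lock(&dead_lock);
	list_splice_init(&dead_list, &reap);
	spin_unlock(&dead_lock);

	/* Process context: safe to take sleeping locks and run destructors. */
	list_for_each_entry_safe(obj, n, &reap, list)
		kfree(obj);
}

static DECLARE_WORK(reap_work, reap_workfn);

static void obj_release(struct dead_obj *obj)
{
	/* Called from the final put, possibly in a restricted context:
	 * only queue the object and defer the teardown to the work item. */
	spin_lock(&dead_lock);
	list_add_tail(&obj->list, &dead_list);
	spin_unlock(&dead_lock);

	schedule_work(&reap_work);
}
```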
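
The template-side changes above fit together as follows: crypto_check_attr_type() now hands back the inherited-flags mask, crypto_grab_spawn() binds the spawn to its instance (threading it onto inst->spawns) and accepts an ERR_PTR name directly, and crypto_register_templates()/crypto_unregister_templates() handle whole arrays with unwind on failure. A simplified sketch of a hypothetical template using these entry points; the instance setup and error handling are abbreviated, and none of the "example" identifiers come from this commit.

```c
#include <crypto/algapi.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_spawn *spawn;
	u32 mask;
	int err;

	/* Reject type requests this template cannot satisfy and compute the
	 * mask used to restrict the inner algorithm's inherited flags. */
	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_CIPHER, &mask);
	if (err)
		return err;

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;
	spawn = crypto_instance_ctx(inst);

	/* crypto_attr_alg_name() may return an ERR_PTR(); the reworked
	 * crypto_grab_spawn() takes it as-is and propagates the error. */
	err = crypto_grab_spawn(spawn, inst, crypto_attr_alg_name(tb[1]),
				CRYPTO_ALG_TYPE_CIPHER,
				CRYPTO_ALG_TYPE_MASK | mask);
	if (err)
		goto err_free_inst;

	err = crypto_inst_setname(inst, tmpl->name, spawn->alg);
	if (err)
		goto err_free_inst;

	/* A real template would now fill in the rest of inst->alg
	 * (cra_flags, cra_blocksize, cra_ctxsize, cra_type, priority, ...). */

	err = crypto_register_instance(tmpl, inst);
	if (err)
		goto err_free_inst;
	return 0;

err_free_inst:
	crypto_drop_spawn(spawn);	/* no-op if the spawn was never grabbed */
	kfree(inst);
	return err;
}

static struct crypto_template example_tmpls[] = {
	{
		.name	= "example",
		.create	= example_create,
		.module	= THIS_MODULE,
	},
	/* further templates would go here; one failure unwinds them all */
};

static int __init example_tmpl_init(void)
{
	return crypto_register_templates(example_tmpls,
					 ARRAY_SIZE(example_tmpls));
}

static void __exit example_tmpl_exit(void)
{
	crypto_unregister_templates(example_tmpls, ARRAY_SIZE(example_tmpls));
}

module_init(example_tmpl_init);
module_exit(example_tmpl_exit);
MODULE_LICENSE("GPL");
```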
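
crypto_enqueue_request_head(), added above, is the counterpart to crypto_enqueue_request() for drivers that must push a request back to the front of a crypto_queue, for example after a transient hardware failure. A small usage sketch; the crypto_queue helpers are real, everything named "example" is hypothetical, and the caller is assumed to provide its own locking around the queue.

```c
#include <crypto/algapi.h>

static struct crypto_queue example_queue;

static void example_queue_setup(void)
{
	/* Up to 32 requests; beyond that, submissions are backlogged. */
	crypto_init_queue(&example_queue, 32);
}

static int example_submit(struct crypto_async_request *req)
{
	/* Tail insert: returns -EINPROGRESS, -EBUSY (backlogged), or
	 * -ENOSPC when the queue is full and backlog is not allowed. */
	return crypto_enqueue_request(&example_queue, req);
}

static void example_requeue_front(struct crypto_async_request *req)
{
	/* Head insert: this request will be the next one handed out by
	 * crypto_dequeue_request(), e.g. to retry it after a failure. */
	crypto_enqueue_request_head(&example_queue, req);
}
```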
