Diffstat (limited to 'include/crypto/algapi.h')
 -rw-r--r--  include/crypto/algapi.h | 166
 1 file changed, 96 insertions(+), 70 deletions(-)
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index f76ec723ceae..05deea9dac5e 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -7,11 +7,13 @@
 #ifndef _CRYPTO_ALGAPI_H
 #define _CRYPTO_ALGAPI_H
 
+#include <crypto/utils.h>
 #include <linux/align.h>
+#include <linux/cache.h>
 #include <linux/crypto.h>
-#include <linux/kconfig.h>
 #include <linux/list.h>
 #include <linux/types.h>
+#include <linux/workqueue.h>
 
 /*
  * Maximum values for blocksize and alignmask, used to allocate
@@ -19,32 +21,40 @@
  * algs and architectures. Ciphers have a lower maximum size.
  */
 #define MAX_ALGAPI_BLOCKSIZE		160
-#define MAX_ALGAPI_ALIGNMASK		63
+#define MAX_ALGAPI_ALIGNMASK		127
 #define MAX_CIPHER_BLOCKSIZE		16
 #define MAX_CIPHER_ALIGNMASK		15
 
+#ifdef ARCH_DMA_MINALIGN
+#define CRYPTO_DMA_ALIGN ARCH_DMA_MINALIGN
+#else
+#define CRYPTO_DMA_ALIGN CRYPTO_MINALIGN
+#endif
+
+#define CRYPTO_DMA_PADDING	((CRYPTO_DMA_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
+
+/*
+ * Autoloaded crypto modules should only use a prefixed name to avoid allowing
+ * arbitrary modules to be loaded. Loading from userspace may still need the
+ * unprefixed names, so retains those aliases as well.
+ * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
+ * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
+ * expands twice on the same line. Instead, use a separate base name for the
+ * alias.
+ */
+#define MODULE_ALIAS_CRYPTO(name)	\
+		MODULE_INFO(alias, name);	\
+		MODULE_INFO(alias, "crypto-" name)
+
 struct crypto_aead;
 struct crypto_instance;
 struct module;
 struct notifier_block;
 struct rtattr;
+struct scatterlist;
 struct seq_file;
 struct sk_buff;
-
-struct crypto_type {
-	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
-	unsigned int (*extsize)(struct crypto_alg *alg);
-	int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
-	int (*init_tfm)(struct crypto_tfm *tfm);
-	void (*show)(struct seq_file *m, struct crypto_alg *alg);
-	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
-	void (*free)(struct crypto_instance *inst);
-
-	unsigned int type;
-	unsigned int maskclear;
-	unsigned int maskset;
-	unsigned int tfmsize;
-};
+union crypto_no_such_thing;
 
 struct crypto_instance {
 	struct crypto_alg alg;
@@ -64,8 +74,11 @@ struct crypto_instance {
 struct crypto_template {
 	struct list_head list;
 	struct hlist_head instances;
+	struct hlist_head dead;
 	struct module *module;
 
+	struct work_struct free_work;
+
 	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
 
 	char name[CRYPTO_MAX_ALG_NAME];
@@ -95,6 +108,13 @@ struct crypto_queue {
 };
 
 struct scatter_walk {
+	/* Must be the first member, see struct skcipher_walk. */
+	union {
+		void *const addr;
+
+		/* Private API field, do not touch. */
+		union crypto_no_such_thing *__addr;
+	};
 	struct scatterlist *sg;
 	unsigned int offset;
 };
@@ -108,6 +128,14 @@ struct crypto_attr_type {
 	u32 mask;
 };
 
+/*
+ * Algorithm registration interface.
+ */
+int crypto_register_alg(struct crypto_alg *alg);
+void crypto_unregister_alg(struct crypto_alg *alg);
+int crypto_register_algs(struct crypto_alg *algs, int count);
+void crypto_unregister_algs(struct crypto_alg *algs, int count);
+
 void crypto_mod_put(struct crypto_alg *alg);
 
 int crypto_register_template(struct crypto_template *tmpl);
@@ -130,8 +158,16 @@ void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
 struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
 int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
 const char *crypto_attr_alg_name(struct rtattr *rta);
-int crypto_inst_setname(struct crypto_instance *inst, const char *name,
-			struct crypto_alg *alg);
+int __crypto_inst_setname(struct crypto_instance *inst, const char *name,
+			  const char *driver, struct crypto_alg *alg);
+
+#define crypto_inst_setname(inst, name, ...) \
+	CONCATENATE(crypto_inst_setname_, COUNT_ARGS(__VA_ARGS__))( \
+		inst, name, ##__VA_ARGS__)
+#define crypto_inst_setname_1(inst, name, alg) \
+	__crypto_inst_setname(inst, name, name, alg)
+#define crypto_inst_setname_2(inst, name, driver, alg) \
+	__crypto_inst_setname(inst, name, driver, alg)
 
 void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
 int crypto_enqueue_request(struct crypto_queue *queue,
@@ -145,48 +181,34 @@ static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
 }
 
 void crypto_inc(u8 *a, unsigned int size);
-void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);
 
-static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
+static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
+{
+	return tfm->__crt_ctx;
+}
+
+static inline void *crypto_tfm_ctx_align(struct crypto_tfm *tfm,
+					 unsigned int align)
 {
-	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
-	    __builtin_constant_p(size) &&
-	    (size % sizeof(unsigned long)) == 0) {
-		unsigned long *d = (unsigned long *)dst;
-		unsigned long *s = (unsigned long *)src;
-
-		while (size > 0) {
-			*d++ ^= *s++;
-			size -= sizeof(unsigned long);
-		}
-	} else {
-		__crypto_xor(dst, dst, src, size);
-	}
+	if (align <= crypto_tfm_ctx_alignment())
+		align = 1;
+
+	return PTR_ALIGN(crypto_tfm_ctx(tfm), align);
 }
 
-static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
-				  unsigned int size)
+static inline unsigned int crypto_dma_align(void)
 {
-	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
-	    __builtin_constant_p(size) &&
-	    (size % sizeof(unsigned long)) == 0) {
-		unsigned long *d = (unsigned long *)dst;
-		unsigned long *s1 = (unsigned long *)src1;
-		unsigned long *s2 = (unsigned long *)src2;
-
-		while (size > 0) {
-			*d++ = *s1++ ^ *s2++;
-			size -= sizeof(unsigned long);
-		}
-	} else {
-		__crypto_xor(dst, src1, src2, size);
-	}
+	return CRYPTO_DMA_ALIGN;
 }
 
-static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
+static inline unsigned int crypto_dma_padding(void)
 {
-	return PTR_ALIGN(crypto_tfm_ctx(tfm),
-			 crypto_tfm_alg_alignmask(tfm) + 1);
+	return (crypto_dma_align() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
+}
+
+static inline void *crypto_tfm_ctx_dma(struct crypto_tfm *tfm)
+{
+	return crypto_tfm_ctx_align(tfm, crypto_dma_align());
 }
 
 static inline struct crypto_instance *crypto_tfm_alg_instance(
@@ -232,23 +254,6 @@ static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt)
 	return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS);
 }
 
-noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
-
-/**
- * crypto_memneq - Compare two areas of memory without leaking
- *		   timing information.
- *
- * @a: One area of memory
- * @b: Another area of memory
- * @size: The size of the area.
- *
- * Returns 0 when data is equal, 1 otherwise.
- */
-static inline int crypto_memneq(const void *a, const void *b, size_t size)
-{
-	return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
-}
-
 int crypto_register_notifier(struct notifier_block *nb);
 int crypto_unregister_notifier(struct notifier_block *nb);
 
@@ -259,4 +264,25 @@ enum {
 	CRYPTO_MSG_ALG_LOADED,
 };
 
+static inline void crypto_request_complete(struct crypto_async_request *req,
+					   int err)
+{
+	req->complete(req->data, err);
+}
+
+static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
+{
+	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
+}
+
+static inline bool crypto_tfm_req_virt(struct crypto_tfm *tfm)
+{
+	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_REQ_VIRT;
+}
+
+static inline u32 crypto_request_flags(struct crypto_async_request *req)
+{
+	return req->flags & ~CRYPTO_TFM_REQ_ON_STACK;
+}
+
 #endif /* _CRYPTO_ALGAPI_H */
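Note on MODULE_ALIAS_CRYPTO: as the comment in the diff explains, the macro emits both the bare alias and a "crypto-"-prefixed one, so in-kernel autoloading can request only the prefixed form while userspace keeps the unprefixed name. A minimal usage sketch (the "sha256" alias is illustrative):

        MODULE_ALIAS_CRYPTO("sha256");
        /*
         * Expands to two module aliases:
         *   MODULE_INFO(alias, "sha256");
         *   MODULE_INFO(alias, "crypto-sha256");
         */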

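Note on the CRYPTO_DMA_* helpers: they let a driver whose hardware DMAs into the transform context realign that context at runtime. A minimal sketch, assuming a hypothetical driver (the mydrv_* names are not part of the header): the context is over-allocated by CRYPTO_DMA_PADDING so that crypto_tfm_ctx_dma() can round the pointer up to CRYPTO_DMA_ALIGN without running past the allocation.

        /* Hypothetical context the device DMAs into. */
        struct mydrv_ctx {
                u8 key[32];
        };

        static struct crypto_alg mydrv_alg = {
                /* Reserve room for the realignment done below;
                 * other fields omitted from this sketch. */
                .cra_ctxsize = sizeof(struct mydrv_ctx) + CRYPTO_DMA_PADDING,
        };

        static int mydrv_setkey(struct crypto_tfm *tfm, const u8 *key,
                                unsigned int keylen)
        {
                /* Context pointer rounded up to CRYPTO_DMA_ALIGN. */
                struct mydrv_ctx *ctx = crypto_tfm_ctx_dma(tfm);

                if (keylen != sizeof(ctx->key))
                        return -EINVAL;
                memcpy(ctx->key, key, keylen);
                return 0;
        }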
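Note on crypto_inst_setname(): it is now a variadic macro that dispatches on argument count via COUNT_ARGS()/CONCATENATE(). With one trailing argument the name template doubles as the driver-name template; with two, they differ. A sketch using a hypothetical template name "mytmpl":

        static int mytmpl_set_names(struct crypto_instance *inst,
                                    struct crypto_alg *alg)
        {
                /*
                 * One trailing argument: expands to
                 * __crypto_inst_setname(inst, "mytmpl", "mytmpl", alg).
                 * With a separate driver-name template it would be
                 * crypto_inst_setname(inst, "mytmpl", "mytmpl-driver", alg).
                 */
                return crypto_inst_setname(inst, "mytmpl", alg);
        }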
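Note on crypto_request_complete(): it completes an async request by forwarding the error through the request's completion callback, with req->data as the callback's first argument. A sketch with a hypothetical driver completion handler:

        static void mydrv_req_done(struct crypto_async_request *req, int err)
        {
                /* Invokes req->complete(req->data, err). */
                crypto_request_complete(req, err);
        }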