author     Linus Torvalds <torvalds@linux-foundation.org>   2020-01-28 15:38:56 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2020-01-28 15:38:56 -0800
commit     a78208e2436963d0b2c7d186277d6e1a9755029a (patch)
tree       090caa51386d811a2750aef3dc70cd247f6aa622 /include
parent     68353984d63d8d7ea728819dbdb7aecc5f32d360 (diff)
parent     0bc81767c5bd9d005fae1099fb39eb3688370cb1 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Removed CRYPTO_TFM_RES flags
   - Extended spawn grabbing to all algorithm types
   - Moved hash descsize verification into API code

  Algorithms:
   - Fixed recursive pcrypt dead-lock
   - Added new 32 and 64-bit generic versions of poly1305
   - Added cryptogams implementation of x86/poly1305

  Drivers:
   - Added support for i.MX8M Mini in caam
   - Added support for i.MX8M Nano in caam
   - Added support for i.MX8M Plus in caam
   - Added support for A33 variant of SS in sun4i-ss
   - Added TEE support for Raven Ridge in ccp
   - Added in-kernel API to submit TEE commands in ccp
   - Added AMD-TEE driver
   - Added support for BCM2711 in iproc-rng200
   - Added support for AES256-GCM based ciphers for chtls
   - Added aead support on SEC2 in hisilicon"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (244 commits)
  crypto: arm/chacha - fix build failured when kernel mode NEON is disabled
  crypto: caam - add support for i.MX8M Plus
  crypto: x86/poly1305 - emit does base conversion itself
  crypto: hisilicon - fix spelling mistake "disgest" -> "digest"
  crypto: chacha20poly1305 - add back missing test vectors and test chunking
  crypto: x86/poly1305 - fix .gitignore typo
  tee: fix memory allocation failure checks on drv_data and amdtee
  crypto: ccree - erase unneeded inline funcs
  crypto: ccree - make cc_pm_put_suspend() void
  crypto: ccree - split overloaded usage of irq field
  crypto: ccree - fix PM race condition
  crypto: ccree - fix FDE descriptor sequence
  crypto: ccree - cc_do_send_request() is void func
  crypto: ccree - fix pm wrongful error reporting
  crypto: ccree - turn errors to debug msgs
  crypto: ccree - fix AEAD decrypt auth fail
  crypto: ccree - fix typo in comment
  crypto: ccree - fix typos in error msgs
  crypto: atmel-{aes,sha,tdes} - Retire crypto_platform_data
  crypto: x86/sha - Eliminate casts on asm implementations
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/crypto/aead.h                        |  10
-rw-r--r--  include/crypto/algapi.h                      |  84
-rw-r--r--  include/crypto/cast6.h                       |   7
-rw-r--r--  include/crypto/hash.h                        |  13
-rw-r--r--  include/crypto/internal/acompress.h          |   4
-rw-r--r--  include/crypto/internal/aead.h               |  21
-rw-r--r--  include/crypto/internal/akcipher.h           |  12
-rw-r--r--  include/crypto/internal/chacha.h             |   2
-rw-r--r--  include/crypto/internal/des.h                |  23
-rw-r--r--  include/crypto/internal/geniv.h              |   1
-rw-r--r--  include/crypto/internal/hash.h               |  90
-rw-r--r--  include/crypto/internal/poly1305.h           |  45
-rw-r--r--  include/crypto/internal/scompress.h          |   4
-rw-r--r--  include/crypto/internal/skcipher.h           |  27
-rw-r--r--  include/crypto/nhpoly1305.h                  |   4
-rw-r--r--  include/crypto/poly1305.h                    |  26
-rw-r--r--  include/crypto/serpent.h                     |   4
-rw-r--r--  include/crypto/skcipher.h                    |  26
-rw-r--r--  include/crypto/twofish.h                     |   2
-rw-r--r--  include/crypto/xts.h                         |  21
-rw-r--r--  include/linux/cpuhotplug.h                   |   1
-rw-r--r--  include/linux/crypto.h                       | 104
-rw-r--r--  include/linux/padata.h                       |  56
-rw-r--r--  include/linux/platform_data/crypto-atmel.h   |  23
-rw-r--r--  include/linux/psp-tee.h                      |  91
-rw-r--r--  include/uapi/linux/tee.h                     |   1
26 files changed, 343 insertions, 359 deletions
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index a3bdadf6221e..1b3ebe8593c0 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -227,6 +227,16 @@ static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
return tfm->authsize;
}
+static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
+{
+ return alg->maxauthsize;
+}
+
+static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
+{
+ return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
+}
+
/**
* crypto_aead_blocksize() - obtain block size of cipher
* @tfm: cipher handle
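
A minimal sketch of how a caller outside the crypto internals can use the relocated accessor; crypto_aead_maxauthsize() previously lived in <crypto/internal/aead.h> and this merge moves it into the public header. The example_ function name is hypothetical.

#include <crypto/aead.h>

/* Request the largest authentication tag the algorithm supports. */
static int example_use_max_tag(struct crypto_aead *tfm)
{
        return crypto_aead_setauthsize(tfm, crypto_aead_maxauthsize(tfm));
}
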
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 5cd846defdd6..e115f9215ed5 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -47,7 +47,13 @@ struct crypto_instance {
struct crypto_alg alg;
struct crypto_template *tmpl;
- struct hlist_node list;
+
+ union {
+ /* Node in list of instances after registration. */
+ struct hlist_node list;
+ /* List of attached spawns before registration. */
+ struct crypto_spawn *spawns;
+ };
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
@@ -57,8 +63,6 @@ struct crypto_template {
struct hlist_head instances;
struct module *module;
- struct crypto_instance *(*alloc)(struct rtattr **tb);
- void (*free)(struct crypto_instance *inst);
int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
char name[CRYPTO_MAX_ALG_NAME];
@@ -67,9 +71,16 @@ struct crypto_template {
struct crypto_spawn {
struct list_head list;
struct crypto_alg *alg;
- struct crypto_instance *inst;
+ union {
+ /* Back pointer to instance after registration.*/
+ struct crypto_instance *inst;
+ /* Spawn list pointer prior to registration. */
+ struct crypto_spawn *next;
+ };
const struct crypto_type *frontend;
u32 mask;
+ bool dead;
+ bool registered;
};
struct crypto_queue {
@@ -95,45 +106,21 @@ struct crypto_template *crypto_lookup_template(const char *name);
int crypto_register_instance(struct crypto_template *tmpl,
struct crypto_instance *inst);
-int crypto_unregister_instance(struct crypto_instance *inst);
-
-int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
- struct crypto_instance *inst, u32 mask);
-int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
- struct crypto_instance *inst,
- const struct crypto_type *frontend);
-int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
- u32 type, u32 mask);
+void crypto_unregister_instance(struct crypto_instance *inst);
+int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
-static inline void crypto_set_spawn(struct crypto_spawn *spawn,
- struct crypto_instance *inst)
-{
- spawn->inst = inst;
-}
-
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type);
const char *crypto_attr_alg_name(struct rtattr *rta);
-struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
- const struct crypto_type *frontend,
- u32 type, u32 mask);
-
-static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
- u32 type, u32 mask)
-{
- return crypto_attr_alg2(rta, NULL, type, mask);
-}
-
int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
struct crypto_alg *alg);
-void *crypto_alloc_instance(const char *name, struct crypto_alg *alg,
- unsigned int head);
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
@@ -200,13 +187,38 @@ static inline void *crypto_instance_ctx(struct crypto_instance *inst)
return inst->__ctx;
}
+struct crypto_cipher_spawn {
+ struct crypto_spawn base;
+};
+
+static inline int crypto_grab_cipher(struct crypto_cipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_CIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+ return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
+}
+
+static inline void crypto_drop_cipher(struct crypto_cipher_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct crypto_alg *crypto_spawn_cipher_alg(
+ struct crypto_cipher_spawn *spawn)
+{
+ return spawn->base.alg;
+}
+
static inline struct crypto_cipher *crypto_spawn_cipher(
- struct crypto_spawn *spawn)
+ struct crypto_cipher_spawn *spawn)
{
u32 type = CRYPTO_ALG_TYPE_CIPHER;
u32 mask = CRYPTO_ALG_TYPE_MASK;
- return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
+ return __crypto_cipher_cast(crypto_spawn_tfm(&spawn->base, type, mask));
}
static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
@@ -221,12 +233,6 @@ static inline struct crypto_async_request *crypto_get_backlog(
container_of(queue->backlog, struct crypto_async_request, list);
}
-static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
- u32 type, u32 mask)
-{
- return crypto_attr_alg(tb[1], type, mask);
-}
-
static inline int crypto_requires_off(u32 type, u32 mask, u32 off)
{
return (type ^ off) & mask & off;
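
The spawn rework in this file replaces the crypto_init_spawn()/crypto_set_spawn() two-step with crypto_grab_spawn() taking the owning instance up front. A minimal, hypothetical ->create() sketch showing the new crypto_grab_cipher()/crypto_spawn_cipher_alg()/crypto_drop_cipher() helpers in use; the example_ names are invented and the instance-filling step is abbreviated.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>

static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_cipher_spawn *spawn;
        struct skcipher_instance *inst;
        const char *cipher_name;
        int err;

        cipher_name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(cipher_name))
                return PTR_ERR(cipher_name);

        inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;
        spawn = skcipher_instance_ctx(inst);

        /* The owning instance is attached to the spawn at grab time now. */
        err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
                                 cipher_name, 0, 0);
        if (err)
                goto err_free_inst;

        /* ... fill in inst->alg from crypto_spawn_cipher_alg(spawn),
         * set the names with crypto_inst_setname(), etc. ... */

        err = skcipher_register_instance(tmpl, inst);
        if (err)
                goto err_drop_cipher;
        return 0;

err_drop_cipher:
        crypto_drop_cipher(spawn);
err_free_inst:
        kfree(inst);
        return err;
}
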
diff --git a/include/crypto/cast6.h b/include/crypto/cast6.h
index c71f6ef47f0f..38f490cd50a8 100644
--- a/include/crypto/cast6.h
+++ b/include/crypto/cast6.h
@@ -15,11 +15,10 @@ struct cast6_ctx {
u8 Kr[12][4];
};
-int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key,
- unsigned int keylen, u32 *flags);
+int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key, unsigned int keylen);
int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
-void __cast6_encrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src);
-void __cast6_decrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src);
+void __cast6_encrypt(const void *ctx, u8 *dst, const u8 *src);
+void __cast6_decrypt(const void *ctx, u8 *dst, const u8 *src);
#endif
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index fe7f73bad1e2..cee446c59497 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -169,6 +169,17 @@ struct shash_desc {
* @export: see struct ahash_alg
* @import: see struct ahash_alg
* @setkey: see struct ahash_alg
+ * @init_tfm: Initialize the cryptographic transformation object.
+ * This function is called only once at the instantiation
+ * time, right after the transformation context was
+ * allocated. In case the cryptographic hardware has
+ * some special requirements which need to be handled
+ * by software, this function shall check for the precise
+ * requirement of the transformation and put any software
+ * fallbacks in place.
+ * @exit_tfm: Deinitialize the cryptographic transformation object.
+ * This is a counterpart to @init_tfm, used to remove
+ * various changes set in @init_tfm.
* @digestsize: see struct ahash_alg
* @statesize: see struct ahash_alg
* @descsize: Size of the operational state for the message digest. This state
@@ -189,6 +200,8 @@ struct shash_alg {
int (*import)(struct shash_desc *desc, const void *in);
int (*setkey)(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
+ int (*init_tfm)(struct crypto_shash *tfm);
+ void (*exit_tfm)(struct crypto_shash *tfm);
unsigned int descsize;
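
The new ->init_tfm/->exit_tfm hooks documented above give shash drivers a per-transform setup and teardown point. Below is a sketch of using them to manage a software fallback; the example_ names, the "sha256" fallback choice and the context layout are illustrative assumptions, and the remaining shash_alg fields are omitted.

#include <linux/err.h>
#include <linux/module.h>
#include <crypto/internal/hash.h>

struct example_tfm_ctx {
        struct crypto_shash *fallback;  /* allocated once per transform */
};

static int example_init_tfm(struct crypto_shash *tfm)
{
        struct example_tfm_ctx *ctx = crypto_shash_ctx(tfm);

        ctx->fallback = crypto_alloc_shash("sha256", 0, 0);
        return PTR_ERR_OR_ZERO(ctx->fallback);
}

static void example_exit_tfm(struct crypto_shash *tfm)
{
        struct example_tfm_ctx *ctx = crypto_shash_ctx(tfm);

        crypto_free_shash(ctx->fallback);
}

static struct shash_alg example_alg = {
        .digestsize     = 32,
        .init_tfm       = example_init_tfm,
        .exit_tfm       = example_exit_tfm,
        /* .init/.update/.final/.descsize etc. omitted in this sketch */
        .base           = {
                .cra_name       = "example-sha256",
                .cra_ctxsize    = sizeof(struct example_tfm_ctx),
                .cra_module     = THIS_MODULE,
        },
};
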
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index 9de57367afbb..cf478681b53e 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -68,10 +68,8 @@ int crypto_register_acomp(struct acomp_alg *alg);
* compression algorithm
*
* @alg: algorithm definition
- *
- * Return: zero on success; error code in case of error
*/
-int crypto_unregister_acomp(struct acomp_alg *alg);
+void crypto_unregister_acomp(struct acomp_alg *alg);
int crypto_register_acomps(struct acomp_alg *algs, int count);
void crypto_unregister_acomps(struct acomp_alg *algs, int count);
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index c509ec30fc65..27b7b0224ea6 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -81,14 +81,9 @@ static inline struct aead_request *aead_request_cast(
return container_of(req, struct aead_request, base);
}
-static inline void crypto_set_aead_spawn(
- struct crypto_aead_spawn *spawn, struct crypto_instance *inst)
-{
- crypto_set_spawn(&spawn->base, inst);
-}
-
-int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
- u32 type, u32 mask);
+int crypto_grab_aead(struct crypto_aead_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn)
{
@@ -113,16 +108,6 @@ static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
aead->reqsize = reqsize;
}
-static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
-{
- return alg->maxauthsize;
-}
-
-static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
-{
- return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
-}
-
static inline void aead_init_queue(struct aead_queue *queue,
unsigned int max_qlen)
{
diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h
index d6c8a42789ad..8d3220c9ab77 100644
--- a/include/crypto/internal/akcipher.h
+++ b/include/crypto/internal/akcipher.h
@@ -78,15 +78,9 @@ static inline void *akcipher_instance_ctx(struct akcipher_instance *inst)
return crypto_instance_ctx(akcipher_crypto_instance(inst));
}
-static inline void crypto_set_akcipher_spawn(
- struct crypto_akcipher_spawn *spawn,
- struct crypto_instance *inst)
-{
- crypto_set_spawn(&spawn->base, inst);
-}
-
-int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name,
- u32 type, u32 mask);
+int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
static inline struct crypto_akcipher *crypto_spawn_akcipher(
struct crypto_akcipher_spawn *spawn)
diff --git a/include/crypto/internal/chacha.h b/include/crypto/internal/chacha.h
index aa5d4a16aac5..b085dc1ac151 100644
--- a/include/crypto/internal/chacha.h
+++ b/include/crypto/internal/chacha.h
@@ -34,7 +34,7 @@ static inline int chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
return chacha_setkey(tfm, key, keysize, 20);
}
-static int inline chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
+static inline int chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keysize)
{
return chacha_setkey(tfm, key, keysize, 12);
diff --git a/include/crypto/internal/des.h b/include/crypto/internal/des.h
index f62a2bb1866b..723fe5bf16da 100644
--- a/include/crypto/internal/des.h
+++ b/include/crypto/internal/des.h
@@ -35,10 +35,6 @@ static inline int crypto_des_verify_key(struct crypto_tfm *tfm, const u8 *key)
else
err = 0;
}
-
- if (err)
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
-
memzero_explicit(&tmp, sizeof(tmp));
return err;
}
@@ -95,14 +91,9 @@ bad:
static inline int crypto_des3_ede_verify_key(struct crypto_tfm *tfm,
const u8 *key)
{
- int err;
-
- err = des3_ede_verify_key(key, DES3_EDE_KEY_SIZE,
- crypto_tfm_get_flags(tfm) &
- CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
- if (err)
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
- return err;
+ return des3_ede_verify_key(key, DES3_EDE_KEY_SIZE,
+ crypto_tfm_get_flags(tfm) &
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
}
static inline int verify_skcipher_des_key(struct crypto_skcipher *tfm,
@@ -120,20 +111,16 @@ static inline int verify_skcipher_des3_key(struct crypto_skcipher *tfm,
static inline int verify_aead_des_key(struct crypto_aead *tfm, const u8 *key,
int keylen)
{
- if (keylen != DES_KEY_SIZE) {
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != DES_KEY_SIZE)
return -EINVAL;
- }
return crypto_des_verify_key(crypto_aead_tfm(tfm), key);
}
static inline int verify_aead_des3_key(struct crypto_aead *tfm, const u8 *key,
int keylen)
{
- if (keylen != DES3_EDE_KEY_SIZE) {
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != DES3_EDE_KEY_SIZE)
return -EINVAL;
- }
return crypto_des3_ede_verify_key(crypto_aead_tfm(tfm), key);
}
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 0108c0c7b2ed..229d37681a9d 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -21,7 +21,6 @@ struct aead_geniv_ctx {
struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
struct rtattr **tb, u32 type, u32 mask);
-void aead_geniv_free(struct aead_instance *inst);
int aead_init_geniv(struct crypto_aead *tfm);
void aead_exit_geniv(struct crypto_aead *tfm);
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index bfc9db7b100d..89f6f46ab2b8 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -30,11 +30,25 @@ struct crypto_hash_walk {
};
struct ahash_instance {
- struct ahash_alg alg;
+ void (*free)(struct ahash_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct ahash_alg, halg.base)];
+ struct crypto_instance base;
+ } s;
+ struct ahash_alg alg;
+ };
};
struct shash_instance {
- struct shash_alg alg;
+ void (*free)(struct shash_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct shash_alg, base)];
+ struct crypto_instance base;
+ } s;
+ struct shash_alg alg;
+ };
};
struct crypto_ahash_spawn {
@@ -45,8 +59,6 @@ struct crypto_shash_spawn {
struct crypto_spawn base;
};
-extern const struct crypto_type crypto_ahash_type;
-
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
int crypto_hash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk);
@@ -70,12 +82,11 @@ static inline int crypto_ahash_walk_last(struct crypto_hash_walk *walk)
}
int crypto_register_ahash(struct ahash_alg *alg);
-int crypto_unregister_ahash(struct ahash_alg *alg);
+void crypto_unregister_ahash(struct ahash_alg *alg);
int crypto_register_ahashes(struct ahash_alg *algs, int count);
void crypto_unregister_ahashes(struct ahash_alg *algs, int count);
int ahash_register_instance(struct crypto_template *tmpl,
struct ahash_instance *inst);
-void ahash_free_instance(struct crypto_instance *inst);
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
@@ -85,37 +96,51 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
return alg->setkey != shash_no_setkey;
}
+static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg)
+{
+ return crypto_shash_alg_has_setkey(alg) &&
+ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
+}
+
bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
-int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
- struct hash_alg_common *alg,
- struct crypto_instance *inst);
+int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
static inline void crypto_drop_ahash(struct crypto_ahash_spawn *spawn)
{
crypto_drop_spawn(&spawn->base);
}
-struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask);
+static inline struct hash_alg_common *crypto_spawn_ahash_alg(
+ struct crypto_ahash_spawn *spawn)
+{
+ return __crypto_hash_alg_common(spawn->base.alg);
+}
int crypto_register_shash(struct shash_alg *alg);
-int crypto_unregister_shash(struct shash_alg *alg);
+void crypto_unregister_shash(struct shash_alg *alg);
int crypto_register_shashes(struct shash_alg *algs, int count);
-int crypto_unregister_shashes(struct shash_alg *algs, int count);
+void crypto_unregister_shashes(struct shash_alg *algs, int count);
int shash_register_instance(struct crypto_template *tmpl,
struct shash_instance *inst);
-void shash_free_instance(struct crypto_instance *inst);
+void shash_free_singlespawn_instance(struct shash_instance *inst);
-int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn,
- struct shash_alg *alg,
- struct crypto_instance *inst);
+int crypto_grab_shash(struct crypto_shash_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
static inline void crypto_drop_shash(struct crypto_shash_spawn *spawn)
{
crypto_drop_spawn(&spawn->base);
}
-struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask);
+static inline struct shash_alg *crypto_spawn_shash_alg(
+ struct crypto_shash_spawn *spawn)
+{
+ return __crypto_shash_alg(spawn->base.alg);
+}
int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
@@ -143,13 +168,13 @@ static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
static inline struct crypto_instance *ahash_crypto_instance(
struct ahash_instance *inst)
{
- return container_of(&inst->alg.halg.base, struct crypto_instance, alg);
+ return &inst->s.base;
}
static inline struct ahash_instance *ahash_instance(
struct crypto_instance *inst)
{
- return container_of(&inst->alg, struct ahash_instance, alg.halg.base);
+ return container_of(inst, struct ahash_instance, s.base);
}
static inline void *ahash_instance_ctx(struct ahash_instance *inst)
@@ -157,17 +182,6 @@ static inline void *ahash_instance_ctx(struct ahash_instance *inst)
return crypto_instance_ctx(ahash_crypto_instance(inst));
}
-static inline unsigned int ahash_instance_headroom(void)
-{
- return sizeof(struct ahash_alg) - sizeof(struct crypto_alg);
-}
-
-static inline struct ahash_instance *ahash_alloc_instance(
- const char *name, struct crypto_alg *alg)
-{
- return crypto_alloc_instance(name, alg, ahash_instance_headroom());
-}
-
static inline void ahash_request_complete(struct ahash_request *req, int err)
{
req->base.complete(&req->base, err);
@@ -204,26 +218,24 @@ static inline void *crypto_shash_ctx(struct crypto_shash *tfm)
static inline struct crypto_instance *shash_crypto_instance(
struct shash_instance *inst)
{
- return container_of(&inst->alg.base, struct crypto_instance, alg);
+ return &inst->s.base;
}
static inline struct shash_instance *shash_instance(
struct crypto_instance *inst)
{
- return container_of(__crypto_shash_alg(&inst->alg),
- struct shash_instance, alg);
+ return container_of(inst, struct shash_instance, s.base);
}
-static inline void *shash_instance_ctx(struct shash_instance *inst)
+static inline struct shash_instance *shash_alg_instance(
+ struct crypto_shash *shash)
{
- return crypto_instance_ctx(shash_crypto_instance(inst));
+ return shash_instance(crypto_tfm_alg_instance(&shash->base));
}
-static inline struct shash_instance *shash_alloc_instance(
- const char *name, struct crypto_alg *alg)
+static inline void *shash_instance_ctx(struct shash_instance *inst)
{
- return crypto_alloc_instance(name, alg,
- sizeof(struct shash_alg) - sizeof(*alg));
+ return crypto_instance_ctx(shash_crypto_instance(inst));
}
static inline struct crypto_shash *crypto_spawn_shash(
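
The hash spawn helpers follow the same pattern as the rest of the series: crypto_grab_shash()/crypto_grab_ahash() take the owning instance, and crypto_spawn_shash_alg()/crypto_spawn_ahash_alg() replace the removed shash_attr_alg()/ahash_attr_alg(). A small hypothetical helper (example_ name invented) that a template ->create() could call:

#include <crypto/internal/hash.h>

static int example_grab_digest(struct shash_instance *inst,
                               struct crypto_shash_spawn *spawn,
                               const char *name, u32 mask)
{
        struct shash_alg *salg;
        int err;

        err = crypto_grab_shash(spawn, shash_crypto_instance(inst),
                                name, 0, mask);
        if (err)
                return err;

        salg = crypto_spawn_shash_alg(spawn);

        /* e.g. reject keyed digests for this (hypothetical) template */
        if (crypto_shash_alg_has_setkey(salg)) {
                crypto_drop_shash(spawn);
                return -EINVAL;
        }
        return 0;
}
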
diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
index 479b0cab2a1a..064e52ca5248 100644
--- a/include/crypto/internal/poly1305.h
+++ b/include/crypto/internal/poly1305.h
@@ -11,48 +11,23 @@
#include <crypto/poly1305.h>
/*
- * Poly1305 core functions. These implement the ε-almost-∆-universal hash
- * function underlying the Poly1305 MAC, i.e. they don't add an encrypted nonce
- * ("s key") at the end. They also only support block-aligned inputs.
+ * Poly1305 core functions. These only accept whole blocks; the caller must
+ * handle any needed block buffering and padding. 'hibit' must be 1 for any
+ * full blocks, or 0 for the final block if it had to be padded. If 'nonce' is
+ * non-NULL, then it's added at the end to compute the Poly1305 MAC. Otherwise,
+ * only the ε-almost-∆-universal hash function (not the full MAC) is computed.
*/
-void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key);
+
+void poly1305_core_setkey(struct poly1305_core_key *key, const u8 *raw_key);
static inline void poly1305_core_init(struct poly1305_state *state)
{
*state = (struct poly1305_state){};
}
void poly1305_core_blocks(struct poly1305_state *state,
- const struct poly1305_key *key, const void *src,
+ const struct poly1305_core_key *key, const void *src,
unsigned int nblocks, u32 hibit);
-void poly1305_core_emit(const struct poly1305_state *state, void *dst);
-
-/*
- * Poly1305 requires a unique key for each tag, which implies that we can't set
- * it on the tfm that gets accessed by multiple users simultaneously. Instead we
- * expect the key as the first 32 bytes in the update() call.
- */
-static inline
-unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
- const u8 *src, unsigned int srclen)
-{
- if (!dctx->sset) {
- if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) {
- poly1305_core_setkey(dctx->r, src);
- src += POLY1305_BLOCK_SIZE;
- srclen -= POLY1305_BLOCK_SIZE;
- dctx->rset = 1;
- }
- if (srclen >= POLY1305_BLOCK_SIZE) {
- dctx->s[0] = get_unaligned_le32(src + 0);
- dctx->s[1] = get_unaligned_le32(src + 4);
- dctx->s[2] = get_unaligned_le32(src + 8);
- dctx->s[3] = get_unaligned_le32(src + 12);
- src += POLY1305_BLOCK_SIZE;
- srclen -= POLY1305_BLOCK_SIZE;
- dctx->sset = true;
- }
- }
- return srclen;
-}
+void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4],
+ void *dst);
#endif
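
The rewritten comment above changes the core contract: poly1305_core_emit() now takes the nonce and produces the full MAC itself. A sketch of computing a one-shot Poly1305 tag over block-aligned input with the new core API; the example_ wrapper is hypothetical, and real users normally go through the shash or lib/crypto interfaces instead.

#include <asm/unaligned.h>
#include <linux/string.h>
#include <crypto/internal/poly1305.h>

static void example_poly1305(u8 mac[POLY1305_DIGEST_SIZE],
                             const void *src, unsigned int nblocks,
                             const u8 raw_key[POLY1305_KEY_SIZE])
{
        struct poly1305_core_key key;
        struct poly1305_state state;
        u32 nonce[4];

        poly1305_core_setkey(&key, raw_key);    /* first 16 bytes: "r" */
        nonce[0] = get_unaligned_le32(raw_key + 16);
        nonce[1] = get_unaligned_le32(raw_key + 20);
        nonce[2] = get_unaligned_le32(raw_key + 24);
        nonce[3] = get_unaligned_le32(raw_key + 28);

        poly1305_core_init(&state);
        /* hibit == 1 because only whole, unpadded blocks are passed here */
        poly1305_core_blocks(&state, &key, src, nblocks, 1);
        poly1305_core_emit(&state, nonce, mac);

        memzero_explicit(&key, sizeof(key));
}
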
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index 6727ef0fc4d1..f834274c2493 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -112,10 +112,8 @@ int crypto_register_scomp(struct scomp_alg *alg);
* compression algorithm
*
* @alg: algorithm definition
- *
- * Return: zero on success; error code in case of error
*/
-int crypto_unregister_scomp(struct scomp_alg *alg);
+void crypto_unregister_scomp(struct scomp_alg *alg);
int crypto_register_scomps(struct scomp_alg *algs, int count);
void crypto_unregister_scomps(struct scomp_alg *algs, int count);
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 921c409fe1b1..10226c12c5df 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -88,14 +88,9 @@ static inline void skcipher_request_complete(struct skcipher_request *req, int e
req->base.complete(&req->base, err);
}
-static inline void crypto_set_skcipher_spawn(
- struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst)
-{
- crypto_set_spawn(&spawn->base, inst);
-}
-
-int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
- u32 type, u32 mask);
+int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
{
@@ -140,8 +135,6 @@ int skcipher_walk_virt(struct skcipher_walk *walk,
void skcipher_walk_atomise(struct skcipher_walk *walk);
int skcipher_walk_async(struct skcipher_walk *walk,
struct skcipher_request *req);
-int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
- bool atomic);
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
struct aead_request *req, bool atomic);
int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
@@ -214,9 +207,17 @@ skcipher_cipher_simple(struct crypto_skcipher *tfm)
return ctx->cipher;
}
-struct skcipher_instance *
-skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb,
- struct crypto_alg **cipher_alg_ret);
+
+struct skcipher_instance *skcipher_alloc_instance_simple(
+ struct crypto_template *tmpl, struct rtattr **tb);
+
+static inline struct crypto_alg *skcipher_ialg_simple(
+ struct skcipher_instance *inst)
+{
+ struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
+
+ return crypto_spawn_cipher_alg(spawn);
+}
#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */
diff --git a/include/crypto/nhpoly1305.h b/include/crypto/nhpoly1305.h
index 53c04423c582..306925fea190 100644
--- a/include/crypto/nhpoly1305.h
+++ b/include/crypto/nhpoly1305.h
@@ -7,7 +7,7 @@
#define _NHPOLY1305_H
#include <crypto/hash.h>
-#include <crypto/poly1305.h>
+#include <crypto/internal/poly1305.h>
/* NH parameterization: */
@@ -33,7 +33,7 @@
#define NHPOLY1305_KEY_SIZE (POLY1305_BLOCK_SIZE + NH_KEY_BYTES)
struct nhpoly1305_key {
- struct poly1305_key poly_key;
+ struct poly1305_core_key poly_key;
u32 nh_key[NH_KEY_WORDS];
};
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index 74c6e1cd73ee..f1f67fc749cf 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -13,12 +13,29 @@
#define POLY1305_KEY_SIZE 32
#define POLY1305_DIGEST_SIZE 16
+/* The poly1305_key and poly1305_state types are mostly opaque and
+ * implementation-defined. Limbs might be in base 2^64 or base 2^26, or
+ * different yet. The union type provided keeps these 64-bit aligned for the
+ * case in which this is implemented using 64x64 multiplies.
+ */
+
struct poly1305_key {
- u32 r[5]; /* key, base 2^26 */
+ union {
+ u32 r[5];
+ u64 r64[3];
+ };
+};
+
+struct poly1305_core_key {
+ struct poly1305_key key;
+ struct poly1305_key precomputed_s;
};
struct poly1305_state {
- u32 h[5]; /* accumulator, base 2^26 */
+ union {
+ u32 h[5];
+ u64 h64[3];
+ };
};
struct poly1305_desc_ctx {
@@ -35,7 +52,10 @@ struct poly1305_desc_ctx {
/* accumulator */
struct poly1305_state h;
/* key */
- struct poly1305_key r[CONFIG_CRYPTO_LIB_POLY1305_RSIZE];
+ union {
+ struct poly1305_key opaque_r[CONFIG_CRYPTO_LIB_POLY1305_RSIZE];
+ struct poly1305_core_key core_r;
+ };
};
void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key);
diff --git a/include/crypto/serpent.h b/include/crypto/serpent.h
index 7dd780c5d058..75c7eaa20853 100644
--- a/include/crypto/serpent.h
+++ b/include/crypto/serpent.h
@@ -22,7 +22,7 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
unsigned int keylen);
int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
-void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src);
-void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src);
+void __serpent_encrypt(const void *ctx, u8 *dst, const u8 *src);
+void __serpent_decrypt(const void *ctx, u8 *dst, const u8 *src);
#endif
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index b4655d91661f..141e7690f9c3 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -35,14 +35,7 @@ struct skcipher_request {
};
struct crypto_skcipher {
- int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct skcipher_request *req);
- int (*decrypt)(struct skcipher_request *req);
-
- unsigned int ivsize;
unsigned int reqsize;
- unsigned int keysize;
struct crypto_tfm base;
};
@@ -255,7 +248,7 @@ static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg)
*/
static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
{
- return tfm->ivsize;
+ return crypto_skcipher_alg(tfm)->ivsize;
}
static inline unsigned int crypto_sync_skcipher_ivsize(
@@ -366,11 +359,8 @@ static inline void crypto_sync_skcipher_clear_flags(
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
-static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
- const u8 *key, unsigned int keylen)
-{
- return tfm->setkey(tfm, key, keylen);
-}
+int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen);
static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm,
const u8 *key, unsigned int keylen)
@@ -378,10 +368,16 @@ static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm,
return crypto_skcipher_setkey(&tfm->base, key, keylen);
}
-static inline unsigned int crypto_skcipher_default_keysize(
+static inline unsigned int crypto_skcipher_min_keysize(
+ struct crypto_skcipher *tfm)
+{
+ return crypto_skcipher_alg(tfm)->min_keysize;
+}
+
+static inline unsigned int crypto_skcipher_max_keysize(
struct crypto_skcipher *tfm)
{
- return tfm->keysize;
+ return crypto_skcipher_alg(tfm)->max_keysize;
}
/**
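
With the ivsize/keysize fields dropped from struct crypto_skcipher, callers go through the algorithm accessors, and crypto_skcipher_default_keysize() becomes the explicit min/max pair. A sketch that generates and sets a random key of the maximum supported length (the example_ name is hypothetical):

#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <crypto/skcipher.h>

static int example_random_key(struct crypto_skcipher *tfm)
{
        unsigned int keylen = crypto_skcipher_max_keysize(tfm);
        u8 *key;
        int err;

        key = kmalloc(keylen, GFP_KERNEL);
        if (!key)
                return -ENOMEM;

        get_random_bytes(key, keylen);
        err = crypto_skcipher_setkey(tfm, key, keylen);

        memzero_explicit(key, keylen);
        kfree(key);
        return err;
}
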
diff --git a/include/crypto/twofish.h b/include/crypto/twofish.h
index 2e2c09673d88..f6b307a58554 100644
--- a/include/crypto/twofish.h
+++ b/include/crypto/twofish.h
@@ -19,7 +19,7 @@ struct twofish_ctx {
};
int __twofish_setkey(struct twofish_ctx *ctx, const u8 *key,
- unsigned int key_len, u32 *flags);
+ unsigned int key_len);
int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len);
#endif
diff --git a/include/crypto/xts.h b/include/crypto/xts.h
index 75fd96ff976b..0f8dba69feb4 100644
--- a/include/crypto/xts.h
+++ b/include/crypto/xts.h
@@ -8,28 +8,19 @@
#define XTS_BLOCK_SIZE 16
-#define XTS_TWEAK_CAST(x) ((void (*)(void *, u8*, const u8*))(x))
-
static inline int xts_check_key(struct crypto_tfm *tfm,
const u8 *key, unsigned int keylen)
{
- u32 *flags = &tfm->crt_flags;
-
/*
* key consists of keys of equal size concatenated, therefore
* the length must be even.
*/
- if (keylen % 2) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ if (keylen % 2)
return -EINVAL;
- }
/* ensure that the AES and tweak key are not identical */
- if (fips_enabled &&
- !crypto_memneq(key, key + (keylen / 2), keylen / 2)) {
- *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ if (fips_enabled && !crypto_memneq(key, key + (keylen / 2), keylen / 2))
return -EINVAL;
- }
return 0;
}
@@ -41,18 +32,14 @@ static inline int xts_verify_key(struct crypto_skcipher *tfm,
* key consists of keys of equal size concatenated, therefore
* the length must be even.
*/
- if (keylen % 2) {
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen % 2)
return -EINVAL;
- }
/* ensure that the AES and tweak key are not identical */
if ((fips_enabled || (crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) &&
- !crypto_memneq(key, key + (keylen / 2), keylen / 2)) {
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
+ !crypto_memneq(key, key + (keylen / 2), keylen / 2))
return -EINVAL;
- }
return 0;
}
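
After the flag removal, xts_verify_key() simply returns -EINVAL for an odd-length key or, in FIPS mode or when weak keys are forbidden, identical halves. A sketch of a ->setkey() splitting the key between the data and tweak ciphers; the example_ context layout is an assumption loosely modelled on the xts template.

#include <linux/crypto.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>

struct example_xts_ctx {
        struct crypto_skcipher *child;  /* data cipher, e.g. ecb(aes) */
        struct crypto_cipher *tweak;    /* tweak cipher */
};

static int example_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
                              unsigned int keylen)
{
        struct example_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        int err;

        err = xts_verify_key(tfm, key, keylen);
        if (err)
                return err;

        keylen /= 2;

        err = crypto_cipher_setkey(ctx->tweak, key + keylen, keylen);
        if (err)
                return err;

        return crypto_skcipher_setkey(ctx->child, key, keylen);
}
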
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index e51ee772b9f5..def48a583670 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -59,6 +59,7 @@ enum cpuhp_state {
CPUHP_IOMMU_INTEL_DEAD,
CPUHP_LUSTRE_CFS_DEAD,
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
+ CPUHP_PADATA_DEAD,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 23365a9d062e..763863dbc079 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -107,16 +107,9 @@
#define CRYPTO_TFM_NEED_KEY 0x00000001
#define CRYPTO_TFM_REQ_MASK 0x000fff00
-#define CRYPTO_TFM_RES_MASK 0xfff00000
-
#define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS 0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400
-#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
-#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000
-#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000
-#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000
-#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000
/*
* Miscellaneous stuff.
@@ -570,7 +563,7 @@ static inline int crypto_wait_req(int err, struct crypto_wait *wait)
reinit_completion(&wait->completion);
err = wait->err;
break;
- };
+ }
return err;
}
@@ -584,9 +577,9 @@ static inline void crypto_init_wait(struct crypto_wait *wait)
* Algorithm registration interface.
*/
int crypto_register_alg(struct crypto_alg *alg);
-int crypto_unregister_alg(struct crypto_alg *alg);
+void crypto_unregister_alg(struct crypto_alg *alg);
int crypto_register_algs(struct crypto_alg *algs, int count);
-int crypto_unregister_algs(struct crypto_alg *algs, int count);
+void crypto_unregister_algs(struct crypto_alg *algs, int count);
/*
* Algorithm query interface.
@@ -599,34 +592,10 @@ int crypto_has_alg(const char *name, u32 type, u32 mask);
* crypto_free_*(), as well as the various helpers below.
*/
-struct cipher_tfm {
- int (*cit_setkey)(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen);
- void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
- void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
-};
-
-struct compress_tfm {
- int (*cot_compress)(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
- int (*cot_decompress)(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-};
-
-#define crt_cipher crt_u.cipher
-#define crt_compress crt_u.compress
-
struct crypto_tfm {
u32 crt_flags;
- union {
- struct cipher_tfm cipher;
- struct compress_tfm compress;
- } crt_u;
-
void (*exit)(struct crypto_tfm *tfm);
struct crypto_alg *__crt_alg;
@@ -763,12 +732,6 @@ static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
return (struct crypto_cipher *)tfm;
}
-static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return __crypto_cipher_cast(tfm);
-}
-
/**
* crypto_alloc_cipher() - allocate single block cipher handle
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
@@ -826,11 +789,6 @@ static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
return crypto_has_alg(alg_name, type, mask);
}
-static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
-{
- return &crypto_cipher_tfm(tfm)->crt_cipher;
-}
-
/**
* crypto_cipher_blocksize() - obtain block size for cipher
* @tfm: cipher handle
@@ -884,12 +842,8 @@ static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
-static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
- const u8 *key, unsigned int keylen)
-{
- return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
- key, keylen);
-}
+int crypto_cipher_setkey(struct crypto_cipher *tfm,
+ const u8 *key, unsigned int keylen);
/**
* crypto_cipher_encrypt_one() - encrypt one block of plaintext
@@ -900,12 +854,8 @@ static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
* Invoke the encryption operation of one block. The caller must ensure that
* the plaintext and ciphertext buffers are at least one block in size.
*/
-static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
- u8 *dst, const u8 *src)
-{
- crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
- dst, src);
-}
+void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
+ u8 *dst, const u8 *src);
/**
* crypto_cipher_decrypt_one() - decrypt one block of ciphertext
@@ -916,25 +866,14 @@ static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
* Invoke the decryption operation of one block. The caller must ensure that
* the plaintext and ciphertext buffers are at least one block in size.
*/
-static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
- u8 *dst, const u8 *src)
-{
- crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
- dst, src);
-}
+void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
+ u8 *dst, const u8 *src);
static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
return (struct crypto_comp *)tfm;
}
-static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
-{
- BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
- CRYPTO_ALG_TYPE_MASK);
- return __crypto_comp_cast(tfm);
-}
-
static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
u32 type, u32 mask)
{
@@ -969,26 +908,13 @@ static inline const char *crypto_comp_name(struct crypto_comp *tfm)
return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
}
-static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
-{
- return &crypto_comp_tfm(tfm)->crt_compress;
-}
+int crypto_comp_compress(struct crypto_comp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
-static inline int crypto_comp_compress(struct crypto_comp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
- src, slen, dst, dlen);
-}
-
-static inline int crypto_comp_decompress(struct crypto_comp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
- src, slen, dst, dlen);
-}
+int crypto_comp_decompress(struct crypto_comp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
#endif /* _LINUX_CRYPTO_H */
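
The cipher_tfm/compress_tfm unions are gone from struct crypto_tfm, and the single-block cipher and compression entry points become ordinary exported functions, but call sites look the same as before. A self-contained sketch of single-block encryption; the "aes" choice and the example_ name are illustrative only.

#include <linux/err.h>
#include <linux/crypto.h>

static int example_encrypt_block(const u8 *key, unsigned int keylen,
                                 u8 *dst, const u8 *src)
{
        struct crypto_cipher *tfm;
        int err;

        tfm = crypto_alloc_cipher("aes", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_cipher_setkey(tfm, key, keylen);
        if (!err)
                crypto_cipher_encrypt_one(tfm, dst, src);

        crypto_free_cipher(tfm);
        return err;
}
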
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 23717eeaad23..a0d8b41850b2 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -9,17 +9,17 @@
#ifndef PADATA_H
#define PADATA_H
+#include <linux/compiler_types.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>
-#include <linux/notifier.h>
#include <linux/kobject.h>
#define PADATA_CPU_SERIAL 0x01
#define PADATA_CPU_PARALLEL 0x02
/**
- * struct padata_priv - Embedded to the users data structure.
+ * struct padata_priv - Represents one job
*
* @list: List entry, to attach to the padata lists.
* @pd: Pointer to the internal control structure.
@@ -42,7 +42,7 @@ struct padata_priv {
};
/**
- * struct padata_list
+ * struct padata_list - one per work type per CPU
*
* @list: List head.
* @lock: List lock.
@@ -70,9 +70,6 @@ struct padata_serial_queue {
*
* @parallel: List to wait for parallelization.
* @reorder: List to wait for reordering after parallel processing.
- * @serial: List to wait for serialization after reordering.
- * @pwork: work struct for parallelization.
- * @swork: work struct for serialization.
* @work: work struct for parallelization.
* @num_obj: Number of objects that are processed by this cpu.
*/
@@ -98,12 +95,11 @@ struct padata_cpumask {
* struct parallel_data - Internal control structure, covers everything
* that depends on the cpumask in use.
*
- * @pinst: padata instance.
+ * @ps: padata_shell object.
* @pqueue: percpu padata queues used for parallelization.
* @squeue: percpu padata queues used for serialuzation.
- * @reorder_objects: Number of objects waiting in the reorder queues.
* @refcnt: Number of objects holding a reference on this parallel_data.
- * @max_seq_nr: Maximal used sequence number.
+ * @seq_nr: Sequence number of the parallelized data object.
* @processed: Number of already processed objects.
* @cpu: Next CPU to be processed.
* @cpumask: The cpumasks in use for parallel and serial workers.
@@ -111,30 +107,44 @@ struct padata_cpumask {
* @lock: Reorder lock.
*/
struct parallel_data {
- struct padata_instance *pinst;
+ struct padata_shell *ps;
struct padata_parallel_queue __percpu *pqueue;
struct padata_serial_queue __percpu *squeue;
- atomic_t reorder_objects;
atomic_t refcnt;
atomic_t seq_nr;
unsigned int processed;
int cpu;
struct padata_cpumask cpumask;
struct work_struct reorder_work;
- spinlock_t lock ____cacheline_aligned;
+ spinlock_t ____cacheline_aligned lock;
+};
+
+/**
+ * struct padata_shell - Wrapper around struct parallel_data, its
+ * purpose is to allow the underlying control structure to be replaced
+ * on the fly using RCU.
+ *
+ * @pinst: padat instance.
+ * @pd: Actual parallel_data structure which may be substituted on the fly.
+ * @opd: Pointer to old pd to be freed by padata_replace.
+ * @list: List entry in padata_instance list.
+ */
+struct padata_shell {
+ struct padata_instance *pinst;
+ struct parallel_data __rcu *pd;
+ struct parallel_data *opd;
+ struct list_head list;
};
/**
* struct padata_instance - The overall control structure.
*
- * @cpu_notifier: cpu hotplug notifier.
+ * @node: Used by CPU hotplug.
* @parallel_wq: The workqueue used for parallel work.
* @serial_wq: The workqueue used for serial work.
- * @pd: The internal control structure.
+ * @pslist: List of padata_shell objects attached to this instance.
* @cpumask: User supplied cpumasks for parallel and serial works.
- * @cpumask_change_notifier: Notifiers chain for user-defined notify
- * callbacks that will be called when either @pcpu or @cbcpu
- * or both cpumasks change.
+ * @rcpumask: Actual cpumasks based on user cpumask and cpu_online_mask.
* @kobj: padata instance kernel object.
* @lock: padata instance lock.
* @flags: padata flags.
@@ -143,9 +153,9 @@ struct padata_instance {
struct hlist_node node;
struct workqueue_struct *parallel_wq;
struct workqueue_struct *serial_wq;
- struct parallel_data *pd;
+ struct list_head pslist;
struct padata_cpumask cpumask;
- struct blocking_notifier_head cpumask_change_notifier;
+ struct padata_cpumask rcpumask;
struct kobject kobj;
struct mutex lock;
u8 flags;
@@ -156,15 +166,13 @@ struct padata_instance {
extern struct padata_instance *padata_alloc_possible(const char *name);
extern void padata_free(struct padata_instance *pinst);
-extern int padata_do_parallel(struct padata_instance *pinst,
+extern struct padata_shell *padata_alloc_shell(struct padata_instance *pinst);
+extern void padata_free_shell(struct padata_shell *ps);
+extern int padata_do_parallel(struct padata_shell *ps,
struct padata_priv *padata, int *cb_cpu);
extern void padata_do_serial(struct padata_priv *padata);
extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
cpumask_var_t cpumask);
extern int padata_start(struct padata_instance *pinst);
extern void padata_stop(struct padata_instance *pinst);
-extern int padata_register_cpumask_notifier(struct padata_instance *pinst,
- struct notifier_block *nblock);
-extern int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
- struct notifier_block *nblock);
#endif
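
The padata rework introduces struct padata_shell: jobs are now submitted against a shell allocated on top of the instance, and padata_do_parallel() takes the shell rather than the instance. A rough sketch of setup and submission, loosely modelled on how pcrypt uses the API; the example_ structure and names are assumptions, and teardown via padata_free_shell()/padata_free() is omitted.

#include <linux/padata.h>
#include <linux/smp.h>

struct example_padata_user {
        struct padata_instance *pinst;
        struct padata_shell *ps;
};

static int example_setup(struct example_padata_user *u, const char *name)
{
        u->pinst = padata_alloc_possible(name);
        if (!u->pinst)
                return -ENOMEM;

        u->ps = padata_alloc_shell(u->pinst);
        if (!u->ps) {
                padata_free(u->pinst);
                return -ENOMEM;
        }

        return padata_start(u->pinst);
}

/* job->parallel and job->serial are assumed to be set by the caller */
static int example_submit(struct example_padata_user *u,
                          struct padata_priv *job)
{
        int cb_cpu, err;

        cb_cpu = get_cpu();
        err = padata_do_parallel(u->ps, job, &cb_cpu);
        put_cpu();

        return err;
}
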
diff --git a/include/linux/platform_data/crypto-atmel.h b/include/linux/platform_data/crypto-atmel.h
deleted file mode 100644
index 0471aaf6999b..000000000000
--- a/include/linux/platform_data/crypto-atmel.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_CRYPTO_ATMEL_H
-#define __LINUX_CRYPTO_ATMEL_H
-
-#include <linux/platform_data/dma-atmel.h>
-
-/**
- * struct crypto_dma_data - DMA data for AES/TDES/SHA
- */
-struct crypto_dma_data {
- struct at_dma_slave txdata;
- struct at_dma_slave rxdata;
-};
-
-/**
- * struct crypto_platform_data - board-specific AES/TDES/SHA configuration
- * @dma_slave: DMA slave interface to use in data transfers.
- */
-struct crypto_platform_data {
- struct crypto_dma_data *dma_slave;
-};
-
-#endif /* __LINUX_CRYPTO_ATMEL_H */
diff --git a/include/linux/psp-tee.h b/include/linux/psp-tee.h
new file mode 100644
index 000000000000..cb0c95d6d76b
--- /dev/null
+++ b/include/linux/psp-tee.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * AMD Trusted Execution Environment (TEE) interface
+ *
+ * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
+ *
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ */
+
+#ifndef __PSP_TEE_H_
+#define __PSP_TEE_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+/* This file defines the Trusted Execution Environment (TEE) interface commands
+ * and the API exported by AMD Secure Processor driver to communicate with
+ * AMD-TEE Trusted OS.
+ */
+
+/**
+ * enum tee_cmd_id - TEE Interface Command IDs
+ * @TEE_CMD_ID_LOAD_TA: Load Trusted Application (TA) binary into
+ * TEE environment
+ * @TEE_CMD_ID_UNLOAD_TA: Unload TA binary from TEE environment
+ * @TEE_CMD_ID_OPEN_SESSION: Open session with loaded TA
+ * @TEE_CMD_ID_CLOSE_SESSION: Close session with loaded TA
+ * @TEE_CMD_ID_INVOKE_CMD: Invoke a command with loaded TA
+ * @TEE_CMD_ID_MAP_SHARED_MEM: Map shared memory
+ * @TEE_CMD_ID_UNMAP_SHARED_MEM: Unmap shared memory
+ */
+enum tee_cmd_id {
+ TEE_CMD_ID_LOAD_TA = 1,
+ TEE_CMD_ID_UNLOAD_TA,
+ TEE_CMD_ID_OPEN_SESSION,
+ TEE_CMD_ID_CLOSE_SESSION,
+ TEE_CMD_ID_INVOKE_CMD,
+ TEE_CMD_ID_MAP_SHARED_MEM,
+ TEE_CMD_ID_UNMAP_SHARED_MEM,
+};
+
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+/**
+ * psp_tee_process_cmd() - Process command in Trusted Execution Environment
+ * @cmd_id: TEE command ID (&enum tee_cmd_id)
+ * @buf: Command buffer for TEE processing. On success, is updated
+ * with the response
+ * @len: Length of command buffer in bytes
+ * @status: On success, holds the TEE command execution status
+ *
+ * This function submits a command to the Trusted OS for processing in the
+ * TEE environment and waits for a response or until the command times out.
+ *
+ * Returns:
+ * 0 if TEE successfully processed the command
+ * -%ENODEV if PSP device not available
+ * -%EINVAL if invalid input
+ * -%ETIMEDOUT if TEE command timed out
+ * -%EBUSY if PSP device is not responsive
+ */
+int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
+ u32 *status);
+
+/**
+ * psp_check_tee_status() - Checks whether there is a TEE which a driver can
+ * talk to.
+ *
+ * This function can be used by AMD-TEE driver to query if there is TEE with
+ * which it can communicate.
+ *
+ * Returns:
+ * 0 if the device has TEE
+ * -%ENODEV if there is no TEE available
+ */
+int psp_check_tee_status(void);
+
+#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
+
+static inline int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf,
+ size_t len, u32 *status)
+{
+ return -ENODEV;
+}
+
+static inline int psp_check_tee_status(void)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_CRYPTO_DEV_SP_PSP */
+#endif /* __PSP_TEE_H_ */
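
A sketch of calling the new in-kernel TEE API from a client driver (in the tree, the AMD-TEE driver is the intended user). The distinction the kernel-doc draws matters in practice: the return value covers transport-level failures, while *status carries the Trusted OS result. The example_ wrapper and its -EIO mapping are assumptions.

#include <linux/psp-tee.h>

static int example_invoke(void *cmd_buf, size_t len)
{
        u32 tee_status = 0;
        int err;

        err = psp_tee_process_cmd(TEE_CMD_ID_INVOKE_CMD, cmd_buf, len,
                                  &tee_status);
        if (err)
                return err;     /* -ENODEV, -EINVAL, -ETIMEDOUT, -EBUSY */

        /* 0 from the call only means the PSP handled the command; the
         * Trusted OS result still has to be checked separately. */
        return tee_status ? -EIO : 0;
}
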
diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h
index 4b9eb064d7e7..6596f3a09e54 100644
--- a/include/uapi/linux/tee.h
+++ b/include/uapi/linux/tee.h
@@ -56,6 +56,7 @@
* TEE Implementation ID
*/
#define TEE_IMPL_ID_OPTEE 1
+#define TEE_IMPL_ID_AMDTEE 2
/*
* OP-TEE specific capabilities