-rw-r--r--  Documentation/crypto/devel-algos.rst    4
-rw-r--r--  crypto/ahash.c                        117
-rw-r--r--  crypto/shash.c                          8
-rw-r--r--  include/crypto/internal/hash.h          4
-rw-r--r--  include/linux/crypto.h                 27
5 files changed, 28 insertions, 132 deletions
diff --git a/Documentation/crypto/devel-algos.rst b/Documentation/crypto/devel-algos.rst
index 3506899ef83e..9b7782f4f6e0 100644
--- a/Documentation/crypto/devel-algos.rst
+++ b/Documentation/crypto/devel-algos.rst
@@ -235,6 +235,4 @@ Specifics Of Asynchronous HASH Transformation
Some of the drivers will want to use the Generic ScatterWalk in case the
implementation needs to be fed separate chunks of the scatterlist which
-contains the input data. The buffer containing the resulting hash will
-always be properly aligned to .cra_alignmask so there is no need to
-worry about this.
+contains the input data.
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 213bb3e9f245..744fd3b8ea25 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -35,21 +35,12 @@ struct ahash_request_priv {
static int hash_walk_next(struct crypto_hash_walk *walk)
{
- unsigned int alignmask = walk->alignmask;
unsigned int offset = walk->offset;
unsigned int nbytes = min(walk->entrylen,
((unsigned int)(PAGE_SIZE)) - offset);
walk->data = kmap_local_page(walk->pg);
walk->data += offset;
-
- if (offset & alignmask) {
- unsigned int unaligned = alignmask + 1 - (offset & alignmask);
-
- if (nbytes > unaligned)
- nbytes = unaligned;
- }
-
walk->entrylen -= nbytes;
return nbytes;
}
@@ -73,23 +64,8 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
- unsigned int alignmask = walk->alignmask;
-
walk->data -= walk->offset;
- if (walk->entrylen && (walk->offset & alignmask) && !err) {
- unsigned int nbytes;
-
- walk->offset = ALIGN(walk->offset, alignmask + 1);
- nbytes = min(walk->entrylen,
- (unsigned int)(PAGE_SIZE - walk->offset));
- if (nbytes) {
- walk->entrylen -= nbytes;
- walk->data += walk->offset;
- return nbytes;
- }
- }
-
kunmap_local(walk->data);
crypto_yield(walk->flags);
@@ -121,7 +97,6 @@ int crypto_hash_walk_first(struct ahash_request *req,
return 0;
}
- walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
walk->sg = req->src;
walk->flags = req->base.flags;
@@ -129,26 +104,6 @@ int crypto_hash_walk_first(struct ahash_request *req,
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
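[Editor's note] With the alignmask fixups gone, the walk simply hands back each mapped chunk of req->src as-is. For illustration, a minimal sketch of how an ahash implementation might drive these helpers; struct example_ctx and example_consume() are hypothetical placeholders, while crypto_hash_walk_first()/crypto_hash_walk_done() are the real API touched above:

static int example_ahash_update(struct ahash_request *req)
{
	struct example_ctx *ctx = ahash_request_ctx(req);	/* hypothetical driver state */
	struct crypto_hash_walk walk;
	int nbytes;

	/* Each iteration maps one chunk; passing 0 to _done() requests the next one. */
	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, 0))
		example_consume(ctx, walk.data, nbytes);	/* hypothetical */

	return nbytes;
}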
-static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
-{
- unsigned long alignmask = crypto_ahash_alignmask(tfm);
- int ret;
- u8 *buffer, *alignbuffer;
- unsigned long absize;
-
- absize = keylen + alignmask;
- buffer = kmalloc(absize, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
- memcpy(alignbuffer, key, keylen);
- ret = tfm->setkey(tfm, alignbuffer, keylen);
- kfree_sensitive(buffer);
- return ret;
-}
-
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -167,13 +122,7 @@ static void ahash_set_needkey(struct crypto_ahash *tfm)
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
- unsigned long alignmask = crypto_ahash_alignmask(tfm);
- int err;
-
- if ((unsigned long)key & alignmask)
- err = ahash_setkey_unaligned(tfm, key, keylen);
- else
- err = tfm->setkey(tfm, key, keylen);
+ int err = tfm->setkey(tfm, key, keylen);
if (unlikely(err)) {
ahash_set_needkey(tfm);
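[Editor's note] Since the bounce-buffer path for misaligned keys is removed, keying an ahash is now a single direct call. A caller-side sketch, assuming key/keylen are already in scope and using "hmac(sha256)" purely as an example algorithm name:

struct crypto_ahash *tfm;
int err;

tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
if (IS_ERR(tfm))
	return PTR_ERR(tfm);

/* No alignment requirement on 'key'; it is passed straight to ->setkey(). */
err = crypto_ahash_setkey(tfm, key, keylen);
if (err)
	crypto_free_ahash(tfm);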
@@ -189,7 +138,6 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
bool has_state)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- unsigned long alignmask = crypto_ahash_alignmask(tfm);
unsigned int ds = crypto_ahash_digestsize(tfm);
struct ahash_request *subreq;
unsigned int subreq_size;
@@ -203,7 +151,6 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
subreq_size += reqsize;
subreq_size += ds;
- subreq_size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
flags = ahash_request_flags(req);
gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
@@ -215,7 +162,6 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
ahash_request_set_callback(subreq, flags, cplt, req);
result = (u8 *)(subreq + 1) + reqsize;
- result = PTR_ALIGN(result, alignmask + 1);
ahash_request_set_crypt(subreq, req->src, result, req->nbytes);
@@ -251,56 +197,6 @@ static void ahash_restore_req(struct ahash_request *req, int err)
kfree_sensitive(subreq);
}
-static void ahash_op_unaligned_done(void *data, int err)
-{
- struct ahash_request *areq = data;
-
- if (err == -EINPROGRESS)
- goto out;
-
- /* First copy req->result into req->priv.result */
- ahash_restore_req(areq, err);
-
-out:
- /* Complete the ORIGINAL request. */
- ahash_request_complete(areq, err);
-}
-
-static int ahash_op_unaligned(struct ahash_request *req,
- int (*op)(struct ahash_request *),
- bool has_state)
-{
- int err;
-
- err = ahash_save_req(req, ahash_op_unaligned_done, has_state);
- if (err)
- return err;
-
- err = op(req->priv);
- if (err == -EINPROGRESS || err == -EBUSY)
- return err;
-
- ahash_restore_req(req, err);
-
- return err;
-}
-
-static int crypto_ahash_op(struct ahash_request *req,
- int (*op)(struct ahash_request *),
- bool has_state)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- unsigned long alignmask = crypto_ahash_alignmask(tfm);
- int err;
-
- if ((unsigned long)req->result & alignmask)
- err = ahash_op_unaligned(req, op, has_state);
- else
- err = op(req);
-
- return crypto_hash_errstat(crypto_hash_alg_common(tfm), err);
-}
-
int crypto_ahash_final(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -309,7 +205,7 @@ int crypto_ahash_final(struct ahash_request *req)
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
atomic64_inc(&hash_get_stat(alg)->hash_cnt);
- return crypto_ahash_op(req, tfm->final, true);
+ return crypto_hash_errstat(alg, tfm->final(req));
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);
@@ -325,7 +221,7 @@ int crypto_ahash_finup(struct ahash_request *req)
atomic64_add(req->nbytes, &istat->hash_tlen);
}
- return crypto_ahash_op(req, tfm->finup, true);
+ return crypto_hash_errstat(alg, tfm->finup(req));
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
@@ -333,6 +229,7 @@ int crypto_ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
+ int err;
if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
struct crypto_istat_hash *istat = hash_get_stat(alg);
@@ -342,9 +239,11 @@ int crypto_ahash_digest(struct ahash_request *req)
}
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return crypto_hash_errstat(alg, -ENOKEY);
+ err = -ENOKEY;
+ else
+ err = tfm->digest(req);
- return crypto_ahash_op(req, tfm->digest, false);
+ return crypto_hash_errstat(alg, err);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
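[Editor's note] For reference, a one-shot digest through this path could look as follows. This is only a sketch, assuming "sha256", a source buffer in linearly mapped memory (sg_init_one() requires that), and a sleepable context; after this change the result buffer needs no particular alignment:

static int example_sha256(const void *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return err;
}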
diff --git a/crypto/shash.c b/crypto/shash.c
index 409b33f9c97c..359702c2cd02 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -541,6 +541,10 @@ int hash_prepare_alg(struct hash_alg_common *alg)
if (alg->digestsize > HASH_MAX_DIGESTSIZE)
return -EINVAL;
+ /* alignmask is not useful for hashes, so it is not supported. */
+ if (base->cra_alignmask)
+ return -EINVAL;
+
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
@@ -557,10 +561,6 @@ static int shash_prepare_alg(struct shash_alg *alg)
if (alg->descsize > HASH_MAX_DESCSIZE)
return -EINVAL;
- /* alignmask is not useful for shash, so it is not supported. */
- if (base->cra_alignmask)
- return -EINVAL;
-
if ((alg->export && !alg->import) || (alg->import && !alg->export))
return -EINVAL;
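[Editor's note] With the check now in hash_prepare_alg(), every hash algorithm (ahash and shash alike) must leave cra_alignmask at 0 or registration fails with -EINVAL. A minimal sketch of a conforming shash definition; every example_* identifier is hypothetical:

static struct shash_alg example_alg = {
	.digestsize	= 32,
	.init		= example_init,
	.update		= example_update,
	.final		= example_final,
	.descsize	= sizeof(struct example_desc_ctx),
	.base		= {
		.cra_name	 = "example-hash",
		.cra_driver_name = "example-hash-generic",
		.cra_priority	 = 100,
		.cra_blocksize	 = 64,
		/* .cra_alignmask deliberately left 0 */
		.cra_module	 = THIS_MODULE,
	},
};

/* Registered as usual with crypto_register_shash(&example_alg). */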
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 8d0cd0c591a0..59c707e4dea4 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -18,15 +18,13 @@ struct crypto_hash_walk {
char *data;
unsigned int offset;
- unsigned int alignmask;
+ unsigned int flags;
struct page *pg;
unsigned int entrylen;
unsigned int total;
struct scatterlist *sg;
-
- unsigned int flags;
};
struct ahash_instance {
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index f3c3a3b27fac..b164da5e129e 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -110,7 +110,6 @@
* crypto_aead_walksize() (with the remainder going at the end), no chunk
* can cross a page boundary or a scatterlist element boundary.
* ahash:
- * - The result buffer must be aligned to the algorithm's alignmask.
* - crypto_ahash_finup() must not be used unless the algorithm implements
* ->finup() natively.
*/
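[Editor's note] As the note above says, crypto_ahash_finup() may only be used when the algorithm provides ->finup() natively; a caller that cannot rely on that can fall back to the init/update/final sequence. A sketch assuming 'req' and 'wait' were set up as for a digest (crypto_req_done callback, source scatterlist and result buffer already attached):

err = crypto_wait_req(crypto_ahash_init(req), &wait);
if (!err)
	err = crypto_wait_req(crypto_ahash_update(req), &wait);
if (!err)
	err = crypto_wait_req(crypto_ahash_final(req), &wait);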
@@ -278,18 +277,20 @@ struct compress_alg {
* @cra_ctxsize: Size of the operational context of the transformation. This
* value informs the kernel crypto API about the memory size
* needed to be allocated for the transformation context.
- * @cra_alignmask: Alignment mask for the input and output data buffer. The data
- * buffer containing the input data for the algorithm must be
- * aligned to this alignment mask. The data buffer for the
- * output data must be aligned to this alignment mask. Note that
- * the Crypto API will do the re-alignment in software, but
- * only under special conditions and there is a performance hit.
- * The re-alignment happens at these occasions for different
- * @cra_u types: cipher -- For both input data and output data
- * buffer; ahash -- For output hash destination buf; shash --
- * For output hash destination buf.
- * This is needed on hardware which is flawed by design and
- * cannot pick data from arbitrary addresses.
+ * @cra_alignmask: For cipher, skcipher, lskcipher, and aead algorithms this is
+ * 1 less than the alignment, in bytes, that the algorithm
+ * implementation requires for input and output buffers. When
+ * the crypto API is invoked with buffers that are not aligned
+ * to this alignment, the crypto API automatically utilizes
+ * appropriately aligned temporary buffers to comply with what
+ * the algorithm needs. (For scatterlists this happens only if
+ * the algorithm uses the skcipher_walk helper functions.) This
+ * misalignment handling carries a performance penalty, so it is
+ * preferred that algorithms do not set a nonzero alignmask.
+ * Also, crypto API users may wish to allocate buffers aligned
+ * to the alignmask of the algorithm being used, in order to
+ * avoid the API having to realign them. Note: the alignmask is
+ * not supported for hash algorithms and is always 0 for them.
* @cra_priority: Priority of this transformation implementation. In case
* multiple transformations with same @cra_name are available to
* the Crypto API, the kernel will use the one with highest
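[Editor's note] To illustrate the user-side advice in the new @cra_alignmask text above (which still applies to cipher, skcipher, lskcipher, and aead, just not to hashes): a hedged sketch of over-allocating and aligning a buffer up front so the API never has to realign it; example_alloc_aligned() is a hypothetical helper:

static void *example_alloc_aligned(struct crypto_skcipher *tfm,
				   unsigned int len, gfp_t gfp,
				   void **to_free)
{
	unsigned int mask = crypto_skcipher_alignmask(tfm);
	void *buf = kmalloc(len + mask, gfp);

	*to_free = buf;		/* pass this, not the aligned pointer, to kfree() */
	return buf ? PTR_ALIGN(buf, mask + 1) : NULL;
}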