Diffstat (limited to 'arch/arm64/crypto/sha256-glue.c')
-rw-r--r--  arch/arm64/crypto/sha256-glue.c  42
1 file changed, 24 insertions(+), 18 deletions(-)
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index e273faca924f..35356987cc1e 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -10,11 +10,11 @@
#include <asm/simd.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
-#include <linux/cryptohash.h>
-#include <linux/types.h>
+#include <linux/module.h>
#include <linux/string.h>
+#include <linux/types.h>
MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash for arm64");
MODULE_AUTHOR("Andy Polyakov <appro@openssl.org>");
@@ -27,24 +27,33 @@ asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
unsigned int num_blks);
EXPORT_SYMBOL(sha256_block_data_order);
+static void sha256_arm64_transform(struct sha256_state *sst, u8 const *src,
+ int blocks)
+{
+ sha256_block_data_order(sst->state, src, blocks);
+}
+
asmlinkage void sha256_block_neon(u32 *digest, const void *data,
unsigned int num_blks);
+static void sha256_neon_transform(struct sha256_state *sst, u8 const *src,
+ int blocks)
+{
+ sha256_block_neon(sst->state, src, blocks);
+}
+
static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ return sha256_base_do_update(desc, data, len, sha256_arm64_transform);
}
static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
if (len)
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ sha256_base_do_update(desc, data, len, sha256_arm64_transform);
+ sha256_base_do_finalize(desc, sha256_arm64_transform);
return sha256_base_finish(desc, out);
}
@@ -87,7 +96,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable())
return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ sha256_arm64_transform);
while (len > 0) {
unsigned int chunk = len;
@@ -97,14 +106,13 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
* input when running on a preemptible kernel, but process the
* data block by block instead.
*/
- if (IS_ENABLED(CONFIG_PREEMPT) &&
+ if (IS_ENABLED(CONFIG_PREEMPTION) &&
chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
chunk = SHA256_BLOCK_SIZE -
sctx->count % SHA256_BLOCK_SIZE;
kernel_neon_begin();
- sha256_base_do_update(desc, data, chunk,
- (sha256_block_fn *)sha256_block_neon);
+ sha256_base_do_update(desc, data, chunk, sha256_neon_transform);
kernel_neon_end();
data += chunk;
len -= chunk;
@@ -118,15 +126,13 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable()) {
if (len)
sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ sha256_arm64_transform);
+ sha256_base_do_finalize(desc, sha256_arm64_transform);
} else {
if (len)
sha256_update_neon(desc, data, len);
kernel_neon_begin();
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_neon);
+ sha256_base_do_finalize(desc, sha256_neon_transform);
kernel_neon_end();
}
return sha256_base_finish(desc, out);
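
For context, the update/finup callbacks touched above are consumed by the generic shash interface elsewhere in sha256-glue.c (registration is not part of the hunks shown). A minimal sketch of that registration follows; the field values are illustrative assumptions, not copied from the file:

/* Sketch only: illustrative registration of the scalar SHA-256 callbacks. */
static struct shash_alg sha256_alg = {
	.digestsize		= SHA256_DIGEST_SIZE,
	.init			= sha256_base_init,
	.update			= crypto_sha256_arm64_update,
	.finup			= crypto_sha256_arm64_finup,
	/* .final helper is defined elsewhere in the file (not in this diff) */
	.descsize		= sizeof(struct sha256_state),
	.base.cra_name		= "sha256",
	.base.cra_driver_name	= "sha256-arm64",	/* assumed driver name */
	.base.cra_priority	= 125,			/* assumed priority */
	.base.cra_blocksize	= SHA256_BLOCK_SIZE,
	.base.cra_module	= THIS_MODULE,
};

static int __init sha256_mod_init(void)
{
	return crypto_register_shash(&sha256_alg);
}
module_init(sha256_mod_init);

Because the new sha256_arm64_transform() and sha256_neon_transform() wrappers already match the sha256_block_fn prototype, the helpers above can pass them to sha256_base_do_update() and sha256_base_do_finalize() directly, which is what lets the diff drop the (sha256_block_fn *) casts around the asmlinkage routines.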