Diffstat (limited to 'drivers/crypto/nx/nx-sha256.c')
 drivers/crypto/nx/nx-sha256.c | 179
 1 file changed, 71 insertions(+), 108 deletions(-)
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index a6764af83c6d..5b29dd026df2 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -1,37 +1,32 @@
-/**
+// SPDX-License-Identifier: GPL-2.0-only
+/*
  * SHA-256 routines supporting the Power 7+ Nest Accelerators driver
  *
  * Copyright (C) 2011-2012 International Business Machines Inc.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 only.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  * Author: Kent Yoder <yoder1@us.ibm.com>
  */
 
 #include <crypto/internal/hash.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
-#include <asm/vio.h>
-#include <asm/byteorder.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
 
 #include "nx_csbcpb.h"
 #include "nx.h"
 
+struct sha256_state_be {
+	__be32 state[SHA256_DIGEST_SIZE / 4];
+	u64 count;
+};
 
-static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
+static int nx_crypto_ctx_sha256_init(struct crypto_shash *tfm)
 {
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm);
 	int err;
 
 	err = nx_crypto_ctx_sha_init(tfm);
@@ -47,10 +42,9 @@ static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
 	return 0;
 }
 
-static int nx_sha256_init(struct shash_desc *desc) {
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-
-	memset(sctx, 0, sizeof *sctx);
+static int nx_sha256_init(struct shash_desc *desc)
+{
+	struct sha256_state_be *sctx = shash_desc_ctx(desc);
 
 	sctx->state[0] = __cpu_to_be32(SHA256_H0);
 	sctx->state[1] = __cpu_to_be32(SHA256_H1);
@@ -68,30 +62,18 @@ static int nx_sha256_init(struct shash_desc *desc) {
 static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 			    unsigned int len)
 {
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
+	struct sha256_state_be *sctx = shash_desc_ctx(desc);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+	u64 to_process, leftover, total = len;
 	struct nx_sg *out_sg;
-	u64 to_process = 0, leftover, total;
 	unsigned long irq_flags;
 	int rc = 0;
 	int data_len;
 	u32 max_sg_len;
-	u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-	/* 2 cases for total data len:
-	 *  1: < SHA256_BLOCK_SIZE: copy into state, return 0
-	 *  2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
-	 */
-	total = (sctx->count % SHA256_BLOCK_SIZE) + len;
-	if (total < SHA256_BLOCK_SIZE) {
-		memcpy(sctx->buf + buf_len, data, len);
-		sctx->count += len;
-		goto out;
-	}
-
 	memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE);
 	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
 	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
@@ -112,41 +94,17 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	}
 
 	do {
-		int used_sgs = 0;
 		struct nx_sg *in_sg = nx_ctx->in_sg;
 
-		if (buf_len) {
-			data_len = buf_len;
-			in_sg = nx_build_sg_list(in_sg,
-						 (u8 *) sctx->buf,
-						 &data_len,
-						 max_sg_len);
-
-			if (data_len != buf_len) {
-				rc = -EINVAL;
-				goto out;
-			}
-			used_sgs = in_sg - nx_ctx->in_sg;
-		}
+		to_process = total & ~(SHA256_BLOCK_SIZE - 1);
 
-		/* to_process: SHA256_BLOCK_SIZE aligned chunk to be
-		 * processed in this iteration. This value is restricted
-		 * by sg list limits and number of sgs we already used
-		 * for leftover data. (see above)
-		 * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
-		 * but because data may not be aligned, we need to account
-		 * for that too. */
-		to_process = min_t(u64, total,
-				   (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
-		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
-
-		data_len = to_process - buf_len;
+		data_len = to_process;
 		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
 					 &data_len, max_sg_len);
 
 		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 
-		to_process = data_len + buf_len;
+		to_process = data_len;
 		leftover = total - to_process;
 
 		/*
@@ -162,34 +120,29 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 			goto out;
 		}
 
-		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
-				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
 		if (rc)
 			goto out;
 
 		atomic_inc(&(nx_ctx->stats->sha256_ops));
 
 		total -= to_process;
-		data += to_process - buf_len;
-		buf_len = 0;
-
+		data += to_process;
+		sctx->count += to_process;
 	} while (leftover >= SHA256_BLOCK_SIZE);
 
-	/* copy the leftover back into the state struct */
-	if (leftover)
-		memcpy(sctx->buf, data, leftover);
-
-	sctx->count += len;
+	rc = leftover;
 	memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
 out:
 	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return rc;
 }
 
-static int nx_sha256_final(struct shash_desc *desc, u8 *out)
+static int nx_sha256_finup(struct shash_desc *desc, const u8 *src,
+			   unsigned int nbytes, u8 *out)
 {
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
+	struct sha256_state_be *sctx = shash_desc_ctx(desc);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
 	struct nx_sg *in_sg, *out_sg;
 	unsigned long irq_flags;
@@ -205,25 +158,19 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
 			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);
 
 	/* final is represented by continuing the operation and indicating that
-	 * this is not an intermediate operation */
-	if (sctx->count >= SHA256_BLOCK_SIZE) {
-		/* we've hit the nx chip previously, now we're finalizing,
-		 * so copy over the partial digest */
-		memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
-		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
-		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
-	} else {
-		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
-		NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
-	}
-
+	 * this is not an intermediate operation
+	 * copy over the partial digest */
+	memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
+	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+	sctx->count += nbytes;
 	csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
 
-	len = sctx->count & (SHA256_BLOCK_SIZE - 1);
-	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
-				 &len, max_sg_len);
+	len = nbytes;
+	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src,
+				 &len, max_sg_len);
 
-	if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
+	if (len != nbytes) {
 		rc = -EINVAL;
 		goto out;
 	}
@@ -243,8 +190,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
 		goto out;
 	}
 
-	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
-			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
 	if (rc)
 		goto out;
 
@@ -259,19 +205,35 @@ out:
 
 static int nx_sha256_export(struct shash_desc *desc, void *out)
 {
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-
-	memcpy(out, sctx, sizeof(*sctx));
-
+	struct sha256_state_be *sctx = shash_desc_ctx(desc);
+	union {
+		u8 *u8;
+		u32 *u32;
+		u64 *u64;
+	} p = { .u8 = out };
+	int i;
+
+	for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(*p.u32); i++)
+		put_unaligned(be32_to_cpu(sctx->state[i]), p.u32++);
+
+	put_unaligned(sctx->count, p.u64++);
 	return 0;
 }
 
 static int nx_sha256_import(struct shash_desc *desc, const void *in)
 {
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-
-	memcpy(sctx, in, sizeof(*sctx));
-
+	struct sha256_state_be *sctx = shash_desc_ctx(desc);
+	union {
+		const u8 *u8;
+		const u32 *u32;
+		const u64 *u64;
+	} p = { .u8 = in };
+	int i;
+
+	for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(*p.u32); i++)
+		sctx->state[i] = cpu_to_be32(get_unaligned(p.u32++));
+
+	sctx->count = get_unaligned(p.u64++);
 	return 0;
 }
 
@@ -279,19 +241,20 @@ struct shash_alg nx_shash_sha256_alg = {
 	.digestsize = SHA256_DIGEST_SIZE,
 	.init       = nx_sha256_init,
 	.update     = nx_sha256_update,
-	.final      = nx_sha256_final,
+	.finup      = nx_sha256_finup,
 	.export     = nx_sha256_export,
 	.import     = nx_sha256_import,
-	.descsize   = sizeof(struct sha256_state),
-	.statesize  = sizeof(struct sha256_state),
+	.init_tfm   = nx_crypto_ctx_sha256_init,
+	.exit_tfm   = nx_crypto_ctx_shash_exit,
+	.descsize   = sizeof(struct sha256_state_be),
+	.statesize  = sizeof(struct sha256_state_be),
 	.base = {
 		.cra_name        = "sha256",
 		.cra_driver_name = "sha256-nx",
 		.cra_priority    = 300,
+		.cra_flags       = CRYPTO_AHASH_ALG_BLOCK_ONLY,
 		.cra_blocksize   = SHA256_BLOCK_SIZE,
 		.cra_module      = THIS_MODULE,
 		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
-		.cra_init        = nx_crypto_ctx_sha256_init,
-		.cra_exit        = nx_crypto_ctx_exit,
 	}
 };
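
Note on the exported state layout: judging from nx_sha256_export() above, the blob now holds the eight 32-bit hash words converted to CPU order followed by the 64-bit byte count, all stored unaligned. A minimal sketch of reading that layout back; the helper name nx_sha256_state_peek and its parameters are illustrative only and not part of the driver:

#include <linux/types.h>
#include <linux/unaligned.h>

/* Illustrative only: walk a state blob laid out as nx_sha256_export()
 * writes it above: 8 x u32 hash words (CPU order), then a u64 byte
 * count, with no alignment guarantees. */
static void nx_sha256_state_peek(const void *blob, u32 words[8], u64 *count)
{
	const u8 *p = blob;
	int i;

	for (i = 0; i < 8; i++) {
		words[i] = get_unaligned((const u32 *)p);
		p += sizeof(u32);
	}
	*count = get_unaligned((const u64 *)p);
}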
