Diffstat (limited to 'drivers/crypto/nx/nx-aes-ccm.c')
-rw-r--r--  drivers/crypto/nx/nx-aes-ccm.c | 86
1 file changed, 25 insertions(+), 61 deletions(-)
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 7038f364acb5..56a0b3a67c33 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -1,21 +1,9 @@
-/**
+// SPDX-License-Identifier: GPL-2.0-only
+/*
* AES CCM routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2012 International Business Machines Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 only.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* Author: Kent Yoder <yoder1@us.ibm.com>
*/
@@ -146,7 +134,6 @@ static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
unsigned int cryptlen, u8 *b0)
{
unsigned int l, lp, m = authsize;
- int rc;
memcpy(b0, iv, 16);
@@ -160,9 +147,7 @@ static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
if (assoclen)
*b0 |= 64;
- rc = set_msg_len(b0 + 16 - l, cryptlen, l);
-
- return rc;
+ return set_msg_len(b0 + 16 - l, cryptlen, l);
}
static int generate_pat(u8 *iv,
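The simplification above works because set_msg_len() already returns its own error code, so the local rc was pure noise. For context, a minimal userspace sketch of that length encoding (hypothetical helper name, assuming the RFC 3610 rule that the message length occupies the last l bytes of B0, big-endian, and must fit in that field):

    #include <stdint.h>
    #include <string.h>
    #include <errno.h>

    /* Sketch only: write msglen big-endian into the last csize bytes of
     * block, as set_msg_len() does for B0; fail if the value cannot fit. */
    static int set_msg_len_sketch(uint8_t *block, uint32_t msglen, int csize)
    {
            memset(block, 0, csize);
            if (csize < 4 && msglen >= (1u << (8 * csize)))
                    return -EOVERFLOW;
            for (int i = csize - 1; i >= 0 && msglen != 0; i--, msglen >>= 8)
                    block[i] = (uint8_t)(msglen & 0xff);
            return 0;
    }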
@@ -232,13 +217,11 @@ static int generate_pat(u8 *iv,
memset(b1, 0, 16);
if (assoclen <= 65280) {
*(u16 *)b1 = assoclen;
- scatterwalk_map_and_copy(b1 + 2, req->src, 0,
- iauth_len, SCATTERWALK_FROM_SG);
+ memcpy_from_sglist(b1 + 2, req->src, 0, iauth_len);
} else {
*(u16 *)b1 = (u16)(0xfffe);
*(u32 *)&b1[2] = assoclen;
- scatterwalk_map_and_copy(b1 + 6, req->src, 0,
- iauth_len, SCATTERWALK_FROM_SG);
+ memcpy_from_sglist(b1 + 6, req->src, 0, iauth_len);
}
}
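Both hunks above replace scatterwalk_map_and_copy(..., SCATTERWALK_FROM_SG) with the dedicated copy helper, so the copy direction is now carried by the function name instead of a flag argument. The prototypes assumed here (per include/crypto/scatterwalk.h):

    void memcpy_from_sglist(void *buf, struct scatterlist *sg,
                            unsigned int start, unsigned int nbytes);
    void memcpy_to_sglist(struct scatterlist *sg, unsigned int start,
                          const void *buf, unsigned int nbytes);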
@@ -339,7 +322,7 @@ static int generate_pat(u8 *iv,
}
static int ccm_nx_decrypt(struct aead_request *req,
- struct blkcipher_desc *desc,
+ u8 *iv,
unsigned int assoclen)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
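The signature change here (mirrored in ccm_nx_encrypt below) drops struct blkcipher_desc, of which these functions only ever used the desc.info field, i.e. the IV pointer. Passing u8 *iv directly removes the dependency on the legacy blkcipher API, which no longer exists in the kernel.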
@@ -356,11 +339,10 @@ static int ccm_nx_decrypt(struct aead_request *req,
nbytes -= authsize;
/* copy out the auth tag to compare with later */
- scatterwalk_map_and_copy(priv->oauth_tag,
- req->src, nbytes + req->assoclen, authsize,
- SCATTERWALK_FROM_SG);
+ memcpy_from_sglist(priv->oauth_tag, req->src, nbytes + req->assoclen,
+ authsize);
- rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
+ rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
csbcpb->cpb.aes_ccm.in_pat_or_b0);
if (rc)
goto out;
@@ -379,7 +361,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
- rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+ rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
&to_process, processed + req->assoclen,
csbcpb->cpb.aes_ccm.iv_or_ctr);
if (rc)
@@ -393,7 +375,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
/* for partial completion, copy following for next
* entry into loop...
*/
- memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+ memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_s0,
@@ -403,7 +385,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
/* update stats */
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(csbcpb->csb.processed_byte_count,
+ atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
&(nx_ctx->stats->aes_bytes));
processed += to_process;
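The added be32_to_cpu() matters because the CSB is a hardware-defined structure whose counters are big-endian in memory; adding the raw value on a little-endian host inflates the byte statistics. A standalone illustration, using ntohl() as a stand-in for be32_to_cpu() on a little-endian machine:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
            /* 4096 bytes processed, stored big-endian as hardware reports it */
            uint32_t be_count = htonl(4096);

            printf("raw value added:       %u\n", be_count);        /* 1048576 on LE */
            printf("converted value added: %u\n", ntohl(be_count)); /* 4096 everywhere */
            return 0;
    }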
@@ -417,7 +399,7 @@ out:
}
static int ccm_nx_encrypt(struct aead_request *req,
- struct blkcipher_desc *desc,
+ u8 *iv,
unsigned int assoclen)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
@@ -430,7 +412,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
+ rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
csbcpb->cpb.aes_ccm.in_pat_or_b0);
if (rc)
goto out;
@@ -448,7 +430,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
- rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+ rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
&to_process, processed + req->assoclen,
csbcpb->cpb.aes_ccm.iv_or_ctr);
if (rc)
@@ -462,7 +444,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
/* for partial completion, copy following for next
* entry into loop...
*/
- memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+ memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_s0,
@@ -472,7 +454,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
/* update stats */
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(csbcpb->csb.processed_byte_count,
+ atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
&(nx_ctx->stats->aes_bytes));
processed += to_process;
@@ -480,9 +462,8 @@ static int ccm_nx_encrypt(struct aead_request *req,
} while (processed < nbytes);
/* copy out the auth tag */
- scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
- req->dst, nbytes + req->assoclen, authsize,
- SCATTERWALK_TO_SG);
+ memcpy_to_sglist(req->dst, nbytes + req->assoclen,
+ csbcpb->cpb.aes_ccm.out_pat_or_mac, authsize);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
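The copy-out above also documents the AEAD tag convention: the tag lands in req->dst immediately after the associated data and the ciphertext. Schematically:

    /* Destination buffer layout after CCM encryption:
     *
     *   [ req->assoclen bytes AAD | nbytes ciphertext | authsize-byte tag ]
     *                                                 ^
     *                                 offset nbytes + req->assoclen
     */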
@@ -493,67 +474,50 @@ static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
- struct blkcipher_desc desc;
u8 *iv = rctx->iv;
iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
memcpy(iv + 4, req->iv, 8);
- desc.info = iv;
-
- return ccm_nx_encrypt(req, &desc, req->assoclen - 8);
+ return ccm_nx_encrypt(req, iv, req->assoclen - 8);
}
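The rfc4309 wrapper assembles its nonce from the 3-byte salt captured at setkey time plus the caller's 8-byte IV; iv[0] = 3 selects a 4-octet length field (the byte stores L' = L - 1). Sketch of the resulting 16-byte block, assuming the usual RFC 4309 construction:

    /* iv[0]      = 3     flags/L' byte: 4 length octets          */
    /* iv[1..3]   = salt from the key (nx_ctx->priv.ccm.nonce)    */
    /* iv[4..11]  = per-request IV (req->iv)                      */
    /* iv[12..15] = length/counter field, filled in later by      */
    /*              generate_b0() and the CTR machinery           */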
static int ccm_aes_nx_encrypt(struct aead_request *req)
{
- struct blkcipher_desc desc;
int rc;
- desc.info = req->iv;
-
- rc = crypto_ccm_check_iv(desc.info);
+ rc = crypto_ccm_check_iv(req->iv);
if (rc)
return rc;
- return ccm_nx_encrypt(req, &desc, req->assoclen);
+ return ccm_nx_encrypt(req, req->iv, req->assoclen);
}
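crypto_ccm_check_iv() is defined earlier in this file and only inspects the first IV byte. A sketch of the check as it commonly appears in kernel CCM code (the exact body is not shown in this diff):

    /* CCM requires 2 <= L <= 8, and iv[0] stores L' = L - 1 */
    static int crypto_ccm_check_iv(const u8 *iv)
    {
            if (iv[0] < 1 || iv[0] > 7)
                    return -EINVAL;
            return 0;
    }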
static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
- struct blkcipher_desc desc;
u8 *iv = rctx->iv;
iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
memcpy(iv + 4, req->iv, 8);
- desc.info = iv;
-
- return ccm_nx_decrypt(req, &desc, req->assoclen - 8);
+ return ccm_nx_decrypt(req, iv, req->assoclen - 8);
}
static int ccm_aes_nx_decrypt(struct aead_request *req)
{
- struct blkcipher_desc desc;
int rc;
- desc.info = req->iv;
-
- rc = crypto_ccm_check_iv(desc.info);
+ rc = crypto_ccm_check_iv(req->iv);
if (rc)
return rc;
- return ccm_nx_decrypt(req, &desc, req->assoclen);
+ return ccm_nx_decrypt(req, req->iv, req->assoclen);
}
-/* tell the block cipher walk routines that this is a stream cipher by
- * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
- * during encrypt/decrypt doesn't solve this problem, because it calls
- * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
- * but instead uses this tfm->blocksize. */
struct aead_alg nx_ccm_aes_alg = {
.base = {
.cra_name = "ccm(aes)",