Diffstat (limited to 'drivers/crypto/nx')
 -rw-r--r--  drivers/crypto/nx/nx-842.c             |  39
 -rw-r--r--  drivers/crypto/nx/nx-842.h             |  28
 -rw-r--r--  drivers/crypto/nx/nx-aes-cbc.c         |   8
 -rw-r--r--  drivers/crypto/nx/nx-aes-ccm.c         |  16
 -rw-r--r--  drivers/crypto/nx/nx-aes-ctr.c         |   8
 -rw-r--r--  drivers/crypto/nx/nx-aes-ecb.c         |   8
 -rw-r--r--  drivers/crypto/nx/nx-aes-gcm.c         |  17
 -rw-r--r--  drivers/crypto/nx/nx-aes-xcbc.c        | 128
 -rw-r--r--  drivers/crypto/nx/nx-common-powernv.c  |  31
 -rw-r--r--  drivers/crypto/nx/nx-common-pseries.c  | 107
 -rw-r--r--  drivers/crypto/nx/nx-sha256.c          | 130
 -rw-r--r--  drivers/crypto/nx/nx-sha512.c          | 143
 -rw-r--r--  drivers/crypto/nx/nx.c                 |  53
 -rw-r--r--  drivers/crypto/nx/nx.h                 |  14
 14 files changed, 307 insertions(+), 423 deletions(-)
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 2ab90ec10e61..b950fcce8a9b 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -101,9 +101,13 @@ static int update_param(struct nx842_crypto_param *p,
return 0;
}
-int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver)
+void *nx842_crypto_alloc_ctx(struct nx842_driver *driver)
{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct nx842_crypto_ctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
spin_lock_init(&ctx->lock);
ctx->driver = driver;
@@ -114,22 +118,23 @@ int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver)
kfree(ctx->wmem);
free_page((unsigned long)ctx->sbounce);
free_page((unsigned long)ctx->dbounce);
- return -ENOMEM;
+ kfree(ctx);
+ return ERR_PTR(-ENOMEM);
}
- return 0;
+ return ctx;
}
-EXPORT_SYMBOL_GPL(nx842_crypto_init);
+EXPORT_SYMBOL_GPL(nx842_crypto_alloc_ctx);
-void nx842_crypto_exit(struct crypto_tfm *tfm)
+void nx842_crypto_free_ctx(void *p)
{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct nx842_crypto_ctx *ctx = p;
kfree(ctx->wmem);
free_page((unsigned long)ctx->sbounce);
free_page((unsigned long)ctx->dbounce);
}
-EXPORT_SYMBOL_GPL(nx842_crypto_exit);
+EXPORT_SYMBOL_GPL(nx842_crypto_free_ctx);
static void check_constraints(struct nx842_constraints *c)
{
@@ -246,12 +251,14 @@ nospc:
return update_param(p, slen, dskip + dlen);
}
-int nx842_crypto_compress(struct crypto_tfm *tfm,
+int nx842_crypto_compress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
+ u8 *dst, unsigned int *dlen, void *pctx)
{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
- struct nx842_crypto_header *hdr = &ctx->header;
+ struct nx842_crypto_ctx *ctx = pctx;
+ struct nx842_crypto_header *hdr =
+ container_of(&ctx->header,
+ struct nx842_crypto_header, hdr);
struct nx842_crypto_param p;
struct nx842_constraints c = *ctx->driver->constraints;
unsigned int groups, hdrsize, h;
@@ -429,11 +436,11 @@ usesw:
return update_param(p, slen + padding, dlen);
}
-int nx842_crypto_decompress(struct crypto_tfm *tfm,
+int nx842_crypto_decompress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
+ u8 *dst, unsigned int *dlen, void *pctx)
{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct nx842_crypto_ctx *ctx = pctx;
struct nx842_crypto_header *hdr;
struct nx842_crypto_param p;
struct nx842_constraints c = *ctx->driver->constraints;
@@ -490,7 +497,7 @@ int nx842_crypto_decompress(struct crypto_tfm *tfm,
}
memcpy(&ctx->header, src, hdr_len);
- hdr = &ctx->header;
+ hdr = container_of(&ctx->header, struct nx842_crypto_header, hdr);
for (n = 0; n < hdr->groups; n++) {
/* ignore applies to last group */
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h
index 7590bfb24d79..f5e2c82ba876 100644
--- a/drivers/crypto/nx/nx-842.h
+++ b/drivers/crypto/nx/nx-842.h
@@ -3,7 +3,6 @@
#ifndef __NX_842_H__
#define __NX_842_H__
-#include <crypto/algapi.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -101,6 +100,8 @@
#define LEN_ON_SIZE(pa, size) ((size) - ((pa) & ((size) - 1)))
#define LEN_ON_PAGE(pa) LEN_ON_SIZE(pa, PAGE_SIZE)
+struct crypto_scomp;
+
static inline unsigned long nx842_get_pa(void *addr)
{
if (!is_vmalloc_addr(addr))
@@ -157,11 +158,16 @@ struct nx842_crypto_header_group {
} __packed;
struct nx842_crypto_header {
- __be16 magic; /* NX842_CRYPTO_MAGIC */
- __be16 ignore; /* decompressed end bytes to ignore */
- u8 groups; /* total groups in this header */
+ /* New members MUST be added within the struct_group() macro below. */
+ struct_group_tagged(nx842_crypto_header_hdr, hdr,
+ __be16 magic; /* NX842_CRYPTO_MAGIC */
+ __be16 ignore; /* decompressed end bytes to ignore */
+ u8 groups; /* total groups in this header */
+ );
struct nx842_crypto_header_group group[];
} __packed;
+static_assert(offsetof(struct nx842_crypto_header, group) == sizeof(struct nx842_crypto_header_hdr),
+ "struct member likely outside of struct_group_tagged()");
#define NX842_CRYPTO_GROUP_MAX (0x20)
@@ -171,19 +177,19 @@ struct nx842_crypto_ctx {
u8 *wmem;
u8 *sbounce, *dbounce;
- struct nx842_crypto_header header;
+ struct nx842_crypto_header_hdr header;
struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX];
struct nx842_driver *driver;
};
-int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver);
-void nx842_crypto_exit(struct crypto_tfm *tfm);
-int nx842_crypto_compress(struct crypto_tfm *tfm,
+void *nx842_crypto_alloc_ctx(struct nx842_driver *driver);
+void nx842_crypto_free_ctx(void *ctx);
+int nx842_crypto_compress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-int nx842_crypto_decompress(struct crypto_tfm *tfm,
+ u8 *dst, unsigned int *dlen, void *ctx);
+int nx842_crypto_decompress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
+ u8 *dst, unsigned int *dlen, void *ctx);
#endif /* __NX_842_H__ */
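
Note: struct_group_tagged() (from <linux/stddef.h>) wraps the fixed header
fields in a separately named type, nx842_crypto_header_hdr, so the context
can embed just that prefix while code that needs the flexible group[] view
recovers it with container_of(); the static_assert() catches members added
outside the group. A minimal sketch of the same pattern, using illustrative
names that are not taken from this patch:

    struct frame {
            /* New members MUST be added inside the struct_group. */
            struct_group_tagged(frame_hdr, hdr,
                    __be16 magic;
                    u8 count;
            );
            u8 body[];              /* flexible payload follows the header */
    } __packed;

    static_assert(offsetof(struct frame, body) == sizeof(struct frame_hdr),
                  "struct member likely outside of struct_group_tagged()");

    /* Mirror of nx842_crypto_ctx: a bare frame_hdr immediately followed by
     * storage for the flexible part ... */
    struct frame_ctx {
            struct frame_hdr hdr;
            u8 body[64];
    };

    /* ... can be viewed as a full struct frame, as nx842_crypto_compress()
     * does with ctx->header. */
    static struct frame *frame_view(struct frame_ctx *ctx)
    {
            return container_of(&ctx->hdr, struct frame, hdr);
    }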
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index 0e440f704a8f..35fa5bad1d9f 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -8,10 +8,12 @@
*/
#include <crypto/aes.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index c843f4c6f684..56a0b3a67c33 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -217,13 +217,11 @@ static int generate_pat(u8 *iv,
memset(b1, 0, 16);
if (assoclen <= 65280) {
*(u16 *)b1 = assoclen;
- scatterwalk_map_and_copy(b1 + 2, req->src, 0,
- iauth_len, SCATTERWALK_FROM_SG);
+ memcpy_from_sglist(b1 + 2, req->src, 0, iauth_len);
} else {
*(u16 *)b1 = (u16)(0xfffe);
*(u32 *)&b1[2] = assoclen;
- scatterwalk_map_and_copy(b1 + 6, req->src, 0,
- iauth_len, SCATTERWALK_FROM_SG);
+ memcpy_from_sglist(b1 + 6, req->src, 0, iauth_len);
}
}
@@ -341,9 +339,8 @@ static int ccm_nx_decrypt(struct aead_request *req,
nbytes -= authsize;
/* copy out the auth tag to compare with later */
- scatterwalk_map_and_copy(priv->oauth_tag,
- req->src, nbytes + req->assoclen, authsize,
- SCATTERWALK_FROM_SG);
+ memcpy_from_sglist(priv->oauth_tag, req->src, nbytes + req->assoclen,
+ authsize);
rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
csbcpb->cpb.aes_ccm.in_pat_or_b0);
@@ -465,9 +462,8 @@ static int ccm_nx_encrypt(struct aead_request *req,
} while (processed < nbytes);
/* copy out the auth tag */
- scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
- req->dst, nbytes + req->assoclen, authsize,
- SCATTERWALK_TO_SG);
+ memcpy_to_sglist(req->dst, nbytes + req->assoclen,
+ csbcpb->cpb.aes_ccm.out_pat_or_mac, authsize);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
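
Note: memcpy_from_sglist() and memcpy_to_sglist() are the
<crypto/scatterwalk.h> replacements for scatterwalk_map_and_copy() plus an
explicit SCATTERWALK_FROM_SG/TO_SG flag; the direction is now in the function
name and the arguments are a linear buffer, a scatterlist offset and a length.
A hedged sketch of the pattern used here, with an illustrative tag size:

    u8 tag[16];     /* illustrative; the driver uses the request authsize */

    /* copy the tag out of the source scatterlist ... */
    memcpy_from_sglist(tag, req->src, req->assoclen + nbytes, sizeof(tag));

    /* ... or write a computed tag into the destination scatterlist */
    memcpy_to_sglist(req->dst, req->assoclen + nbytes, tag, sizeof(tag));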
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index dfa3ad1a12f2..709b3ee74657 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -9,10 +9,12 @@
#include <crypto/aes.h>
#include <crypto/ctr.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index 502a565074e9..4039cf3b22d4 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -8,10 +8,12 @@
*/
#include <crypto/aes.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 4a796318b430..b7fe2de96d96 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -103,16 +103,13 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
{
int rc;
struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
- struct scatter_walk walk;
struct nx_sg *nx_sg = nx_ctx->in_sg;
unsigned int nbytes = assoclen;
unsigned int processed = 0, to_process;
unsigned int max_sg_len;
if (nbytes <= AES_BLOCK_SIZE) {
- scatterwalk_start(&walk, req->src);
- scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
- scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
+ memcpy_from_sglist(out, req->src, 0, nbytes);
return 0;
}
@@ -391,19 +388,17 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
mac:
if (enc) {
/* copy out the auth tag */
- scatterwalk_map_and_copy(
- csbcpb->cpb.aes_gcm.out_pat_or_mac,
+ memcpy_to_sglist(
req->dst, req->assoclen + nbytes,
- crypto_aead_authsize(crypto_aead_reqtfm(req)),
- SCATTERWALK_TO_SG);
+ csbcpb->cpb.aes_gcm.out_pat_or_mac,
+ crypto_aead_authsize(crypto_aead_reqtfm(req)));
} else {
u8 *itag = nx_ctx->priv.gcm.iauth_tag;
u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;
- scatterwalk_map_and_copy(
+ memcpy_from_sglist(
itag, req->src, req->assoclen + nbytes,
- crypto_aead_authsize(crypto_aead_reqtfm(req)),
- SCATTERWALK_FROM_SG);
+ crypto_aead_authsize(crypto_aead_reqtfm(req)));
rc = crypto_memneq(itag, otag,
crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
-EBADMSG : 0;
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index eb5c8f689360..bf465d824e2c 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -7,13 +7,14 @@
* Author: Kent Yoder <yoder1@us.ibm.com>
*/
-#include <crypto/internal/hash.h>
#include <crypto/aes.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <linux/atomic.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
-#include <asm/vio.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include "nx_csbcpb.h"
#include "nx.h"
@@ -21,8 +22,6 @@
struct xcbc_state {
u8 state[AES_BLOCK_SIZE];
- unsigned int count;
- u8 buffer[AES_BLOCK_SIZE];
};
static int nx_xcbc_set_key(struct crypto_shash *desc,
@@ -58,7 +57,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
*/
static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
u8 keys[2][AES_BLOCK_SIZE];
@@ -135,9 +134,9 @@ out:
return rc;
}
-static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
+static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_shash *tfm)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
int err;
@@ -166,31 +165,24 @@ static int nx_xcbc_update(struct shash_desc *desc,
const u8 *data,
unsigned int len)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct xcbc_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg;
struct nx_sg *out_sg;
- u32 to_process = 0, leftover, total;
unsigned int max_sg_len;
unsigned long irq_flags;
+ u32 to_process, total;
int rc = 0;
int data_len;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+ memcpy(csbcpb->cpb.aes_xcbc.out_cv_mac, sctx->state, AES_BLOCK_SIZE);
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- total = sctx->count + len;
-
- /* 2 cases for total data len:
- * 1: <= AES_BLOCK_SIZE: copy into state, return 0
- * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
- */
- if (total <= AES_BLOCK_SIZE) {
- memcpy(sctx->buffer + sctx->count, data, len);
- sctx->count += len;
- goto out;
- }
+ total = len;
in_sg = nx_ctx->in_sg;
max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
@@ -200,7 +192,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
data_len = AES_BLOCK_SIZE;
out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
- &len, nx_ctx->ap->sglen);
+ &data_len, nx_ctx->ap->sglen);
if (data_len != AES_BLOCK_SIZE) {
rc = -EINVAL;
@@ -210,56 +202,21 @@ static int nx_xcbc_update(struct shash_desc *desc,
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
do {
- to_process = total - to_process;
- to_process = to_process & ~(AES_BLOCK_SIZE - 1);
-
- leftover = total - to_process;
-
- /* the hardware will not accept a 0 byte operation for this
- * algorithm and the operation MUST be finalized to be correct.
- * So if we happen to get an update that falls on a block sized
- * boundary, we must save off the last block to finalize with
- * later. */
- if (!leftover) {
- to_process -= AES_BLOCK_SIZE;
- leftover = AES_BLOCK_SIZE;
- }
-
- if (sctx->count) {
- data_len = sctx->count;
- in_sg = nx_build_sg_list(nx_ctx->in_sg,
- (u8 *) sctx->buffer,
- &data_len,
- max_sg_len);
- if (data_len != sctx->count) {
- rc = -EINVAL;
- goto out;
- }
- }
+ to_process = total & ~(AES_BLOCK_SIZE - 1);
- data_len = to_process - sctx->count;
in_sg = nx_build_sg_list(in_sg,
(u8 *) data,
- &data_len,
+ &to_process,
max_sg_len);
- if (data_len != to_process - sctx->count) {
- rc = -EINVAL;
- goto out;
- }
-
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
sizeof(struct nx_sg);
/* we've hit the nx chip previously and we're updating again,
* so copy over the partial digest */
- if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
- memcpy(csbcpb->cpb.aes_xcbc.cv,
- csbcpb->cpb.aes_xcbc.out_cv_mac,
- AES_BLOCK_SIZE);
- }
+ memcpy(csbcpb->cpb.aes_xcbc.cv,
+ csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
- NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
rc = -EINVAL;
goto out;
@@ -271,28 +228,24 @@ static int nx_xcbc_update(struct shash_desc *desc,
atomic_inc(&(nx_ctx->stats->aes_ops));
- /* everything after the first update is continuation */
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
-
total -= to_process;
- data += to_process - sctx->count;
- sctx->count = 0;
+ data += to_process;
in_sg = nx_ctx->in_sg;
- } while (leftover > AES_BLOCK_SIZE);
+ } while (total >= AES_BLOCK_SIZE);
- /* copy the leftover back into the state struct */
- memcpy(sctx->buffer, data, leftover);
- sctx->count = leftover;
+ rc = total;
+ memcpy(sctx->state, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
+static int nx_xcbc_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int nbytes, u8 *out)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct xcbc_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
unsigned long irq_flags;
@@ -301,12 +254,10 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
- /* we've hit the nx chip previously, now we're finalizing,
- * so copy over the partial digest */
- memcpy(csbcpb->cpb.aes_xcbc.cv,
- csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
- } else if (sctx->count == 0) {
+ if (nbytes) {
+ /* non-zero final, so copy over the partial digest */
+ memcpy(csbcpb->cpb.aes_xcbc.cv, sctx->state, AES_BLOCK_SIZE);
+ } else {
/*
* we've never seen an update, so this is a 0 byte op. The
* hardware cannot handle a 0 byte op, so just ECB to
@@ -320,11 +271,11 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
* this is not an intermediate operation */
NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- len = sctx->count;
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
- &len, nx_ctx->ap->sglen);
+ len = nbytes;
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len,
+ nx_ctx->ap->sglen);
- if (len != sctx->count) {
+ if (len != nbytes) {
rc = -EINVAL;
goto out;
}
@@ -362,18 +313,19 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
.digestsize = AES_BLOCK_SIZE,
.init = nx_xcbc_init,
.update = nx_xcbc_update,
- .final = nx_xcbc_final,
+ .finup = nx_xcbc_finup,
.setkey = nx_xcbc_set_key,
.descsize = sizeof(struct xcbc_state),
- .statesize = sizeof(struct xcbc_state),
+ .init_tfm = nx_crypto_ctx_aes_xcbc_init2,
+ .exit_tfm = nx_crypto_ctx_shash_exit,
.base = {
.cra_name = "xcbc(aes)",
.cra_driver_name = "xcbc-aes-nx",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINAL_NONZERO,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_aes_xcbc_init2,
- .cra_exit = nx_crypto_ctx_exit,
}
};
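
Note: with CRYPTO_AHASH_ALG_BLOCK_ONLY (and, for XCBC,
CRYPTO_AHASH_ALG_FINAL_NONZERO) the hash API layer takes over partial-block
buffering, which is why xcbc_state loses its count/buffer fields: update()
only consumes whole blocks and reports how many trailing bytes it left
untouched, and finup() receives the remaining data directly. A minimal sketch
of that update() contract, assuming this reading of the flags (illustrative
function, not from this patch):

    static int example_block_only_update(struct shash_desc *desc,
                                         const u8 *data, unsigned int len)
    {
            unsigned int blocks = len & ~(AES_BLOCK_SIZE - 1);

            /* ... hand "blocks" bytes to the engine, update desc state ... */

            return len - blocks;    /* leftover bytes for the core to buffer */
    }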
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index 8c859872c183..fd0a98b2fb1b 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -9,6 +9,7 @@
#include "nx-842.h"
+#include <crypto/internal/scompress.h>
#include <linux/timer.h>
#include <asm/prom.h>
@@ -1031,23 +1032,21 @@ static struct nx842_driver nx842_powernv_driver = {
.decompress = nx842_powernv_decompress,
};
-static int nx842_powernv_crypto_init(struct crypto_tfm *tfm)
+static void *nx842_powernv_crypto_alloc_ctx(void)
{
- return nx842_crypto_init(tfm, &nx842_powernv_driver);
+ return nx842_crypto_alloc_ctx(&nx842_powernv_driver);
}
-static struct crypto_alg nx842_powernv_alg = {
- .cra_name = "842",
- .cra_driver_name = "842-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct nx842_crypto_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = nx842_powernv_crypto_init,
- .cra_exit = nx842_crypto_exit,
- .cra_u = { .compress = {
- .coa_compress = nx842_crypto_compress,
- .coa_decompress = nx842_crypto_decompress } }
+static struct scomp_alg nx842_powernv_alg = {
+ .base.cra_name = "842",
+ .base.cra_driver_name = "842-nx",
+ .base.cra_priority = 300,
+ .base.cra_module = THIS_MODULE,
+
+ .alloc_ctx = nx842_powernv_crypto_alloc_ctx,
+ .free_ctx = nx842_crypto_free_ctx,
+ .compress = nx842_crypto_compress,
+ .decompress = nx842_crypto_decompress,
};
static __init int nx_compress_powernv_init(void)
@@ -1107,7 +1106,7 @@ static __init int nx_compress_powernv_init(void)
nx842_powernv_exec = nx842_exec_vas;
}
- ret = crypto_register_alg(&nx842_powernv_alg);
+ ret = crypto_register_scomp(&nx842_powernv_alg);
if (ret) {
nx_delete_coprocs();
return ret;
@@ -1128,7 +1127,7 @@ static void __exit nx_compress_powernv_exit(void)
if (!nx842_ct)
vas_unregister_api_powernv();
- crypto_unregister_alg(&nx842_powernv_alg);
+ crypto_unregister_scomp(&nx842_powernv_alg);
nx_delete_coprocs();
}
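
Note: the 842 compressor is now registered as a struct scomp_alg (from
<crypto/internal/scompress.h>) with alloc_ctx/free_ctx hooks instead of a
legacy CRYPTO_ALG_TYPE_COMPRESS crypto_alg with cra_init/cra_exit. Callers
reach an scomp algorithm through the acomp interface; a hedged usage sketch
(illustrative helper, error handling trimmed, not part of this patch):

    #include <crypto/acompress.h>
    #include <linux/scatterlist.h>

    static int example_842_compress(struct scatterlist *src, unsigned int slen,
                                    struct scatterlist *dst, unsigned int dlen)
    {
            struct crypto_acomp *tfm;
            struct acomp_req *req;
            int err;

            tfm = crypto_alloc_acomp("842", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            req = acomp_request_alloc(tfm);
            if (!req) {
                    crypto_free_acomp(tfm);
                    return -ENOMEM;
            }

            acomp_request_set_callback(req, 0, NULL, NULL);
            acomp_request_set_params(req, src, dst, slen, dlen);
            err = crypto_acomp_compress(req);  /* typically synchronous for scomp backends */

            acomp_request_free(req);
            crypto_free_acomp(tfm);
            return err;
    }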
diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c
index 35f2d0d8507e..f528e072494a 100644
--- a/drivers/crypto/nx/nx-common-pseries.c
+++ b/drivers/crypto/nx/nx-common-pseries.c
@@ -11,6 +11,7 @@
#include <asm/vio.h>
#include <asm/hvcall.h>
#include <asm/vas.h>
+#include <crypto/internal/scompress.h>
#include "nx-842.h"
#include "nx_csbcpb.h" /* struct nx_csbcpb */
@@ -133,7 +134,7 @@ struct nx842_devdata {
};
static struct nx842_devdata __rcu *devdata;
-static DEFINE_SPINLOCK(devdata_mutex);
+static DEFINE_SPINLOCK(devdata_spinlock);
#define NX842_COUNTER_INC(_x) \
static inline void nx842_inc_##_x( \
@@ -750,15 +751,15 @@ static int nx842_OF_upd(struct property *new_prop)
if (!new_devdata)
return -ENOMEM;
- spin_lock_irqsave(&devdata_mutex, flags);
+ spin_lock_irqsave(&devdata_spinlock, flags);
old_devdata = rcu_dereference_check(devdata,
- lockdep_is_held(&devdata_mutex));
+ lockdep_is_held(&devdata_spinlock));
if (old_devdata)
of_node = old_devdata->dev->of_node;
if (!old_devdata || !of_node) {
pr_err("%s: device is not available\n", __func__);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
kfree(new_devdata);
return -ENODEV;
}
@@ -810,7 +811,7 @@ out:
old_devdata->max_sg_len);
rcu_assign_pointer(devdata, new_devdata);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
synchronize_rcu();
dev_set_drvdata(new_devdata->dev, new_devdata);
kfree(old_devdata);
@@ -821,13 +822,13 @@ error_out:
dev_info(old_devdata->dev, "%s: device disabled\n", __func__);
nx842_OF_set_defaults(new_devdata);
rcu_assign_pointer(devdata, new_devdata);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
synchronize_rcu();
dev_set_drvdata(new_devdata->dev, new_devdata);
kfree(old_devdata);
} else {
dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
}
if (!ret)
@@ -1008,23 +1009,21 @@ static struct nx842_driver nx842_pseries_driver = {
.decompress = nx842_pseries_decompress,
};
-static int nx842_pseries_crypto_init(struct crypto_tfm *tfm)
+static void *nx842_pseries_crypto_alloc_ctx(void)
{
- return nx842_crypto_init(tfm, &nx842_pseries_driver);
+ return nx842_crypto_alloc_ctx(&nx842_pseries_driver);
}
-static struct crypto_alg nx842_pseries_alg = {
- .cra_name = "842",
- .cra_driver_name = "842-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct nx842_crypto_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = nx842_pseries_crypto_init,
- .cra_exit = nx842_crypto_exit,
- .cra_u = { .compress = {
- .coa_compress = nx842_crypto_compress,
- .coa_decompress = nx842_crypto_decompress } }
+static struct scomp_alg nx842_pseries_alg = {
+ .base.cra_name = "842",
+ .base.cra_driver_name = "842-nx",
+ .base.cra_priority = 300,
+ .base.cra_module = THIS_MODULE,
+
+ .alloc_ctx = nx842_pseries_crypto_alloc_ctx,
+ .free_ctx = nx842_crypto_free_ctx,
+ .compress = nx842_crypto_compress,
+ .decompress = nx842_crypto_decompress,
};
static int nx842_probe(struct vio_dev *viodev,
@@ -1045,9 +1044,9 @@ static int nx842_probe(struct vio_dev *viodev,
return -ENOMEM;
}
- spin_lock_irqsave(&devdata_mutex, flags);
+ spin_lock_irqsave(&devdata_spinlock, flags);
old_devdata = rcu_dereference_check(devdata,
- lockdep_is_held(&devdata_mutex));
+ lockdep_is_held(&devdata_spinlock));
if (old_devdata && old_devdata->vdev != NULL) {
dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__);
@@ -1062,7 +1061,7 @@ static int nx842_probe(struct vio_dev *viodev,
nx842_OF_set_defaults(new_devdata);
rcu_assign_pointer(devdata, new_devdata);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
synchronize_rcu();
kfree(old_devdata);
@@ -1072,7 +1071,7 @@ static int nx842_probe(struct vio_dev *viodev,
if (ret)
goto error;
- ret = crypto_register_alg(&nx842_pseries_alg);
+ ret = crypto_register_scomp(&nx842_pseries_alg);
if (ret) {
dev_err(&viodev->dev, "could not register comp alg: %d\n", ret);
goto error;
@@ -1101,7 +1100,7 @@ static int nx842_probe(struct vio_dev *viodev,
return 0;
error_unlock:
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
if (new_devdata)
kfree(new_devdata->counters);
kfree(new_devdata);
@@ -1120,14 +1119,15 @@ static void nx842_remove(struct vio_dev *viodev)
if (caps_feat)
sysfs_remove_group(&viodev->dev.kobj, &nxcop_caps_attr_group);
- crypto_unregister_alg(&nx842_pseries_alg);
+ crypto_unregister_scomp(&nx842_pseries_alg);
- spin_lock_irqsave(&devdata_mutex, flags);
- old_devdata = rcu_dereference_check(devdata,
- lockdep_is_held(&devdata_mutex));
of_reconfig_notifier_unregister(&nx842_of_nb);
+
+ spin_lock_irqsave(&devdata_spinlock, flags);
+ old_devdata = rcu_dereference_check(devdata,
+ lockdep_is_held(&devdata_spinlock));
RCU_INIT_POINTER(devdata, NULL);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
synchronize_rcu();
dev_set_drvdata(&viodev->dev, NULL);
if (old_devdata)
@@ -1144,6 +1144,7 @@ static void __init nxcop_get_capabilities(void)
{
struct hv_vas_all_caps *hv_caps;
struct hv_nx_cop_caps *hv_nxc;
+ u64 feat;
int rc;
hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL);
@@ -1154,27 +1155,26 @@ static void __init nxcop_get_capabilities(void)
*/
rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, 0,
(u64)virt_to_phys(hv_caps));
+ if (!rc)
+ feat = be64_to_cpu(hv_caps->feat_type);
+ kfree(hv_caps);
if (rc)
- goto out;
+ return;
+ if (!(feat & VAS_NX_GZIP_FEAT_BIT))
+ return;
- caps_feat = be64_to_cpu(hv_caps->feat_type);
/*
* NX-GZIP feature available
*/
- if (caps_feat & VAS_NX_GZIP_FEAT_BIT) {
- hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL);
- if (!hv_nxc)
- goto out;
- /*
- * Get capabilities for NX-GZIP feature
- */
- rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES,
- VAS_NX_GZIP_FEAT,
- (u64)virt_to_phys(hv_nxc));
- } else {
- pr_err("NX-GZIP feature is not available\n");
- rc = -EINVAL;
- }
+ hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL);
+ if (!hv_nxc)
+ return;
+ /*
+ * Get capabilities for NX-GZIP feature
+ */
+ rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES,
+ VAS_NX_GZIP_FEAT,
+ (u64)virt_to_phys(hv_nxc));
if (!rc) {
nx_cop_caps.descriptor = be64_to_cpu(hv_nxc->descriptor);
@@ -1184,13 +1184,10 @@ static void __init nxcop_get_capabilities(void)
be64_to_cpu(hv_nxc->min_compress_len);
nx_cop_caps.min_decompress_len =
be64_to_cpu(hv_nxc->min_decompress_len);
- } else {
- caps_feat = 0;
+ caps_feat = feat;
}
kfree(hv_nxc);
-out:
- kfree(hv_caps);
}
static const struct vio_device_id nx842_vio_driver_ids[] = {
@@ -1255,13 +1252,13 @@ static void __exit nx842_pseries_exit(void)
vas_unregister_api_pseries();
- crypto_unregister_alg(&nx842_pseries_alg);
+ crypto_unregister_scomp(&nx842_pseries_alg);
- spin_lock_irqsave(&devdata_mutex, flags);
+ spin_lock_irqsave(&devdata_spinlock, flags);
old_devdata = rcu_dereference_check(devdata,
- lockdep_is_held(&devdata_mutex));
+ lockdep_is_held(&devdata_spinlock));
RCU_INIT_POINTER(devdata, NULL);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
synchronize_rcu();
if (old_devdata && old_devdata->dev)
dev_set_drvdata(old_devdata->dev, NULL);
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index c3bebf0feabe..5b29dd026df2 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -9,9 +9,12 @@
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/vio.h>
-#include <asm/byteorder.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
#include "nx_csbcpb.h"
#include "nx.h"
@@ -19,12 +22,11 @@
struct sha256_state_be {
__be32 state[SHA256_DIGEST_SIZE / 4];
u64 count;
- u8 buf[SHA256_BLOCK_SIZE];
};
-static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
+static int nx_crypto_ctx_sha256_init(struct crypto_shash *tfm)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm);
int err;
err = nx_crypto_ctx_sha_init(tfm);
@@ -40,11 +42,10 @@ static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
return 0;
}
-static int nx_sha256_init(struct shash_desc *desc) {
+static int nx_sha256_init(struct shash_desc *desc)
+{
struct sha256_state_be *sctx = shash_desc_ctx(desc);
- memset(sctx, 0, sizeof *sctx);
-
sctx->state[0] = __cpu_to_be32(SHA256_H0);
sctx->state[1] = __cpu_to_be32(SHA256_H1);
sctx->state[2] = __cpu_to_be32(SHA256_H2);
@@ -61,30 +62,18 @@ static int nx_sha256_init(struct shash_desc *desc) {
static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct sha256_state_be *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ u64 to_process, leftover, total = len;
struct nx_sg *out_sg;
- u64 to_process = 0, leftover, total;
unsigned long irq_flags;
int rc = 0;
int data_len;
u32 max_sg_len;
- u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- /* 2 cases for total data len:
- * 1: < SHA256_BLOCK_SIZE: copy into state, return 0
- * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
- */
- total = (sctx->count % SHA256_BLOCK_SIZE) + len;
- if (total < SHA256_BLOCK_SIZE) {
- memcpy(sctx->buf + buf_len, data, len);
- sctx->count += len;
- goto out;
- }
-
memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE);
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
@@ -105,41 +94,17 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
}
do {
- int used_sgs = 0;
struct nx_sg *in_sg = nx_ctx->in_sg;
- if (buf_len) {
- data_len = buf_len;
- in_sg = nx_build_sg_list(in_sg,
- (u8 *) sctx->buf,
- &data_len,
- max_sg_len);
-
- if (data_len != buf_len) {
- rc = -EINVAL;
- goto out;
- }
- used_sgs = in_sg - nx_ctx->in_sg;
- }
+ to_process = total & ~(SHA256_BLOCK_SIZE - 1);
- /* to_process: SHA256_BLOCK_SIZE aligned chunk to be
- * processed in this iteration. This value is restricted
- * by sg list limits and number of sgs we already used
- * for leftover data. (see above)
- * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
- * but because data may not be aligned, we need to account
- * for that too. */
- to_process = min_t(u64, total,
- (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
- to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
-
- data_len = to_process - buf_len;
+ data_len = to_process;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- to_process = data_len + buf_len;
+ to_process = data_len;
leftover = total - to_process;
/*
@@ -162,26 +127,22 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
atomic_inc(&(nx_ctx->stats->sha256_ops));
total -= to_process;
- data += to_process - buf_len;
- buf_len = 0;
-
+ data += to_process;
+ sctx->count += to_process;
} while (leftover >= SHA256_BLOCK_SIZE);
- /* copy the leftover back into the state struct */
- if (leftover)
- memcpy(sctx->buf, data, leftover);
-
- sctx->count += len;
+ rc = leftover;
memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int nx_sha256_final(struct shash_desc *desc, u8 *out)
+static int nx_sha256_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int nbytes, u8 *out)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct sha256_state_be *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
unsigned long irq_flags;
@@ -197,25 +158,19 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
/* final is represented by continuing the operation and indicating that
- * this is not an intermediate operation */
- if (sctx->count >= SHA256_BLOCK_SIZE) {
- /* we've hit the nx chip previously, now we're finalizing,
- * so copy over the partial digest */
- memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- } else {
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
- }
+ * this is not an intermediate operation
+ * copy over the partial digest */
+ memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+ sctx->count += nbytes;
csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
- len = sctx->count & (SHA256_BLOCK_SIZE - 1);
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
- &len, max_sg_len);
+ len = nbytes;
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len, max_sg_len);
- if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
+ if (len != nbytes) {
rc = -EINVAL;
goto out;
}
@@ -251,18 +206,34 @@ out:
static int nx_sha256_export(struct shash_desc *desc, void *out)
{
struct sha256_state_be *sctx = shash_desc_ctx(desc);
+ union {
+ u8 *u8;
+ u32 *u32;
+ u64 *u64;
+ } p = { .u8 = out };
+ int i;
- memcpy(out, sctx, sizeof(*sctx));
+ for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(*p.u32); i++)
+ put_unaligned(be32_to_cpu(sctx->state[i]), p.u32++);
+ put_unaligned(sctx->count, p.u64++);
return 0;
}
static int nx_sha256_import(struct shash_desc *desc, const void *in)
{
struct sha256_state_be *sctx = shash_desc_ctx(desc);
+ union {
+ const u8 *u8;
+ const u32 *u32;
+ const u64 *u64;
+ } p = { .u8 = in };
+ int i;
- memcpy(sctx, in, sizeof(*sctx));
+ for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(*p.u32); i++)
+ sctx->state[i] = cpu_to_be32(get_unaligned(p.u32++));
+ sctx->count = get_unaligned(p.u64++);
return 0;
}
@@ -270,19 +241,20 @@ struct shash_alg nx_shash_sha256_alg = {
.digestsize = SHA256_DIGEST_SIZE,
.init = nx_sha256_init,
.update = nx_sha256_update,
- .final = nx_sha256_final,
+ .finup = nx_sha256_finup,
.export = nx_sha256_export,
.import = nx_sha256_import,
+ .init_tfm = nx_crypto_ctx_sha256_init,
+ .exit_tfm = nx_crypto_ctx_shash_exit,
.descsize = sizeof(struct sha256_state_be),
.statesize = sizeof(struct sha256_state_be),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-nx",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_sha256_init,
- .cra_exit = nx_crypto_ctx_exit,
}
};
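
Note: nx_sha256_export()/import() now serialise the partial-hash state
explicitly instead of memcpy()ing the whole descriptor: eight state words
converted to CPU byte order, followed by the 64-bit byte count, written with
put_unaligned() since the export buffer carries no alignment guarantee. An
equivalent, illustrative view of that layout (sketch only, not a kernel type):

    struct sha256_export_layout {
            u32 state[SHA256_DIGEST_SIZE / 4];  /* CPU-endian words */
            u64 count;                          /* bytes hashed so far */
    } __packed;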
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 1ffb40d2c324..f74776b7d7d7 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -9,8 +9,12 @@
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/vio.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
#include "nx_csbcpb.h"
#include "nx.h"
@@ -18,12 +22,11 @@
struct sha512_state_be {
__be64 state[SHA512_DIGEST_SIZE / 8];
u64 count[2];
- u8 buf[SHA512_BLOCK_SIZE];
};
-static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
+static int nx_crypto_ctx_sha512_init(struct crypto_shash *tfm)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm);
int err;
err = nx_crypto_ctx_sha_init(tfm);
@@ -43,8 +46,6 @@ static int nx_sha512_init(struct shash_desc *desc)
{
struct sha512_state_be *sctx = shash_desc_ctx(desc);
- memset(sctx, 0, sizeof *sctx);
-
sctx->state[0] = __cpu_to_be64(SHA512_H0);
sctx->state[1] = __cpu_to_be64(SHA512_H1);
sctx->state[2] = __cpu_to_be64(SHA512_H2);
@@ -54,6 +55,7 @@ static int nx_sha512_init(struct shash_desc *desc)
sctx->state[6] = __cpu_to_be64(SHA512_H6);
sctx->state[7] = __cpu_to_be64(SHA512_H7);
sctx->count[0] = 0;
+ sctx->count[1] = 0;
return 0;
}
@@ -61,30 +63,18 @@ static int nx_sha512_init(struct shash_desc *desc)
static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct sha512_state_be *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ u64 to_process, leftover, total = len;
struct nx_sg *out_sg;
- u64 to_process, leftover = 0, total;
unsigned long irq_flags;
int rc = 0;
int data_len;
u32 max_sg_len;
- u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- /* 2 cases for total data len:
- * 1: < SHA512_BLOCK_SIZE: copy into state, return 0
- * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
- */
- total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len;
- if (total < SHA512_BLOCK_SIZE) {
- memcpy(sctx->buf + buf_len, data, len);
- sctx->count[0] += len;
- goto out;
- }
-
memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE);
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
@@ -105,45 +95,17 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
}
do {
- int used_sgs = 0;
struct nx_sg *in_sg = nx_ctx->in_sg;
- if (buf_len) {
- data_len = buf_len;
- in_sg = nx_build_sg_list(in_sg,
- (u8 *) sctx->buf,
- &data_len, max_sg_len);
-
- if (data_len != buf_len) {
- rc = -EINVAL;
- goto out;
- }
- used_sgs = in_sg - nx_ctx->in_sg;
- }
+ to_process = total & ~(SHA512_BLOCK_SIZE - 1);
- /* to_process: SHA512_BLOCK_SIZE aligned chunk to be
- * processed in this iteration. This value is restricted
- * by sg list limits and number of sgs we already used
- * for leftover data. (see above)
- * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
- * but because data may not be aligned, we need to account
- * for that too. */
- to_process = min_t(u64, total,
- (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
- to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
-
- data_len = to_process - buf_len;
+ data_len = to_process;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- if (data_len != (to_process - buf_len)) {
- rc = -EINVAL;
- goto out;
- }
-
- to_process = data_len + buf_len;
+ to_process = data_len;
leftover = total - to_process;
/*
@@ -166,30 +128,29 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
atomic_inc(&(nx_ctx->stats->sha512_ops));
total -= to_process;
- data += to_process - buf_len;
- buf_len = 0;
-
+ data += to_process;
+ sctx->count[0] += to_process;
+ if (sctx->count[0] < to_process)
+ sctx->count[1]++;
} while (leftover >= SHA512_BLOCK_SIZE);
- /* copy the leftover back into the state struct */
- if (leftover)
- memcpy(sctx->buf, data, leftover);
- sctx->count[0] += len;
+ rc = leftover;
memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int nx_sha512_final(struct shash_desc *desc, u8 *out)
+static int nx_sha512_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int nbytes, u8 *out)
{
struct sha512_state_be *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
u32 max_sg_len;
- u64 count0;
unsigned long irq_flags;
+ u64 count0, count1;
int rc = 0;
int len;
@@ -201,30 +162,23 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
/* final is represented by continuing the operation and indicating that
- * this is not an intermediate operation */
- if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
- /* we've hit the nx chip previously, now we're finalizing,
- * so copy over the partial digest */
- memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
- SHA512_DIGEST_SIZE);
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- } else {
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
- }
-
+ * this is not an intermediate operation
+ * copy over the partial digest */
+ memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state, SHA512_DIGEST_SIZE);
NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- count0 = sctx->count[0] * 8;
+ count0 = sctx->count[0] + nbytes;
+ count1 = sctx->count[1];
- csbcpb->cpb.sha512.message_bit_length_lo = count0;
+ csbcpb->cpb.sha512.message_bit_length_lo = count0 << 3;
+ csbcpb->cpb.sha512.message_bit_length_hi = (count1 << 3) |
+ (count0 >> 61);
- len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
- in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
- max_sg_len);
+ len = nbytes;
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len, max_sg_len);
- if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
+ if (len != nbytes) {
rc = -EINVAL;
goto out;
}
@@ -246,7 +200,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
goto out;
atomic_inc(&(nx_ctx->stats->sha512_ops));
- atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));
+ atomic64_add(count0, &(nx_ctx->stats->sha512_bytes));
memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
@@ -257,18 +211,34 @@ out:
static int nx_sha512_export(struct shash_desc *desc, void *out)
{
struct sha512_state_be *sctx = shash_desc_ctx(desc);
+ union {
+ u8 *u8;
+ u64 *u64;
+ } p = { .u8 = out };
+ int i;
- memcpy(out, sctx, sizeof(*sctx));
+ for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(*p.u64); i++)
+ put_unaligned(be64_to_cpu(sctx->state[i]), p.u64++);
+ put_unaligned(sctx->count[0], p.u64++);
+ put_unaligned(sctx->count[1], p.u64++);
return 0;
}
static int nx_sha512_import(struct shash_desc *desc, const void *in)
{
struct sha512_state_be *sctx = shash_desc_ctx(desc);
+ union {
+ const u8 *u8;
+ const u64 *u64;
+ } p = { .u8 = in };
+ int i;
- memcpy(sctx, in, sizeof(*sctx));
+ for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(*p.u64); i++)
+ sctx->state[i] = cpu_to_be64(get_unaligned(p.u64++));
+ sctx->count[0] = get_unaligned(p.u64++);
+ sctx->count[1] = get_unaligned(p.u64++);
return 0;
}
@@ -276,19 +246,20 @@ struct shash_alg nx_shash_sha512_alg = {
.digestsize = SHA512_DIGEST_SIZE,
.init = nx_sha512_init,
.update = nx_sha512_update,
- .final = nx_sha512_final,
+ .finup = nx_sha512_finup,
.export = nx_sha512_export,
.import = nx_sha512_import,
+ .init_tfm = nx_crypto_ctx_sha512_init,
+ .exit_tfm = nx_crypto_ctx_shash_exit,
.descsize = sizeof(struct sha512_state_be),
.statesize = sizeof(struct sha512_state_be),
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-nx",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_sha512_init,
- .cra_exit = nx_crypto_ctx_exit,
}
};
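
Note: SHA-512 keeps a 128-bit running byte count in count[0] (low) and
count[1] (high), with the carry handled in nx_sha512_update(); finup()
converts it to the bit length the coprocessor expects, so the high word is
(count1 << 3) | (count0 >> 61). A small sketch of that conversion
(illustrative helper name):

    static void bytes_to_bits128(u64 count0, u64 count1, u64 *lo, u64 *hi)
    {
            *lo = count0 << 3;                     /* low 64 bits of count * 8 */
            *hi = (count1 << 3) | (count0 >> 61);  /* carry top 3 bits of count0 */
    }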
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 010e87d9da36..78135fb13f5c 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -7,11 +7,11 @@
* Author: Kent Yoder <yoder1@us.ibm.com>
*/
+#include <crypto/aes.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
-#include <crypto/aes.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/sha2.h>
-#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -124,8 +124,6 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
}
if ((sg - sg_head) == sgmax) {
- pr_err("nx: scatter/gather list overflow, pid: %d\n",
- current->pid);
sg++;
break;
}
@@ -153,40 +151,18 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
{
struct scatter_walk walk;
struct nx_sg *nx_sg = nx_dst;
- unsigned int n, offset = 0, len = *src_len;
- char *dst;
+ unsigned int n, len = *src_len;
/* we need to fast forward through @start bytes first */
- for (;;) {
- scatterwalk_start(&walk, sg_src);
-
- if (start < offset + sg_src->length)
- break;
-
- offset += sg_src->length;
- sg_src = sg_next(sg_src);
- }
-
- /* start - offset is the number of bytes to advance in the scatterlist
- * element we're currently looking at */
- scatterwalk_advance(&walk, start - offset);
+ scatterwalk_start_at_pos(&walk, sg_src, start);
while (len && (nx_sg - nx_dst) < sglen) {
- n = scatterwalk_clamp(&walk, len);
- if (!n) {
- /* In cases where we have scatterlist chain sg_next
- * handles with it properly */
- scatterwalk_start(&walk, sg_next(walk.sg));
- n = scatterwalk_clamp(&walk, len);
- }
- dst = scatterwalk_map(&walk);
+ n = scatterwalk_next(&walk, len);
- nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst));
- len -= n;
+ nx_sg = nx_build_sg_list(nx_sg, walk.addr, &n, sglen - (nx_sg - nx_dst));
- scatterwalk_unmap(dst);
- scatterwalk_advance(&walk, n);
- scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
+ scatterwalk_done_src(&walk, n);
+ len -= n;
}
/* update to_process */
*src_len -= len;
@@ -724,14 +700,14 @@ int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm)
NX_MODE_AES_ECB);
}
-int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_sha_init(struct crypto_shash *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
+ return nx_crypto_ctx_init(crypto_shash_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
}
-int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_xcbc_init(struct crypto_shash *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ return nx_crypto_ctx_init(crypto_shash_ctx(tfm), NX_FC_AES,
NX_MODE_AES_XCBC_MAC);
}
@@ -766,6 +742,11 @@ void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm)
kfree_sensitive(nx_ctx->kmem);
}
+void nx_crypto_ctx_shash_exit(struct crypto_shash *tfm)
+{
+ nx_crypto_ctx_exit(crypto_shash_ctx(tfm));
+}
+
static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id)
{
dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
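
Note: nx_walk_and_build() now uses the current scatterwalk helpers from
<crypto/scatterwalk.h>: scatterwalk_start_at_pos() fast-forwards to a byte
offset, scatterwalk_next() maps the next contiguous chunk (bounded by the
remaining length), exposing it at walk.addr and returning its size, and
scatterwalk_done_src() unmaps and advances. A hedged sketch of that loop
shape (the consume() callback is illustrative):

    static void example_walk_src(struct scatterlist *sg, unsigned int start,
                                 unsigned int len,
                                 void (*consume)(const void *p, unsigned int n))
    {
            struct scatter_walk walk;
            unsigned int n;

            scatterwalk_start_at_pos(&walk, sg, start);
            while (len) {
                    n = scatterwalk_next(&walk, len);   /* maps walk.addr */
                    consume(walk.addr, n);
                    scatterwalk_done_src(&walk, n);     /* unmap + advance */
                    len -= n;
            }
    }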
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index 2697baebb6a3..36974f08490a 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -3,7 +3,11 @@
#ifndef __NX_H__
#define __NX_H__
+#include <asm/vio.h>
#include <crypto/ctr.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#define NX_NAME "nx-crypto"
#define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver"
@@ -139,19 +143,20 @@ struct nx_crypto_ctx {
} priv;
};
-struct crypto_aead;
+struct scatterlist;
/* prototypes */
int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm);
int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm);
-int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_xcbc_init(struct crypto_shash *tfm);
int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm);
int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm);
int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm);
-int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_sha_init(struct crypto_shash *tfm);
void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm);
void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm);
+void nx_crypto_ctx_shash_exit(struct crypto_shash *tfm);
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
u32 may_sleep);
@@ -189,7 +194,4 @@ extern struct shash_alg nx_shash_sha256_alg;
extern struct nx_crypto_driver nx_driver;
-#define SCATTERWALK_TO_SG 1
-#define SCATTERWALK_FROM_SG 0
-
#endif