Diffstat (limited to 'drivers/crypto/caam/caamalg_qi2.c')
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c | 1409
1 file changed, 830 insertions(+), 579 deletions(-)
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 425d5d974613..107ccb2ade42 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2015-2016 Freescale Semiconductor Inc.
- * Copyright 2017-2018 NXP
+ * Copyright 2017-2019 NXP
*/
#include "compat.h"
@@ -15,9 +15,15 @@
#include "key_gen.h"
#include "caamalg_desc.h"
#include "caamhash_desc.h"
+#include "dpseci-debugfs.h"
+#include <linux/dma-mapping.h>
#include <linux/fsl/mc.h>
+#include <linux/kernel.h>
+#include <linux/string_choices.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
+#include <crypto/xts.h>
+#include <linux/unaligned.h>
#define CAAM_CRA_PRIORITY 2000
@@ -25,15 +31,8 @@
#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
SHA512_DIGEST_SIZE * 2)
-#if !IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM)
-bool caam_little_end;
-EXPORT_SYMBOL(caam_little_end);
-bool caam_imx;
-EXPORT_SYMBOL(caam_imx);
-#endif
-
/*
- * This is a a cache of buffers, from which the users of CAAM QI driver
+ * This is a cache of buffers, from which the users of CAAM QI driver
* can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
* NOTE: A more elegant solution would be to have some headroom in the frames
* being processed. This can be added by the dpaa2-eth driver. This would
@@ -49,6 +48,7 @@ struct caam_alg_entry {
int class2_alg_type;
bool rfc3686;
bool geniv;
+ bool nodkp;
};
struct caam_aead_alg {
@@ -64,7 +64,7 @@ struct caam_skcipher_alg {
};
/**
- * caam_ctx - per-session context
+ * struct caam_ctx - per-session context
* @flc: Flow Contexts array
* @key: [authentication key], encryption key
* @flc_dma: I/O virtual addresses of the Flow Contexts
@@ -74,6 +74,9 @@ struct caam_skcipher_alg {
* @adata: authentication algorithm details
* @cdata: encryption algorithm details
* @authsize: authentication tag (a.k.a. ICV / MAC) size
+ * @xts_key_fallback: true if fallback tfm needs to be used due
+ * to unsupported xts key lengths
+ * @fallback: xts fallback tfm
*/
struct caam_ctx {
struct caam_flc flc[NUM_OP];
@@ -85,6 +88,8 @@ struct caam_ctx {
struct alginfo adata;
struct alginfo cdata;
unsigned int authsize;
+ bool xts_key_fallback;
+ struct crypto_skcipher *fallback;
};
static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
@@ -132,12 +137,12 @@ static struct caam_request *to_caam_req(struct crypto_async_request *areq)
{
switch (crypto_tfm_alg_type(areq->tfm)) {
case CRYPTO_ALG_TYPE_SKCIPHER:
- return skcipher_request_ctx(skcipher_request_cast(areq));
+ return skcipher_request_ctx_dma(skcipher_request_cast(areq));
case CRYPTO_ALG_TYPE_AEAD:
- return aead_request_ctx(container_of(areq, struct aead_request,
- base));
+ return aead_request_ctx_dma(
+ container_of(areq, struct aead_request, base));
case CRYPTO_ALG_TYPE_AHASH:
- return ahash_request_ctx(ahash_request_cast(areq));
+ return ahash_request_ctx_dma(ahash_request_cast(areq));
default:
return ERR_PTR(-EINVAL);
}
@@ -146,18 +151,20 @@ static struct caam_request *to_caam_req(struct crypto_async_request *areq)
static void caam_unmap(struct device *dev, struct scatterlist *src,
struct scatterlist *dst, int src_nents,
int dst_nents, dma_addr_t iv_dma, int ivsize,
- dma_addr_t qm_sg_dma, int qm_sg_bytes)
+ enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
+ int qm_sg_bytes)
{
if (dst != src) {
if (src_nents)
dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
- dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ if (dst_nents)
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
} else {
dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}
if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+ dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
if (qm_sg_bytes)
dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
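The new iv_dir argument follows a convention used consistently at the call sites later in this patch; a summary, inferred from those call sites:

/*
 * iv_dir convention (as used by the callers below):
 *   DMA_TO_DEVICE      - AEAD paths: the IV is input-only
 *   DMA_BIDIRECTIONAL  - skcipher paths: the engine updates the IV in
 *                        place so it can be read back for chaining
 *   DMA_NONE           - early error paths where the IV was never
 *                        mapped (iv_dma == 0)
 */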
@@ -167,7 +174,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
{
struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
typeof(*alg), aead);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct device *dev = ctx->dev;
struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
@@ -202,6 +209,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
}
+ /*
+ * In case |user key| > |derived key|, using DKP<imm,imm> would result
+ * in invalid opcodes (last bytes of user key) in the resulting
+ * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
+ * addresses are needed.
+ */
+ ctx->adata.key_virt = ctx->key;
+ ctx->adata.key_dma = ctx->key_dma;
+
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
data_len[0] = ctx->adata.keylen_pad;
data_len[1] = ctx->cdata.keylen;
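The assignments above rely on the split-key layout of ctx->key; a sketch of that layout (assuming the usual CAAM split-key packing), for orientation:

/*
 * ctx->key / ctx->key_dma layout assumed above:
 *
 *   +--------------------------------+------------------+
 *   | auth key, padded to keylen_pad | encryption key   |
 *   +--------------------------------+------------------+
 *   ^ adata.key_virt / key_dma       ^ cdata.key_virt / key_dma
 *                                      = base + adata.keylen_pad
 *
 * Keeping both the virtual and DMA addresses valid lets the descriptor
 * generator fall back to DKP<ptr,imm> whenever inlining the user key
 * would spill stray key bytes into the opcode stream.
 */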
@@ -213,16 +232,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ARRAY_SIZE(data_len)) < 0)
return -EINVAL;
- if (inl_mask & 1)
- ctx->adata.key_virt = ctx->key;
- else
- ctx->adata.key_dma = ctx->key_dma;
-
- if (inl_mask & 2)
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- else
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
@@ -251,16 +260,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ARRAY_SIZE(data_len)) < 0)
return -EINVAL;
- if (inl_mask & 1)
- ctx->adata.key_virt = ctx->key;
- else
- ctx->adata.key_dma = ctx->key_dma;
-
- if (inl_mask & 2)
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- else
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
@@ -280,7 +279,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
ctx->authsize = authsize;
aead_set_sh_desc(authenc);
@@ -291,7 +290,7 @@ static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *dev = ctx->dev;
struct crypto_authenc_keys keys;
@@ -324,25 +323,47 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
memzero_explicit(&keys, sizeof(keys));
return aead_set_sh_desc(aead);
badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto out;
+
+ err = -EINVAL;
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ goto out;
+
+ err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
+ aead_setkey(aead, key, keylen);
+
+out:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+}
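crypto_authenc_extractkeys() splits the authenc(...) key blob into its two components; for reference, the structure it fills (from include/crypto/authenc.h):

struct crypto_authenc_keys {
	const u8 *authkey;
	const u8 *enckey;
	unsigned int authkeylen;
	unsigned int enckeylen;
};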
+
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_request *req_ctx = aead_request_ctx(req);
+ struct caam_request *req_ctx = aead_request_ctx_dma(req);
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
typeof(*alg), aead);
struct device *dev = ctx->dev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ int src_len, dst_len = 0;
struct aead_edesc *edesc;
dma_addr_t qm_sg_dma, iv_dma = 0;
int ivsize = 0;
@@ -352,30 +373,28 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
struct dpaa2_sg_entry *sg_table;
/* allocate space for base edesc, link tables and IV */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (unlikely(!edesc)) {
dev_err(dev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
}
if (unlikely(req->dst != req->src)) {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen);
+ src_len = req->assoclen + req->cryptlen;
+ dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (unlikely(src_nents < 0)) {
dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
- req->assoclen + req->cryptlen);
+ src_len);
qi_cache_free(edesc);
return ERR_PTR(src_nents);
}
- dst_nents = sg_nents_for_len(req->dst, req->assoclen +
- req->cryptlen +
- (encrypt ? authsize :
- (-authsize)));
+ dst_nents = sg_nents_for_len(req->dst, dst_len);
if (unlikely(dst_nents < 0)) {
dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
- req->assoclen + req->cryptlen +
- (encrypt ? authsize : (-authsize)));
+ dst_len);
qi_cache_free(edesc);
return ERR_PTR(dst_nents);
}
@@ -392,22 +411,27 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
mapped_src_nents = 0;
}
- mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(dev, "unable to map destination\n");
- dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
+ if (dst_nents) {
+ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(dev, "unable to map destination\n");
+ dma_unmap_sg(dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_dst_nents = 0;
}
} else {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen +
- (encrypt ? authsize : 0));
+ src_len = req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0);
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (unlikely(src_nents < 0)) {
dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
- req->assoclen + req->cryptlen +
- (encrypt ? authsize : 0));
+ src_len);
qi_cache_free(edesc);
return ERR_PTR(src_nents);
}
@@ -427,9 +451,25 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
/*
* Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
* Input is not contiguous.
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries. Logic:
+ * if (src != dst && output S/G)
+ * pad output S/G, if needed
+ * else if (src == dst && S/G)
+ * overlapping S/Gs; pad one of them
+ * else if (input S/G) ...
+ * pad input S/G, if needed
*/
- qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
- (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
+ if (mapped_dst_nents > 1)
+ qm_sg_nents += pad_sg_nents(mapped_dst_nents);
+ else if ((req->src == req->dst) && (mapped_src_nents > 1))
+ qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
+ 1 + !!ivsize +
+ pad_sg_nents(mapped_src_nents));
+ else
+ qm_sg_nents = pad_sg_nents(qm_sg_nents);
+
sg_table = &edesc->sgt[0];
qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
@@ -437,7 +477,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_nents, ivsize);
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
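pad_sg_nents() referenced above is the CAAM helper from desc_constr.h; a minimal sketch of its expected behavior:

static inline int pad_sg_nents(int sg_nents)
{
	/* round up to a multiple of 4, the engine's S/G read burst size */
	return ALIGN(sg_nents, 4);
}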
@@ -452,7 +492,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (dma_mapping_error(dev, iv_dma)) {
dev_err(dev, "unable to map IV\n");
caam_unmap(dev, req->src, req->dst, src_nents,
- dst_nents, 0, 0, 0, 0);
+ dst_nents, 0, 0, DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -476,7 +516,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (dma_mapping_error(dev, edesc->assoclen_dma)) {
dev_err(dev, "unable to map assoclen\n");
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -487,19 +527,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
qm_sg_index++;
}
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
qm_sg_index += mapped_src_nents;
if (mapped_dst_nents > 1)
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
- qm_sg_index, 0);
+ sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(dev, qm_sg_dma)) {
dev_err(dev, "unable to map S/G table\n");
dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -526,6 +565,14 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dpaa2_fl_set_addr(out_fle, qm_sg_dma +
(1 + !!ivsize) * sizeof(*sg_table));
}
+ } else if (!mapped_dst_nents) {
+ /*
+ * crypto engine requires the output entry to be present when
+ * "frame list" FD is used.
+ * Since engine does not support FMT=2'b11 (unused entry type),
+ * leaving out_fle zeroized is the best option.
+ */
+ goto skip_out_fle;
} else if (mapped_dst_nents == 1) {
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
@@ -537,12 +584,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dpaa2_fl_set_len(out_fle, out_len);
+skip_out_fle:
return edesc;
}
static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct device *dev = ctx->dev;
struct caam_flc *flc;
@@ -575,7 +623,7 @@ static int chachapoly_set_sh_desc(struct crypto_aead *aead)
static int chachapoly_setauthsize(struct crypto_aead *aead,
unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
if (authsize != POLY1305_DIGEST_SIZE)
return -EINVAL;
@@ -587,16 +635,15 @@ static int chachapoly_setauthsize(struct crypto_aead *aead,
static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
- if (keylen != CHACHA_KEY_SIZE + saltlen) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != CHACHA_KEY_SIZE + saltlen)
return -EINVAL;
- }
- ctx->cdata.key_virt = key;
+ memcpy(ctx->key, key, keylen);
+ ctx->cdata.key_virt = ctx->key;
ctx->cdata.keylen = keylen - saltlen;
return chachapoly_set_sh_desc(aead);
@@ -604,7 +651,7 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *dev = ctx->dev;
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_flc *flc;
@@ -661,7 +708,12 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
+ int err;
+
+ err = crypto_gcm_check_authsize(authsize);
+ if (err)
+ return err;
ctx->authsize = authsize;
gcm_set_sh_desc(authenc);
@@ -672,9 +724,13 @@ static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
static int gcm_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *dev = ctx->dev;
+ int ret;
+ ret = aes_check_keylen(keylen);
+ if (ret)
+ return ret;
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -687,7 +743,7 @@ static int gcm_setkey(struct crypto_aead *aead,
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *dev = ctx->dev;
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_flc *flc;
@@ -747,7 +803,12 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
static int rfc4106_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
+ int err;
+
+ err = crypto_rfc4106_check_authsize(authsize);
+ if (err)
+ return err;
ctx->authsize = authsize;
rfc4106_set_sh_desc(authenc);
@@ -758,11 +819,13 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc,
static int rfc4106_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *dev = ctx->dev;
+ int ret;
- if (keylen < 4)
- return -EINVAL;
+ ret = aes_check_keylen(keylen - 4);
+ if (ret)
+ return ret;
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -781,7 +844,7 @@ static int rfc4106_setkey(struct crypto_aead *aead,
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *dev = ctx->dev;
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_flc *flc;
@@ -841,7 +904,10 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
static int rfc4543_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
+
+ if (authsize != 16)
+ return -EINVAL;
ctx->authsize = authsize;
rfc4543_set_sh_desc(authenc);
@@ -852,11 +918,13 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
static int rfc4543_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *dev = ctx->dev;
+ int ret;
- if (keylen < 4)
- return -EINVAL;
+ ret = aes_check_keylen(keylen - 4);
+ if (ret)
+ return ret;
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -874,9 +942,9 @@ static int rfc4543_setkey(struct crypto_aead *aead,
}
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
+ unsigned int keylen, const u32 ctx1_iv_off)
{
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct caam_skcipher_alg *alg =
container_of(crypto_skcipher_alg(skcipher),
struct caam_skcipher_alg, skcipher);
@@ -884,34 +952,11 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
struct caam_flc *flc;
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc;
- u32 ctx1_iv_off = 0;
- const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
- OP_ALG_AAI_CTR_MOD128) &&
- ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
- OP_ALG_ALGSEL_CHACHA20);
const bool is_rfc3686 = alg->caam.rfc3686;
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
- /*
- * AES-CTR needs to load IV in CONTEXT1 reg
- * at an offset of 128bits (16bytes)
- * CONTEXT1[255:128] = IV
- */
- if (ctr_mode)
- ctx1_iv_off = 16;
-
- /*
- * RFC3686 specific:
- * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
- * | *key = {KEY, NONCE}
- */
- if (is_rfc3686) {
- ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
- keylen -= CTR_RFC3686_NONCE_SIZE;
- }
-
ctx->cdata.keylen = keylen;
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
@@ -939,18 +984,105 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
return 0;
}
+static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ int err;
+
+ err = aes_check_keylen(keylen);
+ if (err)
+ return err;
+
+ return skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 ctx1_iv_off;
+ int err;
+
+ /*
+ * RFC3686 specific:
+ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ * | *key = {KEY, NONCE}
+ */
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+
+ err = aes_check_keylen(keylen);
+ if (err)
+ return err;
+
+ return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+}
+
+static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 ctx1_iv_off;
+ int err;
+
+ /*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128bits (16bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ ctx1_iv_off = 16;
+
+ err = aes_check_keylen(keylen);
+ if (err)
+ return err;
+
+ return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+}
+
+static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != CHACHA_KEY_SIZE)
+ return -EINVAL;
+
+ return skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ return verify_skcipher_des_key(skcipher, key) ?:
+ skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ return verify_skcipher_des3_key(skcipher, key) ?:
+ skcipher_setkey(skcipher, key, keylen, 0);
+}
+
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *dev = ctx->dev;
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
struct caam_flc *flc;
u32 *desc;
+ int err;
- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
- dev_err(dev, "key size mismatch\n");
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
+ err = xts_verify_key(skcipher, key, keylen);
+ if (err) {
+ dev_dbg(dev, "key size mismatch\n");
+ return err;
+ }
+
+ if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
+ ctx->xts_key_fallback = true;
+
+ if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) {
+ err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ if (err)
+ return err;
}
ctx->cdata.keylen = keylen;
@@ -981,10 +1113,10 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_request *req_ctx = skcipher_request_ctx(req);
+ struct caam_request *req_ctx = skcipher_request_ctx_dma(req);
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *dev = ctx->dev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
@@ -1037,23 +1169,35 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
qm_sg_ents = 1 + mapped_src_nents;
dst_sg_idx = qm_sg_ents;
- qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ /*
+ * Input, output HW S/G tables: [IV, src][dst, IV]
+ * IV entries point to the same buffer
+ * If src == dst, S/G entries are reused (S/G tables overlap)
+ *
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries.
+ */
+ if (req->src != req->dst)
+ qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
+ else
+ qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
+
qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
ivsize > CAAM_QI_MEMCACHE_SIZE)) {
dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
/* allocate space for base edesc, link tables and IV */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (unlikely(!edesc)) {
dev_err(dev, "could not allocate extended descriptor\n");
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
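A worked example of the sizing above, with illustrative entry counts:

/*
 * src != dst, mapped_src_nents = 3, mapped_dst_nents = 2:
 *   input  table: [IV, s0, s1, s2]        entries 0..3
 *   output table: [d0, d1, IV] + padding  entries 4..7
 *   qm_sg_ents = (1 + 3) + pad_sg_nents(2 + 1) = 4 + 4 = 8
 *
 * src == dst, mapped_src_nents = 3 (mapped_dst_nents stays 0):
 *   shared table: [IV, s0, s1, s2, IV]
 *   input starts at entry 0, output at entry 1 (tables overlap)
 *   qm_sg_ents = 1 + pad_sg_nents(1 + 3) = 1 + 4 = 5
 */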
@@ -1062,11 +1206,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
iv = (u8 *)(sg_table + qm_sg_ents);
memcpy(iv, req->iv, ivsize);
- iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, iv_dma)) {
dev_err(dev, "unable to map IV\n");
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1077,18 +1221,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
edesc->qm_sg_bytes = qm_sg_bytes;
dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+ sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
- if (mapped_dst_nents > 1)
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
- dst_sg_idx, 0);
+ if (req->src != req->dst)
+ sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
+
+ dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
+ ivsize, 0);
edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
dev_err(dev, "unable to map S/G table\n");
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1096,23 +1242,19 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
dpaa2_fl_set_final(in_fle, true);
dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
- dpaa2_fl_set_len(out_fle, req->cryptlen);
+ dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
- if (req->src == req->dst) {
- dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+
+ if (req->src == req->dst)
dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
sizeof(*sg_table));
- } else if (mapped_dst_nents > 1) {
- dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ else
dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
sizeof(*sg_table));
- } else {
- dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
- }
return edesc;
}
@@ -1124,7 +1266,8 @@ static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
int ivsize = crypto_aead_ivsize(aead);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
+ edesc->qm_sg_bytes);
dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}
@@ -1135,7 +1278,8 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
int ivsize = crypto_skcipher_ivsize(skcipher);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
+ edesc->qm_sg_bytes);
}
static void aead_encrypt_done(void *cbk_ctx, u32 status)
@@ -1146,15 +1290,13 @@ static void aead_encrypt_done(void *cbk_ctx, u32 status)
struct caam_request *req_ctx = to_caam_req(areq);
struct aead_edesc *edesc = req_ctx->edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
int ecode = 0;
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
aead_unmap(ctx->dev, edesc, req);
qi_cache_free(edesc);
@@ -1169,22 +1311,13 @@ static void aead_decrypt_done(void *cbk_ctx, u32 status)
struct caam_request *req_ctx = to_caam_req(areq);
struct aead_edesc *edesc = req_ctx->edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
int ecode = 0;
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- /*
- * verify hw auth check passed else return -EBADMSG
- */
- if ((status & JRSTA_CCBERR_ERRID_MASK) ==
- JRSTA_CCBERR_ERRID_ICVCHK)
- ecode = -EBADMSG;
- else
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
aead_unmap(ctx->dev, edesc, req);
qi_cache_free(edesc);
@@ -1195,8 +1328,8 @@ static int aead_encrypt(struct aead_request *req)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct caam_request *caam_req = aead_request_ctx(req);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
+ struct caam_request *caam_req = aead_request_ctx_dma(req);
int ret;
/* allocate extended descriptor */
@@ -1223,8 +1356,8 @@ static int aead_decrypt(struct aead_request *req)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct caam_request *caam_req = aead_request_ctx(req);
+ struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
+ struct caam_request *caam_req = aead_request_ctx_dma(req);
int ret;
/* allocate extended descriptor */
@@ -1249,18 +1382,12 @@ static int aead_decrypt(struct aead_request *req)
static int ipsec_gcm_encrypt(struct aead_request *req)
{
- if (req->assoclen < 8)
- return -EINVAL;
-
- return aead_encrypt(req);
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
}
static int ipsec_gcm_decrypt(struct aead_request *req)
{
- if (req->assoclen < 8)
- return -EINVAL;
-
- return aead_decrypt(req);
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
}
static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
@@ -1269,22 +1396,20 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
struct skcipher_request *req = skcipher_request_cast(areq);
struct caam_request *req_ctx = to_caam_req(areq);
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct skcipher_edesc *edesc = req_ctx->edesc;
int ecode = 0;
int ivsize = crypto_skcipher_ivsize(skcipher);
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
edesc->src_nents > 1 ? 100 : ivsize, 1);
- caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
+ caam_dump_sg("dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
@@ -1292,10 +1417,12 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
/*
* The crypto API expects us to set the IV (req->iv) to the last
- * ciphertext block. This is used e.g. by the CTS mode.
+ * ciphertext block (CBC mode) or last counter (CTR mode).
+ * This is used e.g. by the CTS mode.
*/
- scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
- ivsize, 0);
+ if (!ecode)
+ memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
+ ivsize);
qi_cache_free(edesc);
skcipher_request_complete(req, ecode);
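The memcpy above reads the updated IV straight out of the extended descriptor; a sketch of the layout it assumes (set up in skcipher_edesc_alloc()):

/*
 * qi_cache allocation laid out by skcipher_edesc_alloc():
 *
 *   sgt[0 .. qm_sg_ents-1]  <- qm_sg_bytes of HW S/G entries
 *   iv[ivsize]              <- immediately after sgt, i.e.
 *                              (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes
 *
 * The IV is mapped DMA_BIDIRECTIONAL and also appears as the last entry
 * of the output S/G table, so the engine writes the final IV (last
 * ciphertext block for CBC, last counter for CTR) back into this buffer.
 */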
@@ -1307,38 +1434,76 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
struct skcipher_request *req = skcipher_request_cast(areq);
struct caam_request *req_ctx = to_caam_req(areq);
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct skcipher_edesc *edesc = req_ctx->edesc;
int ecode = 0;
int ivsize = crypto_skcipher_ivsize(skcipher);
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
edesc->src_nents > 1 ? 100 : ivsize, 1);
- caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
+ caam_dump_sg("dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
skcipher_unmap(ctx->dev, edesc, req);
+
+ /*
+ * The crypto API expects us to set the IV (req->iv) to the last
+ * ciphertext block (CBC mode) or last counter (CTR mode).
+ * This is used e.g. by the CTS mode.
+ */
+ if (!ecode)
+ memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
+ ivsize);
+
qi_cache_free(edesc);
skcipher_request_complete(req, ecode);
}
+static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
+}
+
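A usage note on the helper above, based on how it is called below:

/*
 * xts_skcipher_ivsize() returns true when the upper 64 bits of the
 * 16-byte XTS IV are nonzero. Hardware up to era 8 only implements an
 * 8-byte sector index, so such requests are routed to ctx->fallback in
 * skcipher_encrypt()/skcipher_decrypt().
 */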
static int skcipher_encrypt(struct skcipher_request *req)
{
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
- struct caam_request *caam_req = skcipher_request_ctx(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
+ struct caam_request *caam_req = skcipher_request_ctx_dma(req);
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
int ret;
+ /*
+ * XTS is expected to return an error even for input length = 0
+ * Note that the case input length < block size will be caught during
+ * HW offloading and return an error.
+ */
+ if (!req->cryptlen && !ctx->fallback)
+ return 0;
+
+ if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
+ ctx->xts_key_fallback)) {
+ skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&caam_req->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+
+ return crypto_skcipher_encrypt(&caam_req->fallback_req);
+ }
+
/* allocate extended descriptor */
edesc = skcipher_edesc_alloc(req);
if (IS_ERR(edesc))
@@ -1363,23 +1528,37 @@ static int skcipher_decrypt(struct skcipher_request *req)
{
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
- struct caam_request *caam_req = skcipher_request_ctx(req);
- int ivsize = crypto_skcipher_ivsize(skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
+ struct caam_request *caam_req = skcipher_request_ctx_dma(req);
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
int ret;
+ /*
+ * XTS is expected to return an error even for input length = 0
+ * Note that the case input length < block size will be caught during
+ * HW offloading and return an error.
+ */
+ if (!req->cryptlen && !ctx->fallback)
+ return 0;
+
+ if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
+ ctx->xts_key_fallback)) {
+ skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&caam_req->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+
+ return crypto_skcipher_decrypt(&caam_req->fallback_req);
+ }
+
/* allocate extended descriptor */
edesc = skcipher_edesc_alloc(req);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
- /*
- * The crypto API expects us to set the IV (req->iv) to the last
- * ciphertext block.
- */
- scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
- ivsize, 0);
-
caam_req->flc = &ctx->flc[DECRYPT];
caam_req->flc_dma = ctx->flc_dma[DECRYPT];
caam_req->cbk = skcipher_decrypt_done;
@@ -1428,9 +1607,37 @@ static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct caam_skcipher_alg *caam_alg =
container_of(alg, typeof(*caam_alg), skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
+ u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+ int ret = 0;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
- return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
+ if (alg_aai == OP_ALG_AAI_XTS) {
+ const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
+ struct crypto_skcipher *fallback;
+
+ fallback = crypto_alloc_skcipher(tfm_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback)) {
+ dev_err(caam_alg->caam.dev,
+ "Failed to allocate %s fallback: %ld\n",
+ tfm_name, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+
+ ctx->fallback = fallback;
+ crypto_skcipher_set_reqsize_dma(
+ tfm, sizeof(struct caam_request) +
+ crypto_skcipher_reqsize(fallback));
+ } else {
+ crypto_skcipher_set_reqsize_dma(tfm,
+ sizeof(struct caam_request));
+ }
+
+ ret = caam_cra_init(ctx, &caam_alg->caam, false);
+ if (ret && ctx->fallback)
+ crypto_free_skcipher(ctx->fallback);
+
+ return ret;
}
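One design note on the allocation above: requesting the fallback by cra_name with CRYPTO_ALG_NEED_FALLBACK in the mask keeps the lookup from resolving back to this driver.

/*
 * crypto_alloc_skcipher("xts(aes)", 0, CRYPTO_ALG_NEED_FALLBACK)
 * excludes any implementation that itself sets CRYPTO_ALG_NEED_FALLBACK
 * (this driver included, see the cra_flags added to the xts(aes)
 * template below), so the fallback resolves to another provider, e.g.
 * the generic xts template.
 */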
static int caam_cra_init_aead(struct crypto_aead *tfm)
@@ -1439,9 +1646,9 @@ static int caam_cra_init_aead(struct crypto_aead *tfm)
struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
aead);
- crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
- return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
- alg->setkey == aead_setkey);
+ crypto_aead_set_reqsize_dma(tfm, sizeof(struct caam_request));
+ return caam_cra_init(crypto_aead_ctx_dma(tfm), &caam_alg->caam,
+ !caam_alg->caam.nodkp);
}
static void caam_exit_common(struct caam_ctx *ctx)
@@ -1453,12 +1660,16 @@ static void caam_exit_common(struct caam_ctx *ctx)
static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- caam_exit_common(crypto_skcipher_ctx(tfm));
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
+
+ if (ctx->fallback)
+ crypto_free_skcipher(ctx->fallback);
+ caam_exit_common(ctx);
}
static void caam_cra_exit_aead(struct crypto_aead *tfm)
{
- caam_exit_common(crypto_aead_ctx(tfm));
+ caam_exit_common(crypto_aead_ctx_dma(tfm));
}
static struct caam_skcipher_alg driver_algs[] = {
@@ -1469,7 +1680,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-aes-caam-qi2",
.cra_blocksize = AES_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = aes_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1485,7 +1696,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-3des-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = des3_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
@@ -1501,7 +1712,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-des-caam-qi2",
.cra_blocksize = DES_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = des_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = DES_KEY_SIZE,
@@ -1517,7 +1728,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "ctr-aes-caam-qi2",
.cra_blocksize = 1,
},
- .setkey = skcipher_setkey,
+ .setkey = ctr_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1535,7 +1746,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
.cra_blocksize = 1,
},
- .setkey = skcipher_setkey,
+ .setkey = rfc3686_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE +
@@ -1556,6 +1767,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.base = {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-caam-qi2",
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
},
.setkey = xts_skcipher_setkey,
@@ -1574,7 +1786,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "chacha20-caam-qi2",
.cra_blocksize = 1,
},
- .setkey = skcipher_setkey,
+ .setkey = chacha20_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = CHACHA_KEY_SIZE,
@@ -1602,6 +1814,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
{
@@ -1620,6 +1833,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
/* Galois Counter Mode */
@@ -1639,6 +1853,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
}
},
/* single-pass ipsec_esp descriptor */
@@ -1917,7 +2132,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1939,7 +2154,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1962,7 +2177,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1985,7 +2200,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2008,7 +2223,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2031,7 +2246,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2054,7 +2269,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2077,7 +2292,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2100,7 +2315,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2123,7 +2338,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2146,7 +2361,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2169,7 +2384,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2343,7 +2558,7 @@ static struct caam_aead_alg driver_aeads[] = {
.cra_name = "echainiv(authenc(hmac(sha256),"
"cbc(des)))",
.cra_driver_name = "echainiv-authenc-"
- "hmac-sha256-cbc-desi-"
+ "hmac-sha256-cbc-des-"
"caam-qi2",
.cra_blocksize = DES_BLOCK_SIZE,
},
@@ -2716,6 +2931,7 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_AEAD,
.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
OP_ALG_AAI_AEAD,
+ .nodkp = true,
},
},
{
@@ -2738,6 +2954,7 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_AEAD,
.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
OP_ALG_AAI_AEAD,
+ .nodkp = true,
},
},
{
@@ -2797,8 +3014,9 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
- alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
+ alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY);
alg->init = caam_cra_init_skcipher;
alg->exit = caam_cra_exit;
@@ -2810,8 +3028,9 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
- alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->init = caam_cra_init_aead;
alg->exit = caam_cra_exit_aead;
@@ -2835,8 +3054,9 @@ enum hash_optype {
};
/**
- * caam_hash_ctx - ahash per-session context
+ * struct caam_hash_ctx - ahash per-session context
* @flc: Flow Contexts array
+ * @key: authentication key
* @flc_dma: I/O virtual addresses of the Flow Contexts
* @dev: dpseci device
* @ctx_len: size of Context Register
@@ -2844,6 +3064,7 @@ enum hash_optype {
*/
struct caam_hash_ctx {
struct caam_flc flc[HASH_NUM_OP];
+ u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
dma_addr_t flc_dma[HASH_NUM_OP];
struct device *dev;
int ctx_len;
@@ -2855,15 +3076,14 @@ struct caam_hash_state {
struct caam_request caam_req;
dma_addr_t buf_dma;
dma_addr_t ctx_dma;
- u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_0;
- u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_1;
+ int ctx_dma_len;
+ u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen;
+ int next_buflen;
u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*finup)(struct ahash_request *req);
- int current_buf;
};
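A summary of the buffering scheme change visible in this struct:

/*
 * Previous scheme: two ping-pong buffers (buf_0/buf_1) selected by
 * current_buf, with the "next" partial block copied before the job ran.
 * New scheme: a single buf[] plus buflen/next_buflen; the next_buflen
 * trailing bytes of req->src are copied into buf[] only after the job
 * completes (see ahash_done_bi() below), halving the per-request state.
 */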
struct caam_export_state {
@@ -2875,42 +3095,17 @@ struct caam_export_state {
int (*finup)(struct ahash_request *req);
};
-static inline void switch_buf(struct caam_hash_state *state)
-{
- state->current_buf ^= 1;
-}
-
-static inline u8 *current_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_1 : state->buf_0;
-}
-
-static inline u8 *alt_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_0 : state->buf_1;
-}
-
-static inline int *current_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_1 : &state->buflen_0;
-}
-
-static inline int *alt_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_0 : &state->buflen_1;
-}
-
/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_qm_sg(struct device *dev,
struct dpaa2_sg_entry *qm_sg,
struct caam_hash_state *state)
{
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
if (!buflen)
return 0;
- state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
+ state->buf_dma = dma_map_single(dev, state->buf, buflen,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, state->buf_dma)) {
dev_err(dev, "unable to map buf\n");
@@ -2928,6 +3123,7 @@ static inline int ctx_map_to_qm_sg(struct device *dev,
struct caam_hash_state *state, int ctx_len,
struct dpaa2_sg_entry *qm_sg, u32 flag)
{
+ state->ctx_dma_len = ctx_len;
state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
if (dma_mapping_error(dev, state->ctx_dma)) {
dev_err(dev, "unable to map ctx\n");
@@ -2942,7 +3138,7 @@ static inline int ctx_map_to_qm_sg(struct device *dev,
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
struct caam_flc *flc;
@@ -3011,48 +3207,38 @@ static void split_key_sh_done(void *cbk_ctx, u32 err)
dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
- if (err)
- caam_qi2_strstatus(res->dev, err);
-
- res->err = err;
+ res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
complete(&res->completion);
}
/* Digest hash size if it is too large */
-static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
- u32 *keylen, u8 *key_out, u32 digestsize)
+static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
+ u32 digestsize)
{
struct caam_request *req_ctx;
u32 *desc;
struct split_key_sh_result result;
- dma_addr_t src_dma, dst_dma;
+ dma_addr_t key_dma;
struct caam_flc *flc;
dma_addr_t flc_dma;
int ret = -ENOMEM;
struct dpaa2_fl_entry *in_fle, *out_fle;
- req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
+ req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL);
if (!req_ctx)
return -ENOMEM;
in_fle = &req_ctx->fd_flt[1];
out_fle = &req_ctx->fd_flt[0];
- flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
+ flc = kzalloc(sizeof(*flc), GFP_KERNEL);
if (!flc)
goto err_flc;
- src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(ctx->dev, src_dma)) {
- dev_err(ctx->dev, "unable to map key input memory\n");
- goto err_src_dma;
- }
- dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, dst_dma)) {
- dev_err(ctx->dev, "unable to map key output memory\n");
- goto err_dst_dma;
+ key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(ctx->dev, key_dma)) {
+ dev_err(ctx->dev, "unable to map key memory\n");
+ goto err_key_dma;
}
desc = flc->sh_desc;
@@ -3077,14 +3263,14 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
dpaa2_fl_set_final(in_fle, true);
dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(in_fle, src_dma);
+ dpaa2_fl_set_addr(in_fle, key_dma);
dpaa2_fl_set_len(in_fle, *keylen);
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, dst_dma);
+ dpaa2_fl_set_addr(out_fle, key_dma);
dpaa2_fl_set_len(out_fle, digestsize);
print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1);
@@ -3104,17 +3290,15 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
wait_for_completion(&result.completion);
ret = result.err;
print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_in,
+ DUMP_PREFIX_ADDRESS, 16, 4, key,
digestsize, 1);
}
dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
DMA_TO_DEVICE);
err_flc_dma:
- dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
-err_dst_dma:
- dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
-err_src_dma:
+ dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
+err_key_dma:
kfree(flc);
err_flc:
kfree(req_ctx);
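The single bidirectional mapping above digests the key in place; a sketch of the constraint this places on the caller:

/*
 * In-place digest: the same key buffer is both FD input (*keylen bytes)
 * and FD output (digestsize bytes), mapped DMA_BIDIRECTIONAL once.
 * This works because this path is only taken for keylen > blocksize >=
 * digestsize, and because ahash_setkey() duplicates the key into a
 * buffer whose length is padded to the DMA cache alignment (see below).
 */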
@@ -3127,7 +3311,7 @@ err_flc:
static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
unsigned int keylen)
{
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
unsigned int digestsize = crypto_ahash_digestsize(ahash);
int ret;
@@ -3136,12 +3320,16 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
if (keylen > blocksize) {
- hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
- GFP_KERNEL | GFP_DMA);
+ unsigned int aligned_len =
+ ALIGN(keylen, dma_get_cache_alignment());
+
+ if (aligned_len < keylen)
+ return -EOVERFLOW;
+
+ hashed_key = kmemdup(key, aligned_len, GFP_KERNEL);
if (!hashed_key)
return -ENOMEM;
- ret = hash_digest_key(ctx, key, &keylen, hashed_key,
- digestsize);
+ ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
if (ret)
goto bad_free_key;
key = hashed_key;
@@ -3156,31 +3344,41 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
ctx->adata.key_virt = key;
ctx->adata.key_inline = true;
+ /*
+ * In case |user key| > |derived key|, using DKP<imm,imm> would result
+ * in invalid opcodes (last bytes of user key) in the resulting
+ * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
+ * addresses are needed.
+ */
+ if (keylen > ctx->adata.keylen_pad) {
+ memcpy(ctx->key, key, keylen);
+ dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
+ ctx->adata.keylen_pad,
+ DMA_TO_DEVICE);
+ }
+
ret = ahash_set_sh_desc(ahash);
kfree(hashed_key);
return ret;
bad_free_key:
kfree(hashed_key);
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
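Why aligned_len can end up smaller than keylen: ALIGN() wraps in unsigned arithmetic. A worked example, assuming a 64-byte cache line:

/*
 * ALIGN(x, a) == (x + a - 1) & ~(a - 1), evaluated in the type of x.
 * With keylen == 0xffffffff and dma_get_cache_alignment() == 64:
 *   (0xffffffff + 63) wraps to 0x3e, and 0x3e & ~63 == 0,
 * so aligned_len == 0 < keylen and kmemdup() would under-allocate.
 * Hence the explicit -EOVERFLOW check.
 */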
static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
- struct ahash_request *req, int dst_len)
+ struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
if (edesc->src_nents)
dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
- if (edesc->dst_dma)
- dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
if (edesc->qm_sg_bytes)
dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
DMA_TO_DEVICE);
if (state->buf_dma) {
- dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+ dma_unmap_single(dev, state->buf_dma, state->buflen,
DMA_TO_DEVICE);
state->buf_dma = 0;
}
@@ -3188,18 +3386,15 @@ static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
static inline void ahash_unmap_ctx(struct device *dev,
struct ahash_edesc *edesc,
- struct ahash_request *req, int dst_len,
- u32 flag)
+ struct ahash_request *req, u32 flag)
{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
if (state->ctx_dma) {
- dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+ dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
state->ctx_dma = 0;
}
- ahash_unmap(dev, edesc, req, dst_len);
+ ahash_unmap(dev, edesc, req);
}
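Both unmap helpers now take their lengths from the request state instead of a caller-supplied dst_len: state->buflen sizes the buffer unmap, and the new state->ctx_dma_len records at map time how much of caam_ctx was mapped. The field names below come straight from the diff; the array bounds are assumptions for illustration only:

	struct caam_hash_state {
		dma_addr_t buf_dma;	/* map of buf[]; length == buflen */
		dma_addr_t ctx_dma;	/* map of caam_ctx[] */
		int ctx_dma_len;	/* set when mapping, read when unmapping */
		u8 buf[SHA512_BLOCK_SIZE];	/* bound is an assumption */
		int buflen;
		int next_buflen;
		u8 caam_ctx[MAX_CTX_LEN];	/* bound is an assumption */
		/* ... caam_req, update/finup/final callbacks elided ... */
	};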
static void ahash_done(void *cbk_ctx, u32 status)
@@ -3207,31 +3402,26 @@ static void ahash_done(void *cbk_ctx, u32 status)
struct crypto_async_request *areq = cbk_ctx;
struct ahash_request *req = ahash_request_cast(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct ahash_edesc *edesc = state->caam_req.edesc;
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
int ecode = 0;
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
- ahash_unmap(ctx->dev, edesc, req, digestsize);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
+ memcpy(req->result, state->caam_ctx, digestsize);
qi_cache_free(edesc);
print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- if (req->result)
- print_hex_dump_debug("result@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
}
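Two changes recur across all four completion callbacks: caam_qi2_strstatus() now translates the hardware status word into a proper -errno (the callbacks used to hard-code -EIO), and completion goes through the ahash_request_complete() helper rather than calling req->base.complete() by hand. In shorthand:

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);	/* was: ecode = -EIO */
	/* ... unmap, copy digest, free edesc ... */
	ahash_request_complete(req, ecode);	/* was: req->base.complete(&req->base, ecode) */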
static void ahash_done_bi(void *cbk_ctx, u32 status)
@@ -3239,22 +3429,28 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
struct crypto_async_request *areq = cbk_ctx;
struct ahash_request *req = ahash_request_cast(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct ahash_edesc *edesc = state->caam_req.edesc;
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int ecode = 0;
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
- ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
- switch_buf(state);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
qi_cache_free(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
@@ -3263,7 +3459,7 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
crypto_ahash_digestsize(ahash), 1);
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
}
static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
@@ -3271,31 +3467,26 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
struct crypto_async_request *areq = cbk_ctx;
struct ahash_request *req = ahash_request_cast(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct ahash_edesc *edesc = state->caam_req.edesc;
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
int ecode = 0;
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
- ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
+ memcpy(req->result, state->caam_ctx, digestsize);
qi_cache_free(edesc);
print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- if (req->result)
- print_hex_dump_debug("result@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
}
static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
@@ -3303,22 +3494,28 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
struct crypto_async_request *areq = cbk_ctx;
struct ahash_request *req = ahash_request_cast(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct ahash_edesc *edesc = state->caam_req.edesc;
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
int ecode = 0;
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
- ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
- switch_buf(state);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
qi_cache_free(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
@@ -3327,37 +3524,35 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
crypto_ahash_digestsize(ahash), 1);
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
}
static int ahash_update_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state), last_buflen;
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int in_len = *buflen + req->nbytes, to_hash;
int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
struct ahash_edesc *edesc;
int ret = 0;
- last_buflen = *next_buflen;
*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
to_hash = in_len - *next_buflen;
if (to_hash) {
struct dpaa2_sg_entry *sg_table;
+ int src_len = req->nbytes - *next_buflen;
- src_nents = sg_nents_for_len(req->src,
- req->nbytes - (*next_buflen));
+ src_nents = sg_nents_for_len(req->src, src_len);
if (src_nents < 0) {
dev_err(ctx->dev, "Invalid number of src SG.\n");
return src_nents;
@@ -3375,7 +3570,7 @@ static int ahash_update_ctx(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents,
DMA_TO_DEVICE);
@@ -3384,7 +3579,7 @@ static int ahash_update_ctx(struct ahash_request *req)
edesc->src_nents = src_nents;
qm_sg_src_index = 1 + (*buflen ? 1 : 0);
- qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
+ qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
sizeof(*sg_table);
sg_table = &edesc->sgt[0];
@@ -3398,12 +3593,8 @@ static int ahash_update_ctx(struct ahash_request *req)
goto unmap_ctx;
if (mapped_nents) {
- sg_to_qm_sg_last(req->src, mapped_nents,
+ sg_to_qm_sg_last(req->src, src_len,
sg_table + qm_sg_src_index, 0);
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
} else {
dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
true);
@@ -3442,18 +3633,15 @@ static int ahash_update_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = last_buflen;
- }
- print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
- ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
qi_cache_free(edesc);
return ret;
}
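The qm_sg_bytes computations in this and later hunks switch from the raw entry count to pad_sg_nents(). The helper is defined elsewhere in the caam driver; presumably it rounds the hardware S/G entry count up to the engine's fetch granularity so a burst read never runs past the allocated table. A plausible sketch under that assumption (the multiple of 4 is a guess, not taken from this diff):

	/* hypothetical: pad the HW S/G count to the engine's burst size */
	static inline int pad_sg_nents(int sg_nents)
	{
		return ALIGN(sg_nents, 4);
	}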
@@ -3461,31 +3649,30 @@ unmap_ctx:
static int ahash_final_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
- int qm_sg_bytes, qm_sg_src_index;
+ int buflen = state->buflen;
+ int qm_sg_bytes;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
struct dpaa2_sg_entry *sg_table;
int ret;
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc)
return -ENOMEM;
- qm_sg_src_index = 1 + (buflen ? 1 : 0);
- qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
+ qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
sg_table = &edesc->sgt[0];
ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
- DMA_TO_DEVICE);
+ DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
@@ -3493,7 +3680,7 @@ static int ahash_final_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
+ dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
DMA_TO_DEVICE);
@@ -3504,22 +3691,13 @@ static int ahash_final_ctx(struct ahash_request *req)
}
edesc->qm_sg_bytes = qm_sg_bytes;
- edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- dev_err(ctx->dev, "unable to map dst\n");
- edesc->dst_dma = 0;
- ret = -ENOMEM;
- goto unmap_ctx;
- }
-
memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
dpaa2_fl_set_final(in_fle, true);
dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
dpaa2_fl_set_len(out_fle, digestsize);
req_ctx->flc = &ctx->flc[FINALIZE];
@@ -3534,7 +3712,7 @@ static int ahash_final_ctx(struct ahash_request *req)
return ret;
unmap_ctx:
- ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
qi_cache_free(edesc);
return ret;
}
@@ -3542,14 +3720,14 @@ unmap_ctx:
static int ahash_finup_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
int qm_sg_bytes, qm_sg_src_index;
int src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
@@ -3575,7 +3753,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
@@ -3583,11 +3761,12 @@ static int ahash_finup_ctx(struct ahash_request *req)
edesc->src_nents = src_nents;
qm_sg_src_index = 1 + (buflen ? 1 : 0);
- qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
+ qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
+ sizeof(*sg_table);
sg_table = &edesc->sgt[0];
ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
- DMA_TO_DEVICE);
+ DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
@@ -3595,7 +3774,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
+ sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
DMA_TO_DEVICE);
@@ -3606,22 +3785,13 @@ static int ahash_finup_ctx(struct ahash_request *req)
}
edesc->qm_sg_bytes = qm_sg_bytes;
- edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- dev_err(ctx->dev, "unable to map dst\n");
- edesc->dst_dma = 0;
- ret = -ENOMEM;
- goto unmap_ctx;
- }
-
memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
dpaa2_fl_set_final(in_fle, true);
dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
dpaa2_fl_set_len(out_fle, digestsize);
req_ctx->flc = &ctx->flc[FINALIZE];
@@ -3636,7 +3806,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
return ret;
unmap_ctx:
- ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
qi_cache_free(edesc);
return ret;
}
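Note why the context mapping direction flips to DMA_BIDIRECTIONAL in ahash_final_ctx() and ahash_finup_ctx(): the output frame-list entry now points at the already-mapped running context (state->ctx_dma) instead of a separate req->result mapping, so the engine both reads (previous state) and writes (final digest) the same buffer; the completion callback then copies the digest to req->result after unmapping. The essential pairing, taken from the hunks above:

	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
			       DMA_BIDIRECTIONAL);	/* engine reads state, writes digest */
	/* ... S/G table construction elided ... */
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);	/* digest lands in caam_ctx */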
@@ -3644,8 +3814,8 @@ unmap_ctx:
static int ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
@@ -3676,7 +3846,7 @@ static int ahash_digest(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
return ret;
@@ -3689,8 +3859,8 @@ static int ahash_digest(struct ahash_request *req)
int qm_sg_bytes;
struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
- qm_sg_bytes = mapped_nents * sizeof(*sg_table);
- sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+ qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
+ sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
qm_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
@@ -3705,18 +3875,19 @@ static int ahash_digest(struct ahash_request *req)
dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
}
- edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ state->ctx_dma_len = digestsize;
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- dev_err(ctx->dev, "unable to map dst\n");
- edesc->dst_dma = 0;
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
goto unmap;
}
dpaa2_fl_set_final(in_fle, true);
dpaa2_fl_set_len(in_fle, req->nbytes);
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
dpaa2_fl_set_len(out_fle, digestsize);
req_ctx->flc = &ctx->flc[DIGEST];
@@ -3730,7 +3901,7 @@ static int ahash_digest(struct ahash_request *req)
return ret;
unmap:
- ahash_unmap(ctx->dev, edesc, req, digestsize);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
qi_cache_free(edesc);
return ret;
}
@@ -3738,45 +3909,57 @@ unmap:
static int ahash_final_no_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int buflen = *current_buflen(state);
+ u8 *buf = state->buf;
+ int buflen = state->buflen;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret = -ENOMEM;
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc)
return ret;
- state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
- if (dma_mapping_error(ctx->dev, state->buf_dma)) {
- dev_err(ctx->dev, "unable to map src\n");
- goto unmap;
+ if (buflen) {
+ state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->buf_dma)) {
+ dev_err(ctx->dev, "unable to map src\n");
+ goto unmap;
+ }
}
- edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ state->ctx_dma_len = digestsize;
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- dev_err(ctx->dev, "unable to map dst\n");
- edesc->dst_dma = 0;
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
goto unmap;
}
memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
dpaa2_fl_set_final(in_fle, true);
- dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(in_fle, state->buf_dma);
- dpaa2_fl_set_len(in_fle, buflen);
+ /*
+ * The crypto engine requires the input entry to be present when a
+ * "frame list" FD is used.
+ * Since the engine does not support FMT=2'b11 (the unused entry
+ * type), leaving in_fle zeroized (except for the "Final" flag) is
+ * the best option.
+ */
+ if (buflen) {
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, state->buf_dma);
+ dpaa2_fl_set_len(in_fle, buflen);
+ }
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
dpaa2_fl_set_len(out_fle, digestsize);
req_ctx->flc = &ctx->flc[DIGEST];
@@ -3791,7 +3974,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
return ret;
unmap:
- ahash_unmap(ctx->dev, edesc, req, digestsize);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
qi_cache_free(edesc);
return ret;
}
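For the zero-length-final case the comment above describes, the resulting frame-list setup is worth seeing in one piece; this is the shape of the code after the hunk is applied, with a comment spelling out the constraint:

	/*
	 * No input to hash: the FD's frame list still needs an input
	 * entry, and FMT=2'b11 ("unused") isn't supported, so the entry
	 * stays all-zero apart from the Final bit.
	 */
	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	if (buflen) {
		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(in_fle, state->buf_dma);
		dpaa2_fl_set_len(in_fle, buflen);
	}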
@@ -3799,17 +3982,16 @@ unmap:
static int ahash_update_no_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int in_len = *buflen + req->nbytes, to_hash;
int qm_sg_bytes, src_nents, mapped_nents;
struct ahash_edesc *edesc;
@@ -3820,9 +4002,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
if (to_hash) {
struct dpaa2_sg_entry *sg_table;
+ int src_len = req->nbytes - *next_buflen;
- src_nents = sg_nents_for_len(req->src,
- req->nbytes - *next_buflen);
+ src_nents = sg_nents_for_len(req->src, src_len);
if (src_nents < 0) {
dev_err(ctx->dev, "Invalid number of src SG.\n");
return src_nents;
@@ -3840,7 +4022,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents,
DMA_TO_DEVICE);
@@ -3848,19 +4030,15 @@ static int ahash_update_no_ctx(struct ahash_request *req)
}
edesc->src_nents = src_nents;
- qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
+ qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
+ sizeof(*sg_table);
sg_table = &edesc->sgt[0];
ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
if (ret)
goto unmap_ctx;
- sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
-
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
+ sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
qm_sg_bytes, DMA_TO_DEVICE);
@@ -3871,6 +4049,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
}
edesc->qm_sg_bytes = qm_sg_bytes;
+ state->ctx_dma_len = ctx->ctx_len;
state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
ctx->ctx_len, DMA_FROM_DEVICE);
if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
@@ -3908,18 +4087,15 @@ static int ahash_update_no_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = 0;
- }
- print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
- ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
qi_cache_free(edesc);
return ret;
}
@@ -3927,19 +4103,19 @@ unmap_ctx:
static int ahash_finup_no_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
int qm_sg_bytes, src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
struct dpaa2_sg_entry *sg_table;
- int ret;
+ int ret = -ENOMEM;
src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0) {
@@ -3952,28 +4128,28 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
DMA_TO_DEVICE);
if (!mapped_nents) {
dev_err(ctx->dev, "unable to DMA map source\n");
- return -ENOMEM;
+ return ret;
}
} else {
mapped_nents = 0;
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
- return -ENOMEM;
+ return ret;
}
edesc->src_nents = src_nents;
- qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
+ qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
sg_table = &edesc->sgt[0];
ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
if (ret)
goto unmap;
- sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+ sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
DMA_TO_DEVICE);
@@ -3984,11 +4160,12 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
}
edesc->qm_sg_bytes = qm_sg_bytes;
- edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ state->ctx_dma_len = digestsize;
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- dev_err(ctx->dev, "unable to map dst\n");
- edesc->dst_dma = 0;
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
ret = -ENOMEM;
goto unmap;
}
@@ -3999,7 +4176,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
dpaa2_fl_set_len(out_fle, digestsize);
req_ctx->flc = &ctx->flc[DIGEST];
@@ -4014,23 +4191,24 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
return ret;
unmap:
- ahash_unmap(ctx->dev, edesc, req, digestsize);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
qi_cache_free(edesc);
- return -ENOMEM;
+ return ret;
}
static int ahash_update_first(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_request *req_ctx = &state->caam_req;
struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int to_hash;
int src_nents, mapped_nents;
struct ahash_edesc *edesc;
@@ -4042,9 +4220,9 @@ static int ahash_update_first(struct ahash_request *req)
if (to_hash) {
struct dpaa2_sg_entry *sg_table;
+ int src_len = req->nbytes - *next_buflen;
- src_nents = sg_nents_for_len(req->src,
- req->nbytes - (*next_buflen));
+ src_nents = sg_nents_for_len(req->src, src_len);
if (src_nents < 0) {
dev_err(ctx->dev, "Invalid number of src SG.\n");
return src_nents;
@@ -4062,7 +4240,7 @@ static int ahash_update_first(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents,
DMA_TO_DEVICE);
@@ -4079,8 +4257,9 @@ static int ahash_update_first(struct ahash_request *req)
if (mapped_nents > 1) {
int qm_sg_bytes;
- sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
- qm_sg_bytes = mapped_nents * sizeof(*sg_table);
+ sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
+ qm_sg_bytes = pad_sg_nents(mapped_nents) *
+ sizeof(*sg_table);
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
qm_sg_bytes,
DMA_TO_DEVICE);
@@ -4097,10 +4276,7 @@ static int ahash_update_first(struct ahash_request *req)
dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
}
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src, to_hash,
- *next_buflen, 0);
-
+ state->ctx_dma_len = ctx->ctx_len;
state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
ctx->ctx_len, DMA_FROM_DEVICE);
if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
@@ -4133,18 +4309,18 @@ static int ahash_update_first(struct ahash_request *req)
state->update = ahash_update_no_ctx;
state->finup = ahash_finup_no_ctx;
state->final = ahash_final_no_ctx;
- scatterwalk_map_and_copy(next_buf, req->src, 0,
+ scatterwalk_map_and_copy(buf, req->src, 0,
req->nbytes, 0);
- switch_buf(state);
- }
+ *buflen = *next_buflen;
- print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
- ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
qi_cache_free(edesc);
return ret;
}
@@ -4156,56 +4332,48 @@ static int ahash_finup_first(struct ahash_request *req)
static int ahash_init(struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
state->update = ahash_update_first;
state->finup = ahash_finup_first;
state->final = ahash_final_no_ctx;
state->ctx_dma = 0;
- state->current_buf = 0;
+ state->ctx_dma_len = 0;
state->buf_dma = 0;
- state->buflen_0 = 0;
- state->buflen_1 = 0;
+ state->buflen = 0;
+ state->next_buflen = 0;
return 0;
}
static int ahash_update(struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
return state->update(req);
}
static int ahash_finup(struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
return state->finup(req);
}
static int ahash_final(struct ahash_request *req)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
return state->final(req);
}
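These entry points are thin trampolines around per-request function pointers: ahash_init() installs the "no running context yet" handlers, and the hunk around ahash_update_first() above shows them being reassigned for the small-input case. Presumably the hashed path performs the matching switch to the _ctx variants (not visible in this diff):

	/* installed by ahash_init() */
	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	/* assumed swap once a running context exists */
	state->update = ahash_update_ctx;
	state->finup = ahash_finup_ctx;
	state->final = ahash_final_ctx;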
static int ahash_export(struct ahash_request *req, void *out)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
struct caam_export_state *export = out;
- int len;
- u8 *buf;
-
- if (state->current_buf) {
- buf = state->buf_1;
- len = state->buflen_1;
- } else {
- buf = state->buf_0;
- len = state->buflen_0;
- }
+ u8 *buf = state->buf;
+ int len = state->buflen;
memcpy(export->buf, buf, len);
memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
@@ -4219,13 +4387,13 @@ static int ahash_export(struct ahash_request *req, void *out)
static int ahash_import(struct ahash_request *req, const void *in)
{
- struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_state *state = ahash_request_ctx_dma(req);
const struct caam_export_state *export = in;
memset(state, 0, sizeof(*state));
- memcpy(state->buf_0, export->buf, export->buflen);
+ memcpy(state->buf, export->buf, export->buflen);
memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
- state->buflen_0 = export->buflen;
+ state->buflen = export->buflen;
state->update = export->update;
state->final = export->final;
state->finup = export->finup;
@@ -4378,6 +4546,7 @@ struct caam_hash_alg {
struct list_head entry;
struct device *dev;
int alg_type;
+ bool is_hmac;
struct ahash_alg ahash_alg;
};
@@ -4391,7 +4560,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
container_of(halg, struct ahash_alg, halg);
struct caam_hash_alg *caam_hash =
container_of(alg, struct caam_hash_alg, ahash_alg);
- struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
HASH_MSG_LEN + SHA1_DIGEST_SIZE,
@@ -4404,11 +4573,27 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
ctx->dev = caam_hash->dev;
+ if (caam_hash->is_hmac) {
+ ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
+ ARRAY_SIZE(ctx->key),
+ DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
+ dev_err(ctx->dev, "unable to map key\n");
+ return -ENOMEM;
+ }
+ }
+
dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
DMA_BIDIRECTIONAL,
DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->dev, dma_addr)) {
dev_err(ctx->dev, "unable to map shared descriptors\n");
+ if (ctx->adata.key_dma)
+ dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
+ ARRAY_SIZE(ctx->key),
+ DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
return -ENOMEM;
}
@@ -4422,18 +4607,25 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
OP_ALG_ALGSEL_SUBMASK) >>
OP_ALG_ALGSEL_SHIFT];
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct caam_hash_state));
+ crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));
- return ahash_set_sh_desc(ahash);
+ /*
+ * For keyed hash algorithms shared descriptors
+ * will be created later in setkey() callback
+ */
+ return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash);
}
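The HMAC branch in cra_init above pre-maps ctx->key once with DMA_ATTR_SKIP_CPU_SYNC (no initial CPU-to-device flush), and ahash_setkey() later publishes only the bytes it rewrites via dma_sync_single_for_device(); shared descriptors are deferred to setkey() because they embed key material. The two halves pair up like this:

	/* cra_init: create the mapping, skip the initial sync */
	ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
						  ARRAY_SIZE(ctx->key),
						  DMA_TO_DEVICE,
						  DMA_ATTR_SKIP_CPU_SYNC);

	/* setkey: copy the key, then sync just the part the engine reads */
	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
				   ctx->adata.keylen_pad, DMA_TO_DEVICE);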
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
- struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
+ if (ctx->adata.key_dma)
+ dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
+ ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
}
static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
@@ -4456,21 +4648,23 @@ static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
template->hmac_name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->hmac_driver_name);
+ t_alg->is_hmac = true;
} else {
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
template->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
t_alg->ahash_alg.setkey = NULL;
+ t_alg->is_hmac = false;
}
alg->cra_module = THIS_MODULE;
alg->cra_init = caam_hash_cra_init;
alg->cra_exit = caam_hash_cra_exit;
- alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
+ alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
alg->cra_priority = CAAM_CRA_PRIORITY;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
- alg->cra_flags = CRYPTO_ALG_ASYNC;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
t_alg->alg_type = template->alg_type;
t_alg->dev = dev;
@@ -4503,7 +4697,8 @@ static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
nctx->cb = dpaa2_caam_fqdan_cb;
/* Register notification callbacks */
- err = dpaa2_io_service_register(NULL, nctx);
+ ppriv->dpio = dpaa2_io_service_select(cpu);
+ err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
if (unlikely(err)) {
dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
nctx->cb = NULL;
@@ -4536,7 +4731,7 @@ err:
ppriv = per_cpu_ptr(priv->ppriv, cpu);
if (!ppriv->nctx.cb)
break;
- dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+ dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
}
for_each_online_cpu(cpu) {
@@ -4556,7 +4751,8 @@ static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
for_each_online_cpu(cpu) {
ppriv = per_cpu_ptr(priv->ppriv, cpu);
- dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+ dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
+ priv->dev);
dpaa2_io_store_destroy(ppriv->store);
if (++i == priv->num_pairs)
@@ -4618,6 +4814,13 @@ static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
{
struct device *dev = priv->dev;
struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ int err;
+
+ if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
+ err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
+ if (err)
+ dev_err(dev, "dpseci_reset() failed\n");
+ }
dpaa2_dpseci_congestion_free(priv);
dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
@@ -4636,7 +4839,7 @@ static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
if (unlikely(fd_err))
- dev_err(priv->dev, "FD error: %08x\n", fd_err);
+ dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);
/*
* FD[ADDR] is guaranteed to be valid, irrespective of errors reported
@@ -4654,7 +4857,7 @@ static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
/* Retry while portal is busy */
do {
- err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
+ err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
ppriv->store);
} while (err == -EBUSY);
@@ -4722,7 +4925,7 @@ static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
if (cleaned < budget) {
napi_complete_done(napi, cleaned);
- err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
+ err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
if (unlikely(err))
dev_err(priv->dev, "Notification rearm failed: %d\n",
err);
@@ -4736,6 +4939,7 @@ static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
{
struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
struct device *dev = priv->dev;
+ unsigned int alignmask;
int err;
/*
@@ -4746,13 +4950,14 @@ static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
!(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
return 0;
- priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
- GFP_KERNEL | GFP_DMA);
+ alignmask = DPAA2_CSCN_ALIGN - 1;
+ alignmask |= dma_get_cache_alignment() - 1;
+ priv->cscn_mem = kzalloc(ALIGN(DPAA2_CSCN_SIZE, alignmask + 1),
+ GFP_KERNEL);
if (!priv->cscn_mem)
return -ENOMEM;
- priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
- priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
+ priv->cscn_dma = dma_map_single(dev, priv->cscn_mem,
DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, priv->cscn_dma)) {
dev_err(dev, "Error mapping CSCN memory area\n");
@@ -4786,14 +4991,30 @@ err_dma_map:
return err;
}
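The congestion-notification change above drops the manual PTR_ALIGN() dance: instead of over-allocating and aligning the pointer by hand, it folds the hardware's alignment requirement and the cache-line size into one mask and rounds the allocation size up. In practice the rounded size comes out a power of two, for which kmalloc returns naturally aligned memory; that property is what the simplification relies on. The arithmetic, isolated:

	unsigned int alignmask;

	alignmask = DPAA2_CSCN_ALIGN - 1;		/* HW requirement */
	alignmask |= dma_get_cache_alignment() - 1;	/* DMA-safe cache lines */
	priv->cscn_mem = kzalloc(ALIGN(DPAA2_CSCN_SIZE, alignmask + 1),
				 GFP_KERNEL);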
+static void free_dpaa2_pcpu_netdev(struct dpaa2_caam_priv *priv, const cpumask_t *cpus)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int i;
+
+ for_each_cpu(i, cpus) {
+ ppriv = per_cpu_ptr(priv->ppriv, i);
+ free_netdev(ppriv->net_dev);
+ }
+}
+
static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
{
struct device *dev = &ls_dev->dev;
struct dpaa2_caam_priv *priv;
struct dpaa2_caam_priv_per_cpu *ppriv;
+ cpumask_var_t clean_mask;
int err, cpu;
u8 i;
+ err = -ENOMEM;
+ if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL))
+ goto err_cpumask;
+
priv = dev_get_drvdata(dev);
priv->dev = dev;
@@ -4815,6 +5036,14 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
+ if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
+ err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpseci_reset() failed\n");
+ goto err_get_vers;
+ }
+ }
+
err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
&priv->dpseci_attr);
if (err) {
@@ -4863,30 +5092,53 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
i = 0;
for_each_online_cpu(cpu) {
- dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", i,
- priv->rx_queue_attr[i].fqid,
- priv->tx_queue_attr[i].fqid);
+ u8 j;
+
+ j = i % priv->num_pairs;
ppriv = per_cpu_ptr(priv->ppriv, cpu);
- ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
- ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
- ppriv->prio = i;
-
- ppriv->net_dev.dev = *dev;
- INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
- netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
- DPAA2_CAAM_NAPI_WEIGHT);
- if (++i == priv->num_pairs)
- break;
+ ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
+
+ /*
+ * Allow all cores to enqueue, while only some of them
+ * will take part in dequeuing.
+ */
+ if (++i > priv->num_pairs)
+ continue;
+
+ ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
+ ppriv->prio = j;
+
+ dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
+ priv->rx_queue_attr[j].fqid,
+ priv->tx_queue_attr[j].fqid);
+
+ ppriv->net_dev = alloc_netdev_dummy(0);
+ if (!ppriv->net_dev) {
+ err = -ENOMEM;
+ goto err_alloc_netdev;
+ }
+ cpumask_set_cpu(cpu, clean_mask);
+ ppriv->net_dev->dev = *dev;
+
+ netif_napi_add_tx_weight(ppriv->net_dev, &ppriv->napi,
+ dpaa2_dpseci_poll,
+ DPAA2_CAAM_NAPI_WEIGHT);
}
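The per-CPU loop above also changes shape: every online CPU gets a request FQ (so any core can enqueue), but only the first num_pairs CPUs get response FQs, NAPI instances and the new dummy netdevs. NAPI needs a struct net_device to attach to, and the embedded fake netdev is replaced by alloc_netdev_dummy() so the structure is properly initialized and freed. The essential pairing:

	ppriv->net_dev = alloc_netdev_dummy(0);	/* no private area needed */
	if (!ppriv->net_dev)
		return -ENOMEM;	/* the real code unwinds via clean_mask */
	netif_napi_add_tx_weight(ppriv->net_dev, &ppriv->napi,
				 dpaa2_dpseci_poll, DPAA2_CAAM_NAPI_WEIGHT);
	/* ... later, on teardown (see dpaa2_dpseci_disable()): */
	netif_napi_del(&ppriv->napi);
	free_netdev(ppriv->net_dev);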
- return 0;
+ err = 0;
+ goto free_cpumask;
+err_alloc_netdev:
+ free_dpaa2_pcpu_netdev(priv, clean_mask);
err_get_rx_queue:
dpaa2_dpseci_congestion_free(priv);
err_get_vers:
dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
err_open:
+free_cpumask:
+ free_cpumask_var(clean_mask);
+err_cpumask:
return err;
}
@@ -4924,12 +5176,13 @@ static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
return err;
}
- dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
+ dev_dbg(dev, "disable: %s\n", str_false_true(enabled));
for (i = 0; i < priv->num_pairs; i++) {
ppriv = per_cpu_ptr(priv->ppriv, i);
napi_disable(&ppriv->napi);
netif_napi_del(&ppriv->napi);
+ free_netdev(ppriv->net_dev);
}
return 0;
@@ -4965,7 +5218,7 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
priv->domain = iommu_get_domain_for_dev(dev);
qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
- 0, SLAB_CACHE_DMA, NULL);
+ 0, 0, NULL);
if (!qi_cache) {
dev_err(dev, "Can't allocate SEC cache\n");
return -ENOMEM;
@@ -5005,8 +5258,7 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
/* DPIO */
err = dpaa2_dpseci_dpio_setup(priv);
if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
+ dev_err_probe(dev, err, "dpaa2_dpseci_dpio_setup() failed\n");
goto err_dpio_setup;
}
@@ -5024,6 +5276,8 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
goto err_bind;
}
+ dpaa2_dpseci_debugfs_init(priv);
+
/* register crypto algorithms the device supports */
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
struct caam_skcipher_alg *t_alg = driver_algs + i;
@@ -5130,7 +5384,7 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
dev_warn(dev, "%s hash alg allocation failed: %d\n",
- alg->driver_name, err);
+ alg->hmac_driver_name, err);
continue;
}
@@ -5182,7 +5436,7 @@ err_dma_mask:
return err;
}
-static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
+static void __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
{
struct device *dev;
struct dpaa2_caam_priv *priv;
@@ -5191,6 +5445,8 @@ static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
dev = &ls_dev->dev;
priv = dev_get_drvdata(dev);
+ dpaa2_dpseci_debugfs_exit(priv);
+
for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
struct caam_aead_alg *t_alg = driver_aeads + i;
@@ -5221,15 +5477,14 @@ static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
free_percpu(priv->ppriv);
fsl_mc_portal_free(priv->mc_io);
kmem_cache_destroy(qi_cache);
-
- return 0;
}
int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
{
struct dpaa2_fd fd;
struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
- int err = 0, i, id;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err = 0, i;
if (IS_ERR(req))
return PTR_ERR(req);
@@ -5238,7 +5493,7 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
DPAA2_CSCN_SIZE,
DMA_FROM_DEVICE);
- if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
+ if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
dev_dbg_ratelimited(dev, "Dropping request\n");
return -EBUSY;
}
@@ -5259,23 +5514,18 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
dpaa2_fd_set_flc(&fd, req->flc_dma);
- /*
- * There is no guarantee that preemption is disabled here,
- * thus take action.
- */
- preempt_disable();
- id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
+ ppriv = raw_cpu_ptr(priv->ppriv);
for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
- err = dpaa2_io_service_enqueue_fq(NULL,
- priv->tx_queue_attr[id].fqid,
+ err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
&fd);
if (err != -EBUSY)
break;
+
+ cpu_relax();
}
- preempt_enable();
if (unlikely(err)) {
- dev_err(dev, "Error enqueuing frame: %d\n", err);
+ dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
goto err_out;
}
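The enqueue path drops the preempt_disable()/smp_processor_id() pair: since every CPU now owns a request FQ, raw_cpu_ptr() can pick the current CPU's slot without pinning the task, and a migration mid-enqueue simply lands on another CPU's equally valid FQ. The busy-retry loop also gains a cpu_relax(). The resulting shape:

	ppriv = raw_cpu_ptr(priv->ppriv);	/* no preemption pinning needed */
	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
		err = dpaa2_io_service_enqueue_fq(ppriv->dpio,
						  ppriv->req_fqid, &fd);
		if (err != -EBUSY)
			break;

		cpu_relax();	/* be polite while the portal is busy */
	}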
@@ -5295,6 +5545,7 @@ static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
},
{ .vendor = 0x0 }
};
+MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table);
static struct fsl_mc_driver dpaa2_caam_driver = {
.driver = {