Diffstat (limited to 'drivers/crypto/caam/caamalg_qi2.c')
-rw-r--r--   drivers/crypto/caam/caamalg_qi2.c   438
1 file changed, 300 insertions(+), 138 deletions(-)
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 28669cbecf77..107ccb2ade42 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -16,9 +16,14 @@ #include "caamalg_desc.h" #include "caamhash_desc.h" #include "dpseci-debugfs.h" +#include <linux/dma-mapping.h> #include <linux/fsl/mc.h> +#include <linux/kernel.h> +#include <linux/string_choices.h> #include <soc/fsl/dpaa2-io.h> #include <soc/fsl/dpaa2-fd.h> +#include <crypto/xts.h> +#include <linux/unaligned.h> #define CAAM_CRA_PRIORITY 2000 @@ -27,7 +32,7 @@ SHA512_DIGEST_SIZE * 2) /* - * This is a a cache of buffers, from which the users of CAAM QI driver + * This is a cache of buffers, from which the users of CAAM QI driver * can allocate short buffers. It's speedier than doing kmalloc on the hotpath. * NOTE: A more elegant solution would be to have some headroom in the frames * being processed. This can be added by the dpaa2-eth driver. This would @@ -59,7 +64,7 @@ struct caam_skcipher_alg { }; /** - * caam_ctx - per-session context + * struct caam_ctx - per-session context * @flc: Flow Contexts array * @key: [authentication key], encryption key * @flc_dma: I/O virtual addresses of the Flow Contexts @@ -69,6 +74,9 @@ struct caam_skcipher_alg { * @adata: authentication algorithm details * @cdata: encryption algorithm details * @authsize: authentication tag (a.k.a. ICV / MAC) size + * @xts_key_fallback: true if fallback tfm needs to be used due + * to unsupported xts key lengths + * @fallback: xts fallback tfm */ struct caam_ctx { struct caam_flc flc[NUM_OP]; @@ -80,6 +88,8 @@ struct caam_ctx { struct alginfo adata; struct alginfo cdata; unsigned int authsize; + bool xts_key_fallback; + struct crypto_skcipher *fallback; }; static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv, @@ -127,12 +137,12 @@ static struct caam_request *to_caam_req(struct crypto_async_request *areq) { switch (crypto_tfm_alg_type(areq->tfm)) { case CRYPTO_ALG_TYPE_SKCIPHER: - return skcipher_request_ctx(skcipher_request_cast(areq)); + return skcipher_request_ctx_dma(skcipher_request_cast(areq)); case CRYPTO_ALG_TYPE_AEAD: - return aead_request_ctx(container_of(areq, struct aead_request, - base)); + return aead_request_ctx_dma( + container_of(areq, struct aead_request, base)); case CRYPTO_ALG_TYPE_AHASH: - return ahash_request_ctx(ahash_request_cast(areq)); + return ahash_request_ctx_dma(ahash_request_cast(areq)); default: return ERR_PTR(-EINVAL); } @@ -164,7 +174,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) { struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), typeof(*alg), aead); - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); unsigned int ivsize = crypto_aead_ivsize(aead); struct device *dev = ctx->dev; struct dpaa2_caam_priv *priv = dev_get_drvdata(dev); @@ -269,7 +279,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { - struct caam_ctx *ctx = crypto_aead_ctx(authenc); + struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); ctx->authsize = authsize; aead_set_sh_desc(authenc); @@ -280,7 +290,7 @@ static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize) static int aead_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *dev = ctx->dev; struct crypto_authenc_keys 
keys; @@ -343,10 +353,10 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_request *req_ctx = aead_request_ctx(req); + struct caam_request *req_ctx = aead_request_ctx_dma(req); struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), typeof(*alg), aead); struct device *dev = ctx->dev; @@ -363,7 +373,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, struct dpaa2_sg_entry *sg_table; /* allocate space for base edesc, link tables and IV */ - edesc = qi_cache_zalloc(GFP_DMA | flags); + edesc = qi_cache_zalloc(flags); if (unlikely(!edesc)) { dev_err(dev, "could not allocate extended descriptor\n"); return ERR_PTR(-ENOMEM); @@ -580,7 +590,7 @@ skip_out_fle: static int chachapoly_set_sh_desc(struct crypto_aead *aead) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); unsigned int ivsize = crypto_aead_ivsize(aead); struct device *dev = ctx->dev; struct caam_flc *flc; @@ -613,7 +623,7 @@ static int chachapoly_set_sh_desc(struct crypto_aead *aead) static int chachapoly_setauthsize(struct crypto_aead *aead, unsigned int authsize) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); if (authsize != POLY1305_DIGEST_SIZE) return -EINVAL; @@ -625,14 +635,15 @@ static int chachapoly_setauthsize(struct crypto_aead *aead, static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); unsigned int ivsize = crypto_aead_ivsize(aead); unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize; if (keylen != CHACHA_KEY_SIZE + saltlen) return -EINVAL; - ctx->cdata.key_virt = key; + memcpy(ctx->key, key, keylen); + ctx->cdata.key_virt = ctx->key; ctx->cdata.keylen = keylen - saltlen; return chachapoly_set_sh_desc(aead); @@ -640,7 +651,7 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, static int gcm_set_sh_desc(struct crypto_aead *aead) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *dev = ctx->dev; unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_flc *flc; @@ -697,7 +708,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { - struct caam_ctx *ctx = crypto_aead_ctx(authenc); + struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); int err; err = crypto_gcm_check_authsize(authsize); @@ -713,7 +724,7 @@ static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) static int gcm_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *dev = ctx->dev; int ret; @@ -732,7 +743,7 @@ static int gcm_setkey(struct crypto_aead *aead, static int rfc4106_set_sh_desc(struct crypto_aead *aead) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *dev = ctx->dev; unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_flc *flc; @@ -792,7 +803,7 @@ static int rfc4106_set_sh_desc(struct 
crypto_aead *aead) static int rfc4106_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { - struct caam_ctx *ctx = crypto_aead_ctx(authenc); + struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); int err; err = crypto_rfc4106_check_authsize(authsize); @@ -808,7 +819,7 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc, static int rfc4106_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *dev = ctx->dev; int ret; @@ -833,7 +844,7 @@ static int rfc4106_setkey(struct crypto_aead *aead, static int rfc4543_set_sh_desc(struct crypto_aead *aead) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *dev = ctx->dev; unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_flc *flc; @@ -893,7 +904,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) static int rfc4543_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { - struct caam_ctx *ctx = crypto_aead_ctx(authenc); + struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); if (authsize != 16) return -EINVAL; @@ -907,7 +918,7 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc, static int rfc4543_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *dev = ctx->dev; int ret; @@ -933,7 +944,7 @@ static int rfc4543_setkey(struct crypto_aead *aead, static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen, const u32 ctx1_iv_off) { - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct caam_skcipher_alg *alg = container_of(crypto_skcipher_alg(skcipher), struct caam_skcipher_alg, skcipher); @@ -1052,14 +1063,26 @@ static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct device *dev = ctx->dev; + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev); struct caam_flc *flc; u32 *desc; + int err; - if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { - dev_err(dev, "key size mismatch\n"); - return -EINVAL; + err = xts_verify_key(skcipher, key, keylen); + if (err) { + dev_dbg(dev, "key size mismatch\n"); + return err; + } + + if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256) + ctx->xts_key_fallback = true; + + if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) { + err = crypto_skcipher_setkey(ctx->fallback, key, keylen); + if (err) + return err; } ctx->cdata.keylen = keylen; @@ -1090,10 +1113,10 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct caam_request *req_ctx = skcipher_request_ctx(req); + struct caam_request *req_ctx = skcipher_request_ctx_dma(req); struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct device *dev = ctx->dev; gfp_t flags = 
(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; @@ -1170,7 +1193,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req) } /* allocate space for base edesc, link tables and IV */ - edesc = qi_cache_zalloc(GFP_DMA | flags); + edesc = qi_cache_zalloc(flags); if (unlikely(!edesc)) { dev_err(dev, "could not allocate extended descriptor\n"); caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, @@ -1267,7 +1290,7 @@ static void aead_encrypt_done(void *cbk_ctx, u32 status) struct caam_request *req_ctx = to_caam_req(areq); struct aead_edesc *edesc = req_ctx->edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); int ecode = 0; dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); @@ -1288,7 +1311,7 @@ static void aead_decrypt_done(void *cbk_ctx, u32 status) struct caam_request *req_ctx = to_caam_req(areq); struct aead_edesc *edesc = req_ctx->edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); int ecode = 0; dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); @@ -1305,8 +1328,8 @@ static int aead_encrypt(struct aead_request *req) { struct aead_edesc *edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); - struct caam_request *caam_req = aead_request_ctx(req); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); + struct caam_request *caam_req = aead_request_ctx_dma(req); int ret; /* allocate extended descriptor */ @@ -1333,8 +1356,8 @@ static int aead_decrypt(struct aead_request *req) { struct aead_edesc *edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); - struct caam_request *caam_req = aead_request_ctx(req); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); + struct caam_request *caam_req = aead_request_ctx_dma(req); int ret; /* allocate extended descriptor */ @@ -1373,7 +1396,7 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status) struct skcipher_request *req = skcipher_request_cast(areq); struct caam_request *req_ctx = to_caam_req(areq); struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct skcipher_edesc *edesc = req_ctx->edesc; int ecode = 0; int ivsize = crypto_skcipher_ivsize(skcipher); @@ -1411,7 +1434,7 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status) struct skcipher_request *req = skcipher_request_cast(areq); struct caam_request *req_ctx = to_caam_req(areq); struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct skcipher_edesc *edesc = req_ctx->edesc; int ecode = 0; int ivsize = crypto_skcipher_ivsize(skcipher); @@ -1443,17 +1466,44 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status) skcipher_request_complete(req, ecode); } +static inline bool xts_skcipher_ivsize(struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + unsigned int ivsize = crypto_skcipher_ivsize(skcipher); + + return !!get_unaligned((u64 *)(req->iv + (ivsize / 2))); +} + static int skcipher_encrypt(struct skcipher_request *req) { struct skcipher_edesc *edesc; struct 
crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); - struct caam_request *caam_req = skcipher_request_ctx(req); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); + struct caam_request *caam_req = skcipher_request_ctx_dma(req); + struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev); int ret; - if (!req->cryptlen) + /* + * XTS is expected to return an error even for input length = 0 + * Note that the case input length < block size will be caught during + * HW offloading and return an error. + */ + if (!req->cryptlen && !ctx->fallback) return 0; + if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) || + ctx->xts_key_fallback)) { + skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback); + skcipher_request_set_callback(&caam_req->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&caam_req->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + + return crypto_skcipher_encrypt(&caam_req->fallback_req); + } + /* allocate extended descriptor */ edesc = skcipher_edesc_alloc(req); if (IS_ERR(edesc)) @@ -1478,12 +1528,32 @@ static int skcipher_decrypt(struct skcipher_request *req) { struct skcipher_edesc *edesc; struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); - struct caam_request *caam_req = skcipher_request_ctx(req); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); + struct caam_request *caam_req = skcipher_request_ctx_dma(req); + struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev); int ret; - if (!req->cryptlen) + /* + * XTS is expected to return an error even for input length = 0 + * Note that the case input length < block size will be caught during + * HW offloading and return an error. 
+ */ + if (!req->cryptlen && !ctx->fallback) return 0; + + if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) || + ctx->xts_key_fallback)) { + skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback); + skcipher_request_set_callback(&caam_req->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&caam_req->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + + return crypto_skcipher_decrypt(&caam_req->fallback_req); + } + /* allocate extended descriptor */ edesc = skcipher_edesc_alloc(req); if (IS_ERR(edesc)) @@ -1537,9 +1607,37 @@ static int caam_cra_init_skcipher(struct crypto_skcipher *tfm) struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct caam_skcipher_alg *caam_alg = container_of(alg, typeof(*caam_alg), skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); + u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; + int ret = 0; + + if (alg_aai == OP_ALG_AAI_XTS) { + const char *tfm_name = crypto_tfm_alg_name(&tfm->base); + struct crypto_skcipher *fallback; + + fallback = crypto_alloc_skcipher(tfm_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + dev_err(caam_alg->caam.dev, + "Failed to allocate %s fallback: %ld\n", + tfm_name, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + + ctx->fallback = fallback; + crypto_skcipher_set_reqsize_dma( + tfm, sizeof(struct caam_request) + + crypto_skcipher_reqsize(fallback)); + } else { + crypto_skcipher_set_reqsize_dma(tfm, + sizeof(struct caam_request)); + } + + ret = caam_cra_init(ctx, &caam_alg->caam, false); + if (ret && ctx->fallback) + crypto_free_skcipher(ctx->fallback); - crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request)); - return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false); + return ret; } static int caam_cra_init_aead(struct crypto_aead *tfm) @@ -1548,8 +1646,8 @@ static int caam_cra_init_aead(struct crypto_aead *tfm) struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg), aead); - crypto_aead_set_reqsize(tfm, sizeof(struct caam_request)); - return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam, + crypto_aead_set_reqsize_dma(tfm, sizeof(struct caam_request)); + return caam_cra_init(crypto_aead_ctx_dma(tfm), &caam_alg->caam, !caam_alg->caam.nodkp); } @@ -1562,12 +1660,16 @@ static void caam_exit_common(struct caam_ctx *ctx) static void caam_cra_exit(struct crypto_skcipher *tfm) { - caam_exit_common(crypto_skcipher_ctx(tfm)); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); + + if (ctx->fallback) + crypto_free_skcipher(ctx->fallback); + caam_exit_common(ctx); } static void caam_cra_exit_aead(struct crypto_aead *tfm) { - caam_exit_common(crypto_aead_ctx(tfm)); + caam_exit_common(crypto_aead_ctx_dma(tfm)); } static struct caam_skcipher_alg driver_algs[] = { @@ -1665,6 +1767,7 @@ static struct caam_skcipher_alg driver_algs[] = { .base = { .cra_name = "xts(aes)", .cra_driver_name = "xts-aes-caam-qi2", + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = xts_skcipher_setkey, @@ -2911,8 +3014,9 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; - alg->base.cra_ctxsize = sizeof(struct caam_ctx); - alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); + alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + 
CRYPTO_ALG_KERN_DRIVER_ONLY); alg->init = caam_cra_init_skcipher; alg->exit = caam_cra_exit; @@ -2924,8 +3028,9 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; - alg->base.cra_ctxsize = sizeof(struct caam_ctx); - alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_cra_init_aead; alg->exit = caam_cra_exit_aead; @@ -2949,7 +3054,7 @@ enum hash_optype { }; /** - * caam_hash_ctx - ahash per-session context + * struct caam_hash_ctx - ahash per-session context * @flc: Flow Contexts array * @key: authentication key * @flc_dma: I/O virtual addresses of the Flow Contexts @@ -3033,7 +3138,7 @@ static inline int ctx_map_to_qm_sg(struct device *dev, static int ahash_set_sh_desc(struct crypto_ahash *ahash) { - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int digestsize = crypto_ahash_digestsize(ahash); struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev); struct caam_flc *flc; @@ -3119,14 +3224,14 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, int ret = -ENOMEM; struct dpaa2_fl_entry *in_fle, *out_fle; - req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA); + req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL); if (!req_ctx) return -ENOMEM; in_fle = &req_ctx->fd_flt[1]; out_fle = &req_ctx->fd_flt[0]; - flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA); + flc = kzalloc(sizeof(*flc), GFP_KERNEL); if (!flc) goto err_flc; @@ -3206,7 +3311,7 @@ err_flc: static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key, unsigned int keylen) { - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base); unsigned int digestsize = crypto_ahash_digestsize(ahash); int ret; @@ -3215,7 +3320,13 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key, dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize); if (keylen > blocksize) { - hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); + unsigned int aligned_len = + ALIGN(keylen, dma_get_cache_alignment()); + + if (aligned_len < keylen) + return -EOVERFLOW; + + hashed_key = kmemdup(key, aligned_len, GFP_KERNEL); if (!hashed_key) return -ENOMEM; ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize); @@ -3257,7 +3368,7 @@ bad_free_key: static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc, struct ahash_request *req) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); if (edesc->src_nents) dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); @@ -3277,7 +3388,7 @@ static inline void ahash_unmap_ctx(struct device *dev, struct ahash_edesc *edesc, struct ahash_request *req, u32 flag) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); if (state->ctx_dma) { dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag); @@ -3291,9 +3402,9 @@ static void ahash_done(void *cbk_ctx, u32 status) struct crypto_async_request *areq = cbk_ctx; struct ahash_request *req = ahash_request_cast(areq); struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct 
caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct ahash_edesc *edesc = state->caam_req.edesc; - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int digestsize = crypto_ahash_digestsize(ahash); int ecode = 0; @@ -3310,7 +3421,7 @@ static void ahash_done(void *cbk_ctx, u32 status) DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); - req->base.complete(&req->base, ecode); + ahash_request_complete(req, ecode); } static void ahash_done_bi(void *cbk_ctx, u32 status) @@ -3318,9 +3429,9 @@ static void ahash_done_bi(void *cbk_ctx, u32 status) struct crypto_async_request *areq = cbk_ctx; struct ahash_request *req = ahash_request_cast(areq); struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct ahash_edesc *edesc = state->caam_req.edesc; - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int ecode = 0; dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); @@ -3348,7 +3459,7 @@ static void ahash_done_bi(void *cbk_ctx, u32 status) DUMP_PREFIX_ADDRESS, 16, 4, req->result, crypto_ahash_digestsize(ahash), 1); - req->base.complete(&req->base, ecode); + ahash_request_complete(req, ecode); } static void ahash_done_ctx_src(void *cbk_ctx, u32 status) @@ -3356,9 +3467,9 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status) struct crypto_async_request *areq = cbk_ctx; struct ahash_request *req = ahash_request_cast(areq); struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct ahash_edesc *edesc = state->caam_req.edesc; - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int digestsize = crypto_ahash_digestsize(ahash); int ecode = 0; @@ -3375,7 +3486,7 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status) DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); - req->base.complete(&req->base, ecode); + ahash_request_complete(req, ecode); } static void ahash_done_ctx_dst(void *cbk_ctx, u32 status) @@ -3383,9 +3494,9 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status) struct crypto_async_request *areq = cbk_ctx; struct ahash_request *req = ahash_request_cast(areq); struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct ahash_edesc *edesc = state->caam_req.edesc; - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int ecode = 0; dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); @@ -3413,14 +3524,14 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status) DUMP_PREFIX_ADDRESS, 16, 4, req->result, crypto_ahash_digestsize(ahash), 1); - req->base.complete(&req->base, ecode); + ahash_request_complete(req, ecode); } static int ahash_update_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); 
struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; @@ -3459,7 +3570,7 @@ static int ahash_update_ctx(struct ahash_request *req) } /* allocate space for base edesc and link tables */ - edesc = qi_cache_zalloc(GFP_DMA | flags); + edesc = qi_cache_zalloc(flags); if (!edesc) { dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); @@ -3538,8 +3649,8 @@ unmap_ctx: static int ahash_final_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; @@ -3553,7 +3664,7 @@ static int ahash_final_ctx(struct ahash_request *req) int ret; /* allocate space for base edesc and link tables */ - edesc = qi_cache_zalloc(GFP_DMA | flags); + edesc = qi_cache_zalloc(flags); if (!edesc) return -ENOMEM; @@ -3609,8 +3720,8 @@ unmap_ctx: static int ahash_finup_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; @@ -3642,7 +3753,7 @@ static int ahash_finup_ctx(struct ahash_request *req) } /* allocate space for base edesc and link tables */ - edesc = qi_cache_zalloc(GFP_DMA | flags); + edesc = qi_cache_zalloc(flags); if (!edesc) { dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; @@ -3703,8 +3814,8 @@ unmap_ctx: static int ahash_digest(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; @@ -3735,7 +3846,7 @@ static int ahash_digest(struct ahash_request *req) } /* allocate space for base edesc and link tables */ - edesc = qi_cache_zalloc(GFP_DMA | flags); + edesc = qi_cache_zalloc(flags); if (!edesc) { dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); return ret; @@ -3798,8 +3909,8 @@ unmap: static int ahash_final_no_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; @@ -3812,7 +3923,7 @@ static int ahash_final_no_ctx(struct ahash_request *req) int ret = -ENOMEM; /* allocate space for base edesc and link tables */ - 
edesc = qi_cache_zalloc(GFP_DMA | flags); + edesc = qi_cache_zalloc(flags); if (!edesc) return ret; @@ -3871,8 +3982,8 @@ unmap: static int ahash_update_no_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; @@ -3911,7 +4022,7 @@ static int ahash_update_no_ctx(struct ahash_request *req) } /* allocate space for base edesc and link tables */ - edesc = qi_cache_zalloc(GFP_DMA | flags); + edesc = qi_cache_zalloc(flags); if (!edesc) { dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); @@ -3992,8 +4103,8 @@ unmap_ctx: static int ahash_finup_no_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; @@ -4004,7 +4115,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; struct dpaa2_sg_entry *sg_table; - int ret; + int ret = -ENOMEM; src_nents = sg_nents_for_len(req->src, req->nbytes); if (src_nents < 0) { @@ -4017,17 +4128,17 @@ static int ahash_finup_no_ctx(struct ahash_request *req) DMA_TO_DEVICE); if (!mapped_nents) { dev_err(ctx->dev, "unable to DMA map source\n"); - return -ENOMEM; + return ret; } } else { mapped_nents = 0; } /* allocate space for base edesc and link tables */ - edesc = qi_cache_zalloc(GFP_DMA | flags); + edesc = qi_cache_zalloc(flags); if (!edesc) { dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); - return -ENOMEM; + return ret; } edesc->src_nents = src_nents; @@ -4082,14 +4193,14 @@ static int ahash_finup_no_ctx(struct ahash_request *req) unmap: ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); qi_cache_free(edesc); - return -ENOMEM; + return ret; } static int ahash_update_first(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; @@ -4129,7 +4240,7 @@ static int ahash_update_first(struct ahash_request *req) } /* allocate space for base edesc and link tables */ - edesc = qi_cache_zalloc(GFP_DMA | flags); + edesc = qi_cache_zalloc(flags); if (!edesc) { dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); @@ -4221,7 +4332,7 @@ static int ahash_finup_first(struct ahash_request *req) static int ahash_init(struct ahash_request *req) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); state->update = ahash_update_first; state->finup = 
ahash_finup_first; @@ -4238,28 +4349,28 @@ static int ahash_init(struct ahash_request *req) static int ahash_update(struct ahash_request *req) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); return state->update(req); } static int ahash_finup(struct ahash_request *req) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); return state->finup(req); } static int ahash_final(struct ahash_request *req) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); return state->final(req); } static int ahash_export(struct ahash_request *req, void *out) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_export_state *export = out; u8 *buf = state->buf; int len = state->buflen; @@ -4276,7 +4387,7 @@ static int ahash_export(struct ahash_request *req, void *out) static int ahash_import(struct ahash_request *req, const void *in) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); const struct caam_export_state *export = in; memset(state, 0, sizeof(*state)); @@ -4435,6 +4546,7 @@ struct caam_hash_alg { struct list_head entry; struct device *dev; int alg_type; + bool is_hmac; struct ahash_alg ahash_alg; }; @@ -4448,7 +4560,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) container_of(halg, struct ahash_alg, halg); struct caam_hash_alg *caam_hash = container_of(alg, struct caam_hash_alg, ahash_alg); - struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); + struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm); /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, HASH_MSG_LEN + SHA1_DIGEST_SIZE, @@ -4461,7 +4573,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) ctx->dev = caam_hash->dev; - if (alg->setkey) { + if (caam_hash->is_hmac) { ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key, ARRAY_SIZE(ctx->key), DMA_TO_DEVICE, @@ -4495,15 +4607,18 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT]; - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct caam_hash_state)); + crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state)); - return ahash_set_sh_desc(ahash); + /* + * For keyed hash algorithms shared descriptors + * will be created later in setkey() callback + */ + return caam_hash->is_hmac ? 
0 : ahash_set_sh_desc(ahash); } static void caam_hash_cra_exit(struct crypto_tfm *tfm) { - struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); + struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm); dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc), DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC); @@ -4533,21 +4648,23 @@ static struct caam_hash_alg *caam_hash_alloc(struct device *dev, template->hmac_name); snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", template->hmac_driver_name); + t_alg->is_hmac = true; } else { snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", template->driver_name); t_alg->ahash_alg.setkey = NULL; + t_alg->is_hmac = false; } alg->cra_module = THIS_MODULE; alg->cra_init = caam_hash_cra_init; alg->cra_exit = caam_hash_cra_exit; - alg->cra_ctxsize = sizeof(struct caam_hash_ctx); + alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding(); alg->cra_priority = CAAM_CRA_PRIORITY; alg->cra_blocksize = template->blocksize; alg->cra_alignmask = 0; - alg->cra_flags = CRYPTO_ALG_ASYNC; + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; t_alg->alg_type = template->alg_type; t_alg->dev = dev; @@ -4697,6 +4814,13 @@ static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv) { struct device *dev = priv->dev; struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); + int err; + + if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) { + err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle); + if (err) + dev_err(dev, "dpseci_reset() failed\n"); + } dpaa2_dpseci_congestion_free(priv); dpseci_close(priv->mc_io, 0, ls_dev->mc_handle); @@ -4815,6 +4939,7 @@ static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv, { struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 }; struct device *dev = priv->dev; + unsigned int alignmask; int err; /* @@ -4825,13 +4950,14 @@ static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv, !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG)) return 0; - priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN, - GFP_KERNEL | GFP_DMA); + alignmask = DPAA2_CSCN_ALIGN - 1; + alignmask |= dma_get_cache_alignment() - 1; + priv->cscn_mem = kzalloc(ALIGN(DPAA2_CSCN_SIZE, alignmask + 1), + GFP_KERNEL); if (!priv->cscn_mem) return -ENOMEM; - priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN); - priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned, + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(dev, priv->cscn_dma)) { dev_err(dev, "Error mapping CSCN memory area\n"); @@ -4865,14 +4991,30 @@ err_dma_map: return err; } +static void free_dpaa2_pcpu_netdev(struct dpaa2_caam_priv *priv, const cpumask_t *cpus) +{ + struct dpaa2_caam_priv_per_cpu *ppriv; + int i; + + for_each_cpu(i, cpus) { + ppriv = per_cpu_ptr(priv->ppriv, i); + free_netdev(ppriv->net_dev); + } +} + static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) { struct device *dev = &ls_dev->dev; struct dpaa2_caam_priv *priv; struct dpaa2_caam_priv_per_cpu *ppriv; + cpumask_var_t clean_mask; int err, cpu; u8 i; + err = -ENOMEM; + if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL)) + goto err_cpumask; + priv = dev_get_drvdata(dev); priv->dev = dev; @@ -4894,6 +5036,14 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver); + if (DPSECI_VER(priv->major_ver, 
priv->minor_ver) > DPSECI_VER(5, 3)) { + err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle); + if (err) { + dev_err(dev, "dpseci_reset() failed\n"); + goto err_get_vers; + } + } + err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle, &priv->dpseci_attr); if (err) { @@ -4963,19 +5113,32 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) priv->rx_queue_attr[j].fqid, priv->tx_queue_attr[j].fqid); - ppriv->net_dev.dev = *dev; - INIT_LIST_HEAD(&ppriv->net_dev.napi_list); - netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll, - DPAA2_CAAM_NAPI_WEIGHT); + ppriv->net_dev = alloc_netdev_dummy(0); + if (!ppriv->net_dev) { + err = -ENOMEM; + goto err_alloc_netdev; + } + cpumask_set_cpu(cpu, clean_mask); + ppriv->net_dev->dev = *dev; + + netif_napi_add_tx_weight(ppriv->net_dev, &ppriv->napi, + dpaa2_dpseci_poll, + DPAA2_CAAM_NAPI_WEIGHT); } - return 0; + err = 0; + goto free_cpumask; +err_alloc_netdev: + free_dpaa2_pcpu_netdev(priv, clean_mask); err_get_rx_queue: dpaa2_dpseci_congestion_free(priv); err_get_vers: dpseci_close(priv->mc_io, 0, ls_dev->mc_handle); err_open: +free_cpumask: + free_cpumask_var(clean_mask); +err_cpumask: return err; } @@ -5013,12 +5176,13 @@ static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv) return err; } - dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true"); + dev_dbg(dev, "disable: %s\n", str_false_true(enabled)); for (i = 0; i < priv->num_pairs; i++) { ppriv = per_cpu_ptr(priv->ppriv, i); napi_disable(&ppriv->napi); netif_napi_del(&ppriv->napi); + free_netdev(ppriv->net_dev); } return 0; @@ -5054,7 +5218,7 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev) priv->domain = iommu_get_domain_for_dev(dev); qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE, - 0, SLAB_CACHE_DMA, NULL); + 0, 0, NULL); if (!qi_cache) { dev_err(dev, "Can't allocate SEC cache\n"); return -ENOMEM; @@ -5094,8 +5258,7 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev) /* DPIO */ err = dpaa2_dpseci_dpio_setup(priv); if (err) { - if (err != -EPROBE_DEFER) - dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n"); + dev_err_probe(dev, err, "dpaa2_dpseci_dpio_setup() failed\n"); goto err_dpio_setup; } @@ -5221,7 +5384,7 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev) if (IS_ERR(t_alg)) { err = PTR_ERR(t_alg); dev_warn(dev, "%s hash alg allocation failed: %d\n", - alg->driver_name, err); + alg->hmac_driver_name, err); continue; } @@ -5273,7 +5436,7 @@ err_dma_mask: return err; } -static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev) +static void __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev) { struct device *dev; struct dpaa2_caam_priv *priv; @@ -5314,8 +5477,6 @@ static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev) free_percpu(priv->ppriv); fsl_mc_portal_free(priv->mc_io); kmem_cache_destroy(qi_cache); - - return 0; } int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req) @@ -5332,7 +5493,7 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req) dma_sync_single_for_cpu(priv->dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); - if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) { + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) { dev_dbg_ratelimited(dev, "Dropping request\n"); return -EBUSY; } @@ -5353,7 +5514,7 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req) dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1])); dpaa2_fd_set_flc(&fd, req->flc_dma); - ppriv = 
this_cpu_ptr(priv->ppriv); + ppriv = raw_cpu_ptr(priv->ppriv); for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) { err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid, &fd); @@ -5384,6 +5545,7 @@ static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = { }, { .vendor = 0x0 } }; +MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table); static struct fsl_mc_driver dpaa2_caam_driver = { .driver = { |
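
For readers following the XTS hunks scattered through the diff above, here is a condensed sketch of the fallback dispatch this patch introduces: a request is handed to a software xts(aes) fallback when the key length is one the CAAM cannot offload (anything other than 2x128-bit or 2x256-bit keys), or when, on SEC era <= 8 parts, the upper 64 bits of the IV are non-zero. Field and helper names (ctx->fallback, ctx->xts_key_fallback, caam_req->fallback_req, xts_skcipher_ivsize()) follow the driver; the hardware path and error handling are elided, so treat this as an illustration rather than a drop-in excerpt.

static int skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx_dma(req);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);

	/*
	 * XTS must still report an error for cryptlen == 0, so only the
	 * non-XTS algorithms (ctx->fallback == NULL) may short-circuit here.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;

	/*
	 * Fall back in software for unsupported key sizes, or on era <= 8
	 * engines when the upper 64 bits of the IV are non-zero
	 * (xts_skcipher_ivsize() checks the second half of req->iv).
	 */
	if (ctx->fallback &&
	    (ctx->xts_key_fallback ||
	     (priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)))) {
		/*
		 * Reuse the tail of the request context as the fallback
		 * request; its size was reserved in caam_cra_init_skcipher().
		 */
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return crypto_skcipher_encrypt(&caam_req->fallback_req);
	}

	/* ... hardware path: build and enqueue the extended descriptor ... */
	return -EINPROGRESS;
}

The decrypt path is symmetric, calling crypto_skcipher_decrypt() on the fallback request instead.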
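Many of the mechanical hunks above (crypto_*_ctx() becoming crypto_*_ctx_dma(), crypto_dma_padding() added to cra_ctxsize, *_set_reqsize_dma(), and the dropped GFP_DMA/SLAB_CACHE_DMA flags) are one conversion: instead of allocating from DMA-capable slabs, the driver lets the crypto API over-allocate the tfm and request contexts and hands out pointers aligned for non-coherent DMA. Below is a minimal sketch of that pattern under stated assumptions; the my_* names and "cbc-aes-example" string are hypothetical stand-ins, not the driver's structures, and several skcipher_alg fields required for real registration are omitted.

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/errno.h>
#include <linux/string.h>

/* hypothetical per-tfm and per-request contexts */
struct my_ctx {
	u8 key[64];
	unsigned int keylen;
};

struct my_req_ctx {
	u8 desc[64];	/* descriptors, S/G tables, ... */
};

static int my_init(struct crypto_skcipher *tfm)
{
	/* reserve a DMA-aligned request context instead of a plain one */
	crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct my_req_ctx));
	return 0;
}

static int my_setkey(struct crypto_skcipher *tfm, const u8 *key,
		     unsigned int keylen)
{
	/* the _dma accessor returns the context aligned for device access */
	struct my_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	if (keylen > sizeof(ctx->key))
		return -EINVAL;
	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	return 0;
}

static struct skcipher_alg my_alg = {
	.base = {
		.cra_name	 = "cbc(aes)",
		.cra_driver_name = "cbc-aes-example",
		.cra_blocksize	 = AES_BLOCK_SIZE,
		/* account for the padding the _dma accessors rely on */
		.cra_ctxsize	 = sizeof(struct my_ctx) + crypto_dma_padding(),
		.cra_flags	 = CRYPTO_ALG_ASYNC,
		/* .cra_module, keysize limits, .encrypt/.decrypt etc. omitted */
	},
	.init	= my_init,
	.setkey	= my_setkey,
};

With the contexts guaranteed to be suitably aligned by the core, the driver no longer needs GFP_DMA for its per-request allocations, which is why the qi_cache_zalloc() and kzalloc() call sites above drop that flag.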
