Diffstat (limited to 'drivers/crypto/chelsio/chcr_algo.c')
-rw-r--r--	drivers/crypto/chelsio/chcr_algo.c | 442
1 file changed, 155 insertions(+), 287 deletions(-)
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 4c2553672b6f..22cbc343198a 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -51,9 +51,9 @@
 #include <crypto/aes.h>
 #include <crypto/algapi.h>
-#include <crypto/hash.h>
 #include <crypto/gcm.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
 #include <crypto/authenc.h>
 #include <crypto/ctr.h>
 #include <crypto/gf128mul.h>
@@ -97,17 +97,17 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
 
 static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
 {
-	return ctx->crypto_ctx->aeadctx;
+	return &ctx->crypto_ctx->aeadctx;
 }
 
 static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
 {
-	return ctx->crypto_ctx->ablkctx;
+	return &ctx->crypto_ctx->ablkctx;
 }
 
 static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
 {
-	return ctx->crypto_ctx->hmacctx;
+	return &ctx->crypto_ctx->hmacctx;
 }
 
 static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
@@ -125,11 +125,6 @@ static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
 	return container_of(ctx->dev, struct uld_ctx, dev);
 }
 
-static inline int is_ofld_imm(const struct sk_buff *skb)
-{
-	return (skb->len <= SGE_MAX_WR_LEN);
-}
-
 static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
 {
 	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
@@ -214,7 +209,7 @@ static inline int chcr_handle_aead_resp(struct aead_request *req,
					 unsigned char *input,
					 int err)
 {
-	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct chcr_dev *dev = a_ctx(tfm)->dev;
 
@@ -224,7 +219,7 @@ static inline int chcr_handle_aead_resp(struct aead_request *req,
 		reqctx->verify = VERIFY_HW;
 	}
 	chcr_dec_wrcount(dev);
-	req->base.complete(&req->base, err);
+	aead_request_complete(req, err);
 
 	return err;
 }
@@ -281,88 +276,60 @@ static void get_aes_decrypt_key(unsigned char *dec_key,
 	}
 }
 
-static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
+static int chcr_prepare_hmac_key(const u8 *raw_key, unsigned int raw_key_len,
+				 int digestsize, void *istate, void *ostate)
 {
-	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
-
-	switch (ds) {
+	__be32 *istate32 = istate, *ostate32 = ostate;
+	__be64 *istate64 = istate, *ostate64 = ostate;
+	union {
+		struct hmac_sha1_key sha1;
+		struct hmac_sha224_key sha224;
+		struct hmac_sha256_key sha256;
+		struct hmac_sha384_key sha384;
+		struct hmac_sha512_key sha512;
+	} k;
+
+	switch (digestsize) {
 	case SHA1_DIGEST_SIZE:
-		base_hash = crypto_alloc_shash("sha1", 0, 0);
+		hmac_sha1_preparekey(&k.sha1, raw_key, raw_key_len);
+		for (int i = 0; i < ARRAY_SIZE(k.sha1.istate.h); i++) {
+			istate32[i] = cpu_to_be32(k.sha1.istate.h[i]);
+			ostate32[i] = cpu_to_be32(k.sha1.ostate.h[i]);
+		}
 		break;
 	case SHA224_DIGEST_SIZE:
-		base_hash = crypto_alloc_shash("sha224", 0, 0);
+		hmac_sha224_preparekey(&k.sha224, raw_key, raw_key_len);
+		for (int i = 0; i < ARRAY_SIZE(k.sha224.key.istate.h); i++) {
+			istate32[i] = cpu_to_be32(k.sha224.key.istate.h[i]);
+			ostate32[i] = cpu_to_be32(k.sha224.key.ostate.h[i]);
+		}
 		break;
 	case SHA256_DIGEST_SIZE:
-		base_hash = crypto_alloc_shash("sha256", 0, 0);
+		hmac_sha256_preparekey(&k.sha256, raw_key, raw_key_len);
+		for (int i = 0; i < ARRAY_SIZE(k.sha256.key.istate.h); i++) {
+			istate32[i] = cpu_to_be32(k.sha256.key.istate.h[i]);
+			ostate32[i] = cpu_to_be32(k.sha256.key.ostate.h[i]);
+		}
 		break;
 	case SHA384_DIGEST_SIZE:
-		base_hash = crypto_alloc_shash("sha384", 0, 0);
+		hmac_sha384_preparekey(&k.sha384, raw_key, raw_key_len);
+		for (int i = 0; i < ARRAY_SIZE(k.sha384.key.istate.h); i++) {
+			istate64[i] = cpu_to_be64(k.sha384.key.istate.h[i]);
+			ostate64[i] = cpu_to_be64(k.sha384.key.ostate.h[i]);
+		}
 		break;
 	case SHA512_DIGEST_SIZE:
-		base_hash = crypto_alloc_shash("sha512", 0, 0);
+		hmac_sha512_preparekey(&k.sha512, raw_key, raw_key_len);
+		for (int i = 0; i < ARRAY_SIZE(k.sha512.key.istate.h); i++) {
+			istate64[i] = cpu_to_be64(k.sha512.key.istate.h[i]);
+			ostate64[i] = cpu_to_be64(k.sha512.key.ostate.h[i]);
+		}
 		break;
+	default:
+		return -EINVAL;
 	}
-
-	return base_hash;
-}
-
-static int chcr_compute_partial_hash(struct shash_desc *desc,
-				     char *iopad, char *result_hash,
-				     int digest_size)
-{
-	struct sha1_state sha1_st;
-	struct sha256_state sha256_st;
-	struct sha512_state sha512_st;
-	int error;
-
-	if (digest_size == SHA1_DIGEST_SIZE) {
-		error = crypto_shash_init(desc) ?:
-			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
-			crypto_shash_export(desc, (void *)&sha1_st);
-		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
-	} else if (digest_size == SHA224_DIGEST_SIZE) {
-		error = crypto_shash_init(desc) ?:
-			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
-			crypto_shash_export(desc, (void *)&sha256_st);
-		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
-
-	} else if (digest_size == SHA256_DIGEST_SIZE) {
-		error = crypto_shash_init(desc) ?:
-			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
-			crypto_shash_export(desc, (void *)&sha256_st);
-		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
-
-	} else if (digest_size == SHA384_DIGEST_SIZE) {
-		error = crypto_shash_init(desc) ?:
-			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
-			crypto_shash_export(desc, (void *)&sha512_st);
-		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
-
-	} else if (digest_size == SHA512_DIGEST_SIZE) {
-		error = crypto_shash_init(desc) ?:
-			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
-			crypto_shash_export(desc, (void *)&sha512_st);
-		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
-	} else {
-		error = -EINVAL;
-		pr_err("Unknown digest size %d\n", digest_size);
-	}
-	return error;
-}
-
-static void chcr_change_order(char *buf, int ds)
-{
-	int i;
-
-	if (ds == SHA512_DIGEST_SIZE) {
-		for (i = 0; i < (ds / sizeof(u64)); i++)
-			*((__be64 *)buf + i) =
-				cpu_to_be64(*((u64 *)buf + i));
-	} else {
-		for (i = 0; i < (ds / sizeof(u32)); i++)
-			*((__be32 *)buf + i) =
-				cpu_to_be32(*((u32 *)buf + i));
-	}
+	memzero_explicit(&k, sizeof(k));
+	return 0;
 }
 
 static inline int is_hmac(struct crypto_tfm *tfm)
@@ -690,26 +657,22 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
 	return min(srclen, dstlen);
 }
 
-static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
-				u32 flags,
-				struct scatterlist *src,
-				struct scatterlist *dst,
-				unsigned int nbytes,
+static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
+				struct skcipher_request *req,
				u8 *iv,
				unsigned short op_type)
 {
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	int err;
 
-	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
+	skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
+	skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
+				      req->base.complete, req->base.data);
+	skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
+				   req->cryptlen, iv);
 
-	skcipher_request_set_sync_tfm(subreq, cipher);
-	skcipher_request_set_callback(subreq, flags, NULL, NULL);
-	skcipher_request_set_crypt(subreq, src, dst,
-				   nbytes, iv);
-
-	err = op_type ? crypto_skcipher_decrypt(subreq) :
-		crypto_skcipher_encrypt(subreq);
-	skcipher_request_zero(subreq);
+	err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
+		crypto_skcipher_encrypt(&reqctx->fallback_req);
 
 	return err;
 
@@ -726,7 +689,7 @@ static inline int get_qidxs(struct crypto_async_request *req,
	{
		struct aead_request *aead_req = container_of(req, struct aead_request, base);
-		struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
+		struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(aead_req);
		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
@@ -772,13 +735,14 @@ static inline void create_wreq(struct chcr_context *ctx,
 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 	unsigned int tx_channel_id, rx_channel_id;
 	unsigned int txqidx = 0, rxqidx = 0;
-	unsigned int qid, fid;
+	unsigned int qid, fid, portno;
 
 	get_qidxs(req, &txqidx, &rxqidx);
 	qid = u_ctx->lldi.rxq_ids[rxqidx];
 	fid = u_ctx->lldi.rxq_ids[0];
+	portno = rxqidx / ctx->rxq_perchan;
 	tx_channel_id = txqidx / ctx->txq_perchan;
-	rx_channel_id = rxqidx / ctx->rxq_perchan;
+	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
 
 	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
@@ -800,15 +764,13 @@ static inline void create_wreq(struct chcr_context *ctx,
 
 /**
  *	create_cipher_wr - form the WR for cipher operations
- *	@req: cipher req.
- *	@ctx: crypto driver context of the request.
- *	@qid: ingress qid where response of this WR should be received.
- *	@op_type: encryption or decryption
+ *	@wrparam: Container for create_cipher_wr()'s parameters
  */
 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
 	struct chcr_context *ctx = c_ctx(tfm);
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 	struct sk_buff *skb = NULL;
 	struct chcr_wr *chcr_req;
@@ -825,6 +787,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 	struct adapter *adap = padap(ctx->dev);
 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
 
+	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
 	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
 	dst_size = get_space_for_phys_dsgl(nents);
@@ -924,11 +887,11 @@ static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
 {
 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 
-	crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
+	crypto_skcipher_clear_flags(ablkctx->sw_cipher,
				    CRYPTO_TFM_REQ_MASK);
-	crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
+	crypto_skcipher_set_flags(ablkctx->sw_cipher,
				  cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
-	return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
+	return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
 }
 
@@ -1194,7 +1157,7 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
		else
			bytes = rounddown(bytes, 16);
	} else {
-		/*CTR mode counter overfloa*/
+		/*CTR mode counter overflow*/
		bytes = req->cryptlen - reqctx->processed;
	}
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
@@ -1206,13 +1169,8 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
					    req);
		memcpy(req->iv, reqctx->init_iv, IV);
		atomic_inc(&adap->chcr_stats.fallback);
-		err = chcr_cipher_fallback(ablkctx->sw_cipher,
-					   req->base.flags,
-					   req->src,
-					   req->dst,
-					   req->cryptlen,
-					   req->iv,
-					   reqctx->op);
+		err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
+					   reqctx->op);
		goto complete;
	}
 
@@ -1224,7 +1182,7 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
	wrparam.bytes = bytes;
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
-		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
+		pr_err("%s : Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
		goto unmap;
	}
@@ -1248,7 +1206,7 @@ complete:
		complete(&ctx->cbc_aes_aio_done);
	}
	chcr_dec_wrcount(dev);
-	req->base.complete(&req->base, err);
+	skcipher_request_complete(req, err);
	return err;
 }
 
@@ -1341,11 +1299,7 @@ static int process_cipher(struct skcipher_request *req,
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
fallback:       atomic_inc(&adap->chcr_stats.fallback);
-		err = chcr_cipher_fallback(ablkctx->sw_cipher,
-					   req->base.flags,
-					   req->src,
-					   req->dst,
-					   req->cryptlen,
+		err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
					   subtype ==
					   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
					   reqctx->iv : req->iv,
					   reqctx->op);
@@ -1486,14 +1440,15 @@ static int chcr_init_tfm(struct crypto_skcipher *tfm)
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 
-	ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
+	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
				CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	init_completion(&ctx->cbc_aes_aio_done);
-	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
+				    crypto_skcipher_reqsize(ablkctx->sw_cipher));
 
	return chcr_device_init(ctx);
 }
@@ -1507,13 +1462,14 @@ static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
	/*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
	 * cannot be used as fallback in chcr_handle_cipher_response
	 */
-	ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
+	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
				CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
-	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
+				    crypto_skcipher_reqsize(ablkctx->sw_cipher));
	return chcr_device_init(ctx);
 }
 
@@ -1523,7 +1479,7 @@ static void chcr_exit_tfm(struct crypto_skcipher *tfm)
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 
-	crypto_free_sync_skcipher(ablkctx->sw_cipher);
+	crypto_free_skcipher(ablkctx->sw_cipher);
 }
 
@@ -1556,20 +1512,16 @@ static int get_alg_config(struct algo_param *params,
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
-		pr_err("chcr : ERROR, unsupported digest size\n");
+		pr_err("ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
 }
 
-static inline void chcr_free_shash(struct crypto_shash *base_hash)
-{
-	crypto_free_shash(base_hash);
-}
-
 /**
  *	create_hash_wr - Create hash work request
- *	@req - Cipher req base
+ *	@req: Cipher req base
+ *	@param: Container for create_hash_wr()'s parameters
  */
 static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
@@ -1590,6 +1542,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
	int error = 0;
	unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
 
+	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
				param->sg_len) <= SGE_MAX_WR_LEN;
@@ -1933,6 +1886,9 @@ err:
	return error;
 }
 
+static int chcr_hmac_init(struct ahash_request *areq);
+static int chcr_sha_init(struct ahash_request *areq);
+
 static int chcr_ahash_digest(struct ahash_request *req)
 {
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
@@ -1951,7 +1907,11 @@ static int chcr_ahash_digest(struct ahash_request *req)
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();
 
-	rtfm->init(req);
+	if (is_hmac(crypto_ahash_tfm(rtfm)))
+		chcr_hmac_init(req);
+	else
+		chcr_sha_init(req);
+
	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	error = chcr_inc_wrcount(dev);
	if (error)
@@ -2145,7 +2105,7 @@ unmap:
 
 out:
	chcr_dec_wrcount(dev);
-	req->base.complete(&req->base, err);
+	ahash_request_complete(req, err);
 }
 
 /*
@@ -2208,52 +2168,13 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
 {
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
-	unsigned int digestsize = crypto_ahash_digestsize(tfm);
-	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
-	unsigned int i, err = 0, updated_digestsize;
-
-	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
 
	/* use the key to calculate the ipad and opad. ipad will sent with the
	 * first request's data. opad will be sent with the final hash result
	 * ipad in hmacctx->ipad and opad in hmacctx->opad location
	 */
-	shash->tfm = hmacctx->base_hash;
-	if (keylen > bs) {
-		err = crypto_shash_digest(shash, key, keylen,
-					  hmacctx->ipad);
-		if (err)
-			goto out;
-		keylen = digestsize;
-	} else {
-		memcpy(hmacctx->ipad, key, keylen);
-	}
-	memset(hmacctx->ipad + keylen, 0, bs - keylen);
-	memcpy(hmacctx->opad, hmacctx->ipad, bs);
-
-	for (i = 0; i < bs / sizeof(int); i++) {
-		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
-		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
-	}
-
-	updated_digestsize = digestsize;
-	if (digestsize == SHA224_DIGEST_SIZE)
-		updated_digestsize = SHA256_DIGEST_SIZE;
-	else if (digestsize == SHA384_DIGEST_SIZE)
-		updated_digestsize = SHA512_DIGEST_SIZE;
-	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
-					hmacctx->ipad, digestsize);
-	if (err)
-		goto out;
-	chcr_change_order(hmacctx->ipad, updated_digestsize);
-
-	err = chcr_compute_partial_hash(shash, hmacctx->opad,
-					hmacctx->opad, digestsize);
-	if (err)
-		goto out;
-	chcr_change_order(hmacctx->opad, updated_digestsize);
-out:
-	return err;
+	return chcr_prepare_hmac_key(key, keylen, crypto_ahash_digestsize(tfm),
+				     hmacctx->ipad, hmacctx->opad);
 }
 
 static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
@@ -2349,33 +2270,14 @@ static int chcr_hmac_init(struct ahash_request *areq)
 
 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
 {
-	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
-	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
-	unsigned int digestsize =
-		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
-
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
-	hmacctx->base_hash = chcr_alloc_shash(digestsize);
-	if (IS_ERR(hmacctx->base_hash))
-		return PTR_ERR(hmacctx->base_hash);
	return chcr_device_init(crypto_tfm_ctx(tfm));
 }
 
-static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
-{
-	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
-	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
-
-	if (hmacctx->base_hash) {
-		chcr_free_shash(hmacctx->base_hash);
-		hmacctx->base_hash = NULL;
-	}
-}
-
 inline void chcr_aead_common_exit(struct aead_request *req)
 {
-	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
@@ -2386,7 +2288,7 @@ static int chcr_aead_common_init(struct aead_request *req)
 {
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
-	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
@@ -2430,7 +2332,7 @@ static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
 {
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
-	struct aead_request *subreq = aead_request_ctx(req);
+	struct aead_request *subreq = aead_request_ctx_dma(req);
 
	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
	aead_request_set_callback(subreq, req->base.flags,
@@ -2448,9 +2350,10 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 {
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
-	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
@@ -2467,6 +2370,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
	struct adapter *adap = padap(ctx->dev);
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
 
+	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
	if (req->cryptlen == 0)
		return NULL;
@@ -2587,7 +2491,7 @@ int chcr_aead_dma_map(struct device *dev,
		      unsigned short op_type)
 {
	int error;
-	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int src_len, dst_len;
@@ -2648,7 +2552,7 @@ void chcr_aead_dma_unmap(struct device *dev,
			 struct aead_request *req,
			 unsigned short op_type)
 {
-	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int src_len, dst_len;
@@ -2689,7 +2593,7 @@ void chcr_add_aead_src_ent(struct aead_request *req,
			   struct ulptx_sgl *ulptx)
 {
	struct ulptx_walk ulp_walk;
-	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
 
	if (reqctx->imm) {
		u8 *buf = (u8 *)ulptx;
@@ -2715,14 +2619,16 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
			   struct cpl_rx_phys_dsgl *phys_cpl,
			   unsigned short qid)
 {
-	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct dsgl_walk dsgl_walk;
	unsigned int authsize = crypto_aead_authsize(tfm);
	struct chcr_context *ctx = a_ctx(tfm);
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	u32 temp;
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
 
+	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
	dsgl_walk_init(&dsgl_walk, phys_cpl);
	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
	temp = req->assoclen + req->cryptlen +
@@ -2762,9 +2668,11 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req,
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
	struct chcr_context *ctx = c_ctx(tfm);
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct dsgl_walk dsgl_walk;
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
 
+	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
	dsgl_walk_init(&dsgl_walk, phys_cpl);
	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
			 reqctx->dst_ofst);
@@ -2901,7 +2809,7 @@ static int generate_b0(struct aead_request *req, u8 *ivptr,
	unsigned int l, lp, m;
	int rc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	u8 *b0 = reqctx->scratch_pad;
 
	m = crypto_aead_authsize(aead);
@@ -2939,7 +2847,7 @@ static int ccm_format_packet(struct aead_request *req,
			     unsigned short op_type,
			     unsigned int assoclen)
 {
-	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	int rc = 0;
@@ -2968,8 +2876,9 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
 {
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
-	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
@@ -2977,6 +2886,8 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
	unsigned int tag_offset = 0, auth_offset = 0;
	unsigned int assoclen;
 
+	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+
	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen = req->assoclen - 8;
	else
@@ -3040,7 +2951,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 {
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
-	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
@@ -3137,8 +3048,9 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 {
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
-	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
@@ -3153,6 +3065,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
	struct adapter *adap = padap(ctx->dev);
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
 
+	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
		assoclen = req->assoclen - 8;
@@ -3257,9 +3170,10 @@ static int chcr_aead_cra_init(struct crypto_aead *tfm)
					CRYPTO_ALG_ASYNC);
	if (IS_ERR(aeadctx->sw_cipher))
		return PTR_ERR(aeadctx->sw_cipher);
-	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
-				 sizeof(struct aead_request) +
-				 crypto_aead_reqsize(aeadctx->sw_cipher)));
+	crypto_aead_set_reqsize_dma(
+		tfm, max(sizeof(struct chcr_aead_reqctx),
+			 sizeof(struct aead_request) +
+			 crypto_aead_reqsize(aeadctx->sw_cipher)));
	return chcr_device_init(a_ctx(tfm));
 }
 
@@ -3550,15 +3464,12 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* it contains auth and cipher key both*/
	struct crypto_authenc_keys keys;
-	unsigned int bs, subtype;
+	unsigned int subtype;
	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
-	int err = 0, i, key_ctx_len = 0;
+	int err = 0, key_ctx_len = 0;
	unsigned char ck_size = 0;
-	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
-	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
	struct algo_param param;
	int align;
-	u8 *o_ptr = NULL;
 
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
@@ -3571,7 +3482,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
		goto out;
 
	if (get_alg_config(&param, max_authsize)) {
-		pr_err("chcr : Unsupported digest size\n");
+		pr_err("Unsupported digest size\n");
		goto out;
	}
	subtype = get_aead_subtype(authenc);
@@ -3590,7 +3501,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
-		pr_err("chcr : Unsupported cipher key\n");
+		pr_err("Unsupported cipher key\n");
		goto out;
	}
 
@@ -3606,70 +3517,26 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
			get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
					    aeadctx->enckey_len << 3);
	}
-	base_hash = chcr_alloc_shash(max_authsize);
-	if (IS_ERR(base_hash)) {
-		pr_err("chcr : Base driver cannot be loaded\n");
-		aeadctx->enckey_len = 0;
-		memzero_explicit(&keys, sizeof(keys));
-		return -EINVAL;
-	}
-	{
-		SHASH_DESC_ON_STACK(shash, base_hash);
-
-		shash->tfm = base_hash;
-		bs = crypto_shash_blocksize(base_hash);
-		align = KEYCTX_ALIGN_PAD(max_authsize);
-		o_ptr = actx->h_iopad + param.result_size + align;
-
-		if (keys.authkeylen > bs) {
-			err = crypto_shash_digest(shash, keys.authkey,
-						  keys.authkeylen,
-						  o_ptr);
-			if (err) {
-				pr_err("chcr : Base driver cannot be loaded\n");
-				goto out;
-			}
-			keys.authkeylen = max_authsize;
-		} else
-			memcpy(o_ptr, keys.authkey, keys.authkeylen);
-
-		/* Compute the ipad-digest*/
-		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
-		memcpy(pad, o_ptr, keys.authkeylen);
-		for (i = 0; i < bs >> 2; i++)
-			*((unsigned int *)pad + i) ^= IPAD_DATA;
-
-		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
-					      max_authsize))
-			goto out;
-		/* Compute the opad-digest */
-		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
-		memcpy(pad, o_ptr, keys.authkeylen);
-		for (i = 0; i < bs >> 2; i++)
-			*((unsigned int *)pad + i) ^= OPAD_DATA;
-		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
-			goto out;
+	align = KEYCTX_ALIGN_PAD(max_authsize);
+	err = chcr_prepare_hmac_key(keys.authkey, keys.authkeylen, max_authsize,
+				    actx->h_iopad,
+				    actx->h_iopad + param.result_size + align);
+	if (err)
+		goto out;
+
+	key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16) +
+		      (param.result_size + align) * 2;
+	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size, 0, 1,
						key_ctx_len >> 4);
	actx->auth_mode = param.auth_mode;
+
+	memzero_explicit(&keys, sizeof(keys));
+	return 0;
 
-		/* convert the ipad and opad digest to network order */
-		chcr_change_order(actx->h_iopad, param.result_size);
-		chcr_change_order(o_ptr, param.result_size);
-		key_ctx_len = sizeof(struct _key_ctx) +
-			roundup(keys.enckeylen, 16) +
-			(param.result_size + align) * 2;
-		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
-						0, 1, key_ctx_len >> 4);
-		actx->auth_mode = param.auth_mode;
-		chcr_free_shash(base_hash);
-
-		memzero_explicit(&keys, sizeof(keys));
-		return 0;
-	}
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
-	if (!IS_ERR(base_hash))
-		chcr_free_shash(base_hash);
	return -EINVAL;
 }
 
@@ -3711,7 +3578,7 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
-		pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
+		pr_err("Unsupported cipher key %d\n", keys.enckeylen);
		goto out;
	}
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
@@ -3739,7 +3606,7 @@ static int chcr_aead_op(struct aead_request *req,
			create_wr_t create_wr_fn)
 {
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;
@@ -3747,7 +3614,7 @@ static int chcr_aead_op(struct aead_request *req,
 
	cdev = a_ctx(tfm)->dev;
	if (!cdev) {
-		pr_err("chcr : %s : No crypto device.\n", __func__);
+		pr_err("%s : No crypto device.\n", __func__);
		return -ENXIO;
	}
 
@@ -3789,7 +3656,7 @@ static int chcr_aead_encrypt(struct aead_request *req)
 {
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	struct chcr_context *ctx = a_ctx(tfm);
	unsigned int cpu;
@@ -3820,7 +3687,7 @@ static int chcr_aead_decrypt(struct aead_request *req)
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
-	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	int size;
	unsigned int cpu;
@@ -4445,6 +4312,7 @@ static int chcr_register_alg(void)
			driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
			driver_algs[i].alg.skcipher.base.cra_flags =
				CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+				CRYPTO_ALG_ALLOCATES_MEMORY |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.skcipher.base.cra_ctxsize =
				sizeof(struct chcr_context) +
@@ -4456,7 +4324,8 @@ static int chcr_register_alg(void)
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
-				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
+				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
+				CRYPTO_ALG_ALLOCATES_MEMORY;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
@@ -4476,13 +4345,13 @@ static int chcr_register_alg(void)
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
-			a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+			a_hash->halg.base.cra_flags =
+				CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;
 
			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
-				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
@@ -4497,8 +4366,7 @@ static int chcr_register_alg(void)
			break;
		}
		if (err) {
-			pr_err("chcr : %s : Algorithm registration failed\n",
-			       name);
+			pr_err("%s : Algorithm registration failed\n", name);
			goto register_err;
		} else {
			driver_algs[i].is_registered = 1;
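
Note on the HMAC change above: the patch drops the crypto_shash-based helpers (chcr_alloc_shash(), chcr_compute_partial_hash(), chcr_change_order()) in favour of one chcr_prepare_hmac_key() built on the HMAC library's *_preparekey() routines, which is also what lets chcr_hmac_cra_init() stop allocating a base_hash and lets chcr_hmac_cra_exit() go away entirely. Below is a minimal, hypothetical module sketch (not part of the patch) of the same pattern for the SHA-256 case only; it assumes the hmac_sha256_preparekey() interface and hmac_sha256_key layout exactly as used in the hunk above, and the module name and key value are purely illustrative.

/*
 * Hypothetical demo module: derive the HMAC-SHA256 ipad/opad partial states
 * once at key-setup time and byte-swap them into the big-endian layout a
 * device expects, mirroring the SHA256 case of chcr_prepare_hmac_key().
 */
#include <linux/module.h>
#include <linux/string.h>
#include <crypto/sha2.h>

static int __init hmac_prepare_demo_init(void)
{
	static const u8 raw_key[] = "demo key";	/* any length works */
	struct hmac_sha256_key k;
	__be32 istate[SHA256_DIGEST_SIZE / 4];
	__be32 ostate[SHA256_DIGEST_SIZE / 4];
	int i;

	/* Hashes an over-long key, zero-pads a short one, and runs the
	 * ipad/opad compression once each; no crypto_shash tfm needed. */
	hmac_sha256_preparekey(&k, raw_key, sizeof(raw_key) - 1);

	for (i = 0; i < ARRAY_SIZE(k.key.istate.h); i++) {
		istate[i] = cpu_to_be32(k.key.istate.h[i]);
		ostate[i] = cpu_to_be32(k.key.ostate.h[i]);
	}
	pr_info("istate[0]=%08x ostate[0]=%08x\n",
		be32_to_cpu(istate[0]), be32_to_cpu(ostate[0]));

	memzero_explicit(&k, sizeof(k));	/* scrub expanded key material */
	return 0;
}

static void __exit hmac_prepare_demo_exit(void)
{
}

module_init(hmac_prepare_demo_init);
module_exit(hmac_prepare_demo_exit);
MODULE_DESCRIPTION("HMAC preparekey demo (illustrative only)");
MODULE_LICENSE("GPL");

Compared with the removed chcr_compute_partial_hash() path, this performs the same single ipad and opad compression per key, but without allocating a software shash transform per digest size, and the explicit cpu_to_be32()/cpu_to_be64() conversion replaces the old chcr_change_order() pass.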

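Note on the fallback change: the on-stack sync skcipher fallback (SYNC_SKCIPHER_REQUEST_ON_STACK) becomes an async crypto_skcipher whose request is embedded in the driver's own request context, which is why chcr_init_tfm() and chcr_rfc3686_init() now add crypto_skcipher_reqsize(ablkctx->sw_cipher) to the tfm's reqsize. A condensed, hypothetical sketch of that pattern follows (invented my_* names; only skcipher API calls that appear in the diff itself):

/* Hypothetical sketch, not driver code: embed the fallback request at the
 * tail of the driver's per-request context. The fallback_req member must be
 * last, since the fallback tfm's own context trails it in the same space. */
#include <crypto/skcipher.h>
#include <crypto/internal/skcipher.h>

struct my_req_ctx {
	/* ... driver-private per-request state ... */
	struct skcipher_request fallback_req;	/* must be last */
};

static void my_init_reqsize(struct crypto_skcipher *tfm,
			    struct crypto_skcipher *sw_cipher)
{
	/* Reserve our context plus the fallback request's context. */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct my_req_ctx) +
				    crypto_skcipher_reqsize(sw_cipher));
}

static int my_fallback(struct skcipher_request *req,
		       struct crypto_skcipher *sw_cipher, u8 *iv, bool decrypt)
{
	struct my_req_ctx *rctx = skcipher_request_ctx(req);

	skcipher_request_set_tfm(&rctx->fallback_req, sw_cipher);
	/* Forward the caller's flags and completion so an async fallback
	 * completes the original request directly. */
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, iv);

	return decrypt ? crypto_skcipher_decrypt(&rctx->fallback_req) :
			 crypto_skcipher_encrypt(&rctx->fallback_req);
}

Because the fallback request reuses the original request's flags, completion callback, and callback data, an asynchronous fallback completes the caller's request directly, with no on-stack request and no extra completion hop, at the cost of reserving the fallback's context space in every request.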