Diffstat (limited to 'drivers/crypto/bcm/cipher.c')
-rw-r--r--	drivers/crypto/bcm/cipher.c	403
1 file changed, 138 insertions(+), 265 deletions(-)
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index a353217a0d33..6b80d033648e 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -15,8 +15,8 @@
 #include <linux/kthread.h>
 #include <linux/rtnetlink.h>
 #include <linux/sched.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/string_choices.h>
+#include <linux/of.h>
 #include <linux/io.h>
 #include <linux/bitops.h>
 
@@ -26,11 +26,12 @@
 #include <crypto/aes.h>
 #include <crypto/internal/des.h>
 #include <crypto/hmac.h>
-#include <crypto/sha.h>
 #include <crypto/md5.h>
 #include <crypto/authenc.h>
 #include <crypto/skcipher.h>
 #include <crypto/hash.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
 #include <crypto/sha3.h>
 
 #include "util.h"
@@ -41,7 +42,7 @@
 
 /* ================= Device Structure ================== */
 
-struct device_private iproc_priv;
+struct bcm_device_private iproc_priv;
 
 /* ==================== Parameters ===================== */
 
@@ -140,8 +141,8 @@ spu_skcipher_rx_sg_create(struct brcm_message *mssg,
 	struct iproc_ctx_s *ctx = rctx->ctx;
 	u32 datalen;		/* Number of bytes of response data expected */
 
-	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
-				rctx->gfp);
+	mssg->spu.dst = kmalloc_array(rx_frag_num, sizeof(struct scatterlist),
+				      rctx->gfp);
 	if (!mssg->spu.dst)
 		return -ENOMEM;
 
@@ -165,10 +166,6 @@ spu_skcipher_rx_sg_create(struct brcm_message *mssg,
 			return -EFAULT;
 	}
 
-	if (ctx->cipher.alg == CIPHER_ALG_RC4)
-		/* Add buffer to catch 260-byte SUPDT field for RC4 */
-		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, SPU_SUPDT_LEN);
-
 	if (stat_pad_len)
 		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
 
@@ -208,8 +205,8 @@ spu_skcipher_tx_sg_create(struct brcm_message *mssg,
 	u32 datalen;		/* Number of bytes of response data expected */
 	u32 stat_len;
 
-	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
-				rctx->gfp);
+	mssg->spu.src = kmalloc_array(tx_frag_num, sizeof(struct scatterlist),
+				      rctx->gfp);
 	if (unlikely(!mssg->spu.src))
 		return -ENOMEM;
 
@@ -317,7 +314,6 @@ static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
 	u8 local_iv_ctr[MAX_IV_SIZE];
 	u32 stat_pad_len;	/* num bytes to align status field */
 	u32 pad_len;		/* total length of all padding */
-	bool update_key = false;
 	struct brcm_message *mssg;	/* mailbox message */
 
 	/* number of entries in src and dst sg in mailbox message. */
@@ -391,28 +387,6 @@ static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
 		}
 	}
 
-	if (ctx->cipher.alg == CIPHER_ALG_RC4) {
-		rx_frag_num++;
-		if (chunk_start) {
-			/*
-			 * for non-first RC4 chunks, use SUPDT from previous
-			 * response as key for this chunk.
-			 */
-			cipher_parms.key_buf = rctx->msg_buf.c.supdt_tweak;
-			update_key = true;
-			cipher_parms.type = CIPHER_TYPE_UPDT;
-		} else if (!rctx->is_encrypt) {
-			/*
-			 * First RC4 chunk. For decrypt, key in pre-built msg
-			 * header may have been changed if encrypt required
-			 * multiple chunks. So revert the key to the
-			 * ctx->enckey value.
-			 */
-			update_key = true;
-			cipher_parms.type = CIPHER_TYPE_INIT;
-		}
-	}
-
 	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
 		flow_log("max_payload infinite\n");
 	else
@@ -425,14 +399,9 @@ static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
 	memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
 	       sizeof(rctx->msg_buf.bcm_spu_req_hdr));
 
-	/*
-	 * Pass SUPDT field as key. Key field in finish() call is only used
-	 * when update_key has been set above for RC4. Will be ignored in
-	 * all other cases.
-	 */
 	spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
 				   ctx->spu_req_hdr_len, !(rctx->is_encrypt),
-				   &cipher_parms, update_key, chunksize);
+				   &cipher_parms, chunksize);
 
 	atomic64_add(chunksize, &iproc_priv.bytes_out);
 
@@ -502,10 +471,8 @@ static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
 static void handle_skcipher_resp(struct iproc_reqctx_s *rctx)
 {
 	struct spu_hw *spu = &iproc_priv.spu;
-#ifdef DEBUG
 	struct crypto_async_request *areq = rctx->parent;
 	struct skcipher_request *req = skcipher_request_cast(areq);
-#endif
 	struct iproc_ctx_s *ctx = rctx->ctx;
 	u32 payload_len;
 
@@ -527,9 +494,6 @@ static void handle_skcipher_resp(struct iproc_reqctx_s *rctx)
 		 __func__, rctx->total_received, payload_len);
 
 	dump_sg(req->dst, rctx->total_received, payload_len);
-	if (ctx->cipher.alg == CIPHER_ALG_RC4)
-		packet_dump("  supdt ", rctx->msg_buf.c.supdt_tweak,
-			    SPU_SUPDT_LEN);
 
 	rctx->total_received += payload_len;
 	if (rctx->total_received == rctx->total_todo) {
@@ -568,8 +532,8 @@ spu_ahash_rx_sg_create(struct brcm_message *mssg,
 	struct scatterlist *sg;	/* used to build sgs in mbox message */
 	struct iproc_ctx_s *ctx = rctx->ctx;
 
-	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
-				rctx->gfp);
+	mssg->spu.dst = kmalloc_array(rx_frag_num, sizeof(struct scatterlist),
+				      rctx->gfp);
 	if (!mssg->spu.dst)
 		return -ENOMEM;
 
@@ -623,8 +587,8 @@ spu_ahash_tx_sg_create(struct brcm_message *mssg,
 	u32 datalen;		/* Number of bytes of response data expected */
 	u32 stat_len;
 
-	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
-				rctx->gfp);
+	mssg->spu.src = kmalloc_array(tx_frag_num, sizeof(struct scatterlist),
+				      rctx->gfp);
 	if (!mssg->spu.src)
 		return -ENOMEM;
 
@@ -1030,13 +994,11 @@ static int ahash_req_done(struct iproc_reqctx_s *rctx)
 static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
 {
 	struct iproc_ctx_s *ctx = rctx->ctx;
-#ifdef DEBUG
 	struct crypto_async_request *areq = rctx->parent;
 	struct ahash_request *req = ahash_request_cast(areq);
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
 	unsigned int blocksize =
 		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
-#endif
 	/*
 	 * Save hash to use as input to next op if incremental. Might be copying
 	 * too much, but that's easier than figuring out actual digest size here
@@ -1057,6 +1019,7 @@ static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
  * a SPU response message for an AEAD request. Includes buffers to catch SPU
  * message headers and the response data.
  * @mssg:	mailbox message containing the receive sg
+ * @req:	Crypto API request
  * @rctx:	crypto request context
  * @rx_frag_num: number of scatterlist elements required to hold the
  *		SPU response message
@@ -1114,8 +1077,8 @@ static int spu_aead_rx_sg_create(struct brcm_message *mssg,
 		/* have to catch gcm pad in separate buffer */
 		rx_frag_num++;
 
-	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
-				rctx->gfp);
+	mssg->spu.dst = kmalloc_array(rx_frag_num, sizeof(struct scatterlist),
+				      rctx->gfp);
 	if (!mssg->spu.dst)
 		return -ENOMEM;
 
@@ -1216,8 +1179,8 @@ static int spu_aead_tx_sg_create(struct brcm_message *mssg,
 	u32 assoc_offset = 0;
 	u32 stat_len;
 
-	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
-				rctx->gfp);
+	mssg->spu.src = kmalloc_array(tx_frag_num, sizeof(struct scatterlist),
+				      rctx->gfp);
 	if (!mssg->spu.src)
 		return -ENOMEM;
 
@@ -1651,7 +1614,7 @@ static void finish_req(struct iproc_reqctx_s *rctx, int err)
 	spu_chunk_cleanup(rctx);
 
 	if (areq)
-		areq->complete(areq, err);
+		crypto_request_complete(areq, err);
 }
 
 /**
@@ -1853,26 +1816,6 @@ static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
 	return 0;
 }
 
-static int rc4_setkey(struct crypto_skcipher *cipher, const u8 *key,
-		      unsigned int keylen)
-{
-	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
-	int i;
-
-	ctx->enckeylen = ARC4_MAX_KEY_SIZE + ARC4_STATE_SIZE;
-
-	ctx->enckey[0] = 0x00;	/* 0x00 */
-	ctx->enckey[1] = 0x00;	/* i */
-	ctx->enckey[2] = 0x00;	/* 0x00 */
-	ctx->enckey[3] = 0x00;	/* j */
-	for (i = 0; i < ARC4_MAX_KEY_SIZE; i++)
-		ctx->enckey[i + ARC4_STATE_SIZE] = key[i % keylen];
-
-	ctx->cipher_type = CIPHER_TYPE_INIT;
-
-	return 0;
-}
-
 static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
 			   unsigned int keylen)
 {
@@ -1895,9 +1838,6 @@ static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
 	case CIPHER_ALG_AES:
 		err = aes_setkey(cipher, key, keylen);
 		break;
-	case CIPHER_ALG_RC4:
-		err = rc4_setkey(cipher, key, keylen);
-		break;
 	default:
 		pr_err("%s() Error: unknown cipher alg\n", __func__);
 		err = -EINVAL;
@@ -1905,11 +1845,9 @@ static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
 	if (err)
 		return err;
 
-	/* RC4 already populated ctx->enkey */
-	if (ctx->cipher.alg != CIPHER_ALG_RC4) {
-		memcpy(ctx->enckey, key, keylen);
-		ctx->enckeylen = keylen;
-	}
+	memcpy(ctx->enckey, key, keylen);
+	ctx->enckeylen = keylen;
+
 	/* SPU needs XTS keys in the reverse order the crypto API presents */
 	if ((ctx->cipher.alg == CIPHER_ALG_AES) &&
 	    (ctx->cipher.mode == CIPHER_MODE_XTS)) {
@@ -1990,7 +1928,7 @@ static int ahash_enqueue(struct ahash_request *req)
 	/* SPU2 hardware does not compute hash of zero length data */
 	if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
 	    (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
-		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
+		alg_name = crypto_ahash_alg_name(tfm);
 		flow_log("Doing %sfinal %s zero-len hash request in software\n",
 			 rctx->is_final ? "" : "non-", alg_name);
 		err = do_shash((unsigned char *)alg_name, req->result,
@@ -2091,7 +2029,7 @@ static int ahash_init(struct ahash_request *req)
 		 * supported by the hardware, we need to handle it in software
 		 * by calling synchronous hash functions.
 		 */
-		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
+		alg_name = crypto_ahash_alg_name(tfm);
 		hash = crypto_alloc_shash(alg_name, 0, 0);
 		if (IS_ERR(hash)) {
 			ret = PTR_ERR(hash);
@@ -2459,7 +2397,8 @@ static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
 		memset(ctx->ipad + ctx->authkeylen, 0,
 		       blocksize - ctx->authkeylen);
 		ctx->authkeylen = 0;
-		memcpy(ctx->opad, ctx->ipad, blocksize);
+		unsafe_memcpy(ctx->opad, ctx->ipad, blocksize,
+			      "fortified memcpy causes -Wrestrict warning");
 
 		for (index = 0; index < blocksize; index++) {
 			ctx->ipad[index] ^= HMAC_IPAD_VALUE;
@@ -2477,6 +2416,7 @@ static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
 
 static int ahash_hmac_init(struct ahash_request *req)
 {
+	int ret;
 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
@@ -2486,7 +2426,9 @@ static int ahash_hmac_init(struct ahash_request *req)
 	flow_log("ahash_hmac_init()\n");
 
 	/* init the context as a hash */
-	ahash_init(req);
+	ret = ahash_init(req);
+	if (ret)
+		return ret;
 
 	if (!spu_no_incr_hash(ctx)) {
 		/* SPU-M can do incr hashing but needs sw for outer HMAC */
@@ -2632,66 +2574,29 @@ static int aead_need_fallback(struct aead_request *req)
 	return payload_len > ctx->max_payload;
 }
 
-static void aead_complete(struct crypto_async_request *areq, int err)
-{
-	struct aead_request *req =
-	    container_of(areq, struct aead_request, base);
-	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-
-	flow_log("%s() err:%d\n", __func__, err);
-
-	areq->tfm = crypto_aead_tfm(aead);
-
-	areq->complete = rctx->old_complete;
-	areq->data = rctx->old_data;
-
-	areq->complete(areq, err);
-}
-
 static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
-	int err;
-	u32 req_flags;
+	struct aead_request *subreq;
 
 	flow_log("%s() enc:%u\n", __func__, is_encrypt);
 
-	if (ctx->fallback_cipher) {
-		/* Store the cipher tfm and then use the fallback tfm */
-		rctx->old_tfm = tfm;
-		aead_request_set_tfm(req, ctx->fallback_cipher);
-		/*
-		 * Save the callback and chain ourselves in, so we can restore
-		 * the tfm
-		 */
-		rctx->old_complete = req->base.complete;
-		rctx->old_data = req->base.data;
-		req_flags = aead_request_flags(req);
-		aead_request_set_callback(req, req_flags, aead_complete, req);
-		err = is_encrypt ? crypto_aead_encrypt(req) :
-		    crypto_aead_decrypt(req);
-
-		if (err == 0) {
-			/*
-			 * fallback was synchronous (did not return
-			 * -EINPROGRESS). So restore request state here.
-			 */
-			aead_request_set_callback(req, req_flags,
-						  rctx->old_complete, req);
-			req->base.data = rctx->old_data;
-			aead_request_set_tfm(req, aead);
-			flow_log("%s() fallback completed successfully\n\n",
-				 __func__);
-		}
-	} else {
-		err = -EINVAL;
-	}
+	if (!ctx->fallback_cipher)
+		return -EINVAL;
 
-	return err;
+	subreq = &rctx->req;
+	aead_request_set_tfm(subreq, ctx->fallback_cipher);
+	aead_request_set_callback(subreq, aead_request_flags(req),
+				  req->base.complete, req->base.data);
+	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+			       req->iv);
+	aead_request_set_ad(subreq, req->assoclen);
+
+	return is_encrypt ? crypto_aead_encrypt(subreq) :
+			    crypto_aead_decrypt(subreq);
 }
 
 static int aead_enqueue(struct aead_request *req, bool is_encrypt)
@@ -2783,7 +2688,7 @@
 	flow_log("  iv_ctr_len:%u\n", rctx->iv_ctr_len);
 	flow_dump("  iv: ", req->iv, rctx->iv_ctr_len);
 	flow_log("  authkeylen:%u\n", ctx->authkeylen);
-	flow_log("  is_esp: %s\n", ctx->is_esp ? "yes" : "no");
+	flow_log("  is_esp: %s\n", str_yes_no(ctx->is_esp));
 
 	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
 		flow_log("  max_payload infinite");
@@ -2872,9 +2777,6 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
 			goto badkey;
 		}
 		break;
-	case CIPHER_ALG_RC4:
-		ctx->cipher_type = CIPHER_TYPE_INIT;
-		break;
 	default:
 		pr_err("%s() Error: Unknown cipher alg\n", __func__);
 		return -EINVAL;
@@ -2930,7 +2832,6 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
 
 	ctx->enckeylen = keylen;
 	ctx->authkeylen = 0;
-	memcpy(ctx->enckey, key, ctx->enckeylen);
 
 	switch (ctx->enckeylen) {
 	case AES_KEYSIZE_128:
@@ -2946,6 +2847,8 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
 		goto badkey;
 	}
 
+	memcpy(ctx->enckey, key, ctx->enckeylen);
+
 	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
 		 ctx->authkeylen);
 	flow_dump("  enc: ", ctx->enckey, ctx->enckeylen);
@@ -3000,6 +2903,10 @@ static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
 
 	flow_log("%s\n", __func__);
+
+	if (keylen < GCM_ESP_SALT_SIZE)
+		return -EINVAL;
+
 	ctx->salt_len = GCM_ESP_SALT_SIZE;
 	ctx->salt_offset = GCM_ESP_SALT_OFFSET;
 	memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
@@ -3013,9 +2920,9 @@
 
 /**
  * rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC.
- * cipher: AEAD structure
- * key: Key followed by 4 bytes of salt
- * keylen: Length of key plus salt, in bytes
+ * @cipher: AEAD structure
+ * @key: Key followed by 4 bytes of salt
+ * @keylen: Length of key plus salt, in bytes
  *
 * Extracts salt from key and stores it to be prepended to IV on each request.
 * Digest is always 16 bytes
@@ -3028,6 +2935,10 @@ static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
 
 	flow_log("%s\n", __func__);
+
+	if (keylen < GCM_ESP_SALT_SIZE)
+		return -EINVAL;
+
 	ctx->salt_len = GCM_ESP_SALT_SIZE;
 	ctx->salt_offset = GCM_ESP_SALT_OFFSET;
 	memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
@@ -3057,6 +2968,10 @@ static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
 
 	flow_log("%s\n", __func__);
+
+	if (keylen < CCM_ESP_SALT_SIZE)
+		return -EINVAL;
+
 	ctx->salt_len = CCM_ESP_SALT_SIZE;
 	ctx->salt_offset = CCM_ESP_SALT_OFFSET;
 	memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
@@ -3233,7 +3148,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(md5),cbc(aes))",
 			.cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
 			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		 },
 		 .setkey = aead_authenc_setkey,
 		 .ivsize = AES_BLOCK_SIZE,
@@ -3256,7 +3173,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha1),cbc(aes))",
 			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
 			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		 },
 		 .setkey = aead_authenc_setkey,
 		 .ivsize = AES_BLOCK_SIZE,
@@ -3279,7 +3198,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha256),cbc(aes))",
 			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
 			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		 },
 		 .setkey = aead_authenc_setkey,
 		 .ivsize = AES_BLOCK_SIZE,
@@ -3302,7 +3223,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(md5),cbc(des))",
 			.cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
 			.cra_blocksize = DES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		 },
 		 .setkey = aead_authenc_setkey,
 		 .ivsize = DES_BLOCK_SIZE,
@@ -3325,7 +3248,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha1),cbc(des))",
 			.cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
 			.cra_blocksize = DES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		 },
 		 .setkey = aead_authenc_setkey,
 		 .ivsize = DES_BLOCK_SIZE,
@@ -3348,7 +3273,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha224),cbc(des))",
 			.cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
 			.cra_blocksize = DES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		 },
 		 .setkey = aead_authenc_setkey,
 		 .ivsize = DES_BLOCK_SIZE,
@@ -3371,7 +3298,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha256),cbc(des))",
 			.cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
 			.cra_blocksize = DES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		 },
 		 .setkey = aead_authenc_setkey,
 		 .ivsize = DES_BLOCK_SIZE,
@@ -3394,7 +3323,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha384),cbc(des))",
 			.cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
 			.cra_blocksize = DES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		 },
 		 .setkey = aead_authenc_setkey,
 		 .ivsize = DES_BLOCK_SIZE,
@@ -3417,7 +3348,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha512),cbc(des))",
 			.cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
 			.cra_blocksize = DES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		 },
 		 .setkey = aead_authenc_setkey,
 		 .ivsize = DES_BLOCK_SIZE,
@@ -3440,7 +3373,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
 			.cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		 },
 		 .setkey = aead_authenc_setkey,
 		 .ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3463,7 +3398,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
 			.cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		 },
 		 .setkey = aead_authenc_setkey,
 		 .ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3486,7 +3423,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
 			.cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		 },
 		 .setkey = aead_authenc_setkey,
 		 .ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3509,7 +3448,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
 			.cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		 },
 		 .setkey = aead_authenc_setkey,
 		 .ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3532,7 +3473,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
 			.cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		 },
 		 .setkey = aead_authenc_setkey,
 		 .ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3555,7 +3498,9 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
 			.cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY
 		 },
 		 .setkey = aead_authenc_setkey,
 		 .ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3576,44 +3521,6 @@ static struct iproc_alg_s driver_algs[] = {
 	{
 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
 	 .alg.skcipher = {
-			.base.cra_name = "ecb(arc4)",
-			.base.cra_driver_name = "ecb-arc4-iproc",
-			.base.cra_blocksize = ARC4_BLOCK_SIZE,
-			.min_keysize = ARC4_MIN_KEY_SIZE,
-			.max_keysize = ARC4_MAX_KEY_SIZE,
-			.ivsize = 0,
-			},
-	 .cipher_info = {
-			 .alg = CIPHER_ALG_RC4,
-			 .mode = CIPHER_MODE_NONE,
-			 },
-	 .auth_info = {
-		       .alg = HASH_ALG_NONE,
-		       .mode = HASH_MODE_NONE,
-		       },
-	 },
-	{
-	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
-	 .alg.skcipher = {
-			.base.cra_name = "ofb(des)",
-			.base.cra_driver_name = "ofb-des-iproc",
-			.base.cra_blocksize = DES_BLOCK_SIZE,
-			.min_keysize = DES_KEY_SIZE,
-			.max_keysize = DES_KEY_SIZE,
-			.ivsize = DES_BLOCK_SIZE,
-			},
-	 .cipher_info = {
-			 .alg = CIPHER_ALG_DES,
-			 .mode = CIPHER_MODE_OFB,
-			 },
-	 .auth_info = {
-		       .alg = HASH_ALG_NONE,
-		       .mode = HASH_MODE_NONE,
-		       },
-	 },
-	{
-	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
-	 .alg.skcipher = {
 			.base.cra_name = "cbc(des)",
 			.base.cra_driver_name = "cbc-des-iproc",
 			.base.cra_blocksize = DES_BLOCK_SIZE,
@@ -3652,25 +3559,6 @@ static struct iproc_alg_s driver_algs[] = {
 	{
 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
 	 .alg.skcipher = {
-			.base.cra_name = "ofb(des3_ede)",
-			.base.cra_driver_name = "ofb-des3-iproc",
-			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.min_keysize = DES3_EDE_KEY_SIZE,
-			.max_keysize = DES3_EDE_KEY_SIZE,
-			.ivsize = DES3_EDE_BLOCK_SIZE,
-			},
-	 .cipher_info = {
-			 .alg = CIPHER_ALG_3DES,
-			 .mode = CIPHER_MODE_OFB,
-			 },
-	 .auth_info = {
-		       .alg = HASH_ALG_NONE,
-		       .mode = HASH_MODE_NONE,
-		       },
-	 },
-	{
-	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
-	 .alg.skcipher = {
 			.base.cra_name = "cbc(des3_ede)",
 			.base.cra_driver_name = "cbc-des3-iproc",
 			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -3709,25 +3597,6 @@ static struct iproc_alg_s driver_algs[] = {
 	{
 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
 	 .alg.skcipher = {
-			.base.cra_name = "ofb(aes)",
-			.base.cra_driver_name = "ofb-aes-iproc",
-			.base.cra_blocksize = AES_BLOCK_SIZE,
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.ivsize = AES_BLOCK_SIZE,
-			},
-	 .cipher_info = {
-			 .alg = CIPHER_ALG_AES,
-			 .mode = CIPHER_MODE_OFB,
-			 },
-	 .auth_info = {
-		       .alg = HASH_ALG_NONE,
-		       .mode = HASH_MODE_NONE,
-		       },
-	 },
-	{
-	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
-	 .alg.skcipher = {
 			.base.cra_name = "cbc(aes)",
 			.base.cra_driver_name = "cbc-aes-iproc",
 			.base.cra_blocksize = AES_BLOCK_SIZE,
@@ -3811,7 +3680,8 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_name = "md5",
 			.cra_driver_name = "md5-iproc",
 			.cra_blocksize = MD5_BLOCK_WORDS * 4,
-			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_ALLOCATES_MEMORY,
 		}
 	},
 	.cipher_info = {
@@ -4283,6 +4153,7 @@ static int ahash_cra_init(struct crypto_tfm *tfm)
 
 static int aead_cra_init(struct crypto_aead *aead)
 {
+	unsigned int reqsize = sizeof(struct iproc_reqctx_s);
 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
 	struct crypto_alg *alg = tfm->__crt_alg;
@@ -4294,7 +4165,6 @@ static int aead_cra_init(struct crypto_aead *aead)
 
 	flow_log("%s()\n", __func__);
 
-	crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s));
 	ctx->is_esp = false;
 	ctx->salt_len = 0;
 	ctx->salt_offset = 0;
@@ -4303,22 +4173,29 @@ static int aead_cra_init(struct crypto_aead *aead)
 	get_random_bytes(ctx->iv, MAX_IV_SIZE);
 	flow_dump("  iv: ", ctx->iv, MAX_IV_SIZE);
 
-	if (!err) {
-		if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
-			flow_log("%s() creating fallback cipher\n", __func__);
-
-			ctx->fallback_cipher =
-			    crypto_alloc_aead(alg->cra_name, 0,
-					      CRYPTO_ALG_ASYNC |
-					      CRYPTO_ALG_NEED_FALLBACK);
-			if (IS_ERR(ctx->fallback_cipher)) {
-				pr_err("%s() Error: failed to allocate fallback for %s\n",
-				       __func__, alg->cra_name);
-				return PTR_ERR(ctx->fallback_cipher);
-			}
-		}
+	if (err)
+		goto out;
+
+	if (!(alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK))
+		goto reqsize;
+
+	flow_log("%s() creating fallback cipher\n", __func__);
+
+	ctx->fallback_cipher = crypto_alloc_aead(alg->cra_name, 0,
+						 CRYPTO_ALG_ASYNC |
+						 CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(ctx->fallback_cipher)) {
+		pr_err("%s() Error: failed to allocate fallback for %s\n",
+		       __func__, alg->cra_name);
+		return PTR_ERR(ctx->fallback_cipher);
 	}
 
+	reqsize += crypto_aead_reqsize(ctx->fallback_cipher);
+
+reqsize:
+	crypto_aead_set_reqsize(aead, reqsize);
+
+out:
 	return err;
 }
 
@@ -4495,20 +4372,16 @@ static void spu_counters_init(void)
 
 static int spu_register_skcipher(struct iproc_alg_s *driver_alg)
 {
-	struct spu_hw *spu = &iproc_priv.spu;
 	struct skcipher_alg *crypto = &driver_alg->alg.skcipher;
 	int err;
 
-	/* SPU2 does not support RC4 */
-	if ((driver_alg->cipher_info.alg == CIPHER_ALG_RC4) &&
-	    (spu->spu_type == SPU_TYPE_SPU2))
-		return 0;
-
 	crypto->base.cra_module = THIS_MODULE;
 	crypto->base.cra_priority = cipher_pri;
 	crypto->base.cra_alignmask = 0;
 	crypto->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
-	crypto->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+	crypto->base.cra_flags = CRYPTO_ALG_ASYNC |
+				 CRYPTO_ALG_ALLOCATES_MEMORY |
+				 CRYPTO_ALG_KERN_DRIVER_ONLY;
 
 	crypto->init = skcipher_init_tfm;
 	crypto->exit = skcipher_exit_tfm;
@@ -4547,7 +4420,8 @@ static int spu_register_ahash(struct iproc_alg_s *driver_alg)
 	hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
 	hash->halg.base.cra_init = ahash_cra_init;
 	hash->halg.base.cra_exit = generic_cra_exit;
-	hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+	hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+				    CRYPTO_ALG_ALLOCATES_MEMORY;
 	hash->halg.statesize = sizeof(struct spu_hash_export_s);
 
 	if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
@@ -4591,7 +4465,7 @@ static int spu_register_aead(struct iproc_alg_s *driver_alg)
 	aead->base.cra_alignmask = 0;
 	aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
 
-	aead->base.cra_flags |= CRYPTO_ALG_ASYNC;
+	aead->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
 	/* setkey set in alg initialization */
 	aead->setauthsize = aead_setauthsize;
 	aead->encrypt = aead_encrypt;
@@ -4786,7 +4660,7 @@ failure:
 	return err;
 }
 
-static int bcm_spu_remove(struct platform_device *pdev)
+static void bcm_spu_remove(struct platform_device *pdev)
 {
 	int i;
 	struct device *dev = &pdev->dev;
@@ -4824,7 +4698,6 @@ static int bcm_spu_remove(struct platform_device *pdev)
 	}
 	spu_free_debugfs();
 	spu_mb_release(pdev);
-	return 0;
 }
 
 /* ===== Kernel Module API ===== */
