Diffstat (limited to 'drivers/crypto/inside-secure')
21 files changed, 10078 insertions, 1200 deletions
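The driver added below registers its offloaded transforms under the standard crypto API names (for example cra_name "cbc(aes)" with cra_driver_name "cbc(aes-eip93)"), so once CRYPTO_DEV_EIP93 is enabled they are reachable like any other provider, including from userspace through the AF_ALG socket interface. The sketch that follows is illustrative only and not part of the commit: it assumes a kernel with AF_ALG enabled, requests the generic "cbc(aes)" name (which the crypto core resolves to the highest-priority implementation, possibly this driver's), and uses placeholder key, IV and message values; error handling is omitted.

/*
 * Minimal AF_ALG example (illustrative, not part of this patch):
 * encrypt one AES-CBC block through whichever "cbc(aes)" provider
 * the crypto core selects. Key/IV/message are arbitrary placeholders.
 */
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "cbc(aes)",
	};
	unsigned char key[16] = "0123456789abcdef";	/* placeholder key */
	unsigned char iv[16]  = { 0 };			/* placeholder IV */
	unsigned char pt[16]  = "sixteen byte msg";	/* one AES block */
	unsigned char ct[16];
	char cbuf[CMSG_SPACE(sizeof(__u32)) +
		  CMSG_SPACE(offsetof(struct af_alg_iv, iv) + sizeof(iv))] = { 0 };
	struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
	struct msghdr msg = {
		.msg_control    = cbuf,
		.msg_controllen = sizeof(cbuf),
		.msg_iov        = &iov,
		.msg_iovlen     = 1,
	};
	struct af_alg_iv *alg_iv;
	struct cmsghdr *cmsg;
	int tfmfd, opfd, i;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	/* first cmsg: operation type (encrypt) */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	/* second cmsg: IV */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(offsetof(struct af_alg_iv, iv) + sizeof(iv));
	alg_iv = (struct af_alg_iv *)CMSG_DATA(cmsg);
	alg_iv->ivlen = sizeof(iv);
	memcpy(alg_iv->iv, iv, sizeof(iv));

	sendmsg(opfd, &msg, 0);
	read(opfd, ct, sizeof(ct));

	for (i = 0; i < (int)sizeof(ct); i++)
		printf("%02x", ct[i]);
	printf("\n");

	close(opfd);
	close(tfmfd);
	return 0;
}

The registered eip93 entries, their priorities and driver names can be inspected in /proc/crypto to confirm that the hardware implementations are available.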
diff --git a/drivers/crypto/inside-secure/Makefile b/drivers/crypto/inside-secure/Makefile index 302f07dde98c..30d13fd5d58e 100644 --- a/drivers/crypto/inside-secure/Makefile +++ b/drivers/crypto/inside-secure/Makefile @@ -1,2 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += crypto_safexcel.o crypto_safexcel-objs := safexcel.o safexcel_ring.o safexcel_cipher.o safexcel_hash.o +obj-y += eip93/ diff --git a/drivers/crypto/inside-secure/eip93/Kconfig b/drivers/crypto/inside-secure/eip93/Kconfig new file mode 100644 index 000000000000..8353d3d7ec9b --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/Kconfig @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0 +config CRYPTO_DEV_EIP93 + tristate "Support for EIP93 crypto HW accelerators" + depends on SOC_MT7621 || ARCH_AIROHA || COMPILE_TEST + select CRYPTO_LIB_AES + select CRYPTO_LIB_DES + select CRYPTO_SKCIPHER + select CRYPTO_AEAD + select CRYPTO_AUTHENC + select CRYPTO_MD5 + select CRYPTO_SHA1 + select CRYPTO_SHA256 + help + The EIP93 has various crypto HW accelerators. Select this if + you want to use the EIP93 module for any of the supported crypto algorithms. + + If the IP supports it, this provides offload for AES in ECB, CBC and + CTR modes. It also provides DES and 3DES in ECB and CBC modes. + + AEAD authenc(hmac(x), cipher(y)) is also provided for the supported algorithms. diff --git a/drivers/crypto/inside-secure/eip93/Makefile b/drivers/crypto/inside-secure/eip93/Makefile new file mode 100644 index 000000000000..a3d3d3677cdc --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_CRYPTO_DEV_EIP93) += crypto-hw-eip93.o + +crypto-hw-eip93-y += eip93-main.o eip93-common.o +crypto-hw-eip93-y += eip93-cipher.o eip93-aead.o +crypto-hw-eip93-y += eip93-hash.o diff --git a/drivers/crypto/inside-secure/eip93/eip93-aead.c b/drivers/crypto/inside-secure/eip93/eip93-aead.c new file mode 100644 index 000000000000..18dd8a9a5165 --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-aead.c @@ -0,0 +1,711 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com> + */ + +#include <crypto/aead.h> +#include <crypto/aes.h> +#include <crypto/authenc.h> +#include <crypto/ctr.h> +#include <crypto/hmac.h> +#include <crypto/internal/aead.h> +#include <crypto/md5.h> +#include <crypto/null.h> +#include <crypto/sha1.h> +#include <crypto/sha2.h> + +#include <crypto/internal/des.h> + +#include <linux/crypto.h> +#include <linux/dma-mapping.h> + +#include "eip93-aead.h" +#include "eip93-cipher.h" +#include "eip93-common.h" +#include "eip93-regs.h" + +void eip93_aead_handle_result(struct crypto_async_request *async, int err) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm); + struct eip93_device *eip93 = ctx->eip93; + struct aead_request *req = aead_request_cast(async); + struct eip93_cipher_reqctx *rctx = aead_request_ctx(req); + + eip93_unmap_dma(eip93, rctx, req->src, req->dst); + eip93_handle_result(eip93, rctx, req->iv); + + aead_request_complete(req, err); +} + +static int eip93_aead_send_req(struct crypto_async_request *async) +{ + struct aead_request *req = aead_request_cast(async); + struct eip93_cipher_reqctx *rctx = aead_request_ctx(req); + int err; + + err = check_valid_request(rctx); + if (err) { + aead_request_complete(req, err); + return err; + } + + return eip93_send_req(async, req->iv, rctx); +} + +/* Crypto aead API functions */ +static int eip93_aead_cra_init(struct crypto_tfm
*tfm) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg, + struct eip93_alg_template, alg.aead.base); + + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), + sizeof(struct eip93_cipher_reqctx)); + + ctx->eip93 = tmpl->eip93; + ctx->flags = tmpl->flags; + ctx->type = tmpl->type; + ctx->set_assoc = true; + + ctx->sa_record = kzalloc(sizeof(*ctx->sa_record), GFP_KERNEL); + if (!ctx->sa_record) + return -ENOMEM; + + return 0; +} + +static void eip93_aead_cra_exit(struct crypto_tfm *tfm) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + + dma_unmap_single(ctx->eip93->dev, ctx->sa_record_base, + sizeof(*ctx->sa_record), DMA_TO_DEVICE); + kfree(ctx->sa_record); +} + +static int eip93_aead_setkey(struct crypto_aead *ctfm, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_authenc_keys keys; + struct crypto_aes_ctx aes; + struct sa_record *sa_record = ctx->sa_record; + u32 nonce = 0; + int ret; + + if (crypto_authenc_extractkeys(&keys, key, len)) + return -EINVAL; + + if (IS_RFC3686(ctx->flags)) { + if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE) + return -EINVAL; + + keys.enckeylen -= CTR_RFC3686_NONCE_SIZE; + memcpy(&nonce, keys.enckey + keys.enckeylen, + CTR_RFC3686_NONCE_SIZE); + } + + switch ((ctx->flags & EIP93_ALG_MASK)) { + case EIP93_ALG_DES: + ret = verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen); + if (ret) + return ret; + + break; + case EIP93_ALG_3DES: + if (keys.enckeylen != DES3_EDE_KEY_SIZE) + return -EINVAL; + + ret = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen); + if (ret) + return ret; + + break; + case EIP93_ALG_AES: + ret = aes_expandkey(&aes, keys.enckey, keys.enckeylen); + if (ret) + return ret; + + break; + } + + ctx->blksize = crypto_aead_blocksize(ctfm); + /* Encryption key */ + eip93_set_sa_record(sa_record, keys.enckeylen, ctx->flags); + sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_OPCODE; + sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_OPCODE, + EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC_HASH); + sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH; + sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, + ctx->authsize / sizeof(u32)); + + memcpy(sa_record->sa_key, keys.enckey, keys.enckeylen); + ctx->sa_nonce = nonce; + sa_record->sa_nonce = nonce; + + /* authentication key */ + ret = eip93_hmac_setkey(ctx->flags, keys.authkey, keys.authkeylen, + ctx->authsize, sa_record->sa_i_digest, + sa_record->sa_o_digest, false); + + ctx->set_assoc = true; + + return ret; +} + +static int eip93_aead_setauthsize(struct crypto_aead *ctfm, + unsigned int authsize) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->authsize = authsize; + ctx->sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH; + ctx->sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, + ctx->authsize / sizeof(u32)); + + return 0; +} + +static void eip93_aead_setassoc(struct eip93_crypto_ctx *ctx, + struct aead_request *req) +{ + struct sa_record *sa_record = ctx->sa_record; + + sa_record->sa_cmd1_word &= ~EIP93_SA_CMD_HASH_CRYPT_OFFSET; + sa_record->sa_cmd1_word |= FIELD_PREP(EIP93_SA_CMD_HASH_CRYPT_OFFSET, + req->assoclen / sizeof(u32)); + + ctx->assoclen = req->assoclen; +} + +static int eip93_aead_crypt(struct aead_request *req) +{ + struct eip93_cipher_reqctx *rctx = aead_request_ctx(req); + struct crypto_async_request *async = 
&req->base; + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_aead *aead = crypto_aead_reqtfm(req); + int ret; + + ctx->sa_record_base = dma_map_single(ctx->eip93->dev, ctx->sa_record, + sizeof(*ctx->sa_record), DMA_TO_DEVICE); + ret = dma_mapping_error(ctx->eip93->dev, ctx->sa_record_base); + if (ret) + return ret; + + rctx->textsize = req->cryptlen; + rctx->blksize = ctx->blksize; + rctx->assoclen = req->assoclen; + rctx->authsize = ctx->authsize; + rctx->sg_src = req->src; + rctx->sg_dst = req->dst; + rctx->ivsize = crypto_aead_ivsize(aead); + rctx->desc_flags = EIP93_DESC_AEAD; + rctx->sa_record_base = ctx->sa_record_base; + + if (IS_DECRYPT(rctx->flags)) + rctx->textsize -= rctx->authsize; + + return eip93_aead_send_req(async); +} + +static int eip93_aead_encrypt(struct aead_request *req) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct eip93_cipher_reqctx *rctx = aead_request_ctx(req); + + rctx->flags = ctx->flags; + rctx->flags |= EIP93_ENCRYPT; + if (ctx->set_assoc) { + eip93_aead_setassoc(ctx, req); + ctx->set_assoc = false; + } + + if (req->assoclen != ctx->assoclen) { + dev_err(ctx->eip93->dev, "Request AAD length error\n"); + return -EINVAL; + } + + return eip93_aead_crypt(req); +} + +static int eip93_aead_decrypt(struct aead_request *req) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct eip93_cipher_reqctx *rctx = aead_request_ctx(req); + + ctx->sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIRECTION_IN; + ctx->sa_record->sa_cmd1_word &= ~(EIP93_SA_CMD_COPY_PAD | + EIP93_SA_CMD_COPY_DIGEST); + + rctx->flags = ctx->flags; + rctx->flags |= EIP93_DECRYPT; + if (ctx->set_assoc) { + eip93_aead_setassoc(ctx, req); + ctx->set_assoc = false; + } + + if (req->assoclen != ctx->assoclen) { + dev_err(ctx->eip93->dev, "Request AAD length error\n"); + return -EINVAL; + } + + return eip93_aead_crypt(req); +} + +/* Available authenc algorithms in this module */ +struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = MD5_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(md5),cbc(aes))", + .cra_driver_name = + "authenc(hmac(md5-eip93), cbc(aes-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = + "authenc(hmac(sha1-eip93),cbc(aes-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = 
sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA224_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha224),cbc(aes))", + .cra_driver_name = + "authenc(hmac(sha224-eip93),cbc(aes-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = + "authenc(hmac(sha256-eip93),cbc(aes-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_md5_rfc3686_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | + EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = MD5_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(md5),rfc3686(ctr(aes)))", + .cra_driver_name = + "authenc(hmac(md5-eip93),rfc3686(ctr(aes-eip93)))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha1_rfc3686_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | + EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))", + .cra_driver_name = + "authenc(hmac(sha1-eip93),rfc3686(ctr(aes-eip93)))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, 
+ .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha224_rfc3686_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | + EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA224_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))", + .cra_driver_name = + "authenc(hmac(sha224-eip93),rfc3686(ctr(aes-eip93)))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha256_rfc3686_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | + EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))", + .cra_driver_name = + "authenc(hmac(sha256-eip93),rfc3686(ctr(aes-eip93)))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = MD5_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(md5),cbc(des))", + .cra_driver_name = + "authenc(hmac(md5-eip93),cbc(des-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),cbc(des))", + .cra_driver_name = + "authenc(hmac(sha1-eip93),cbc(des-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + 
CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA224_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha224),cbc(des))", + .cra_driver_name = + "authenc(hmac(sha224-eip93),cbc(des-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256),cbc(des))", + .cra_driver_name = + "authenc(hmac(sha256-eip93),cbc(des-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des3_ede = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_3DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = MD5_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(md5),cbc(des3_ede))", + .cra_driver_name = + "authenc(hmac(md5-eip93),cbc(des3_ede-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0x0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des3_ede = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_3DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", + .cra_driver_name = + "authenc(hmac(sha1-eip93),cbc(des3_ede-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + 
CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0x0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des3_ede = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_3DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA224_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha224),cbc(des3_ede))", + .cra_driver_name = + "authenc(hmac(sha224-eip93),cbc(des3_ede-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0x0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des3_ede = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_3DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", + .cra_driver_name = + "authenc(hmac(sha256-eip93),cbc(des3_ede-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0x0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; diff --git a/drivers/crypto/inside-secure/eip93/eip93-aead.h b/drivers/crypto/inside-secure/eip93/eip93-aead.h new file mode 100644 index 000000000000..e2fa8fd39c50 --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-aead.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ +#ifndef _EIP93_AEAD_H_ +#define _EIP93_AEAD_H_ + +extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_ctr_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_ctr_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_ctr_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_ctr_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_rfc3686_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_rfc3686_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_rfc3686_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_rfc3686_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des; +extern struct 
eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des; +extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des3_ede; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des3_ede; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des3_ede; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des3_ede; +extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_ecb_null; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_ecb_null; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_ecb_null; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_ecb_null; + +void eip93_aead_handle_result(struct crypto_async_request *async, int err); + +#endif /* _EIP93_AEAD_H_ */ diff --git a/drivers/crypto/inside-secure/eip93/eip93-aes.h b/drivers/crypto/inside-secure/eip93/eip93-aes.h new file mode 100644 index 000000000000..1d83d39cab2a --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-aes.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ +#ifndef _EIP93_AES_H_ +#define _EIP93_AES_H_ + +extern struct eip93_alg_template eip93_alg_ecb_aes; +extern struct eip93_alg_template eip93_alg_cbc_aes; +extern struct eip93_alg_template eip93_alg_ctr_aes; +extern struct eip93_alg_template eip93_alg_rfc3686_aes; + +#endif /* _EIP93_AES_H_ */ diff --git a/drivers/crypto/inside-secure/eip93/eip93-cipher.c b/drivers/crypto/inside-secure/eip93/eip93-cipher.c new file mode 100644 index 000000000000..1f2d6846610f --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-cipher.c @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ + +#include <crypto/aes.h> +#include <crypto/ctr.h> +#include <crypto/internal/des.h> +#include <linux/dma-mapping.h> + +#include "eip93-aes.h" +#include "eip93-cipher.h" +#include "eip93-common.h" +#include "eip93-des.h" +#include "eip93-regs.h" + +void eip93_skcipher_handle_result(struct crypto_async_request *async, int err) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm); + struct eip93_device *eip93 = ctx->eip93; + struct skcipher_request *req = skcipher_request_cast(async); + struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req); + + eip93_unmap_dma(eip93, rctx, req->src, req->dst); + eip93_handle_result(eip93, rctx, req->iv); + + skcipher_request_complete(req, err); +} + +static int eip93_skcipher_send_req(struct crypto_async_request *async) +{ + struct skcipher_request *req = skcipher_request_cast(async); + struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req); + int err; + + err = check_valid_request(rctx); + + if (err) { + skcipher_request_complete(req, err); + return err; + } + + return eip93_send_req(async, req->iv, rctx); +} + +/* Crypto skcipher API functions */ +static int eip93_skcipher_cra_init(struct crypto_tfm *tfm) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg, + struct eip93_alg_template, alg.skcipher.base); + + crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), + sizeof(struct eip93_cipher_reqctx)); + + memset(ctx, 0, sizeof(*ctx)); + + ctx->eip93 = 
tmpl->eip93; + ctx->type = tmpl->type; + + ctx->sa_record = kzalloc(sizeof(*ctx->sa_record), GFP_KERNEL); + if (!ctx->sa_record) + return -ENOMEM; + + return 0; +} + +static void eip93_skcipher_cra_exit(struct crypto_tfm *tfm) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + + dma_unmap_single(ctx->eip93->dev, ctx->sa_record_base, + sizeof(*ctx->sa_record), DMA_TO_DEVICE); + kfree(ctx->sa_record); +} + +static int eip93_skcipher_setkey(struct crypto_skcipher *ctfm, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg, + struct eip93_alg_template, + alg.skcipher.base); + struct sa_record *sa_record = ctx->sa_record; + unsigned int keylen = len; + u32 flags = tmpl->flags; + u32 nonce = 0; + int ret; + + if (!key || !keylen) + return -EINVAL; + + if (IS_RFC3686(flags)) { + if (len < CTR_RFC3686_NONCE_SIZE) + return -EINVAL; + + keylen = len - CTR_RFC3686_NONCE_SIZE; + memcpy(&nonce, key + keylen, CTR_RFC3686_NONCE_SIZE); + } + + if (flags & EIP93_ALG_DES) { + ctx->blksize = DES_BLOCK_SIZE; + ret = verify_skcipher_des_key(ctfm, key); + if (ret) + return ret; + } + if (flags & EIP93_ALG_3DES) { + ctx->blksize = DES3_EDE_BLOCK_SIZE; + ret = verify_skcipher_des3_key(ctfm, key); + if (ret) + return ret; + } + + if (flags & EIP93_ALG_AES) { + struct crypto_aes_ctx aes; + + ctx->blksize = AES_BLOCK_SIZE; + ret = aes_expandkey(&aes, key, keylen); + if (ret) + return ret; + } + + eip93_set_sa_record(sa_record, keylen, flags); + + memcpy(sa_record->sa_key, key, keylen); + ctx->sa_nonce = nonce; + sa_record->sa_nonce = nonce; + + return 0; +} + +static int eip93_skcipher_crypt(struct skcipher_request *req) +{ + struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req); + struct crypto_async_request *async = &req->base; + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + int ret; + + if (!req->cryptlen) + return 0; + + /* + * ECB and CBC algorithms require message lengths to be + * multiples of block size. 
+ */ + if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags)) + if (!IS_ALIGNED(req->cryptlen, + crypto_skcipher_blocksize(skcipher))) + return -EINVAL; + + ctx->sa_record_base = dma_map_single(ctx->eip93->dev, ctx->sa_record, + sizeof(*ctx->sa_record), DMA_TO_DEVICE); + ret = dma_mapping_error(ctx->eip93->dev, ctx->sa_record_base); + if (ret) + return ret; + + rctx->assoclen = 0; + rctx->textsize = req->cryptlen; + rctx->authsize = 0; + rctx->sg_src = req->src; + rctx->sg_dst = req->dst; + rctx->ivsize = crypto_skcipher_ivsize(skcipher); + rctx->blksize = ctx->blksize; + rctx->desc_flags = EIP93_DESC_SKCIPHER; + rctx->sa_record_base = ctx->sa_record_base; + + return eip93_skcipher_send_req(async); +} + +static int eip93_skcipher_encrypt(struct skcipher_request *req) +{ + struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req); + struct eip93_alg_template *tmpl = container_of(req->base.tfm->__crt_alg, + struct eip93_alg_template, alg.skcipher.base); + + rctx->flags = tmpl->flags; + rctx->flags |= EIP93_ENCRYPT; + + return eip93_skcipher_crypt(req); +} + +static int eip93_skcipher_decrypt(struct skcipher_request *req) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req); + struct eip93_alg_template *tmpl = container_of(req->base.tfm->__crt_alg, + struct eip93_alg_template, alg.skcipher.base); + + ctx->sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIRECTION_IN; + + rctx->flags = tmpl->flags; + rctx->flags |= EIP93_DECRYPT; + + return eip93_skcipher_crypt(req); +} + +/* Available algorithms in this module */ +struct eip93_alg_template eip93_alg_ecb_aes = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_ECB | EIP93_ALG_AES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = 0, + .base = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb(aes-eip93)", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0xf, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_cbc_aes = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_CBC | EIP93_ALG_AES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc(aes-eip93)", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0xf, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_ctr_aes = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_CTR | EIP93_ALG_AES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name 
= "ctr(aes)", + .cra_driver_name = "ctr(aes-eip93)", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0xf, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_rfc3686_aes = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .ivsize = CTR_RFC3686_IV_SIZE, + .base = { + .cra_name = "rfc3686(ctr(aes))", + .cra_driver_name = "rfc3686(ctr(aes-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0xf, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_ecb_des = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_ECB | EIP93_ALG_DES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = 0, + .base = { + .cra_name = "ecb(des)", + .cra_driver_name = "ebc(des-eip93)", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_cbc_des = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_CBC | EIP93_ALG_DES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .base = { + .cra_name = "cbc(des)", + .cra_driver_name = "cbc(des-eip93)", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_ecb_des3_ede = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_ECB | EIP93_ALG_3DES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = 0, + .base = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb(des3_ede-eip93)", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; 
+ +struct eip93_alg_template eip93_alg_cbc_des3_ede = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_CBC | EIP93_ALG_3DES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .base = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc(des3_ede-eip93)", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; diff --git a/drivers/crypto/inside-secure/eip93/eip93-cipher.h b/drivers/crypto/inside-secure/eip93/eip93-cipher.h new file mode 100644 index 000000000000..6e2545ebd879 --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-cipher.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ +#ifndef _EIP93_CIPHER_H_ +#define _EIP93_CIPHER_H_ + +#include "eip93-main.h" + +struct eip93_crypto_ctx { + struct eip93_device *eip93; + u32 flags; + struct sa_record *sa_record; + u32 sa_nonce; + int blksize; + dma_addr_t sa_record_base; + /* AEAD specific */ + unsigned int authsize; + unsigned int assoclen; + bool set_assoc; + enum eip93_alg_type type; +}; + +struct eip93_cipher_reqctx { + u16 desc_flags; + u16 flags; + unsigned int blksize; + unsigned int ivsize; + unsigned int textsize; + unsigned int assoclen; + unsigned int authsize; + dma_addr_t sa_record_base; + struct sa_state *sa_state; + dma_addr_t sa_state_base; + struct eip93_descriptor *cdesc; + struct scatterlist *sg_src; + struct scatterlist *sg_dst; + int src_nents; + int dst_nents; + struct sa_state *sa_state_ctr; + dma_addr_t sa_state_ctr_base; +}; + +int check_valid_request(struct eip93_cipher_reqctx *rctx); + +void eip93_unmap_dma(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx, + struct scatterlist *reqsrc, struct scatterlist *reqdst); + +void eip93_skcipher_handle_result(struct crypto_async_request *async, int err); + +int eip93_send_req(struct crypto_async_request *async, + const u8 *reqiv, struct eip93_cipher_reqctx *rctx); + +void eip93_handle_result(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx, + u8 *reqiv); + +#endif /* _EIP93_CIPHER_H_ */ diff --git a/drivers/crypto/inside-secure/eip93/eip93-common.c b/drivers/crypto/inside-secure/eip93/eip93-common.c new file mode 100644 index 000000000000..66153aa2493f --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-common.c @@ -0,0 +1,822 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ + +#include <crypto/aes.h> +#include <crypto/ctr.h> +#include <crypto/hmac.h> +#include <crypto/sha1.h> +#include <crypto/sha2.h> +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/scatterlist.h> + +#include "eip93-cipher.h" +#include "eip93-hash.h" +#include "eip93-common.h" +#include "eip93-main.h" +#include "eip93-regs.h" + +int eip93_parse_ctrl_stat_err(struct eip93_device *eip93, int err) +{ + u32 ext_err; + + if (!err) + return 0; + + switch (err & 
~EIP93_PE_CTRL_PE_EXT_ERR_CODE) { + case EIP93_PE_CTRL_PE_AUTH_ERR: + case EIP93_PE_CTRL_PE_PAD_ERR: + return -EBADMSG; + /* let software handle anti-replay errors */ + case EIP93_PE_CTRL_PE_SEQNUM_ERR: + return 0; + case EIP93_PE_CTRL_PE_EXT_ERR: + break; + default: + dev_err(eip93->dev, "Unhandled error 0x%08x\n", err); + return -EINVAL; + } + + /* Parse additional ext errors */ + ext_err = FIELD_GET(EIP93_PE_CTRL_PE_EXT_ERR_CODE, err); + switch (ext_err) { + case EIP93_PE_CTRL_PE_EXT_ERR_BUS: + case EIP93_PE_CTRL_PE_EXT_ERR_PROCESSING: + return -EIO; + case EIP93_PE_CTRL_PE_EXT_ERR_DESC_OWNER: + return -EACCES; + case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_OP: + case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_ALGO: + case EIP93_PE_CTRL_PE_EXT_ERR_SPI: + return -EINVAL; + case EIP93_PE_CTRL_PE_EXT_ERR_ZERO_LENGTH: + case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_PK_LENGTH: + case EIP93_PE_CTRL_PE_EXT_ERR_BLOCK_SIZE_ERR: + return -EBADMSG; + default: + dev_err(eip93->dev, "Unhandled ext error 0x%08x\n", ext_err); + return -EINVAL; + } +} + +static void *eip93_ring_next_wptr(struct eip93_device *eip93, + struct eip93_desc_ring *ring) +{ + void *ptr = ring->write; + + if ((ring->write == ring->read - ring->offset) || + (ring->read == ring->base && ring->write == ring->base_end)) + return ERR_PTR(-ENOMEM); + + if (ring->write == ring->base_end) + ring->write = ring->base; + else + ring->write += ring->offset; + + return ptr; +} + +static void *eip93_ring_next_rptr(struct eip93_device *eip93, + struct eip93_desc_ring *ring) +{ + void *ptr = ring->read; + + if (ring->write == ring->read) + return ERR_PTR(-ENOENT); + + if (ring->read == ring->base_end) + ring->read = ring->base; + else + ring->read += ring->offset; + + return ptr; +} + +int eip93_put_descriptor(struct eip93_device *eip93, + struct eip93_descriptor *desc) +{ + struct eip93_descriptor *cdesc; + struct eip93_descriptor *rdesc; + + rdesc = eip93_ring_next_wptr(eip93, &eip93->ring->rdr); + if (IS_ERR(rdesc)) + return -ENOENT; + + cdesc = eip93_ring_next_wptr(eip93, &eip93->ring->cdr); + if (IS_ERR(cdesc)) + return -ENOENT; + + memset(rdesc, 0, sizeof(struct eip93_descriptor)); + + memcpy(cdesc, desc, sizeof(struct eip93_descriptor)); + + return 0; +} + +void *eip93_get_descriptor(struct eip93_device *eip93) +{ + struct eip93_descriptor *cdesc; + void *ptr; + + cdesc = eip93_ring_next_rptr(eip93, &eip93->ring->cdr); + if (IS_ERR(cdesc)) + return ERR_PTR(-ENOENT); + + memset(cdesc, 0, sizeof(struct eip93_descriptor)); + + ptr = eip93_ring_next_rptr(eip93, &eip93->ring->rdr); + if (IS_ERR(ptr)) + return ERR_PTR(-ENOENT); + + return ptr; +} + +static void eip93_free_sg_copy(const int len, struct scatterlist **sg) +{ + if (!*sg || !len) + return; + + free_pages((unsigned long)sg_virt(*sg), get_order(len)); + kfree(*sg); + *sg = NULL; +} + +static int eip93_make_sg_copy(struct scatterlist *src, struct scatterlist **dst, + const u32 len, const bool copy) +{ + void *pages; + + *dst = kmalloc(sizeof(**dst), GFP_KERNEL); + if (!*dst) + return -ENOMEM; + + pages = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA, + get_order(len)); + if (!pages) { + kfree(*dst); + *dst = NULL; + return -ENOMEM; + } + + sg_init_table(*dst, 1); + sg_set_buf(*dst, pages, len); + + /* copy only as requested */ + if (copy) + sg_copy_to_buffer(src, sg_nents(src), pages, len); + + return 0; +} + +static bool eip93_is_sg_aligned(struct scatterlist *sg, u32 len, + const int blksize) +{ + int nents; + + for (nents = 0; sg; sg = sg_next(sg), ++nents) { + if (!IS_ALIGNED(sg->offset, 
4)) + return false; + + if (len <= sg->length) { + if (!IS_ALIGNED(len, blksize)) + return false; + + return true; + } + + if (!IS_ALIGNED(sg->length, blksize)) + return false; + + len -= sg->length; + } + return false; +} + +int check_valid_request(struct eip93_cipher_reqctx *rctx) +{ + struct scatterlist *src = rctx->sg_src; + struct scatterlist *dst = rctx->sg_dst; + u32 textsize = rctx->textsize; + u32 authsize = rctx->authsize; + u32 blksize = rctx->blksize; + u32 totlen_src = rctx->assoclen + rctx->textsize; + u32 totlen_dst = rctx->assoclen + rctx->textsize; + u32 copy_len; + bool src_align, dst_align; + int src_nents, dst_nents; + int err = -EINVAL; + + if (!IS_CTR(rctx->flags)) { + if (!IS_ALIGNED(textsize, blksize)) + return err; + } + + if (authsize) { + if (IS_ENCRYPT(rctx->flags)) + totlen_dst += authsize; + else + totlen_src += authsize; + } + + src_nents = sg_nents_for_len(src, totlen_src); + if (src_nents < 0) + return src_nents; + + dst_nents = sg_nents_for_len(dst, totlen_dst); + if (dst_nents < 0) + return dst_nents; + + if (src == dst) { + src_nents = max(src_nents, dst_nents); + dst_nents = src_nents; + if (unlikely((totlen_src || totlen_dst) && !src_nents)) + return err; + + } else { + if (unlikely(totlen_src && !src_nents)) + return err; + + if (unlikely(totlen_dst && !dst_nents)) + return err; + } + + if (authsize) { + if (dst_nents == 1 && src_nents == 1) { + src_align = eip93_is_sg_aligned(src, totlen_src, blksize); + if (src == dst) + dst_align = src_align; + else + dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize); + } else { + src_align = false; + dst_align = false; + } + } else { + src_align = eip93_is_sg_aligned(src, totlen_src, blksize); + if (src == dst) + dst_align = src_align; + else + dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize); + } + + copy_len = max(totlen_src, totlen_dst); + if (!src_align) { + err = eip93_make_sg_copy(src, &rctx->sg_src, copy_len, true); + if (err) + return err; + } + + if (!dst_align) { + err = eip93_make_sg_copy(dst, &rctx->sg_dst, copy_len, false); + if (err) + return err; + } + + src_nents = sg_nents_for_len(rctx->sg_src, totlen_src); + if (src_nents < 0) + return src_nents; + + dst_nents = sg_nents_for_len(rctx->sg_dst, totlen_dst); + if (dst_nents < 0) + return dst_nents; + + rctx->src_nents = src_nents; + rctx->dst_nents = dst_nents; + + return 0; +} + +/* + * Set sa_record function: + * Even sa_record is set to "0", keep " = 0" for readability. 
+ */ +void eip93_set_sa_record(struct sa_record *sa_record, const unsigned int keylen, + const u32 flags) +{ + /* Reset cmd word */ + sa_record->sa_cmd0_word = 0; + sa_record->sa_cmd1_word = 0; + + sa_record->sa_cmd0_word |= EIP93_SA_CMD_IV_FROM_STATE; + if (!IS_ECB(flags)) + sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_IV; + + sa_record->sa_cmd0_word |= EIP93_SA_CMD_OP_BASIC; + + switch ((flags & EIP93_ALG_MASK)) { + case EIP93_ALG_AES: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_AES; + sa_record->sa_cmd1_word |= FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, + keylen >> 3); + break; + case EIP93_ALG_3DES: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_3DES; + break; + case EIP93_ALG_DES: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_DES; + break; + default: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_NULL; + } + + switch ((flags & EIP93_HASH_MASK)) { + case EIP93_HASH_SHA256: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA256; + break; + case EIP93_HASH_SHA224: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA224; + break; + case EIP93_HASH_SHA1: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA1; + break; + case EIP93_HASH_MD5: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_MD5; + break; + default: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_NULL; + } + + sa_record->sa_cmd0_word |= EIP93_SA_CMD_PAD_ZERO; + + switch ((flags & EIP93_MODE_MASK)) { + case EIP93_MODE_CBC: + sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CBC; + break; + case EIP93_MODE_CTR: + sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CTR; + break; + case EIP93_MODE_ECB: + sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_ECB; + break; + } + + sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIGEST_3WORD; + if (IS_HASH(flags)) { + sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_PAD; + sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_DIGEST; + } + + if (IS_HMAC(flags)) { + sa_record->sa_cmd1_word |= EIP93_SA_CMD_HMAC; + sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_HEADER; + } + + sa_record->sa_spi = 0x0; + sa_record->sa_seqmum_mask[0] = 0xFFFFFFFF; + sa_record->sa_seqmum_mask[1] = 0x0; +} + +/* + * Poor mans Scatter/gather function: + * Create a Descriptor for every segment to avoid copying buffers. 
+ * For performance better to wait for hardware to perform multiple DMA + */ +static int eip93_scatter_combine(struct eip93_device *eip93, + struct eip93_cipher_reqctx *rctx, + u32 datalen, u32 split, int offsetin) +{ + struct eip93_descriptor *cdesc = rctx->cdesc; + struct scatterlist *sgsrc = rctx->sg_src; + struct scatterlist *sgdst = rctx->sg_dst; + unsigned int remainin = sg_dma_len(sgsrc); + unsigned int remainout = sg_dma_len(sgdst); + dma_addr_t saddr = sg_dma_address(sgsrc); + dma_addr_t daddr = sg_dma_address(sgdst); + dma_addr_t state_addr; + u32 src_addr, dst_addr, len, n; + bool nextin = false; + bool nextout = false; + int offsetout = 0; + int err; + + if (IS_ECB(rctx->flags)) + rctx->sa_state_base = 0; + + if (split < datalen) { + state_addr = rctx->sa_state_ctr_base; + n = split; + } else { + state_addr = rctx->sa_state_base; + n = datalen; + } + + do { + if (nextin) { + sgsrc = sg_next(sgsrc); + remainin = sg_dma_len(sgsrc); + if (remainin == 0) + continue; + + saddr = sg_dma_address(sgsrc); + offsetin = 0; + nextin = false; + } + + if (nextout) { + sgdst = sg_next(sgdst); + remainout = sg_dma_len(sgdst); + if (remainout == 0) + continue; + + daddr = sg_dma_address(sgdst); + offsetout = 0; + nextout = false; + } + src_addr = saddr + offsetin; + dst_addr = daddr + offsetout; + + if (remainin == remainout) { + len = remainin; + if (len > n) { + len = n; + remainin -= n; + remainout -= n; + offsetin += n; + offsetout += n; + } else { + nextin = true; + nextout = true; + } + } else if (remainin < remainout) { + len = remainin; + if (len > n) { + len = n; + remainin -= n; + remainout -= n; + offsetin += n; + offsetout += n; + } else { + offsetout += len; + remainout -= len; + nextin = true; + } + } else { + len = remainout; + if (len > n) { + len = n; + remainin -= n; + remainout -= n; + offsetin += n; + offsetout += n; + } else { + offsetin += len; + remainin -= len; + nextout = true; + } + } + n -= len; + + cdesc->src_addr = src_addr; + cdesc->dst_addr = dst_addr; + cdesc->state_addr = state_addr; + cdesc->pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY, + EIP93_PE_LENGTH_HOST_READY); + cdesc->pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH, len); + + if (n == 0) { + n = datalen - split; + split = datalen; + state_addr = rctx->sa_state_base; + } + + if (n == 0) + cdesc->user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, + EIP93_DESC_LAST); + + /* + * Loop - Delay - No need to rollback + * Maybe refine by slowing down at EIP93_RING_BUSY + */ +again: + scoped_guard(spinlock_irqsave, &eip93->ring->write_lock) + err = eip93_put_descriptor(eip93, cdesc); + if (err) { + usleep_range(EIP93_RING_BUSY_DELAY, + EIP93_RING_BUSY_DELAY * 2); + goto again; + } + /* Writing new descriptor count starts DMA action */ + writel(1, eip93->base + EIP93_REG_PE_CD_COUNT); + } while (n); + + return -EINPROGRESS; +} + +int eip93_send_req(struct crypto_async_request *async, + const u8 *reqiv, struct eip93_cipher_reqctx *rctx) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm); + struct eip93_device *eip93 = ctx->eip93; + struct scatterlist *src = rctx->sg_src; + struct scatterlist *dst = rctx->sg_dst; + struct sa_state *sa_state; + struct eip93_descriptor cdesc; + u32 flags = rctx->flags; + int offsetin = 0, err; + u32 datalen = rctx->assoclen + rctx->textsize; + u32 split = datalen; + u32 start, end, ctr, blocks; + u32 iv[AES_BLOCK_SIZE / sizeof(u32)]; + int crypto_async_idr; + + rctx->sa_state_ctr = NULL; + rctx->sa_state = NULL; + + if (IS_ECB(flags)) + goto skip_iv; + + 
memcpy(iv, reqiv, rctx->ivsize); + + rctx->sa_state = kzalloc(sizeof(*rctx->sa_state), GFP_KERNEL); + if (!rctx->sa_state) + return -ENOMEM; + + sa_state = rctx->sa_state; + + memcpy(sa_state->state_iv, iv, rctx->ivsize); + if (IS_RFC3686(flags)) { + sa_state->state_iv[0] = ctx->sa_nonce; + sa_state->state_iv[1] = iv[0]; + sa_state->state_iv[2] = iv[1]; + sa_state->state_iv[3] = (u32 __force)cpu_to_be32(0x1); + } else if (!IS_HMAC(flags) && IS_CTR(flags)) { + /* Compute data length. */ + blocks = DIV_ROUND_UP(rctx->textsize, AES_BLOCK_SIZE); + ctr = be32_to_cpu((__be32 __force)iv[3]); + /* Check 32bit counter overflow. */ + start = ctr; + end = start + blocks - 1; + if (end < start) { + split = AES_BLOCK_SIZE * -start; + /* + * Increment the counter manually to cope with + * the hardware counter overflow. + */ + iv[3] = 0xffffffff; + crypto_inc((u8 *)iv, AES_BLOCK_SIZE); + + rctx->sa_state_ctr = kzalloc(sizeof(*rctx->sa_state_ctr), + GFP_KERNEL); + if (!rctx->sa_state_ctr) { + err = -ENOMEM; + goto free_sa_state; + } + + memcpy(rctx->sa_state_ctr->state_iv, reqiv, rctx->ivsize); + memcpy(sa_state->state_iv, iv, rctx->ivsize); + + rctx->sa_state_ctr_base = dma_map_single(eip93->dev, rctx->sa_state_ctr, + sizeof(*rctx->sa_state_ctr), + DMA_TO_DEVICE); + err = dma_mapping_error(eip93->dev, rctx->sa_state_ctr_base); + if (err) + goto free_sa_state_ctr; + } + } + + rctx->sa_state_base = dma_map_single(eip93->dev, rctx->sa_state, + sizeof(*rctx->sa_state), DMA_TO_DEVICE); + err = dma_mapping_error(eip93->dev, rctx->sa_state_base); + if (err) + goto free_sa_state_ctr_dma; + +skip_iv: + + cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN, + EIP93_PE_CTRL_HOST_READY); + cdesc.sa_addr = rctx->sa_record_base; + cdesc.arc4_addr = 0; + + scoped_guard(spinlock_bh, &eip93->ring->idr_lock) + crypto_async_idr = idr_alloc(&eip93->ring->crypto_async_idr, async, 0, + EIP93_RING_NUM - 1, GFP_ATOMIC); + + cdesc.user_id = FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) | + FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, rctx->desc_flags); + + rctx->cdesc = &cdesc; + + /* map DMA_BIDIRECTIONAL to invalidate cache on destination + * implies __dma_cache_wback_inv + */ + if (!dma_map_sg(eip93->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL)) { + err = -ENOMEM; + goto free_sa_state_ctr_dma; + } + + if (src != dst && + !dma_map_sg(eip93->dev, src, rctx->src_nents, DMA_TO_DEVICE)) { + err = -ENOMEM; + goto free_sg_dma; + } + + return eip93_scatter_combine(eip93, rctx, datalen, split, offsetin); + +free_sg_dma: + dma_unmap_sg(eip93->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL); +free_sa_state_ctr_dma: + if (rctx->sa_state_ctr) + dma_unmap_single(eip93->dev, rctx->sa_state_ctr_base, + sizeof(*rctx->sa_state_ctr), + DMA_TO_DEVICE); +free_sa_state_ctr: + kfree(rctx->sa_state_ctr); + if (rctx->sa_state) + dma_unmap_single(eip93->dev, rctx->sa_state_base, + sizeof(*rctx->sa_state), + DMA_TO_DEVICE); +free_sa_state: + kfree(rctx->sa_state); + + return err; +} + +void eip93_unmap_dma(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx, + struct scatterlist *reqsrc, struct scatterlist *reqdst) +{ + u32 len = rctx->assoclen + rctx->textsize; + u32 authsize = rctx->authsize; + u32 flags = rctx->flags; + u32 *otag; + int i; + + if (rctx->sg_src == rctx->sg_dst) { + dma_unmap_sg(eip93->dev, rctx->sg_dst, rctx->dst_nents, + DMA_BIDIRECTIONAL); + goto process_tag; + } + + dma_unmap_sg(eip93->dev, rctx->sg_src, rctx->src_nents, + DMA_TO_DEVICE); + + if (rctx->sg_src != reqsrc) + 
eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_src); + + dma_unmap_sg(eip93->dev, rctx->sg_dst, rctx->dst_nents, + DMA_BIDIRECTIONAL); + + /* SHA tags need conversion from net-to-host */ +process_tag: + if (IS_DECRYPT(flags)) + authsize = 0; + + if (authsize) { + if (!IS_HASH_MD5(flags)) { + otag = sg_virt(rctx->sg_dst) + len; + for (i = 0; i < (authsize / 4); i++) + otag[i] = be32_to_cpu((__be32 __force)otag[i]); + } + } + + if (rctx->sg_dst != reqdst) { + sg_copy_from_buffer(reqdst, sg_nents(reqdst), + sg_virt(rctx->sg_dst), len + authsize); + eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_dst); + } +} + +void eip93_handle_result(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx, + u8 *reqiv) +{ + if (rctx->sa_state_ctr) + dma_unmap_single(eip93->dev, rctx->sa_state_ctr_base, + sizeof(*rctx->sa_state_ctr), + DMA_FROM_DEVICE); + + if (rctx->sa_state) + dma_unmap_single(eip93->dev, rctx->sa_state_base, + sizeof(*rctx->sa_state), + DMA_FROM_DEVICE); + + if (!IS_ECB(rctx->flags)) + memcpy(reqiv, rctx->sa_state->state_iv, rctx->ivsize); + + kfree(rctx->sa_state_ctr); + kfree(rctx->sa_state); +} + +int eip93_hmac_setkey(u32 ctx_flags, const u8 *key, unsigned int keylen, + unsigned int hashlen, u8 *dest_ipad, u8 *dest_opad, + bool skip_ipad) +{ + u8 ipad[SHA256_BLOCK_SIZE], opad[SHA256_BLOCK_SIZE]; + struct crypto_ahash *ahash_tfm; + struct eip93_hash_reqctx *rctx; + struct ahash_request *req; + DECLARE_CRYPTO_WAIT(wait); + struct scatterlist sg[1]; + const char *alg_name; + int i, ret; + + switch (ctx_flags & EIP93_HASH_MASK) { + case EIP93_HASH_SHA256: + alg_name = "sha256-eip93"; + break; + case EIP93_HASH_SHA224: + alg_name = "sha224-eip93"; + break; + case EIP93_HASH_SHA1: + alg_name = "sha1-eip93"; + break; + case EIP93_HASH_MD5: + alg_name = "md5-eip93"; + break; + default: /* Impossible */ + return -EINVAL; + } + + ahash_tfm = crypto_alloc_ahash(alg_name, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(ahash_tfm)) + return PTR_ERR(ahash_tfm); + + req = ahash_request_alloc(ahash_tfm, GFP_ATOMIC); + if (!req) { + ret = -ENOMEM; + goto err_ahash; + } + + rctx = ahash_request_ctx_dma(req); + crypto_init_wait(&wait); + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + crypto_req_done, &wait); + + /* Hash the key if > SHA256_BLOCK_SIZE */ + if (keylen > SHA256_BLOCK_SIZE) { + sg_init_one(&sg[0], key, keylen); + + ahash_request_set_crypt(req, sg, ipad, keylen); + ret = crypto_wait_req(crypto_ahash_digest(req), &wait); + if (ret) + goto err_req; + + keylen = hashlen; + } else { + memcpy(ipad, key, keylen); + } + + /* Copy to opad */ + memset(ipad + keylen, 0, SHA256_BLOCK_SIZE - keylen); + memcpy(opad, ipad, SHA256_BLOCK_SIZE); + + /* Pad with HMAC constants */ + for (i = 0; i < SHA256_BLOCK_SIZE; i++) { + ipad[i] ^= HMAC_IPAD_VALUE; + opad[i] ^= HMAC_OPAD_VALUE; + } + + if (skip_ipad) { + memcpy(dest_ipad, ipad, SHA256_BLOCK_SIZE); + } else { + /* Hash ipad */ + sg_init_one(&sg[0], ipad, SHA256_BLOCK_SIZE); + ahash_request_set_crypt(req, sg, dest_ipad, SHA256_BLOCK_SIZE); + ret = crypto_ahash_init(req); + if (ret) + goto err_req; + + /* Disable HASH_FINALIZE for ipad hash */ + rctx->partial_hash = true; + + ret = crypto_wait_req(crypto_ahash_finup(req), &wait); + if (ret) + goto err_req; + } + + /* Hash opad */ + sg_init_one(&sg[0], opad, SHA256_BLOCK_SIZE); + ahash_request_set_crypt(req, sg, dest_opad, SHA256_BLOCK_SIZE); + ret = crypto_ahash_init(req); + if (ret) + goto err_req; + + /* Disable HASH_FINALIZE for opad hash */ + rctx->partial_hash = true; + + ret = 
crypto_wait_req(crypto_ahash_finup(req), &wait); + if (ret) + goto err_req; + + if (!IS_HASH_MD5(ctx_flags)) { + for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++) { + u32 *ipad_hash = (u32 *)dest_ipad; + u32 *opad_hash = (u32 *)dest_opad; + + if (!skip_ipad) + ipad_hash[i] = (u32 __force)cpu_to_be32(ipad_hash[i]); + opad_hash[i] = (u32 __force)cpu_to_be32(opad_hash[i]); + } + } + +err_req: + ahash_request_free(req); +err_ahash: + crypto_free_ahash(ahash_tfm); + + return ret; +} diff --git a/drivers/crypto/inside-secure/eip93/eip93-common.h b/drivers/crypto/inside-secure/eip93/eip93-common.h new file mode 100644 index 000000000000..80964cfa34df --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-common.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ + +#ifndef _EIP93_COMMON_H_ +#define _EIP93_COMMON_H_ + +void *eip93_get_descriptor(struct eip93_device *eip93); +int eip93_put_descriptor(struct eip93_device *eip93, struct eip93_descriptor *desc); + +void eip93_set_sa_record(struct sa_record *sa_record, const unsigned int keylen, + const u32 flags); + +int eip93_parse_ctrl_stat_err(struct eip93_device *eip93, int err); + +int eip93_hmac_setkey(u32 ctx_flags, const u8 *key, unsigned int keylen, + unsigned int hashlen, u8 *ipad, u8 *opad, + bool skip_ipad); + +#endif /* _EIP93_COMMON_H_ */ diff --git a/drivers/crypto/inside-secure/eip93/eip93-des.h b/drivers/crypto/inside-secure/eip93/eip93-des.h new file mode 100644 index 000000000000..74748df04acf --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-des.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ +#ifndef _EIP93_DES_H_ +#define _EIP93_DES_H_ + +extern struct eip93_alg_template eip93_alg_ecb_des; +extern struct eip93_alg_template eip93_alg_cbc_des; +extern struct eip93_alg_template eip93_alg_ecb_des3_ede; +extern struct eip93_alg_template eip93_alg_cbc_des3_ede; + +#endif /* _EIP93_DES_H_ */ diff --git a/drivers/crypto/inside-secure/eip93/eip93-hash.c b/drivers/crypto/inside-secure/eip93/eip93-hash.c new file mode 100644 index 000000000000..ac13d90a2b7c --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-hash.c @@ -0,0 +1,875 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 + * + * Christian Marangi <ansuelsmth@gmail.com + */ + +#include <crypto/sha1.h> +#include <crypto/sha2.h> +#include <crypto/md5.h> +#include <crypto/hmac.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> + +#include "eip93-cipher.h" +#include "eip93-hash.h" +#include "eip93-main.h" +#include "eip93-common.h" +#include "eip93-regs.h" + +static void eip93_hash_free_data_blocks(struct ahash_request *req) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct eip93_device *eip93 = ctx->eip93; + struct mkt_hash_block *block, *tmp; + + list_for_each_entry_safe(block, tmp, &rctx->blocks, list) { + dma_unmap_single(eip93->dev, block->data_dma, + SHA256_BLOCK_SIZE, DMA_TO_DEVICE); + kfree(block); + } + if (!list_empty(&rctx->blocks)) + INIT_LIST_HEAD(&rctx->blocks); + + if (rctx->finalize) + dma_unmap_single(eip93->dev, rctx->data_dma, + rctx->data_used, + DMA_TO_DEVICE); +} + +static void 
eip93_hash_free_sa_record(struct ahash_request *req) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct eip93_device *eip93 = ctx->eip93; + + if (IS_HMAC(ctx->flags)) + dma_unmap_single(eip93->dev, rctx->sa_record_hmac_base, + sizeof(rctx->sa_record_hmac), DMA_TO_DEVICE); + + dma_unmap_single(eip93->dev, rctx->sa_record_base, + sizeof(rctx->sa_record), DMA_TO_DEVICE); +} + +void eip93_hash_handle_result(struct crypto_async_request *async, int err) +{ + struct ahash_request *req = ahash_request_cast(async); + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + int digestsize = crypto_ahash_digestsize(ahash); + struct sa_state *sa_state = &rctx->sa_state; + struct eip93_device *eip93 = ctx->eip93; + int i; + + dma_unmap_single(eip93->dev, rctx->sa_state_base, + sizeof(*sa_state), DMA_FROM_DEVICE); + + /* + * With partial_hash assume SHA256_DIGEST_SIZE buffer is passed. + * This is to handle SHA224 that have a 32 byte intermediate digest. + */ + if (rctx->partial_hash) + digestsize = SHA256_DIGEST_SIZE; + + if (rctx->finalize || rctx->partial_hash) { + /* bytes needs to be swapped for req->result */ + if (!IS_HASH_MD5(ctx->flags)) { + for (i = 0; i < digestsize / sizeof(u32); i++) { + u32 *digest = (u32 *)sa_state->state_i_digest; + + digest[i] = be32_to_cpu((__be32 __force)digest[i]); + } + } + + memcpy(req->result, sa_state->state_i_digest, digestsize); + } + + eip93_hash_free_sa_record(req); + eip93_hash_free_data_blocks(req); + + ahash_request_complete(req, err); +} + +static void eip93_hash_init_sa_state_digest(u32 hash, u8 *digest) +{ + static const u32 sha256_init[] = { + SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, + SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 + }; + static const u32 sha224_init[] = { + SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, + SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7 + }; + static const u32 sha1_init[] = { + SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 + }; + static const u32 md5_init[] = { + MD5_H0, MD5_H1, MD5_H2, MD5_H3 + }; + + /* Init HASH constant */ + switch (hash) { + case EIP93_HASH_SHA256: + memcpy(digest, sha256_init, sizeof(sha256_init)); + return; + case EIP93_HASH_SHA224: + memcpy(digest, sha224_init, sizeof(sha224_init)); + return; + case EIP93_HASH_SHA1: + memcpy(digest, sha1_init, sizeof(sha1_init)); + return; + case EIP93_HASH_MD5: + memcpy(digest, md5_init, sizeof(md5_init)); + return; + default: /* Impossible */ + return; + } +} + +static void eip93_hash_export_sa_state(struct ahash_request *req, + struct eip93_hash_export_state *state) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct sa_state *sa_state = &rctx->sa_state; + + /* + * EIP93 have special handling for state_byte_cnt in sa_state. + * Even if a zero packet is passed (and a BADMSG is returned), + * state_byte_cnt is incremented to the digest handled (with the hash + * primitive). This is problematic with export/import as EIP93 + * expect 0 state_byte_cnt for the very first iteration. 
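+ *
+ * (Editor's note, illustrative summary rather than part of the original
+ * patch: rctx->len tracks how many bytes have actually been queued, so
+ * while it is still 0 the exported byte count is forced to zero and a
+ * later import starts the engine from a clean state; once data has been
+ * processed, the engine's own state_byte_cnt is exported unchanged.)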
+ */ + if (!rctx->len) + memset(state->state_len, 0, sizeof(u32) * 2); + else + memcpy(state->state_len, sa_state->state_byte_cnt, + sizeof(u32) * 2); + memcpy(state->state_hash, sa_state->state_i_digest, + SHA256_DIGEST_SIZE); + state->len = rctx->len; + state->data_used = rctx->data_used; +} + +static void __eip93_hash_init(struct ahash_request *req) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct sa_record *sa_record = &rctx->sa_record; + int digestsize; + + digestsize = crypto_ahash_digestsize(ahash); + + eip93_set_sa_record(sa_record, 0, ctx->flags); + sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_FROM_STATE; + sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_HASH; + sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_OPCODE; + sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_OPCODE, + EIP93_SA_CMD_OPCODE_BASIC_OUT_HASH); + sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH; + sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, + digestsize / sizeof(u32)); + + /* + * HMAC special handling + * Enabling CMD_HMAC force the inner hash to be always finalized. + * This cause problems on handling message > 64 byte as we + * need to produce intermediate inner hash on sending intermediate + * 64 bytes blocks. + * + * To handle this, enable CMD_HMAC only on the last block. + * We make a duplicate of sa_record and on the last descriptor, + * we pass a dedicated sa_record with CMD_HMAC enabled to make + * EIP93 apply the outer hash. + */ + if (IS_HMAC(ctx->flags)) { + struct sa_record *sa_record_hmac = &rctx->sa_record_hmac; + + memcpy(sa_record_hmac, sa_record, sizeof(*sa_record)); + /* Copy pre-hashed opad for HMAC */ + memcpy(sa_record_hmac->sa_o_digest, ctx->opad, SHA256_DIGEST_SIZE); + + /* Disable HMAC for hash normal sa_record */ + sa_record->sa_cmd1_word &= ~EIP93_SA_CMD_HMAC; + } + + rctx->len = 0; + rctx->data_used = 0; + rctx->partial_hash = false; + rctx->finalize = false; + INIT_LIST_HEAD(&rctx->blocks); +} + +static int eip93_send_hash_req(struct crypto_async_request *async, u8 *data, + dma_addr_t *data_dma, u32 len, bool last) +{ + struct ahash_request *req = ahash_request_cast(async); + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct eip93_device *eip93 = ctx->eip93; + struct eip93_descriptor cdesc = { }; + dma_addr_t src_addr; + int ret; + + /* Map block data to DMA */ + src_addr = dma_map_single(eip93->dev, data, len, DMA_TO_DEVICE); + ret = dma_mapping_error(eip93->dev, src_addr); + if (ret) + return ret; + + cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN, + EIP93_PE_CTRL_HOST_READY); + cdesc.sa_addr = rctx->sa_record_base; + cdesc.arc4_addr = 0; + + cdesc.state_addr = rctx->sa_state_base; + cdesc.src_addr = src_addr; + cdesc.pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY, + EIP93_PE_LENGTH_HOST_READY); + cdesc.pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH, + len); + + cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_HASH); + + if (last) { + int crypto_async_idr; + + if (rctx->finalize && !rctx->partial_hash) { + /* For last block, pass sa_record with CMD_HMAC enabled */ + if (IS_HMAC(ctx->flags)) { + struct sa_record *sa_record_hmac = &rctx->sa_record_hmac; + + rctx->sa_record_hmac_base = dma_map_single(eip93->dev, + sa_record_hmac, + 
sizeof(*sa_record_hmac), + DMA_TO_DEVICE); + ret = dma_mapping_error(eip93->dev, rctx->sa_record_hmac_base); + if (ret) + return ret; + + cdesc.sa_addr = rctx->sa_record_hmac_base; + } + + cdesc.pe_ctrl_stat_word |= EIP93_PE_CTRL_PE_HASH_FINAL; + } + + scoped_guard(spinlock_bh, &eip93->ring->idr_lock) + crypto_async_idr = idr_alloc(&eip93->ring->crypto_async_idr, async, 0, + EIP93_RING_NUM - 1, GFP_ATOMIC); + + cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) | + FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_LAST); + } + +again: + scoped_guard(spinlock_irqsave, &eip93->ring->write_lock) + ret = eip93_put_descriptor(eip93, &cdesc); + if (ret) { + usleep_range(EIP93_RING_BUSY_DELAY, + EIP93_RING_BUSY_DELAY * 2); + goto again; + } + + /* Writing new descriptor count starts DMA action */ + writel(1, eip93->base + EIP93_REG_PE_CD_COUNT); + + *data_dma = src_addr; + return 0; +} + +static int eip93_hash_init(struct ahash_request *req) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct sa_state *sa_state = &rctx->sa_state; + + memset(sa_state->state_byte_cnt, 0, sizeof(u32) * 2); + eip93_hash_init_sa_state_digest(ctx->flags & EIP93_HASH_MASK, + sa_state->state_i_digest); + + __eip93_hash_init(req); + + /* For HMAC setup the initial block for ipad */ + if (IS_HMAC(ctx->flags)) { + memcpy(rctx->data, ctx->ipad, SHA256_BLOCK_SIZE); + + rctx->data_used = SHA256_BLOCK_SIZE; + rctx->len += SHA256_BLOCK_SIZE; + } + + return 0; +} + +/* + * With complete_req true, we wait for the engine to consume all the block in list, + * else we just queue the block to the engine as final() will wait. This is useful + * for finup(). + */ +static int __eip93_hash_update(struct ahash_request *req, bool complete_req) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_async_request *async = &req->base; + unsigned int read, to_consume = req->nbytes; + unsigned int max_read, consumed = 0; + struct mkt_hash_block *block; + bool wait_req = false; + int offset; + int ret; + + /* Get the offset and available space to fill req data */ + offset = rctx->data_used; + max_read = SHA256_BLOCK_SIZE - offset; + + /* Consume req in block of SHA256_BLOCK_SIZE. + * to_read is initially set to space available in the req data + * and then reset to SHA256_BLOCK_SIZE. + */ + while (to_consume > max_read) { + block = kzalloc(sizeof(*block), GFP_ATOMIC); + if (!block) { + ret = -ENOMEM; + goto free_blocks; + } + + read = sg_pcopy_to_buffer(req->src, sg_nents(req->src), + block->data + offset, + max_read, consumed); + + /* + * For first iteration only, copy req data to block + * and reset offset and max_read for next iteration. 
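+ *
+ * (Editor's note, worked example not in the original patch: if a previous
+ * update() left 20 bytes cached in rctx->data, offset is 20 and max_read
+ * is 44, so the first block is built from those 20 cached bytes plus
+ * 44 bytes copied from req->src; every later block is a plain 64-byte
+ * (SHA256_BLOCK_SIZE) copy from req->src.)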
+ */ + if (offset > 0) { + memcpy(block->data, rctx->data, offset); + offset = 0; + max_read = SHA256_BLOCK_SIZE; + } + + list_add(&block->list, &rctx->blocks); + to_consume -= read; + consumed += read; + } + + /* Write the remaining data to req data */ + read = sg_pcopy_to_buffer(req->src, sg_nents(req->src), + rctx->data + offset, to_consume, + consumed); + rctx->data_used = offset + read; + + /* Update counter with processed bytes */ + rctx->len += read + consumed; + + /* Consume all the block added to list */ + list_for_each_entry_reverse(block, &rctx->blocks, list) { + wait_req = complete_req && + list_is_first(&block->list, &rctx->blocks); + + ret = eip93_send_hash_req(async, block->data, + &block->data_dma, + SHA256_BLOCK_SIZE, wait_req); + if (ret) + goto free_blocks; + } + + return wait_req ? -EINPROGRESS : 0; + +free_blocks: + eip93_hash_free_data_blocks(req); + + return ret; +} + +static int eip93_hash_update(struct ahash_request *req) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct sa_record *sa_record = &rctx->sa_record; + struct sa_state *sa_state = &rctx->sa_state; + struct eip93_device *eip93 = ctx->eip93; + int ret; + + if (!req->nbytes) + return 0; + + rctx->sa_state_base = dma_map_single(eip93->dev, sa_state, + sizeof(*sa_state), + DMA_TO_DEVICE); + ret = dma_mapping_error(eip93->dev, rctx->sa_state_base); + if (ret) + return ret; + + rctx->sa_record_base = dma_map_single(eip93->dev, sa_record, + sizeof(*sa_record), + DMA_TO_DEVICE); + ret = dma_mapping_error(eip93->dev, rctx->sa_record_base); + if (ret) + goto free_sa_state; + + ret = __eip93_hash_update(req, true); + if (ret && ret != -EINPROGRESS) + goto free_sa_record; + + return ret; + +free_sa_record: + dma_unmap_single(eip93->dev, rctx->sa_record_base, + sizeof(*sa_record), DMA_TO_DEVICE); + +free_sa_state: + dma_unmap_single(eip93->dev, rctx->sa_state_base, + sizeof(*sa_state), DMA_TO_DEVICE); + + return ret; +} + +/* + * With map_data true, we map the sa_record and sa_state. 
This is needed + * for finup() as the they are mapped before calling update() + */ +static int __eip93_hash_final(struct ahash_request *req, bool map_dma) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct crypto_async_request *async = &req->base; + struct sa_record *sa_record = &rctx->sa_record; + struct sa_state *sa_state = &rctx->sa_state; + struct eip93_device *eip93 = ctx->eip93; + int ret; + + /* EIP93 can't handle zero bytes hash */ + if (!rctx->len && !IS_HMAC(ctx->flags)) { + switch ((ctx->flags & EIP93_HASH_MASK)) { + case EIP93_HASH_SHA256: + memcpy(req->result, sha256_zero_message_hash, + SHA256_DIGEST_SIZE); + break; + case EIP93_HASH_SHA224: + memcpy(req->result, sha224_zero_message_hash, + SHA224_DIGEST_SIZE); + break; + case EIP93_HASH_SHA1: + memcpy(req->result, sha1_zero_message_hash, + SHA1_DIGEST_SIZE); + break; + case EIP93_HASH_MD5: + memcpy(req->result, md5_zero_message_hash, + MD5_DIGEST_SIZE); + break; + default: /* Impossible */ + return -EINVAL; + } + + return 0; + } + + /* Signal interrupt from engine is for last block */ + rctx->finalize = true; + + if (map_dma) { + rctx->sa_state_base = dma_map_single(eip93->dev, sa_state, + sizeof(*sa_state), + DMA_TO_DEVICE); + ret = dma_mapping_error(eip93->dev, rctx->sa_state_base); + if (ret) + return ret; + + rctx->sa_record_base = dma_map_single(eip93->dev, sa_record, + sizeof(*sa_record), + DMA_TO_DEVICE); + ret = dma_mapping_error(eip93->dev, rctx->sa_record_base); + if (ret) + goto free_sa_state; + } + + /* Send last block */ + ret = eip93_send_hash_req(async, rctx->data, &rctx->data_dma, + rctx->data_used, true); + if (ret) + goto free_blocks; + + return -EINPROGRESS; + +free_blocks: + eip93_hash_free_data_blocks(req); + + dma_unmap_single(eip93->dev, rctx->sa_record_base, + sizeof(*sa_record), DMA_TO_DEVICE); + +free_sa_state: + dma_unmap_single(eip93->dev, rctx->sa_state_base, + sizeof(*sa_state), DMA_TO_DEVICE); + + return ret; +} + +static int eip93_hash_final(struct ahash_request *req) +{ + return __eip93_hash_final(req, true); +} + +static int eip93_hash_finup(struct ahash_request *req) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct sa_record *sa_record = &rctx->sa_record; + struct sa_state *sa_state = &rctx->sa_state; + struct eip93_device *eip93 = ctx->eip93; + int ret; + + if (rctx->len + req->nbytes || IS_HMAC(ctx->flags)) { + rctx->sa_state_base = dma_map_single(eip93->dev, sa_state, + sizeof(*sa_state), + DMA_TO_DEVICE); + ret = dma_mapping_error(eip93->dev, rctx->sa_state_base); + if (ret) + return ret; + + rctx->sa_record_base = dma_map_single(eip93->dev, sa_record, + sizeof(*sa_record), + DMA_TO_DEVICE); + ret = dma_mapping_error(eip93->dev, rctx->sa_record_base); + if (ret) + goto free_sa_state; + + ret = __eip93_hash_update(req, false); + if (ret) + goto free_sa_record; + } + + return __eip93_hash_final(req, false); + +free_sa_record: + dma_unmap_single(eip93->dev, rctx->sa_record_base, + sizeof(*sa_record), DMA_TO_DEVICE); +free_sa_state: + dma_unmap_single(eip93->dev, rctx->sa_state_base, + sizeof(*sa_state), DMA_TO_DEVICE); + + return ret; +} + +static int eip93_hash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key, + u32 keylen) +{ + unsigned int digestsize = crypto_ahash_digestsize(ahash); + struct crypto_tfm *tfm = 
crypto_ahash_tfm(ahash); + struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm); + + return eip93_hmac_setkey(ctx->flags, key, keylen, digestsize, + ctx->ipad, ctx->opad, true); +} + +static int eip93_hash_cra_init(struct crypto_tfm *tfm) +{ + struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm); + struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg, + struct eip93_alg_template, alg.ahash.halg.base); + + crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm), + sizeof(struct eip93_hash_reqctx)); + + ctx->eip93 = tmpl->eip93; + ctx->flags = tmpl->flags; + + return 0; +} + +static int eip93_hash_digest(struct ahash_request *req) +{ + int ret; + + ret = eip93_hash_init(req); + if (ret) + return ret; + + return eip93_hash_finup(req); +} + +static int eip93_hash_import(struct ahash_request *req, const void *in) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + const struct eip93_hash_export_state *state = in; + struct sa_state *sa_state = &rctx->sa_state; + + memcpy(sa_state->state_byte_cnt, state->state_len, sizeof(u32) * 2); + memcpy(sa_state->state_i_digest, state->state_hash, SHA256_DIGEST_SIZE); + + __eip93_hash_init(req); + + rctx->len = state->len; + rctx->data_used = state->data_used; + + /* Skip copying data if we have nothing to copy */ + if (rctx->len) + memcpy(rctx->data, state->data, rctx->data_used); + + return 0; +} + +static int eip93_hash_export(struct ahash_request *req, void *out) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct eip93_hash_export_state *state = out; + + /* Save the first block in state data */ + if (rctx->len) + memcpy(state->data, rctx->data, rctx->data_used); + + eip93_hash_export_sa_state(req, state); + + return 0; +} + +struct eip93_alg_template eip93_alg_md5 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_MD5, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + .digest = eip93_hash_digest, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct eip93_hash_export_state), + .base = { + .cra_name = "md5", + .cra_driver_name = "md5-eip93", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +struct eip93_alg_template eip93_alg_sha1 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_SHA1, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + .digest = eip93_hash_digest, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct eip93_hash_export_state), + .base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-eip93", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +struct eip93_alg_template eip93_alg_sha224 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_SHA224, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + 
.digest = eip93_hash_digest, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct eip93_hash_export_state), + .base = { + .cra_name = "sha224", + .cra_driver_name = "sha224-eip93", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +struct eip93_alg_template eip93_alg_sha256 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_SHA256, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + .digest = eip93_hash_digest, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct eip93_hash_export_state), + .base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-eip93", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +struct eip93_alg_template eip93_alg_hmac_md5 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + .digest = eip93_hash_digest, + .setkey = eip93_hash_hmac_setkey, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct eip93_hash_export_state), + .base = { + .cra_name = "hmac(md5)", + .cra_driver_name = "hmac(md5-eip93)", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +struct eip93_alg_template eip93_alg_hmac_sha1 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + .digest = eip93_hash_digest, + .setkey = eip93_hash_hmac_setkey, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct eip93_hash_export_state), + .base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "hmac(sha1-eip93)", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +struct eip93_alg_template eip93_alg_hmac_sha224 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + .digest = eip93_hash_digest, + .setkey = eip93_hash_hmac_setkey, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct 
eip93_hash_export_state), + .base = { + .cra_name = "hmac(sha224)", + .cra_driver_name = "hmac(sha224-eip93)", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +struct eip93_alg_template eip93_alg_hmac_sha256 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + .digest = eip93_hash_digest, + .setkey = eip93_hash_hmac_setkey, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct eip93_hash_export_state), + .base = { + .cra_name = "hmac(sha256)", + .cra_driver_name = "hmac(sha256-eip93)", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; diff --git a/drivers/crypto/inside-secure/eip93/eip93-hash.h b/drivers/crypto/inside-secure/eip93/eip93-hash.h new file mode 100644 index 000000000000..556f22fc1dd0 --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-hash.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ +#ifndef _EIP93_HASH_H_ +#define _EIP93_HASH_H_ + +#include <crypto/sha2.h> + +#include "eip93-main.h" +#include "eip93-regs.h" + +struct eip93_hash_ctx { + struct eip93_device *eip93; + u32 flags; + + u8 ipad[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); + u8 opad[SHA256_DIGEST_SIZE] __aligned(sizeof(u32)); +}; + +struct eip93_hash_reqctx { + /* Placement is important for DMA align */ + struct { + struct sa_record sa_record; + struct sa_record sa_record_hmac; + struct sa_state sa_state; + } __aligned(CRYPTO_DMA_ALIGN); + + dma_addr_t sa_record_base; + dma_addr_t sa_record_hmac_base; + dma_addr_t sa_state_base; + + /* Don't enable HASH_FINALIZE when last block is sent */ + bool partial_hash; + + /* Set to signal interrupt is for final packet */ + bool finalize; + + /* + * EIP93 requires data to be accumulated in block of 64 bytes + * for intermediate hash calculation. 
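+ *
+ * (Editor's note, not part of the original patch: in this layout, len
+ * counts every byte queued so far, including the ipad block injected for
+ * HMAC, while data_used is the number of trailing bytes parked in data[]
+ * waiting to fill a complete SHA256_BLOCK_SIZE block.)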
+ */ + u64 len; + u32 data_used; + + u8 data[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); + dma_addr_t data_dma; + + struct list_head blocks; +}; + +struct mkt_hash_block { + struct list_head list; + u8 data[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); + dma_addr_t data_dma; +}; + +struct eip93_hash_export_state { + u64 len; + u32 data_used; + + u32 state_len[2]; + u8 state_hash[SHA256_DIGEST_SIZE] __aligned(sizeof(u32)); + + u8 data[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); +}; + +void eip93_hash_handle_result(struct crypto_async_request *async, int err); + +extern struct eip93_alg_template eip93_alg_md5; +extern struct eip93_alg_template eip93_alg_sha1; +extern struct eip93_alg_template eip93_alg_sha224; +extern struct eip93_alg_template eip93_alg_sha256; +extern struct eip93_alg_template eip93_alg_hmac_md5; +extern struct eip93_alg_template eip93_alg_hmac_sha1; +extern struct eip93_alg_template eip93_alg_hmac_sha224; +extern struct eip93_alg_template eip93_alg_hmac_sha256; + +#endif /* _EIP93_HASH_H_ */ diff --git a/drivers/crypto/inside-secure/eip93/eip93-main.c b/drivers/crypto/inside-secure/eip93/eip93-main.c new file mode 100644 index 000000000000..0b38a567da0e --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-main.c @@ -0,0 +1,501 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ + +#include <linux/atomic.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <crypto/aes.h> +#include <crypto/ctr.h> + +#include "eip93-main.h" +#include "eip93-regs.h" +#include "eip93-common.h" +#include "eip93-cipher.h" +#include "eip93-aes.h" +#include "eip93-des.h" +#include "eip93-aead.h" +#include "eip93-hash.h" + +static struct eip93_alg_template *eip93_algs[] = { + &eip93_alg_ecb_des, + &eip93_alg_cbc_des, + &eip93_alg_ecb_des3_ede, + &eip93_alg_cbc_des3_ede, + &eip93_alg_ecb_aes, + &eip93_alg_cbc_aes, + &eip93_alg_ctr_aes, + &eip93_alg_rfc3686_aes, + &eip93_alg_authenc_hmac_md5_cbc_des, + &eip93_alg_authenc_hmac_sha1_cbc_des, + &eip93_alg_authenc_hmac_sha224_cbc_des, + &eip93_alg_authenc_hmac_sha256_cbc_des, + &eip93_alg_authenc_hmac_md5_cbc_des3_ede, + &eip93_alg_authenc_hmac_sha1_cbc_des3_ede, + &eip93_alg_authenc_hmac_sha224_cbc_des3_ede, + &eip93_alg_authenc_hmac_sha256_cbc_des3_ede, + &eip93_alg_authenc_hmac_md5_cbc_aes, + &eip93_alg_authenc_hmac_sha1_cbc_aes, + &eip93_alg_authenc_hmac_sha224_cbc_aes, + &eip93_alg_authenc_hmac_sha256_cbc_aes, + &eip93_alg_authenc_hmac_md5_rfc3686_aes, + &eip93_alg_authenc_hmac_sha1_rfc3686_aes, + &eip93_alg_authenc_hmac_sha224_rfc3686_aes, + &eip93_alg_authenc_hmac_sha256_rfc3686_aes, + &eip93_alg_md5, + &eip93_alg_sha1, + &eip93_alg_sha224, + &eip93_alg_sha256, + &eip93_alg_hmac_md5, + &eip93_alg_hmac_sha1, + &eip93_alg_hmac_sha224, + &eip93_alg_hmac_sha256, +}; + +inline void eip93_irq_disable(struct eip93_device *eip93, u32 mask) +{ + __raw_writel(mask, eip93->base + EIP93_REG_MASK_DISABLE); +} + +inline void eip93_irq_enable(struct eip93_device *eip93, u32 mask) +{ + __raw_writel(mask, eip93->base + EIP93_REG_MASK_ENABLE); +} + +inline void eip93_irq_clear(struct eip93_device *eip93, u32 mask) +{ + __raw_writel(mask, eip93->base + EIP93_REG_INT_CLR); +} + +static void eip93_unregister_algs(unsigned int i) +{ + unsigned int j; + + for (j = 
0; j < i; j++) { + switch (eip93_algs[j]->type) { + case EIP93_ALG_TYPE_SKCIPHER: + crypto_unregister_skcipher(&eip93_algs[j]->alg.skcipher); + break; + case EIP93_ALG_TYPE_AEAD: + crypto_unregister_aead(&eip93_algs[j]->alg.aead); + break; + case EIP93_ALG_TYPE_HASH: + crypto_unregister_ahash(&eip93_algs[i]->alg.ahash); + break; + } + } +} + +static int eip93_register_algs(struct eip93_device *eip93, u32 supported_algo_flags) +{ + unsigned int i; + int ret = 0; + + for (i = 0; i < ARRAY_SIZE(eip93_algs); i++) { + u32 alg_flags = eip93_algs[i]->flags; + + eip93_algs[i]->eip93 = eip93; + + if ((IS_DES(alg_flags) || IS_3DES(alg_flags)) && + !(supported_algo_flags & EIP93_PE_OPTION_TDES)) + continue; + + if (IS_AES(alg_flags)) { + if (!(supported_algo_flags & EIP93_PE_OPTION_AES)) + continue; + + if (!IS_HMAC(alg_flags)) { + if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY128) + eip93_algs[i]->alg.skcipher.max_keysize = + AES_KEYSIZE_128; + + if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY192) + eip93_algs[i]->alg.skcipher.max_keysize = + AES_KEYSIZE_192; + + if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY256) + eip93_algs[i]->alg.skcipher.max_keysize = + AES_KEYSIZE_256; + + if (IS_RFC3686(alg_flags)) + eip93_algs[i]->alg.skcipher.max_keysize += + CTR_RFC3686_NONCE_SIZE; + } + } + + if (IS_HASH_MD5(alg_flags) && + !(supported_algo_flags & EIP93_PE_OPTION_MD5)) + continue; + + if (IS_HASH_SHA1(alg_flags) && + !(supported_algo_flags & EIP93_PE_OPTION_SHA_1)) + continue; + + if (IS_HASH_SHA224(alg_flags) && + !(supported_algo_flags & EIP93_PE_OPTION_SHA_224)) + continue; + + if (IS_HASH_SHA256(alg_flags) && + !(supported_algo_flags & EIP93_PE_OPTION_SHA_256)) + continue; + + switch (eip93_algs[i]->type) { + case EIP93_ALG_TYPE_SKCIPHER: + ret = crypto_register_skcipher(&eip93_algs[i]->alg.skcipher); + break; + case EIP93_ALG_TYPE_AEAD: + ret = crypto_register_aead(&eip93_algs[i]->alg.aead); + break; + case EIP93_ALG_TYPE_HASH: + ret = crypto_register_ahash(&eip93_algs[i]->alg.ahash); + break; + } + if (ret) + goto fail; + } + + return 0; + +fail: + eip93_unregister_algs(i); + + return ret; +} + +static void eip93_handle_result_descriptor(struct eip93_device *eip93) +{ + struct crypto_async_request *async; + struct eip93_descriptor *rdesc; + u16 desc_flags, crypto_idr; + bool last_entry; + int handled, left, err; + u32 pe_ctrl_stat; + u32 pe_length; + +get_more: + handled = 0; + + left = readl(eip93->base + EIP93_REG_PE_RD_COUNT) & EIP93_PE_RD_COUNT; + + if (!left) { + eip93_irq_clear(eip93, EIP93_INT_RDR_THRESH); + eip93_irq_enable(eip93, EIP93_INT_RDR_THRESH); + return; + } + + last_entry = false; + + while (left) { + scoped_guard(spinlock_irqsave, &eip93->ring->read_lock) + rdesc = eip93_get_descriptor(eip93); + if (IS_ERR(rdesc)) { + dev_err(eip93->dev, "Ndesc: %d nreq: %d\n", + handled, left); + err = -EIO; + break; + } + /* make sure DMA is finished writing */ + do { + pe_ctrl_stat = READ_ONCE(rdesc->pe_ctrl_stat_word); + pe_length = READ_ONCE(rdesc->pe_length_word); + } while (FIELD_GET(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN, pe_ctrl_stat) != + EIP93_PE_CTRL_PE_READY || + FIELD_GET(EIP93_PE_LENGTH_HOST_PE_READY, pe_length) != + EIP93_PE_LENGTH_PE_READY); + + err = rdesc->pe_ctrl_stat_word & (EIP93_PE_CTRL_PE_EXT_ERR_CODE | + EIP93_PE_CTRL_PE_EXT_ERR | + EIP93_PE_CTRL_PE_SEQNUM_ERR | + EIP93_PE_CTRL_PE_PAD_ERR | + EIP93_PE_CTRL_PE_AUTH_ERR); + + desc_flags = FIELD_GET(EIP93_PE_USER_ID_DESC_FLAGS, rdesc->user_id); + crypto_idr = FIELD_GET(EIP93_PE_USER_ID_CRYPTO_IDR, rdesc->user_id); + 
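+ /*
+ * Editor's note (assumption, not part of the original patch): writing 1
+ * to PE_RD_COUNT appears to acknowledge one consumed result descriptor
+ * back to the engine, the result-ring counterpart of the PE_CD_COUNT
+ * write that starts processing when a command descriptor is queued.
+ */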
+ writel(1, eip93->base + EIP93_REG_PE_RD_COUNT); + eip93_irq_clear(eip93, EIP93_INT_RDR_THRESH); + + handled++; + left--; + + if (desc_flags & EIP93_DESC_LAST) { + last_entry = true; + break; + } + } + + if (!last_entry) + goto get_more; + + /* Get crypto async ref only for last descriptor */ + scoped_guard(spinlock_bh, &eip93->ring->idr_lock) { + async = idr_find(&eip93->ring->crypto_async_idr, crypto_idr); + idr_remove(&eip93->ring->crypto_async_idr, crypto_idr); + } + + /* Parse error in ctrl stat word */ + err = eip93_parse_ctrl_stat_err(eip93, err); + + if (desc_flags & EIP93_DESC_SKCIPHER) + eip93_skcipher_handle_result(async, err); + + if (desc_flags & EIP93_DESC_AEAD) + eip93_aead_handle_result(async, err); + + if (desc_flags & EIP93_DESC_HASH) + eip93_hash_handle_result(async, err); + + goto get_more; +} + +static void eip93_done_task(unsigned long data) +{ + struct eip93_device *eip93 = (struct eip93_device *)data; + + eip93_handle_result_descriptor(eip93); +} + +static irqreturn_t eip93_irq_handler(int irq, void *data) +{ + struct eip93_device *eip93 = data; + u32 irq_status; + + irq_status = readl(eip93->base + EIP93_REG_INT_MASK_STAT); + if (FIELD_GET(EIP93_INT_RDR_THRESH, irq_status)) { + eip93_irq_disable(eip93, EIP93_INT_RDR_THRESH); + tasklet_schedule(&eip93->ring->done_task); + return IRQ_HANDLED; + } + + /* Ignore errors in AUTO mode, handled by the RDR */ + eip93_irq_clear(eip93, irq_status); + if (irq_status) + eip93_irq_disable(eip93, irq_status); + + return IRQ_NONE; +} + +static void eip93_initialize(struct eip93_device *eip93, u32 supported_algo_flags) +{ + u32 val; + + /* Reset PE and rings */ + val = EIP93_PE_CONFIG_RST_PE | EIP93_PE_CONFIG_RST_RING; + val |= EIP93_PE_TARGET_AUTO_RING_MODE; + /* For Auto more, update the CDR ring owner after processing */ + val |= EIP93_PE_CONFIG_EN_CDR_UPDATE; + writel(val, eip93->base + EIP93_REG_PE_CONFIG); + + /* Wait for PE and ring to reset */ + usleep_range(10, 20); + + /* Release PE and ring reset */ + val = readl(eip93->base + EIP93_REG_PE_CONFIG); + val &= ~(EIP93_PE_CONFIG_RST_PE | EIP93_PE_CONFIG_RST_RING); + writel(val, eip93->base + EIP93_REG_PE_CONFIG); + + /* Config Clocks */ + val = EIP93_PE_CLOCK_EN_PE_CLK; + if (supported_algo_flags & EIP93_PE_OPTION_TDES) + val |= EIP93_PE_CLOCK_EN_DES_CLK; + if (supported_algo_flags & EIP93_PE_OPTION_AES) + val |= EIP93_PE_CLOCK_EN_AES_CLK; + if (supported_algo_flags & + (EIP93_PE_OPTION_MD5 | EIP93_PE_OPTION_SHA_1 | EIP93_PE_OPTION_SHA_224 | + EIP93_PE_OPTION_SHA_256)) + val |= EIP93_PE_CLOCK_EN_HASH_CLK; + writel(val, eip93->base + EIP93_REG_PE_CLOCK_CTRL); + + /* Config DMA thresholds */ + val = FIELD_PREP(EIP93_PE_OUTBUF_THRESH, 128) | + FIELD_PREP(EIP93_PE_INBUF_THRESH, 128); + writel(val, eip93->base + EIP93_REG_PE_BUF_THRESH); + + /* Clear/ack all interrupts before disable all */ + eip93_irq_clear(eip93, EIP93_INT_ALL); + eip93_irq_disable(eip93, EIP93_INT_ALL); + + /* Setup CRD threshold to trigger interrupt */ + val = FIELD_PREP(EIPR93_PE_CDR_THRESH, EIP93_RING_NUM - EIP93_RING_BUSY); + /* + * Configure RDR interrupt to be triggered if RD counter is not 0 + * for more than 2^(N+10) system clocks. 
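+ *
+ * (Editor's note, arithmetic illustration only: with N = 5 as programmed
+ * below, that is 2^15 = 32768 system clocks before the timeout interrupt
+ * fires, even when the ring threshold has not been reached.)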
+ */ + val |= FIELD_PREP(EIPR93_PE_RD_TIMEOUT, 5) | EIPR93_PE_TIMEROUT_EN; + writel(val, eip93->base + EIP93_REG_PE_RING_THRESH); +} + +static void eip93_desc_free(struct eip93_device *eip93) +{ + writel(0, eip93->base + EIP93_REG_PE_RING_CONFIG); + writel(0, eip93->base + EIP93_REG_PE_CDR_BASE); + writel(0, eip93->base + EIP93_REG_PE_RDR_BASE); +} + +static int eip93_set_ring(struct eip93_device *eip93, struct eip93_desc_ring *ring) +{ + ring->offset = sizeof(struct eip93_descriptor); + ring->base = dmam_alloc_coherent(eip93->dev, + sizeof(struct eip93_descriptor) * EIP93_RING_NUM, + &ring->base_dma, GFP_KERNEL); + if (!ring->base) + return -ENOMEM; + + ring->write = ring->base; + ring->base_end = ring->base + sizeof(struct eip93_descriptor) * (EIP93_RING_NUM - 1); + ring->read = ring->base; + + return 0; +} + +static int eip93_desc_init(struct eip93_device *eip93) +{ + struct eip93_desc_ring *cdr = &eip93->ring->cdr; + struct eip93_desc_ring *rdr = &eip93->ring->rdr; + int ret; + u32 val; + + ret = eip93_set_ring(eip93, cdr); + if (ret) + return ret; + + ret = eip93_set_ring(eip93, rdr); + if (ret) + return ret; + + writel((u32 __force)cdr->base_dma, eip93->base + EIP93_REG_PE_CDR_BASE); + writel((u32 __force)rdr->base_dma, eip93->base + EIP93_REG_PE_RDR_BASE); + + val = FIELD_PREP(EIP93_PE_RING_SIZE, EIP93_RING_NUM - 1); + writel(val, eip93->base + EIP93_REG_PE_RING_CONFIG); + + return 0; +} + +static void eip93_cleanup(struct eip93_device *eip93) +{ + tasklet_kill(&eip93->ring->done_task); + + /* Clear/ack all interrupts before disable all */ + eip93_irq_clear(eip93, EIP93_INT_ALL); + eip93_irq_disable(eip93, EIP93_INT_ALL); + + writel(0, eip93->base + EIP93_REG_PE_CLOCK_CTRL); + + eip93_desc_free(eip93); + + idr_destroy(&eip93->ring->crypto_async_idr); +} + +static int eip93_crypto_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct eip93_device *eip93; + u32 ver, algo_flags; + int ret; + + eip93 = devm_kzalloc(dev, sizeof(*eip93), GFP_KERNEL); + if (!eip93) + return -ENOMEM; + + eip93->dev = dev; + platform_set_drvdata(pdev, eip93); + + eip93->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(eip93->base)) + return PTR_ERR(eip93->base); + + eip93->irq = platform_get_irq(pdev, 0); + if (eip93->irq < 0) + return eip93->irq; + + ret = devm_request_threaded_irq(eip93->dev, eip93->irq, eip93_irq_handler, + NULL, IRQF_ONESHOT, + dev_name(eip93->dev), eip93); + + eip93->ring = devm_kcalloc(eip93->dev, 1, sizeof(*eip93->ring), GFP_KERNEL); + if (!eip93->ring) + return -ENOMEM; + + ret = eip93_desc_init(eip93); + + if (ret) + return ret; + + tasklet_init(&eip93->ring->done_task, eip93_done_task, (unsigned long)eip93); + + spin_lock_init(&eip93->ring->read_lock); + spin_lock_init(&eip93->ring->write_lock); + + spin_lock_init(&eip93->ring->idr_lock); + idr_init(&eip93->ring->crypto_async_idr); + + algo_flags = readl(eip93->base + EIP93_REG_PE_OPTION_1); + + eip93_initialize(eip93, algo_flags); + + /* Init finished, enable RDR interrupt */ + eip93_irq_enable(eip93, EIP93_INT_RDR_THRESH); + + ret = eip93_register_algs(eip93, algo_flags); + if (ret) { + eip93_cleanup(eip93); + return ret; + } + + ver = readl(eip93->base + EIP93_REG_PE_REVISION); + /* EIP_EIP_NO:MAJOR_HW_REV:MINOR_HW_REV:HW_PATCH,PE(ALGO_FLAGS) */ + dev_info(eip93->dev, "EIP%lu:%lx:%lx:%lx,PE(0x%x:0x%x)\n", + FIELD_GET(EIP93_PE_REVISION_EIP_NO, ver), + FIELD_GET(EIP93_PE_REVISION_MAJ_HW_REV, ver), + FIELD_GET(EIP93_PE_REVISION_MIN_HW_REV, ver), + FIELD_GET(EIP93_PE_REVISION_HW_PATCH, ver), 
+ algo_flags, + readl(eip93->base + EIP93_REG_PE_OPTION_0)); + + return 0; +} + +static void eip93_crypto_remove(struct platform_device *pdev) +{ + struct eip93_device *eip93 = platform_get_drvdata(pdev); + + eip93_unregister_algs(ARRAY_SIZE(eip93_algs)); + eip93_cleanup(eip93); +} + +static const struct of_device_id eip93_crypto_of_match[] = { + { .compatible = "inside-secure,safexcel-eip93i", }, + { .compatible = "inside-secure,safexcel-eip93ie", }, + { .compatible = "inside-secure,safexcel-eip93is", }, + { .compatible = "inside-secure,safexcel-eip93ies", }, + /* IW not supported currently, missing AES-XCB-MAC/AES-CCM */ + /* { .compatible = "inside-secure,safexcel-eip93iw", }, */ + {} +}; +MODULE_DEVICE_TABLE(of, eip93_crypto_of_match); + +static struct platform_driver eip93_crypto_driver = { + .probe = eip93_crypto_probe, + .remove = eip93_crypto_remove, + .driver = { + .name = "inside-secure-eip93", + .of_match_table = eip93_crypto_of_match, + }, +}; +module_platform_driver(eip93_crypto_driver); + +MODULE_AUTHOR("Richard van Schagen <vschagen@cs.com>"); +MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>"); +MODULE_DESCRIPTION("Mediatek EIP-93 crypto engine driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/inside-secure/eip93/eip93-main.h b/drivers/crypto/inside-secure/eip93/eip93-main.h new file mode 100644 index 000000000000..79b078f0e5da --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-main.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ +#ifndef _EIP93_MAIN_H_ +#define _EIP93_MAIN_H_ + +#include <crypto/internal/aead.h> +#include <crypto/internal/hash.h> +#include <crypto/internal/skcipher.h> +#include <linux/bitfield.h> +#include <linux/interrupt.h> + +#define EIP93_RING_BUSY_DELAY 500 + +#define EIP93_RING_NUM 512 +#define EIP93_RING_BUSY 32 +#define EIP93_CRA_PRIORITY 1500 + +#define EIP93_RING_SA_STATE_ADDR(base, idx) ((base) + (idx)) +#define EIP93_RING_SA_STATE_DMA(dma_base, idx) ((u32 __force)(dma_base) + \ + ((idx) * sizeof(struct sa_state))) + +/* cipher algorithms */ +#define EIP93_ALG_DES BIT(0) +#define EIP93_ALG_3DES BIT(1) +#define EIP93_ALG_AES BIT(2) +#define EIP93_ALG_MASK GENMASK(2, 0) +/* hash and hmac algorithms */ +#define EIP93_HASH_MD5 BIT(3) +#define EIP93_HASH_SHA1 BIT(4) +#define EIP93_HASH_SHA224 BIT(5) +#define EIP93_HASH_SHA256 BIT(6) +#define EIP93_HASH_HMAC BIT(7) +#define EIP93_HASH_MASK GENMASK(6, 3) +/* cipher modes */ +#define EIP93_MODE_CBC BIT(8) +#define EIP93_MODE_ECB BIT(9) +#define EIP93_MODE_CTR BIT(10) +#define EIP93_MODE_RFC3686 BIT(11) +#define EIP93_MODE_MASK GENMASK(10, 8) + +/* cipher encryption/decryption operations */ +#define EIP93_ENCRYPT BIT(12) +#define EIP93_DECRYPT BIT(13) + +#define EIP93_BUSY BIT(14) + +/* descriptor flags */ +#define EIP93_DESC_DMA_IV BIT(0) +#define EIP93_DESC_IPSEC BIT(1) +#define EIP93_DESC_FINISH BIT(2) +#define EIP93_DESC_LAST BIT(3) +#define EIP93_DESC_FAKE_HMAC BIT(4) +#define EIP93_DESC_PRNG BIT(5) +#define EIP93_DESC_HASH BIT(6) +#define EIP93_DESC_AEAD BIT(7) +#define EIP93_DESC_SKCIPHER BIT(8) +#define EIP93_DESC_ASYNC BIT(9) + +#define IS_DMA_IV(desc_flags) ((desc_flags) & EIP93_DESC_DMA_IV) + +#define IS_DES(flags) ((flags) & EIP93_ALG_DES) +#define IS_3DES(flags) ((flags) & EIP93_ALG_3DES) +#define IS_AES(flags) ((flags) & EIP93_ALG_AES) + +#define IS_HASH_MD5(flags) ((flags) & EIP93_HASH_MD5) +#define IS_HASH_SHA1(flags) 
((flags) & EIP93_HASH_SHA1) +#define IS_HASH_SHA224(flags) ((flags) & EIP93_HASH_SHA224) +#define IS_HASH_SHA256(flags) ((flags) & EIP93_HASH_SHA256) +#define IS_HMAC(flags) ((flags) & EIP93_HASH_HMAC) + +#define IS_CBC(mode) ((mode) & EIP93_MODE_CBC) +#define IS_ECB(mode) ((mode) & EIP93_MODE_ECB) +#define IS_CTR(mode) ((mode) & EIP93_MODE_CTR) +#define IS_RFC3686(mode) ((mode) & EIP93_MODE_RFC3686) + +#define IS_BUSY(flags) ((flags) & EIP93_BUSY) + +#define IS_ENCRYPT(dir) ((dir) & EIP93_ENCRYPT) +#define IS_DECRYPT(dir) ((dir) & EIP93_DECRYPT) + +#define IS_CIPHER(flags) ((flags) & (EIP93_ALG_DES | \ + EIP93_ALG_3DES | \ + EIP93_ALG_AES)) + +#define IS_HASH(flags) ((flags) & (EIP93_HASH_MD5 | \ + EIP93_HASH_SHA1 | \ + EIP93_HASH_SHA224 | \ + EIP93_HASH_SHA256)) + +/** + * struct eip93_device - crypto engine device structure + */ +struct eip93_device { + void __iomem *base; + struct device *dev; + struct clk *clk; + int irq; + struct eip93_ring *ring; +}; + +struct eip93_desc_ring { + void *base; + void *base_end; + dma_addr_t base_dma; + /* write and read pointers */ + void *read; + void *write; + /* descriptor element offset */ + u32 offset; +}; + +struct eip93_state_pool { + void *base; + dma_addr_t base_dma; +}; + +struct eip93_ring { + struct tasklet_struct done_task; + /* command/result rings */ + struct eip93_desc_ring cdr; + struct eip93_desc_ring rdr; + spinlock_t write_lock; + spinlock_t read_lock; + /* aync idr */ + spinlock_t idr_lock; + struct idr crypto_async_idr; +}; + +enum eip93_alg_type { + EIP93_ALG_TYPE_AEAD, + EIP93_ALG_TYPE_SKCIPHER, + EIP93_ALG_TYPE_HASH, +}; + +struct eip93_alg_template { + struct eip93_device *eip93; + enum eip93_alg_type type; + u32 flags; + union { + struct aead_alg aead; + struct skcipher_alg skcipher; + struct ahash_alg ahash; + } alg; +}; + +#endif /* _EIP93_MAIN_H_ */ diff --git a/drivers/crypto/inside-secure/eip93/eip93-regs.h b/drivers/crypto/inside-secure/eip93/eip93-regs.h new file mode 100644 index 000000000000..0490b8d15131 --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-regs.h @@ -0,0 +1,335 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ +#ifndef REG_EIP93_H +#define REG_EIP93_H + +#define EIP93_REG_PE_CTRL_STAT 0x0 +#define EIP93_PE_CTRL_PE_PAD_CTRL_STAT GENMASK(31, 24) +#define EIP93_PE_CTRL_PE_EXT_ERR_CODE GENMASK(23, 20) +#define EIP93_PE_CTRL_PE_EXT_ERR_PROCESSING 0x8 +#define EIP93_PE_CTRL_PE_EXT_ERR_BLOCK_SIZE_ERR 0x7 +#define EIP93_PE_CTRL_PE_EXT_ERR_INVALID_PK_LENGTH 0x6 +#define EIP93_PE_CTRL_PE_EXT_ERR_ZERO_LENGTH 0x5 +#define EIP93_PE_CTRL_PE_EXT_ERR_SPI 0x4 +#define EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_ALGO 0x3 +#define EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_OP 0x2 +#define EIP93_PE_CTRL_PE_EXT_ERR_DESC_OWNER 0x1 +#define EIP93_PE_CTRL_PE_EXT_ERR_BUS 0x0 +#define EIP93_PE_CTRL_PE_EXT_ERR BIT(19) +#define EIP93_PE_CTRL_PE_SEQNUM_ERR BIT(18) +#define EIP93_PE_CTRL_PE_PAD_ERR BIT(17) +#define EIP93_PE_CTRL_PE_AUTH_ERR BIT(16) +#define EIP93_PE_CTRL_PE_PAD_VALUE GENMASK(15, 8) +#define EIP93_PE_CTRL_PE_PRNG_MODE GENMASK(7, 6) +#define EIP93_PE_CTRL_PE_HASH_FINAL BIT(4) +#define EIP93_PE_CTRL_PE_INIT_ARC4 BIT(3) +#define EIP93_PE_CTRL_PE_READY_DES_TRING_OWN GENMASK(1, 0) +#define EIP93_PE_CTRL_PE_READY 0x2 +#define EIP93_PE_CTRL_HOST_READY 0x1 +#define EIP93_REG_PE_SOURCE_ADDR 0x4 +#define EIP93_REG_PE_DEST_ADDR 0x8 +#define EIP93_REG_PE_SA_ADDR 0xc +#define EIP93_REG_PE_ADDR 0x10 
/* STATE_ADDR */ +/* + * Special implementation for user ID + * user_id in eip93_descriptor is used to identify the + * descriptor and is opaque and can be used by the driver + * in custom way. + * + * The usage of this should be to put an address to the crypto + * request struct from the kernel but this can't work in 64bit + * world. + * + * Also it's required to put some flags to identify the last + * descriptor. + * + * To handle this, split the u32 in 2 part: + * - 31:16 descriptor flags + * - 15:0 IDR to connect the crypto request address + */ +#define EIP93_REG_PE_USER_ID 0x18 +#define EIP93_PE_USER_ID_DESC_FLAGS GENMASK(31, 16) +#define EIP93_PE_USER_ID_CRYPTO_IDR GENMASK(15, 0) +#define EIP93_REG_PE_LENGTH 0x1c +#define EIP93_PE_LENGTH_BYPASS GENMASK(31, 24) +#define EIP93_PE_LENGTH_HOST_PE_READY GENMASK(23, 22) +#define EIP93_PE_LENGTH_PE_READY 0x2 +#define EIP93_PE_LENGTH_HOST_READY 0x1 +#define EIP93_PE_LENGTH_LENGTH GENMASK(19, 0) + +/* PACKET ENGINE RING configuration registers */ +#define EIP93_REG_PE_CDR_BASE 0x80 +#define EIP93_REG_PE_RDR_BASE 0x84 +#define EIP93_REG_PE_RING_CONFIG 0x88 +#define EIP93_PE_EN_EXT_TRIG BIT(31) +/* Absent in later revision of eip93 */ +/* #define EIP93_PE_RING_OFFSET GENMASK(23, 15) */ +#define EIP93_PE_RING_SIZE GENMASK(9, 0) +#define EIP93_REG_PE_RING_THRESH 0x8c +#define EIPR93_PE_TIMEROUT_EN BIT(31) +#define EIPR93_PE_RD_TIMEOUT GENMASK(29, 26) +#define EIPR93_PE_RDR_THRESH GENMASK(25, 16) +#define EIPR93_PE_CDR_THRESH GENMASK(9, 0) +#define EIP93_REG_PE_CD_COUNT 0x90 +#define EIP93_PE_CD_COUNT GENMASK(10, 0) +/* + * In the same register, writing a value in GENMASK(7, 0) will + * increment the descriptor count and start DMA action. + */ +#define EIP93_PE_CD_COUNT_INCR GENMASK(7, 0) +#define EIP93_REG_PE_RD_COUNT 0x94 +#define EIP93_PE_RD_COUNT GENMASK(10, 0) +/* + * In the same register, writing a value in GENMASK(7, 0) will + * increment the descriptor count and start DMA action. + */ +#define EIP93_PE_RD_COUNT_INCR GENMASK(7, 0) +#define EIP93_REG_PE_RING_RW_PNTR 0x98 /* RING_PNTR */ + +/* PACKET ENGINE configuration registers */ +#define EIP93_REG_PE_CONFIG 0x100 +#define EIP93_PE_CONFIG_SWAP_TARGET BIT(20) +#define EIP93_PE_CONFIG_SWAP_DATA BIT(18) +#define EIP93_PE_CONFIG_SWAP_SA BIT(17) +#define EIP93_PE_CONFIG_SWAP_CDRD BIT(16) +#define EIP93_PE_CONFIG_EN_CDR_UPDATE BIT(10) +#define EIP93_PE_CONFIG_PE_MODE GENMASK(9, 8) +#define EIP93_PE_TARGET_AUTO_RING_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x3) +#define EIP93_PE_TARGET_COMMAND_NO_RDR_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x2) +#define EIP93_PE_TARGET_COMMAND_WITH_RDR_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x1) +#define EIP93_PE_DIRECT_HOST_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x0) +#define EIP93_PE_CONFIG_RST_RING BIT(2) +#define EIP93_PE_CONFIG_RST_PE BIT(0) +#define EIP93_REG_PE_STATUS 0x104 +#define EIP93_REG_PE_BUF_THRESH 0x10c +#define EIP93_PE_OUTBUF_THRESH GENMASK(23, 16) +#define EIP93_PE_INBUF_THRESH GENMASK(7, 0) +#define EIP93_REG_PE_INBUF_COUNT 0x100 +#define EIP93_REG_PE_OUTBUF_COUNT 0x114 +#define EIP93_REG_PE_BUF_RW_PNTR 0x118 /* BUF_PNTR */ + +/* PACKET ENGINE endian config */ +#define EIP93_REG_PE_ENDIAN_CONFIG 0x1cc +#define EIP93_AIROHA_REG_PE_ENDIAN_CONFIG 0x1d0 +#define EIP93_PE_ENDIAN_TARGET_BYTE_SWAP GENMASK(23, 16) +#define EIP93_PE_ENDIAN_MASTER_BYTE_SWAP GENMASK(7, 0) +/* + * Byte goes 2 and 2 and are referenced by ID + * Split GENMASK(7, 0) in 4 part, one for each byte. 
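+ * Each two-bit field selects which source byte is emitted at that
+ * position. (Editor's note, derived from the example below rather than
+ * the datasheet: read as a single 8-bit value, the LITTLE ENDIAN mapping
+ * is 0xe4 (0b11_10_01_00) and the BIG ENDIAN mapping is 0x1b
+ * (0b00_01_10_11).)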
+ * Example LITTLE ENDIAN: Example BIG ENDIAN + * GENMASK(7, 6) 0x3 GENMASK(7, 6) 0x0 + * GENMASK(5, 4) 0x2 GENMASK(7, 6) 0x1 + * GENMASK(3, 2) 0x1 GENMASK(3, 2) 0x2 + * GENMASK(1, 0) 0x0 GENMASK(1, 0) 0x3 + */ +#define EIP93_PE_ENDIAN_BYTE0 0x0 +#define EIP93_PE_ENDIAN_BYTE1 0x1 +#define EIP93_PE_ENDIAN_BYTE2 0x2 +#define EIP93_PE_ENDIAN_BYTE3 0x3 + +/* EIP93 CLOCK control registers */ +#define EIP93_REG_PE_CLOCK_CTRL 0x1e8 +#define EIP93_PE_CLOCK_EN_HASH_CLK BIT(4) +#define EIP93_PE_CLOCK_EN_ARC4_CLK BIT(3) +#define EIP93_PE_CLOCK_EN_AES_CLK BIT(2) +#define EIP93_PE_CLOCK_EN_DES_CLK BIT(1) +#define EIP93_PE_CLOCK_EN_PE_CLK BIT(0) + +/* EIP93 Device Option and Revision Register */ +#define EIP93_REG_PE_OPTION_1 0x1f4 +#define EIP93_PE_OPTION_MAC_KEY256 BIT(31) +#define EIP93_PE_OPTION_MAC_KEY192 BIT(30) +#define EIP93_PE_OPTION_MAC_KEY128 BIT(29) +#define EIP93_PE_OPTION_AES_CBC_MAC BIT(28) +#define EIP93_PE_OPTION_AES_XCBX BIT(23) +#define EIP93_PE_OPTION_SHA_256 BIT(19) +#define EIP93_PE_OPTION_SHA_224 BIT(18) +#define EIP93_PE_OPTION_SHA_1 BIT(17) +#define EIP93_PE_OPTION_MD5 BIT(16) +#define EIP93_PE_OPTION_AES_KEY256 BIT(15) +#define EIP93_PE_OPTION_AES_KEY192 BIT(14) +#define EIP93_PE_OPTION_AES_KEY128 BIT(13) +#define EIP93_PE_OPTION_AES BIT(2) +#define EIP93_PE_OPTION_ARC4 BIT(1) +#define EIP93_PE_OPTION_TDES BIT(0) /* DES and TDES */ +#define EIP93_REG_PE_OPTION_0 0x1f8 +#define EIP93_REG_PE_REVISION 0x1fc +#define EIP93_PE_REVISION_MAJ_HW_REV GENMASK(27, 24) +#define EIP93_PE_REVISION_MIN_HW_REV GENMASK(23, 20) +#define EIP93_PE_REVISION_HW_PATCH GENMASK(19, 16) +#define EIP93_PE_REVISION_EIP_NO GENMASK(7, 0) + +/* EIP93 Interrupt Control Register */ +#define EIP93_REG_INT_UNMASK_STAT 0x200 +#define EIP93_REG_INT_MASK_STAT 0x204 +#define EIP93_REG_INT_CLR 0x204 +#define EIP93_REG_INT_MASK 0x208 /* INT_EN */ +/* Each int reg have the same bitmap */ +#define EIP93_INT_INTERFACE_ERR BIT(18) +#define EIP93_INT_RPOC_ERR BIT(17) +#define EIP93_INT_PE_RING_ERR BIT(16) +#define EIP93_INT_HALT BIT(15) +#define EIP93_INT_OUTBUF_THRESH BIT(11) +#define EIP93_INT_INBUF_THRESH BIT(10) +#define EIP93_INT_OPERATION_DONE BIT(9) +#define EIP93_INT_RDR_THRESH BIT(1) +#define EIP93_INT_CDR_THRESH BIT(0) +#define EIP93_INT_ALL (EIP93_INT_INTERFACE_ERR | \ + EIP93_INT_RPOC_ERR | \ + EIP93_INT_PE_RING_ERR | \ + EIP93_INT_HALT | \ + EIP93_INT_OUTBUF_THRESH | \ + EIP93_INT_INBUF_THRESH | \ + EIP93_INT_OPERATION_DONE | \ + EIP93_INT_RDR_THRESH | \ + EIP93_INT_CDR_THRESH) + +#define EIP93_REG_INT_CFG 0x20c +#define EIP93_INT_TYPE_PULSE BIT(0) +#define EIP93_REG_MASK_ENABLE 0x210 +#define EIP93_REG_MASK_DISABLE 0x214 + +/* EIP93 SA Record register */ +#define EIP93_REG_SA_CMD_0 0x400 +#define EIP93_SA_CMD_SAVE_HASH BIT(29) +#define EIP93_SA_CMD_SAVE_IV BIT(28) +#define EIP93_SA_CMD_HASH_SOURCE GENMASK(27, 26) +#define EIP93_SA_CMD_HASH_NO_LOAD FIELD_PREP(EIP93_SA_CMD_HASH_SOURCE, 0x3) +#define EIP93_SA_CMD_HASH_FROM_STATE FIELD_PREP(EIP93_SA_CMD_HASH_SOURCE, 0x2) +#define EIP93_SA_CMD_HASH_FROM_SA FIELD_PREP(EIP93_SA_CMD_HASH_SOURCE, 0x0) +#define EIP93_SA_CMD_IV_SOURCE GENMASK(25, 24) +#define EIP93_SA_CMD_IV_FROM_PRNG FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x3) +#define EIP93_SA_CMD_IV_FROM_STATE FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x2) +#define EIP93_SA_CMD_IV_FROM_INPUT FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x1) +#define EIP93_SA_CMD_IV_NO_LOAD FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x0) +#define EIP93_SA_CMD_DIGEST_LENGTH GENMASK(23, 20) +#define EIP93_SA_CMD_DIGEST_10WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 
0xa) /* SRTP and TLS */ +#define EIP93_SA_CMD_DIGEST_8WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x8) /* SHA-256 */ +#define EIP93_SA_CMD_DIGEST_7WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x7) /* SHA-224 */ +#define EIP93_SA_CMD_DIGEST_6WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x6) +#define EIP93_SA_CMD_DIGEST_5WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x5) /* SHA1 */ +#define EIP93_SA_CMD_DIGEST_4WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x4) /* MD5 and AES-based */ +#define EIP93_SA_CMD_DIGEST_3WORD_IPSEC FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x3) /* IPSEC */ +#define EIP93_SA_CMD_DIGEST_2WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x2) +#define EIP93_SA_CMD_DIGEST_1WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x1) +#define EIP93_SA_CMD_DIGEST_3WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x0) /* 96bit output */ +#define EIP93_SA_CMD_HDR_PROC BIT(19) +#define EIP93_SA_CMD_EXT_PAD BIT(18) +#define EIP93_SA_CMD_SCPAD BIT(17) +#define EIP93_SA_CMD_HASH GENMASK(15, 12) +#define EIP93_SA_CMD_HASH_NULL FIELD_PREP(EIP93_SA_CMD_HASH, 0xf) +#define EIP93_SA_CMD_HASH_SHA256 FIELD_PREP(EIP93_SA_CMD_HASH, 0x3) +#define EIP93_SA_CMD_HASH_SHA224 FIELD_PREP(EIP93_SA_CMD_HASH, 0x2) +#define EIP93_SA_CMD_HASH_SHA1 FIELD_PREP(EIP93_SA_CMD_HASH, 0x1) +#define EIP93_SA_CMD_HASH_MD5 FIELD_PREP(EIP93_SA_CMD_HASH, 0x0) +#define EIP93_SA_CMD_CIPHER GENMASK(11, 8) +#define EIP93_SA_CMD_CIPHER_NULL FIELD_PREP(EIP93_SA_CMD_CIPHER, 0xf) +#define EIP93_SA_CMD_CIPHER_AES FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x3) +#define EIP93_SA_CMD_CIPHER_ARC4 FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x2) +#define EIP93_SA_CMD_CIPHER_3DES FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x1) +#define EIP93_SA_CMD_CIPHER_DES FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x0) +#define EIP93_SA_CMD_PAD_TYPE GENMASK(7, 6) +#define EIP93_SA_CMD_PAD_CONST_SSL FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x6) +#define EIP93_SA_CMD_PAD_TLS_DTLS FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x5) +#define EIP93_SA_CMD_PAD_ZERO FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x3) +#define EIP93_SA_CMD_PAD_CONST FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x2) +#define EIP93_SA_CMD_PAD_PKCS7 FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x1) +#define EIP93_SA_CMD_PAD_IPSEC FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x0) +#define EIP93_SA_CMD_OPGROUP GENMASK(5, 4) +#define EIP93_SA_CMD_OP_EXT FIELD_PREP(EIP93_SA_CMD_OPGROUP, 0x2) +#define EIP93_SA_CMD_OP_PROTOCOL FIELD_PREP(EIP93_SA_CMD_OPGROUP, 0x1) +#define EIP93_SA_CMD_OP_BASIC FIELD_PREP(EIP93_SA_CMD_OPGROUP, 0x0) +#define EIP93_SA_CMD_DIRECTION_IN BIT(3) /* 0: outbount 1: inbound */ +#define EIP93_SA_CMD_OPCODE GENMASK(2, 0) +#define EIP93_SA_CMD_OPCODE_BASIC_OUT_PRNG 0x7 +#define EIP93_SA_CMD_OPCODE_BASIC_OUT_HASH 0x3 +#define EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC_HASH 0x1 +#define EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC 0x0 +#define EIP93_SA_CMD_OPCODE_BASIC_IN_HASH 0x3 +#define EIP93_SA_CMD_OPCODE_BASIC_IN_HASH_DEC 0x1 +#define EIP93_SA_CMD_OPCODE_BASIC_IN_DEC 0x0 +#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_ESP 0x0 +#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_SSL 0x4 +#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_TLS 0x5 +#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_SRTP 0x7 +#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_ESP 0x0 +#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_SSL 0x2 +#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_TLS 0x3 +#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_SRTP 0x7 +#define EIP93_SA_CMD_OPCODE_EXT_OUT_DTSL 0x1 +#define EIP93_SA_CMD_OPCODE_EXT_OUT_SSL 0x4 +#define EIP93_SA_CMD_OPCODE_EXT_OUT_TLSV10 0x5 +#define EIP93_SA_CMD_OPCODE_EXT_OUT_TLSV11 0x6 +#define EIP93_SA_CMD_OPCODE_EXT_IN_DTSL 0x1 +#define 
EIP93_SA_CMD_OPCODE_EXT_IN_SSL 0x4 +#define EIP93_SA_CMD_OPCODE_EXT_IN_TLSV10 0x5 +#define EIP93_SA_CMD_OPCODE_EXT_IN_TLSV11 0x6 +#define EIP93_REG_SA_CMD_1 0x404 +#define EIP93_SA_CMD_EN_SEQNUM_CHK BIT(29) +/* This mask can be either used for ARC4 or AES */ +#define EIP93_SA_CMD_ARC4_KEY_LENGHT GENMASK(28, 24) +#define EIP93_SA_CMD_AES_DEC_KEY BIT(28) /* 0: encrypt key 1: decrypt key */ +#define EIP93_SA_CMD_AES_KEY_LENGTH GENMASK(26, 24) +#define EIP93_SA_CMD_AES_KEY_256BIT FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, 0x4) +#define EIP93_SA_CMD_AES_KEY_192BIT FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, 0x3) +#define EIP93_SA_CMD_AES_KEY_128BIT FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, 0x2) +#define EIP93_SA_CMD_HASH_CRYPT_OFFSET GENMASK(23, 16) +#define EIP93_SA_CMD_BYTE_OFFSET BIT(13) /* 0: CRYPT_OFFSET in 32bit word 1: CRYPT_OFFSET in 8bit bytes */ +#define EIP93_SA_CMD_HMAC BIT(12) +#define EIP93_SA_CMD_SSL_MAC BIT(12) +/* This mask can be either used for ARC4 or AES */ +#define EIP93_SA_CMD_CHIPER_MODE GENMASK(9, 8) +/* AES or DES operations */ +#define EIP93_SA_CMD_CHIPER_MODE_ICM FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x3) +#define EIP93_SA_CMD_CHIPER_MODE_CTR FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x2) +#define EIP93_SA_CMD_CHIPER_MODE_CBC FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x1) +#define EIP93_SA_CMD_CHIPER_MODE_ECB FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x0) +/* ARC4 operations */ +#define EIP93_SA_CMD_CHIPER_MODE_STATEFULL FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x1) +#define EIP93_SA_CMD_CHIPER_MODE_STATELESS FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x0) +#define EIP93_SA_CMD_COPY_PAD BIT(3) +#define EIP93_SA_CMD_COPY_PAYLOAD BIT(2) +#define EIP93_SA_CMD_COPY_HEADER BIT(1) +#define EIP93_SA_CMD_COPY_DIGEST BIT(0) /* With this enabled, COPY_PAD is required */ + +/* State save register */ +#define EIP93_REG_STATE_IV_0 0x500 +#define EIP93_REG_STATE_IV_1 0x504 + +#define EIP93_REG_PE_ARC4STATE 0x700 + +struct sa_record { + u32 sa_cmd0_word; + u32 sa_cmd1_word; + u32 sa_key[8]; + u8 sa_i_digest[32]; + u8 sa_o_digest[32]; + u32 sa_spi; + u32 sa_seqnum[2]; + u32 sa_seqmum_mask[2]; + u32 sa_nonce; +} __packed; + +struct sa_state { + u32 state_iv[4]; + u32 state_byte_cnt[2]; + u8 state_i_digest[32]; +} __packed; + +struct eip93_descriptor { + u32 pe_ctrl_stat_word; + u32 src_addr; + u32 dst_addr; + u32 sa_addr; + u32 state_addr; + u32 arc4_addr; + u32 user_id; + u32 pe_length_word; +} __packed; + +#endif diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 86c699c14f84..c3b2b22934b7 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -14,6 +14,7 @@ #include <linux/module.h> #include <linux/of_platform.h> #include <linux/of_irq.h> +#include <linux/pci.h> #include <linux/platform_device.h> #include <linux/workqueue.h> @@ -27,62 +28,217 @@ static u32 max_rings = EIP197_MAX_RINGS; module_param(max_rings, uint, 0644); MODULE_PARM_DESC(max_rings, "Maximum number of rings to use."); -static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv) +static void eip197_trc_cache_setupvirt(struct safexcel_crypto_priv *priv) { - u32 val, htable_offset; - int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc; - - if (priv->version == EIP197B) { - cs_rc_max = EIP197B_CS_RC_MAX; - cs_ht_wc = EIP197B_CS_HT_WC; - cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC; - cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC; - } else { - cs_rc_max = EIP197D_CS_RC_MAX; - cs_ht_wc = EIP197D_CS_HT_WC; - cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC; - 
cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC; + int i; + + /* + * Map all interfaces/rings to register index 0 + * so they can share contexts. Without this, the EIP197 will + * assume each interface/ring to be in its own memory domain + * i.e. have its own subset of UNIQUE memory addresses. + * Which would cause records with the SAME memory address to + * use DIFFERENT cache buffers, causing both poor cache utilization + * AND serious coherence/invalidation issues. + */ + for (i = 0; i < 4; i++) + writel(0, priv->base + EIP197_FLUE_IFC_LUT(i)); + + /* + * Initialize other virtualization regs for cache + * These may not be in their reset state ... + */ + for (i = 0; i < priv->config.rings; i++) { + writel(0, priv->base + EIP197_FLUE_CACHEBASE_LO(i)); + writel(0, priv->base + EIP197_FLUE_CACHEBASE_HI(i)); + writel(EIP197_FLUE_CONFIG_MAGIC, + priv->base + EIP197_FLUE_CONFIG(i)); } + writel(0, priv->base + EIP197_FLUE_OFFSETS); + writel(0, priv->base + EIP197_FLUE_ARC4_OFFSET); +} - /* Enable the record cache memory access */ - val = readl(priv->base + EIP197_CS_RAM_CTRL); - val &= ~EIP197_TRC_ENABLE_MASK; - val |= EIP197_TRC_ENABLE_0; - writel(val, priv->base + EIP197_CS_RAM_CTRL); +static void eip197_trc_cache_banksel(struct safexcel_crypto_priv *priv, + u32 addrmid, int *actbank) +{ + u32 val; + int curbank; + + curbank = addrmid >> 16; + if (curbank != *actbank) { + val = readl(priv->base + EIP197_CS_RAM_CTRL); + val = (val & ~EIP197_CS_BANKSEL_MASK) | + (curbank << EIP197_CS_BANKSEL_OFS); + writel(val, priv->base + EIP197_CS_RAM_CTRL); + *actbank = curbank; + } +} - /* Clear all ECC errors */ - writel(0, priv->base + EIP197_TRC_ECCCTRL); +static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv, + int maxbanks, u32 probemask, u32 stride) +{ + u32 val, addrhi, addrlo, addrmid, addralias, delta, marker; + int actbank; /* - * Make sure the cache memory is accessible by taking record cache into - * reset. + * And probe the actual size of the physically attached cache data RAM + * Using a binary subdivision algorithm downto 32 byte cache lines. 
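The probe implements this as a binary search over the address space: plant a unique marker at the lowest address of the upper half, deliberately overwrite every address that could alias it, and check whether the marker survived. A self-contained userspace model of the same idea (not part of the patch; the 64-word RAM that wraps around past its real size, the 256-word search space and the stride of 4 are assumptions made for the demo):

#include <stdint.h>
#include <stdio.h>

#define REAL_SIZE	64u	/* actual (hidden) RAM size, power of two */
#define MAX_SIZE	256u	/* upper bound the probe starts from */
#define STRIDE		4u	/* smallest granularity probed */

static uint32_t ram[REAL_SIZE];

/* Addresses beyond the real size wrap around, i.e. alias into the RAM */
static void ram_write(uint32_t addr, uint32_t val) { ram[addr % REAL_SIZE] = val; }
static uint32_t ram_read(uint32_t addr) { return ram[addr % REAL_SIZE]; }

static uint32_t probe_size(void)
{
	uint32_t lo = 0, hi = MAX_SIZE, mid, delta, marker;

	while (hi - lo > STRIDE) {
		mid = (hi + lo) / 2;
		marker = mid ^ 0xabadbabe;	/* unique per step */
		ram_write(mid, marker);

		/* invalidate every possible alias below mid */
		delta = STRIDE;
		while ((delta << 1) <= mid)
			delta <<= 1;		/* highest power of two <= mid */
		for (; delta >= STRIDE; delta >>= 1)
			ram_write(mid - delta, ~marker);

		if (ram_read(mid) == marker)
			lo = mid;	/* marker survived: RAM extends past mid */
		else
			hi = mid;	/* an alias clobbered it: mid is too far */
	}
	return hi;
}

int main(void)
{
	printf("probed size: %u (real size %u)\n", probe_size(), REAL_SIZE);
	return 0;
}

With REAL_SIZE = 64 the search converges on 64; with REAL_SIZE equal to MAX_SIZE no alias write ever lands on the marker and the probe returns the full search space.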
*/ - val = readl(priv->base + EIP197_TRC_PARAMS); - val |= EIP197_TRC_PARAMS_SW_RESET; - val &= ~EIP197_TRC_PARAMS_DATA_ACCESS; - writel(val, priv->base + EIP197_TRC_PARAMS); + addrhi = 1 << (16 + maxbanks); + addrlo = 0; + actbank = min(maxbanks - 1, 0); + while ((addrhi - addrlo) > stride) { + /* write marker to lowest address in top half */ + addrmid = (addrhi + addrlo) >> 1; + marker = (addrmid ^ 0xabadbabe) & probemask; /* Unique */ + eip197_trc_cache_banksel(priv, addrmid, &actbank); + writel(marker, + priv->base + EIP197_CLASSIFICATION_RAMS + + (addrmid & 0xffff)); + + /* write invalid markers to possible aliases */ + delta = 1 << __fls(addrmid); + while (delta >= stride) { + addralias = addrmid - delta; + eip197_trc_cache_banksel(priv, addralias, &actbank); + writel(~marker, + priv->base + EIP197_CLASSIFICATION_RAMS + + (addralias & 0xffff)); + delta >>= 1; + } - /* Clear all records */ + /* read back marker from top half */ + eip197_trc_cache_banksel(priv, addrmid, &actbank); + val = readl(priv->base + EIP197_CLASSIFICATION_RAMS + + (addrmid & 0xffff)); + + if ((val & probemask) == marker) + /* read back correct, continue with top half */ + addrlo = addrmid; + else + /* not read back correct, continue with bottom half */ + addrhi = addrmid; + } + return addrhi; +} + +static void eip197_trc_cache_clear(struct safexcel_crypto_priv *priv, + int cs_rc_max, int cs_ht_wc) +{ + int i; + u32 htable_offset, val, offset; + + /* Clear all records in administration RAM */ for (i = 0; i < cs_rc_max; i++) { - u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE; + offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE; writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) | EIP197_CS_RC_PREV(EIP197_RC_NULL), priv->base + offset); - val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1); + val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1); if (i == 0) val |= EIP197_CS_RC_PREV(EIP197_RC_NULL); else if (i == cs_rc_max - 1) val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL); - writel(val, priv->base + offset + sizeof(u32)); + writel(val, priv->base + offset + 4); + /* must also initialize the address key due to ECC! */ + writel(0, priv->base + offset + 8); + writel(0, priv->base + offset + 12); } /* Clear the hash table entries */ htable_offset = cs_rc_max * EIP197_CS_RC_SIZE; for (i = 0; i < cs_ht_wc; i++) writel(GENMASK(29, 0), - priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32)); + priv->base + EIP197_CLASSIFICATION_RAMS + + htable_offset + i * sizeof(u32)); +} + +static int eip197_trc_cache_init(struct safexcel_crypto_priv *priv) +{ + u32 val, dsize, asize; + int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc; + int cs_rc_abs_max, cs_ht_sz; + int maxbanks; + + /* Setup (dummy) virtualization for cache */ + eip197_trc_cache_setupvirt(priv); + + /* + * Enable the record cache memory access and + * probe the bank select width + */ + val = readl(priv->base + EIP197_CS_RAM_CTRL); + val &= ~EIP197_TRC_ENABLE_MASK; + val |= EIP197_TRC_ENABLE_0 | EIP197_CS_BANKSEL_MASK; + writel(val, priv->base + EIP197_CS_RAM_CTRL); + val = readl(priv->base + EIP197_CS_RAM_CTRL); + maxbanks = ((val&EIP197_CS_BANKSEL_MASK)>>EIP197_CS_BANKSEL_OFS) + 1; + + /* Clear all ECC errors */ + writel(0, priv->base + EIP197_TRC_ECCCTRL); + + /* + * Make sure the cache memory is accessible by taking record cache into + * reset. Need data memory access here, not admin access. 
+ */ + val = readl(priv->base + EIP197_TRC_PARAMS); + val |= EIP197_TRC_PARAMS_SW_RESET | EIP197_TRC_PARAMS_DATA_ACCESS; + writel(val, priv->base + EIP197_TRC_PARAMS); + + /* Probed data RAM size in bytes */ + dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff, 32); + + /* + * Now probe the administration RAM size pretty much the same way + * Except that only the lower 30 bits are writable and we don't need + * bank selects + */ + val = readl(priv->base + EIP197_TRC_PARAMS); + /* admin access now */ + val &= ~(EIP197_TRC_PARAMS_DATA_ACCESS | EIP197_CS_BANKSEL_MASK); + writel(val, priv->base + EIP197_TRC_PARAMS); + + /* Probed admin RAM size in admin words */ + asize = eip197_trc_cache_probe(priv, 0, 0x3fffffff, 16) >> 4; + + /* Clear any ECC errors detected while probing! */ + writel(0, priv->base + EIP197_TRC_ECCCTRL); + + /* Sanity check probing results */ + if (dsize < EIP197_MIN_DSIZE || asize < EIP197_MIN_ASIZE) { + dev_err(priv->dev, "Record cache probing failed (%d,%d).", + dsize, asize); + return -ENODEV; + } + + /* + * Determine optimal configuration from RAM sizes + * Note that we assume that the physical RAM configuration is sane + * Therefore, we don't do any parameter error checking here ... + */ + + /* For now, just use a single record format covering everything */ + cs_trc_rec_wc = EIP197_CS_TRC_REC_WC; + cs_trc_lg_rec_wc = EIP197_CS_TRC_REC_WC; + + /* + * Step #1: How many records will physically fit? + * Hard upper limit is 1023! + */ + cs_rc_abs_max = min_t(uint, ((dsize >> 2) / cs_trc_lg_rec_wc), 1023); + /* Step #2: Need at least 2 words in the admin RAM per record */ + cs_rc_max = min_t(uint, cs_rc_abs_max, (asize >> 1)); + /* Step #3: Determine log2 of hash table size */ + cs_ht_sz = __fls(asize - cs_rc_max) - 2; + /* Step #4: determine current size of hash table in dwords */ + cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */ + /* Step #5: add back excess words and see if we can fit more records */ + cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2)); + + /* Clear the cache RAMs */ + eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc); /* Disable the record cache memory access */ val = readl(priv->base + EIP197_CS_RAM_CTRL); @@ -102,119 +258,254 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv) /* Configure the record cache #2 */ val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) | EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) | - EIP197_TRC_PARAMS_HTABLE_SZ(2); + EIP197_TRC_PARAMS_HTABLE_SZ(cs_ht_sz); writel(val, priv->base + EIP197_TRC_PARAMS); + + dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n", + dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc); + return 0; } -static void eip197_write_firmware(struct safexcel_crypto_priv *priv, - const struct firmware *fw, int pe, u32 ctrl, - u32 prog_en) +static void eip197_init_firmware(struct safexcel_crypto_priv *priv) { - const u32 *data = (const u32 *)fw->data; + int pe, i; u32 val; - int i; - /* Reset the engine to make its program memory accessible */ - writel(EIP197_PE_ICE_x_CTRL_SW_RESET | - EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR | - EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR, - EIP197_PE(priv) + ctrl); + for (pe = 0; pe < priv->config.pes; pe++) { + /* Configure the token FIFO's */ + writel(3, EIP197_PE(priv) + EIP197_PE_ICE_PUTF_CTRL(pe)); + writel(0, EIP197_PE(priv) + EIP197_PE_ICE_PPTF_CTRL(pe)); - /* Enable access to the program memory */ - writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe)); + /* Clear the ICE scratchpad memory */ + val = readl(EIP197_PE(priv) + 
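The five sizing steps in eip197_trc_cache_init() above boil down to a few lines of integer arithmetic. A standalone sketch, assuming probe results of 32 KiB of data RAM and 2048 admin words, and assuming a large-record size of 64 words (the real value comes from EIP197_CS_TRC_REC_WC in safexcel.h):

#include <stdio.h>

#define CS_TRC_REC_WC	64u	/* assumed record size in 32-bit words */

int main(void)
{
	unsigned int dsize = 32768;	/* assumed data RAM size, bytes */
	unsigned int asize = 2048;	/* assumed admin RAM size, admin words */
	unsigned int rc_abs_max, rc_max, ht_sz, ht_wc, fls, tmp;

	/* Step 1: how many records physically fit (hard cap 1023) */
	rc_abs_max = (dsize / 4) / CS_TRC_REC_WC;
	if (rc_abs_max > 1023)
		rc_abs_max = 1023;
	/* Step 2: need at least two admin words per record */
	rc_max = rc_abs_max < asize / 2 ? rc_abs_max : asize / 2;
	/* Step 3: log2 of the hash table size from the leftover admin RAM */
	fls = 0;
	tmp = asize - rc_max;
	while (tmp >>= 1)
		fls++;
	ht_sz = fls - 2;
	/* Step 4: hash table size in dwords */
	ht_wc = 16u << ht_sz;
	/* Step 5: give excess admin words back to the record count */
	rc_max = asize - (ht_wc / 4) < rc_abs_max ?
		 asize - (ht_wc / 4) : rc_abs_max;

	printf("cs_rc_max=%u, cs_ht_wc=%u dwords, cs_ht_sz=%u\n",
	       rc_max, ht_wc, ht_sz);
	return 0;
}

With these assumed inputs the sketch ends up with 128 records and a 4096-dword hash table (cs_ht_sz = 8).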
EIP197_PE_ICE_SCRATCH_CTRL(pe)); + val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER | + EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN | + EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS | + EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS; + writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe)); + + /* clear the scratchpad RAM using 32 bit writes only */ + for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++) + writel(0, EIP197_PE(priv) + + EIP197_PE_ICE_SCRATCH_RAM(pe) + (i << 2)); + + /* Reset the IFPP engine to make its program mem accessible */ + writel(EIP197_PE_ICE_x_CTRL_SW_RESET | + EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR | + EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR, + EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe)); + + /* Reset the IPUE engine to make its program mem accessible */ + writel(EIP197_PE_ICE_x_CTRL_SW_RESET | + EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR | + EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR, + EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe)); + + /* Enable access to all IFPP program memories */ + writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN, + EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe)); + + /* bypass the OCE, if present */ + if (priv->flags & EIP197_OCE) + writel(EIP197_DEBUG_OCE_BYPASS, EIP197_PE(priv) + + EIP197_PE_DEBUG(pe)); + } + +} + +static int eip197_write_firmware(struct safexcel_crypto_priv *priv, + const struct firmware *fw) +{ + u32 val; + int i; /* Write the firmware */ - for (i = 0; i < fw->size / sizeof(u32); i++) - writel(be32_to_cpu(data[i]), - priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32)); + for (i = 0; i < fw->size / sizeof(u32); i++) { + if (priv->data->fw_little_endian) + val = le32_to_cpu(((const __le32 *)fw->data)[i]); + else + val = be32_to_cpu(((const __be32 *)fw->data)[i]); + + writel(val, + priv->base + EIP197_CLASSIFICATION_RAMS + + i * sizeof(val)); + } + + /* Exclude final 2 NOPs from size */ + return i - EIP197_FW_TERMINAL_NOPS; +} - /* Disable access to the program memory */ - writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe)); +/* + * If FW is actual production firmware, then poll for its initialization + * to complete and check if it is good for the HW, otherwise just return OK. 
+ */ +static bool poll_fw_ready(struct safexcel_crypto_priv *priv, int fpp) +{ + int pe, pollcnt; + u32 base, pollofs; - /* Release engine from reset */ - val = readl(EIP197_PE(priv) + ctrl); - val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET; - writel(val, EIP197_PE(priv) + ctrl); + if (fpp) + pollofs = EIP197_FW_FPP_READY; + else + pollofs = EIP197_FW_PUE_READY; + + for (pe = 0; pe < priv->config.pes; pe++) { + base = EIP197_PE_ICE_SCRATCH_RAM(pe); + pollcnt = EIP197_FW_START_POLLCNT; + while (pollcnt && + (readl_relaxed(EIP197_PE(priv) + base + + pollofs) != 1)) { + pollcnt--; + } + if (!pollcnt) { + dev_err(priv->dev, "FW(%d) for PE %d failed to start\n", + fpp, pe); + return false; + } + } + return true; +} + +static bool eip197_start_firmware(struct safexcel_crypto_priv *priv, + int ipuesz, int ifppsz, int minifw) +{ + int pe; + u32 val; + + for (pe = 0; pe < priv->config.pes; pe++) { + /* Disable access to all program memory */ + writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe)); + + /* Start IFPP microengines */ + if (minifw) + val = 0; + else + val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) & + EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) | + EIP197_PE_ICE_UENG_DEBUG_RESET; + writel(val, EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe)); + + /* Start IPUE microengines */ + if (minifw) + val = 0; + else + val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) & + EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) | + EIP197_PE_ICE_UENG_DEBUG_RESET; + writel(val, EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe)); + } + + /* For miniFW startup, there is no initialization, so always succeed */ + if (minifw) + return true; + + /* Wait until all the firmwares have properly started up */ + if (!poll_fw_ready(priv, 1)) + return false; + if (!poll_fw_ready(priv, 0)) + return false; + + return true; } static int eip197_load_firmwares(struct safexcel_crypto_priv *priv) { const char *fw_name[] = {"ifpp.bin", "ipue.bin"}; const struct firmware *fw[FW_NB]; - char fw_path[31], *dir = NULL; + char fw_path[37], *dir = NULL; int i, j, ret = 0, pe; - u32 val; + int ipuesz, ifppsz, minifw = 0; - switch (priv->version) { - case EIP197B: - dir = "eip197b"; - break; - case EIP197D: + if (priv->data->version == EIP197D_MRVL) dir = "eip197d"; - break; - default: - /* No firmware is required */ - return 0; - } + else if (priv->data->version == EIP197B_MRVL || + priv->data->version == EIP197_DEVBRD) + dir = "eip197b"; + else if (priv->data->version == EIP197C_MXL) + dir = "eip197c"; + else + return -ENODEV; +retry_fw: for (i = 0; i < FW_NB; i++) { - snprintf(fw_path, 31, "inside-secure/%s/%s", dir, fw_name[i]); - ret = request_firmware(&fw[i], fw_path, priv->dev); + snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]); + ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev); if (ret) { - if (priv->version != EIP197B) + if (minifw || priv->data->version != EIP197B_MRVL) goto release_fw; /* Fallback to the old firmware location for the * EIP197b. 
*/ - ret = request_firmware(&fw[i], fw_name[i], priv->dev); - if (ret) { - dev_err(priv->dev, - "Failed to request firmware %s (%d)\n", - fw_name[i], ret); + ret = firmware_request_nowarn(&fw[i], fw_name[i], + priv->dev); + if (ret) goto release_fw; - } } } - for (pe = 0; pe < priv->config.pes; pe++) { - /* Clear the scratchpad memory */ - val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe)); - val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER | - EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN | - EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS | - EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS; - writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe)); + eip197_init_firmware(priv); + + ifppsz = eip197_write_firmware(priv, fw[FW_IFPP]); - memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0, - EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32)); + /* Enable access to IPUE program memories */ + for (pe = 0; pe < priv->config.pes; pe++) + writel(EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN, + EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe)); - eip197_write_firmware(priv, fw[FW_IFPP], pe, - EIP197_PE_ICE_FPP_CTRL(pe), - EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN); + ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]); - eip197_write_firmware(priv, fw[FW_IPUE], pe, - EIP197_PE_ICE_PUE_CTRL(pe), - EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN); + if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) { + dev_dbg(priv->dev, "Firmware loaded successfully\n"); + return 0; } + ret = -ENODEV; + release_fw: for (j = 0; j < i; j++) release_firmware(fw[j]); + if (!minifw) { + /* Retry with minifw path */ + dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n"); + dir = "eip197_minifw"; + minifw = 1; + goto retry_fw; + } + + dev_err(priv->dev, "Firmware load failed.\n"); + return ret; } static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv) { - u32 hdw, cd_size_rnd, val; - int i; - - hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); - hdw &= GENMASK(27, 25); - hdw >>= 25; - - cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw; + u32 cd_size_rnd, val; + int i, cd_fetch_cnt; + + cd_size_rnd = (priv->config.cd_size + + (BIT(priv->hwconfig.hwdataw) - 1)) >> + priv->hwconfig.hwdataw; + /* determine number of CD's we can fetch into the CD FIFO as 1 block */ + if (priv->flags & SAFEXCEL_HW_EIP197) { + /* EIP197: try to fetch enough in 1 go to keep all pipes busy */ + cd_fetch_cnt = (1 << priv->hwconfig.hwcfsize) / cd_size_rnd; + cd_fetch_cnt = min_t(uint, cd_fetch_cnt, + (priv->config.pes * EIP197_FETCH_DEPTH)); + } else { + /* for the EIP97, just fetch all that fits minus 1 */ + cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) / + cd_size_rnd) - 1; + } + /* + * Since we're using command desc's way larger than formally specified, + * we need to check whether we can fit even 1 for low-end EIP196's! 
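The fetch sizing above first rounds the command descriptor up to whole bus words, then sees how many such descriptors fit into the CD FIFO, capped so that all pipes can be kept busy. A standalone sketch with assumed hardware parameters (the real hwdataw/hwcfsize values are decoded from the HIA_OPTIONS register at probe time, and the assumed descriptor size is arbitrary):

#include <stdio.h>

#define FETCH_DEPTH	2u	/* EIP197_FETCH_DEPTH */

int main(void)
{
	unsigned int cd_size = 10;	/* assumed descriptor size, dwords */
	unsigned int hwdataw = 2;	/* assumed log2(bus word in dwords) */
	unsigned int hwcfsize = 6;	/* assumed log2(CD FIFO, bus words) */
	unsigned int pes = 2;		/* assumed number of pipes */
	unsigned int cd_size_rnd, fetch_cnt;

	/* round the descriptor size up to whole bus words */
	cd_size_rnd = (cd_size + (1u << hwdataw) - 1) >> hwdataw;

	/* EIP197 flavour: fetch enough in one go to keep all pipes busy */
	fetch_cnt = (1u << hwcfsize) / cd_size_rnd;
	if (fetch_cnt > pes * FETCH_DEPTH)
		fetch_cnt = pes * FETCH_DEPTH;

	if (!fetch_cnt)
		printf("FIFO too small for even one descriptor\n");
	else
		printf("cd_size_rnd=%u bus words, fetch %u descriptors per burst\n",
		       cd_size_rnd, fetch_cnt);
	return 0;
}

Here the FIFO could hold 21 rounded descriptors, but the fetch is capped at pes * FETCH_DEPTH = 4.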
+ */ + if (!cd_fetch_cnt) { + dev_err(priv->dev, "Unable to fit even 1 command desc!\n"); + return -ENODEV; + } for (i = 0; i < priv->config.rings; i++) { /* ring base address */ @@ -223,11 +514,12 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv) writel(upper_32_bits(priv->ring[i].cdr.base_dma), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI); - writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) | - priv->config.cd_size, + writel(EIP197_xDR_DESC_MODE_64BIT | EIP197_CDR_DESC_MODE_ADCP | + (priv->config.cd_offset << 14) | priv->config.cd_size, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE); - writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) | - (EIP197_FETCH_COUNT * priv->config.cd_offset), + writel(((cd_fetch_cnt * + (cd_size_rnd << priv->hwconfig.hwdataw)) << 16) | + (cd_fetch_cnt * (priv->config.cd_offset / sizeof(u32))), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG); /* Configure DMA tx control */ @@ -245,14 +537,23 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv) static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv) { - u32 hdw, rd_size_rnd, val; - int i; - - hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); - hdw &= GENMASK(27, 25); - hdw >>= 25; - - rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw; + u32 rd_size_rnd, val; + int i, rd_fetch_cnt; + + /* determine number of RD's we can fetch into the FIFO as one block */ + rd_size_rnd = (EIP197_RD64_FETCH_SIZE + + (BIT(priv->hwconfig.hwdataw) - 1)) >> + priv->hwconfig.hwdataw; + if (priv->flags & SAFEXCEL_HW_EIP197) { + /* EIP197: try to fetch enough in 1 go to keep all pipes busy */ + rd_fetch_cnt = (1 << priv->hwconfig.hwrfsize) / rd_size_rnd; + rd_fetch_cnt = min_t(uint, rd_fetch_cnt, + (priv->config.pes * EIP197_FETCH_DEPTH)); + } else { + /* for the EIP97, just fetch all that fits minus 1 */ + rd_fetch_cnt = ((1 << priv->hwconfig.hwrfsize) / + rd_size_rnd) - 1; + } for (i = 0; i < priv->config.rings; i++) { /* ring base address */ @@ -261,12 +562,13 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv) writel(upper_32_bits(priv->ring[i].rdr.base_dma), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI); - writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) | + writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 14) | priv->config.rd_size, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE); - writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) | - (EIP197_FETCH_COUNT * priv->config.rd_offset), + writel(((rd_fetch_cnt * + (rd_size_rnd << priv->hwconfig.hwdataw)) << 16) | + (rd_fetch_cnt * (priv->config.rd_offset / sizeof(u32))), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG); /* Configure DMA tx control */ @@ -291,23 +593,21 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv) static int safexcel_hw_init(struct safexcel_crypto_priv *priv) { - u32 version, val; - int i, ret, pe; - - /* Determine endianess and configure byte swap */ - version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION); - val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); + u32 val; + int i, ret, pe, opbuflo, opbufhi; - if ((version & 0xffff) == EIP197_HIA_VERSION_BE) - val |= EIP197_MST_CTRL_BYTE_SWAP; - else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE) - val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24); + dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n", + priv->config.pes, priv->config.rings); - /* 
For EIP197 set maximum number of TX commands to 2^5 = 32 */ - if (priv->version == EIP197B || priv->version == EIP197D) + /* + * For EIP197's only set maximum number of TX commands to 2^5 = 32 + * Skip for the EIP97 as it does not have this field. + */ + if (priv->flags & SAFEXCEL_HW_EIP197) { + val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); val |= EIP197_MST_CTRL_TX_MAX_CMD(5); - - writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); + writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); + } /* Configure wr/rd cache values */ writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) | @@ -330,11 +630,10 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) writel(EIP197_DxE_THR_CTRL_RESET_PE, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe)); - if (priv->version == EIP197B || priv->version == EIP197D) { - /* Reset HIA input interface arbiter */ + if (priv->flags & EIP197_PE_ARB) + /* Reset HIA input interface arbiter (if present) */ writel(EIP197_HIA_RA_PE_CTRL_RESET, EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe)); - } /* DMA transfer size to use */ val = EIP197_HIA_DFE_CFG_DIS_DEBUG; @@ -357,12 +656,11 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) EIP197_PE_IN_xBUF_THRES_MAX(7), EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe)); - if (priv->version == EIP197B || priv->version == EIP197D) { + if (priv->flags & SAFEXCEL_HW_EIP197) /* enable HIA input interface arbiter and rings */ writel(EIP197_HIA_RA_PE_CTRL_EN | GENMASK(priv->config.rings - 1, 0), EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe)); - } /* Data Store Engine configuration */ @@ -376,39 +674,46 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) ; /* DMA transfer size to use */ + if (priv->hwconfig.hwnumpes > 4) { + opbuflo = 9; + opbufhi = 10; + } else { + opbuflo = 7; + opbufhi = 8; + } val = EIP197_HIA_DSE_CFG_DIS_DEBUG; - val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | - EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8); + val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(opbuflo) | + EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(opbufhi); val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS); val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE; - /* FIXME: instability issues can occur for EIP97 but disabling it impact - * performances. + /* FIXME: instability issues can occur for EIP97 but disabling + * it impacts performance. 
*/ - if (priv->version == EIP197B || priv->version == EIP197D) + if (priv->flags & SAFEXCEL_HW_EIP197) val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR; writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe)); /* Leave the DSE threads reset state */ writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe)); - /* Configure the procesing engine thresholds */ - writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | - EIP197_PE_OUT_DBUF_THRES_MAX(8), + /* Configure the processing engine thresholds */ + writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) | + EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi), EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe)); /* Processing Engine configuration */ - /* H/W capabilities selection */ - val = EIP197_FUNCTION_RSVD; - val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY; - val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT; - val |= EIP197_ALG_DES_ECB | EIP197_ALG_DES_CBC; - val |= EIP197_ALG_3DES_ECB | EIP197_ALG_3DES_CBC; - val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC; - val |= EIP197_ALG_MD5 | EIP197_ALG_HMAC_MD5; - val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1; - val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2; - writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe)); + /* Token & context configuration */ + val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES | + EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT | + EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT; + writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe)); + + /* H/W capabilities selection: just enable everything */ + writel(EIP197_FUNCTION_ALL, + EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe)); + writel(EIP197_FUNCTION_ALL, + EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION2_EN(pe)); } /* Command Descriptor Rings prepare */ @@ -433,7 +738,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR); - writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2, + writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE); } @@ -456,7 +761,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR); /* Ring size */ - writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2, + writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE); } @@ -473,18 +778,28 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) /* Clear any HIA interrupt */ writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK); - if (priv->version == EIP197B || priv->version == EIP197D) { - eip197_trc_cache_init(priv); + if (priv->flags & EIP197_SIMPLE_TRC) { + writel(EIP197_STRC_CONFIG_INIT | + EIP197_STRC_CONFIG_LARGE_REC(EIP197_CS_TRC_REC_WC) | + EIP197_STRC_CONFIG_SMALL_REC(EIP197_CS_TRC_REC_WC), + priv->base + EIP197_STRC_CONFIG); + writel(EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE, + EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL2(0)); + } else if (priv->flags & SAFEXCEL_HW_EIP197) { + ret = eip197_trc_cache_init(priv); + if (ret) + return ret; + } + if (priv->flags & EIP197_ICE) { ret = eip197_load_firmwares(priv); if (ret) return ret; } - safexcel_hw_setup_cdesc_rings(priv); - safexcel_hw_setup_rdesc_rings(priv); - - return 0; + return safexcel_hw_setup_cdesc_rings(priv) ?: + safexcel_hw_setup_rdesc_rings(priv) ?: + 0; } /* Called with ring's lock taken */ @@ -535,7 +850,7 @@ handle_req: goto request_failed; if (backlog) - backlog->complete(backlog, -EINPROGRESS); + 
crypto_request_complete(backlog, -EINPROGRESS); /* In case the send() helper did not issue any command to push * to the engine because the input data was cached, continue to @@ -572,27 +887,48 @@ finalize: spin_unlock_bh(&priv->ring[ring].lock); /* let the RDR know we have pending descriptors */ - writel((rdesc * priv->config.rd_offset) << 2, + writel((rdesc * priv->config.rd_offset), EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT); /* let the CDR know we have pending descriptors */ - writel((cdesc * priv->config.cd_offset) << 2, + writel((cdesc * priv->config.cd_offset), EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT); } inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv, - struct safexcel_result_desc *rdesc) + void *rdp) { - if (likely(!rdesc->result_data.error_code)) + struct safexcel_result_desc *rdesc = rdp; + struct result_data_desc *result_data = rdp + priv->config.res_offset; + + if (likely((!rdesc->last_seg) || /* Rest only valid if last seg! */ + ((!rdesc->descriptor_overflow) && + (!rdesc->buffer_overflow) && + (!result_data->error_code)))) return 0; - if (rdesc->result_data.error_code & 0x407f) { - /* Fatal error (bits 0-7, 14) */ + if (rdesc->descriptor_overflow) + dev_err(priv->dev, "Descriptor overflow detected"); + + if (rdesc->buffer_overflow) + dev_err(priv->dev, "Buffer overflow detected"); + + if (result_data->error_code & 0x4066) { + /* Fatal error (bits 1,2,5,6 & 14) */ dev_err(priv->dev, - "cipher: result: result descriptor error (%d)\n", - rdesc->result_data.error_code); + "result descriptor error (%x)", + result_data->error_code); + return -EIO; - } else if (rdesc->result_data.error_code == BIT(9)) { + } else if (result_data->error_code & + (BIT(7) | BIT(4) | BIT(3) | BIT(0))) { + /* + * Give priority over authentication fails: + * Blocksize, length & overflow errors, + * something wrong with the input! 
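The triage above reads as a small classifier over the result error bits, with input-related errors taking precedence over the authentication-failed bit. A userspace sketch using the same masks and return codes (only the error_code word is modelled here, not the descriptor/buffer overflow flags):

#include <errno.h>
#include <stdio.h>

static int classify(unsigned int error_code)
{
	if (!error_code)
		return 0;
	if (error_code & 0x4066)	/* bits 1,2,5,6,14: fatal */
		return -EIO;
	if (error_code & 0x0099)	/* bits 0,3,4,7: bad input */
		return -EINVAL;
	if (error_code & 0x0200)	/* bit 9: authentication failed */
		return -EBADMSG;
	return 0;			/* anything else is not reported here */
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(1u << 14), classify(1u << 4), classify(1u << 9));
	return 0;
}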
+ */ + return -EINVAL; + } else if (result_data->error_code & BIT(9)) { /* Authentication failed */ return -EBADMSG; } @@ -634,33 +970,24 @@ void safexcel_complete(struct safexcel_crypto_priv *priv, int ring) } while (!cdesc->last_seg); } -void safexcel_inv_complete(struct crypto_async_request *req, int error) -{ - struct safexcel_inv_result *result = req->data; - - if (error == -EINPROGRESS) - return; - - result->error = error; - complete(&result->completion); -} - int safexcel_invalidate_cache(struct crypto_async_request *async, struct safexcel_crypto_priv *priv, dma_addr_t ctxr_dma, int ring) { struct safexcel_command_desc *cdesc; struct safexcel_result_desc *rdesc; + struct safexcel_token *dmmy; int ret = 0; /* Prepare command descriptor */ - cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma); + cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma, + &dmmy); if (IS_ERR(cdesc)) return PTR_ERR(cdesc); cdesc->control_data.type = EIP197_TYPE_EXTENDED; cdesc->control_data.options = 0; - cdesc->control_data.refresh = 0; + cdesc->control_data.context_lo &= ~EIP197_CONTEXT_SIZE_MASK; cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR; /* Prepare result descriptor */ @@ -705,13 +1032,14 @@ handle_results: ndesc = ctx->handle_result(priv, ring, req, &should_complete, &ret); if (ndesc < 0) { - dev_err(priv->dev, "failed to handle result (%d)", ndesc); + dev_err(priv->dev, "failed to handle result (%d)\n", + ndesc); goto acknowledge; } if (should_complete) { local_bh_disable(); - req->complete(req, ret); + crypto_request_complete(req, ret); local_bh_enable(); } @@ -720,11 +1048,10 @@ handle_results: } acknowledge: - if (i) { + if (i) writel(EIP197_xDR_PROC_xD_PKT(i) | - EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset), + (tot_descs * priv->config.rd_offset), EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT); - } /* If the number of requests overflowed the counter, try to proceed more * requests. @@ -778,7 +1105,7 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data) * reinitialized. This should not happen under * normal circumstances. 
*/ - dev_err(priv->dev, "RDR: fatal error."); + dev_err(priv->dev, "RDR: fatal error.\n"); } else if (likely(stat & EIP197_xDR_THRESH)) { rc = IRQ_WAKE_THREAD; } @@ -808,26 +1135,52 @@ static irqreturn_t safexcel_irq_ring_thread(int irq, void *data) return IRQ_HANDLED; } -static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name, +static int safexcel_request_ring_irq(void *pdev, int irqid, + int is_pci_dev, + int ring_id, irq_handler_t handler, irq_handler_t threaded_handler, struct safexcel_ring_irq_data *ring_irq_priv) { - int ret, irq = platform_get_irq_byname(pdev, name); + int ret, irq, cpu; + struct device *dev; + + if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) { + struct pci_dev *pci_pdev = pdev; + + dev = &pci_pdev->dev; + irq = pci_irq_vector(pci_pdev, irqid); + if (irq < 0) { + dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n", + irqid, irq); + return irq; + } + } else if (IS_ENABLED(CONFIG_OF)) { + struct platform_device *plf_pdev = pdev; + char irq_name[6] = {0}; /* "ringX\0" */ - if (irq < 0) { - dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name); - return irq; + snprintf(irq_name, 6, "ring%d", irqid); + dev = &plf_pdev->dev; + irq = platform_get_irq_byname(plf_pdev, irq_name); + + if (irq < 0) + return irq; + } else { + return -ENXIO; } - ret = devm_request_threaded_irq(&pdev->dev, irq, handler, + ret = devm_request_threaded_irq(dev, irq, handler, threaded_handler, IRQF_ONESHOT, - dev_name(&pdev->dev), ring_irq_priv); + dev_name(dev), ring_irq_priv); if (ret) { - dev_err(&pdev->dev, "unable to request IRQ %d\n", irq); + dev_err(dev, "unable to request IRQ %d\n", irq); return ret; } + /* Set affinity */ + cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE); + irq_set_affinity_hint(irq, get_cpu_mask(cpu)); + return irq; } @@ -838,6 +1191,7 @@ static struct safexcel_alg_template *safexcel_algs[] = { &safexcel_alg_cbc_des3_ede, &safexcel_alg_ecb_aes, &safexcel_alg_cbc_aes, + &safexcel_alg_ctr_aes, &safexcel_alg_md5, &safexcel_alg_sha1, &safexcel_alg_sha224, @@ -855,6 +1209,50 @@ static struct safexcel_alg_template *safexcel_algs[] = { &safexcel_alg_authenc_hmac_sha256_cbc_aes, &safexcel_alg_authenc_hmac_sha384_cbc_aes, &safexcel_alg_authenc_hmac_sha512_cbc_aes, + &safexcel_alg_authenc_hmac_sha1_cbc_des3_ede, + &safexcel_alg_authenc_hmac_sha1_ctr_aes, + &safexcel_alg_authenc_hmac_sha224_ctr_aes, + &safexcel_alg_authenc_hmac_sha256_ctr_aes, + &safexcel_alg_authenc_hmac_sha384_ctr_aes, + &safexcel_alg_authenc_hmac_sha512_ctr_aes, + &safexcel_alg_xts_aes, + &safexcel_alg_gcm, + &safexcel_alg_ccm, + &safexcel_alg_cbcmac, + &safexcel_alg_xcbcmac, + &safexcel_alg_cmac, + &safexcel_alg_chacha20, + &safexcel_alg_chachapoly, + &safexcel_alg_chachapoly_esp, + &safexcel_alg_sm3, + &safexcel_alg_hmac_sm3, + &safexcel_alg_ecb_sm4, + &safexcel_alg_cbc_sm4, + &safexcel_alg_ctr_sm4, + &safexcel_alg_authenc_hmac_sha1_cbc_sm4, + &safexcel_alg_authenc_hmac_sm3_cbc_sm4, + &safexcel_alg_authenc_hmac_sha1_ctr_sm4, + &safexcel_alg_authenc_hmac_sm3_ctr_sm4, + &safexcel_alg_sha3_224, + &safexcel_alg_sha3_256, + &safexcel_alg_sha3_384, + &safexcel_alg_sha3_512, + &safexcel_alg_hmac_sha3_224, + &safexcel_alg_hmac_sha3_256, + &safexcel_alg_hmac_sha3_384, + &safexcel_alg_hmac_sha3_512, + &safexcel_alg_authenc_hmac_sha1_cbc_des, + &safexcel_alg_authenc_hmac_sha256_cbc_des3_ede, + &safexcel_alg_authenc_hmac_sha224_cbc_des3_ede, + &safexcel_alg_authenc_hmac_sha512_cbc_des3_ede, + &safexcel_alg_authenc_hmac_sha384_cbc_des3_ede, + &safexcel_alg_authenc_hmac_sha256_cbc_des, + 
&safexcel_alg_authenc_hmac_sha224_cbc_des, + &safexcel_alg_authenc_hmac_sha512_cbc_des, + &safexcel_alg_authenc_hmac_sha384_cbc_des, + &safexcel_alg_rfc4106_gcm, + &safexcel_alg_rfc4543_gcm, + &safexcel_alg_rfc4309_ccm, }; static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) @@ -864,7 +1262,10 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) { safexcel_algs[i]->priv = priv; - if (!(safexcel_algs[i]->engines & priv->version)) + /* Do we have all required base algorithms available? */ + if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) != + safexcel_algs[i]->algo_mask) + /* No, so don't register this ciphersuite */ continue; if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) @@ -882,7 +1283,10 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) fail: for (j = 0; j < i; j++) { - if (!(safexcel_algs[j]->engines & priv->version)) + /* Do we have all required base algorithms available? */ + if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) != + safexcel_algs[j]->algo_mask) + /* No, so don't unregister this ciphersuite */ continue; if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) @@ -901,7 +1305,10 @@ static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv) int i; for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) { - if (!(safexcel_algs[i]->engines & priv->version)) + /* Do we have all required base algorithms available? */ + if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) != + safexcel_algs[i]->algo_mask) + /* No, so don't unregister this ciphersuite */ continue; if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) @@ -915,41 +1322,37 @@ static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv) static void safexcel_configure(struct safexcel_crypto_priv *priv) { - u32 val, mask = 0; - - val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); - - /* Read number of PEs from the engine */ - switch (priv->version) { - case EIP197B: - case EIP197D: - mask = EIP197_N_PES_MASK; - break; - default: - mask = EIP97_N_PES_MASK; - } - priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask; - - val = (val & GENMASK(27, 25)) >> 25; - mask = BIT(val) - 1; + u32 mask = BIT(priv->hwconfig.hwdataw) - 1; - val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); - priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings); + priv->config.pes = priv->hwconfig.hwnumpes; + priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings); + /* Cannot currently support more rings than we have ring AICs! 
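The descriptor offsets computed just below round each size up to a whole number of 2^hwdataw-dword bus words with the (size + mask) & ~mask idiom, and are finally converted from dwords to bytes. A standalone sketch with assumed sizes (hwdataw = 2, i.e. a 4-dword bus word; the real descriptor sizes come from safexcel.h):

#include <stdio.h>

static unsigned int round_to_bus_words(unsigned int size_dwords,
				       unsigned int hwdataw)
{
	unsigned int mask = (1u << hwdataw) - 1;

	return (size_dwords + mask) & ~mask;	/* round up to 2^hwdataw dwords */
}

int main(void)
{
	unsigned int hwdataw = 2;	/* assumed: bus word = 4 dwords */
	unsigned int cd_size = 10;	/* assumed command descriptor, dwords */
	unsigned int rd_size = 8;	/* assumed result descriptor, dwords */
	unsigned int cd_offset, rd_offset;

	cd_offset = round_to_bus_words(cd_size, hwdataw);
	rd_offset = round_to_bus_words(rd_size, hwdataw);

	/* the driver finally converts dwords to bytes */
	printf("cd_offset=%u bytes, rd_offset=%u bytes\n",
	       cd_offset * 4, rd_offset * 4);
	return 0;
}

Here 10 dwords round up to 12 (48 bytes) while 8 dwords are already aligned (32 bytes).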
*/ + priv->config.rings = min_t(u32, priv->config.rings, + priv->hwconfig.hwnumraic); - priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32)); + priv->config.cd_size = EIP197_CD64_FETCH_SIZE; priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask; + priv->config.cdsh_offset = (EIP197_MAX_TOKENS + mask) & ~mask; - priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32)); + /* res token is behind the descr, but ofs must be rounded to buswdth */ + priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask; + /* now the size of the descr is this 1st part plus the result struct */ + priv->config.rd_size = priv->config.res_offset + + EIP197_RD64_RESULT_SIZE; priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask; + + /* convert dwords to bytes */ + priv->config.cd_offset *= sizeof(u32); + priv->config.cdsh_offset *= sizeof(u32); + priv->config.rd_offset *= sizeof(u32); + priv->config.res_offset *= sizeof(u32); } static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv) { struct safexcel_register_offsets *offsets = &priv->offsets; - switch (priv->version) { - case EIP197B: - case EIP197D: + if (priv->flags & SAFEXCEL_HW_EIP197) { offsets->hia_aic = EIP197_HIA_AIC_BASE; offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE; offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE; @@ -960,8 +1363,8 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv) offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE; offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE; offsets->pe = EIP197_PE_BASE; - break; - case EIP97IES: + offsets->global = EIP197_GLOBAL_BASE; + } else { offsets->hia_aic = EIP97_HIA_AIC_BASE; offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE; offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE; @@ -972,134 +1375,300 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv) offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE; offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE; offsets->pe = EIP97_PE_BASE; - break; + offsets->global = EIP97_GLOBAL_BASE; } } -static int safexcel_probe(struct platform_device *pdev) +/* + * Generic part of probe routine, shared by platform and PCI driver + * + * Assumes IO resources have been mapped, private data mem has been allocated, + * clocks have been enabled, device pointer has been assigned etc. 
+ * + */ +static int safexcel_probe_generic(void *pdev, + struct safexcel_crypto_priv *priv, + int is_pci_dev) { - struct device *dev = &pdev->dev; - struct resource *res; - struct safexcel_crypto_priv *priv; - int i, ret; + struct device *dev = priv->dev; + u32 peid, version, mask, val, hiaopt, hwopt, peopt; + int i, ret, hwctg; - priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); - if (!priv) + priv->context_pool = dmam_pool_create("safexcel-context", dev, + sizeof(struct safexcel_context_record), + 1, 0); + if (!priv->context_pool) return -ENOMEM; - priv->dev = dev; - priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev); - - if (priv->version == EIP197B || priv->version == EIP197D) - priv->flags |= EIP197_TRC_CACHE; + /* + * First try the EIP97 HIA version regs + * For the EIP197, this is guaranteed to NOT return any of the test + * values + */ + version = readl(priv->base + EIP97_HIA_AIC_BASE + EIP197_HIA_VERSION); + + mask = 0; /* do not swap */ + if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) { + priv->hwconfig.hiaver = EIP197_VERSION_MASK(version); + } else if (EIP197_REG_HI16(version) == EIP197_HIA_VERSION_BE) { + /* read back byte-swapped, so complement byte swap bits */ + mask = EIP197_MST_CTRL_BYTE_SWAP_BITS; + priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version); + } else { + /* So it wasn't an EIP97 ... maybe it's an EIP197? */ + version = readl(priv->base + EIP197_HIA_AIC_BASE + + EIP197_HIA_VERSION); + if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) { + priv->hwconfig.hiaver = EIP197_VERSION_MASK(version); + priv->flags |= SAFEXCEL_HW_EIP197; + } else if (EIP197_REG_HI16(version) == + EIP197_HIA_VERSION_BE) { + /* read back byte-swapped, so complement swap bits */ + mask = EIP197_MST_CTRL_BYTE_SWAP_BITS; + priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version); + priv->flags |= SAFEXCEL_HW_EIP197; + } else { + return -ENODEV; + } + } + /* Now initialize the reg offsets based on the probing info so far */ safexcel_init_register_offsets(priv); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(dev, res); - if (IS_ERR(priv->base)) { - dev_err(dev, "failed to get resource\n"); - return PTR_ERR(priv->base); + /* + * If the version was read byte-swapped, we need to flip the device + * swapping Keep in mind here, though, that what we write will also be + * byte-swapped ... + */ + if (mask) { + val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); + val = val ^ (mask >> 24); /* toggle byte swap bits */ + writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); } - priv->clk = devm_clk_get(&pdev->dev, NULL); - ret = PTR_ERR_OR_ZERO(priv->clk); - /* The clock isn't mandatory */ - if (ret != -ENOENT) { - if (ret) - return ret; - - ret = clk_prepare_enable(priv->clk); - if (ret) { - dev_err(dev, "unable to enable clk (%d)\n", ret); - return ret; - } + /* + * We're not done probing yet! We may fall through to here if no HIA + * was found at all. So, with the endianness presumably correct now and + * the offsets setup, *really* probe for the EIP97/EIP197. + */ + version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION); + if (((priv->flags & SAFEXCEL_HW_EIP197) && + (EIP197_REG_LO16(version) != EIP197_VERSION_LE) && + (EIP197_REG_LO16(version) != EIP196_VERSION_LE)) || + ((!(priv->flags & SAFEXCEL_HW_EIP197) && + (EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) { + /* + * We did not find the device that matched our initial probing + * (or our initial probing failed) Report appropriate error. 
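The endianness probing above relies on the HIA signature changing halves when the master interface byte-reverses 32-bit reads: a register that reads 0x....35ca with matching endianness comes back with 0xca35 in its upper half when swapped, which is what triggers toggling the byte-swap bits in MST_CTRL. A small sketch of that effect (bswap32() here stands in for the swapped bus, and the 0x0123 version nibbles are made up):

#include <stdint.h>
#include <stdio.h>

static uint32_t bswap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0xff00) |
	       ((v << 8) & 0xff0000) | (v << 24);
}

int main(void)
{
	uint32_t reg = 0x012335ca;		/* seen with matching endianness */
	uint32_t swapped = bswap32(reg);	/* seen through a swapped bus */

	if ((reg & 0xffff) == 0x35ca)
		printf("direct read: signature in LO16, no swap needed\n");
	if (((swapped >> 16) & 0xffff) == 0xca35)
		printf("swapped read (0x%08x): signature in HI16, toggle byte-swap bits\n",
		       swapped);
	return 0;
}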
+ */ + dev_err(priv->dev, "Probing for EIP97/EIP19x failed - no such device (read %08x)\n", + version); + return -ENODEV; } - priv->reg_clk = devm_clk_get(&pdev->dev, "reg"); - ret = PTR_ERR_OR_ZERO(priv->reg_clk); - /* The clock isn't mandatory */ - if (ret != -ENOENT) { - if (ret) - goto err_core_clk; + priv->hwconfig.hwver = EIP197_VERSION_MASK(version); + hwctg = version >> 28; + peid = version & 255; - ret = clk_prepare_enable(priv->reg_clk); - if (ret) { - dev_err(dev, "unable to enable reg clk (%d)\n", ret); - goto err_core_clk; - } + /* Detect EIP206 processing pipe */ + version = readl(EIP197_PE(priv) + + EIP197_PE_VERSION(0)); + if (EIP197_REG_LO16(version) != EIP206_VERSION_LE) { + dev_err(priv->dev, "EIP%d: EIP206 not detected\n", peid); + return -ENODEV; } + priv->hwconfig.ppver = EIP197_VERSION_MASK(version); - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); - if (ret) - goto err_reg_clk; + /* Detect EIP96 packet engine and version */ + version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0)); + if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) { + dev_err(dev, "EIP%d: EIP96 not detected.\n", peid); + return -ENODEV; + } + priv->hwconfig.pever = EIP197_VERSION_MASK(version); + + hwopt = readl(EIP197_GLOBAL(priv) + EIP197_OPTIONS); + hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS); + + priv->hwconfig.icever = 0; + priv->hwconfig.ocever = 0; + priv->hwconfig.psever = 0; + if (priv->flags & SAFEXCEL_HW_EIP197) { + /* EIP197 */ + peopt = readl(EIP197_PE(priv) + EIP197_PE_OPTIONS(0)); + + priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) & + EIP197_HWDATAW_MASK; + priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) & + EIP197_CFSIZE_MASK) + + EIP197_CFSIZE_ADJUST; + priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) & + EIP197_RFSIZE_MASK) + + EIP197_RFSIZE_ADJUST; + priv->hwconfig.hwnumpes = (hiaopt >> EIP197_N_PES_OFFSET) & + EIP197_N_PES_MASK; + priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) & + EIP197_N_RINGS_MASK; + if (hiaopt & EIP197_HIA_OPT_HAS_PE_ARB) + priv->flags |= EIP197_PE_ARB; + if (EIP206_OPT_ICE_TYPE(peopt) == 1) { + priv->flags |= EIP197_ICE; + /* Detect ICE EIP207 class. engine and version */ + version = readl(EIP197_PE(priv) + + EIP197_PE_ICE_VERSION(0)); + if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) { + dev_err(dev, "EIP%d: ICE EIP207 not detected.\n", + peid); + return -ENODEV; + } + priv->hwconfig.icever = EIP197_VERSION_MASK(version); + } + if (EIP206_OPT_OCE_TYPE(peopt) == 1) { + priv->flags |= EIP197_OCE; + /* Detect EIP96PP packet stream editor and version */ + version = readl(EIP197_PE(priv) + EIP197_PE_PSE_VERSION(0)); + if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) { + dev_err(dev, "EIP%d: EIP96PP not detected.\n", peid); + return -ENODEV; + } + priv->hwconfig.psever = EIP197_VERSION_MASK(version); + /* Detect OCE EIP207 class. 
engine and version */ + version = readl(EIP197_PE(priv) + + EIP197_PE_ICE_VERSION(0)); + if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) { + dev_err(dev, "EIP%d: OCE EIP207 not detected.\n", + peid); + return -ENODEV; + } + priv->hwconfig.ocever = EIP197_VERSION_MASK(version); + } + /* If not a full TRC, then assume simple TRC */ + if (!(hwopt & EIP197_OPT_HAS_TRC)) + priv->flags |= EIP197_SIMPLE_TRC; + /* EIP197 always has SOME form of TRC */ + priv->flags |= EIP197_TRC_CACHE; + } else { + /* EIP97 */ + priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) & + EIP97_HWDATAW_MASK; + priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) & + EIP97_CFSIZE_MASK; + priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) & + EIP97_RFSIZE_MASK; + priv->hwconfig.hwnumpes = 1; /* by definition */ + priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) & + EIP197_N_RINGS_MASK; + } - priv->context_pool = dmam_pool_create("safexcel-context", dev, - sizeof(struct safexcel_context_record), - 1, 0); - if (!priv->context_pool) { - ret = -ENOMEM; - goto err_reg_clk; + /* Scan for ring AIC's */ + for (i = 0; i < EIP197_MAX_RING_AIC; i++) { + version = readl(EIP197_HIA_AIC_R(priv) + + EIP197_HIA_AIC_R_VERSION(i)); + if (EIP197_REG_LO16(version) != EIP201_VERSION_LE) + break; } + priv->hwconfig.hwnumraic = i; + /* Low-end EIP196 may not have any ring AIC's ... */ + if (!priv->hwconfig.hwnumraic) { + dev_err(priv->dev, "No ring interrupt controller present!\n"); + return -ENODEV; + } + + /* Get supported algorithms from EIP96 transform engine */ + priv->hwconfig.algo_flags = readl(EIP197_PE(priv) + + EIP197_PE_EIP96_OPTIONS(0)); + + /* Print single info line describing what we just detected */ + dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x(alg:%08x)/%x/%x/%x\n", + peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hwnumpes, + priv->hwconfig.hwnumrings, priv->hwconfig.hwnumraic, + priv->hwconfig.hiaver, priv->hwconfig.hwdataw, + priv->hwconfig.hwcfsize, priv->hwconfig.hwrfsize, + priv->hwconfig.ppver, priv->hwconfig.pever, + priv->hwconfig.algo_flags, priv->hwconfig.icever, + priv->hwconfig.ocever, priv->hwconfig.psever); safexcel_configure(priv); + if (IS_ENABLED(CONFIG_PCI) && priv->data->version == EIP197_DEVBRD) { + /* + * Request MSI vectors for global + 1 per ring - + * or just 1 for older dev images + */ + struct pci_dev *pci_pdev = pdev; + + ret = pci_alloc_irq_vectors(pci_pdev, + priv->config.rings + 1, + priv->config.rings + 1, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (ret < 0) { + dev_err(dev, "Failed to allocate PCI MSI interrupts\n"); + return ret; + } + } + + /* Register the ring IRQ handlers and configure the rings */ priv->ring = devm_kcalloc(dev, priv->config.rings, sizeof(*priv->ring), GFP_KERNEL); - if (!priv->ring) { - ret = -ENOMEM; - goto err_reg_clk; - } + if (!priv->ring) + return -ENOMEM; for (i = 0; i < priv->config.rings; i++) { - char irq_name[6] = {0}; /* "ringX\0" */ - char wq_name[9] = {0}; /* "wq_ringX\0" */ + char wq_name[9] = {0}; int irq; struct safexcel_ring_irq_data *ring_irq; ret = safexcel_init_ring_descriptors(priv, &priv->ring[i].cdr, &priv->ring[i].rdr); - if (ret) - goto err_reg_clk; + if (ret) { + dev_err(dev, "Failed to initialize rings\n"); + goto err_cleanup_rings; + } priv->ring[i].rdr_req = devm_kcalloc(dev, EIP197_DEFAULT_RING_SIZE, - sizeof(priv->ring[i].rdr_req), + sizeof(*priv->ring[i].rdr_req), GFP_KERNEL); if (!priv->ring[i].rdr_req) { ret = -ENOMEM; - goto err_reg_clk; + goto err_cleanup_rings; } ring_irq = 
 		devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
 		if (!ring_irq) {
 			ret = -ENOMEM;
-			goto err_reg_clk;
+			goto err_cleanup_rings;
 		}
 
 		ring_irq->priv = priv;
 		ring_irq->ring = i;
 
-		snprintf(irq_name, 6, "ring%d", i);
-		irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
+		irq = safexcel_request_ring_irq(pdev,
+						EIP197_IRQ_NUMBER(i, is_pci_dev),
+						is_pci_dev,
+						i,
+						safexcel_irq_ring,
 						safexcel_irq_ring_thread,
 						ring_irq);
 		if (irq < 0) {
+			dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
 			ret = irq;
-			goto err_reg_clk;
+			goto err_cleanup_rings;
 		}
 
+		priv->ring[i].irq = irq;
 		priv->ring[i].work_data.priv = priv;
 		priv->ring[i].work_data.ring = i;
-		INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);
+		INIT_WORK(&priv->ring[i].work_data.work,
+			  safexcel_dequeue_work);
 
 		snprintf(wq_name, 9, "wq_ring%d", i);
-		priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
+		priv->ring[i].workqueue =
+			create_singlethread_workqueue(wq_name);
 		if (!priv->ring[i].workqueue) {
 			ret = -ENOMEM;
-			goto err_reg_clk;
+			goto err_cleanup_rings;
 		}
 
 		priv->ring[i].requests = 0;
@@ -1112,27 +1681,30 @@ static int safexcel_probe(struct platform_device *pdev)
 		spin_lock_init(&priv->ring[i].queue_lock);
 	}
 
-	platform_set_drvdata(pdev, priv);
 	atomic_set(&priv->ring_used, 0);
 
 	ret = safexcel_hw_init(priv);
 	if (ret) {
-		dev_err(dev, "EIP h/w init failed (%d)\n", ret);
-		goto err_reg_clk;
+		dev_err(dev, "HW init failed (%d)\n", ret);
+		goto err_cleanup_rings;
 	}
 
 	ret = safexcel_register_algorithms(priv);
 	if (ret) {
 		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
-		goto err_reg_clk;
+		goto err_cleanup_rings;
 	}
 
 	return 0;
 
-err_reg_clk:
-	clk_disable_unprepare(priv->reg_clk);
-err_core_clk:
-	clk_disable_unprepare(priv->clk);
+err_cleanup_rings:
+	for (i = 0; i < priv->config.rings; i++) {
+		if (priv->ring[i].irq)
+			irq_set_affinity_hint(priv->ring[i].irq, NULL);
+		if (priv->ring[i].workqueue)
+			destroy_workqueue(priv->ring[i].workqueue);
+	}
+
 	return ret;
 }
 
@@ -1155,7 +1727,76 @@ static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
 	}
 }
 
-static int safexcel_remove(struct platform_device *pdev)
+/* for Device Tree platform driver */
+
+static int safexcel_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct safexcel_crypto_priv *priv;
+	int ret;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = dev;
+	priv->data = (struct safexcel_priv_data *)of_device_get_match_data(dev);
+
+	platform_set_drvdata(pdev, priv);
+
+	priv->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(priv->base)) {
+		dev_err(dev, "failed to get resource\n");
+		return PTR_ERR(priv->base);
+	}
+
+	priv->clk = devm_clk_get(&pdev->dev, NULL);
+	ret = PTR_ERR_OR_ZERO(priv->clk);
+	/* The clock isn't mandatory */
+	if (ret != -ENOENT) {
+		if (ret)
+			return ret;
+
+		ret = clk_prepare_enable(priv->clk);
+		if (ret) {
+			dev_err(dev, "unable to enable clk (%d)\n", ret);
+			return ret;
+		}
+	}
+
+	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
+	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
+	/* The clock isn't mandatory */
+	if (ret != -ENOENT) {
+		if (ret)
+			goto err_core_clk;
+
+		ret = clk_prepare_enable(priv->reg_clk);
+		if (ret) {
+			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
+			goto err_core_clk;
+		}
+	}
+
+	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+	if (ret)
+		goto err_reg_clk;
+
+	/* Generic EIP97/EIP197 device probing */
+	ret = safexcel_probe_generic(pdev, priv, 0);
+	if (ret)
+		goto err_reg_clk;
+
+	return 0;
+
+err_reg_clk:
+	clk_disable_unprepare(priv->reg_clk);
+err_core_clk:
+	clk_disable_unprepare(priv->clk);
+	return ret;
+}
+
+static void safexcel_remove(struct platform_device *pdev)
 {
 	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
 	int i;
@@ -1163,40 +1804,66 @@ static int safexcel_remove(struct platform_device *pdev)
 	safexcel_unregister_algorithms(priv);
 	safexcel_hw_reset_rings(priv);
 
+	clk_disable_unprepare(priv->reg_clk);
 	clk_disable_unprepare(priv->clk);
 
-	for (i = 0; i < priv->config.rings; i++)
+	for (i = 0; i < priv->config.rings; i++) {
+		irq_set_affinity_hint(priv->ring[i].irq, NULL);
 		destroy_workqueue(priv->ring[i].workqueue);
-
-	return 0;
+	}
 }
 
+static const struct safexcel_priv_data eip97ies_mrvl_data = {
+	.version = EIP97IES_MRVL,
+};
+
+static const struct safexcel_priv_data eip197b_mrvl_data = {
+	.version = EIP197B_MRVL,
+};
+
+static const struct safexcel_priv_data eip197d_mrvl_data = {
+	.version = EIP197D_MRVL,
+};
+
+static const struct safexcel_priv_data eip197_devbrd_data = {
+	.version = EIP197_DEVBRD,
+};
+
+static const struct safexcel_priv_data eip197c_mxl_data = {
+	.version = EIP197C_MXL,
+	.fw_little_endian = true,
+};
+
 static const struct of_device_id safexcel_of_match_table[] = {
 	{
 		.compatible = "inside-secure,safexcel-eip97ies",
-		.data = (void *)EIP97IES,
+		.data = &eip97ies_mrvl_data,
 	},
 	{
 		.compatible = "inside-secure,safexcel-eip197b",
-		.data = (void *)EIP197B,
+		.data = &eip197b_mrvl_data,
 	},
 	{
 		.compatible = "inside-secure,safexcel-eip197d",
-		.data = (void *)EIP197D,
+		.data = &eip197d_mrvl_data,
 	},
 	{
-		/* Deprecated. Kept for backward compatibility. */
+		.compatible = "inside-secure,safexcel-eip197c-mxl",
+		.data = &eip197c_mxl_data,
+	},
+	/* For backward compatibility and intended for generic use */
+	{
 		.compatible = "inside-secure,safexcel-eip97",
-		.data = (void *)EIP97IES,
+		.data = &eip97ies_mrvl_data,
 	},
 	{
-		/* Deprecated. Kept for backward compatibility. */
 		.compatible = "inside-secure,safexcel-eip197",
-		.data = (void *)EIP197B,
+		.data = &eip197b_mrvl_data,
 	},
 	{},
 };
+MODULE_DEVICE_TABLE(of, safexcel_of_match_table);
 
 static struct platform_driver crypto_safexcel = {
 	.probe = safexcel_probe,
@@ -1206,10 +1873,170 @@ static struct platform_driver crypto_safexcel = {
 		.of_match_table = safexcel_of_match_table,
 	},
 };
-module_platform_driver(crypto_safexcel);
+
+/* PCIE devices - i.e. Inside Secure development boards */
+
+static int safexcel_pci_probe(struct pci_dev *pdev,
+			      const struct pci_device_id *ent)
+{
+	struct device *dev = &pdev->dev;
+	struct safexcel_crypto_priv *priv;
+	void __iomem *pciebase;
+	int rc;
+	u32 val;
+
+	dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n",
+		ent->vendor, ent->device, ent->subvendor,
+		ent->subdevice, ent->driver_data);
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = dev;
+	priv->data = (struct safexcel_priv_data *)ent->driver_data;
+
+	pci_set_drvdata(pdev, priv);
+
+	/* enable the device */
+	rc = pcim_enable_device(pdev);
+	if (rc) {
+		dev_err(dev, "Failed to enable PCI device\n");
+		return rc;
+	}
+
+	/* take ownership of PCI BAR0 */
+	rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel");
+	if (rc) {
+		dev_err(dev, "Failed to map IO region for BAR0\n");
+		return rc;
+	}
+	priv->base = pcim_iomap_table(pdev)[0];
+
+	if (priv->data->version == EIP197_DEVBRD) {
+		dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");
+
+		rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
+		if (rc) {
+			dev_err(dev, "Failed to map IO region for BAR4\n");
+			return rc;
+		}
+
+		pciebase = pcim_iomap_table(pdev)[2];
+		val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);
+		if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) {
+			dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n",
+				(val & 0xff));
+
+			/* Setup MSI identity map mapping */
+			writel(EIP197_XLX_USER_VECT_LUT0_IDENT,
+			       pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR);
+			writel(EIP197_XLX_USER_VECT_LUT1_IDENT,
+			       pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR);
+			writel(EIP197_XLX_USER_VECT_LUT2_IDENT,
+			       pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR);
+			writel(EIP197_XLX_USER_VECT_LUT3_IDENT,
+			       pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR);
+
+			/* Enable all device interrupts */
+			writel(GENMASK(31, 0),
+			       pciebase + EIP197_XLX_USER_INT_ENB_MSK);
+		} else {
+			dev_err(dev, "Unrecognised IRQ block identifier %x\n",
+				val);
+			return -ENODEV;
+		}
+
+		/* HW reset FPGA dev board */
+		/* assert reset */
+		writel(1, priv->base + EIP197_XLX_GPIO_BASE);
+		wmb(); /* maintain strict ordering for accesses here */
+		/* deassert reset */
+		writel(0, priv->base + EIP197_XLX_GPIO_BASE);
+		wmb(); /* maintain strict ordering for accesses here */
+	}
+
+	/* enable bus mastering */
+	pci_set_master(pdev);
+
+	/* Generic EIP97/EIP197 device probing */
+	rc = safexcel_probe_generic(pdev, priv, 1);
+	return rc;
+}
+
+static void safexcel_pci_remove(struct pci_dev *pdev)
+{
+	struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
+	int i;
+
+	safexcel_unregister_algorithms(priv);
+
+	for (i = 0; i < priv->config.rings; i++)
+		destroy_workqueue(priv->ring[i].workqueue);
+
+	safexcel_hw_reset_rings(priv);
+}
+
+static const struct pci_device_id safexcel_pci_ids[] = {
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
+			       0x16ae, 0xc522),
+		.driver_data = (kernel_ulong_t)&eip197_devbrd_data,
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(pci, safexcel_pci_ids);
+
+static struct pci_driver safexcel_pci_driver = {
+	.name = "crypto-safexcel",
+	.id_table = safexcel_pci_ids,
+	.probe = safexcel_pci_probe,
+	.remove = safexcel_pci_remove,
+};
+
+static int __init safexcel_init(void)
+{
+	int ret;
+
+	/* Register PCI driver */
+	ret = pci_register_driver(&safexcel_pci_driver);
+
+	/* Register platform driver */
+	if (IS_ENABLED(CONFIG_OF) && !ret) {
+		ret = platform_driver_register(&crypto_safexcel);
+		if (ret)
+			pci_unregister_driver(&safexcel_pci_driver);
+	}
+
+	return ret;
+}
+
+static void __exit safexcel_exit(void)
+{
+	/* Unregister platform driver */
+	if (IS_ENABLED(CONFIG_OF))
+		platform_driver_unregister(&crypto_safexcel);
+
+	/* Unregister PCI driver if successfully registered before */
+	pci_unregister_driver(&safexcel_pci_driver);
+}
+
+module_init(safexcel_init);
+module_exit(safexcel_exit);
 
 MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
 MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
 MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
-MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197");
+MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
 MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
+
+MODULE_FIRMWARE("ifpp.bin");
+MODULE_FIRMWARE("ipue.bin");
+MODULE_FIRMWARE("inside-secure/eip197b/ifpp.bin");
+MODULE_FIRMWARE("inside-secure/eip197b/ipue.bin");
+MODULE_FIRMWARE("inside-secure/eip197d/ifpp.bin");
+MODULE_FIRMWARE("inside-secure/eip197d/ipue.bin");
+MODULE_FIRMWARE("inside-secure/eip197_minifw/ifpp.bin");
+MODULE_FIRMWARE("inside-secure/eip197_minifw/ipue.bin");
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 65624a81f0fd..0f27367a85fa 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -11,18 +11,46 @@
 #include <crypto/aead.h>
 #include <crypto/algapi.h>
 #include <crypto/internal/hash.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/sha3.h>
 #include <crypto/skcipher.h>
-
-#define EIP197_HIA_VERSION_LE			0xca35
-#define EIP197_HIA_VERSION_BE			0x35ca
+#include <linux/types.h>
+
+#define EIP197_HIA_VERSION_BE			0xca35
+#define EIP197_HIA_VERSION_LE			0x35ca
+#define EIP97_VERSION_LE			0x9e61
+#define EIP196_VERSION_LE			0x3bc4
+#define EIP197_VERSION_LE			0x3ac5
+#define EIP96_VERSION_LE			0x9f60
+#define EIP201_VERSION_LE			0x36c9
+#define EIP206_VERSION_LE			0x31ce
+#define EIP207_VERSION_LE			0x30cf
+#define EIP197_REG_LO16(reg)			(reg & 0xffff)
+#define EIP197_REG_HI16(reg)			((reg >> 16) & 0xffff)
+#define EIP197_VERSION_MASK(reg)		((reg >> 16) & 0xfff)
+#define EIP197_VERSION_SWAP(reg)		(((reg & 0xf0) << 4) | \
+						((reg >> 4) & 0xf0) | \
+						((reg >> 12) & 0xf))
+
+/* EIP197 HIA OPTIONS ENCODING */
+#define EIP197_HIA_OPT_HAS_PE_ARB		BIT(29)
+
+/* EIP206 OPTIONS ENCODING */
+#define EIP206_OPT_ICE_TYPE(n)			((n>>8)&3)
+#define EIP206_OPT_OCE_TYPE(n)			((n>>10)&3)
+
+/* EIP197 OPTIONS ENCODING */
+#define EIP197_OPT_HAS_TRC			BIT(31)
 
 /* Static configuration */
 #define EIP197_DEFAULT_RING_SIZE		400
-#define EIP197_MAX_TOKENS			8
+#define EIP197_EMB_TOKENS			4 /* Pad CD to 16 dwords */
+#define EIP197_MAX_TOKENS			16
 #define EIP197_MAX_RINGS			4
-#define EIP197_FETCH_COUNT			1
+#define EIP197_FETCH_DEPTH			2
 #define EIP197_MAX_BATCH_SZ			64
+#define EIP197_MAX_RING_AIC			14
 #define EIP197_GFP_FLAGS(base)	((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
\ GFP_KERNEL : GFP_ATOMIC) @@ -38,6 +66,27 @@ char __##name##_desc[size] CRYPTO_MINALIGN_ATTR; \ struct type##_request *name = (void *)__##name##_desc +/* Xilinx dev board base offsets */ +#define EIP197_XLX_GPIO_BASE 0x200000 +#define EIP197_XLX_IRQ_BLOCK_ID_ADDR 0x2000 +#define EIP197_XLX_IRQ_BLOCK_ID_VALUE 0x1fc2 +#define EIP197_XLX_USER_INT_ENB_MSK 0x2004 +#define EIP197_XLX_USER_INT_ENB_SET 0x2008 +#define EIP197_XLX_USER_INT_ENB_CLEAR 0x200c +#define EIP197_XLX_USER_INT_BLOCK 0x2040 +#define EIP197_XLX_USER_INT_PEND 0x2048 +#define EIP197_XLX_USER_VECT_LUT0_ADDR 0x2080 +#define EIP197_XLX_USER_VECT_LUT0_IDENT 0x03020100 +#define EIP197_XLX_USER_VECT_LUT1_ADDR 0x2084 +#define EIP197_XLX_USER_VECT_LUT1_IDENT 0x07060504 +#define EIP197_XLX_USER_VECT_LUT2_ADDR 0x2088 +#define EIP197_XLX_USER_VECT_LUT2_IDENT 0x0b0a0908 +#define EIP197_XLX_USER_VECT_LUT3_ADDR 0x208c +#define EIP197_XLX_USER_VECT_LUT3_IDENT 0x0f0e0d0c + +/* Helper defines for probe function */ +#define EIP197_IRQ_NUMBER(i, is_pci) (i + is_pci) + /* Register base offsets */ #define EIP197_HIA_AIC(priv) ((priv)->base + (priv)->offsets.hia_aic) #define EIP197_HIA_AIC_G(priv) ((priv)->base + (priv)->offsets.hia_aic_g) @@ -49,6 +98,7 @@ #define EIP197_HIA_DSE_THR(priv) ((priv)->base + (priv)->offsets.hia_dse_thr) #define EIP197_HIA_GEN_CFG(priv) ((priv)->base + (priv)->offsets.hia_gen_cfg) #define EIP197_PE(priv) ((priv)->base + (priv)->offsets.pe) +#define EIP197_GLOBAL(priv) ((priv)->base + (priv)->offsets.global) /* EIP197 base offsets */ #define EIP197_HIA_AIC_BASE 0x90000 @@ -61,6 +111,7 @@ #define EIP197_HIA_DSE_THR_BASE 0x8d040 #define EIP197_HIA_GEN_CFG_BASE 0xf0000 #define EIP197_PE_BASE 0xa0000 +#define EIP197_GLOBAL_BASE 0xf0000 /* EIP97 base offsets */ #define EIP97_HIA_AIC_BASE 0x0 @@ -73,6 +124,7 @@ #define EIP97_HIA_DSE_THR_BASE 0xf600 #define EIP97_HIA_GEN_CFG_BASE 0x10000 #define EIP97_PE_BASE 0x10000 +#define EIP97_GLOBAL_BASE 0x10000 /* CDR/RDR register offsets */ #define EIP197_HIA_xDR_OFF(priv, r) (EIP197_HIA_AIC_xDR(priv) + (r) * 0x1000) @@ -105,6 +157,7 @@ #define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r)) #define EIP197_HIA_AIC_R_ACK(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r)) #define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0xe014 - EIP197_HIA_AIC_R_OFF(r)) +#define EIP197_HIA_AIC_R_VERSION(r) (0xe01c - EIP197_HIA_AIC_R_OFF(r)) #define EIP197_HIA_AIC_G_ENABLE_CTRL 0xf808 #define EIP197_HIA_AIC_G_ENABLED_STAT 0xf810 #define EIP197_HIA_AIC_G_ACK 0xf810 @@ -115,15 +168,30 @@ #define EIP197_PE_IN_TBUF_THRES(n) (0x0100 + (0x2000 * (n))) #define EIP197_PE_ICE_SCRATCH_RAM(n) (0x0800 + (0x2000 * (n))) #define EIP197_PE_ICE_PUE_CTRL(n) (0x0c80 + (0x2000 * (n))) +#define EIP197_PE_ICE_PUTF_CTRL(n) (0x0d00 + (0x2000 * (n))) #define EIP197_PE_ICE_SCRATCH_CTRL(n) (0x0d04 + (0x2000 * (n))) #define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n))) +#define EIP197_PE_ICE_PPTF_CTRL(n) (0x0e00 + (0x2000 * (n))) #define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n))) +#define EIP197_PE_ICE_VERSION(n) (0x0ffc + (0x2000 * (n))) +#define EIP197_PE_EIP96_TOKEN_CTRL(n) (0x1000 + (0x2000 * (n))) #define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n))) #define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n))) #define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n))) +#define EIP197_PE_EIP96_TOKEN_CTRL2(n) (0x102c + (0x2000 * (n))) +#define EIP197_PE_EIP96_FUNCTION2_EN(n) (0x1030 + (0x2000 * (n))) +#define EIP197_PE_EIP96_OPTIONS(n) (0x13f8 + (0x2000 * (n))) +#define 
EIP197_PE_EIP96_VERSION(n) (0x13fc + (0x2000 * (n))) +#define EIP197_PE_OCE_VERSION(n) (0x1bfc + (0x2000 * (n))) #define EIP197_PE_OUT_DBUF_THRES(n) (0x1c00 + (0x2000 * (n))) #define EIP197_PE_OUT_TBUF_THRES(n) (0x1d00 + (0x2000 * (n))) +#define EIP197_PE_PSE_VERSION(n) (0x1efc + (0x2000 * (n))) +#define EIP197_PE_DEBUG(n) (0x1ff4 + (0x2000 * (n))) +#define EIP197_PE_OPTIONS(n) (0x1ff8 + (0x2000 * (n))) +#define EIP197_PE_VERSION(n) (0x1ffc + (0x2000 * (n))) #define EIP197_MST_CTRL 0xfff4 +#define EIP197_OPTIONS 0xfff8 +#define EIP197_VERSION 0xfffc /* EIP197-specific registers, no indirection */ #define EIP197_CLASSIFICATION_RAMS 0xe0000 @@ -138,10 +206,18 @@ #define EIP197_TRC_ECCADMINSTAT 0xf0838 #define EIP197_TRC_ECCDATASTAT 0xf083c #define EIP197_TRC_ECCDATA 0xf0840 +#define EIP197_STRC_CONFIG 0xf43f0 +#define EIP197_FLUE_CACHEBASE_LO(n) (0xf6000 + (32 * (n))) +#define EIP197_FLUE_CACHEBASE_HI(n) (0xf6004 + (32 * (n))) +#define EIP197_FLUE_CONFIG(n) (0xf6010 + (32 * (n))) +#define EIP197_FLUE_OFFSETS 0xf6808 +#define EIP197_FLUE_ARC4_OFFSET 0xf680c +#define EIP197_FLUE_IFC_LUT(n) (0xf6820 + (4 * (n))) #define EIP197_CS_RAM_CTRL 0xf7ff0 /* EIP197_HIA_xDR_DESC_SIZE */ #define EIP197_xDR_DESC_MODE_64BIT BIT(31) +#define EIP197_CDR_DESC_MODE_ADCP BIT(30) /* EIP197_HIA_xDR_DMA_CFG */ #define EIP197_HIA_xDR_WR_RES_BUF BIT(22) @@ -167,7 +243,6 @@ /* EIP197_HIA_xDR_PROC_COUNT */ #define EIP197_xDR_PROC_xD_PKT_OFFSET 24 #define EIP197_xDR_PROC_xD_PKT_MASK GENMASK(6, 0) -#define EIP197_xDR_PROC_xD_COUNT(n) ((n) << 2) #define EIP197_xDR_PROC_xD_PKT(n) ((n) << 24) #define EIP197_xDR_PROC_CLR_COUNT BIT(31) @@ -182,9 +257,24 @@ #define EIP197_HIA_RA_PE_CTRL_EN BIT(30) /* EIP197_HIA_OPTIONS */ +#define EIP197_N_RINGS_OFFSET 0 +#define EIP197_N_RINGS_MASK GENMASK(3, 0) #define EIP197_N_PES_OFFSET 4 #define EIP197_N_PES_MASK GENMASK(4, 0) #define EIP97_N_PES_MASK GENMASK(2, 0) +#define EIP197_HWDATAW_OFFSET 25 +#define EIP197_HWDATAW_MASK GENMASK(3, 0) +#define EIP97_HWDATAW_MASK GENMASK(2, 0) +#define EIP197_CFSIZE_OFFSET 9 +#define EIP197_CFSIZE_ADJUST 4 +#define EIP97_CFSIZE_OFFSET 8 +#define EIP197_CFSIZE_MASK GENMASK(2, 0) +#define EIP97_CFSIZE_MASK GENMASK(3, 0) +#define EIP197_RFSIZE_OFFSET 12 +#define EIP197_RFSIZE_ADJUST 4 +#define EIP97_RFSIZE_OFFSET 12 +#define EIP197_RFSIZE_MASK GENMASK(2, 0) +#define EIP97_RFSIZE_MASK GENMASK(3, 0) /* EIP197_HIA_AIC_R_ENABLE_CTRL */ #define EIP197_CDR_IRQ(n) BIT((n) * 2) @@ -198,14 +288,19 @@ #define EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(n) ((n) << 16) #define EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(n) (((n) & 0x7) << 20) #define EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(n) ((n) << 24) -#define EIP197_HIA_DFE_CFG_DIS_DEBUG (BIT(31) | BIT(29)) +#define EIP197_HIA_DFE_CFG_DIS_DEBUG GENMASK(31, 29) #define EIP197_HIA_DSE_CFG_EN_SINGLE_WR BIT(29) -#define EIP197_HIA_DSE_CFG_DIS_DEBUG BIT(31) +#define EIP197_HIA_DSE_CFG_DIS_DEBUG GENMASK(31, 30) /* EIP197_HIA_DFE/DSE_THR_CTRL */ #define EIP197_DxE_THR_CTRL_EN BIT(30) #define EIP197_DxE_THR_CTRL_RESET_PE BIT(31) +/* EIP197_PE_ICE_PUE/FPP_CTRL */ +#define EIP197_PE_ICE_UENG_START_OFFSET(n) ((n) << 16) +#define EIP197_PE_ICE_UENG_INIT_ALIGN_MASK 0x7ff0 +#define EIP197_PE_ICE_UENG_DEBUG_RESET BIT(3) + /* EIP197_HIA_AIC_G_ENABLED_STAT */ #define EIP197_G_IRQ_DFE(n) BIT((n) << 1) #define EIP197_G_IRQ_DSE(n) BIT(((n) << 1) + 1) @@ -222,6 +317,7 @@ #define EIP197_MST_CTRL_TX_MAX_CMD(n) (((n) & 0xf) << 20) #define EIP197_MST_CTRL_BYTE_SWAP BIT(24) #define EIP197_MST_CTRL_NO_BYTE_SWAP BIT(25) +#define EIP197_MST_CTRL_BYTE_SWAP_BITS 
GENMASK(25, 24) /* EIP197_PE_IN_DBUF/TBUF_THRES */ #define EIP197_PE_IN_xBUF_THRES_MIN(n) ((n) << 8) @@ -249,46 +345,37 @@ #define EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN BIT(0) #define EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN BIT(1) +/* EIP197_PE_EIP96_TOKEN_CTRL */ +#define EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES BIT(16) +#define EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT BIT(17) +#define EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT BIT(22) + /* EIP197_PE_EIP96_FUNCTION_EN */ -#define EIP197_FUNCTION_RSVD (BIT(6) | BIT(15) | BIT(20) | BIT(23)) -#define EIP197_PROTOCOL_HASH_ONLY BIT(0) -#define EIP197_PROTOCOL_ENCRYPT_ONLY BIT(1) -#define EIP197_PROTOCOL_HASH_ENCRYPT BIT(2) -#define EIP197_PROTOCOL_HASH_DECRYPT BIT(3) -#define EIP197_PROTOCOL_ENCRYPT_HASH BIT(4) -#define EIP197_PROTOCOL_DECRYPT_HASH BIT(5) -#define EIP197_ALG_ARC4 BIT(7) -#define EIP197_ALG_AES_ECB BIT(8) -#define EIP197_ALG_AES_CBC BIT(9) -#define EIP197_ALG_AES_CTR_ICM BIT(10) -#define EIP197_ALG_AES_OFB BIT(11) -#define EIP197_ALG_AES_CFB BIT(12) -#define EIP197_ALG_DES_ECB BIT(13) -#define EIP197_ALG_DES_CBC BIT(14) -#define EIP197_ALG_DES_OFB BIT(16) -#define EIP197_ALG_DES_CFB BIT(17) -#define EIP197_ALG_3DES_ECB BIT(18) -#define EIP197_ALG_3DES_CBC BIT(19) -#define EIP197_ALG_3DES_OFB BIT(21) -#define EIP197_ALG_3DES_CFB BIT(22) -#define EIP197_ALG_MD5 BIT(24) -#define EIP197_ALG_HMAC_MD5 BIT(25) -#define EIP197_ALG_SHA1 BIT(26) -#define EIP197_ALG_HMAC_SHA1 BIT(27) -#define EIP197_ALG_SHA2 BIT(28) -#define EIP197_ALG_HMAC_SHA2 BIT(29) -#define EIP197_ALG_AES_XCBC_MAC BIT(30) -#define EIP197_ALG_GCM_HASH BIT(31) +#define EIP197_FUNCTION_ALL 0xffffffff /* EIP197_PE_EIP96_CONTEXT_CTRL */ #define EIP197_CONTEXT_SIZE(n) (n) #define EIP197_ADDRESS_MODE BIT(8) #define EIP197_CONTROL_MODE BIT(9) +/* EIP197_PE_EIP96_TOKEN_CTRL2 */ +#define EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE BIT(3) + +/* EIP197_PE_DEBUG */ +#define EIP197_DEBUG_OCE_BYPASS BIT(1) + +/* EIP197_STRC_CONFIG */ +#define EIP197_STRC_CONFIG_INIT BIT(31) +#define EIP197_STRC_CONFIG_LARGE_REC(s) (s<<8) +#define EIP197_STRC_CONFIG_SMALL_REC(s) (s<<0) + +/* EIP197_FLUE_CONFIG */ +#define EIP197_FLUE_CONFIG_MAGIC 0xc7000004 + /* Context Control */ struct safexcel_context_record { - u32 control0; - u32 control1; + __le32 control0; + __le32 control1; __le32 data[40]; } __packed; @@ -313,28 +400,61 @@ struct safexcel_context_record { #define CONTEXT_CONTROL_CRYPTO_ALG_AES128 (0x5 << 17) #define CONTEXT_CONTROL_CRYPTO_ALG_AES192 (0x6 << 17) #define CONTEXT_CONTROL_CRYPTO_ALG_AES256 (0x7 << 17) +#define CONTEXT_CONTROL_CRYPTO_ALG_CHACHA20 (0x8 << 17) +#define CONTEXT_CONTROL_CRYPTO_ALG_SM4 (0xd << 17) +#define CONTEXT_CONTROL_DIGEST_INITIAL (0x0 << 21) #define CONTEXT_CONTROL_DIGEST_PRECOMPUTED (0x1 << 21) +#define CONTEXT_CONTROL_DIGEST_XCM (0x2 << 21) #define CONTEXT_CONTROL_DIGEST_HMAC (0x3 << 21) #define CONTEXT_CONTROL_CRYPTO_ALG_MD5 (0x0 << 23) +#define CONTEXT_CONTROL_CRYPTO_ALG_CRC32 (0x0 << 23) #define CONTEXT_CONTROL_CRYPTO_ALG_SHA1 (0x2 << 23) #define CONTEXT_CONTROL_CRYPTO_ALG_SHA224 (0x4 << 23) #define CONTEXT_CONTROL_CRYPTO_ALG_SHA256 (0x3 << 23) #define CONTEXT_CONTROL_CRYPTO_ALG_SHA384 (0x6 << 23) #define CONTEXT_CONTROL_CRYPTO_ALG_SHA512 (0x5 << 23) +#define CONTEXT_CONTROL_CRYPTO_ALG_GHASH (0x4 << 23) +#define CONTEXT_CONTROL_CRYPTO_ALG_XCBC128 (0x1 << 23) +#define CONTEXT_CONTROL_CRYPTO_ALG_XCBC192 (0x2 << 23) +#define CONTEXT_CONTROL_CRYPTO_ALG_XCBC256 (0x3 << 23) +#define CONTEXT_CONTROL_CRYPTO_ALG_SM3 (0x7 << 23) +#define CONTEXT_CONTROL_CRYPTO_ALG_SHA3_256 (0xb << 
23) +#define CONTEXT_CONTROL_CRYPTO_ALG_SHA3_224 (0xc << 23) +#define CONTEXT_CONTROL_CRYPTO_ALG_SHA3_512 (0xd << 23) +#define CONTEXT_CONTROL_CRYPTO_ALG_SHA3_384 (0xe << 23) +#define CONTEXT_CONTROL_CRYPTO_ALG_POLY1305 (0xf << 23) #define CONTEXT_CONTROL_INV_FR (0x5 << 24) #define CONTEXT_CONTROL_INV_TR (0x6 << 24) /* control1 */ #define CONTEXT_CONTROL_CRYPTO_MODE_ECB (0 << 0) #define CONTEXT_CONTROL_CRYPTO_MODE_CBC (1 << 0) +#define CONTEXT_CONTROL_CHACHA20_MODE_256_32 (2 << 0) +#define CONTEXT_CONTROL_CRYPTO_MODE_OFB (4 << 0) +#define CONTEXT_CONTROL_CRYPTO_MODE_CFB (5 << 0) +#define CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD (6 << 0) +#define CONTEXT_CONTROL_CRYPTO_MODE_XTS (7 << 0) +#define CONTEXT_CONTROL_CRYPTO_MODE_XCM ((6 << 0) | BIT(17)) +#define CONTEXT_CONTROL_CHACHA20_MODE_CALC_OTK (12 << 0) #define CONTEXT_CONTROL_IV0 BIT(5) #define CONTEXT_CONTROL_IV1 BIT(6) #define CONTEXT_CONTROL_IV2 BIT(7) #define CONTEXT_CONTROL_IV3 BIT(8) #define CONTEXT_CONTROL_DIGEST_CNT BIT(9) #define CONTEXT_CONTROL_COUNTER_MODE BIT(10) +#define CONTEXT_CONTROL_CRYPTO_STORE BIT(12) #define CONTEXT_CONTROL_HASH_STORE BIT(19) +#define EIP197_XCM_MODE_GCM 1 +#define EIP197_XCM_MODE_CCM 2 + +#define EIP197_AEAD_TYPE_IPSEC_ESP 2 +#define EIP197_AEAD_TYPE_IPSEC_ESP_GMAC 3 +#define EIP197_AEAD_IPSEC_IV_SIZE 8 +#define EIP197_AEAD_IPSEC_NONCE_SIZE 4 +#define EIP197_AEAD_IPSEC_COUNTER_SIZE 4 +#define EIP197_AEAD_IPSEC_CCM_NONCE_SIZE 3 + /* The hash counter given to the engine in the context has a granularity of * 64 bits. */ @@ -345,6 +465,8 @@ struct safexcel_context_record { #define EIP197_TRC_ENABLE_1 BIT(5) #define EIP197_TRC_ENABLE_2 BIT(6) #define EIP197_TRC_ENABLE_MASK GENMASK(6, 4) +#define EIP197_CS_BANKSEL_MASK GENMASK(14, 12) +#define EIP197_CS_BANKSEL_OFS 12 /* EIP197_TRC_PARAMS */ #define EIP197_TRC_PARAMS_SW_RESET BIT(0) @@ -362,39 +484,33 @@ struct safexcel_context_record { #define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n) ((n) << 18) /* Cache helpers */ -#define EIP197B_CS_RC_MAX 52 -#define EIP197D_CS_RC_MAX 96 +#define EIP197_MIN_DSIZE 1024 +#define EIP197_MIN_ASIZE 8 +#define EIP197_CS_TRC_REC_WC 64 #define EIP197_CS_RC_SIZE (4 * sizeof(u32)) #define EIP197_CS_RC_NEXT(x) (x) #define EIP197_CS_RC_PREV(x) ((x) << 10) #define EIP197_RC_NULL 0x3ff -#define EIP197B_CS_TRC_REC_WC 59 -#define EIP197D_CS_TRC_REC_WC 64 -#define EIP197B_CS_TRC_LG_REC_WC 73 -#define EIP197D_CS_TRC_LG_REC_WC 80 -#define EIP197B_CS_HT_WC 64 -#define EIP197D_CS_HT_WC 256 - /* Result data */ struct result_data_desc { u32 packet_length:17; u32 error_code:15; - u8 bypass_length:4; - u8 e15:1; - u16 rsvd0; - u8 hash_bytes:1; - u8 hash_length:6; - u8 generic_bytes:1; - u8 checksum:1; - u8 next_header:1; - u8 length:1; + u32 bypass_length:4; + u32 e15:1; + u32 rsvd0:16; + u32 hash_bytes:1; + u32 hash_length:6; + u32 generic_bytes:1; + u32 checksum:1; + u32 next_header:1; + u32 length:1; u16 application_id; u16 rsvd1; - u32 rsvd2; + u32 rsvd2[5]; } __packed; @@ -412,10 +528,17 @@ struct safexcel_result_desc { u32 data_lo; u32 data_hi; - - struct result_data_desc result_data; } __packed; +/* + * The EIP(1)97 only needs to fetch the descriptor part of + * the result descriptor, not the result token part! 
+ */ +#define EIP197_RD64_FETCH_SIZE (sizeof(struct safexcel_result_desc) /\ + sizeof(u32)) +#define EIP197_RD64_RESULT_SIZE (sizeof(struct result_data_desc) /\ + sizeof(u32)) + struct safexcel_token { u32 packet_length:17; u8 stat:2; @@ -425,26 +548,37 @@ struct safexcel_token { #define EIP197_TOKEN_HASH_RESULT_VERIFY BIT(16) +#define EIP197_TOKEN_CTX_OFFSET(x) (x) +#define EIP197_TOKEN_DIRECTION_EXTERNAL BIT(11) +#define EIP197_TOKEN_EXEC_IF_SUCCESSFUL (0x1 << 12) + #define EIP197_TOKEN_STAT_LAST_HASH BIT(0) #define EIP197_TOKEN_STAT_LAST_PACKET BIT(1) #define EIP197_TOKEN_OPCODE_DIRECTION 0x0 #define EIP197_TOKEN_OPCODE_INSERT 0x2 #define EIP197_TOKEN_OPCODE_NOOP EIP197_TOKEN_OPCODE_INSERT #define EIP197_TOKEN_OPCODE_RETRIEVE 0x4 +#define EIP197_TOKEN_OPCODE_INSERT_REMRES 0xa #define EIP197_TOKEN_OPCODE_VERIFY 0xd +#define EIP197_TOKEN_OPCODE_CTX_ACCESS 0xe #define EIP197_TOKEN_OPCODE_BYPASS GENMASK(3, 0) static inline void eip197_noop_token(struct safexcel_token *token) { token->opcode = EIP197_TOKEN_OPCODE_NOOP; token->packet_length = BIT(2); + token->stat = 0; + token->instructions = 0; } /* Instructions */ #define EIP197_TOKEN_INS_INSERT_HASH_DIGEST 0x1c +#define EIP197_TOKEN_INS_ORIGIN_IV0 0x14 +#define EIP197_TOKEN_INS_ORIGIN_TOKEN 0x1b +#define EIP197_TOKEN_INS_ORIGIN_LEN(x) ((x) << 5) #define EIP197_TOKEN_INS_TYPE_OUTPUT BIT(5) #define EIP197_TOKEN_INS_TYPE_HASH BIT(6) -#define EIP197_TOKEN_INS_TYPE_CRYTO BIT(7) +#define EIP197_TOKEN_INS_TYPE_CRYPTO BIT(7) #define EIP197_TOKEN_INS_LAST BIT(8) /* Processing Engine Control Data */ @@ -456,23 +590,26 @@ struct safexcel_control_data_desc { u16 application_id; u16 rsvd; - u8 refresh:2; - u32 context_lo:30; + u32 context_lo; u32 context_hi; u32 control0; u32 control1; - u32 token[EIP197_MAX_TOKENS]; + u32 token[EIP197_EMB_TOKENS]; } __packed; #define EIP197_OPTION_MAGIC_VALUE BIT(0) #define EIP197_OPTION_64BIT_CTX BIT(1) +#define EIP197_OPTION_RC_AUTO (0x2 << 3) #define EIP197_OPTION_CTX_CTRL_IN_CMD BIT(8) #define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10) #define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9) +#define EIP197_TYPE_BCLA 0x0 #define EIP197_TYPE_EXTENDED 0x3 +#define EIP197_CONTEXT_SMALL 0x2 +#define EIP197_CONTEXT_SIZE_MASK 0x3 /* Basic Command Descriptor format */ struct safexcel_command_desc { @@ -480,20 +617,31 @@ struct safexcel_command_desc { u8 rsvd0:5; u8 last_seg:1; u8 first_seg:1; - u16 additional_cdata_size:8; + u8 additional_cdata_size:8; u32 rsvd1; u32 data_lo; u32 data_hi; + u32 atok_lo; + u32 atok_hi; + struct safexcel_control_data_desc control_data; } __packed; +#define EIP197_CD64_FETCH_SIZE (sizeof(struct safexcel_command_desc) /\ + sizeof(u32)) + /* * Internal structures & functions */ +#define EIP197_FW_TERMINAL_NOPS 2 +#define EIP197_FW_START_POLLCNT 16 +#define EIP197_FW_PUE_READY 0x14 +#define EIP197_FW_FPP_READY 0x18 + enum eip197_fw { FW_IFPP = 0, FW_IPUE, @@ -502,15 +650,20 @@ enum eip197_fw { struct safexcel_desc_ring { void *base; + void *shbase; void *base_end; + void *shbase_end; dma_addr_t base_dma; + dma_addr_t shbase_dma; /* write and read pointers */ void *write; + void *shwrite; void *read; /* descriptor element offset */ - unsigned offset; + unsigned int offset; + unsigned int shoffset; }; enum safexcel_alg_type { @@ -525,9 +678,11 @@ struct safexcel_config { u32 cd_size; u32 cd_offset; + u32 cdsh_offset; u32 rd_size; u32 rd_offset; + u32 res_offset; }; struct safexcel_work_data { @@ -564,12 +719,59 @@ struct safexcel_ring { */ struct crypto_async_request *req; struct 
crypto_async_request *backlog; + + /* irq of this ring */ + int irq; }; +/* EIP integration context flags */ enum safexcel_eip_version { - EIP97IES = BIT(0), - EIP197B = BIT(1), - EIP197D = BIT(2), + /* Platform (EIP integration context) specifier */ + EIP97IES_MRVL, + EIP197B_MRVL, + EIP197D_MRVL, + EIP197_DEVBRD, + EIP197C_MXL, +}; + +struct safexcel_priv_data { + enum safexcel_eip_version version; + bool fw_little_endian; +}; + +/* Priority we use for advertising our algorithms */ +#define SAFEXCEL_CRA_PRIORITY 300 + +/* SM3 digest result for zero length message */ +#define EIP197_SM3_ZEROM_HASH "\x1A\xB2\x1D\x83\x55\xCF\xA1\x7F" \ + "\x8E\x61\x19\x48\x31\xE8\x1A\x8F" \ + "\x22\xBE\xC8\xC7\x28\xFE\xFB\x74" \ + "\x7E\xD0\x35\xEB\x50\x82\xAA\x2B" + +/* EIP algorithm presence flags */ +enum safexcel_eip_algorithms { + SAFEXCEL_ALG_BC0 = BIT(5), + SAFEXCEL_ALG_SM4 = BIT(6), + SAFEXCEL_ALG_SM3 = BIT(7), + SAFEXCEL_ALG_CHACHA20 = BIT(8), + SAFEXCEL_ALG_POLY1305 = BIT(9), + SAFEXCEL_SEQMASK_256 = BIT(10), + SAFEXCEL_SEQMASK_384 = BIT(11), + SAFEXCEL_ALG_AES = BIT(12), + SAFEXCEL_ALG_AES_XFB = BIT(13), + SAFEXCEL_ALG_DES = BIT(15), + SAFEXCEL_ALG_DES_XFB = BIT(16), + SAFEXCEL_ALG_ARC4 = BIT(18), + SAFEXCEL_ALG_AES_XTS = BIT(20), + SAFEXCEL_ALG_WIRELESS = BIT(21), + SAFEXCEL_ALG_MD5 = BIT(22), + SAFEXCEL_ALG_SHA1 = BIT(23), + SAFEXCEL_ALG_SHA2_256 = BIT(25), + SAFEXCEL_ALG_SHA2_512 = BIT(26), + SAFEXCEL_ALG_XCBC_MAC = BIT(27), + SAFEXCEL_ALG_CBC_MAC_ALL = BIT(29), + SAFEXCEL_ALG_GHASH = BIT(30), + SAFEXCEL_ALG_SHA3 = BIT(31), }; struct safexcel_register_offsets { @@ -583,10 +785,33 @@ struct safexcel_register_offsets { u32 hia_dse_thr; u32 hia_gen_cfg; u32 pe; + u32 global; }; enum safexcel_flags { - EIP197_TRC_CACHE = BIT(0), + EIP197_TRC_CACHE = BIT(0), + SAFEXCEL_HW_EIP197 = BIT(1), + EIP197_PE_ARB = BIT(2), + EIP197_ICE = BIT(3), + EIP197_SIMPLE_TRC = BIT(4), + EIP197_OCE = BIT(5), +}; + +struct safexcel_hwconfig { + enum safexcel_eip_algorithms algo_flags; + int hwver; + int hiaver; + int ppver; + int icever; + int pever; + int ocever; + int psever; + int hwdataw; + int hwcfsize; + int hwrfsize; + int hwnumpes; + int hwnumrings; + int hwnumraic; }; struct safexcel_crypto_priv { @@ -596,8 +821,9 @@ struct safexcel_crypto_priv { struct clk *reg_clk; struct safexcel_config config; - enum safexcel_eip_version version; + struct safexcel_priv_data *data; struct safexcel_register_offsets offsets; + struct safexcel_hwconfig hwconfig; u32 flags; /* context DMA pool */ @@ -615,21 +841,31 @@ struct safexcel_context { struct crypto_async_request *req, bool *complete, int *ret); struct safexcel_context_record *ctxr; + struct safexcel_crypto_priv *priv; dma_addr_t ctxr_dma; + union { + __le32 le[SHA3_512_BLOCK_SIZE / 4]; + __be32 be[SHA3_512_BLOCK_SIZE / 4]; + u32 word[SHA3_512_BLOCK_SIZE / 4]; + u8 byte[SHA3_512_BLOCK_SIZE]; + } ipad, opad; + int ring; bool needs_inv; bool exit_inv; }; +#define HASH_CACHE_SIZE SHA512_BLOCK_SIZE + struct safexcel_ahash_export_state { - u64 len[2]; - u64 processed[2]; + u64 len; + u64 processed; u32 digest; u32 state[SHA512_DIGEST_SIZE / sizeof(u32)]; - u8 cache[SHA512_BLOCK_SIZE]; + u8 cache[HASH_CACHE_SIZE]; }; /* @@ -640,7 +876,7 @@ struct safexcel_ahash_export_state { struct safexcel_alg_template { struct safexcel_crypto_priv *priv; enum safexcel_alg_type type; - u32 engines; + enum safexcel_eip_algorithms algo_mask; union { struct skcipher_alg skcipher; struct aead_alg aead; @@ -648,14 +884,9 @@ struct safexcel_alg_template { } alg; }; -struct safexcel_inv_result { - 
struct completion completion; - int error; -}; - void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring); int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv, - struct safexcel_result_desc *rdesc); + void *rdp); void safexcel_complete(struct safexcel_crypto_priv *priv, int ring); int safexcel_invalidate_cache(struct crypto_async_request *async, struct safexcel_crypto_priv *priv, @@ -666,7 +897,6 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv, int safexcel_select_ring(struct safexcel_crypto_priv *priv); void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv, struct safexcel_desc_ring *ring); -void *safexcel_ring_first_rptr(struct safexcel_crypto_priv *priv, int ring); void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv, struct safexcel_desc_ring *ring); struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv, @@ -674,7 +904,8 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr bool first, bool last, dma_addr_t data, u32 len, u32 full_data_len, - dma_addr_t context); + dma_addr_t context, + struct safexcel_token **atoken); struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv, int ring_id, bool first, bool last, @@ -690,9 +921,9 @@ void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv, struct crypto_async_request *req); inline struct crypto_async_request * safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring); -void safexcel_inv_complete(struct crypto_async_request *req, int error); -int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen, - void *istate, void *ostate); +int safexcel_hmac_setkey(struct safexcel_context *base, const u8 *key, + unsigned int keylen, const char *alg, + unsigned int state_sz); /* available algorithms */ extern struct safexcel_alg_template safexcel_alg_ecb_des; @@ -701,6 +932,7 @@ extern struct safexcel_alg_template safexcel_alg_ecb_des3_ede; extern struct safexcel_alg_template safexcel_alg_cbc_des3_ede; extern struct safexcel_alg_template safexcel_alg_ecb_aes; extern struct safexcel_alg_template safexcel_alg_cbc_aes; +extern struct safexcel_alg_template safexcel_alg_ctr_aes; extern struct safexcel_alg_template safexcel_alg_md5; extern struct safexcel_alg_template safexcel_alg_sha1; extern struct safexcel_alg_template safexcel_alg_sha224; @@ -718,5 +950,49 @@ extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes; +extern struct safexcel_alg_template safexcel_alg_xts_aes; +extern struct safexcel_alg_template safexcel_alg_gcm; +extern struct safexcel_alg_template safexcel_alg_ccm; +extern struct safexcel_alg_template safexcel_alg_cbcmac; +extern struct safexcel_alg_template safexcel_alg_xcbcmac; +extern struct safexcel_alg_template safexcel_alg_cmac; 
+extern struct safexcel_alg_template safexcel_alg_chacha20; +extern struct safexcel_alg_template safexcel_alg_chachapoly; +extern struct safexcel_alg_template safexcel_alg_chachapoly_esp; +extern struct safexcel_alg_template safexcel_alg_sm3; +extern struct safexcel_alg_template safexcel_alg_hmac_sm3; +extern struct safexcel_alg_template safexcel_alg_ecb_sm4; +extern struct safexcel_alg_template safexcel_alg_cbc_sm4; +extern struct safexcel_alg_template safexcel_alg_ctr_sm4; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_sm4; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_cbc_sm4; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_sm4; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_ctr_sm4; +extern struct safexcel_alg_template safexcel_alg_sha3_224; +extern struct safexcel_alg_template safexcel_alg_sha3_256; +extern struct safexcel_alg_template safexcel_alg_sha3_384; +extern struct safexcel_alg_template safexcel_alg_sha3_512; +extern struct safexcel_alg_template safexcel_alg_hmac_sha3_224; +extern struct safexcel_alg_template safexcel_alg_hmac_sha3_256; +extern struct safexcel_alg_template safexcel_alg_hmac_sha3_384; +extern struct safexcel_alg_template safexcel_alg_hmac_sha3_512; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des3_ede; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des3_ede; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des3_ede; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des; +extern struct safexcel_alg_template safexcel_alg_rfc4106_gcm; +extern struct safexcel_alg_template safexcel_alg_rfc4543_gcm; +extern struct safexcel_alg_template safexcel_alg_rfc4309_ccm; #endif diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index d531c14020dc..919e5a2cab95 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -5,15 +5,24 @@ * Antoine Tenart <antoine.tenart@free-electrons.com> */ +#include <linux/unaligned.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> - #include <crypto/aead.h> #include <crypto/aes.h> #include <crypto/authenc.h> -#include <crypto/des.h> -#include <crypto/sha.h> +#include <crypto/chacha.h> +#include <crypto/ctr.h> +#include <crypto/internal/des.h> +#include <crypto/gcm.h> +#include <crypto/ghash.h> +#include <crypto/poly1305.h> +#include <crypto/sha1.h> +#include <crypto/sha2.h> +#include <crypto/sm3.h> +#include <crypto/sm4.h> +#include <crypto/xts.h> #include <crypto/skcipher.h> #include <crypto/internal/aead.h> #include <crypto/internal/skcipher.h> @@ -29,6 +38,8 @@ enum safexcel_cipher_alg { SAFEXCEL_DES, SAFEXCEL_3DES, SAFEXCEL_AES, + SAFEXCEL_CHACHA20, + SAFEXCEL_SM4, }; struct safexcel_cipher_ctx { @@ -37,116 +48,324 @@ struct safexcel_cipher_ctx { u32 mode; enum safexcel_cipher_alg alg; - bool aead; + u8 aead; /* !=0=AEAD, 2=IPSec ESP AEAD, 3=IPsec ESP GMAC */ + u8 xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM 
*/ + u8 aadskip; + u8 blocksz; + u32 ivmask; + u32 ctrinit; - __le32 key[8]; - unsigned int key_len; + __le32 key[16]; + u32 nonce; + unsigned int key_len, xts; /* All the below is AEAD specific */ u32 hash_alg; u32 state_sz; - u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)]; - u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)]; + + struct crypto_aead *fback; }; struct safexcel_cipher_req { enum safexcel_cipher_direction direction; + /* Number of result descriptors associated to the request */ + unsigned int rdescs; bool needs_inv; + int nr_src, nr_dst; }; +static int safexcel_skcipher_iv(struct safexcel_cipher_ctx *ctx, u8 *iv, + struct safexcel_command_desc *cdesc) +{ + if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) { + cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; + /* 32 bit nonce */ + cdesc->control_data.token[0] = ctx->nonce; + /* 64 bit IV part */ + memcpy(&cdesc->control_data.token[1], iv, 8); + /* 32 bit counter, start at 0 or 1 (big endian!) */ + cdesc->control_data.token[3] = + (__force u32)cpu_to_be32(ctx->ctrinit); + return 4; + } + if (ctx->alg == SAFEXCEL_CHACHA20) { + cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; + /* 96 bit nonce part */ + memcpy(&cdesc->control_data.token[0], &iv[4], 12); + /* 32 bit counter */ + cdesc->control_data.token[3] = *(u32 *)iv; + return 4; + } + + cdesc->control_data.options |= ctx->ivmask; + memcpy(cdesc->control_data.token, iv, ctx->blocksz); + return ctx->blocksz / sizeof(u32); +} + static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, struct safexcel_command_desc *cdesc, + struct safexcel_token *atoken, u32 length) { struct safexcel_token *token; - unsigned offset = 0; - - if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { - switch (ctx->alg) { - case SAFEXCEL_DES: - offset = DES_BLOCK_SIZE / sizeof(u32); - memcpy(cdesc->control_data.token, iv, DES_BLOCK_SIZE); - cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD; - break; - case SAFEXCEL_3DES: - offset = DES3_EDE_BLOCK_SIZE / sizeof(u32); - memcpy(cdesc->control_data.token, iv, DES3_EDE_BLOCK_SIZE); - cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD; - break; + int ivlen; - case SAFEXCEL_AES: - offset = AES_BLOCK_SIZE / sizeof(u32); - memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE); - cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; - break; - } + ivlen = safexcel_skcipher_iv(ctx, iv, cdesc); + if (ivlen == 4) { + /* No space in cdesc, instruction moves to atoken */ + cdesc->additional_cdata_size = 1; + token = atoken; + } else { + /* Everything fits in cdesc */ + token = (struct safexcel_token *)(cdesc->control_data.token + 2); + /* Need to pad with NOP */ + eip197_noop_token(&token[1]); } - token = (struct safexcel_token *)(cdesc->control_data.token + offset); + token->opcode = EIP197_TOKEN_OPCODE_DIRECTION; + token->packet_length = length; + token->stat = EIP197_TOKEN_STAT_LAST_PACKET | + EIP197_TOKEN_STAT_LAST_HASH; + token->instructions = EIP197_TOKEN_INS_LAST | + EIP197_TOKEN_INS_TYPE_CRYPTO | + EIP197_TOKEN_INS_TYPE_OUTPUT; +} - token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; - token[0].packet_length = length; - token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET | - EIP197_TOKEN_STAT_LAST_HASH; - token[0].instructions = EIP197_TOKEN_INS_LAST | - EIP197_TOKEN_INS_TYPE_CRYTO | - EIP197_TOKEN_INS_TYPE_OUTPUT; +static void safexcel_aead_iv(struct safexcel_cipher_ctx *ctx, u8 *iv, + struct safexcel_command_desc *cdesc) +{ + if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD || + ctx->aead & 
EIP197_AEAD_TYPE_IPSEC_ESP) { /* _ESP and _ESP_GMAC */ + /* 32 bit nonce */ + cdesc->control_data.token[0] = ctx->nonce; + /* 64 bit IV part */ + memcpy(&cdesc->control_data.token[1], iv, 8); + /* 32 bit counter, start at 0 or 1 (big endian!) */ + cdesc->control_data.token[3] = + (__force u32)cpu_to_be32(ctx->ctrinit); + return; + } + if (ctx->xcm == EIP197_XCM_MODE_GCM || ctx->alg == SAFEXCEL_CHACHA20) { + /* 96 bit IV part */ + memcpy(&cdesc->control_data.token[0], iv, 12); + /* 32 bit counter, start at 0 or 1 (big endian!) */ + cdesc->control_data.token[3] = + (__force u32)cpu_to_be32(ctx->ctrinit); + return; + } + /* CBC */ + memcpy(cdesc->control_data.token, iv, ctx->blocksz); } static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, struct safexcel_command_desc *cdesc, + struct safexcel_token *atoken, enum safexcel_cipher_direction direction, u32 cryptlen, u32 assoclen, u32 digestsize) { - struct safexcel_token *token; - unsigned offset = 0; - - if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { - offset = AES_BLOCK_SIZE / sizeof(u32); - memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE); - - cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; - } + struct safexcel_token *aadref; + int atoksize = 2; /* Start with minimum size */ + int assocadj = assoclen - ctx->aadskip, aadalign; - token = (struct safexcel_token *)(cdesc->control_data.token + offset); + /* Always 4 dwords of embedded IV for AEAD modes */ + cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; if (direction == SAFEXCEL_DECRYPT) cryptlen -= digestsize; - token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; - token[0].packet_length = assoclen; - token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH | - EIP197_TOKEN_INS_TYPE_OUTPUT; + if (unlikely(ctx->xcm == EIP197_XCM_MODE_CCM)) { + /* Construct IV block B0 for the CBC-MAC */ + u8 *final_iv = (u8 *)cdesc->control_data.token; + u8 *cbcmaciv = (u8 *)&atoken[1]; + __le32 *aadlen = (__le32 *)&atoken[5]; + + if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) { + /* Length + nonce */ + cdesc->control_data.token[0] = ctx->nonce; + /* Fixup flags byte */ + *(__le32 *)cbcmaciv = + cpu_to_le32(ctx->nonce | + ((assocadj > 0) << 6) | + ((digestsize - 2) << 2)); + /* 64 bit IV part */ + memcpy(&cdesc->control_data.token[1], iv, 8); + memcpy(cbcmaciv + 4, iv, 8); + /* Start counter at 0 */ + cdesc->control_data.token[3] = 0; + /* Message length */ + *(__be32 *)(cbcmaciv + 12) = cpu_to_be32(cryptlen); + } else { + /* Variable length IV part */ + memcpy(final_iv, iv, 15 - iv[0]); + memcpy(cbcmaciv, iv, 15 - iv[0]); + /* Start variable length counter at 0 */ + memset(final_iv + 15 - iv[0], 0, iv[0] + 1); + memset(cbcmaciv + 15 - iv[0], 0, iv[0] - 1); + /* fixup flags byte */ + cbcmaciv[0] |= ((assocadj > 0) << 6) | + ((digestsize - 2) << 2); + /* insert lower 2 bytes of message length */ + cbcmaciv[14] = cryptlen >> 8; + cbcmaciv[15] = cryptlen & 255; + } - token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION; - token[1].packet_length = cryptlen; - token[1].stat = EIP197_TOKEN_STAT_LAST_HASH; - token[1].instructions = EIP197_TOKEN_INS_LAST | - EIP197_TOKEN_INS_TYPE_CRYTO | - EIP197_TOKEN_INS_TYPE_HASH | - EIP197_TOKEN_INS_TYPE_OUTPUT; + atoken->opcode = EIP197_TOKEN_OPCODE_INSERT; + atoken->packet_length = AES_BLOCK_SIZE + + ((assocadj > 0) << 1); + atoken->stat = 0; + atoken->instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN | + EIP197_TOKEN_INS_TYPE_HASH; + + if (likely(assocadj)) { + *aadlen = cpu_to_le32((assocadj >> 8) | + (assocadj & 255) << 8); + atoken += 6; + 
atoksize += 7; + } else { + atoken += 5; + atoksize += 6; + } + + /* Process AAD data */ + aadref = atoken; + atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION; + atoken->packet_length = assocadj; + atoken->stat = 0; + atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH; + atoken++; + + /* For CCM only, align AAD data towards hash engine */ + atoken->opcode = EIP197_TOKEN_OPCODE_INSERT; + aadalign = (assocadj + 2) & 15; + atoken->packet_length = assocadj && aadalign ? + 16 - aadalign : + 0; + if (likely(cryptlen)) { + atoken->stat = 0; + atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH; + } else { + atoken->stat = EIP197_TOKEN_STAT_LAST_HASH; + atoken->instructions = EIP197_TOKEN_INS_LAST | + EIP197_TOKEN_INS_TYPE_HASH; + } + } else { + safexcel_aead_iv(ctx, iv, cdesc); + + /* Process AAD data */ + aadref = atoken; + atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION; + atoken->packet_length = assocadj; + atoken->stat = EIP197_TOKEN_STAT_LAST_HASH; + atoken->instructions = EIP197_TOKEN_INS_LAST | + EIP197_TOKEN_INS_TYPE_HASH; + } + atoken++; + + if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) { + /* For ESP mode (and not GMAC), skip over the IV */ + atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION; + atoken->packet_length = EIP197_AEAD_IPSEC_IV_SIZE; + atoken->stat = 0; + atoken->instructions = 0; + atoken++; + atoksize++; + } else if (unlikely(ctx->alg == SAFEXCEL_CHACHA20 && + direction == SAFEXCEL_DECRYPT)) { + /* Poly-chacha decryption needs a dummy NOP here ... */ + atoken->opcode = EIP197_TOKEN_OPCODE_INSERT; + atoken->packet_length = 16; /* According to Op Manual */ + atoken->stat = 0; + atoken->instructions = 0; + atoken++; + atoksize++; + } + + if (ctx->xcm) { + /* For GCM and CCM, obtain enc(Y0) */ + atoken->opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES; + atoken->packet_length = 0; + atoken->stat = 0; + atoken->instructions = AES_BLOCK_SIZE; + atoken++; + + atoken->opcode = EIP197_TOKEN_OPCODE_INSERT; + atoken->packet_length = AES_BLOCK_SIZE; + atoken->stat = 0; + atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | + EIP197_TOKEN_INS_TYPE_CRYPTO; + atoken++; + atoksize += 2; + } + + if (likely(cryptlen || ctx->alg == SAFEXCEL_CHACHA20)) { + /* Fixup stat field for AAD direction instruction */ + aadref->stat = 0; + + /* Process crypto data */ + atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION; + atoken->packet_length = cryptlen; + + if (unlikely(ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC)) { + /* Fixup instruction field for AAD dir instruction */ + aadref->instructions = EIP197_TOKEN_INS_TYPE_HASH; + + /* Do not send to crypt engine in case of GMAC */ + atoken->instructions = EIP197_TOKEN_INS_LAST | + EIP197_TOKEN_INS_TYPE_HASH | + EIP197_TOKEN_INS_TYPE_OUTPUT; + } else { + atoken->instructions = EIP197_TOKEN_INS_LAST | + EIP197_TOKEN_INS_TYPE_CRYPTO | + EIP197_TOKEN_INS_TYPE_HASH | + EIP197_TOKEN_INS_TYPE_OUTPUT; + } + + cryptlen &= 15; + if (unlikely(ctx->xcm == EIP197_XCM_MODE_CCM && cryptlen)) { + atoken->stat = 0; + /* For CCM only, pad crypto data to the hash engine */ + atoken++; + atoksize++; + atoken->opcode = EIP197_TOKEN_OPCODE_INSERT; + atoken->packet_length = 16 - cryptlen; + atoken->stat = EIP197_TOKEN_STAT_LAST_HASH; + atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH; + } else { + atoken->stat = EIP197_TOKEN_STAT_LAST_HASH; + } + atoken++; + atoksize++; + } if (direction == SAFEXCEL_ENCRYPT) { - token[2].opcode = EIP197_TOKEN_OPCODE_INSERT; - token[2].packet_length = digestsize; - token[2].stat = EIP197_TOKEN_STAT_LAST_HASH | - EIP197_TOKEN_STAT_LAST_PACKET; - 
token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | - EIP197_TOKEN_INS_INSERT_HASH_DIGEST; + /* Append ICV */ + atoken->opcode = EIP197_TOKEN_OPCODE_INSERT; + atoken->packet_length = digestsize; + atoken->stat = EIP197_TOKEN_STAT_LAST_HASH | + EIP197_TOKEN_STAT_LAST_PACKET; + atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | + EIP197_TOKEN_INS_INSERT_HASH_DIGEST; } else { - token[2].opcode = EIP197_TOKEN_OPCODE_RETRIEVE; - token[2].packet_length = digestsize; - token[2].stat = EIP197_TOKEN_STAT_LAST_HASH | - EIP197_TOKEN_STAT_LAST_PACKET; - token[2].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST; - - token[3].opcode = EIP197_TOKEN_OPCODE_VERIFY; - token[3].packet_length = digestsize | - EIP197_TOKEN_HASH_RESULT_VERIFY; - token[3].stat = EIP197_TOKEN_STAT_LAST_HASH | - EIP197_TOKEN_STAT_LAST_PACKET; - token[3].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT; + /* Extract ICV */ + atoken->opcode = EIP197_TOKEN_OPCODE_RETRIEVE; + atoken->packet_length = digestsize; + atoken->stat = EIP197_TOKEN_STAT_LAST_HASH | + EIP197_TOKEN_STAT_LAST_PACKET; + atoken->instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST; + atoken++; + atoksize++; + + /* Verify ICV */ + atoken->opcode = EIP197_TOKEN_OPCODE_VERIFY; + atoken->packet_length = digestsize | + EIP197_TOKEN_HASH_RESULT_VERIFY; + atoken->stat = EIP197_TOKEN_STAT_LAST_HASH | + EIP197_TOKEN_STAT_LAST_PACKET; + atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT; } + + /* Fixup length of the token in the command descriptor */ + cdesc->additional_cdata_size = atoksize; } static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm, @@ -154,19 +373,17 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm, { struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); - struct safexcel_crypto_priv *priv = ctx->priv; + struct safexcel_crypto_priv *priv = ctx->base.priv; struct crypto_aes_ctx aes; int ret, i; - ret = crypto_aes_expand_key(&aes, key, len); - if (ret) { - crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + ret = aes_expandkey(&aes, key, len); + if (ret) return ret; - } if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { for (i = 0; i < len / sizeof(u32); i++) { - if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) { + if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) { ctx->base.needs_inv = true; break; } @@ -182,80 +399,107 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm, return 0; } -static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key, - unsigned int len) +static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key, + unsigned int len) { struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); - struct safexcel_ahash_export_state istate, ostate; - struct safexcel_crypto_priv *priv = ctx->priv; + struct safexcel_crypto_priv *priv = ctx->base.priv; struct crypto_authenc_keys keys; + struct crypto_aes_ctx aes; + int err = -EINVAL, i; + const char *alg; - if (crypto_authenc_extractkeys(&keys, key, len) != 0) + if (unlikely(crypto_authenc_extractkeys(&keys, key, len))) goto badkey; - if (keys.enckeylen > sizeof(ctx->key)) - goto badkey; + if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) { + /* Must have at least space for the nonce here */ + if (unlikely(keys.enckeylen < CTR_RFC3686_NONCE_SIZE)) + goto badkey; + /* last 4 bytes of key are the nonce! 
*/ + ctx->nonce = *(u32 *)(keys.enckey + keys.enckeylen - + CTR_RFC3686_NONCE_SIZE); + /* exclude the nonce here */ + keys.enckeylen -= CTR_RFC3686_NONCE_SIZE; + } /* Encryption key */ - if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma && - memcmp(ctx->key, keys.enckey, keys.enckeylen)) - ctx->base.needs_inv = true; + switch (ctx->alg) { + case SAFEXCEL_DES: + err = verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen); + if (unlikely(err)) + goto badkey; + break; + case SAFEXCEL_3DES: + err = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen); + if (unlikely(err)) + goto badkey; + break; + case SAFEXCEL_AES: + err = aes_expandkey(&aes, keys.enckey, keys.enckeylen); + if (unlikely(err)) + goto badkey; + break; + case SAFEXCEL_SM4: + if (unlikely(keys.enckeylen != SM4_KEY_SIZE)) + goto badkey; + break; + default: + dev_err(priv->dev, "aead: unsupported cipher algorithm\n"); + goto badkey; + } + + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { + for (i = 0; i < keys.enckeylen / sizeof(u32); i++) { + if (le32_to_cpu(ctx->key[i]) != + ((u32 *)keys.enckey)[i]) { + ctx->base.needs_inv = true; + break; + } + } + } /* Auth key */ switch (ctx->hash_alg) { case CONTEXT_CONTROL_CRYPTO_ALG_SHA1: - if (safexcel_hmac_setkey("safexcel-sha1", keys.authkey, - keys.authkeylen, &istate, &ostate)) - goto badkey; + alg = "safexcel-sha1"; break; case CONTEXT_CONTROL_CRYPTO_ALG_SHA224: - if (safexcel_hmac_setkey("safexcel-sha224", keys.authkey, - keys.authkeylen, &istate, &ostate)) - goto badkey; + alg = "safexcel-sha224"; break; case CONTEXT_CONTROL_CRYPTO_ALG_SHA256: - if (safexcel_hmac_setkey("safexcel-sha256", keys.authkey, - keys.authkeylen, &istate, &ostate)) - goto badkey; + alg = "safexcel-sha256"; break; case CONTEXT_CONTROL_CRYPTO_ALG_SHA384: - if (safexcel_hmac_setkey("safexcel-sha384", keys.authkey, - keys.authkeylen, &istate, &ostate)) - goto badkey; + alg = "safexcel-sha384"; break; case CONTEXT_CONTROL_CRYPTO_ALG_SHA512: - if (safexcel_hmac_setkey("safexcel-sha512", keys.authkey, - keys.authkeylen, &istate, &ostate)) - goto badkey; + alg = "safexcel-sha512"; + break; + case CONTEXT_CONTROL_CRYPTO_ALG_SM3: + alg = "safexcel-sm3"; break; default: dev_err(priv->dev, "aead: unsupported hash algorithm\n"); goto badkey; } - crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) & - CRYPTO_TFM_RES_MASK); - - if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma && - (memcmp(ctx->ipad, istate.state, ctx->state_sz) || - memcmp(ctx->opad, ostate.state, ctx->state_sz))) - ctx->base.needs_inv = true; + if (safexcel_hmac_setkey(&ctx->base, keys.authkey, keys.authkeylen, + alg, ctx->state_sz)) + goto badkey; /* Now copy the keys into the context */ - memcpy(ctx->key, keys.enckey, keys.enckeylen); + for (i = 0; i < keys.enckeylen / sizeof(u32); i++) + ctx->key[i] = cpu_to_le32(((u32 *)keys.enckey)[i]); ctx->key_len = keys.enckeylen; - memcpy(ctx->ipad, &istate.state, ctx->state_sz); - memcpy(ctx->opad, &ostate.state, ctx->state_sz); - memzero_explicit(&keys, sizeof(keys)); return 0; badkey: - crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&keys, sizeof(keys)); - return -EINVAL; + return err; } static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, @@ -263,60 +507,100 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, struct safexcel_cipher_req *sreq, struct safexcel_command_desc *cdesc) { - struct safexcel_crypto_priv *priv = ctx->priv; - int ctrl_size; + struct safexcel_crypto_priv *priv = ctx->base.priv; + int 
ctrl_size = ctx->key_len / sizeof(u32); + + cdesc->control_data.control1 = ctx->mode; if (ctx->aead) { - if (sreq->direction == SAFEXCEL_ENCRYPT) - cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT; + /* Take in account the ipad+opad digests */ + if (ctx->xcm) { + ctrl_size += ctx->state_sz / sizeof(u32); + cdesc->control_data.control0 = + CONTEXT_CONTROL_KEY_EN | + CONTEXT_CONTROL_DIGEST_XCM | + ctx->hash_alg | + CONTEXT_CONTROL_SIZE(ctrl_size); + } else if (ctx->alg == SAFEXCEL_CHACHA20) { + /* Chacha20-Poly1305 */ + cdesc->control_data.control0 = + CONTEXT_CONTROL_KEY_EN | + CONTEXT_CONTROL_CRYPTO_ALG_CHACHA20 | + (sreq->direction == SAFEXCEL_ENCRYPT ? + CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT : + CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN) | + ctx->hash_alg | + CONTEXT_CONTROL_SIZE(ctrl_size); + return 0; + } else { + ctrl_size += ctx->state_sz / sizeof(u32) * 2; + cdesc->control_data.control0 = + CONTEXT_CONTROL_KEY_EN | + CONTEXT_CONTROL_DIGEST_HMAC | + ctx->hash_alg | + CONTEXT_CONTROL_SIZE(ctrl_size); + } + + if (sreq->direction == SAFEXCEL_ENCRYPT && + (ctx->xcm == EIP197_XCM_MODE_CCM || + ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC)) + cdesc->control_data.control0 |= + CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT; + else if (sreq->direction == SAFEXCEL_ENCRYPT) + cdesc->control_data.control0 |= + CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT; + else if (ctx->xcm == EIP197_XCM_MODE_CCM) + cdesc->control_data.control0 |= + CONTEXT_CONTROL_TYPE_DECRYPT_HASH_IN; else - cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN; + cdesc->control_data.control0 |= + CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN; } else { - cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT; - - /* The decryption control type is a combination of the - * encryption type and CONTEXT_CONTROL_TYPE_NULL_IN, for all - * types. 
- */ - if (sreq->direction == SAFEXCEL_DECRYPT) - cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_NULL_IN; + if (sreq->direction == SAFEXCEL_ENCRYPT) + cdesc->control_data.control0 = + CONTEXT_CONTROL_TYPE_CRYPTO_OUT | + CONTEXT_CONTROL_KEY_EN | + CONTEXT_CONTROL_SIZE(ctrl_size); + else + cdesc->control_data.control0 = + CONTEXT_CONTROL_TYPE_CRYPTO_IN | + CONTEXT_CONTROL_KEY_EN | + CONTEXT_CONTROL_SIZE(ctrl_size); } - cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN; - cdesc->control_data.control1 |= ctx->mode; - - if (ctx->aead) - cdesc->control_data.control0 |= CONTEXT_CONTROL_DIGEST_HMAC | - ctx->hash_alg; - if (ctx->alg == SAFEXCEL_DES) { - cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_DES; + cdesc->control_data.control0 |= + CONTEXT_CONTROL_CRYPTO_ALG_DES; } else if (ctx->alg == SAFEXCEL_3DES) { - cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_3DES; + cdesc->control_data.control0 |= + CONTEXT_CONTROL_CRYPTO_ALG_3DES; } else if (ctx->alg == SAFEXCEL_AES) { - switch (ctx->key_len) { + switch (ctx->key_len >> ctx->xts) { case AES_KEYSIZE_128: - cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128; + cdesc->control_data.control0 |= + CONTEXT_CONTROL_CRYPTO_ALG_AES128; break; case AES_KEYSIZE_192: - cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192; + cdesc->control_data.control0 |= + CONTEXT_CONTROL_CRYPTO_ALG_AES192; break; case AES_KEYSIZE_256: - cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256; + cdesc->control_data.control0 |= + CONTEXT_CONTROL_CRYPTO_ALG_AES256; break; default: dev_err(priv->dev, "aes keysize not supported: %u\n", - ctx->key_len); + ctx->key_len >> ctx->xts); return -EINVAL; } + } else if (ctx->alg == SAFEXCEL_CHACHA20) { + cdesc->control_data.control0 |= + CONTEXT_CONTROL_CRYPTO_ALG_CHACHA20; + } else if (ctx->alg == SAFEXCEL_SM4) { + cdesc->control_data.control0 |= + CONTEXT_CONTROL_CRYPTO_ALG_SM4; } - ctrl_size = ctx->key_len / sizeof(u32); - if (ctx->aead) - /* Take in account the ipad+opad digests */ - ctrl_size += ctx->state_sz / sizeof(u32) * 2; - cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size); - return 0; } @@ -328,12 +612,18 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin struct safexcel_cipher_req *sreq, bool *should_complete, int *ret) { + struct skcipher_request *areq = skcipher_request_cast(async); + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(areq); + struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(skcipher); struct safexcel_result_desc *rdesc; int ndesc = 0; *ret = 0; - do { + if (unlikely(!sreq->rdescs)) + return 0; + + while (sreq->rdescs--) { rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); if (IS_ERR(rdesc)) { dev_err(priv->dev, @@ -346,21 +636,33 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin *ret = safexcel_rdesc_check_errors(priv, rdesc); ndesc++; - } while (!rdesc->last_seg); + } safexcel_complete(priv, ring); if (src == dst) { - dma_unmap_sg(priv->dev, src, - sg_nents_for_len(src, cryptlen), - DMA_BIDIRECTIONAL); + if (sreq->nr_src > 0) + dma_unmap_sg(priv->dev, src, sreq->nr_src, + DMA_BIDIRECTIONAL); } else { - dma_unmap_sg(priv->dev, src, - sg_nents_for_len(src, cryptlen), - DMA_TO_DEVICE); - dma_unmap_sg(priv->dev, dst, - sg_nents_for_len(dst, cryptlen), - DMA_FROM_DEVICE); + if (sreq->nr_src > 0) + dma_unmap_sg(priv->dev, src, sreq->nr_src, + DMA_TO_DEVICE); + if (sreq->nr_dst > 0) + dma_unmap_sg(priv->dev, dst, sreq->nr_dst, + 
DMA_FROM_DEVICE); + } + + /* + * Update IV in req from last crypto output word for CBC modes + */ + if ((!ctx->aead) && (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) && + (sreq->direction == SAFEXCEL_ENCRYPT)) { + /* For encrypt take the last output word */ + sg_pcopy_to_buffer(dst, sreq->nr_dst, areq->iv, + crypto_skcipher_ivsize(skcipher), + (cryptlen - + crypto_skcipher_ivsize(skcipher))); } *should_complete = true; @@ -375,100 +677,214 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring, unsigned int digestsize, u8 *iv, int *commands, int *results) { + struct skcipher_request *areq = skcipher_request_cast(base); + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(areq); struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); - struct safexcel_crypto_priv *priv = ctx->priv; + struct safexcel_crypto_priv *priv = ctx->base.priv; struct safexcel_command_desc *cdesc; + struct safexcel_command_desc *first_cdesc = NULL; struct safexcel_result_desc *rdesc, *first_rdesc = NULL; struct scatterlist *sg; - unsigned int totlen = cryptlen + assoclen; - int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen; - int i, ret = 0; + unsigned int totlen; + unsigned int totlen_src = cryptlen + assoclen; + unsigned int totlen_dst = totlen_src; + struct safexcel_token *atoken; + int n_cdesc = 0, n_rdesc = 0; + int queued, i, ret = 0; + bool first = true; + + sreq->nr_src = sg_nents_for_len(src, totlen_src); + + if (ctx->aead) { + /* + * AEAD has auth tag appended to output for encrypt and + * removed from the output for decrypt! + */ + if (sreq->direction == SAFEXCEL_DECRYPT) + totlen_dst -= digestsize; + else + totlen_dst += digestsize; + + memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32), + &ctx->base.ipad, ctx->state_sz); + if (!ctx->xcm) + memcpy(ctx->base.ctxr->data + (ctx->key_len + + ctx->state_sz) / sizeof(u32), &ctx->base.opad, + ctx->state_sz); + } else if ((ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) && + (sreq->direction == SAFEXCEL_DECRYPT)) { + /* + * Save IV from last crypto input word for CBC modes in decrypt + * direction. Need to do this first in case of inplace operation + * as it will be overwritten. + */ + sg_pcopy_to_buffer(src, sreq->nr_src, areq->iv, + crypto_skcipher_ivsize(skcipher), + (totlen_src - + crypto_skcipher_ivsize(skcipher))); + } + + sreq->nr_dst = sg_nents_for_len(dst, totlen_dst); + + /* + * Remember actual input length, source buffer length may be + * updated in case of inline operation below. 
+ */ + totlen = totlen_src; + queued = totlen_src; if (src == dst) { - nr_src = dma_map_sg(priv->dev, src, - sg_nents_for_len(src, totlen), - DMA_BIDIRECTIONAL); - nr_dst = nr_src; - if (!nr_src) + sreq->nr_src = max(sreq->nr_src, sreq->nr_dst); + sreq->nr_dst = sreq->nr_src; + if (unlikely((totlen_src || totlen_dst) && + (sreq->nr_src <= 0))) { + dev_err(priv->dev, "In-place buffer not large enough (need %d bytes)!", + max(totlen_src, totlen_dst)); return -EINVAL; + } + if (sreq->nr_src > 0 && + !dma_map_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL)) + return -EIO; } else { - nr_src = dma_map_sg(priv->dev, src, - sg_nents_for_len(src, totlen), - DMA_TO_DEVICE); - if (!nr_src) + if (unlikely(totlen_src && (sreq->nr_src <= 0))) { + dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!", + totlen_src); return -EINVAL; + } - nr_dst = dma_map_sg(priv->dev, dst, - sg_nents_for_len(dst, totlen), - DMA_FROM_DEVICE); - if (!nr_dst) { - dma_unmap_sg(priv->dev, src, - sg_nents_for_len(src, totlen), - DMA_TO_DEVICE); - return -EINVAL; + if (sreq->nr_src > 0 && + !dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE)) + return -EIO; + + if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) { + dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!", + totlen_dst); + ret = -EINVAL; + goto unmap; + } + + if (sreq->nr_dst > 0 && + !dma_map_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE)) { + ret = -EIO; + goto unmap; } } memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len); - if (ctx->aead) { - memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32), - ctx->ipad, ctx->state_sz); - memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) / sizeof(u32), - ctx->opad, ctx->state_sz); + if (!totlen) { + /* + * The EIP97 cannot deal with zero length input packets! + * So stuff a dummy command descriptor indicating a 1 byte + * (dummy) input packet, using the context record as source. 
+ */ + first_cdesc = safexcel_add_cdesc(priv, ring, + 1, 1, ctx->base.ctxr_dma, + 1, 1, ctx->base.ctxr_dma, + &atoken); + if (IS_ERR(first_cdesc)) { + /* No space left in the command descriptor ring */ + ret = PTR_ERR(first_cdesc); + goto cdesc_rollback; + } + n_cdesc = 1; + goto skip_cdesc; } /* command descriptors */ - for_each_sg(src, sg, nr_src, i) { + for_each_sg(src, sg, sreq->nr_src, i) { int len = sg_dma_len(sg); /* Do not overflow the request */ - if (queued - len < 0) + if (queued < len) len = queued; - cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len), + cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, + !(queued - len), sg_dma_address(sg), len, totlen, - ctx->base.ctxr_dma); + ctx->base.ctxr_dma, &atoken); if (IS_ERR(cdesc)) { /* No space left in the command descriptor ring */ ret = PTR_ERR(cdesc); goto cdesc_rollback; } - n_cdesc++; - if (n_cdesc == 1) { - safexcel_context_control(ctx, base, sreq, cdesc); - if (ctx->aead) - safexcel_aead_token(ctx, iv, cdesc, - sreq->direction, cryptlen, - assoclen, digestsize); - else - safexcel_skcipher_token(ctx, iv, cdesc, - cryptlen); - } + if (!n_cdesc) + first_cdesc = cdesc; + n_cdesc++; queued -= len; if (!queued) break; } +skip_cdesc: + /* Add context control words and token to first command descriptor */ + safexcel_context_control(ctx, base, sreq, first_cdesc); + if (ctx->aead) + safexcel_aead_token(ctx, iv, first_cdesc, atoken, + sreq->direction, cryptlen, + assoclen, digestsize); + else + safexcel_skcipher_token(ctx, iv, first_cdesc, atoken, + cryptlen); /* result descriptors */ - for_each_sg(dst, sg, nr_dst, i) { - bool first = !i, last = (i == nr_dst - 1); + for_each_sg(dst, sg, sreq->nr_dst, i) { + bool last = (i == sreq->nr_dst - 1); u32 len = sg_dma_len(sg); - rdesc = safexcel_add_rdesc(priv, ring, first, last, - sg_dma_address(sg), len); + /* only allow the part of the buffer we know we need */ + if (len > totlen_dst) + len = totlen_dst; + if (unlikely(!len)) + break; + totlen_dst -= len; + + /* skip over AAD space in buffer - not written */ + if (assoclen) { + if (assoclen >= len) { + assoclen -= len; + continue; + } + rdesc = safexcel_add_rdesc(priv, ring, first, last, + sg_dma_address(sg) + + assoclen, + len - assoclen); + assoclen = 0; + } else { + rdesc = safexcel_add_rdesc(priv, ring, first, last, + sg_dma_address(sg), + len); + } if (IS_ERR(rdesc)) { /* No space left in the result descriptor ring */ ret = PTR_ERR(rdesc); goto rdesc_rollback; } - if (first) + if (first) { first_rdesc = rdesc; + first = false; + } n_rdesc++; } + if (unlikely(first)) { + /* + * Special case: AEAD decrypt with only AAD data. + * In this case there is NO output data from the engine, + * but the engine still needs a result descriptor! + * Create a dummy one just for catching the result token. 
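
The result-descriptor loop above skips the AAD region of the destination buffer: whole segments that are pure AAD are dropped, and the first segment that extends past the AAD is offset into. A flat-array sketch of how those (offset, length) pairs come out, with hypothetical segment lengths:

#include <stdio.h>

/* Emit the (offset, length) pairs handed to result descriptors: the first
 * 'assoclen' bytes of the destination are AAD and are never written by the
 * engine, so they are skipped segment by segment. */
static void emit_result_segments(const unsigned int *seg_len, int nsegs,
				 unsigned int assoclen, unsigned int total)
{
	for (int i = 0; i < nsegs && total; i++) {
		unsigned int len = seg_len[i] < total ? seg_len[i] : total;

		total -= len;
		if (assoclen >= len) {
			assoclen -= len;	/* segment is pure AAD: skip it */
			continue;
		}
		printf("seg %d: offset %u, len %u\n", i, assoclen, len - assoclen);
		assoclen = 0;
	}
}

int main(void)
{
	unsigned int segs[] = { 16, 16, 64 };	/* hypothetical sg lengths */

	emit_result_segments(segs, 3, 20, 96);	/* 20 bytes of AAD up front */
	return 0;
}
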
+ */ + rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0); + if (IS_ERR(rdesc)) { + /* No space left in the result descriptor ring */ + ret = PTR_ERR(rdesc); + goto rdesc_rollback; + } + first_rdesc = rdesc; + n_rdesc = 1; + } + safexcel_rdr_req_set(priv, ring, first_rdesc, base); *commands = n_cdesc; @@ -481,18 +897,18 @@ rdesc_rollback: cdesc_rollback: for (i = 0; i < n_cdesc; i++) safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr); - +unmap: if (src == dst) { - dma_unmap_sg(priv->dev, src, - sg_nents_for_len(src, totlen), - DMA_BIDIRECTIONAL); + if (sreq->nr_src > 0) + dma_unmap_sg(priv->dev, src, sreq->nr_src, + DMA_BIDIRECTIONAL); } else { - dma_unmap_sg(priv->dev, src, - sg_nents_for_len(src, totlen), - DMA_TO_DEVICE); - dma_unmap_sg(priv->dev, dst, - sg_nents_for_len(dst, totlen), - DMA_FROM_DEVICE); + if (sreq->nr_src > 0) + dma_unmap_sg(priv->dev, src, sreq->nr_src, + DMA_TO_DEVICE); + if (sreq->nr_dst > 0) + dma_unmap_sg(priv->dev, dst, sreq->nr_dst, + DMA_FROM_DEVICE); } return ret; @@ -501,6 +917,7 @@ cdesc_rollback: static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, int ring, struct crypto_async_request *base, + struct safexcel_cipher_req *sreq, bool *should_complete, int *ret) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); @@ -509,7 +926,10 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, *ret = 0; - do { + if (unlikely(!sreq->rdescs)) + return 0; + + while (sreq->rdescs--) { rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); if (IS_ERR(rdesc)) { dev_err(priv->dev, @@ -522,7 +942,7 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, *ret = safexcel_rdesc_check_errors(priv, rdesc); ndesc++; - } while (!rdesc->last_seg); + } safexcel_complete(priv, ring); @@ -564,7 +984,7 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv, if (sreq->needs_inv) { sreq->needs_inv = false; - err = safexcel_handle_inv_result(priv, ring, async, + err = safexcel_handle_inv_result(priv, ring, async, sreq, should_complete, ret); } else { err = safexcel_handle_req_result(priv, ring, async, req->src, @@ -587,7 +1007,7 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv, if (sreq->needs_inv) { sreq->needs_inv = false; - err = safexcel_handle_inv_result(priv, ring, async, + err = safexcel_handle_inv_result(priv, ring, async, sreq, should_complete, ret); } else { err = safexcel_handle_req_result(priv, ring, async, req->src, @@ -603,7 +1023,7 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *base, int ring, int *commands, int *results) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); - struct safexcel_crypto_priv *priv = ctx->priv; + struct safexcel_crypto_priv *priv = ctx->base.priv; int ret; ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring); @@ -622,17 +1042,29 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring, struct skcipher_request *req = skcipher_request_cast(async); struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); - struct safexcel_crypto_priv *priv = ctx->priv; + struct safexcel_crypto_priv *priv = ctx->base.priv; int ret; BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv); - if (sreq->needs_inv) + if (sreq->needs_inv) { ret = safexcel_cipher_send_inv(async, ring, commands, results); - else + } else { + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + 
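
The CBC IV handling introduced above has two halves: on decrypt the input IV (last ciphertext block) is saved before submission, because an in-place operation would overwrite it; on encrypt the last output block is copied back into req->iv at completion time. A flat-buffer sketch of that bookkeeping, assuming AES; the driver itself works on scatterlists via sg_pcopy_to_buffer():

#include <string.h>

#define BLK 16	/* AES block size */

/* The skcipher API expects req->iv to hold the next chaining value when the
 * call completes: the last ciphertext block, which is the last output block
 * on encryption but the last input block on decryption. */
void cbc_next_iv_encrypt(const unsigned char *dst, unsigned int len,
			 unsigned char *iv)
{
	memcpy(iv, dst + len - BLK, BLK);	/* last output block */
}

void cbc_next_iv_decrypt(const unsigned char *src, unsigned int len,
			 unsigned char *iv)
{
	memcpy(iv, src + len - BLK, BLK);	/* last input block, saved up front */
}
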
u8 input_iv[AES_BLOCK_SIZE]; + + /* + * Save input IV in case of CBC decrypt mode + * Will be overwritten with output IV prior to use! + */ + memcpy(input_iv, req->iv, crypto_skcipher_ivsize(skcipher)); + ret = safexcel_send_req(async, ring, sreq, req->src, - req->dst, req->cryptlen, 0, 0, req->iv, + req->dst, req->cryptlen, 0, 0, input_iv, commands, results); + } + + sreq->rdescs = *results; return ret; } @@ -643,7 +1075,7 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring, struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); struct safexcel_cipher_req *sreq = aead_request_ctx(req); - struct safexcel_crypto_priv *priv = ctx->priv; + struct safexcel_crypto_priv *priv = ctx->base.priv; int ret; BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv); @@ -655,19 +1087,19 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring, req->cryptlen, req->assoclen, crypto_aead_authsize(tfm), req->iv, commands, results); + sreq->rdescs = *results; return ret; } static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm, struct crypto_async_request *base, struct safexcel_cipher_req *sreq, - struct safexcel_inv_result *result) + struct crypto_wait *result) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); - struct safexcel_crypto_priv *priv = ctx->priv; + struct safexcel_crypto_priv *priv = ctx->base.priv; int ring = ctx->base.ring; - - init_completion(&result->completion); + int err; ctx = crypto_tfm_ctx(base->tfm); ctx->base.exit_inv = true; @@ -680,13 +1112,13 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm, queue_work(priv->ring[ring].workqueue, &priv->ring[ring].work_data.work); - wait_for_completion(&result->completion); + err = crypto_wait_req(-EINPROGRESS, result); - if (result->error) { + if (err) { dev_warn(priv->dev, "cipher: sync: invalidate: completion error %d\n", - result->error); - return result->error; + err); + return err; } return 0; @@ -696,12 +1128,12 @@ static int safexcel_skcipher_exit_inv(struct crypto_tfm *tfm) { EIP197_REQUEST_ON_STACK(req, skcipher, EIP197_SKCIPHER_REQ_SIZE); struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); - struct safexcel_inv_result result = {}; + DECLARE_CRYPTO_WAIT(result); memset(req, 0, sizeof(struct skcipher_request)); skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - safexcel_inv_complete, &result); + crypto_req_done, &result); skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm)); return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result); @@ -711,12 +1143,12 @@ static int safexcel_aead_exit_inv(struct crypto_tfm *tfm) { EIP197_REQUEST_ON_STACK(req, aead, EIP197_AEAD_REQ_SIZE); struct safexcel_cipher_req *sreq = aead_request_ctx(req); - struct safexcel_inv_result result = {}; + DECLARE_CRYPTO_WAIT(result); memset(req, 0, sizeof(struct aead_request)); aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - safexcel_inv_complete, &result); + crypto_req_done, &result); aead_request_set_tfm(req, __crypto_aead_cast(tfm)); return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result); @@ -724,17 +1156,14 @@ static int safexcel_aead_exit_inv(struct crypto_tfm *tfm) static int safexcel_queue_req(struct crypto_async_request *base, struct safexcel_cipher_req *sreq, - enum safexcel_cipher_direction dir, u32 mode, - enum safexcel_cipher_alg alg) + enum safexcel_cipher_direction dir) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); - struct safexcel_crypto_priv *priv = 
ctx->priv; + struct safexcel_crypto_priv *priv = ctx->base.priv; int ret, ring; sreq->needs_inv = false; sreq->direction = dir; - ctx->alg = alg; - ctx->mode = mode; if (ctx->base.ctxr) { if (priv->flags & EIP197_TRC_CACHE && ctx->base.needs_inv) { @@ -762,18 +1191,16 @@ static int safexcel_queue_req(struct crypto_async_request *base, return ret; } -static int safexcel_ecb_aes_encrypt(struct skcipher_request *req) +static int safexcel_encrypt(struct skcipher_request *req) { return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, - SAFEXCEL_AES); + SAFEXCEL_ENCRYPT); } -static int safexcel_ecb_aes_decrypt(struct skcipher_request *req) +static int safexcel_decrypt(struct skcipher_request *req) { return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, - SAFEXCEL_AES); + SAFEXCEL_DECRYPT); } static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm) @@ -786,10 +1213,12 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm) crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), sizeof(struct safexcel_cipher_req)); - ctx->priv = tmpl->priv; + ctx->base.priv = tmpl->priv; ctx->base.send = safexcel_skcipher_send; ctx->base.handle_result = safexcel_skcipher_handle_result; + ctx->ivmask = EIP197_OPTION_4_TOKEN_IV_CMD; + ctx->ctrinit = 1; return 0; } @@ -810,7 +1239,7 @@ static int safexcel_cipher_cra_exit(struct crypto_tfm *tfm) static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); - struct safexcel_crypto_priv *priv = ctx->priv; + struct safexcel_crypto_priv *priv = ctx->base.priv; int ret; if (safexcel_cipher_cra_exit(tfm)) @@ -830,7 +1259,7 @@ static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm) static void safexcel_aead_cra_exit(struct crypto_tfm *tfm) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); - struct safexcel_crypto_priv *priv = ctx->priv; + struct safexcel_crypto_priv *priv = ctx->base.priv; int ret; if (safexcel_cipher_cra_exit(tfm)) @@ -847,106 +1276,170 @@ static void safexcel_aead_cra_exit(struct crypto_tfm *tfm) } } +static int safexcel_skcipher_aes_ecb_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_AES; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB; + ctx->blocksz = 0; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; + return 0; +} + struct safexcel_alg_template safexcel_alg_ecb_aes = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_AES, .alg.skcipher = { .setkey = safexcel_skcipher_aes_setkey, - .encrypt = safexcel_ecb_aes_encrypt, - .decrypt = safexcel_ecb_aes_decrypt, + .encrypt = safexcel_encrypt, + .decrypt = safexcel_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .base = { .cra_name = "ecb(aes)", .cra_driver_name = "safexcel-ecb-aes", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), .cra_alignmask = 0, - .cra_init = safexcel_skcipher_cra_init, + .cra_init = safexcel_skcipher_aes_ecb_cra_init, .cra_exit = safexcel_skcipher_cra_exit, .cra_module = THIS_MODULE, }, }, }; -static int safexcel_cbc_aes_encrypt(struct skcipher_request *req) +static int 
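
The invalidation paths above drop the driver's private completion/error pair in favour of the generic crypto wait helpers. A kernel-context sketch of that pattern in isolation (the request submission is elided, so this is a shape reference rather than the driver's code):

#include <linux/crypto.h>
#include <crypto/skcipher.h>

/* Generic "submit and wait" form used for cache invalidation requests:
 * crypto_req_done() completes the wait, crypto_wait_req() sleeps on it and
 * returns the final status. */
static int wait_for_async_req(struct skcipher_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	/* ... queue the request to the driver's ring here ... */
	return crypto_wait_req(-EINPROGRESS, &wait);
}
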
safexcel_skcipher_aes_cbc_cra_init(struct crypto_tfm *tfm) { - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, - SAFEXCEL_AES); -} + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); -static int safexcel_cbc_aes_decrypt(struct skcipher_request *req) -{ - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, - SAFEXCEL_AES); + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_AES; + ctx->blocksz = AES_BLOCK_SIZE; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; + return 0; } struct safexcel_alg_template safexcel_alg_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_AES, .alg.skcipher = { .setkey = safexcel_skcipher_aes_setkey, - .encrypt = safexcel_cbc_aes_encrypt, - .decrypt = safexcel_cbc_aes_decrypt, + .encrypt = safexcel_encrypt, + .decrypt = safexcel_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .base = { .cra_name = "cbc(aes)", .cra_driver_name = "safexcel-cbc-aes", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), .cra_alignmask = 0, - .cra_init = safexcel_skcipher_cra_init, + .cra_init = safexcel_skcipher_aes_cbc_cra_init, .cra_exit = safexcel_skcipher_cra_exit, .cra_module = THIS_MODULE, }, }, }; -static int safexcel_cbc_des_encrypt(struct skcipher_request *req) +static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm, + const u8 *key, unsigned int len) { - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, - SAFEXCEL_DES); + struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct safexcel_crypto_priv *priv = ctx->base.priv; + struct crypto_aes_ctx aes; + int ret, i; + unsigned int keylen; + + /* last 4 bytes of key are the nonce! 
*/ + ctx->nonce = *(u32 *)(key + len - CTR_RFC3686_NONCE_SIZE); + /* exclude the nonce here */ + keylen = len - CTR_RFC3686_NONCE_SIZE; + ret = aes_expandkey(&aes, key, keylen); + if (ret) + return ret; + + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { + for (i = 0; i < keylen / sizeof(u32); i++) { + if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) { + ctx->base.needs_inv = true; + break; + } + } + } + + for (i = 0; i < keylen / sizeof(u32); i++) + ctx->key[i] = cpu_to_le32(aes.key_enc[i]); + + ctx->key_len = keylen; + + memzero_explicit(&aes, sizeof(aes)); + return 0; } -static int safexcel_cbc_des_decrypt(struct skcipher_request *req) +static int safexcel_skcipher_aes_ctr_cra_init(struct crypto_tfm *tfm) { - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, - SAFEXCEL_DES); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_AES; + ctx->blocksz = AES_BLOCK_SIZE; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; + return 0; } +struct safexcel_alg_template safexcel_alg_ctr_aes = { + .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .algo_mask = SAFEXCEL_ALG_AES, + .alg.skcipher = { + .setkey = safexcel_skcipher_aesctr_setkey, + .encrypt = safexcel_encrypt, + .decrypt = safexcel_decrypt, + /* Add nonce size */ + .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .ivsize = CTR_RFC3686_IV_SIZE, + .base = { + .cra_name = "rfc3686(ctr(aes))", + .cra_driver_name = "safexcel-ctr-aes", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_skcipher_aes_ctr_cra_init, + .cra_exit = safexcel_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key, unsigned int len) { - struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); - struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); - u32 tmp[DES_EXPKEY_WORDS]; + struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm); + struct safexcel_crypto_priv *priv = ctx->base.priv; int ret; - if (len != DES_KEY_SIZE) { - crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - - ret = des_ekey(tmp, key); - if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { - tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; - return -EINVAL; - } + ret = verify_skcipher_des_key(ctfm, key); + if (ret) + return ret; /* if context exits and key changed, need to invalidate it */ - if (ctx->base.ctxr_dma) + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) if (memcmp(ctx->key, key, len)) ctx->base.needs_inv = true; @@ -956,170 +1449,176 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key, return 0; } +static int safexcel_skcipher_des_cbc_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_DES; + ctx->blocksz = DES_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; + return 0; +} + struct safexcel_alg_template safexcel_alg_cbc_des = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_DES, .alg.skcipher = { .setkey = safexcel_des_setkey, - 
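
The rfc3686(ctr(aes)) setkey above receives a single blob: the AES key followed by a 4-byte RFC 3686 nonce, which is stripped off and stashed separately while only the true key is expanded. A standalone sketch of that split, with an illustrative helper name:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define RFC3686_NONCE_SIZE 4

/* Split an rfc3686 key blob into the AES key proper and the 4-byte nonce
 * appended to it. */
static int rfc3686_split_key(const uint8_t *blob, unsigned int len,
			     uint8_t *key, unsigned int *keylen,
			     uint32_t *nonce)
{
	if (len <= RFC3686_NONCE_SIZE)
		return -1;

	*keylen = len - RFC3686_NONCE_SIZE;
	memcpy(key, blob, *keylen);
	memcpy(nonce, blob + *keylen, RFC3686_NONCE_SIZE);
	return 0;
}

int main(void)
{
	uint8_t blob[20] = { 0 };	/* 16-byte AES-128 key + 4-byte nonce */
	uint8_t key[32];
	unsigned int keylen;
	uint32_t nonce;

	blob[16] = 0xde; blob[17] = 0xad; blob[18] = 0xbe; blob[19] = 0xef;
	if (!rfc3686_split_key(blob, sizeof(blob), key, &keylen, &nonce))
		printf("keylen %u, nonce 0x%08x\n", keylen, nonce);
	return 0;
}
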
.encrypt = safexcel_cbc_des_encrypt, - .decrypt = safexcel_cbc_des_decrypt, + .encrypt = safexcel_encrypt, + .decrypt = safexcel_decrypt, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, .base = { .cra_name = "cbc(des)", .cra_driver_name = "safexcel-cbc-des", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), .cra_alignmask = 0, - .cra_init = safexcel_skcipher_cra_init, + .cra_init = safexcel_skcipher_des_cbc_cra_init, .cra_exit = safexcel_skcipher_cra_exit, .cra_module = THIS_MODULE, }, }, }; -static int safexcel_ecb_des_encrypt(struct skcipher_request *req) +static int safexcel_skcipher_des_ecb_cra_init(struct crypto_tfm *tfm) { - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, - SAFEXCEL_DES); -} + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); -static int safexcel_ecb_des_decrypt(struct skcipher_request *req) -{ - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, - SAFEXCEL_DES); + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_DES; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB; + ctx->blocksz = 0; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; + return 0; } struct safexcel_alg_template safexcel_alg_ecb_des = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_DES, .alg.skcipher = { .setkey = safexcel_des_setkey, - .encrypt = safexcel_ecb_des_encrypt, - .decrypt = safexcel_ecb_des_decrypt, + .encrypt = safexcel_encrypt, + .decrypt = safexcel_decrypt, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, - .ivsize = DES_BLOCK_SIZE, .base = { .cra_name = "ecb(des)", .cra_driver_name = "safexcel-ecb-des", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), .cra_alignmask = 0, - .cra_init = safexcel_skcipher_cra_init, + .cra_init = safexcel_skcipher_des_ecb_cra_init, .cra_exit = safexcel_skcipher_cra_exit, .cra_module = THIS_MODULE, }, }, }; -static int safexcel_cbc_des3_ede_encrypt(struct skcipher_request *req) -{ - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, - SAFEXCEL_3DES); -} - -static int safexcel_cbc_des3_ede_decrypt(struct skcipher_request *req) -{ - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, - SAFEXCEL_3DES); -} - static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm, const u8 *key, unsigned int len) { - struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); - struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm); + struct safexcel_crypto_priv *priv = ctx->base.priv; + int err; - if (len != DES3_EDE_KEY_SIZE) { - crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } + err = verify_skcipher_des3_key(ctfm, key); + if (err) + return err; /* if context exits and key changed, need to invalidate it */ - if (ctx->base.ctxr_dma) { + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) if (memcmp(ctx->key, key, len)) 
ctx->base.needs_inv = true; - } memcpy(ctx->key, key, len); - ctx->key_len = len; return 0; } +static int safexcel_skcipher_des3_cbc_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_3DES; + ctx->blocksz = DES3_EDE_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; + return 0; +} + struct safexcel_alg_template safexcel_alg_cbc_des3_ede = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_DES, .alg.skcipher = { .setkey = safexcel_des3_ede_setkey, - .encrypt = safexcel_cbc_des3_ede_encrypt, - .decrypt = safexcel_cbc_des3_ede_decrypt, + .encrypt = safexcel_encrypt, + .decrypt = safexcel_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, .base = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "safexcel-cbc-des3_ede", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), .cra_alignmask = 0, - .cra_init = safexcel_skcipher_cra_init, + .cra_init = safexcel_skcipher_des3_cbc_cra_init, .cra_exit = safexcel_skcipher_cra_exit, .cra_module = THIS_MODULE, }, }, }; -static int safexcel_ecb_des3_ede_encrypt(struct skcipher_request *req) +static int safexcel_skcipher_des3_ecb_cra_init(struct crypto_tfm *tfm) { - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, - SAFEXCEL_3DES); -} + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); -static int safexcel_ecb_des3_ede_decrypt(struct skcipher_request *req) -{ - return safexcel_queue_req(&req->base, skcipher_request_ctx(req), - SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, - SAFEXCEL_3DES); + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_3DES; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB; + ctx->blocksz = 0; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; + return 0; } struct safexcel_alg_template safexcel_alg_ecb_des3_ede = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_DES, .alg.skcipher = { .setkey = safexcel_des3_ede_setkey, - .encrypt = safexcel_ecb_des3_ede_encrypt, - .decrypt = safexcel_ecb_des3_ede_decrypt, + .encrypt = safexcel_encrypt, + .decrypt = safexcel_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES3_EDE_BLOCK_SIZE, .base = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "safexcel-ecb-des3_ede", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), .cra_alignmask = 0, - .cra_init = safexcel_skcipher_cra_init, + .cra_init = safexcel_skcipher_des3_ecb_cra_init, .cra_exit = safexcel_skcipher_cra_exit, .cra_module = THIS_MODULE, }, @@ -1130,16 +1629,14 @@ static int safexcel_aead_encrypt(struct aead_request *req) { struct safexcel_cipher_req *creq = aead_request_ctx(req); - return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT, - CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES); + return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT); } static int safexcel_aead_decrypt(struct aead_request *req) { 
struct safexcel_cipher_req *creq = aead_request_ctx(req); - return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT, - CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES); + return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT); } static int safexcel_aead_cra_init(struct crypto_tfm *tfm) @@ -1152,8 +1649,13 @@ static int safexcel_aead_cra_init(struct crypto_tfm *tfm) crypto_aead_set_reqsize(__crypto_aead_cast(tfm), sizeof(struct safexcel_cipher_req)); - ctx->priv = tmpl->priv; + ctx->base.priv = tmpl->priv; + ctx->alg = SAFEXCEL_AES; /* default */ + ctx->blocksz = AES_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_4_TOKEN_IV_CMD; + ctx->ctrinit = 1; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; /* default */ ctx->aead = true; ctx->base.send = safexcel_aead_send; ctx->base.handle_result = safexcel_aead_handle_result; @@ -1172,9 +1674,9 @@ static int safexcel_aead_sha1_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA1, .alg.aead = { - .setkey = safexcel_aead_aes_setkey, + .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt, .decrypt = safexcel_aead_decrypt, .ivsize = AES_BLOCK_SIZE, @@ -1182,8 +1684,9 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = { .base = { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1207,9 +1710,9 @@ static int safexcel_aead_sha256_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256, .alg.aead = { - .setkey = safexcel_aead_aes_setkey, + .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt, .decrypt = safexcel_aead_decrypt, .ivsize = AES_BLOCK_SIZE, @@ -1217,8 +1720,9 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = { .base = { .cra_name = "authenc(hmac(sha256),cbc(aes))", .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1242,9 +1746,9 @@ static int safexcel_aead_sha224_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256, .alg.aead = { - .setkey = safexcel_aead_aes_setkey, + .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt, .decrypt = safexcel_aead_decrypt, .ivsize = AES_BLOCK_SIZE, @@ -1252,8 +1756,9 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = { .base = { .cra_name = "authenc(hmac(sha224),cbc(aes))", .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize 
= sizeof(struct safexcel_cipher_ctx), @@ -1277,9 +1782,9 @@ static int safexcel_aead_sha512_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512, .alg.aead = { - .setkey = safexcel_aead_aes_setkey, + .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt, .decrypt = safexcel_aead_decrypt, .ivsize = AES_BLOCK_SIZE, @@ -1287,8 +1792,9 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = { .base = { .cra_name = "authenc(hmac(sha512),cbc(aes))", .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1312,9 +1818,9 @@ static int safexcel_aead_sha384_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512, .alg.aead = { - .setkey = safexcel_aead_aes_setkey, + .setkey = safexcel_aead_setkey, .encrypt = safexcel_aead_encrypt, .decrypt = safexcel_aead_decrypt, .ivsize = AES_BLOCK_SIZE, @@ -1322,8 +1828,9 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = { .base = { .cra_name = "authenc(hmac(sha384),cbc(aes))", .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1334,3 +1841,1771 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = { }, }, }; + +static int safexcel_aead_sha1_des3_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sha1_cra_init(tfm); + ctx->alg = SAFEXCEL_3DES; /* override default */ + ctx->blocksz = DES3_EDE_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA1, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", + .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des3_ede", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha1_des3_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sha256_des3_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sha256_cra_init(tfm); + ctx->alg = SAFEXCEL_3DES; /* override default */ + ctx->blocksz = DES3_EDE_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; + return 0; +} + +struct safexcel_alg_template 
safexcel_alg_authenc_hmac_sha256_cbc_des3_ede = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", + .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des3_ede", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha256_des3_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sha224_des3_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sha224_cra_init(tfm); + ctx->alg = SAFEXCEL_3DES; /* override default */ + ctx->blocksz = DES3_EDE_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des3_ede = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha224),cbc(des3_ede))", + .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des3_ede", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha224_des3_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sha512_des3_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sha512_cra_init(tfm); + ctx->alg = SAFEXCEL_3DES; /* override default */ + ctx->blocksz = DES3_EDE_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des3_ede = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha512),cbc(des3_ede))", + .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des3_ede", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha512_des3_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sha384_des3_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sha384_cra_init(tfm); + ctx->alg = SAFEXCEL_3DES; /* override default */ + ctx->blocksz = DES3_EDE_BLOCK_SIZE; + ctx->ivmask = 
EIP197_OPTION_2_TOKEN_IV_CMD; + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha384),cbc(des3_ede))", + .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des3_ede", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha384_des3_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sha1_des_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sha1_cra_init(tfm); + ctx->alg = SAFEXCEL_DES; /* override default */ + ctx->blocksz = DES_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA1, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),cbc(des))", + .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha1_des_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sha256_des_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sha256_cra_init(tfm); + ctx->alg = SAFEXCEL_DES; /* override default */ + ctx->blocksz = DES_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256),cbc(des))", + .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha256_des_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sha224_des_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sha224_cra_init(tfm); + ctx->alg = SAFEXCEL_DES; /* override default */ + ctx->blocksz = DES_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; + 
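
The authenc(hmac(shaX),cbc(Y)) templates around this point all share safexcel_aead_setkey, whose body is not part of this hunk. The usual way such a setkey splits the combined key blob is crypto_authenc_extractkeys(); the sketch below shows that generic helper pattern under that assumption, not the driver's own setkey:

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/string.h>

/* Kernel-context sketch: split an authenc() key blob into the HMAC key and
 * the cipher key.  Function name and error handling are illustrative. */
static int authenc_split_keys(struct crypto_aead *tfm,
			      const u8 *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		return -EINVAL;

	/* keys.authkey / keys.authkeylen -> HMAC precompute (ipad/opad) */
	/* keys.enckey  / keys.enckeylen  -> cipher key for the context  */

	memzero_explicit(&keys, sizeof(keys));
	return 0;
}
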
return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha224),cbc(des))", + .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha224_des_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sha512_des_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sha512_cra_init(tfm); + ctx->alg = SAFEXCEL_DES; /* override default */ + ctx->blocksz = DES_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha512),cbc(des))", + .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha512_des_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sha384_des_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sha384_cra_init(tfm); + ctx->alg = SAFEXCEL_DES; /* override default */ + ctx->blocksz = DES_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha384),cbc(des))", + .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha384_des_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sha1_ctr_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sha1_cra_init(tfm); + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */ + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes = { + .type = 
SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA1, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))", + .cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-aes", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha1_ctr_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sha256_ctr_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sha256_cra_init(tfm); + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */ + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))", + .cra_driver_name = "safexcel-authenc-hmac-sha256-ctr-aes", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha256_ctr_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sha224_ctr_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sha224_cra_init(tfm); + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */ + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))", + .cra_driver_name = "safexcel-authenc-hmac-sha224-ctr-aes", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha224_ctr_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sha512_ctr_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sha512_cra_init(tfm); + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */ + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = 
CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))", + .cra_driver_name = "safexcel-authenc-hmac-sha512-ctr-aes", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha512_ctr_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sha384_ctr_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sha384_cra_init(tfm); + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */ + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))", + .cra_driver_name = "safexcel-authenc-hmac-sha384-ctr-aes", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha384_ctr_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm, + const u8 *key, unsigned int len) +{ + struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct safexcel_crypto_priv *priv = ctx->base.priv; + struct crypto_aes_ctx aes; + int ret, i; + unsigned int keylen; + + /* Check for illegal XTS keys */ + ret = xts_verify_key(ctfm, key, len); + if (ret) + return ret; + + /* Only half of the key data is cipher key */ + keylen = (len >> 1); + ret = aes_expandkey(&aes, key, keylen); + if (ret) + return ret; + + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { + for (i = 0; i < keylen / sizeof(u32); i++) { + if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) { + ctx->base.needs_inv = true; + break; + } + } + } + + for (i = 0; i < keylen / sizeof(u32); i++) + ctx->key[i] = cpu_to_le32(aes.key_enc[i]); + + /* The other half is the tweak key */ + ret = aes_expandkey(&aes, (u8 *)(key + keylen), keylen); + if (ret) + return ret; + + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { + for (i = 0; i < keylen / sizeof(u32); i++) { + if (le32_to_cpu(ctx->key[i + keylen / sizeof(u32)]) != + aes.key_enc[i]) { + ctx->base.needs_inv = true; + break; + } + } + } + + for (i = 0; i < keylen / sizeof(u32); i++) + ctx->key[i + keylen / sizeof(u32)] = + cpu_to_le32(aes.key_enc[i]); + + ctx->key_len = keylen << 1; + + memzero_explicit(&aes, sizeof(aes)); + return 0; +} + +static int safexcel_skcipher_aes_xts_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_AES; + ctx->blocksz = AES_BLOCK_SIZE; + ctx->xts = 1; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XTS; + return 0; +} + +static int safexcel_encrypt_xts(struct skcipher_request *req) +{ + if (req->cryptlen < XTS_BLOCK_SIZE) + 
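
The xts(aes) setkey above treats the supplied key as two equal-size AES keys glued together: the first half encrypts the data, the second half generates the tweak, which is why the template doubles min_keysize and max_keysize. A standalone sketch of the split and the validity check on each half:

#include <stdint.h>
#include <stdio.h>

struct xts_key_halves {
	const uint8_t *data_key;
	const uint8_t *tweak_key;
	unsigned int half_len;
};

/* Split an XTS key blob into data and tweak halves; each half must itself
 * be a valid AES key size (16, 24 or 32 bytes). */
static int xts_split_key(const uint8_t *key, unsigned int len,
			 struct xts_key_halves *out)
{
	unsigned int half = len / 2;

	if (len % 2 || (half != 16 && half != 24 && half != 32))
		return -1;

	out->data_key = key;
	out->tweak_key = key + half;
	out->half_len = half;
	return 0;
}

int main(void)
{
	uint8_t key[64] = { 0 };	/* xts(aes-256): 2 x 32 bytes */
	struct xts_key_halves halves;

	if (!xts_split_key(key, sizeof(key), &halves))
		printf("each half: %u bytes\n", halves.half_len);
	return 0;
}
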
return -EINVAL; + return safexcel_queue_req(&req->base, skcipher_request_ctx(req), + SAFEXCEL_ENCRYPT); +} + +static int safexcel_decrypt_xts(struct skcipher_request *req) +{ + if (req->cryptlen < XTS_BLOCK_SIZE) + return -EINVAL; + return safexcel_queue_req(&req->base, skcipher_request_ctx(req), + SAFEXCEL_DECRYPT); +} + +struct safexcel_alg_template safexcel_alg_xts_aes = { + .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_AES_XTS, + .alg.skcipher = { + .setkey = safexcel_skcipher_aesxts_setkey, + .encrypt = safexcel_encrypt_xts, + .decrypt = safexcel_decrypt_xts, + /* XTS actually uses 2 AES keys glued together */ + .min_keysize = AES_MIN_KEY_SIZE * 2, + .max_keysize = AES_MAX_KEY_SIZE * 2, + .ivsize = XTS_BLOCK_SIZE, + .base = { + .cra_name = "xts(aes)", + .cra_driver_name = "safexcel-xts-aes", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = XTS_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_skcipher_aes_xts_cra_init, + .cra_exit = safexcel_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct safexcel_crypto_priv *priv = ctx->base.priv; + struct crypto_aes_ctx aes; + u32 hashkey[AES_BLOCK_SIZE >> 2]; + int ret, i; + + ret = aes_expandkey(&aes, key, len); + if (ret) { + memzero_explicit(&aes, sizeof(aes)); + return ret; + } + + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { + for (i = 0; i < len / sizeof(u32); i++) { + if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) { + ctx->base.needs_inv = true; + break; + } + } + } + + for (i = 0; i < len / sizeof(u32); i++) + ctx->key[i] = cpu_to_le32(aes.key_enc[i]); + + ctx->key_len = len; + + /* Compute hash key by encrypting zeroes with cipher key */ + memset(hashkey, 0, AES_BLOCK_SIZE); + aes_encrypt(&aes, (u8 *)hashkey, (u8 *)hashkey); + + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { + for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) { + if (be32_to_cpu(ctx->base.ipad.be[i]) != hashkey[i]) { + ctx->base.needs_inv = true; + break; + } + } + } + + for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) + ctx->base.ipad.be[i] = cpu_to_be32(hashkey[i]); + + memzero_explicit(hashkey, AES_BLOCK_SIZE); + memzero_explicit(&aes, sizeof(aes)); + return 0; +} + +static int safexcel_aead_gcm_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_cra_init(tfm); + ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_GHASH; + ctx->state_sz = GHASH_BLOCK_SIZE; + ctx->xcm = EIP197_XCM_MODE_GCM; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */ + + return 0; +} + +static void safexcel_aead_gcm_cra_exit(struct crypto_tfm *tfm) +{ + safexcel_aead_cra_exit(tfm); +} + +static int safexcel_aead_gcm_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + return crypto_gcm_check_authsize(authsize); +} + +struct safexcel_alg_template safexcel_alg_gcm = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_GHASH, + .alg.aead = { + .setkey = safexcel_aead_gcm_setkey, + .setauthsize = safexcel_aead_gcm_setauthsize, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = GCM_AES_IV_SIZE, + 
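
The gcm(aes) setkey above derives the GHASH subkey by encrypting an all-zero block with the freshly expanded cipher key, then byte-swaps it into the context record. A kernel-context sketch of just that derivation, using the library AES helpers already visible in the hunk:

#include <crypto/aes.h>
#include <linux/string.h>

/* GCM's GHASH subkey H is the encryption of the all-zero block under the
 * cipher key: H = E_K(0^128). */
static int derive_ghash_subkey(const u8 *key, unsigned int keylen,
			       u8 hashkey[AES_BLOCK_SIZE])
{
	struct crypto_aes_ctx aes;
	int ret;

	ret = aes_expandkey(&aes, key, keylen);
	if (ret)
		return ret;

	memset(hashkey, 0, AES_BLOCK_SIZE);
	aes_encrypt(&aes, hashkey, hashkey);	/* in-place: H = E_K(0) */

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}
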
.maxauthsize = GHASH_DIGEST_SIZE, + .base = { + .cra_name = "gcm(aes)", + .cra_driver_name = "safexcel-gcm-aes", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_gcm_cra_init, + .cra_exit = safexcel_aead_gcm_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_ccm_setkey(struct crypto_aead *ctfm, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct safexcel_crypto_priv *priv = ctx->base.priv; + struct crypto_aes_ctx aes; + int ret, i; + + ret = aes_expandkey(&aes, key, len); + if (ret) { + memzero_explicit(&aes, sizeof(aes)); + return ret; + } + + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { + for (i = 0; i < len / sizeof(u32); i++) { + if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) { + ctx->base.needs_inv = true; + break; + } + } + } + + for (i = 0; i < len / sizeof(u32); i++) { + ctx->key[i] = cpu_to_le32(aes.key_enc[i]); + ctx->base.ipad.be[i + 2 * AES_BLOCK_SIZE / sizeof(u32)] = + cpu_to_be32(aes.key_enc[i]); + } + + ctx->key_len = len; + ctx->state_sz = 2 * AES_BLOCK_SIZE + len; + + if (len == AES_KEYSIZE_192) + ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192; + else if (len == AES_KEYSIZE_256) + ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC256; + else + ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128; + + memzero_explicit(&aes, sizeof(aes)); + return 0; +} + +static int safexcel_aead_ccm_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_cra_init(tfm); + ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128; + ctx->state_sz = 3 * AES_BLOCK_SIZE; + ctx->xcm = EIP197_XCM_MODE_CCM; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */ + ctx->ctrinit = 0; + return 0; +} + +static int safexcel_aead_ccm_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + /* Borrowed from crypto/ccm.c */ + switch (authsize) { + case 4: + case 6: + case 8: + case 10: + case 12: + case 14: + case 16: + break; + default: + return -EINVAL; + } + + return 0; +} + +static int safexcel_ccm_encrypt(struct aead_request *req) +{ + struct safexcel_cipher_req *creq = aead_request_ctx(req); + + if (req->iv[0] < 1 || req->iv[0] > 7) + return -EINVAL; + + return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT); +} + +static int safexcel_ccm_decrypt(struct aead_request *req) +{ + struct safexcel_cipher_req *creq = aead_request_ctx(req); + + if (req->iv[0] < 1 || req->iv[0] > 7) + return -EINVAL; + + return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT); +} + +struct safexcel_alg_template safexcel_alg_ccm = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_CBC_MAC_ALL, + .alg.aead = { + .setkey = safexcel_aead_ccm_setkey, + .setauthsize = safexcel_aead_ccm_setauthsize, + .encrypt = safexcel_ccm_encrypt, + .decrypt = safexcel_ccm_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "ccm(aes)", + .cra_driver_name = "safexcel-ccm-aes", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = 
safexcel_aead_ccm_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static void safexcel_chacha20_setkey(struct safexcel_cipher_ctx *ctx, + const u8 *key) +{ + struct safexcel_crypto_priv *priv = ctx->base.priv; + + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) + if (memcmp(ctx->key, key, CHACHA_KEY_SIZE)) + ctx->base.needs_inv = true; + + memcpy(ctx->key, key, CHACHA_KEY_SIZE); + ctx->key_len = CHACHA_KEY_SIZE; +} + +static int safexcel_skcipher_chacha20_setkey(struct crypto_skcipher *ctfm, + const u8 *key, unsigned int len) +{ + struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm); + + if (len != CHACHA_KEY_SIZE) + return -EINVAL; + + safexcel_chacha20_setkey(ctx, key); + + return 0; +} + +static int safexcel_skcipher_chacha20_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_CHACHA20; + ctx->ctrinit = 0; + ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32; + return 0; +} + +struct safexcel_alg_template safexcel_alg_chacha20 = { + .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .algo_mask = SAFEXCEL_ALG_CHACHA20, + .alg.skcipher = { + .setkey = safexcel_skcipher_chacha20_setkey, + .encrypt = safexcel_encrypt, + .decrypt = safexcel_decrypt, + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = CHACHA_IV_SIZE, + .base = { + .cra_name = "chacha20", + .cra_driver_name = "safexcel-chacha20", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_skcipher_chacha20_cra_init, + .cra_exit = safexcel_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_chachapoly_setkey(struct crypto_aead *ctfm, + const u8 *key, unsigned int len) +{ + struct safexcel_cipher_ctx *ctx = crypto_aead_ctx(ctfm); + + if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP && + len > EIP197_AEAD_IPSEC_NONCE_SIZE) { + /* ESP variant has nonce appended to key */ + len -= EIP197_AEAD_IPSEC_NONCE_SIZE; + ctx->nonce = *(u32 *)(key + len); + } + if (len != CHACHA_KEY_SIZE) + return -EINVAL; + + safexcel_chacha20_setkey(ctx, key); + + return 0; +} + +static int safexcel_aead_chachapoly_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + if (authsize != POLY1305_DIGEST_SIZE) + return -EINVAL; + return 0; +} + +static int safexcel_aead_chachapoly_crypt(struct aead_request *req, + enum safexcel_cipher_direction dir) +{ + struct safexcel_cipher_req *creq = aead_request_ctx(req); + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_tfm *tfm = crypto_aead_tfm(aead); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct aead_request *subreq = aead_request_ctx(req); + u32 key[CHACHA_KEY_SIZE / sizeof(u32) + 1]; + int ret = 0; + + /* + * Instead of wasting time detecting umpteen silly corner cases, + * just dump all "small" requests to the fallback implementation. + * HW would not be faster on such small requests anyway. 
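 *
 * Concretely, the checks below keep a request on the hardware path only
 * when the payload is longer than the Poly1305 tag and, for the ESP
 * variant, the associated data covers at least the ESP IV; everything
 * else is handed to the software fallback further down.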
+ */ + if (likely((ctx->aead != EIP197_AEAD_TYPE_IPSEC_ESP || + req->assoclen >= EIP197_AEAD_IPSEC_IV_SIZE) && + req->cryptlen > POLY1305_DIGEST_SIZE)) { + return safexcel_queue_req(&req->base, creq, dir); + } + + /* HW cannot do full (AAD+payload) zero length, use fallback */ + memcpy(key, ctx->key, CHACHA_KEY_SIZE); + if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) { + /* ESP variant has nonce appended to the key */ + key[CHACHA_KEY_SIZE / sizeof(u32)] = ctx->nonce; + ret = crypto_aead_setkey(ctx->fback, (u8 *)key, + CHACHA_KEY_SIZE + + EIP197_AEAD_IPSEC_NONCE_SIZE); + } else { + ret = crypto_aead_setkey(ctx->fback, (u8 *)key, + CHACHA_KEY_SIZE); + } + if (ret) { + crypto_aead_clear_flags(aead, CRYPTO_TFM_REQ_MASK); + crypto_aead_set_flags(aead, crypto_aead_get_flags(ctx->fback) & + CRYPTO_TFM_REQ_MASK); + return ret; + } + + aead_request_set_tfm(subreq, ctx->fback); + aead_request_set_callback(subreq, req->base.flags, req->base.complete, + req->base.data); + aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, + req->iv); + aead_request_set_ad(subreq, req->assoclen); + + return (dir == SAFEXCEL_ENCRYPT) ? + crypto_aead_encrypt(subreq) : + crypto_aead_decrypt(subreq); +} + +static int safexcel_aead_chachapoly_encrypt(struct aead_request *req) +{ + return safexcel_aead_chachapoly_crypt(req, SAFEXCEL_ENCRYPT); +} + +static int safexcel_aead_chachapoly_decrypt(struct aead_request *req) +{ + return safexcel_aead_chachapoly_crypt(req, SAFEXCEL_DECRYPT); +} + +static int safexcel_aead_fallback_cra_init(struct crypto_tfm *tfm) +{ + struct crypto_aead *aead = __crypto_aead_cast(tfm); + struct aead_alg *alg = crypto_aead_alg(aead); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_cra_init(tfm); + + /* Allocate fallback implementation */ + ctx->fback = crypto_alloc_aead(alg->base.cra_name, 0, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fback)) + return PTR_ERR(ctx->fback); + + crypto_aead_set_reqsize(aead, max(sizeof(struct safexcel_cipher_req), + sizeof(struct aead_request) + + crypto_aead_reqsize(ctx->fback))); + + return 0; +} + +static int safexcel_aead_chachapoly_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_fallback_cra_init(tfm); + ctx->alg = SAFEXCEL_CHACHA20; + ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32 | + CONTEXT_CONTROL_CHACHA20_MODE_CALC_OTK; + ctx->ctrinit = 0; + ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_POLY1305; + ctx->state_sz = 0; /* Precomputed by HW */ + return 0; +} + +static void safexcel_aead_fallback_cra_exit(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_aead(ctx->fback); + safexcel_aead_cra_exit(tfm); +} + +struct safexcel_alg_template safexcel_alg_chachapoly = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_CHACHA20 | SAFEXCEL_ALG_POLY1305, + .alg.aead = { + .setkey = safexcel_aead_chachapoly_setkey, + .setauthsize = safexcel_aead_chachapoly_setauthsize, + .encrypt = safexcel_aead_chachapoly_encrypt, + .decrypt = safexcel_aead_chachapoly_decrypt, + .ivsize = CHACHAPOLY_IV_SIZE, + .maxauthsize = POLY1305_DIGEST_SIZE, + .base = { + .cra_name = "rfc7539(chacha20,poly1305)", + .cra_driver_name = "safexcel-chacha20-poly1305", + /* +1 to put it above HW chacha + SW poly */ + .cra_priority = SAFEXCEL_CRA_PRIORITY + 1, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = 1, + .cra_ctxsize = 
sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_chachapoly_cra_init, + .cra_exit = safexcel_aead_fallback_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_chachapolyesp_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + int ret; + + ret = safexcel_aead_chachapoly_cra_init(tfm); + ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP; + ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE; + return ret; +} + +struct safexcel_alg_template safexcel_alg_chachapoly_esp = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_CHACHA20 | SAFEXCEL_ALG_POLY1305, + .alg.aead = { + .setkey = safexcel_aead_chachapoly_setkey, + .setauthsize = safexcel_aead_chachapoly_setauthsize, + .encrypt = safexcel_aead_chachapoly_encrypt, + .decrypt = safexcel_aead_chachapoly_decrypt, + .ivsize = CHACHAPOLY_IV_SIZE - EIP197_AEAD_IPSEC_NONCE_SIZE, + .maxauthsize = POLY1305_DIGEST_SIZE, + .base = { + .cra_name = "rfc7539esp(chacha20,poly1305)", + .cra_driver_name = "safexcel-chacha20-poly1305-esp", + /* +1 to put it above HW chacha + SW poly */ + .cra_priority = SAFEXCEL_CRA_PRIORITY + 1, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_chachapolyesp_cra_init, + .cra_exit = safexcel_aead_fallback_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_skcipher_sm4_setkey(struct crypto_skcipher *ctfm, + const u8 *key, unsigned int len) +{ + struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct safexcel_crypto_priv *priv = ctx->base.priv; + + if (len != SM4_KEY_SIZE) + return -EINVAL; + + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) + if (memcmp(ctx->key, key, SM4_KEY_SIZE)) + ctx->base.needs_inv = true; + + memcpy(ctx->key, key, SM4_KEY_SIZE); + ctx->key_len = SM4_KEY_SIZE; + + return 0; +} + +static int safexcel_sm4_blk_encrypt(struct skcipher_request *req) +{ + /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */ + if (req->cryptlen & (SM4_BLOCK_SIZE - 1)) + return -EINVAL; + else + return safexcel_queue_req(&req->base, skcipher_request_ctx(req), + SAFEXCEL_ENCRYPT); +} + +static int safexcel_sm4_blk_decrypt(struct skcipher_request *req) +{ + /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */ + if (req->cryptlen & (SM4_BLOCK_SIZE - 1)) + return -EINVAL; + else + return safexcel_queue_req(&req->base, skcipher_request_ctx(req), + SAFEXCEL_DECRYPT); +} + +static int safexcel_skcipher_sm4_ecb_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_SM4; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB; + ctx->blocksz = 0; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; + return 0; +} + +struct safexcel_alg_template safexcel_alg_ecb_sm4 = { + .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .algo_mask = SAFEXCEL_ALG_SM4, + .alg.skcipher = { + .setkey = safexcel_skcipher_sm4_setkey, + .encrypt = safexcel_sm4_blk_encrypt, + .decrypt = safexcel_sm4_blk_decrypt, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .base = { + .cra_name = "ecb(sm4)", + .cra_driver_name = "safexcel-ecb-sm4", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + 
CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_skcipher_sm4_ecb_cra_init, + .cra_exit = safexcel_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_skcipher_sm4_cbc_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_SM4; + ctx->blocksz = SM4_BLOCK_SIZE; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; + return 0; +} + +struct safexcel_alg_template safexcel_alg_cbc_sm4 = { + .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .algo_mask = SAFEXCEL_ALG_SM4, + .alg.skcipher = { + .setkey = safexcel_skcipher_sm4_setkey, + .encrypt = safexcel_sm4_blk_encrypt, + .decrypt = safexcel_sm4_blk_decrypt, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .base = { + .cra_name = "cbc(sm4)", + .cra_driver_name = "safexcel-cbc-sm4", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_skcipher_sm4_cbc_cra_init, + .cra_exit = safexcel_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_skcipher_sm4ctr_setkey(struct crypto_skcipher *ctfm, + const u8 *key, unsigned int len) +{ + struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + /* last 4 bytes of key are the nonce! */ + ctx->nonce = *(u32 *)(key + len - CTR_RFC3686_NONCE_SIZE); + /* exclude the nonce here */ + len -= CTR_RFC3686_NONCE_SIZE; + + return safexcel_skcipher_sm4_setkey(ctfm, key, len); +} + +static int safexcel_skcipher_sm4_ctr_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_skcipher_cra_init(tfm); + ctx->alg = SAFEXCEL_SM4; + ctx->blocksz = SM4_BLOCK_SIZE; + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; + return 0; +} + +struct safexcel_alg_template safexcel_alg_ctr_sm4 = { + .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .algo_mask = SAFEXCEL_ALG_SM4, + .alg.skcipher = { + .setkey = safexcel_skcipher_sm4ctr_setkey, + .encrypt = safexcel_encrypt, + .decrypt = safexcel_decrypt, + /* Add nonce size */ + .min_keysize = SM4_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .max_keysize = SM4_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .ivsize = CTR_RFC3686_IV_SIZE, + .base = { + .cra_name = "rfc3686(ctr(sm4))", + .cra_driver_name = "safexcel-ctr-sm4", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_skcipher_sm4_ctr_cra_init, + .cra_exit = safexcel_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sm4_blk_encrypt(struct aead_request *req) +{ + /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */ + if (req->cryptlen & (SM4_BLOCK_SIZE - 1)) + return -EINVAL; + + return safexcel_queue_req(&req->base, aead_request_ctx(req), + SAFEXCEL_ENCRYPT); +} + +static int safexcel_aead_sm4_blk_decrypt(struct aead_request *req) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + + /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */ + if ((req->cryptlen - 
crypto_aead_authsize(tfm)) & (SM4_BLOCK_SIZE - 1)) + return -EINVAL; + + return safexcel_queue_req(&req->base, aead_request_ctx(req), + SAFEXCEL_DECRYPT); +} + +static int safexcel_aead_sm4cbc_sha1_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_cra_init(tfm); + ctx->alg = SAFEXCEL_SM4; + ctx->blocksz = SM4_BLOCK_SIZE; + ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1; + ctx->state_sz = SHA1_DIGEST_SIZE; + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_sm4 = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SHA1, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_sm4_blk_encrypt, + .decrypt = safexcel_aead_sm4_blk_decrypt, + .ivsize = SM4_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),cbc(sm4))", + .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-sm4", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sm4cbc_sha1_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_fallback_setkey(struct crypto_aead *ctfm, + const u8 *key, unsigned int len) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + /* Keep fallback cipher synchronized */ + return crypto_aead_setkey(ctx->fback, (u8 *)key, len) ?: + safexcel_aead_setkey(ctfm, key, len); +} + +static int safexcel_aead_fallback_setauthsize(struct crypto_aead *ctfm, + unsigned int authsize) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + /* Keep fallback cipher synchronized */ + return crypto_aead_setauthsize(ctx->fback, authsize); +} + +static int safexcel_aead_fallback_crypt(struct aead_request *req, + enum safexcel_cipher_direction dir) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_tfm *tfm = crypto_aead_tfm(aead); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct aead_request *subreq = aead_request_ctx(req); + + aead_request_set_tfm(subreq, ctx->fback); + aead_request_set_callback(subreq, req->base.flags, req->base.complete, + req->base.data); + aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, + req->iv); + aead_request_set_ad(subreq, req->assoclen); + + return (dir == SAFEXCEL_ENCRYPT) ? 
+ crypto_aead_encrypt(subreq) : + crypto_aead_decrypt(subreq); +} + +static int safexcel_aead_sm4cbc_sm3_encrypt(struct aead_request *req) +{ + struct safexcel_cipher_req *creq = aead_request_ctx(req); + + /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */ + if (req->cryptlen & (SM4_BLOCK_SIZE - 1)) + return -EINVAL; + else if (req->cryptlen || req->assoclen) /* If input length > 0 only */ + return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT); + + /* HW cannot do full (AAD+payload) zero length, use fallback */ + return safexcel_aead_fallback_crypt(req, SAFEXCEL_ENCRYPT); +} + +static int safexcel_aead_sm4cbc_sm3_decrypt(struct aead_request *req) +{ + struct safexcel_cipher_req *creq = aead_request_ctx(req); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + + /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */ + if ((req->cryptlen - crypto_aead_authsize(tfm)) & (SM4_BLOCK_SIZE - 1)) + return -EINVAL; + else if (req->cryptlen > crypto_aead_authsize(tfm) || req->assoclen) + /* If input length > 0 only */ + return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT); + + /* HW cannot do full (AAD+payload) zero length, use fallback */ + return safexcel_aead_fallback_crypt(req, SAFEXCEL_DECRYPT); +} + +static int safexcel_aead_sm4cbc_sm3_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_fallback_cra_init(tfm); + ctx->alg = SAFEXCEL_SM4; + ctx->blocksz = SM4_BLOCK_SIZE; + ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3; + ctx->state_sz = SM3_DIGEST_SIZE; + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_cbc_sm4 = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SM3, + .alg.aead = { + .setkey = safexcel_aead_fallback_setkey, + .setauthsize = safexcel_aead_fallback_setauthsize, + .encrypt = safexcel_aead_sm4cbc_sm3_encrypt, + .decrypt = safexcel_aead_sm4cbc_sm3_decrypt, + .ivsize = SM4_BLOCK_SIZE, + .maxauthsize = SM3_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sm3),cbc(sm4))", + .cra_driver_name = "safexcel-authenc-hmac-sm3-cbc-sm4", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sm4cbc_sm3_cra_init, + .cra_exit = safexcel_aead_fallback_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sm4ctr_sha1_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sm4cbc_sha1_cra_init(tfm); + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_sm4 = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SHA1, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),rfc3686(ctr(sm4)))", + .cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-sm4", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = 
safexcel_aead_sm4ctr_sha1_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sm4ctr_sm3_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_sm4cbc_sm3_cra_init(tfm); + ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_ctr_sm4 = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SM3, + .alg.aead = { + .setkey = safexcel_aead_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SM3_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sm3),rfc3686(ctr(sm4)))", + .cra_driver_name = "safexcel-authenc-hmac-sm3-ctr-sm4", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sm4ctr_sm3_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_rfc4106_gcm_setkey(struct crypto_aead *ctfm, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + /* last 4 bytes of key are the nonce! */ + ctx->nonce = *(u32 *)(key + len - CTR_RFC3686_NONCE_SIZE); + + len -= CTR_RFC3686_NONCE_SIZE; + return safexcel_aead_gcm_setkey(ctfm, key, len); +} + +static int safexcel_rfc4106_gcm_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + return crypto_rfc4106_check_authsize(authsize); +} + +static int safexcel_rfc4106_encrypt(struct aead_request *req) +{ + return crypto_ipsec_check_assoclen(req->assoclen) ?: + safexcel_aead_encrypt(req); +} + +static int safexcel_rfc4106_decrypt(struct aead_request *req) +{ + return crypto_ipsec_check_assoclen(req->assoclen) ?: + safexcel_aead_decrypt(req); +} + +static int safexcel_rfc4106_gcm_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + int ret; + + ret = safexcel_aead_gcm_cra_init(tfm); + ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP; + ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE; + return ret; +} + +struct safexcel_alg_template safexcel_alg_rfc4106_gcm = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_GHASH, + .alg.aead = { + .setkey = safexcel_rfc4106_gcm_setkey, + .setauthsize = safexcel_rfc4106_gcm_setauthsize, + .encrypt = safexcel_rfc4106_encrypt, + .decrypt = safexcel_rfc4106_decrypt, + .ivsize = GCM_RFC4106_IV_SIZE, + .maxauthsize = GHASH_DIGEST_SIZE, + .base = { + .cra_name = "rfc4106(gcm(aes))", + .cra_driver_name = "safexcel-rfc4106-gcm-aes", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_rfc4106_gcm_cra_init, + .cra_exit = safexcel_aead_gcm_cra_exit, + }, + }, +}; + +static int safexcel_rfc4543_gcm_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + if (authsize != GHASH_DIGEST_SIZE) + return -EINVAL; + + return 0; +} + +static int safexcel_rfc4543_gcm_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + int ret; + + ret = safexcel_aead_gcm_cra_init(tfm); + 
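	/*
	 * Background (from RFC 4106/4543, not from the driver itself):
	 * rfc4543(gcm(aes)) is GCM used for authentication only (GMAC) - the
	 * payload is passed through in the clear and only the tag is computed.
	 * Both the rfc4106 and rfc4543 variants use the same keying layout,
	 * which is why safexcel_rfc4106_gcm_setkey() is reused below:
	 *
	 *	| AES key (16/24/32 bytes) | 4 byte salt |
	 *
	 * That setkey strips the trailing salt into ctx->nonce and passes only
	 * the AES key on to safexcel_aead_gcm_setkey().
	 */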
ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP_GMAC; + return ret; +} + +struct safexcel_alg_template safexcel_alg_rfc4543_gcm = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_GHASH, + .alg.aead = { + .setkey = safexcel_rfc4106_gcm_setkey, + .setauthsize = safexcel_rfc4543_gcm_setauthsize, + .encrypt = safexcel_rfc4106_encrypt, + .decrypt = safexcel_rfc4106_decrypt, + .ivsize = GCM_RFC4543_IV_SIZE, + .maxauthsize = GHASH_DIGEST_SIZE, + .base = { + .cra_name = "rfc4543(gcm(aes))", + .cra_driver_name = "safexcel-rfc4543-gcm-aes", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_rfc4543_gcm_cra_init, + .cra_exit = safexcel_aead_gcm_cra_exit, + }, + }, +}; + +static int safexcel_rfc4309_ccm_setkey(struct crypto_aead *ctfm, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + /* First byte of the nonce = L = always 3 for RFC4309 (4 byte ctr) */ + *(u8 *)&ctx->nonce = EIP197_AEAD_IPSEC_COUNTER_SIZE - 1; + /* last 3 bytes of key are the nonce! */ + memcpy((u8 *)&ctx->nonce + 1, key + len - + EIP197_AEAD_IPSEC_CCM_NONCE_SIZE, + EIP197_AEAD_IPSEC_CCM_NONCE_SIZE); + + len -= EIP197_AEAD_IPSEC_CCM_NONCE_SIZE; + return safexcel_aead_ccm_setkey(ctfm, key, len); +} + +static int safexcel_rfc4309_ccm_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + /* Borrowed from crypto/ccm.c */ + switch (authsize) { + case 8: + case 12: + case 16: + break; + default: + return -EINVAL; + } + + return 0; +} + +static int safexcel_rfc4309_ccm_encrypt(struct aead_request *req) +{ + struct safexcel_cipher_req *creq = aead_request_ctx(req); + + /* Borrowed from crypto/ccm.c */ + if (req->assoclen != 16 && req->assoclen != 20) + return -EINVAL; + + return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT); +} + +static int safexcel_rfc4309_ccm_decrypt(struct aead_request *req) +{ + struct safexcel_cipher_req *creq = aead_request_ctx(req); + + /* Borrowed from crypto/ccm.c */ + if (req->assoclen != 16 && req->assoclen != 20) + return -EINVAL; + + return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT); +} + +static int safexcel_rfc4309_ccm_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + int ret; + + ret = safexcel_aead_ccm_cra_init(tfm); + ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP; + ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE; + return ret; +} + +struct safexcel_alg_template safexcel_alg_rfc4309_ccm = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_CBC_MAC_ALL, + .alg.aead = { + .setkey = safexcel_rfc4309_ccm_setkey, + .setauthsize = safexcel_rfc4309_ccm_setauthsize, + .encrypt = safexcel_rfc4309_ccm_encrypt, + .decrypt = safexcel_rfc4309_ccm_decrypt, + .ivsize = EIP197_AEAD_IPSEC_IV_SIZE, + .maxauthsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "rfc4309(ccm(aes))", + .cra_driver_name = "safexcel-rfc4309-ccm-aes", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_rfc4309_ccm_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; diff --git 
a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index ac9282c1a5ec..ef0ba4832928 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -5,9 +5,15 @@ * Antoine Tenart <antoine.tenart@free-electrons.com> */ +#include <crypto/aes.h> #include <crypto/hmac.h> #include <crypto/md5.h> -#include <crypto/sha.h> +#include <crypto/sha1.h> +#include <crypto/sha2.h> +#include <crypto/sha3.h> +#include <crypto/skcipher.h> +#include <crypto/sm3.h> +#include <crypto/internal/cipher.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> @@ -16,12 +22,18 @@ struct safexcel_ahash_ctx { struct safexcel_context base; - struct safexcel_crypto_priv *priv; u32 alg; - - u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)]; - u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)]; + u8 key_sz; + bool cbcmac; + bool do_fallback; + bool fb_init_done; + bool fb_do_setkey; + + struct crypto_aes_ctx *aes; + struct crypto_ahash *fback; + struct crypto_shash *shpre; + struct shash_desc *shdesc; }; struct safexcel_ahash_req { @@ -29,130 +41,198 @@ struct safexcel_ahash_req { bool finish; bool hmac; bool needs_inv; + bool hmac_zlen; + bool len_is_le; + bool not_first; + bool xcbcmac; int nents; dma_addr_t result_dma; u32 digest; - u8 state_sz; /* expected sate size, only set once */ - u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32)); + u8 state_sz; /* expected state size, only set once */ + u8 block_sz; /* block size, only set once */ + u8 digest_sz; /* output digest size, only set once */ + __le32 state[SHA3_512_BLOCK_SIZE / + sizeof(__le32)] __aligned(sizeof(__le32)); - u64 len[2]; - u64 processed[2]; + u64 len; + u64 processed; - u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32)); + u8 cache[HASH_CACHE_SIZE] __aligned(sizeof(u32)); dma_addr_t cache_dma; unsigned int cache_sz; - u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32)); + u8 cache_next[HASH_CACHE_SIZE] __aligned(sizeof(u32)); }; static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req) { - if (req->len[1] > req->processed[1]) - return 0xffffffff - (req->len[0] - req->processed[0]); - - return req->len[0] - req->processed[0]; + return req->len - req->processed; } static void safexcel_hash_token(struct safexcel_command_desc *cdesc, - u32 input_length, u32 result_length) + u32 input_length, u32 result_length, + bool cbcmac) { struct safexcel_token *token = (struct safexcel_token *)cdesc->control_data.token; token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; token[0].packet_length = input_length; - token[0].stat = EIP197_TOKEN_STAT_LAST_HASH; token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH; - token[1].opcode = EIP197_TOKEN_OPCODE_INSERT; - token[1].packet_length = result_length; - token[1].stat = EIP197_TOKEN_STAT_LAST_HASH | + input_length &= 15; + if (unlikely(cbcmac && input_length)) { + token[0].stat = 0; + token[1].opcode = EIP197_TOKEN_OPCODE_INSERT; + token[1].packet_length = 16 - input_length; + token[1].stat = EIP197_TOKEN_STAT_LAST_HASH; + token[1].instructions = EIP197_TOKEN_INS_TYPE_HASH; + } else { + token[0].stat = EIP197_TOKEN_STAT_LAST_HASH; + eip197_noop_token(&token[1]); + } + + token[2].opcode = EIP197_TOKEN_OPCODE_INSERT; + token[2].stat = EIP197_TOKEN_STAT_LAST_HASH | EIP197_TOKEN_STAT_LAST_PACKET; - token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | + token[2].packet_length = result_length; + token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | EIP197_TOKEN_INS_INSERT_HASH_DIGEST; + + 
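	/*
	 * Sketch of the token program built by this function (opcodes
	 * abbreviated): for a regular hash the engine executes
	 *
	 *	token[0]  DIRECTION  input_length   -> feed data to the hash
	 *	token[1]  no-op
	 *	token[2]  INSERT     result_length  <- append digest to output
	 *	token[3]  no-op
	 *
	 * while for CBC-MAC with a partial final block token[1] instead
	 * INSERTs the zero bytes needed to pad the input up to a 16 byte
	 * boundary.
	 */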
eip197_noop_token(&token[3]); } static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, struct safexcel_ahash_req *req, - struct safexcel_command_desc *cdesc, - unsigned int digestsize) -{ - struct safexcel_crypto_priv *priv = ctx->priv; - int i; - - cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT; - cdesc->control_data.control0 |= ctx->alg; - cdesc->control_data.control0 |= req->digest; - - if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) { - if (req->processed[0] || req->processed[1]) { - if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5) - cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(5); - else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1) - cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6); - else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 || - ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256) - cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9); - else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384 || - ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512) - cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(17); - - cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT; - } else { - cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH; - } + struct safexcel_command_desc *cdesc) +{ + struct safexcel_crypto_priv *priv = ctx->base.priv; + u64 count = 0; - if (!req->finish) - cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH; + cdesc->control_data.control0 = ctx->alg; + cdesc->control_data.control1 = 0; - /* - * Copy the input digest if needed, and setup the context - * fields. Do this now as we need it to setup the first command - * descriptor. - */ - if (req->processed[0] || req->processed[1]) { - for (i = 0; i < digestsize / sizeof(u32); i++) - ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]); - - if (req->finish) { - u64 count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE; - count += ((0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * - req->processed[1]); - - /* This is a haredware limitation, as the - * counter must fit into an u32. This represents - * a farily big amount of input data, so we - * shouldn't see this. - */ - if (unlikely(count & 0xffff0000)) { - dev_warn(priv->dev, - "Input data is too big\n"); - return; - } + /* + * Copy the input digest if needed, and setup the context + * fields. Do this now as we need it to setup the first command + * descriptor. + */ + if (unlikely(req->digest == CONTEXT_CONTROL_DIGEST_XCM)) { + if (req->xcbcmac) + memcpy(ctx->base.ctxr->data, &ctx->base.ipad, ctx->key_sz); + else + memcpy(ctx->base.ctxr->data, req->state, req->state_sz); + + if (!req->finish && req->xcbcmac) + cdesc->control_data.control0 |= + CONTEXT_CONTROL_DIGEST_XCM | + CONTEXT_CONTROL_TYPE_HASH_OUT | + CONTEXT_CONTROL_NO_FINISH_HASH | + CONTEXT_CONTROL_SIZE(req->state_sz / + sizeof(u32)); + else + cdesc->control_data.control0 |= + CONTEXT_CONTROL_DIGEST_XCM | + CONTEXT_CONTROL_TYPE_HASH_OUT | + CONTEXT_CONTROL_SIZE(req->state_sz / + sizeof(u32)); + return; + } else if (!req->processed) { + /* First - and possibly only - block of basic hash only */ + if (req->finish) + cdesc->control_data.control0 |= req->digest | + CONTEXT_CONTROL_TYPE_HASH_OUT | + CONTEXT_CONTROL_RESTART_HASH | + /* ensure its not 0! */ + CONTEXT_CONTROL_SIZE(1); + else + cdesc->control_data.control0 |= req->digest | + CONTEXT_CONTROL_TYPE_HASH_OUT | + CONTEXT_CONTROL_RESTART_HASH | + CONTEXT_CONTROL_NO_FINISH_HASH | + /* ensure its not 0! 
*/ + CONTEXT_CONTROL_SIZE(1); + return; + } + + /* Hash continuation or HMAC, setup (inner) digest from state */ + memcpy(ctx->base.ctxr->data, req->state, req->state_sz); + + if (req->finish) { + /* Compute digest count for hash/HMAC finish operations */ + if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) || + req->hmac_zlen || (req->processed != req->block_sz)) { + count = req->processed / EIP197_COUNTER_BLOCK_SIZE; - ctx->base.ctxr->data[i] = cpu_to_le32(count); + /* This is a hardware limitation, as the + * counter must fit into an u32. This represents + * a fairly big amount of input data, so we + * shouldn't see this. + */ + if (unlikely(count & 0xffffffff00000000ULL)) { + dev_warn(priv->dev, + "Input data is too big\n"); + return; } } - } else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) { - cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32)); - memcpy(ctx->base.ctxr->data, ctx->ipad, req->state_sz); - memcpy(ctx->base.ctxr->data + req->state_sz / sizeof(u32), - ctx->opad, req->state_sz); + if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) || + /* Special case: zero length HMAC */ + req->hmac_zlen || + /* PE HW < 4.4 cannot do HMAC continue, fake using hash */ + (req->processed != req->block_sz)) { + /* Basic hash continue operation, need digest + cnt */ + cdesc->control_data.control0 |= + CONTEXT_CONTROL_SIZE((req->state_sz >> 2) + 1) | + CONTEXT_CONTROL_TYPE_HASH_OUT | + CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + /* For zero-len HMAC, don't finalize, already padded! */ + if (req->hmac_zlen) + cdesc->control_data.control0 |= + CONTEXT_CONTROL_NO_FINISH_HASH; + cdesc->control_data.control1 |= + CONTEXT_CONTROL_DIGEST_CNT; + ctx->base.ctxr->data[req->state_sz >> 2] = + cpu_to_le32(count); + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + + /* Clear zero-length HMAC flag for next operation! 
*/ + req->hmac_zlen = false; + } else { /* HMAC */ + /* Need outer digest for HMAC finalization */ + memcpy(ctx->base.ctxr->data + (req->state_sz >> 2), + &ctx->base.opad, req->state_sz); + + /* Single pass HMAC - no digest count */ + cdesc->control_data.control0 |= + CONTEXT_CONTROL_SIZE(req->state_sz >> 1) | + CONTEXT_CONTROL_TYPE_HASH_OUT | + CONTEXT_CONTROL_DIGEST_HMAC; + } + } else { /* Hash continuation, do not finish yet */ + cdesc->control_data.control0 |= + CONTEXT_CONTROL_SIZE(req->state_sz >> 2) | + CONTEXT_CONTROL_DIGEST_PRECOMPUTED | + CONTEXT_CONTROL_TYPE_HASH_OUT | + CONTEXT_CONTROL_NO_FINISH_HASH; } } -static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring, +static int safexcel_ahash_enqueue(struct ahash_request *areq); + +static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, + int ring, struct crypto_async_request *async, bool *should_complete, int *ret) { struct safexcel_result_desc *rdesc; struct ahash_request *areq = ahash_request_cast(async); struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); - struct safexcel_ahash_req *sreq = ahash_request_ctx(areq); + struct safexcel_ahash_req *sreq = ahash_request_ctx_dma(areq); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash); u64 cache_len; *ret = 0; @@ -169,12 +249,14 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin safexcel_complete(priv, ring); if (sreq->nents) { - dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE); + dma_unmap_sg(priv->dev, areq->src, + sg_nents_for_len(areq->src, areq->nbytes), + DMA_TO_DEVICE); sreq->nents = 0; } if (sreq->result_dma) { - dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz, + dma_unmap_single(priv->dev, sreq->result_dma, sreq->digest_sz, DMA_FROM_DEVICE); sreq->result_dma = 0; } @@ -183,11 +265,35 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz, DMA_TO_DEVICE); sreq->cache_dma = 0; + sreq->cache_sz = 0; } - if (sreq->finish) + if (sreq->finish) { + if (sreq->hmac && + (sreq->digest != CONTEXT_CONTROL_DIGEST_HMAC)) { + /* Faking HMAC using hash - need to do outer hash */ + memcpy(sreq->cache, sreq->state, + crypto_ahash_digestsize(ahash)); + + memcpy(sreq->state, &ctx->base.opad, sreq->digest_sz); + + sreq->len = sreq->block_sz + + crypto_ahash_digestsize(ahash); + sreq->processed = sreq->block_sz; + sreq->hmac = 0; + + if (priv->flags & EIP197_TRC_CACHE) + ctx->base.needs_inv = true; + areq->nbytes = 0; + safexcel_ahash_enqueue(areq); + + *should_complete = false; /* Not done yet */ + return 1; + } + memcpy(areq->result, sreq->state, crypto_ahash_digestsize(ahash)); + } cache_len = safexcel_queued_len(sreq); if (cache_len) @@ -202,49 +308,89 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, int *commands, int *results) { struct ahash_request *areq = ahash_request_cast(async); - struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_crypto_priv *priv = ctx->priv; + struct safexcel_crypto_priv *priv = ctx->base.priv; struct safexcel_command_desc *cdesc, *first_cdesc = NULL; struct safexcel_result_desc *rdesc; struct scatterlist *sg; - int i, extra, n_cdesc = 0, ret = 0; - u64 queued, len, cache_len; + struct safexcel_token *dmmy; + 
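	/*
	 * Rough picture of the caching logic below (illustrative only, the
	 * real block size is HASH_CACHE_SIZE): unless this is a finish or
	 * last request, any tail that does not fill a complete cache block
	 * is copied into cache_next and held back for the next send call,
	 * and an exact multiple keeps one full block back. Assuming, purely
	 * for illustration, a 64 byte cache block:
	 *
	 *	queued = 150  ->  128 bytes sent, 22 bytes cached
	 *	queued = 128  ->   64 bytes sent, 64 bytes cached
	 */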
int i, extra = 0, n_cdesc = 0, ret = 0, cache_len, skip = 0; + u64 queued, len; - queued = len = safexcel_queued_len(req); - if (queued <= crypto_ahash_blocksize(ahash)) + queued = safexcel_queued_len(req); + if (queued <= HASH_CACHE_SIZE) cache_len = queued; else cache_len = queued - areq->nbytes; - if (!req->last_req) { + if (!req->finish && !req->last_req) { /* If this is not the last request and the queued data does not - * fit into full blocks, cache it for the next send() call. + * fit into full cache blocks, cache it for the next send call. + */ + extra = queued & (HASH_CACHE_SIZE - 1); + + /* If this is not the last request and the queued data + * is a multiple of a block, cache the last one for now. */ - extra = queued & (crypto_ahash_blocksize(ahash) - 1); if (!extra) - /* If this is not the last request and the queued data - * is a multiple of a block, cache the last one for now. - */ - extra = crypto_ahash_blocksize(ahash); + extra = HASH_CACHE_SIZE; - if (extra) { - sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), - req->cache_next, extra, - areq->nbytes - extra); + sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), + req->cache_next, extra, + areq->nbytes - extra); - queued -= extra; - len -= extra; + queued -= extra; - if (!queued) { - *commands = 0; - *results = 0; - return 0; + if (!queued) { + *commands = 0; + *results = 0; + return 0; + } + + extra = 0; + } + + if (unlikely(req->xcbcmac && req->processed > AES_BLOCK_SIZE)) { + if (unlikely(cache_len < AES_BLOCK_SIZE)) { + /* + * Cache contains less than 1 full block, complete. + */ + extra = AES_BLOCK_SIZE - cache_len; + if (queued > cache_len) { + /* More data follows: borrow bytes */ + u64 tmp = queued - cache_len; + + skip = min_t(u64, tmp, extra); + sg_pcopy_to_buffer(areq->src, + sg_nents(areq->src), + req->cache + cache_len, + skip, 0); } + extra -= skip; + memset(req->cache + cache_len + skip, 0, extra); + if (!ctx->cbcmac && extra) { + // 10- padding for XCBCMAC & CMAC + req->cache[cache_len + skip] = 0x80; + // HW will use K2 iso K3 - compensate! 
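			// In other words (interpretation, not original
			// driver commentary): a padded final XCBC/CMAC block
			// should be XORed with the "padded block" subkey (K3
			// in XCBC terms), but the engine always applies K2.
			// Pre-mixing K2 ^ K3 - the two subkeys held in
			// ctx->base.ipad - into the cached block makes the
			// key effectively applied to the data K3 again.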
+ for (i = 0; i < AES_BLOCK_SIZE / 4; i++) { + u32 *cache = (void *)req->cache; + u32 *ipad = ctx->base.ipad.word; + u32 x; + + x = ipad[i] ^ ipad[i + 4]; + cache[i] ^= swab32(x); + } + } + cache_len = AES_BLOCK_SIZE; + queued = queued + extra; } + + /* XCBC continue: XOR previous result into 1st word */ + crypto_xor(req->cache, (const u8 *)req->state, AES_BLOCK_SIZE); } + len = queued; /* Add a command descriptor for the cached data, if any */ if (cache_len) { req->cache_dma = dma_map_single(priv->dev, req->cache, @@ -255,8 +401,9 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, req->cache_sz = cache_len; first_cdesc = safexcel_add_cdesc(priv, ring, 1, (cache_len == len), - req->cache_dma, cache_len, len, - ctx->base.ctxr_dma); + req->cache_dma, cache_len, + len, ctx->base.ctxr_dma, + &dmmy); if (IS_ERR(first_cdesc)) { ret = PTR_ERR(first_cdesc); goto unmap_cache; @@ -270,7 +417,8 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, /* Now handle the current ahash request buffer(s) */ req->nents = dma_map_sg(priv->dev, areq->src, - sg_nents_for_len(areq->src, areq->nbytes), + sg_nents_for_len(areq->src, + areq->nbytes), DMA_TO_DEVICE); if (!req->nents) { ret = -ENOMEM; @@ -280,35 +428,44 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, for_each_sg(areq->src, sg, req->nents, i) { int sglen = sg_dma_len(sg); + if (unlikely(sglen <= skip)) { + skip -= sglen; + continue; + } + /* Do not overflow the request */ - if (queued < sglen) + if ((queued + skip) <= sglen) sglen = queued; + else + sglen -= skip; cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, - !(queued - sglen), sg_dma_address(sg), - sglen, len, ctx->base.ctxr_dma); + !(queued - sglen), + sg_dma_address(sg) + skip, sglen, + len, ctx->base.ctxr_dma, &dmmy); if (IS_ERR(cdesc)) { ret = PTR_ERR(cdesc); goto unmap_sg; } - n_cdesc++; - if (n_cdesc == 1) + if (!n_cdesc) first_cdesc = cdesc; + n_cdesc++; queued -= sglen; if (!queued) break; + skip = 0; } send_command: /* Setup the context options */ - safexcel_context_control(ctx, req, first_cdesc, req->state_sz); + safexcel_context_control(ctx, req, first_cdesc); /* Add the token */ - safexcel_hash_token(first_cdesc, len, req->state_sz); + safexcel_hash_token(first_cdesc, len, req->digest_sz, ctx->cbcmac); - req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz, + req->result_dma = dma_map_single(priv->dev, req->state, req->digest_sz, DMA_FROM_DEVICE); if (dma_mapping_error(priv->dev, req->result_dma)) { ret = -EINVAL; @@ -317,7 +474,7 @@ send_command: /* Add a result descriptor */ rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma, - req->state_sz); + req->digest_sz); if (IS_ERR(rdesc)) { ret = PTR_ERR(rdesc); goto unmap_result; @@ -325,19 +482,22 @@ send_command: safexcel_rdr_req_set(priv, ring, rdesc, &areq->base); - req->processed[0] += len; - if (req->processed[0] < len) - req->processed[1]++; + req->processed += len - extra; *commands = n_cdesc; *results = 1; return 0; unmap_result: - dma_unmap_single(priv->dev, req->result_dma, req->state_sz, + dma_unmap_single(priv->dev, req->result_dma, req->digest_sz, DMA_FROM_DEVICE); unmap_sg: - dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE); + if (req->nents) { + dma_unmap_sg(priv->dev, areq->src, + sg_nents_for_len(areq->src, areq->nbytes), + DMA_TO_DEVICE); + req->nents = 0; + } cdesc_rollback: for (i = 0; i < n_cdesc; i++) safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr); @@ -345,33 +505,13 @@ 
unmap_cache: if (req->cache_dma) { dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz, DMA_TO_DEVICE); + req->cache_dma = 0; req->cache_sz = 0; } return ret; } -static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq) -{ - struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); - unsigned int state_w_sz = req->state_sz / sizeof(u32); - u64 processed; - int i; - - processed = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE; - processed += (0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * req->processed[1]; - - for (i = 0; i < state_w_sz; i++) - if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i])) - return true; - - if (ctx->base.ctxr->data[state_w_sz] != cpu_to_le32(processed)) - return true; - - return false; -} - static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, int ring, struct crypto_async_request *async, @@ -427,7 +567,7 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, bool *should_complete, int *ret) { struct ahash_request *areq = ahash_request_cast(async); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); int err; BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && req->needs_inv); @@ -451,7 +591,7 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async, struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); int ret; - ret = safexcel_invalidate_cache(async, ctx->priv, + ret = safexcel_invalidate_cache(async, ctx->base.priv, ctx->base.ctxr_dma, ring); if (unlikely(ret)) return ret; @@ -466,7 +606,7 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring, int *commands, int *results) { struct ahash_request *areq = ahash_request_cast(async); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); int ret; if (req->needs_inv) @@ -480,18 +620,19 @@ static int safexcel_ahash_send(struct crypto_async_request *async, static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) { struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); - struct safexcel_crypto_priv *priv = ctx->priv; + struct safexcel_crypto_priv *priv = ctx->base.priv; EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE); - struct safexcel_ahash_req *rctx = ahash_request_ctx(req); - struct safexcel_inv_result result = {}; + struct safexcel_ahash_req *rctx = ahash_request_ctx_dma(req); + DECLARE_CRYPTO_WAIT(result); int ring = ctx->base.ring; + int err; - memset(req, 0, sizeof(struct ahash_request)); + memset(req, 0, EIP197_AHASH_REQ_SIZE); /* create invalidation request */ init_completion(&result.completion); ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - safexcel_inv_complete, &result); + crypto_req_done, &result); ahash_request_set_tfm(req, __crypto_ahash_cast(tfm)); ctx = crypto_tfm_ctx(req->base.tfm); @@ -505,12 +646,11 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) queue_work(priv->ring[ring].workqueue, &priv->ring[ring].work_data.work); - wait_for_completion(&result.completion); + err = crypto_wait_req(-EINPROGRESS, &result); - if (result.error) { - dev_warn(priv->dev, "hash: completion error (%d)\n", - result.error); - return result.error; + if (err) { + dev_warn(priv->dev, "hash: completion error (%d)\n", err); + return err; } return 0; @@ -521,29 +661,23 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) */ static int 
safexcel_ahash_cache(struct ahash_request *areq) { - struct safexcel_ahash_req *req = ahash_request_ctx(areq); - struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); - u64 queued, cache_len; + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); + u64 cache_len; - /* queued: everything accepted by the driver which will be handled by - * the next send() calls. - * tot sz handled by update() - tot sz handled by send() - */ - queued = safexcel_queued_len(req); /* cache_len: everything accepted by the driver but not sent yet, * tot sz handled by update() - last req sz - tot sz handled by send() */ - cache_len = queued - areq->nbytes; + cache_len = safexcel_queued_len(req); /* * In case there isn't enough bytes to proceed (less than a * block size), cache the data until we have enough. */ - if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) { + if (cache_len + areq->nbytes <= HASH_CACHE_SIZE) { sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), req->cache + cache_len, areq->nbytes, 0); - return areq->nbytes; + return 0; } /* We couldn't cache all the data */ @@ -553,22 +687,29 @@ static int safexcel_ahash_cache(struct ahash_request *areq) static int safexcel_ahash_enqueue(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); - struct safexcel_crypto_priv *priv = ctx->priv; + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); + struct safexcel_crypto_priv *priv = ctx->base.priv; int ret, ring; req->needs_inv = false; if (ctx->base.ctxr) { if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv && - (req->processed[0] || req->processed[1]) && - req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) - /* We're still setting needs_inv here, even though it is + /* invalidate for *any* non-XCBC continuation */ + ((req->not_first && !req->xcbcmac) || + /* invalidate if (i)digest changed */ + memcmp(ctx->base.ctxr->data, req->state, req->state_sz) || + /* invalidate for HMAC finish with odigest changed */ + (req->finish && req->hmac && + memcmp(ctx->base.ctxr->data + (req->state_sz>>2), + &ctx->base.opad, req->state_sz)))) + /* + * We're still setting needs_inv here, even though it is * cleared right away, because the needs_inv flag can be * set in other functions and we want to keep the same * logic. */ - ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq); + ctx->base.needs_inv = true; if (ctx->base.needs_inv) { ctx->base.needs_inv = false; @@ -582,6 +723,7 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq) if (!ctx->base.ctxr) return -ENOMEM; } + req->not_first = true; ring = ctx->base.ring; @@ -597,31 +739,24 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq) static int safexcel_ahash_update(struct ahash_request *areq) { - struct safexcel_ahash_req *req = ahash_request_ctx(areq); - struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); + int ret; /* If the request is 0 length, do nothing */ if (!areq->nbytes) return 0; - req->len[0] += areq->nbytes; - if (req->len[0] < areq->nbytes) - req->len[1]++; + /* Add request to the cache if it fits */ + ret = safexcel_ahash_cache(areq); - safexcel_ahash_cache(areq); + /* Update total request length */ + req->len += areq->nbytes; - /* - * We're not doing partial updates when performing an hmac request. - * Everything will be handled by the final() call. 
+ /* If not all data could fit into the cache, go process the excess. + * Also go process immediately for an HMAC IV precompute, which + * will never be finished at all, but needs to be processed anyway. */ - if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) - return 0; - - if (req->hmac) - return safexcel_ahash_enqueue(areq); - - if (!req->last_req && - safexcel_queued_len(req) > crypto_ahash_blocksize(ahash)) + if ((ret && !req->finish) || req->last_req) return safexcel_ahash_enqueue(areq); return 0; @@ -629,14 +764,17 @@ static int safexcel_ahash_update(struct ahash_request *areq) static int safexcel_ahash_final(struct ahash_request *areq) { - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - req->last_req = true; req->finish = true; - /* If we have an overall 0 length request */ - if (!req->len[0] && !req->len[1] && !areq->nbytes) { + if (unlikely(!req->len && !areq->nbytes)) { + /* + * If we have an overall 0 length *hash* request: + * The HW cannot do 0 length hash, so we provide the correct + * result directly here. + */ if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5) memcpy(areq->result, md5_zero_message_hash, MD5_DIGEST_SIZE); @@ -655,8 +793,74 @@ static int safexcel_ahash_final(struct ahash_request *areq) else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512) memcpy(areq->result, sha512_zero_message_hash, SHA512_DIGEST_SIZE); + else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SM3) { + memcpy(areq->result, + EIP197_SM3_ZEROM_HASH, SM3_DIGEST_SIZE); + } + + return 0; + } else if (unlikely(req->digest == CONTEXT_CONTROL_DIGEST_XCM && + ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5 && + req->len == sizeof(u32) && !areq->nbytes)) { + /* Zero length CRC32 */ + memcpy(areq->result, &ctx->base.ipad, sizeof(u32)); + return 0; + } else if (unlikely(ctx->cbcmac && req->len == AES_BLOCK_SIZE && + !areq->nbytes)) { + /* Zero length CBC MAC */ + memset(areq->result, 0, AES_BLOCK_SIZE); + return 0; + } else if (unlikely(req->xcbcmac && req->len == AES_BLOCK_SIZE && + !areq->nbytes)) { + /* Zero length (X)CBC/CMAC */ + int i; + for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) { + u32 *result = (void *)areq->result; + + /* K3 */ + result[i] = swab32(ctx->base.ipad.word[i + 4]); + } + areq->result[0] ^= 0x80; // 10- padding + aes_encrypt(ctx->aes, areq->result, areq->result); return 0; + } else if (unlikely(req->hmac && + (req->len == req->block_sz) && + !areq->nbytes)) { + /* + * If we have an overall 0 length *HMAC* request: + * For HMAC, we need to finalize the inner digest + * and then perform the outer hash. + */ + + /* generate pad block in the cache */ + /* start with a hash block of all zeroes */ + memset(req->cache, 0, req->block_sz); + /* set the first byte to 0x80 to 'append a 1 bit' */ + req->cache[0] = 0x80; + /* add the length in bits in the last 2 bytes */ + if (req->len_is_le) { + /* Little endian length word (e.g. MD5) */ + req->cache[req->block_sz-8] = (req->block_sz << 3) & + 255; + req->cache[req->block_sz-7] = (req->block_sz >> 5); + } else { + /* Big endian length word (e.g. 
any SHA) */ + req->cache[req->block_sz-2] = (req->block_sz >> 5); + req->cache[req->block_sz-1] = (req->block_sz << 3) & + 255; + } + + req->len += req->block_sz; /* plus 1 hash block */ + + /* Set special zero-length HMAC flag */ + req->hmac_zlen = true; + + /* Finalize HMAC */ + req->digest = CONTEXT_CONTROL_DIGEST_HMAC; + } else if (req->hmac) { + /* Finalize HMAC */ + req->digest = CONTEXT_CONTROL_DIGEST_HMAC; } return safexcel_ahash_enqueue(areq); @@ -664,9 +868,8 @@ static int safexcel_ahash_final(struct ahash_request *areq) static int safexcel_ahash_finup(struct ahash_request *areq) { - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); - req->last_req = true; req->finish = true; safexcel_ahash_update(areq); @@ -675,27 +878,23 @@ static int safexcel_ahash_finup(struct ahash_request *areq) static int safexcel_ahash_export(struct ahash_request *areq, void *out) { - struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); struct safexcel_ahash_export_state *export = out; - export->len[0] = req->len[0]; - export->len[1] = req->len[1]; - export->processed[0] = req->processed[0]; - export->processed[1] = req->processed[1]; + export->len = req->len; + export->processed = req->processed; export->digest = req->digest; memcpy(export->state, req->state, req->state_sz); - memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash)); + memcpy(export->cache, req->cache, HASH_CACHE_SIZE); return 0; } static int safexcel_ahash_import(struct ahash_request *areq, const void *in) { - struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); const struct safexcel_ahash_export_state *export = in; int ret; @@ -703,14 +902,12 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in) if (ret) return ret; - req->len[0] = export->len[0]; - req->len[1] = export->len[1]; - req->processed[0] = export->processed[0]; - req->processed[1] = export->processed[1]; + req->len = export->len; + req->processed = export->processed; req->digest = export->digest; - memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash)); + memcpy(req->cache, export->cache, HASH_CACHE_SIZE); memcpy(req->state, export->state, req->state_sz); return 0; @@ -723,31 +920,28 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm) container_of(__crypto_ahash_alg(tfm->__crt_alg), struct safexcel_alg_template, alg.ahash); - ctx->priv = tmpl->priv; + ctx->base.priv = tmpl->priv; ctx->base.send = safexcel_ahash_send; ctx->base.handle_result = safexcel_handle_result; + ctx->fb_do_setkey = false; - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct safexcel_ahash_req)); + crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm), + sizeof(struct safexcel_ahash_req)); return 0; } static int safexcel_sha1_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); - req->state[0] = SHA1_H0; - req->state[1] = SHA1_H1; - req->state[2] = SHA1_H2; - req->state[3] = SHA1_H3; - req->state[4] = SHA1_H4; - ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; 
req->state_sz = SHA1_DIGEST_SIZE; + req->digest_sz = SHA1_DIGEST_SIZE; + req->block_sz = SHA1_BLOCK_SIZE; return 0; } @@ -765,7 +959,7 @@ static int safexcel_sha1_digest(struct ahash_request *areq) static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm) { struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); - struct safexcel_crypto_priv *priv = ctx->priv; + struct safexcel_crypto_priv *priv = ctx->base.priv; int ret; /* context not allocated, skip invalidation */ @@ -784,7 +978,7 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_sha1 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_SHA1, .alg.ahash = { .init = safexcel_sha1_init, .update = safexcel_ahash_update, @@ -799,8 +993,9 @@ struct safexcel_alg_template safexcel_alg_sha1 = { .base = { .cra_name = "sha1", .cra_driver_name = "safexcel-sha1", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -814,10 +1009,24 @@ struct safexcel_alg_template safexcel_alg_sha1 = { static int safexcel_hmac_sha1_init(struct ahash_request *areq) { - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); + + memset(req, 0, sizeof(*req)); + + /* Start from ipad precompute */ + memcpy(req->state, &ctx->base.ipad, SHA1_DIGEST_SIZE); + /* Already processed the key^ipad part now! */ + req->len = SHA1_BLOCK_SIZE; + req->processed = SHA1_BLOCK_SIZE; + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1; + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + req->state_sz = SHA1_DIGEST_SIZE; + req->digest_sz = SHA1_DIGEST_SIZE; + req->block_sz = SHA1_BLOCK_SIZE; + req->hmac = true; - safexcel_sha1_init(areq); - req->digest = CONTEXT_CONTROL_DIGEST_HMAC; return 0; } @@ -831,27 +1040,11 @@ static int safexcel_hmac_sha1_digest(struct ahash_request *areq) return safexcel_ahash_finup(areq); } -struct safexcel_ahash_result { - struct completion completion; - int error; -}; - -static void safexcel_ahash_complete(struct crypto_async_request *req, int error) -{ - struct safexcel_ahash_result *result = req->data; - - if (error == -EINPROGRESS) - return; - - result->error = error; - complete(&result->completion); -} - static int safexcel_hmac_init_pad(struct ahash_request *areq, unsigned int blocksize, const u8 *key, unsigned int keylen, u8 *ipad, u8 *opad) { - struct safexcel_ahash_result result; + DECLARE_CRYPTO_WAIT(result); struct scatterlist sg; int ret, i; u8 *keydup; @@ -864,20 +1057,15 @@ static int safexcel_hmac_init_pad(struct ahash_request *areq, return -ENOMEM; ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG, - safexcel_ahash_complete, &result); + crypto_req_done, &result); sg_init_one(&sg, keydup, keylen); ahash_request_set_crypt(areq, &sg, ipad, keylen); - init_completion(&result.completion); ret = crypto_ahash_digest(areq); - if (ret == -EINPROGRESS || ret == -EBUSY) { - wait_for_completion_interruptible(&result.completion); - ret = result.error; - } + ret = crypto_wait_req(ret, &result); /* Avoid leaking */ - memzero_explicit(keydup, keylen); - kfree(keydup); + kfree_sensitive(keydup); if (ret) return ret; @@ -899,38 +1087,33 @@ static int safexcel_hmac_init_pad(struct ahash_request *areq, static int 
safexcel_hmac_init_iv(struct ahash_request *areq, unsigned int blocksize, u8 *pad, void *state) { - struct safexcel_ahash_result result; struct safexcel_ahash_req *req; + DECLARE_CRYPTO_WAIT(result); struct scatterlist sg; int ret; ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG, - safexcel_ahash_complete, &result); + crypto_req_done, &result); sg_init_one(&sg, pad, blocksize); ahash_request_set_crypt(areq, &sg, pad, blocksize); - init_completion(&result.completion); ret = crypto_ahash_init(areq); if (ret) return ret; - req = ahash_request_ctx(areq); + req = ahash_request_ctx_dma(areq); req->hmac = true; req->last_req = true; ret = crypto_ahash_update(areq); - if (ret && ret != -EINPROGRESS && ret != -EBUSY) - return ret; - - wait_for_completion_interruptible(&result.completion); - if (result.error) - return result.error; + ret = crypto_wait_req(ret, &result); - return crypto_ahash_export(areq, state); + return ret ?: crypto_ahash_export(areq, state); } -int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen, - void *istate, void *ostate) +static int __safexcel_hmac_setkey(const char *alg, const u8 *key, + unsigned int keylen, + void *istate, void *ostate) { struct ahash_request *areq; struct crypto_ahash *tfm; @@ -979,35 +1162,38 @@ free_ahash: return ret; } -static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen, const char *alg, - unsigned int state_sz) +int safexcel_hmac_setkey(struct safexcel_context *base, const u8 *key, + unsigned int keylen, const char *alg, + unsigned int state_sz) { - struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); - struct safexcel_crypto_priv *priv = ctx->priv; + struct safexcel_crypto_priv *priv = base->priv; struct safexcel_ahash_export_state istate, ostate; - int ret, i; + int ret; - ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate); + ret = __safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate); if (ret) return ret; - if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr) { - for (i = 0; i < state_sz / sizeof(u32); i++) { - if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) || - ctx->opad[i] != le32_to_cpu(ostate.state[i])) { - ctx->base.needs_inv = true; - break; - } - } - } + if (priv->flags & EIP197_TRC_CACHE && base->ctxr && + (memcmp(&base->ipad, istate.state, state_sz) || + memcmp(&base->opad, ostate.state, state_sz))) + base->needs_inv = true; - memcpy(ctx->ipad, &istate.state, state_sz); - memcpy(ctx->opad, &ostate.state, state_sz); + memcpy(&base->ipad, &istate.state, state_sz); + memcpy(&base->opad, &ostate.state, state_sz); return 0; } +static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen, const char *alg, + unsigned int state_sz) +{ + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + + return safexcel_hmac_setkey(&ctx->base, key, keylen, alg, state_sz); +} + static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { @@ -1017,7 +1203,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, struct safexcel_alg_template safexcel_alg_hmac_sha1 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_SHA1, .alg.ahash = { .init = safexcel_hmac_sha1_init, .update = safexcel_ahash_update, @@ -1033,8 +1219,9 @@ struct safexcel_alg_template safexcel_alg_hmac_sha1 = { .base = { .cra_name = "hmac(sha1)", .cra_driver_name = "safexcel-hmac-sha1", - .cra_priority = 300, + 
.cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1049,22 +1236,15 @@ struct safexcel_alg_template safexcel_alg_hmac_sha1 = { static int safexcel_sha256_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); - req->state[0] = SHA256_H0; - req->state[1] = SHA256_H1; - req->state[2] = SHA256_H2; - req->state[3] = SHA256_H3; - req->state[4] = SHA256_H4; - req->state[5] = SHA256_H5; - req->state[6] = SHA256_H6; - req->state[7] = SHA256_H7; - ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; req->state_sz = SHA256_DIGEST_SIZE; + req->digest_sz = SHA256_DIGEST_SIZE; + req->block_sz = SHA256_BLOCK_SIZE; return 0; } @@ -1081,7 +1261,7 @@ static int safexcel_sha256_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_sha256 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_SHA2_256, .alg.ahash = { .init = safexcel_sha256_init, .update = safexcel_ahash_update, @@ -1096,8 +1276,9 @@ struct safexcel_alg_template safexcel_alg_sha256 = { .base = { .cra_name = "sha256", .cra_driver_name = "safexcel-sha256", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1112,22 +1293,15 @@ struct safexcel_alg_template safexcel_alg_sha256 = { static int safexcel_sha224_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); - req->state[0] = SHA224_H0; - req->state[1] = SHA224_H1; - req->state[2] = SHA224_H2; - req->state[3] = SHA224_H3; - req->state[4] = SHA224_H4; - req->state[5] = SHA224_H5; - req->state[6] = SHA224_H6; - req->state[7] = SHA224_H7; - ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; req->state_sz = SHA256_DIGEST_SIZE; + req->digest_sz = SHA256_DIGEST_SIZE; + req->block_sz = SHA256_BLOCK_SIZE; return 0; } @@ -1144,7 +1318,7 @@ static int safexcel_sha224_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_sha224 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_SHA2_256, .alg.ahash = { .init = safexcel_sha224_init, .update = safexcel_ahash_update, @@ -1159,8 +1333,9 @@ struct safexcel_alg_template safexcel_alg_sha224 = { .base = { .cra_name = "sha224", .cra_driver_name = "safexcel-sha224", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1181,10 +1356,24 @@ static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key, static int safexcel_hmac_sha224_init(struct ahash_request *areq) { - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct 
safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); + + memset(req, 0, sizeof(*req)); + + /* Start from ipad precompute */ + memcpy(req->state, &ctx->base.ipad, SHA256_DIGEST_SIZE); + /* Already processed the key^ipad part now! */ + req->len = SHA256_BLOCK_SIZE; + req->processed = SHA256_BLOCK_SIZE; + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224; + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + req->state_sz = SHA256_DIGEST_SIZE; + req->digest_sz = SHA256_DIGEST_SIZE; + req->block_sz = SHA256_BLOCK_SIZE; + req->hmac = true; - safexcel_sha224_init(areq); - req->digest = CONTEXT_CONTROL_DIGEST_HMAC; return 0; } @@ -1200,7 +1389,7 @@ static int safexcel_hmac_sha224_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_hmac_sha224 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_SHA2_256, .alg.ahash = { .init = safexcel_hmac_sha224_init, .update = safexcel_ahash_update, @@ -1216,8 +1405,9 @@ struct safexcel_alg_template safexcel_alg_hmac_sha224 = { .base = { .cra_name = "hmac(sha224)", .cra_driver_name = "safexcel-hmac-sha224", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1238,10 +1428,24 @@ static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key, static int safexcel_hmac_sha256_init(struct ahash_request *areq) { - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); + + memset(req, 0, sizeof(*req)); + + /* Start from ipad precompute */ + memcpy(req->state, &ctx->base.ipad, SHA256_DIGEST_SIZE); + /* Already processed the key^ipad part now! 
*/ + req->len = SHA256_BLOCK_SIZE; + req->processed = SHA256_BLOCK_SIZE; + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256; + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + req->state_sz = SHA256_DIGEST_SIZE; + req->digest_sz = SHA256_DIGEST_SIZE; + req->block_sz = SHA256_BLOCK_SIZE; + req->hmac = true; - safexcel_sha256_init(areq); - req->digest = CONTEXT_CONTROL_DIGEST_HMAC; return 0; } @@ -1257,7 +1461,7 @@ static int safexcel_hmac_sha256_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_hmac_sha256 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_SHA2_256, .alg.ahash = { .init = safexcel_hmac_sha256_init, .update = safexcel_ahash_update, @@ -1273,8 +1477,9 @@ struct safexcel_alg_template safexcel_alg_hmac_sha256 = { .base = { .cra_name = "hmac(sha256)", .cra_driver_name = "safexcel-hmac-sha256", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1289,30 +1494,15 @@ struct safexcel_alg_template safexcel_alg_hmac_sha256 = { static int safexcel_sha512_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); - req->state[0] = lower_32_bits(SHA512_H0); - req->state[1] = upper_32_bits(SHA512_H0); - req->state[2] = lower_32_bits(SHA512_H1); - req->state[3] = upper_32_bits(SHA512_H1); - req->state[4] = lower_32_bits(SHA512_H2); - req->state[5] = upper_32_bits(SHA512_H2); - req->state[6] = lower_32_bits(SHA512_H3); - req->state[7] = upper_32_bits(SHA512_H3); - req->state[8] = lower_32_bits(SHA512_H4); - req->state[9] = upper_32_bits(SHA512_H4); - req->state[10] = lower_32_bits(SHA512_H5); - req->state[11] = upper_32_bits(SHA512_H5); - req->state[12] = lower_32_bits(SHA512_H6); - req->state[13] = upper_32_bits(SHA512_H6); - req->state[14] = lower_32_bits(SHA512_H7); - req->state[15] = upper_32_bits(SHA512_H7); - ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; req->state_sz = SHA512_DIGEST_SIZE; + req->digest_sz = SHA512_DIGEST_SIZE; + req->block_sz = SHA512_BLOCK_SIZE; return 0; } @@ -1329,7 +1519,7 @@ static int safexcel_sha512_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_sha512 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_SHA2_512, .alg.ahash = { .init = safexcel_sha512_init, .update = safexcel_ahash_update, @@ -1344,8 +1534,9 @@ struct safexcel_alg_template safexcel_alg_sha512 = { .base = { .cra_name = "sha512", .cra_driver_name = "safexcel-sha512", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1360,30 +1551,15 @@ struct safexcel_alg_template safexcel_alg_sha512 = { static int safexcel_sha384_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); - 
req->state[0] = lower_32_bits(SHA384_H0); - req->state[1] = upper_32_bits(SHA384_H0); - req->state[2] = lower_32_bits(SHA384_H1); - req->state[3] = upper_32_bits(SHA384_H1); - req->state[4] = lower_32_bits(SHA384_H2); - req->state[5] = upper_32_bits(SHA384_H2); - req->state[6] = lower_32_bits(SHA384_H3); - req->state[7] = upper_32_bits(SHA384_H3); - req->state[8] = lower_32_bits(SHA384_H4); - req->state[9] = upper_32_bits(SHA384_H4); - req->state[10] = lower_32_bits(SHA384_H5); - req->state[11] = upper_32_bits(SHA384_H5); - req->state[12] = lower_32_bits(SHA384_H6); - req->state[13] = upper_32_bits(SHA384_H6); - req->state[14] = lower_32_bits(SHA384_H7); - req->state[15] = upper_32_bits(SHA384_H7); - ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; req->state_sz = SHA512_DIGEST_SIZE; + req->digest_sz = SHA512_DIGEST_SIZE; + req->block_sz = SHA512_BLOCK_SIZE; return 0; } @@ -1400,7 +1576,7 @@ static int safexcel_sha384_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_sha384 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_SHA2_512, .alg.ahash = { .init = safexcel_sha384_init, .update = safexcel_ahash_update, @@ -1415,8 +1591,9 @@ struct safexcel_alg_template safexcel_alg_sha384 = { .base = { .cra_name = "sha384", .cra_driver_name = "safexcel-sha384", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1437,10 +1614,24 @@ static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key, static int safexcel_hmac_sha512_init(struct ahash_request *areq) { - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); + + memset(req, 0, sizeof(*req)); + + /* Start from ipad precompute */ + memcpy(req->state, &ctx->base.ipad, SHA512_DIGEST_SIZE); + /* Already processed the key^ipad part now! 
*/ + req->len = SHA512_BLOCK_SIZE; + req->processed = SHA512_BLOCK_SIZE; + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512; + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + req->state_sz = SHA512_DIGEST_SIZE; + req->digest_sz = SHA512_DIGEST_SIZE; + req->block_sz = SHA512_BLOCK_SIZE; + req->hmac = true; - safexcel_sha512_init(areq); - req->digest = CONTEXT_CONTROL_DIGEST_HMAC; return 0; } @@ -1456,7 +1647,7 @@ static int safexcel_hmac_sha512_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_hmac_sha512 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_SHA2_512, .alg.ahash = { .init = safexcel_hmac_sha512_init, .update = safexcel_ahash_update, @@ -1472,8 +1663,9 @@ struct safexcel_alg_template safexcel_alg_hmac_sha512 = { .base = { .cra_name = "hmac(sha512)", .cra_driver_name = "safexcel-hmac-sha512", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1494,10 +1686,24 @@ static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key, static int safexcel_hmac_sha384_init(struct ahash_request *areq) { - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); + + memset(req, 0, sizeof(*req)); + + /* Start from ipad precompute */ + memcpy(req->state, &ctx->base.ipad, SHA512_DIGEST_SIZE); + /* Already processed the key^ipad part now! */ + req->len = SHA512_BLOCK_SIZE; + req->processed = SHA512_BLOCK_SIZE; + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384; + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + req->state_sz = SHA512_DIGEST_SIZE; + req->digest_sz = SHA512_DIGEST_SIZE; + req->block_sz = SHA512_BLOCK_SIZE; + req->hmac = true; - safexcel_sha384_init(areq); - req->digest = CONTEXT_CONTROL_DIGEST_HMAC; return 0; } @@ -1513,7 +1719,7 @@ static int safexcel_hmac_sha384_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_hmac_sha384 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_SHA2_512, .alg.ahash = { .init = safexcel_hmac_sha384_init, .update = safexcel_ahash_update, @@ -1529,8 +1735,9 @@ struct safexcel_alg_template safexcel_alg_hmac_sha384 = { .base = { .cra_name = "hmac(sha384)", .cra_driver_name = "safexcel-hmac-sha384", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1545,18 +1752,15 @@ struct safexcel_alg_template safexcel_alg_hmac_sha384 = { static int safexcel_md5_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); - req->state[0] = MD5_H0; - req->state[1] = MD5_H1; - req->state[2] = MD5_H2; - req->state[3] = MD5_H3; - ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5; req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; req->state_sz = MD5_DIGEST_SIZE; + req->digest_sz = MD5_DIGEST_SIZE; + req->block_sz = MD5_HMAC_BLOCK_SIZE; return 0; } @@ -1573,7 +1777,7 @@ 
static int safexcel_md5_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_md5 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_MD5, .alg.ahash = { .init = safexcel_md5_init, .update = safexcel_ahash_update, @@ -1588,8 +1792,9 @@ struct safexcel_alg_template safexcel_alg_md5 = { .base = { .cra_name = "md5", .cra_driver_name = "safexcel-md5", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1603,10 +1808,25 @@ struct safexcel_alg_template safexcel_alg_md5 = { static int safexcel_hmac_md5_init(struct ahash_request *areq) { - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); + + memset(req, 0, sizeof(*req)); + + /* Start from ipad precompute */ + memcpy(req->state, &ctx->base.ipad, MD5_DIGEST_SIZE); + /* Already processed the key^ipad part now! */ + req->len = MD5_HMAC_BLOCK_SIZE; + req->processed = MD5_HMAC_BLOCK_SIZE; + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5; + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + req->state_sz = MD5_DIGEST_SIZE; + req->digest_sz = MD5_DIGEST_SIZE; + req->block_sz = MD5_HMAC_BLOCK_SIZE; + req->len_is_le = true; /* MD5 is little endian! ... */ + req->hmac = true; - safexcel_md5_init(areq); - req->digest = CONTEXT_CONTROL_DIGEST_HMAC; return 0; } @@ -1629,7 +1849,7 @@ static int safexcel_hmac_md5_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_hmac_md5 = { .type = SAFEXCEL_ALG_TYPE_AHASH, - .engines = EIP97IES | EIP197B | EIP197D, + .algo_mask = SAFEXCEL_ALG_MD5, .alg.ahash = { .init = safexcel_hmac_md5_init, .update = safexcel_ahash_update, @@ -1645,8 +1865,9 @@ struct safexcel_alg_template safexcel_alg_hmac_md5 = { .base = { .cra_name = "hmac(md5)", .cra_driver_name = "safexcel-hmac-md5", - .cra_priority = 300, + .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1657,3 +1878,1135 @@ struct safexcel_alg_template safexcel_alg_hmac_md5 = { }, }, }; + +static int safexcel_cbcmac_init(struct ahash_request *areq) +{ + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); + + memset(req, 0, sizeof(*req)); + + /* Start from loaded keys */ + memcpy(req->state, &ctx->base.ipad, ctx->key_sz); + /* Set processed to non-zero to enable invalidation detection */ + req->len = AES_BLOCK_SIZE; + req->processed = AES_BLOCK_SIZE; + + req->digest = CONTEXT_CONTROL_DIGEST_XCM; + req->state_sz = ctx->key_sz; + req->digest_sz = AES_BLOCK_SIZE; + req->block_sz = AES_BLOCK_SIZE; + req->xcbcmac = true; + + return 0; +} + +static int safexcel_cbcmac_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int len) +{ + struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + struct crypto_aes_ctx aes; + int ret, i; + + ret = aes_expandkey(&aes, key, len); + if (ret) + return ret; + + memset(&ctx->base.ipad, 0, 2 * AES_BLOCK_SIZE); + for (i = 0; i < len / sizeof(u32); i++) + ctx->base.ipad.be[i + 8] = cpu_to_be32(aes.key_enc[i]); + + if (len == 
AES_KEYSIZE_192) { + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192; + ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE; + } else if (len == AES_KEYSIZE_256) { + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC256; + ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE; + } else { + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128; + ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE; + } + ctx->cbcmac = true; + + memzero_explicit(&aes, sizeof(aes)); + return 0; +} + +static int safexcel_cbcmac_digest(struct ahash_request *areq) +{ + return safexcel_cbcmac_init(areq) ?: safexcel_ahash_finup(areq); +} + +struct safexcel_alg_template safexcel_alg_cbcmac = { + .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = 0, + .alg.ahash = { + .init = safexcel_cbcmac_init, + .update = safexcel_ahash_update, + .final = safexcel_ahash_final, + .finup = safexcel_ahash_finup, + .digest = safexcel_cbcmac_digest, + .setkey = safexcel_cbcmac_setkey, + .export = safexcel_ahash_export, + .import = safexcel_ahash_import, + .halg = { + .digestsize = AES_BLOCK_SIZE, + .statesize = sizeof(struct safexcel_ahash_export_state), + .base = { + .cra_name = "cbcmac(aes)", + .cra_driver_name = "safexcel-cbcmac-aes", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), + .cra_init = safexcel_ahash_cra_init, + .cra_exit = safexcel_ahash_cra_exit, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int len) +{ + struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + u32 key_tmp[3 * AES_BLOCK_SIZE / sizeof(u32)]; + int ret, i; + + ret = aes_expandkey(ctx->aes, key, len); + if (ret) + return ret; + + /* precompute the XCBC key material */ + aes_encrypt(ctx->aes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE, + "\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1"); + aes_encrypt(ctx->aes, (u8 *)key_tmp, + "\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2"); + aes_encrypt(ctx->aes, (u8 *)key_tmp + AES_BLOCK_SIZE, + "\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3"); + for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++) + ctx->base.ipad.word[i] = swab32(key_tmp[i]); + + ret = aes_expandkey(ctx->aes, + (u8 *)key_tmp + 2 * AES_BLOCK_SIZE, + AES_MIN_KEY_SIZE); + if (ret) + return ret; + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128; + ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE; + ctx->cbcmac = false; + + return 0; +} + +static int safexcel_xcbcmac_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_ahash_cra_init(tfm); + ctx->aes = kmalloc(sizeof(*ctx->aes), GFP_KERNEL); + return ctx->aes == NULL ? 
-ENOMEM : 0; +} + +static void safexcel_xcbcmac_cra_exit(struct crypto_tfm *tfm) +{ + struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); + + kfree(ctx->aes); + safexcel_ahash_cra_exit(tfm); +} + +struct safexcel_alg_template safexcel_alg_xcbcmac = { + .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = 0, + .alg.ahash = { + .init = safexcel_cbcmac_init, + .update = safexcel_ahash_update, + .final = safexcel_ahash_final, + .finup = safexcel_ahash_finup, + .digest = safexcel_cbcmac_digest, + .setkey = safexcel_xcbcmac_setkey, + .export = safexcel_ahash_export, + .import = safexcel_ahash_import, + .halg = { + .digestsize = AES_BLOCK_SIZE, + .statesize = sizeof(struct safexcel_ahash_export_state), + .base = { + .cra_name = "xcbc(aes)", + .cra_driver_name = "safexcel-xcbc-aes", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), + .cra_init = safexcel_xcbcmac_cra_init, + .cra_exit = safexcel_xcbcmac_cra_exit, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int len) +{ + struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + __be64 consts[4]; + u64 _const[2]; + u8 msb_mask, gfmask; + int ret, i; + + /* precompute the CMAC key material */ + ret = aes_expandkey(ctx->aes, key, len); + if (ret) + return ret; + + for (i = 0; i < len / sizeof(u32); i++) + ctx->base.ipad.word[i + 8] = swab32(ctx->aes->key_enc[i]); + + /* code below borrowed from crypto/cmac.c */ + /* encrypt the zero block */ + memset(consts, 0, AES_BLOCK_SIZE); + aes_encrypt(ctx->aes, (u8 *)consts, (u8 *)consts); + + gfmask = 0x87; + _const[0] = be64_to_cpu(consts[1]); + _const[1] = be64_to_cpu(consts[0]); + + /* gf(2^128) multiply zero-ciphertext with u and u^2 */ + for (i = 0; i < 4; i += 2) { + msb_mask = ((s64)_const[1] >> 63) & gfmask; + _const[1] = (_const[1] << 1) | (_const[0] >> 63); + _const[0] = (_const[0] << 1) ^ msb_mask; + + consts[i + 0] = cpu_to_be64(_const[1]); + consts[i + 1] = cpu_to_be64(_const[0]); + } + /* end of code borrowed from crypto/cmac.c */ + + for (i = 0; i < 2 * AES_BLOCK_SIZE / sizeof(u32); i++) + ctx->base.ipad.be[i] = cpu_to_be32(((u32 *)consts)[i]); + + if (len == AES_KEYSIZE_192) { + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192; + ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE; + } else if (len == AES_KEYSIZE_256) { + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC256; + ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE; + } else { + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128; + ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE; + } + ctx->cbcmac = false; + + return 0; +} + +struct safexcel_alg_template safexcel_alg_cmac = { + .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = 0, + .alg.ahash = { + .init = safexcel_cbcmac_init, + .update = safexcel_ahash_update, + .final = safexcel_ahash_final, + .finup = safexcel_ahash_finup, + .digest = safexcel_cbcmac_digest, + .setkey = safexcel_cmac_setkey, + .export = safexcel_ahash_export, + .import = safexcel_ahash_import, + .halg = { + .digestsize = AES_BLOCK_SIZE, + .statesize = sizeof(struct safexcel_ahash_export_state), + .base = { + .cra_name = "cmac(aes)", + .cra_driver_name = "safexcel-cmac-aes", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + 
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx), + .cra_init = safexcel_xcbcmac_cra_init, + .cra_exit = safexcel_xcbcmac_cra_exit, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +static int safexcel_sm3_init(struct ahash_request *areq) +{ + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); + + memset(req, 0, sizeof(*req)); + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3; + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + req->state_sz = SM3_DIGEST_SIZE; + req->digest_sz = SM3_DIGEST_SIZE; + req->block_sz = SM3_BLOCK_SIZE; + + return 0; +} + +static int safexcel_sm3_digest(struct ahash_request *areq) +{ + int ret = safexcel_sm3_init(areq); + + if (ret) + return ret; + + return safexcel_ahash_finup(areq); +} + +struct safexcel_alg_template safexcel_alg_sm3 = { + .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_SM3, + .alg.ahash = { + .init = safexcel_sm3_init, + .update = safexcel_ahash_update, + .final = safexcel_ahash_final, + .finup = safexcel_ahash_finup, + .digest = safexcel_sm3_digest, + .export = safexcel_ahash_export, + .import = safexcel_ahash_import, + .halg = { + .digestsize = SM3_DIGEST_SIZE, + .statesize = sizeof(struct safexcel_ahash_export_state), + .base = { + .cra_name = "sm3", + .cra_driver_name = "safexcel-sm3", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SM3_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), + .cra_init = safexcel_ahash_cra_init, + .cra_exit = safexcel_ahash_cra_exit, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +static int safexcel_hmac_sm3_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sm3", + SM3_DIGEST_SIZE); +} + +static int safexcel_hmac_sm3_init(struct ahash_request *areq) +{ + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); + + memset(req, 0, sizeof(*req)); + + /* Start from ipad precompute */ + memcpy(req->state, &ctx->base.ipad, SM3_DIGEST_SIZE); + /* Already processed the key^ipad part now! 
*/ + req->len = SM3_BLOCK_SIZE; + req->processed = SM3_BLOCK_SIZE; + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3; + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + req->state_sz = SM3_DIGEST_SIZE; + req->digest_sz = SM3_DIGEST_SIZE; + req->block_sz = SM3_BLOCK_SIZE; + req->hmac = true; + + return 0; +} + +static int safexcel_hmac_sm3_digest(struct ahash_request *areq) +{ + int ret = safexcel_hmac_sm3_init(areq); + + if (ret) + return ret; + + return safexcel_ahash_finup(areq); +} + +struct safexcel_alg_template safexcel_alg_hmac_sm3 = { + .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_SM3, + .alg.ahash = { + .init = safexcel_hmac_sm3_init, + .update = safexcel_ahash_update, + .final = safexcel_ahash_final, + .finup = safexcel_ahash_finup, + .digest = safexcel_hmac_sm3_digest, + .setkey = safexcel_hmac_sm3_setkey, + .export = safexcel_ahash_export, + .import = safexcel_ahash_import, + .halg = { + .digestsize = SM3_DIGEST_SIZE, + .statesize = sizeof(struct safexcel_ahash_export_state), + .base = { + .cra_name = "hmac(sm3)", + .cra_driver_name = "safexcel-hmac-sm3", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SM3_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), + .cra_init = safexcel_ahash_cra_init, + .cra_exit = safexcel_ahash_cra_exit, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +static int safexcel_sha3_224_init(struct ahash_request *areq) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); + + memset(req, 0, sizeof(*req)); + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_224; + req->digest = CONTEXT_CONTROL_DIGEST_INITIAL; + req->state_sz = SHA3_224_DIGEST_SIZE; + req->digest_sz = SHA3_224_DIGEST_SIZE; + req->block_sz = SHA3_224_BLOCK_SIZE; + ctx->do_fallback = false; + ctx->fb_init_done = false; + return 0; +} + +static int safexcel_sha3_fbcheck(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + struct ahash_request *subreq = ahash_request_ctx_dma(req); + int ret = 0; + + if (ctx->do_fallback) { + ahash_request_set_tfm(subreq, ctx->fback); + ahash_request_set_callback(subreq, req->base.flags, + req->base.complete, req->base.data); + ahash_request_set_crypt(subreq, req->src, req->result, + req->nbytes); + if (!ctx->fb_init_done) { + if (ctx->fb_do_setkey) { + /* Set fallback cipher HMAC key */ + u8 key[SHA3_224_BLOCK_SIZE]; + + memcpy(key, &ctx->base.ipad, + crypto_ahash_blocksize(ctx->fback) / 2); + memcpy(key + + crypto_ahash_blocksize(ctx->fback) / 2, + &ctx->base.opad, + crypto_ahash_blocksize(ctx->fback) / 2); + ret = crypto_ahash_setkey(ctx->fback, key, + crypto_ahash_blocksize(ctx->fback)); + memzero_explicit(key, + crypto_ahash_blocksize(ctx->fback)); + ctx->fb_do_setkey = false; + } + ret = ret ?: crypto_ahash_init(subreq); + ctx->fb_init_done = true; + } + } + return ret; +} + +static int safexcel_sha3_update(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + struct ahash_request *subreq = ahash_request_ctx_dma(req); + + ctx->do_fallback = true; + return safexcel_sha3_fbcheck(req) ?: crypto_ahash_update(subreq); +} + +static int safexcel_sha3_final(struct ahash_request *req) +{ + struct crypto_ahash *tfm = 
crypto_ahash_reqtfm(req); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + struct ahash_request *subreq = ahash_request_ctx_dma(req); + + ctx->do_fallback = true; + return safexcel_sha3_fbcheck(req) ?: crypto_ahash_final(subreq); +} + +static int safexcel_sha3_finup(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + struct ahash_request *subreq = ahash_request_ctx_dma(req); + + ctx->do_fallback |= !req->nbytes; + if (ctx->do_fallback) + /* Update or ex/import happened or len 0, cannot use the HW */ + return safexcel_sha3_fbcheck(req) ?: + crypto_ahash_finup(subreq); + else + return safexcel_ahash_finup(req); +} + +static int safexcel_sha3_digest_fallback(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + struct ahash_request *subreq = ahash_request_ctx_dma(req); + + ctx->do_fallback = true; + ctx->fb_init_done = false; + return safexcel_sha3_fbcheck(req) ?: crypto_ahash_finup(subreq); +} + +static int safexcel_sha3_224_digest(struct ahash_request *req) +{ + if (req->nbytes) + return safexcel_sha3_224_init(req) ?: safexcel_ahash_finup(req); + + /* HW cannot do zero length hash, use fallback instead */ + return safexcel_sha3_digest_fallback(req); +} + +static int safexcel_sha3_export(struct ahash_request *req, void *out) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + struct ahash_request *subreq = ahash_request_ctx_dma(req); + + ctx->do_fallback = true; + return safexcel_sha3_fbcheck(req) ?: crypto_ahash_export(subreq, out); +} + +static int safexcel_sha3_import(struct ahash_request *req, const void *in) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + struct ahash_request *subreq = ahash_request_ctx_dma(req); + + ctx->do_fallback = true; + return safexcel_sha3_fbcheck(req) ?: crypto_ahash_import(subreq, in); + // return safexcel_ahash_import(req, in); +} + +static int safexcel_sha3_cra_init(struct crypto_tfm *tfm) +{ + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_ahash_cra_init(tfm); + + /* Allocate fallback implementation */ + ctx->fback = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fback)) + return PTR_ERR(ctx->fback); + + /* Update statesize from fallback algorithm! 
*/ + crypto_hash_alg_common(ahash)->statesize = + crypto_ahash_statesize(ctx->fback); + crypto_ahash_set_reqsize_dma( + ahash, max(sizeof(struct safexcel_ahash_req), + sizeof(struct ahash_request) + + crypto_ahash_reqsize(ctx->fback))); + return 0; +} + +static void safexcel_sha3_cra_exit(struct crypto_tfm *tfm) +{ + struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_ahash(ctx->fback); + safexcel_ahash_cra_exit(tfm); +} + +struct safexcel_alg_template safexcel_alg_sha3_224 = { + .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_SHA3, + .alg.ahash = { + .init = safexcel_sha3_224_init, + .update = safexcel_sha3_update, + .final = safexcel_sha3_final, + .finup = safexcel_sha3_finup, + .digest = safexcel_sha3_224_digest, + .export = safexcel_sha3_export, + .import = safexcel_sha3_import, + .halg = { + .digestsize = SHA3_224_DIGEST_SIZE, + .statesize = sizeof(struct safexcel_ahash_export_state), + .base = { + .cra_name = "sha3-224", + .cra_driver_name = "safexcel-sha3-224", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA3_224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), + .cra_init = safexcel_sha3_cra_init, + .cra_exit = safexcel_sha3_cra_exit, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +static int safexcel_sha3_256_init(struct ahash_request *areq) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); + + memset(req, 0, sizeof(*req)); + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_256; + req->digest = CONTEXT_CONTROL_DIGEST_INITIAL; + req->state_sz = SHA3_256_DIGEST_SIZE; + req->digest_sz = SHA3_256_DIGEST_SIZE; + req->block_sz = SHA3_256_BLOCK_SIZE; + ctx->do_fallback = false; + ctx->fb_init_done = false; + return 0; +} + +static int safexcel_sha3_256_digest(struct ahash_request *req) +{ + if (req->nbytes) + return safexcel_sha3_256_init(req) ?: safexcel_ahash_finup(req); + + /* HW cannot do zero length hash, use fallback instead */ + return safexcel_sha3_digest_fallback(req); +} + +struct safexcel_alg_template safexcel_alg_sha3_256 = { + .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_SHA3, + .alg.ahash = { + .init = safexcel_sha3_256_init, + .update = safexcel_sha3_update, + .final = safexcel_sha3_final, + .finup = safexcel_sha3_finup, + .digest = safexcel_sha3_256_digest, + .export = safexcel_sha3_export, + .import = safexcel_sha3_import, + .halg = { + .digestsize = SHA3_256_DIGEST_SIZE, + .statesize = sizeof(struct safexcel_ahash_export_state), + .base = { + .cra_name = "sha3-256", + .cra_driver_name = "safexcel-sha3-256", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA3_256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), + .cra_init = safexcel_sha3_cra_init, + .cra_exit = safexcel_sha3_cra_exit, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +static int safexcel_sha3_384_init(struct ahash_request *areq) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); + + memset(req, 0, sizeof(*req)); + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_384; + req->digest = CONTEXT_CONTROL_DIGEST_INITIAL; + req->state_sz = SHA3_384_DIGEST_SIZE; + 
req->digest_sz = SHA3_384_DIGEST_SIZE; + req->block_sz = SHA3_384_BLOCK_SIZE; + ctx->do_fallback = false; + ctx->fb_init_done = false; + return 0; +} + +static int safexcel_sha3_384_digest(struct ahash_request *req) +{ + if (req->nbytes) + return safexcel_sha3_384_init(req) ?: safexcel_ahash_finup(req); + + /* HW cannot do zero length hash, use fallback instead */ + return safexcel_sha3_digest_fallback(req); +} + +struct safexcel_alg_template safexcel_alg_sha3_384 = { + .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_SHA3, + .alg.ahash = { + .init = safexcel_sha3_384_init, + .update = safexcel_sha3_update, + .final = safexcel_sha3_final, + .finup = safexcel_sha3_finup, + .digest = safexcel_sha3_384_digest, + .export = safexcel_sha3_export, + .import = safexcel_sha3_import, + .halg = { + .digestsize = SHA3_384_DIGEST_SIZE, + .statesize = sizeof(struct safexcel_ahash_export_state), + .base = { + .cra_name = "sha3-384", + .cra_driver_name = "safexcel-sha3-384", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA3_384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), + .cra_init = safexcel_sha3_cra_init, + .cra_exit = safexcel_sha3_cra_exit, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +static int safexcel_sha3_512_init(struct ahash_request *areq) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); + + memset(req, 0, sizeof(*req)); + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_512; + req->digest = CONTEXT_CONTROL_DIGEST_INITIAL; + req->state_sz = SHA3_512_DIGEST_SIZE; + req->digest_sz = SHA3_512_DIGEST_SIZE; + req->block_sz = SHA3_512_BLOCK_SIZE; + ctx->do_fallback = false; + ctx->fb_init_done = false; + return 0; +} + +static int safexcel_sha3_512_digest(struct ahash_request *req) +{ + if (req->nbytes) + return safexcel_sha3_512_init(req) ?: safexcel_ahash_finup(req); + + /* HW cannot do zero length hash, use fallback instead */ + return safexcel_sha3_digest_fallback(req); +} + +struct safexcel_alg_template safexcel_alg_sha3_512 = { + .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_SHA3, + .alg.ahash = { + .init = safexcel_sha3_512_init, + .update = safexcel_sha3_update, + .final = safexcel_sha3_final, + .finup = safexcel_sha3_finup, + .digest = safexcel_sha3_512_digest, + .export = safexcel_sha3_export, + .import = safexcel_sha3_import, + .halg = { + .digestsize = SHA3_512_DIGEST_SIZE, + .statesize = sizeof(struct safexcel_ahash_export_state), + .base = { + .cra_name = "sha3-512", + .cra_driver_name = "safexcel-sha3-512", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA3_512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), + .cra_init = safexcel_sha3_cra_init, + .cra_exit = safexcel_sha3_cra_exit, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +static int safexcel_hmac_sha3_cra_init(struct crypto_tfm *tfm, const char *alg) +{ + struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); + int ret; + + ret = safexcel_sha3_cra_init(tfm); + if (ret) + return ret; + + /* Allocate precalc basic digest implementation */ + ctx->shpre = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->shpre)) + return PTR_ERR(ctx->shpre); + + ctx->shdesc = kmalloc(sizeof(*ctx->shdesc) + + 
crypto_shash_descsize(ctx->shpre), GFP_KERNEL); + if (!ctx->shdesc) { + crypto_free_shash(ctx->shpre); + return -ENOMEM; + } + ctx->shdesc->tfm = ctx->shpre; + return 0; +} + +static void safexcel_hmac_sha3_cra_exit(struct crypto_tfm *tfm) +{ + struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_ahash(ctx->fback); + crypto_free_shash(ctx->shpre); + kfree(ctx->shdesc); + safexcel_ahash_cra_exit(tfm); +} + +static int safexcel_hmac_sha3_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + int ret = 0; + + if (keylen > crypto_ahash_blocksize(tfm)) { + /* + * If the key is larger than the blocksize, then hash it + * first using our fallback cipher + */ + ret = crypto_shash_digest(ctx->shdesc, key, keylen, + ctx->base.ipad.byte); + keylen = crypto_shash_digestsize(ctx->shpre); + + /* + * If the digest is larger than half the blocksize, we need to + * move the rest to opad due to the way our HMAC infra works. + */ + if (keylen > crypto_ahash_blocksize(tfm) / 2) + /* Buffers overlap, need to use memmove iso memcpy! */ + memmove(&ctx->base.opad, + ctx->base.ipad.byte + + crypto_ahash_blocksize(tfm) / 2, + keylen - crypto_ahash_blocksize(tfm) / 2); + } else { + /* + * Copy the key to our ipad & opad buffers + * Note that ipad and opad each contain one half of the key, + * to match the existing HMAC driver infrastructure. + */ + if (keylen <= crypto_ahash_blocksize(tfm) / 2) { + memcpy(&ctx->base.ipad, key, keylen); + } else { + memcpy(&ctx->base.ipad, key, + crypto_ahash_blocksize(tfm) / 2); + memcpy(&ctx->base.opad, + key + crypto_ahash_blocksize(tfm) / 2, + keylen - crypto_ahash_blocksize(tfm) / 2); + } + } + + /* Pad key with zeroes */ + if (keylen <= crypto_ahash_blocksize(tfm) / 2) { + memset(ctx->base.ipad.byte + keylen, 0, + crypto_ahash_blocksize(tfm) / 2 - keylen); + memset(&ctx->base.opad, 0, crypto_ahash_blocksize(tfm) / 2); + } else { + memset(ctx->base.opad.byte + keylen - + crypto_ahash_blocksize(tfm) / 2, 0, + crypto_ahash_blocksize(tfm) - keylen); + } + + /* If doing fallback, still need to set the new key! 
*/ + ctx->fb_do_setkey = true; + return ret; +} + +static int safexcel_hmac_sha3_224_init(struct ahash_request *areq) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); + + memset(req, 0, sizeof(*req)); + + /* Copy (half of) the key */ + memcpy(req->state, &ctx->base.ipad, SHA3_224_BLOCK_SIZE / 2); + /* Start of HMAC should have len == processed == blocksize */ + req->len = SHA3_224_BLOCK_SIZE; + req->processed = SHA3_224_BLOCK_SIZE; + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_224; + req->digest = CONTEXT_CONTROL_DIGEST_HMAC; + req->state_sz = SHA3_224_BLOCK_SIZE / 2; + req->digest_sz = SHA3_224_DIGEST_SIZE; + req->block_sz = SHA3_224_BLOCK_SIZE; + req->hmac = true; + ctx->do_fallback = false; + ctx->fb_init_done = false; + return 0; +} + +static int safexcel_hmac_sha3_224_digest(struct ahash_request *req) +{ + if (req->nbytes) + return safexcel_hmac_sha3_224_init(req) ?: + safexcel_ahash_finup(req); + + /* HW cannot do zero length HMAC, use fallback instead */ + return safexcel_sha3_digest_fallback(req); +} + +static int safexcel_hmac_sha3_224_cra_init(struct crypto_tfm *tfm) +{ + return safexcel_hmac_sha3_cra_init(tfm, "sha3-224"); +} + +struct safexcel_alg_template safexcel_alg_hmac_sha3_224 = { + .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_SHA3, + .alg.ahash = { + .init = safexcel_hmac_sha3_224_init, + .update = safexcel_sha3_update, + .final = safexcel_sha3_final, + .finup = safexcel_sha3_finup, + .digest = safexcel_hmac_sha3_224_digest, + .setkey = safexcel_hmac_sha3_setkey, + .export = safexcel_sha3_export, + .import = safexcel_sha3_import, + .halg = { + .digestsize = SHA3_224_DIGEST_SIZE, + .statesize = sizeof(struct safexcel_ahash_export_state), + .base = { + .cra_name = "hmac(sha3-224)", + .cra_driver_name = "safexcel-hmac-sha3-224", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA3_224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), + .cra_init = safexcel_hmac_sha3_224_cra_init, + .cra_exit = safexcel_hmac_sha3_cra_exit, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +static int safexcel_hmac_sha3_256_init(struct ahash_request *areq) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); + + memset(req, 0, sizeof(*req)); + + /* Copy (half of) the key */ + memcpy(req->state, &ctx->base.ipad, SHA3_256_BLOCK_SIZE / 2); + /* Start of HMAC should have len == processed == blocksize */ + req->len = SHA3_256_BLOCK_SIZE; + req->processed = SHA3_256_BLOCK_SIZE; + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_256; + req->digest = CONTEXT_CONTROL_DIGEST_HMAC; + req->state_sz = SHA3_256_BLOCK_SIZE / 2; + req->digest_sz = SHA3_256_DIGEST_SIZE; + req->block_sz = SHA3_256_BLOCK_SIZE; + req->hmac = true; + ctx->do_fallback = false; + ctx->fb_init_done = false; + return 0; +} + +static int safexcel_hmac_sha3_256_digest(struct ahash_request *req) +{ + if (req->nbytes) + return safexcel_hmac_sha3_256_init(req) ?: + safexcel_ahash_finup(req); + + /* HW cannot do zero length HMAC, use fallback instead */ + return safexcel_sha3_digest_fallback(req); +} + +static int safexcel_hmac_sha3_256_cra_init(struct crypto_tfm *tfm) +{ + return safexcel_hmac_sha3_cra_init(tfm, "sha3-256"); +} + +struct 
safexcel_alg_template safexcel_alg_hmac_sha3_256 = { + .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_SHA3, + .alg.ahash = { + .init = safexcel_hmac_sha3_256_init, + .update = safexcel_sha3_update, + .final = safexcel_sha3_final, + .finup = safexcel_sha3_finup, + .digest = safexcel_hmac_sha3_256_digest, + .setkey = safexcel_hmac_sha3_setkey, + .export = safexcel_sha3_export, + .import = safexcel_sha3_import, + .halg = { + .digestsize = SHA3_256_DIGEST_SIZE, + .statesize = sizeof(struct safexcel_ahash_export_state), + .base = { + .cra_name = "hmac(sha3-256)", + .cra_driver_name = "safexcel-hmac-sha3-256", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA3_256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), + .cra_init = safexcel_hmac_sha3_256_cra_init, + .cra_exit = safexcel_hmac_sha3_cra_exit, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +static int safexcel_hmac_sha3_384_init(struct ahash_request *areq) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); + + memset(req, 0, sizeof(*req)); + + /* Copy (half of) the key */ + memcpy(req->state, &ctx->base.ipad, SHA3_384_BLOCK_SIZE / 2); + /* Start of HMAC should have len == processed == blocksize */ + req->len = SHA3_384_BLOCK_SIZE; + req->processed = SHA3_384_BLOCK_SIZE; + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_384; + req->digest = CONTEXT_CONTROL_DIGEST_HMAC; + req->state_sz = SHA3_384_BLOCK_SIZE / 2; + req->digest_sz = SHA3_384_DIGEST_SIZE; + req->block_sz = SHA3_384_BLOCK_SIZE; + req->hmac = true; + ctx->do_fallback = false; + ctx->fb_init_done = false; + return 0; +} + +static int safexcel_hmac_sha3_384_digest(struct ahash_request *req) +{ + if (req->nbytes) + return safexcel_hmac_sha3_384_init(req) ?: + safexcel_ahash_finup(req); + + /* HW cannot do zero length HMAC, use fallback instead */ + return safexcel_sha3_digest_fallback(req); +} + +static int safexcel_hmac_sha3_384_cra_init(struct crypto_tfm *tfm) +{ + return safexcel_hmac_sha3_cra_init(tfm, "sha3-384"); +} + +struct safexcel_alg_template safexcel_alg_hmac_sha3_384 = { + .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_SHA3, + .alg.ahash = { + .init = safexcel_hmac_sha3_384_init, + .update = safexcel_sha3_update, + .final = safexcel_sha3_final, + .finup = safexcel_sha3_finup, + .digest = safexcel_hmac_sha3_384_digest, + .setkey = safexcel_hmac_sha3_setkey, + .export = safexcel_sha3_export, + .import = safexcel_sha3_import, + .halg = { + .digestsize = SHA3_384_DIGEST_SIZE, + .statesize = sizeof(struct safexcel_ahash_export_state), + .base = { + .cra_name = "hmac(sha3-384)", + .cra_driver_name = "safexcel-hmac-sha3-384", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA3_384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), + .cra_init = safexcel_hmac_sha3_384_cra_init, + .cra_exit = safexcel_hmac_sha3_cra_exit, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +static int safexcel_hmac_sha3_512_init(struct ahash_request *areq) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); + + memset(req, 0, sizeof(*req)); + + /* Copy (half of) the key */ + 
memcpy(req->state, &ctx->base.ipad, SHA3_512_BLOCK_SIZE / 2); + /* Start of HMAC should have len == processed == blocksize */ + req->len = SHA3_512_BLOCK_SIZE; + req->processed = SHA3_512_BLOCK_SIZE; + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_512; + req->digest = CONTEXT_CONTROL_DIGEST_HMAC; + req->state_sz = SHA3_512_BLOCK_SIZE / 2; + req->digest_sz = SHA3_512_DIGEST_SIZE; + req->block_sz = SHA3_512_BLOCK_SIZE; + req->hmac = true; + ctx->do_fallback = false; + ctx->fb_init_done = false; + return 0; +} + +static int safexcel_hmac_sha3_512_digest(struct ahash_request *req) +{ + if (req->nbytes) + return safexcel_hmac_sha3_512_init(req) ?: + safexcel_ahash_finup(req); + + /* HW cannot do zero length HMAC, use fallback instead */ + return safexcel_sha3_digest_fallback(req); +} + +static int safexcel_hmac_sha3_512_cra_init(struct crypto_tfm *tfm) +{ + return safexcel_hmac_sha3_cra_init(tfm, "sha3-512"); +} +struct safexcel_alg_template safexcel_alg_hmac_sha3_512 = { + .type = SAFEXCEL_ALG_TYPE_AHASH, + .algo_mask = SAFEXCEL_ALG_SHA3, + .alg.ahash = { + .init = safexcel_hmac_sha3_512_init, + .update = safexcel_sha3_update, + .final = safexcel_sha3_final, + .finup = safexcel_sha3_finup, + .digest = safexcel_hmac_sha3_512_digest, + .setkey = safexcel_hmac_sha3_setkey, + .export = safexcel_sha3_export, + .import = safexcel_sha3_import, + .halg = { + .digestsize = SHA3_512_DIGEST_SIZE, + .statesize = sizeof(struct safexcel_ahash_export_state), + .base = { + .cra_name = "hmac(sha3-512)", + .cra_driver_name = "safexcel-hmac-sha3-512", + .cra_priority = SAFEXCEL_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA3_512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), + .cra_init = safexcel_hmac_sha3_512_cra_init, + .cra_exit = safexcel_hmac_sha3_cra_exit, + .cra_module = THIS_MODULE, + }, + }, + }, +}; diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c index eb75fa684876..90f15032c8df 100644 --- a/drivers/crypto/inside-secure/safexcel_ring.c +++ b/drivers/crypto/inside-secure/safexcel_ring.c @@ -14,7 +14,12 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv, struct safexcel_desc_ring *cdr, struct safexcel_desc_ring *rdr) { - cdr->offset = sizeof(u32) * priv->config.cd_offset; + int i; + struct safexcel_command_desc *cdesc; + dma_addr_t atok; + + /* Actual command descriptor ring */ + cdr->offset = priv->config.cd_offset; cdr->base = dmam_alloc_coherent(priv->dev, cdr->offset * EIP197_DEFAULT_RING_SIZE, &cdr->base_dma, GFP_KERNEL); @@ -24,7 +29,34 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv, cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1); cdr->read = cdr->base; - rdr->offset = sizeof(u32) * priv->config.rd_offset; + /* Command descriptor shadow ring for storing additional token data */ + cdr->shoffset = priv->config.cdsh_offset; + cdr->shbase = dmam_alloc_coherent(priv->dev, + cdr->shoffset * + EIP197_DEFAULT_RING_SIZE, + &cdr->shbase_dma, GFP_KERNEL); + if (!cdr->shbase) + return -ENOMEM; + cdr->shwrite = cdr->shbase; + cdr->shbase_end = cdr->shbase + cdr->shoffset * + (EIP197_DEFAULT_RING_SIZE - 1); + + /* + * Populate command descriptors with physical pointers to shadow descs. + * Note that we only need to do this once if we don't overwrite them. 
+ */ + cdesc = cdr->base; + atok = cdr->shbase_dma; + for (i = 0; i < EIP197_DEFAULT_RING_SIZE; i++) { + cdesc->atok_lo = lower_32_bits(atok); + cdesc->atok_hi = upper_32_bits(atok); + cdesc = (void *)cdesc + cdr->offset; + atok += cdr->shoffset; + } + + rdr->offset = priv->config.rd_offset; + /* Use shoffset for result token offset here */ + rdr->shoffset = priv->config.res_offset; rdr->base = dmam_alloc_coherent(priv->dev, rdr->offset * EIP197_DEFAULT_RING_SIZE, &rdr->base_dma, GFP_KERNEL); @@ -42,11 +74,40 @@ inline int safexcel_select_ring(struct safexcel_crypto_priv *priv) return (atomic_inc_return(&priv->ring_used) % priv->config.rings); } -static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv, - struct safexcel_desc_ring *ring) +static void *safexcel_ring_next_cwptr(struct safexcel_crypto_priv *priv, + struct safexcel_desc_ring *ring, + bool first, + struct safexcel_token **atoken) +{ + void *ptr = ring->write; + + if (first) + *atoken = ring->shwrite; + + if ((ring->write == ring->read - ring->offset) || + (ring->read == ring->base && ring->write == ring->base_end)) + return ERR_PTR(-ENOMEM); + + if (ring->write == ring->base_end) { + ring->write = ring->base; + ring->shwrite = ring->shbase; + } else { + ring->write += ring->offset; + ring->shwrite += ring->shoffset; + } + + return ptr; +} + +static void *safexcel_ring_next_rwptr(struct safexcel_crypto_priv *priv, + struct safexcel_desc_ring *ring, + struct result_data_desc **rtoken) { void *ptr = ring->write; + /* Result token at relative offset shoffset */ + *rtoken = ring->write + ring->shoffset; + if ((ring->write == ring->read - ring->offset) || (ring->read == ring->base && ring->write == ring->base_end)) return ERR_PTR(-ENOMEM); @@ -106,10 +167,13 @@ void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv, if (ring->write == ring->read) return; - if (ring->write == ring->base) + if (ring->write == ring->base) { ring->write = ring->base_end; - else + ring->shwrite = ring->shbase_end; + } else { ring->write -= ring->offset; + ring->shwrite -= ring->shoffset; + } } struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv, @@ -117,39 +181,41 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr bool first, bool last, dma_addr_t data, u32 data_len, u32 full_data_len, - dma_addr_t context) { + dma_addr_t context, + struct safexcel_token **atoken) +{ struct safexcel_command_desc *cdesc; - int i; - cdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].cdr); + cdesc = safexcel_ring_next_cwptr(priv, &priv->ring[ring_id].cdr, + first, atoken); if (IS_ERR(cdesc)) return cdesc; - memset(cdesc, 0, sizeof(struct safexcel_command_desc)); - - cdesc->first_seg = first; - cdesc->last_seg = last; cdesc->particle_size = data_len; + cdesc->rsvd0 = 0; + cdesc->last_seg = last; + cdesc->first_seg = first; + cdesc->additional_cdata_size = 0; + cdesc->rsvd1 = 0; cdesc->data_lo = lower_32_bits(data); cdesc->data_hi = upper_32_bits(data); - if (first && context) { - struct safexcel_token *token = - (struct safexcel_token *)cdesc->control_data.token; - - cdesc->control_data.packet_length = full_data_len; + if (first) { + /* + * Note that the length here MUST be >0 or else the EIP(1)97 + * may hang. Newer EIP197 firmware actually incorporates this + * fix already, but that doesn't help the EIP97 and we may + * also be running older firmware. 
+ */ + cdesc->control_data.packet_length = full_data_len ?: 1; cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE | EIP197_OPTION_64BIT_CTX | - EIP197_OPTION_CTX_CTRL_IN_CMD; - cdesc->control_data.context_lo = - (lower_32_bits(context) & GENMASK(31, 2)) >> 2; + EIP197_OPTION_CTX_CTRL_IN_CMD | + EIP197_OPTION_RC_AUTO; + cdesc->control_data.type = EIP197_TYPE_BCLA; + cdesc->control_data.context_lo = lower_32_bits(context) | + EIP197_CONTEXT_SMALL; cdesc->control_data.context_hi = upper_32_bits(context); - - /* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */ - cdesc->control_data.refresh = 2; - - for (i = 0; i < EIP197_MAX_TOKENS; i++) - eip197_noop_token(&token[i]); } return cdesc; @@ -161,18 +227,28 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri dma_addr_t data, u32 len) { struct safexcel_result_desc *rdesc; + struct result_data_desc *rtoken; - rdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].rdr); + rdesc = safexcel_ring_next_rwptr(priv, &priv->ring[ring_id].rdr, + &rtoken); if (IS_ERR(rdesc)) return rdesc; - memset(rdesc, 0, sizeof(struct safexcel_result_desc)); - - rdesc->first_seg = first; - rdesc->last_seg = last; rdesc->particle_size = len; + rdesc->rsvd0 = 0; + rdesc->descriptor_overflow = 1; /* assume error */ + rdesc->buffer_overflow = 1; /* assume error */ + rdesc->last_seg = last; + rdesc->first_seg = first; + rdesc->result_size = EIP197_RD64_RESULT_SIZE; + rdesc->rsvd1 = 0; rdesc->data_lo = lower_32_bits(data); rdesc->data_hi = upper_32_bits(data); + /* Clear length in result token */ + rtoken->packet_length = 0; + /* Assume errors - HW will clear if not the case */ + rtoken->error_code = 0x7fff; + return rdesc; } |
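
A note on the safexcel_add_rdesc() hunk above: the result descriptor and its result token are deliberately seeded with failure values (descriptor_overflow/buffer_overflow set, error_code = 0x7fff, packet_length = 0) before the engine ever sees them, so a descriptor the hardware never wrote back cannot be mistaken for a successful completion. The fragment below is a hypothetical, simplified illustration of that convention; the struct and the helper are invented for this sketch and are not part of the driver.

/*
 * Illustrative mirror of the "assume error" convention: software
 * pre-loads worst-case status, the hardware overwrites it only on
 * successful completion, so a stale or unprocessed result slot
 * still reads as a failure.
 */
struct demo_result_token {
	unsigned int packet_length;	/* 0 until the engine fills it in */
	unsigned int error_code;	/* 0x7fff until the engine clears it */
};

static int demo_result_ok(const struct demo_result_token *rtoken)
{
	/* Still carrying the seeded pattern: never (fully) processed. */
	if (rtoken->error_code == 0x7fff)
		return 0;

	return rtoken->error_code == 0;
}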
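The safexcel_ring_next_cwptr()/safexcel_ring_next_rwptr() helpers above use the usual "keep one slot free" circular-ring test: the ring counts as full when the write pointer sits exactly one descriptor behind the read pointer, with a second clause covering the wrap-around case. Below is a stand-alone sketch of that same test, with invented names and standard-C pointer arithmetic in place of the kernel's void-pointer arithmetic, purely for illustration:

#include <stdbool.h>
#include <stddef.h>

/* Illustrative ring bookkeeping, mirroring the cdr/rdr fields used above. */
struct demo_desc_ring {
	void *base;	/* first descriptor slot */
	void *base_end;	/* last descriptor slot */
	void *read;	/* next descriptor to be consumed */
	void *write;	/* next descriptor to be produced */
	size_t offset;	/* size of one descriptor slot */
};

/*
 * One slot is always left unused: if write were allowed to catch up
 * with read, a full ring would be indistinguishable from an empty
 * one. "Full" therefore means "write is exactly one slot behind
 * read", with an explicit check for the wrap at the end of the ring.
 */
static bool demo_ring_full(const struct demo_desc_ring *ring)
{
	return (char *)ring->write == (char *)ring->read - ring->offset ||
	       (ring->read == ring->base && ring->write == ring->base_end);
}

Leaving that one slot unused is what lets write == read unambiguously mean "empty" in the consumer path.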

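Regarding the hmac(sha3-*) init helpers earlier in this patch: setting req->len = req->processed = <block size> reflects that HMAC's inner hash conceptually starts with one full block, the key XORed with the ipad byte, already absorbed (RFC 2104; HMAC over SHA-3 uses the sponge rate as its block size). The sketch below builds that first inner block in software using generic HMAC padding; it is an illustration only, not the driver's setkey/init path, which hands the key material to the hardware context instead.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define DEMO_HMAC_IPAD_VALUE 0x36
#define DEMO_HMAC_OPAD_VALUE 0x5c

/*
 * Build the first (already-"processed") inner block of HMAC for a key
 * of at most one block: key bytes XORed with 0x36, padded with 0x36 up
 * to the block size. A full HMAC implementation would hash longer keys
 * first and derive the matching 0x5c opad block for the outer hash.
 */
static void demo_hmac_inner_block(uint8_t *block, size_t block_size,
				  const uint8_t *key, size_t key_len)
{
	size_t i;

	memset(block, DEMO_HMAC_IPAD_VALUE, block_size);
	for (i = 0; i < key_len && i < block_size; i++)
		block[i] = key[i] ^ DEMO_HMAC_IPAD_VALUE;
}

The 0x36/0x5c pad bytes are the same values the kernel exposes as HMAC_IPAD_VALUE/HMAC_OPAD_VALUE in <crypto/hmac.h>; the helper itself is hypothetical.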