path: root/drivers/staging/ccree/ssi_aead.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-05 10:36:26 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-05 10:36:26 -0700
commit	bf1d6b2c76eda86159519bf5c427b1fa8f51f733 (patch)
tree	030fb04185223a03254aa1092750d9c1b819618c /drivers/staging/ccree/ssi_aead.c
parent	e63a94f12b5fc67b2b92a89d4058e7a9021e900e (diff)
parent	28eb51f7468a43769bd9dca19a54d97ec7a447ed (diff)
Merge tag 'staging-4.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging
Pull staging/IIO driver updates from Greg KH:
 "Here is the big staging and IIO driver update for 4.14-rc1.

  Lots of staging driver fixes and cleanups, including some reorganizing
  of the lustre header files to try to impose some sanity on what is,
  and what is not, the uapi for that filesystem.

  There are some tty core changes in here as well, as the speakup
  drivers need them, and that's ok with me, they are sane and the
  speakup code is getting nicer because of it.

  There is also the addition of the obligatory new wifi driver, just
  because it has been a release or two since we added our last one...

  Other than that, lots and lots of small coding style fixes, as usual.

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'staging-4.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging: (612 commits)
  staging:rtl8188eu:core Fix remove unneccessary else block
  staging: typec: fusb302: make structure fusb302_psy_desc static
  staging: unisys: visorbus: make two functions static
  staging: fsl-dpaa2/eth: fix off-by-one FD ctrl bitmaks
  staging: r8822be: Simplify deinit_priv()
  staging: r8822be: Remove some dead code
  staging: vboxvideo: Use CONFIG_DRM_KMS_FB_HELPER to check for fbdefio availability
  staging:rtl8188eu Fix comparison to NULL
  staging: rts5208: rename mmc_ddr_tunning_rx_cmd to mmc_ddr_tuning_rx_cmd
  Staging: Pi433: style fix - tabs and spaces
  staging: pi433: fix spelling mistake: "preample" -> "preamble"
  staging:rtl8188eu:core Fix Code Indent
  staging: typec: fusb302: Export current-limit through a power_supply class dev
  staging: typec: fusb302: Add support for USB2 charger detection through extcon
  staging: typec: fusb302: Use client->irq as irq if set
  staging: typec: fusb302: Get max snk mv/ma/mw from device-properties
  staging: typec: fusb302: Set max supply voltage to 5V
  staging: typec: tcpm: Add get_current_limit tcpc_dev callback
  staging:rtl8188eu Use __func__ instead of function name
  staging: lustre: coding style fixes found by checkpatch.pl
  ...
Diffstat (limited to 'drivers/staging/ccree/ssi_aead.c')
-rw-r--r--	drivers/staging/ccree/ssi_aead.c	246
1 file changed, 132 insertions(+), 114 deletions(-)
diff --git a/drivers/staging/ccree/ssi_aead.c b/drivers/staging/ccree/ssi_aead.c
index 1fc0b05ea0d5..5abe6b24ff8c 100644
--- a/drivers/staging/ccree/ssi_aead.c
+++ b/drivers/staging/ccree/ssi_aead.c
@@ -36,7 +36,6 @@
#include "ssi_hash.h"
#include "ssi_sysfs.h"
#include "ssi_sram_mgr.h"
-#include "ssi_fips_local.h"
#define template_aead template_u.aead
@@ -57,22 +56,26 @@ struct ssi_aead_handle {
struct list_head aead_list;
};
+struct cc_hmac_s {
+ u8 *padded_authkey;
+ u8 *ipad_opad; /* IPAD, OPAD*/
+ dma_addr_t padded_authkey_dma_addr;
+ dma_addr_t ipad_opad_dma_addr;
+};
+
+struct cc_xcbc_s {
+ u8 *xcbc_keys; /* K1,K2,K3 */
+ dma_addr_t xcbc_keys_dma_addr;
+};
+
struct ssi_aead_ctx {
struct ssi_drvdata *drvdata;
u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
u8 *enckey;
dma_addr_t enckey_dma_addr;
union {
- struct {
- u8 *padded_authkey;
- u8 *ipad_opad; /* IPAD, OPAD*/
- dma_addr_t padded_authkey_dma_addr;
- dma_addr_t ipad_opad_dma_addr;
- } hmac;
- struct {
- u8 *xcbc_keys; /* K1,K2,K3 */
- dma_addr_t xcbc_keys_dma_addr;
- } xcbc;
+ struct cc_hmac_s hmac;
+ struct cc_xcbc_s xcbc;
} auth_state;
unsigned int enc_keylen;
unsigned int auth_keylen;
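
Pulling the two anonymous structs out of the union into the named types cc_hmac_s and cc_xcbc_s is what lets the hunks below replace the long ctx->auth_state.* chains with a short local pointer, e.g. (the pattern used throughout the rest of the diff):

    struct cc_hmac_s *hmac = &ctx->auth_state.hmac;

    /* hmac->ipad_opad instead of ctx->auth_state.hmac.ipad_opad */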
@@ -93,46 +96,50 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
SSI_LOG_DEBUG("Clearing context @%p for %s\n",
- crypto_aead_ctx(tfm), crypto_tfm_alg_name(&(tfm->base)));
+ crypto_aead_ctx(tfm), crypto_tfm_alg_name(&tfm->base));
dev = &ctx->drvdata->plat_dev->dev;
/* Unmap enckey buffer */
if (ctx->enckey) {
dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey, ctx->enckey_dma_addr);
- SSI_LOG_DEBUG("Freed enckey DMA buffer enckey_dma_addr=0x%llX\n",
- (unsigned long long)ctx->enckey_dma_addr);
+ SSI_LOG_DEBUG("Freed enckey DMA buffer enckey_dma_addr=%pad\n",
+ ctx->enckey_dma_addr);
ctx->enckey_dma_addr = 0;
ctx->enckey = NULL;
}
if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */
- if (ctx->auth_state.xcbc.xcbc_keys) {
+ struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
+
+ if (xcbc->xcbc_keys) {
dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
- ctx->auth_state.xcbc.xcbc_keys,
- ctx->auth_state.xcbc.xcbc_keys_dma_addr);
+ xcbc->xcbc_keys,
+ xcbc->xcbc_keys_dma_addr);
}
- SSI_LOG_DEBUG("Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=0x%llX\n",
- (unsigned long long)ctx->auth_state.xcbc.xcbc_keys_dma_addr);
- ctx->auth_state.xcbc.xcbc_keys_dma_addr = 0;
- ctx->auth_state.xcbc.xcbc_keys = NULL;
+ SSI_LOG_DEBUG("Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
+ xcbc->xcbc_keys_dma_addr);
+ xcbc->xcbc_keys_dma_addr = 0;
+ xcbc->xcbc_keys = NULL;
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
- if (ctx->auth_state.hmac.ipad_opad) {
+ struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
+
+ if (hmac->ipad_opad) {
dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
- ctx->auth_state.hmac.ipad_opad,
- ctx->auth_state.hmac.ipad_opad_dma_addr);
- SSI_LOG_DEBUG("Freed ipad_opad DMA buffer ipad_opad_dma_addr=0x%llX\n",
- (unsigned long long)ctx->auth_state.hmac.ipad_opad_dma_addr);
- ctx->auth_state.hmac.ipad_opad_dma_addr = 0;
- ctx->auth_state.hmac.ipad_opad = NULL;
+ hmac->ipad_opad,
+ hmac->ipad_opad_dma_addr);
+ SSI_LOG_DEBUG("Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
+ hmac->ipad_opad_dma_addr);
+ hmac->ipad_opad_dma_addr = 0;
+ hmac->ipad_opad = NULL;
}
- if (ctx->auth_state.hmac.padded_authkey) {
+ if (hmac->padded_authkey) {
dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
- ctx->auth_state.hmac.padded_authkey,
- ctx->auth_state.hmac.padded_authkey_dma_addr);
- SSI_LOG_DEBUG("Freed padded_authkey DMA buffer padded_authkey_dma_addr=0x%llX\n",
- (unsigned long long)ctx->auth_state.hmac.padded_authkey_dma_addr);
- ctx->auth_state.hmac.padded_authkey_dma_addr = 0;
- ctx->auth_state.hmac.padded_authkey = NULL;
+ hmac->padded_authkey,
+ hmac->padded_authkey_dma_addr);
+ SSI_LOG_DEBUG("Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
+ hmac->padded_authkey_dma_addr);
+ hmac->padded_authkey_dma_addr = 0;
+ hmac->padded_authkey = NULL;
}
}
}
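
The prints above drop the unsigned long long casts in favour of %pad, the printk extension for dma_addr_t. Per Documentation/printk-formats.txt, %pad consumes its argument by reference, so a minimal usage sketch (dev, size, buf and handle are hypothetical names) looks like:

    dma_addr_t handle;
    void *buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);

    /* %pad dereferences the argument: pass &handle, not the value */
    dev_dbg(dev, "coherent buffer at %pad\n", &handle);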
@@ -144,9 +151,7 @@ static int ssi_aead_init(struct crypto_aead *tfm)
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct ssi_crypto_alg *ssi_alg =
container_of(alg, struct ssi_crypto_alg, aead_alg);
- SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx, crypto_tfm_alg_name(&(tfm->base)));
-
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
+ SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx, crypto_tfm_alg_name(&tfm->base));
/* Initialize modes in instance */
ctx->cipher_mode = ssi_alg->cipher_mode;
@@ -158,7 +163,7 @@ static int ssi_aead_init(struct crypto_aead *tfm)
/* Allocate key buffer, cache line aligned */
ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
- &ctx->enckey_dma_addr, GFP_KERNEL);
+ &ctx->enckey_dma_addr, GFP_KERNEL);
if (!ctx->enckey) {
SSI_LOG_ERR("Failed allocating key buffer\n");
goto init_failed;
@@ -168,31 +173,42 @@ static int ssi_aead_init(struct crypto_aead *tfm)
/* Set default authlen value */
if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */
+ struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
+ const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;
+
/* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
/* (and temporary for user key - up to 256b) */
- ctx->auth_state.xcbc.xcbc_keys = dma_alloc_coherent(dev,
- CC_AES_128_BIT_KEY_SIZE * 3,
- &ctx->auth_state.xcbc.xcbc_keys_dma_addr, GFP_KERNEL);
- if (!ctx->auth_state.xcbc.xcbc_keys) {
+ xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
+ &xcbc->xcbc_keys_dma_addr,
+ GFP_KERNEL);
+ if (!xcbc->xcbc_keys) {
SSI_LOG_ERR("Failed allocating buffer for XCBC keys\n");
goto init_failed;
}
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
+ struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
+ const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
+ dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;
+
/* Allocate dma-coherent buffer for IPAD + OPAD */
- ctx->auth_state.hmac.ipad_opad = dma_alloc_coherent(dev,
- 2 * MAX_HMAC_DIGEST_SIZE,
- &ctx->auth_state.hmac.ipad_opad_dma_addr, GFP_KERNEL);
- if (!ctx->auth_state.hmac.ipad_opad) {
+ hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
+ &hmac->ipad_opad_dma_addr,
+ GFP_KERNEL);
+
+ if (!hmac->ipad_opad) {
SSI_LOG_ERR("Failed allocating IPAD/OPAD buffer\n");
goto init_failed;
}
+
SSI_LOG_DEBUG("Allocated authkey buffer in context ctx->authkey=@%p\n",
- ctx->auth_state.hmac.ipad_opad);
+ hmac->ipad_opad);
+
+ hmac->padded_authkey = dma_alloc_coherent(dev,
+ MAX_HMAC_BLOCK_SIZE,
+ pkey_dma,
+ GFP_KERNEL);
- ctx->auth_state.hmac.padded_authkey = dma_alloc_coherent(dev,
- MAX_HMAC_BLOCK_SIZE,
- &ctx->auth_state.hmac.padded_authkey_dma_addr, GFP_KERNEL);
- if (!ctx->auth_state.hmac.padded_authkey) {
+ if (!hmac->padded_authkey) {
SSI_LOG_ERR("failed to allocate padded_authkey\n");
goto init_failed;
}
@@ -223,7 +239,7 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c
if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
- ctx->authsize) != 0) {
+ ctx->authsize) != 0) {
SSI_LOG_DEBUG("Payload authentication failure, "
"(auth-size=%d, cipher=%d).\n",
ctx->authsize, ctx->cipher_mode);
@@ -236,8 +252,8 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c
} else { /*ENCRYPT*/
if (unlikely(areq_ctx->is_icv_fragmented))
ssi_buffer_mgr_copy_scatterlist_portion(
- areq_ctx->mac_buf, areq_ctx->dstSgl, areq->cryptlen + areq_ctx->dstOffset,
- areq->cryptlen + areq_ctx->dstOffset + ctx->authsize, SSI_SG_FROM_BUF);
+ areq_ctx->mac_buf, areq_ctx->dst_sgl, areq->cryptlen + areq_ctx->dst_offset,
+ areq->cryptlen + areq_ctx->dst_offset + ctx->authsize, SSI_SG_FROM_BUF);
/* If an IV was generated, copy it back to the user provided buffer. */
if (areq_ctx->backup_giv) {
@@ -292,12 +308,13 @@ static int xcbc_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
{
- unsigned int hmacPadConst[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
+ unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
unsigned int digest_ofs = 0;
unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
+ struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
int idx = 0;
int i;
@@ -325,7 +342,7 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
/* Prepare ipad key */
hw_desc_init(&desc[idx]);
- set_xor_val(&desc[idx], hmacPadConst[i]);
+ set_xor_val(&desc[idx], hmac_pad_const[i]);
set_cipher_mode(&desc[idx], hash_mode);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
@@ -334,7 +351,7 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
/* Perform HASH update */
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
- ctx->auth_state.hmac.padded_authkey_dma_addr,
+ hmac->padded_authkey_dma_addr,
SHA256_BLOCK_SIZE, NS_BIT);
set_cipher_mode(&desc[idx], hash_mode);
set_xor_active(&desc[idx]);
@@ -345,8 +362,8 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hash_mode);
set_dout_dlli(&desc[idx],
- (ctx->auth_state.hmac.ipad_opad_dma_addr +
- digest_ofs), digest_size, NS_BIT, 0);
+ (hmac->ipad_opad_dma_addr + digest_ofs),
+ digest_size, NS_BIT, 0);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
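
For context, hmac_pad_const[] carries the standard RFC 2104 HMAC pad words (0x36 bytes for the inner pad, 0x5c for the outer), and the loop's two passes store the resulting partial digests back to back at ipad_opad_dma_addr. A sketch of the equivalent software precomputation (plain C, padded_key is a hypothetical name):

    u8 ipad[SHA256_BLOCK_SIZE], opad[SHA256_BLOCK_SIZE];
    int i;

    for (i = 0; i < SHA256_BLOCK_SIZE; i++) {
        ipad[i] = padded_key[i] ^ 0x36; /* HMAC_IPAD_CONST byte */
        opad[i] = padded_key[i] ^ 0x5c; /* HMAC_OPAD_CONST byte */
    }
    /* hash each pad once; later requests resume from the saved states */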
@@ -361,7 +378,7 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
{
SSI_LOG_DEBUG("enc_keylen=%u authkeylen=%u\n",
- ctx->enc_keylen, ctx->auth_keylen);
+ ctx->enc_keylen, ctx->auth_keylen);
switch (ctx->auth_mode) {
case DRV_HASH_SHA1:
@@ -385,7 +402,7 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
if (unlikely(ctx->flow_mode == S_DIN_to_DES)) {
if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
SSI_LOG_ERR("Invalid cipher(3DES) key size: %u\n",
- ctx->enc_keylen);
+ ctx->enc_keylen);
return -EINVAL;
}
} else { /* Default assumed to be AES ciphers */
@@ -393,7 +410,7 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
(ctx->enc_keylen != AES_KEYSIZE_192) &&
(ctx->enc_keylen != AES_KEYSIZE_256)) {
SSI_LOG_ERR("Invalid cipher(AES) key size: %u\n",
- ctx->enc_keylen);
+ ctx->enc_keylen);
return -EINVAL;
}
}
@@ -536,9 +553,9 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
int seq_len = 0, rc = -EINVAL;
SSI_LOG_DEBUG("Setting key in context @%p for %s. key=%p keylen=%u\n",
- ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
+ ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)),
+ key, keylen);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
/* STAT_PHASE_0: Init and sanity checks */
if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
@@ -654,7 +671,6 @@ static int ssi_aead_setauthsize(
{
struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
/* Unsupported auth. sizes */
if ((authsize == 0) ||
(authsize > crypto_aead_maxauthsize(authenc))) {
@@ -669,7 +685,7 @@ static int ssi_aead_setauthsize(
#if SSI_CC_HAS_AES_CCM
static int ssi_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+ unsigned int authsize)
{
switch (authsize) {
case 8:
@@ -684,7 +700,7 @@ static int ssi_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
}
static int ssi_ccm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+ unsigned int authsize)
{
switch (authsize) {
case 4:
@@ -762,11 +778,11 @@ ssi_aead_process_authenc_data_desc(
{
struct scatterlist *cipher =
(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
- areq_ctx->dstSgl : areq_ctx->srcSgl;
+ areq_ctx->dst_sgl : areq_ctx->src_sgl;
unsigned int offset =
(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
- areq_ctx->dstOffset : areq_ctx->srcOffset;
+ areq_ctx->dst_offset : areq_ctx->src_offset;
SSI_LOG_DEBUG("AUTHENC: SRC/DST buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
@@ -828,11 +844,11 @@ ssi_aead_process_cipher_data_desc(
SSI_LOG_DEBUG("CIPHER: SRC/DST buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
- (sg_dma_address(areq_ctx->srcSgl) +
- areq_ctx->srcOffset), areq_ctx->cryptlen, NS_BIT);
+ (sg_dma_address(areq_ctx->src_sgl) +
+ areq_ctx->src_offset), areq_ctx->cryptlen, NS_BIT);
set_dout_dlli(&desc[idx],
- (sg_dma_address(areq_ctx->dstSgl) +
- areq_ctx->dstOffset),
+ (sg_dma_address(areq_ctx->dst_sgl) +
+ areq_ctx->dst_offset),
areq_ctx->cryptlen, NS_BIT, 0);
set_flow_mode(&desc[idx], flow_mode);
break;
@@ -1168,8 +1184,8 @@ static inline void ssi_aead_load_mlli_to_sram(
(req_ctx->data_buff_type == SSI_DMA_BUF_MLLI) ||
!req_ctx->is_single_pass)) {
SSI_LOG_DEBUG("Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
- (unsigned int)ctx->drvdata->mlli_sram_addr,
- req_ctx->mlli_params.mlli_len);
+ (unsigned int)ctx->drvdata->mlli_sram_addr,
+ req_ctx->mlli_params.mlli_len);
/* Copy MLLI table host-to-sram */
hw_desc_init(&desc[*seq_size]);
set_din_type(&desc[*seq_size], DMA_DLLI,
@@ -1313,7 +1329,8 @@ ssi_aead_xcbc_authenc(
}
static int validate_data_size(struct ssi_aead_ctx *ctx,
- enum drv_crypto_direction direct, struct aead_request *req)
+ enum drv_crypto_direction direct,
+ struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int assoclen = req->assoclen;
@@ -1321,7 +1338,7 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
(req->cryptlen - ctx->authsize) : req->cryptlen;
if (unlikely((direct == DRV_CRYPTO_DIRECTION_DECRYPT) &&
- (req->cryptlen < ctx->authsize)))
+ (req->cryptlen < ctx->authsize)))
goto data_size_err;
areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
@@ -1329,7 +1346,7 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
switch (ctx->flow_mode) {
case S_DIN_to_AES:
if (unlikely((ctx->cipher_mode == DRV_CIPHER_CBC) &&
- !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE)))
+ !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE)))
goto data_size_err;
if (ctx->cipher_mode == DRV_CIPHER_CCM)
break;
@@ -1365,27 +1382,27 @@ data_size_err:
}
#if SSI_CC_HAS_AES_CCM
-static unsigned int format_ccm_a0(u8 *pA0Buff, u32 headerSize)
+static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
{
unsigned int len = 0;
- if (headerSize == 0)
+ if (header_size == 0)
return 0;
- if (headerSize < ((1UL << 16) - (1UL << 8))) {
+ if (header_size < ((1UL << 16) - (1UL << 8))) {
len = 2;
- pA0Buff[0] = (headerSize >> 8) & 0xFF;
- pA0Buff[1] = headerSize & 0xFF;
+ pa0_buff[0] = (header_size >> 8) & 0xFF;
+ pa0_buff[1] = header_size & 0xFF;
} else {
len = 6;
- pA0Buff[0] = 0xFF;
- pA0Buff[1] = 0xFE;
- pA0Buff[2] = (headerSize >> 24) & 0xFF;
- pA0Buff[3] = (headerSize >> 16) & 0xFF;
- pA0Buff[4] = (headerSize >> 8) & 0xFF;
- pA0Buff[5] = headerSize & 0xFF;
+ pa0_buff[0] = 0xFF;
+ pa0_buff[1] = 0xFE;
+ pa0_buff[2] = (header_size >> 24) & 0xFF;
+ pa0_buff[3] = (header_size >> 16) & 0xFF;
+ pa0_buff[4] = (header_size >> 8) & 0xFF;
+ pa0_buff[5] = header_size & 0xFF;
}
return len;
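
Beyond the renames, format_ccm_a0() implements the RFC 3610 associated-data length encoding: lengths below 0xff00 (2^16 - 2^8) fit in two big-endian bytes, anything larger gets the 0xff 0xfe marker followed by four big-endian bytes. Two worked examples (illustrative values):

    len = format_ccm_a0(a0, 0x14);
    /* a0 = { 0x00, 0x14 }, len = 2 */

    len = format_ccm_a0(a0, 0x10000);
    /* a0 = { 0xff, 0xfe, 0x00, 0x01, 0x00, 0x00 }, len = 6 */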
@@ -1557,7 +1574,7 @@ static int config_ccm_adata(struct aead_request *req)
/* taken from crypto/ccm.c */
/* 2 <= L <= 8, so 1 <= L' <= 7. */
- if (2 > l || l > 8) {
+ if (l < 2 || l > 8) {
SSI_LOG_ERR("illegal iv value %X\n", req->iv[0]);
return -EINVAL;
}
@@ -1848,8 +1865,9 @@ static inline void ssi_aead_dump_gcm(
SSI_LOG_DEBUG("%s\n", title);
}
- SSI_LOG_DEBUG("cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n", \
- ctx->cipher_mode, ctx->authsize, ctx->enc_keylen, req->assoclen, req_ctx->cryptlen);
+ SSI_LOG_DEBUG("cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n",
+ ctx->cipher_mode, ctx->authsize, ctx->enc_keylen,
+ req->assoclen, req_ctx->cryptlen);
if (ctx->enckey)
dump_byte_array("mac key", ctx->enckey, 16);
@@ -1864,7 +1882,7 @@ static inline void ssi_aead_dump_gcm(
dump_byte_array("mac_buf", req_ctx->mac_buf, AES_BLOCK_SIZE);
- dump_byte_array("gcm_len_block", req_ctx->gcm_len_block.lenA, AES_BLOCK_SIZE);
+ dump_byte_array("gcm_len_block", req_ctx->gcm_len_block.len_a, AES_BLOCK_SIZE);
if (req->src && req->cryptlen)
dump_byte_array("req->src", sg_virt(req->src), req->cryptlen + req->assoclen);
@@ -1886,7 +1904,7 @@ static int config_gcm_context(struct aead_request *req)
(req->cryptlen - ctx->authsize);
__be32 counter = cpu_to_be32(2);
- SSI_LOG_DEBUG("config_gcm_context() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n", cryptlen, req->assoclen, ctx->authsize);
+ SSI_LOG_DEBUG("%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n", __func__, cryptlen, req->assoclen, ctx->authsize);
memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
@@ -1903,16 +1921,16 @@ static int config_gcm_context(struct aead_request *req)
__be64 temp64;
temp64 = cpu_to_be64(req->assoclen * 8);
- memcpy(&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64));
+ memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = cpu_to_be64(cryptlen * 8);
- memcpy(&req_ctx->gcm_len_block.lenC, &temp64, 8);
+ memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
} else { //rfc4543=> all data(AAD,IV,Plain) are considered additional data that is nothing is encrypted.
__be64 temp64;
temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
- memcpy(&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64));
+ memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = 0;
- memcpy(&req_ctx->gcm_len_block.lenC, &temp64, 8);
+ memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
}
return 0;
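
The two branches fill GCM's closing 16-byte length block, len(A) || len(C), each a big-endian 64-bit bit count; in the rfc4543 (GMAC) case all data counts as AAD, so len(C) is zero. As a standalone sketch (block[] is a hypothetical 16-byte buffer):

    __be64 len_a = cpu_to_be64((u64)assoclen * 8); /* AAD length in bits */
    __be64 len_c = cpu_to_be64((u64)cryptlen * 8); /* ciphertext length in bits */

    memcpy(block, &len_a, 8);
    memcpy(block + 8, &len_c, 8);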
@@ -1944,16 +1962,16 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
struct ssi_crypto_req ssi_req = {};
SSI_LOG_DEBUG("%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
- ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"), ctx, req, req->iv,
- sg_virt(req->src), req->src->offset, sg_virt(req->dst), req->dst->offset, req->cryptlen);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
+ ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"),
+ ctx, req, req->iv, sg_virt(req->src), req->src->offset,
+ sg_virt(req->dst), req->dst->offset, req->cryptlen);
/* STAT_PHASE_0: Init and sanity checks */
/* Check data length according to mode */
if (unlikely(validate_data_size(ctx, direct, req) != 0)) {
SSI_LOG_ERR("Unsupported crypt/assoc len %d/%d.\n",
- req->cryptlen, req->assoclen);
+ req->cryptlen, req->assoclen);
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
return -EINVAL;
}
@@ -1976,7 +1994,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, CTR_RFC3686_NONCE_SIZE);
if (!areq_ctx->backup_giv) /*User none-generated IV*/
memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
- req->iv, CTR_RFC3686_IV_SIZE);
+ req->iv, CTR_RFC3686_IV_SIZE);
/* Initialize counter portion of counter block */
*(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
@@ -2198,7 +2216,7 @@ static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
int rc = 0;
- SSI_LOG_DEBUG("ssi_rfc4106_gcm_setkey() keylen %d, key %p\n", keylen, key);
+ SSI_LOG_DEBUG("%s() keylen %d, key %p\n", __func__, keylen, key);
if (keylen < 4)
return -EINVAL;
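
Switching the hard-coded function name to __func__ keeps the log prefix accurate across renames; the same one-line pattern fits every debug print in the driver:

    SSI_LOG_DEBUG("%s() keylen %d, key %p\n", __func__, keylen, key);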
@@ -2216,7 +2234,7 @@ static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
int rc = 0;
- SSI_LOG_DEBUG("ssi_rfc4543_gcm_setkey() keylen %d, key %p\n", keylen, key);
+ SSI_LOG_DEBUG("%s() keylen %d, key %p\n", __func__, keylen, key);
if (keylen < 4)
return -EINVAL;
@@ -2230,7 +2248,7 @@ static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
}
static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+ unsigned int authsize)
{
switch (authsize) {
case 4:
@@ -2249,9 +2267,9 @@ static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
}
static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+ unsigned int authsize)
{
- SSI_LOG_DEBUG("ssi_rfc4106_gcm_setauthsize() authsize %d\n", authsize);
+ SSI_LOG_DEBUG("authsize %d\n", authsize);
switch (authsize) {
case 8:
@@ -2268,7 +2286,7 @@ static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
static int ssi_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- SSI_LOG_DEBUG("ssi_rfc4543_gcm_setauthsize() authsize %d\n", authsize);
+ SSI_LOG_DEBUG("authsize %d\n", authsize);
if (authsize != 16)
return -EINVAL;
@@ -2641,7 +2659,7 @@ static struct ssi_crypto_alg *ssi_aead_create_alg(struct ssi_alg_template *templ
struct ssi_crypto_alg *t_alg;
struct aead_alg *alg;
- t_alg = kzalloc(sizeof(struct ssi_crypto_alg), GFP_KERNEL);
+ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
if (!t_alg) {
SSI_LOG_ERR("failed to allocate t_alg\n");
return ERR_PTR(-ENOMEM);
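
sizeof(*t_alg) is the allocation form the kernel coding style recommends over sizeof(struct ssi_crypto_alg): the size is derived from the pointer, so it stays correct if the pointed-to type is ever changed. The general pattern (struct foo is illustrative):

    struct foo *p;

    p = kzalloc(sizeof(*p), GFP_KERNEL); /* size tracks p's type */
    if (!p)
        return ERR_PTR(-ENOMEM);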
@@ -2696,7 +2714,7 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
int rc = -ENOMEM;
int alg;
- aead_handle = kmalloc(sizeof(struct ssi_aead_handle), GFP_KERNEL);
+ aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
if (!aead_handle) {
rc = -ENOMEM;
goto fail0;
@@ -2720,14 +2738,14 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
SSI_LOG_ERR("%s alg allocation failed\n",
- aead_algs[alg].driver_name);
+ aead_algs[alg].driver_name);
goto fail1;
}
t_alg->drvdata = drvdata;
rc = crypto_register_aead(&t_alg->aead_alg);
if (unlikely(rc != 0)) {
SSI_LOG_ERR("%s alg registration failed\n",
- t_alg->aead_alg.base.cra_driver_name);
+ t_alg->aead_alg.base.cra_driver_name);
goto fail2;
} else {
list_add_tail(&t_alg->entry, &aead_handle->aead_list);