Diffstat (limited to 'drivers/crypto/ccree/cc_aead.c')
-rw-r--r--   drivers/crypto/ccree/cc_aead.c   118
1 file changed, 93 insertions(+), 25 deletions(-)
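The hunks below route the associated-data length through the driver's per-request context (areq_ctx->assoclen) instead of reading req->assoclen directly, so the RFC4309/RFC4543 wrappers can shrink it locally. For reference, here is a minimal, hypothetical caller sketch (not part of this patch) using the generic kernel AEAD API against "gcm(aes)", one of the transforms ccree registers; the function name, buffer layout and sizes are illustrative assumptions. It shows where the caller-supplied assoclen enters via aead_request_set_ad().

/*
 * Hypothetical caller sketch, not part of this patch: a synchronous
 * AEAD encryption through the kernel crypto API.  The value passed to
 * aead_request_set_ad() becomes req->assoclen, which the hunks below
 * snapshot into areq_ctx->assoclen at the start of each operation.
 */
#include <crypto/aead.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_gcm_encrypt(const u8 *key, unsigned int keylen,
			       u8 *iv, u8 *buf, unsigned int assoclen,
			       unsigned int ptextlen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (err)
		goto free_tfm;

	err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto free_tfm;
	}

	/* buf holds AAD || plaintext, with room for the 16-byte tag */
	sg_init_one(&sg, buf, assoclen + ptextlen + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				  CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);	/* becomes req->assoclen */
	aead_request_set_crypt(req, &sg, &sg, ptextlen, iv);

	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
free_tfm:
	crypto_free_aead(tfm);
	return err;
}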
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index a3527c00b29a..7aa4cbe19a86 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -23,12 +23,8 @@
#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
-#define AES_CCM_RFC4309_NONCE_SIZE 3
#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
-/* Value of each ICV_CMP byte (of 8) in case of success */
-#define ICV_VERIF_OK 0x01
-
struct cc_aead_handle {
cc_sram_addr_t sram_workspace_addr;
struct list_head aead_list;
@@ -220,6 +216,10 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ /* BACKLOG notification */
+ if (err == -EINPROGRESS)
+ goto done;
+
cc_unmap_aead_request(dev, areq);
/* Restore ordinary iv pointer */
@@ -424,7 +424,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
/* This function prepares the user key so it can be passed to the HMAC processing
 * (copy to an internal buffer, or hash it if the key is longer than the block size).
 */
-static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
+static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
unsigned int keylen)
{
dma_addr_t key_dma_addr = 0;
@@ -437,6 +437,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
unsigned int hashmode;
unsigned int idx = 0;
int rc = 0;
+ u8 *key = NULL;
struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
dma_addr_t padded_authkey_dma_addr =
ctx->auth_state.hmac.padded_authkey_dma_addr;
@@ -455,11 +456,17 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
}
if (keylen != 0) {
+
+ key = kmemdup(authkey, keylen, GFP_KERNEL);
+ if (!key)
+ return -ENOMEM;
+
key_dma_addr = dma_map_single(dev, (void *)key, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
+ kzfree(key);
return -ENOMEM;
}
if (keylen > blocksize) {
@@ -542,6 +549,8 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
if (key_dma_addr)
dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
+ kzfree(key);
+
return rc;
}
@@ -650,6 +659,39 @@ setkey_error:
return rc;
}
+static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ u32 flags;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto badkey;
+
+ err = -EINVAL;
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ goto badkey;
+
+ flags = crypto_aead_get_flags(aead);
+ err = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(err)) {
+ crypto_aead_set_flags(aead, flags);
+ goto out;
+ }
+
+ err = cc_aead_setkey(aead, key, keylen);
+
+out:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+}
+
static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
@@ -731,7 +773,7 @@ static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
dev_dbg(dev, "ASSOC buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
- areq->assoclen, NS_BIT);
+ areq_ctx->assoclen, NS_BIT);
set_flow_mode(&desc[idx], flow_mode);
if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
areq_ctx->cryptlen > 0)
@@ -1080,9 +1122,11 @@ static void cc_proc_header_desc(struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int idx = *seq_size;
+
/* Hash associated data */
- if (req->assoclen > 0)
+ if (areq_ctx->assoclen > 0)
cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
/* Hash IV */
@@ -1159,9 +1203,9 @@ static void cc_mlli_to_sram(struct aead_request *req,
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+ if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
- !req_ctx->is_single_pass) {
+ !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
(unsigned int)ctx->drvdata->mlli_sram_addr,
req_ctx->mlli_params.mlli_len);
@@ -1310,7 +1354,7 @@ static int validate_data_size(struct cc_aead_ctx *ctx,
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- unsigned int assoclen = req->assoclen;
+ unsigned int assoclen = areq_ctx->assoclen;
unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
(req->cryptlen - ctx->authsize) : req->cryptlen;
@@ -1469,7 +1513,7 @@ static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
idx++;
/* process assoc data */
- if (req->assoclen > 0) {
+ if (req_ctx->assoclen > 0) {
cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
} else {
hw_desc_init(&desc[idx]);
@@ -1561,7 +1605,7 @@ static int config_ccm_adata(struct aead_request *req)
* NIST Special Publication 800-38C
*/
*b0 |= (8 * ((m - 2) / 2));
- if (req->assoclen > 0)
+ if (req_ctx->assoclen > 0)
*b0 |= 64; /* Enable bit 6 if Adata exists. */
rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
@@ -1572,7 +1616,7 @@ static int config_ccm_adata(struct aead_request *req)
/* END of "taken from crypto/ccm.c" */
/* l(a) - size of associated data. */
- req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
+ req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
req->iv[15] = 1;
@@ -1604,7 +1648,7 @@ static void cc_proc_rfc4309_ccm(struct aead_request *req)
memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
CCM_BLOCK_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
- req->assoclen -= CCM_BLOCK_IV_SIZE;
+ areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
}
static void cc_set_ghash_desc(struct aead_request *req,
@@ -1812,7 +1856,7 @@ static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
// for gcm and rfc4106.
cc_set_ghash_desc(req, desc, seq_size);
/* process(ghash) assoc data */
- if (req->assoclen > 0)
+ if (req_ctx->assoclen > 0)
cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
cc_set_gctr_desc(req, desc, seq_size);
/* process(gctr+ghash) */
@@ -1836,8 +1880,8 @@ static int config_gcm_context(struct aead_request *req)
(req->cryptlen - ctx->authsize);
__be32 counter = cpu_to_be32(2);
- dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
- __func__, cryptlen, req->assoclen, ctx->authsize);
+ dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
+ __func__, cryptlen, req_ctx->assoclen, ctx->authsize);
memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
@@ -1853,7 +1897,7 @@ static int config_gcm_context(struct aead_request *req)
if (!req_ctx->plaintext_authenticate_only) {
__be64 temp64;
- temp64 = cpu_to_be64(req->assoclen * 8);
+ temp64 = cpu_to_be64(req_ctx->assoclen * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = cpu_to_be64(cryptlen * 8);
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1863,8 +1907,8 @@ static int config_gcm_context(struct aead_request *req)
*/
__be64 temp64;
- temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
- cryptlen) * 8);
+ temp64 = cpu_to_be64((req_ctx->assoclen +
+ GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = 0;
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1884,7 +1928,7 @@ static void cc_proc_rfc4_gcm(struct aead_request *req)
memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
GCM_BLOCK_RFC4_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
- req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
+ areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
}
static int cc_proc_aead(struct aead_request *req,
@@ -1909,7 +1953,7 @@ static int cc_proc_aead(struct aead_request *req,
/* Check data length according to mode */
if (validate_data_size(ctx, direct, req)) {
dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
- req->cryptlen, req->assoclen);
+ req->cryptlen, areq_ctx->assoclen);
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
return -EINVAL;
}
@@ -2058,8 +2102,11 @@ static int cc_aead_encrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = false;
@@ -2087,8 +2134,11 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
@@ -2106,8 +2156,11 @@ static int cc_aead_decrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = false;
@@ -2133,8 +2186,11 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
@@ -2250,8 +2306,11 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->plaintext_authenticate_only = false;
@@ -2273,11 +2332,14 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
//plaintext is not encrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
cc_proc_rfc4_gcm(req);
@@ -2305,8 +2367,11 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->plaintext_authenticate_only = false;
@@ -2328,11 +2393,14 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
//plaintext is not decrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
cc_proc_rfc4_gcm(req);
@@ -2372,7 +2440,7 @@ static struct cc_alg_template aead_algs[] = {
.driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
.template_aead = {
- .setkey = cc_aead_setkey,
+ .setkey = cc_des3_aead_setkey,
.setauthsize = cc_aead_setauthsize,
.encrypt = cc_aead_encrypt,
.decrypt = cc_aead_decrypt,
@@ -2412,7 +2480,7 @@ static struct cc_alg_template aead_algs[] = {
.driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
.template_aead = {
- .setkey = cc_aead_setkey,
+ .setkey = cc_des3_aead_setkey,
.setauthsize = cc_aead_setauthsize,
.encrypt = cc_aead_encrypt,
.decrypt = cc_aead_decrypt,