Diffstat (limited to 'drivers/crypto/caam/caamalg.c')
-rw-r--r--  drivers/crypto/caam/caamalg.c | 238
1 file changed, 184 insertions(+), 54 deletions(-)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 80ae69f906fb..9eac5099098e 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2016-2018 NXP
+ * Copyright 2016-2019 NXP
*
* Based on talitos crypto API driver.
*
@@ -766,6 +766,27 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
return 0;
}
+static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 tmp[DES3_EDE_EXPKEY_WORDS];
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+
+ if (keylen == DES3_EDE_KEY_SIZE &&
+ __des3_ede_setkey(tmp, &tfm->crt_flags, key, DES3_EDE_KEY_SIZE)) {
+ return -EINVAL;
+ }
+
+ if (!des_ekey(tmp, key) && (crypto_skcipher_get_flags(skcipher) &
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_WEAK_KEY);
+ return -EINVAL;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen);
+}
+
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
@@ -802,6 +823,8 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
* aead_edesc - s/w-extended aead descriptor
* @src_nents: number of segments in input s/w scatterlist
* @dst_nents: number of segments in output s/w scatterlist
+ * @mapped_src_nents: number of segments in input h/w link table
+ * @mapped_dst_nents: number of segments in output h/w link table
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
* @sec4_sg: pointer to h/w link table
@@ -810,6 +833,8 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
struct aead_edesc {
int src_nents;
int dst_nents;
+ int mapped_src_nents;
+ int mapped_dst_nents;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
@@ -820,6 +845,8 @@ struct aead_edesc {
* skcipher_edesc - s/w-extended skcipher descriptor
* @src_nents: number of segments in input s/w scatterlist
* @dst_nents: number of segments in output s/w scatterlist
+ * @mapped_src_nents: number of segments in input h/w link table
+ * @mapped_dst_nents: number of segments in output h/w link table
* @iv_dma: dma address of iv for checking continuity and link table
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
@@ -830,6 +857,8 @@ struct aead_edesc {
struct skcipher_edesc {
int src_nents;
int dst_nents;
+ int mapped_src_nents;
+ int mapped_dst_nents;
dma_addr_t iv_dma;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
@@ -846,7 +875,8 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
if (dst != src) {
if (src_nents)
dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
- dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ if (dst_nents)
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
} else {
dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}
@@ -961,8 +991,9 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
* The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block. This is used e.g. by the CTS mode.
*/
- scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
- ivsize, 0);
+ if (ivsize)
+ scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
+ ivsize, ivsize, 0);
kfree(edesc);
@@ -1023,11 +1054,12 @@ static void init_aead_job(struct aead_request *req,
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
if (all_contig) {
- src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
+ src_dma = edesc->mapped_src_nents ? sg_dma_address(req->src) :
+ 0;
in_options = 0;
} else {
src_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents;
+ sec4_sg_index += edesc->mapped_src_nents;
in_options = LDST_SGF;
}
@@ -1038,8 +1070,11 @@ static void init_aead_job(struct aead_request *req,
out_options = in_options;
if (unlikely(req->src != req->dst)) {
- if (edesc->dst_nents == 1) {
+ if (!edesc->mapped_dst_nents) {
+ dst_dma = 0;
+ } else if (edesc->mapped_dst_nents == 1) {
dst_dma = sg_dma_address(req->dst);
+ out_options = 0;
} else {
dst_dma = edesc->sec4_sg_dma +
sec4_sg_index *
@@ -1183,9 +1218,9 @@ static void init_skcipher_job(struct skcipher_request *req,
int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc = edesc->hw_desc;
u32 *sh_desc;
- u32 out_options = 0;
- dma_addr_t dst_dma, ptr;
- int len;
+ u32 in_options = 0, out_options = 0;
+ dma_addr_t src_dma, dst_dma, ptr;
+ int len, sec4_sg_index = 0;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
@@ -1203,21 +1238,27 @@ static void init_skcipher_job(struct skcipher_request *req,
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
- append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->cryptlen + ivsize,
- LDST_SGF);
+ if (ivsize || edesc->mapped_src_nents > 1) {
+ src_dma = edesc->sec4_sg_dma;
+ sec4_sg_index = edesc->mapped_src_nents + !!ivsize;
+ in_options = LDST_SGF;
+ } else {
+ src_dma = sg_dma_address(req->src);
+ }
+
+ append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);
if (likely(req->src == req->dst)) {
- dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
- out_options = LDST_SGF;
+ dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
+ out_options = in_options;
+ } else if (edesc->mapped_dst_nents == 1) {
+ dst_dma = sg_dma_address(req->dst);
} else {
- if (edesc->dst_nents == 1) {
- dst_dma = sg_dma_address(req->dst);
- } else {
- dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
- sizeof(struct sec4_sg_entry);
- out_options = LDST_SGF;
- }
+ dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
+ sizeof(struct sec4_sg_entry);
+ out_options = LDST_SGF;
}
+
append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
}
@@ -1289,12 +1330,19 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
mapped_src_nents = 0;
}
- mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(jrdev, "unable to map destination\n");
- dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
- return ERR_PTR(-ENOMEM);
+ /* Cover also the case of null (zero length) output data */
+ if (dst_nents) {
+ mapped_dst_nents = dma_map_sg(jrdev, req->dst,
+ dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(jrdev, "unable to map destination\n");
+ dma_unmap_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_dst_nents = 0;
}
}
@@ -1313,6 +1361,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
+ edesc->mapped_src_nents = mapped_src_nents;
+ edesc->mapped_dst_nents = mapped_dst_nents;
edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
desc_bytes;
*all_contig_ptr = !(mapped_src_nents > 1);
@@ -1586,7 +1636,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
struct skcipher_edesc *edesc;
- dma_addr_t iv_dma;
+ dma_addr_t iv_dma = 0;
u8 *iv;
int ivsize = crypto_skcipher_ivsize(skcipher);
int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
@@ -1621,7 +1671,6 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
dev_err(jrdev, "unable to map source\n");
return ERR_PTR(-ENOMEM);
}
-
mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
DMA_FROM_DEVICE);
if (unlikely(!mapped_dst_nents)) {
@@ -1631,7 +1680,10 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
}
}
- sec4_sg_ents = 1 + mapped_src_nents;
+ if (!ivsize && mapped_src_nents == 1)
+ sec4_sg_ents = 0; // no need for an input hw s/g table
+ else
+ sec4_sg_ents = mapped_src_nents + !!ivsize;
dst_sg_idx = sec4_sg_ents;
sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
@@ -1650,39 +1702,48 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
+ edesc->mapped_src_nents = mapped_src_nents;
+ edesc->mapped_dst_nents = mapped_dst_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
desc_bytes);
/* Make sure IV is located in a DMAable area */
- iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
- memcpy(iv, req->iv, ivsize);
+ if (ivsize) {
+ iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+ memcpy(iv, req->iv, ivsize);
+
+ iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents,
+ dst_nents, 0, 0, 0, 0);
+ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
- iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, iv_dma)) {
- dev_err(jrdev, "unable to map IV\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
- kfree(edesc);
- return ERR_PTR(-ENOMEM);
+ dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
}
-
- dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
- sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);
+ if (dst_sg_idx)
+ sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg +
+ !!ivsize, 0);
if (mapped_dst_nents > 1) {
sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
edesc->sec4_sg + dst_sg_idx, 0);
}
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
- dev_err(jrdev, "unable to map S/G table\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
- kfree(edesc);
- return ERR_PTR(-ENOMEM);
+ if (sec4_sg_bytes) {
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents,
+ dst_nents, iv_dma, ivsize, 0, 0);
+ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
}
edesc->iv_dma = iv_dma;
@@ -1749,8 +1810,9 @@ static int skcipher_decrypt(struct skcipher_request *req)
* The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block.
*/
- scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
- ivsize, 0);
+ if (ivsize)
+ scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
+ ivsize, ivsize, 0);
/* Create and submit job descriptor*/
init_skcipher_job(req, edesc, false);
@@ -1796,7 +1858,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-3des-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = des_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
@@ -1812,7 +1874,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-des-caam",
.cra_blocksize = DES_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = des_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = DES_KEY_SIZE,
@@ -1878,6 +1940,66 @@ static struct caam_skcipher_alg driver_algs[] = {
},
.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
},
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "ecb-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = des_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = des_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "ecb(arc4)",
+ .cra_driver_name = "ecb-arc4-caam",
+ .cra_blocksize = ARC4_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = ARC4_MIN_KEY_SIZE,
+ .max_keysize = ARC4_MAX_KEY_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB,
+ },
};
static struct caam_aead_alg driver_aeads[] = {
@@ -3337,6 +3459,7 @@ static int __init caam_algapi_init(void)
struct caam_drv_private *priv;
int i = 0, err = 0;
u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
+ u32 arc4_inst;
unsigned int md_limit = SHA512_DIGEST_SIZE;
bool registered = false;
@@ -3381,6 +3504,8 @@ static int __init caam_algapi_init(void)
CHA_ID_LS_DES_SHIFT;
aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+ arc4_inst = (cha_inst & CHA_ID_LS_ARC4_MASK) >>
+ CHA_ID_LS_ARC4_SHIFT;
ccha_inst = 0;
ptha_inst = 0;
} else {
@@ -3397,6 +3522,7 @@ static int __init caam_algapi_init(void)
md_inst = mdha & CHA_VER_NUM_MASK;
ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
+ arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK;
}
/* If MD is present, limit digest size based on LP256 */
@@ -3417,6 +3543,10 @@ static int __init caam_algapi_init(void)
if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
continue;
+ /* Skip ARC4 algorithms if not supported by device */
+ if (!arc4_inst && alg_sel == OP_ALG_ALGSEL_ARC4)
+ continue;
+
/*
* Check support for AES modes not available
* on LP devices.
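
A minimal sketch (not part of the commit) of how a kernel caller could exercise one of the newly registered modes, e.g. "ecb(aes)". The function name and buffer handling are illustrative only; note that ECB takes no IV, which is why the patch guards the IV handling with ivsize checks, and that the buffer must not live on the stack because the CAAM driver DMA-maps it. A caller that specifically wants this driver could request the cra_driver_name "ecb-aes-caam" instead of the generic "ecb(aes)" name.

/* Hypothetical helper, assuming a DMA-able buffer and an AES key. */
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int ecb_aes_encrypt_once(const u8 *key, unsigned int keylen,
				u8 *buf, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	/* ECB has no IV, hence the NULL iv pointer */
	skcipher_request_set_crypt(req, &sg, &sg, len, NULL);

	/* Submit to the async skcipher API and wait for completion */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}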