Diffstat (limited to 'drivers/crypto/ccree/cc_buffer_mgr.c')
-rw-r--r--	drivers/crypto/ccree/cc_buffer_mgr.c	110
1 file changed, 62 insertions(+), 48 deletions(-)
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index a5e041d9d2cf..dc7e0cd51c25 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -52,7 +52,7 @@ static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
 static void cc_copy_mac(struct device *dev, struct aead_request *req,
                         enum cc_sg_cpy_direct dir)
 {
-        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+        struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
         u32 skip = req->assoclen + req->cryptlen;
 
         cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
@@ -224,7 +224,7 @@ static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
         /* Set MLLI size for the bypass operation */
         mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
 
-        dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
+        dev_dbg(dev, "MLLI params: virt_addr=%p dma_addr=%pad mlli_len=0x%X\n",
                 mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
                 mlli_params->mlli_len);
 
@@ -239,7 +239,7 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
 {
         unsigned int index = sgl_data->num_of_buffers;
 
-        dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
+        dev_dbg(dev, "index=%u nents=%u sgl=%p data_len=0x%08X is_last=%d\n",
                 index, nents, sgl, data_len, is_last_table);
         sgl_data->nents[index] = nents;
         sgl_data->entry[index].sgl = sgl;
@@ -258,6 +258,13 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 {
         int ret = 0;
 
+        if (!nbytes) {
+                *mapped_nents = 0;
+                *lbytes = 0;
+                *nents = 0;
+                return 0;
+        }
+
         *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
         if (*nents > max_sg_nents) {
                 *nents = 0;
@@ -267,7 +274,7 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
         }
 
         ret = dma_map_sg(dev, sg, *nents, direction);
-        if (dma_mapping_error(dev, ret)) {
+        if (!ret) {
                 *nents = 0;
                 dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
                 return -ENOMEM;
@@ -291,7 +298,7 @@ cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
                 dev_err(dev, "dma_map_sg() config buffer failed\n");
                 return -ENOMEM;
         }
-        dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+        dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%p offset=%u length=%u\n",
                 &sg_dma_address(&areq_ctx->ccm_adata_sg),
                 sg_page(&areq_ctx->ccm_adata_sg),
                 sg_virt(&areq_ctx->ccm_adata_sg),
@@ -316,7 +323,7 @@ static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
                 dev_err(dev, "dma_map_sg() src buffer failed\n");
                 return -ENOMEM;
         }
-        dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+        dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%p offset=%u length=%u\n",
                 &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
                 sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
                 areq_ctx->buff_sg->length);
@@ -349,12 +356,14 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
                               req_ctx->mlli_params.mlli_dma_addr);
         }
 
-        dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
-        dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
-
         if (src != dst) {
-                dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
-                dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
+                dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
+                dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
+                dev_dbg(dev, "Unmapped req->dst=%p\n", sg_virt(dst));
+                dev_dbg(dev, "Unmapped req->src=%p\n", sg_virt(src));
+        } else {
+                dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
+                dev_dbg(dev, "Unmapped req->src=%p\n", sg_virt(src));
         }
 }
 
@@ -370,6 +379,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
         u32 dummy = 0;
         int rc = 0;
         u32 mapped_nents = 0;
+        int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
 
         req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
         mlli_params->curr_pool = NULL;
@@ -381,18 +391,18 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
                 req_ctx->gen_ctx.iv_dma_addr =
                         dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
                 if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
-                        dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+                        dev_err(dev, "Mapping iv %u B at va=%p for DMA failed\n",
                                 ivsize, info);
                         return -ENOMEM;
                 }
-                dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+                dev_dbg(dev, "Mapped iv %u B at va=%p to dma=%pad\n",
                         ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
         } else {
                 req_ctx->gen_ctx.iv_dma_addr = 0;
         }
 
         /* Map the src SGL */
-        rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+        rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents,
                        LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
         if (rc)
                 goto cipher_exit;
@@ -409,7 +419,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
                 }
         } else {
                 /* Map the dst sg */
-                rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+                rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE,
                                &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
                                &dummy, &mapped_nents);
                 if (rc)
@@ -446,9 +456,10 @@ cipher_exit:
 
 void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 {
-        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+        struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
         unsigned int hw_iv_size = areq_ctx->hw_iv_size;
         struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+        int src_direction = (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
 
         if (areq_ctx->mac_buf_dma_addr) {
                 dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
@@ -495,7 +506,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
         if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
              areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
             (areq_ctx->mlli_params.mlli_virt_addr)) {
-                dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+                dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%p\n",
                         &areq_ctx->mlli_params.mlli_dma_addr,
                         areq_ctx->mlli_params.mlli_virt_addr);
                 dma_pool_free(areq_ctx->mlli_params.curr_pool,
@@ -503,17 +514,15 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
                               areq_ctx->mlli_params.mlli_dma_addr);
         }
 
-        dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
+        dev_dbg(dev, "Unmapping src sgl: req->src=%p areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
                 sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
                 areq_ctx->assoclen, req->cryptlen);
-        dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
-                     DMA_BIDIRECTIONAL);
+        dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
         if (req->src != req->dst) {
-                dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
+                dev_dbg(dev, "Unmapping dst sgl: req->dst=%p\n",
                         sg_virt(req->dst));
-                dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
-                             DMA_BIDIRECTIONAL);
+                dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE);
         }
         if (drvdata->coherent &&
             areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
             req->src == req->dst) {
@@ -537,7 +546,7 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
                             struct buffer_array *sg_data,
                             bool is_last, bool do_chain)
 {
-        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+        struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
         unsigned int hw_iv_size = areq_ctx->hw_iv_size;
         struct device *dev = drvdata_to_dev(drvdata);
         gfp_t flags = cc_gfp_flags(&req->base);
@@ -557,7 +566,7 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
                 dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
                                DMA_BIDIRECTIONAL);
         if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
-                dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+                dev_err(dev, "Mapping iv %u B at va=%p for DMA failed\n",
                         hw_iv_size, req->iv);
                 kfree_sensitive(areq_ctx->gen_ctx.iv);
                 areq_ctx->gen_ctx.iv = NULL;
@@ -565,7 +574,7 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
                 goto chain_iv_exit;
         }
 
-        dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+        dev_dbg(dev, "Mapped iv %u B at va=%p to dma=%pad\n",
                 hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
 
 chain_iv_exit:
@@ -577,7 +586,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
                                struct buffer_array *sg_data,
                                bool is_last, bool do_chain)
 {
-        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+        struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
         int rc = 0;
         int mapped_nents = 0;
         struct device *dev = drvdata_to_dev(drvdata);
@@ -643,7 +652,7 @@ chain_assoc_exit:
 static void cc_prepare_aead_data_dlli(struct aead_request *req,
                                       u32 *src_last_bytes, u32 *dst_last_bytes)
 {
-        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+        struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
         enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
         unsigned int authsize = areq_ctx->req_authsize;
         struct scatterlist *sg;
@@ -669,7 +678,7 @@ static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
                                       u32 *src_last_bytes,
                                       u32 *dst_last_bytes, bool is_last_table)
 {
-        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+        struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
         enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
         unsigned int authsize = areq_ctx->req_authsize;
         struct device *dev = drvdata_to_dev(drvdata);
@@ -781,7 +790,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
                               struct buffer_array *sg_data,
                               bool is_last_table, bool do_chain)
 {
-        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+        struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
         struct device *dev = drvdata_to_dev(drvdata);
         enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
         unsigned int authsize = areq_ctx->req_authsize;
@@ -836,7 +845,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
                 else
                         size_for_map -= authsize;
 
-                rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
+                rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE,
                                &areq_ctx->dst.mapped_nents,
                                LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
                                &dst_mapped_nents);
@@ -886,7 +895,7 @@ chain_data_exit:
 static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
                                       struct aead_request *req)
 {
-        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+        struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
         u32 curr_mlli_size = 0;
 
         if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
@@ -936,7 +945,7 @@ static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
 
 int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 {
-        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+        struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
         struct mlli_params *mlli_params = &areq_ctx->mlli_params;
         struct device *dev = drvdata_to_dev(drvdata);
         struct buffer_array sg_data;
@@ -968,7 +977,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
         dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
                                   DMA_BIDIRECTIONAL);
         if (dma_mapping_error(dev, dma_addr)) {
-                dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+                dev_err(dev, "Mapping mac_buf %u B at va=%p for DMA failed\n",
                         MAX_MAC_SIZE, areq_ctx->mac_buf);
                 rc = -ENOMEM;
                 goto aead_map_failure;
@@ -982,7 +991,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
                                             DMA_TO_DEVICE);
 
                 if (dma_mapping_error(dev, dma_addr)) {
-                        dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+                        dev_err(dev, "Mapping mac_buf %u B at va=%p for DMA failed\n",
                                 AES_BLOCK_SIZE, addr);
                         areq_ctx->ccm_iv0_dma_addr = 0;
                         rc = -ENOMEM;
@@ -1000,7 +1009,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
                 dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
                                           DMA_BIDIRECTIONAL);
                 if (dma_mapping_error(dev, dma_addr)) {
-                        dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
+                        dev_err(dev, "Mapping hkey %u B at va=%p for DMA failed\n",
                                 AES_BLOCK_SIZE, areq_ctx->hkey);
                         rc = -ENOMEM;
                         goto aead_map_failure;
@@ -1010,7 +1019,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
                 dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
                                           AES_BLOCK_SIZE, DMA_TO_DEVICE);
                 if (dma_mapping_error(dev, dma_addr)) {
-                        dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
+                        dev_err(dev, "Mapping gcm_len_block %u B at va=%p for DMA failed\n",
                                 AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
                         rc = -ENOMEM;
                         goto aead_map_failure;
@@ -1021,7 +1030,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
                                           AES_BLOCK_SIZE, DMA_TO_DEVICE);
 
                 if (dma_mapping_error(dev, dma_addr)) {
-                        dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
+                        dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%p for DMA failed\n",
                                 AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
                         areq_ctx->gcm_iv_inc1_dma_addr = 0;
                         rc = -ENOMEM;
@@ -1033,7 +1042,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
                                           AES_BLOCK_SIZE, DMA_TO_DEVICE);
 
                 if (dma_mapping_error(dev, dma_addr)) {
-                        dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
+                        dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%p for DMA failed\n",
                                 AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
                         areq_ctx->gcm_iv_inc2_dma_addr = 0;
                         rc = -ENOMEM;
@@ -1049,7 +1058,8 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
                 size_to_map += authsize;
         }
 
-        rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
+        rc = cc_map_sg(dev, req->src, size_to_map,
+                       (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL),
                        &areq_ctx->src.mapped_nents,
                        (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
                         LLI_MAX_NUM_OF_DATA_ENTRIES),
@@ -1142,7 +1152,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
         u32 dummy = 0;
         u32 mapped_nents = 0;
 
-        dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
+        dev_dbg(dev, "final params : curr_buff=%p curr_buff_cnt=0x%X nbytes = 0x%X src=%p curr_index=%u\n",
                 curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
         /* Init the type of the dma buffer */
         areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
@@ -1225,8 +1235,9 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
         int rc = 0;
         u32 dummy = 0;
         u32 mapped_nents = 0;
+        int sg_nents;
 
-        dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
+        dev_dbg(dev, " update params : curr_buff=%p curr_buff_cnt=0x%X nbytes=0x%X src=%p curr_index=%u\n",
                 curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
         /* Init the type of the dma buffer */
         areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
@@ -1236,9 +1247,12 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
         areq_ctx->in_nents = 0;
 
         if (total_in_len < block_size) {
-                dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
+                dev_dbg(dev, " less than one block: curr_buff=%p *curr_buff_cnt=0x%X copy_to=%p\n",
                         curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
-                areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
+                sg_nents = sg_nents_for_len(src, nbytes);
+                if (sg_nents < 0)
+                        return sg_nents;
+                areq_ctx->in_nents = sg_nents;
                 sg_copy_to_buffer(src, areq_ctx->in_nents,
                                   &curr_buff[*curr_buff_cnt], nbytes);
                 *curr_buff_cnt += nbytes;
@@ -1255,7 +1269,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
 
         /* Copy the new residue to next buffer */
         if (*next_buff_cnt) {
-                dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
+                dev_dbg(dev, " handle residue: next buff %p skip data %u residue %u\n",
                         next_buff, (update_data_len - *curr_buff_cnt),
                         *next_buff_cnt);
                 cc_copy_sg_portion(dev, next_buff, src,
@@ -1328,7 +1342,7 @@ void cc_unmap_hash_request(struct device *dev, void *ctx,
          *allocated and should be released
          */
         if (areq_ctx->mlli_params.curr_pool) {
-                dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+                dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%p\n",
                         &areq_ctx->mlli_params.mlli_dma_addr,
                         areq_ctx->mlli_params.mlli_virt_addr);
                 dma_pool_free(areq_ctx->mlli_params.curr_pool,
@@ -1337,14 +1351,14 @@ void cc_unmap_hash_request(struct device *dev, void *ctx,
         }
 
         if (src && areq_ctx->in_nents) {
-                dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
+                dev_dbg(dev, "Unmapped sg src: virt=%p dma=%pad len=0x%X\n",
                         sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
                 dma_unmap_sg(dev, src,
                              areq_ctx->in_nents, DMA_TO_DEVICE);
         }
 
         if (*prev_len) {
-                dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
+                dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%p dma=%pad len 0x%X\n",
                         sg_virt(areq_ctx->buff_sg),
                         &sg_dma_address(areq_ctx->buff_sg),
                         sg_dma_len(areq_ctx->buff_sg));
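Editor's note on the DMA direction changes: the recurring fix in this patch is that the driver used to map every data buffer DMA_BIDIRECTIONAL and unmap it the same way, even when the hardware only ever reads the source and writes the destination. The sketch below is not ccree code (the function and parameter names are invented for illustration); it only shows the mapping policy the patch adopts, and that the unmap direction must match the direction used at map time:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    /*
     * Hypothetical helper: map the data buffers of a crypto request with
     * directions that match the real data flow. src == dst means the
     * transform runs in place, so the device both reads and writes the
     * same buffer and DMA_BIDIRECTIONAL is still required.
     */
    static int example_map_request(struct device *dev, struct scatterlist *src,
                                   struct scatterlist *dst, int src_nents,
                                   int dst_nents)
    {
            int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

            /* dma_map_sg() returns the number of mapped entries, 0 on failure */
            if (!dma_map_sg(dev, src, src_nents, src_direction))
                    return -ENOMEM;

            if (src != dst && !dma_map_sg(dev, dst, dst_nents, DMA_FROM_DEVICE)) {
                    dma_unmap_sg(dev, src, src_nents, src_direction);
                    return -ENOMEM;
            }

            return 0;
    }

    /* Hypothetical counterpart: unmap with the directions used at map time. */
    static void example_unmap_request(struct device *dev, struct scatterlist *src,
                                      struct scatterlist *dst, int src_nents,
                                      int dst_nents)
    {
            if (src != dst) {
                    dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
                    dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
            } else {
                    dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
            }
    }

The direction is not just a hint: on non-cache-coherent systems it decides which cache maintenance the DMA API performs, so mapping everything DMA_BIDIRECTIONAL forces cache maintenance in both directions on every request. The DMA API also requires unmap to use the same direction as map, which is why the unmap paths in cc_unmap_cipher_request() and cc_unmap_aead_request() change together with the map paths.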

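A second pattern worth noting is the error handling in cc_map_sg(). dma_map_sg() returns the number of DMA segments and 0 on failure; dma_mapping_error() is only meaningful for the dma_addr_t cookie returned by dma_map_single()/dma_map_page(), so the old "if (dma_mapping_error(dev, ret))" test could never detect a failed scatterlist mapping. The patch also adds a zero-length early return and checks sg_nents_for_len(), which returns a negative errno when the scatterlist is shorter than the requested length. A minimal sketch of the corrected checks, again with an invented function name:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    /* Hypothetical helper showing the three checks the patch gets right. */
    static int example_map_sg(struct device *dev, struct scatterlist *sg,
                              unsigned int nbytes, enum dma_data_direction dir)
    {
            int nents;

            /* An empty buffer is not an error; there is simply nothing to map. */
            if (!nbytes)
                    return 0;

            /* Negative return means the scatterlist is shorter than nbytes. */
            nents = sg_nents_for_len(sg, nbytes);
            if (nents < 0)
                    return nents;

            /* dma_map_sg() signals failure by returning 0, not an error cookie. */
            if (!dma_map_sg(dev, sg, nents, dir))
                    return -ENOMEM;

            return nents;   /* number of mapped segments */
    }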