Diffstat (limited to 'drivers/crypto/hisilicon/sec2/sec_crypto.c')
-rw-r--r-- | drivers/crypto/hisilicon/sec2/sec_crypto.c | 2045
1 file changed, 1589 insertions(+), 456 deletions(-)
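One theme that runs through the whole diff below is that the DMA buffer bookkeeping (SEC_TOTAL_IV_SZ, SEC_TOTAL_MAC_SZ, SEC_TOTAL_PBUF_SZ, SEC_PBUF_PAGE_NUM, SEC_PBUF_LEFT_SZ) stops being hard-wired to QM_Q_DEPTH and becomes a function of the per-queue depth. The standalone sketch below only mirrors that sizing arithmetic so the layout is easier to follow; SEC_PBUF_SZ (512) and SEC_MAX_MAC_LEN (64) are taken from defines visible in this diff, while the PAGE_SIZE and SEC_IV_SIZE values are illustrative assumptions, not the driver's authoritative constants.

/* Userspace sketch of the per-depth pbuf sizing introduced by this patch. */
#include <stdio.h>

#define PAGE_SIZE	4096	/* assumption for illustration */
#define SEC_PBUF_SZ	512	/* from the define removed in this diff */
#define SEC_IV_SIZE	24	/* assumption for illustration */
#define SEC_MAX_MAC_LEN	64

/* One pbuf package: payload area + cipher IV + two MAC slots. */
#define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE + SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM(depth)	((depth) / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ(depth)	(SEC_PBUF_PKG * ((depth) - \
				 SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ(depth)	(PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \
					 SEC_PBUF_LEFT_SZ(depth))

int main(void)
{
	unsigned int depth;

	printf("pkg = %d bytes, %d pkgs per page\n", SEC_PBUF_PKG, SEC_PBUF_NUM);

	/* Allocation now scales with the queue depth instead of QM_Q_DEPTH. */
	for (depth = 256; depth <= 1024; depth *= 2)
		printf("depth %u -> total pbuf %u bytes\n",
		       depth, SEC_TOTAL_PBUF_SZ(depth));

	return 0;
}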
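The diff also adds CTR mode (and CCM/GCM) support, and with it a helper, ctr_iv_inc(), that treats the IV as a big-endian counter and adds the number of processed blocks with byte-wise carry propagation. Below is a minimal userspace sketch of that same carry logic with a small self-check in main(); the test vector is purely illustrative.

#include <stdio.h>
#include <stdint.h>

#define BITS_MASK	0xFF
#define BYTE_BITS	0x8

/* Mirrors ctr_iv_inc() from the diff: add 'nums' to a big-endian counter. */
static void ctr_iv_inc(uint8_t *counter, uint8_t bits, uint32_t nums)
{
	do {
		--bits;
		nums += counter[bits];
		counter[bits] = nums & BITS_MASK;
		nums >>= BYTE_BITS;
	} while (bits && nums);
}

int main(void)
{
	uint8_t iv[16] = { [14] = 0xff, [15] = 0xff };

	/* 0x...00ffff + 2 carries into byte 13: expect 01 00 01. */
	ctr_iv_inc(iv, sizeof(iv), 2);
	printf("%02x %02x %02x\n", iv[13], iv[14], iv[15]);

	return 0;
}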
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 64614a9bdf21..31590d01139a 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -2,12 +2,15 @@ /* Copyright (c) 2019 HiSilicon Limited. */ #include <crypto/aes.h> +#include <crypto/aead.h> #include <crypto/algapi.h> #include <crypto/authenc.h> #include <crypto/des.h> #include <crypto/hash.h> #include <crypto/internal/aead.h> -#include <crypto/sha.h> +#include <crypto/internal/des.h> +#include <crypto/sha1.h> +#include <crypto/sha2.h> #include <crypto/skcipher.h> #include <crypto/xts.h> #include <linux/crypto.h> @@ -19,6 +22,7 @@ #define SEC_PRIORITY 4001 #define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE) +#define SEC_XTS_MID_KEY_SIZE (3 * AES_MIN_KEY_SIZE) #define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE) #define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE) #define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE) @@ -35,156 +39,293 @@ #define SEC_AEAD_ALG_OFFSET 11 #define SEC_AUTH_OFFSET 6 +#define SEC_DE_OFFSET_V3 9 +#define SEC_SCENE_OFFSET_V3 5 +#define SEC_CKEY_OFFSET_V3 13 +#define SEC_CTR_CNT_OFFSET 25 +#define SEC_CTR_CNT_ROLLOVER 2 +#define SEC_SRC_SGL_OFFSET_V3 11 +#define SEC_DST_SGL_OFFSET_V3 14 +#define SEC_CALG_OFFSET_V3 4 +#define SEC_AKEY_OFFSET_V3 9 +#define SEC_MAC_OFFSET_V3 4 +#define SEC_AUTH_ALG_OFFSET_V3 15 +#define SEC_CIPHER_AUTH_V3 0xbf +#define SEC_AUTH_CIPHER_V3 0x40 #define SEC_FLAG_OFFSET 7 #define SEC_FLAG_MASK 0x0780 #define SEC_TYPE_MASK 0x0F #define SEC_DONE_MASK 0x0001 +#define SEC_ICV_MASK 0x000E -#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH) +#define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth)) #define SEC_SGL_SGE_NR 128 -#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev) #define SEC_CIPHER_AUTH 0xfe #define SEC_AUTH_CIPHER 0x1 #define SEC_MAX_MAC_LEN 64 #define SEC_MAX_AAD_LEN 65535 -#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH) +#define SEC_MAX_CCM_AAD_LEN 65279 +#define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth)) -#define SEC_PBUF_SZ 512 #define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ #define SEC_PBUF_MAC_OFFSET (SEC_PBUF_SZ + SEC_IV_SIZE) #define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \ SEC_MAX_MAC_LEN * 2) #define SEC_PBUF_NUM (PAGE_SIZE / SEC_PBUF_PKG) -#define SEC_PBUF_PAGE_NUM (QM_Q_DEPTH / SEC_PBUF_NUM) -#define SEC_PBUF_LEFT_SZ (SEC_PBUF_PKG * (QM_Q_DEPTH - \ - SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM)) -#define SEC_TOTAL_PBUF_SZ (PAGE_SIZE * SEC_PBUF_PAGE_NUM + \ - SEC_PBUF_LEFT_SZ) +#define SEC_PBUF_PAGE_NUM(depth) ((depth) / SEC_PBUF_NUM) +#define SEC_PBUF_LEFT_SZ(depth) (SEC_PBUF_PKG * ((depth) - \ + SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM)) +#define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \ + SEC_PBUF_LEFT_SZ(depth)) -#define SEC_SQE_LEN_RATE 4 #define SEC_SQE_CFLAG 2 #define SEC_SQE_AEAD_FLAG 3 #define SEC_SQE_DONE 0x1 +#define SEC_ICV_ERR 0x2 +#define MAC_LEN_MASK 0x1U +#define MAX_INPUT_DATA_LEN 0xFFFE00 +#define BITS_MASK 0xFF +#define WORD_MASK 0x3 +#define BYTE_BITS 0x8 +#define BYTES_TO_WORDS(bcount) ((bcount) >> 2) +#define SEC_XTS_NAME_SZ 0x3 +#define IV_CM_CAL_NUM 2 +#define IV_CL_MASK 0x7 +#define IV_CL_MIN 2 +#define IV_CL_MID 4 +#define IV_CL_MAX 8 +#define IV_FLAGS_OFFSET 0x6 +#define IV_CM_OFFSET 0x3 +#define IV_LAST_BYTE1 1 +#define IV_LAST_BYTE2 2 +#define IV_LAST_BYTE_MASK 0xFF +#define IV_CTR_INIT 0x1 +#define IV_BYTE_OFFSET 0x8 +#define SEC_GCM_MIN_AUTH_SZ 0x8 +#define SEC_RETRY_MAX_CNT 5U + +static DEFINE_MUTEX(sec_algs_lock); +static 
unsigned int sec_available_devs; + +struct sec_skcipher { + u64 alg_msk; + struct skcipher_alg alg; +}; + +struct sec_aead { + u64 alg_msk; + struct aead_alg alg; +}; -static atomic_t sec_active_devs; +static int sec_aead_soft_crypto(struct sec_ctx *ctx, + struct aead_request *aead_req, + bool encrypt); +static int sec_skcipher_soft_crypto(struct sec_ctx *ctx, + struct skcipher_request *sreq, bool encrypt); -/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */ -static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req) +static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx) { - if (req->c_req.encrypt) - return (u32)atomic_inc_return(&ctx->enc_qcyclic) % - ctx->hlf_q_num; + int req_id; - return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num + - ctx->hlf_q_num; + spin_lock_bh(&qp_ctx->id_lock); + req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC); + spin_unlock_bh(&qp_ctx->id_lock); + return req_id; } -static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req) +static void sec_free_req_id(struct sec_req *req) { - if (req->c_req.encrypt) - atomic_dec(&ctx->enc_qcyclic); - else - atomic_dec(&ctx->dec_qcyclic); + struct sec_qp_ctx *qp_ctx = req->qp_ctx; + int req_id = req->req_id; + + if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) { + dev_err(req->ctx->dev, "free request id invalid!\n"); + return; + } + + spin_lock_bh(&qp_ctx->id_lock); + idr_remove(&qp_ctx->req_idr, req_id); + spin_unlock_bh(&qp_ctx->id_lock); } -static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx) +static u8 pre_parse_finished_bd(struct bd_status *status, void *resp) { - int req_id; + struct sec_sqe *bd = resp; + + status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK; + status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1; + status->flag = (le16_to_cpu(bd->type2.done_flag) & + SEC_FLAG_MASK) >> SEC_FLAG_OFFSET; + status->tag = le16_to_cpu(bd->type2.tag); + status->err_type = bd->type2.error_type; + + return bd->type_cipher_auth & SEC_TYPE_MASK; +} + +static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp) +{ + struct sec_sqe3 *bd3 = resp; + + status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK; + status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1; + status->flag = (le16_to_cpu(bd3->done_flag) & + SEC_FLAG_MASK) >> SEC_FLAG_OFFSET; + status->tag = le64_to_cpu(bd3->tag); + status->err_type = bd3->error_type; + + return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK; +} - mutex_lock(&qp_ctx->req_lock); +static int sec_cb_status_check(struct sec_req *req, + struct bd_status *status) +{ + struct sec_ctx *ctx = req->ctx; - req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, - 0, QM_Q_DEPTH, GFP_ATOMIC); - mutex_unlock(&qp_ctx->req_lock); - if (unlikely(req_id < 0)) { - dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n"); - return req_id; + if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) { + dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n", + req->err_type, status->done); + return -EIO; } - req->qp_ctx = qp_ctx; - qp_ctx->req_list[req_id] = req; - return req_id; + if (unlikely(ctx->alg_type == SEC_SKCIPHER)) { + if (unlikely(status->flag != SEC_SQE_CFLAG)) { + dev_err_ratelimited(ctx->dev, "flag[%u]\n", + status->flag); + return -EIO; + } + } else if (unlikely(ctx->alg_type == SEC_AEAD)) { + if (unlikely(status->flag != SEC_SQE_AEAD_FLAG || + status->icv == SEC_ICV_ERR)) { + 
dev_err_ratelimited(ctx->dev, + "flag[%u], icv[%u]\n", + status->flag, status->icv); + return -EBADMSG; + } + } + + return 0; } -static void sec_free_req_id(struct sec_req *req) +static int qp_send_message(struct sec_req *req) { struct sec_qp_ctx *qp_ctx = req->qp_ctx; - int req_id = req->req_id; + int ret; - if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) { - dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n"); - return; + if (atomic_read(&qp_ctx->qp->qp_status.used) == qp_ctx->qp->sq_depth - 1) + return -EBUSY; + + spin_lock_bh(&qp_ctx->req_lock); + if (atomic_read(&qp_ctx->qp->qp_status.used) == qp_ctx->qp->sq_depth - 1) { + spin_unlock_bh(&qp_ctx->req_lock); + return -EBUSY; } - qp_ctx->req_list[req_id] = NULL; - req->qp_ctx = NULL; + if (qp_ctx->ctx->type_supported == SEC_BD_TYPE2) { + req->sec_sqe.type2.tag = cpu_to_le16((u16)qp_ctx->send_head); + qp_ctx->req_list[qp_ctx->send_head] = req; + } - mutex_lock(&qp_ctx->req_lock); - idr_remove(&qp_ctx->req_idr, req_id); - mutex_unlock(&qp_ctx->req_lock); + ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); + if (ret) { + spin_unlock_bh(&qp_ctx->req_lock); + return ret; + } + if (qp_ctx->ctx->type_supported == SEC_BD_TYPE2) + qp_ctx->send_head = (qp_ctx->send_head + 1) % qp_ctx->qp->sq_depth; + + spin_unlock_bh(&qp_ctx->req_lock); + + atomic64_inc(&req->ctx->sec->debug.dfx.send_cnt); + return -EINPROGRESS; } -static int sec_aead_verify(struct sec_req *req) +static void sec_alg_send_backlog_soft(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { - struct aead_request *aead_req = req->aead_req.aead_req; - struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req); - size_t authsize = crypto_aead_authsize(tfm); - u8 *mac_out = req->aead_req.out_mac; - u8 *mac = mac_out + SEC_MAX_MAC_LEN; - struct scatterlist *sgl = aead_req->src; - size_t sz; + struct sec_req *req, *tmp; + int ret; + + list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) { + list_del(&req->list); + ctx->req_op->buf_unmap(ctx, req); + if (req->req_id >= 0) + sec_free_req_id(req); + + if (ctx->alg_type == SEC_AEAD) + ret = sec_aead_soft_crypto(ctx, req->aead_req.aead_req, + req->c_req.encrypt); + else + ret = sec_skcipher_soft_crypto(ctx, req->c_req.sk_req, + req->c_req.encrypt); - sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize, - aead_req->cryptlen + aead_req->assoclen - - authsize); - if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) { - dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n"); - return -EBADMSG; + /* Wake up the busy thread first, then return the errno. */ + crypto_request_complete(req->base, -EINPROGRESS); + crypto_request_complete(req->base, ret); } +} - return 0; +static void sec_alg_send_backlog(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) +{ + struct sec_req *req, *tmp; + int ret; + + spin_lock_bh(&qp_ctx->backlog.lock); + list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) { + ret = qp_send_message(req); + switch (ret) { + case -EINPROGRESS: + list_del(&req->list); + crypto_request_complete(req->base, -EINPROGRESS); + break; + case -EBUSY: + /* Device is busy and stop send any request. */ + goto unlock; + default: + /* Release memory resources and send all requests through software. 
*/ + sec_alg_send_backlog_soft(ctx, qp_ctx); + goto unlock; + } + } + +unlock: + spin_unlock_bh(&qp_ctx->backlog.lock); } static void sec_req_cb(struct hisi_qp *qp, void *resp) { struct sec_qp_ctx *qp_ctx = qp->qp_ctx; struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx; - struct sec_sqe *bd = resp; + u8 type_supported = qp_ctx->ctx->type_supported; + struct bd_status status; struct sec_ctx *ctx; struct sec_req *req; - u16 done, flag; - int err = 0; + int err; u8 type; - type = bd->type_cipher_auth & SEC_TYPE_MASK; - if (unlikely(type != SEC_BD_TYPE2)) { + if (type_supported == SEC_BD_TYPE2) { + type = pre_parse_finished_bd(&status, resp); + req = qp_ctx->req_list[status.tag]; + } else { + type = pre_parse_finished_bd3(&status, resp); + req = (void *)(uintptr_t)status.tag; + } + + if (unlikely(type != type_supported)) { atomic64_inc(&dfx->err_bd_cnt); - pr_err("err bd type [%d]\n", type); + pr_err("err bd type [%u]\n", type); return; } - req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)]; if (unlikely(!req)) { atomic64_inc(&dfx->invalid_req_cnt); + atomic_inc(&qp->qp_status.used); return; } - req->err_type = bd->type2.error_type; + + req->err_type = status.err_type; ctx = req->ctx; - done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK; - flag = (le16_to_cpu(bd->type2.done_flag) & - SEC_FLAG_MASK) >> SEC_FLAG_OFFSET; - if (unlikely(req->err_type || done != SEC_SQE_DONE || - (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) || - (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) { - dev_err(SEC_CTX_DEV(ctx), - "err_type[%d],done[%d],flag[%d]\n", - req->err_type, done, flag); - err = -EIO; + err = sec_cb_status_check(req, &status); + if (err) atomic64_inc(&dfx->done_flag_cnt); - } - - if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt) - err = sec_aead_verify(req); atomic64_inc(&dfx->recv_cnt); @@ -193,42 +334,65 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp) ctx->req_op->callback(ctx, req, err); } -static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) +static int sec_alg_send_message_retry(struct sec_req *req) { - struct sec_qp_ctx *qp_ctx = req->qp_ctx; + int ctr = 0; int ret; - mutex_lock(&qp_ctx->req_lock); - ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); - mutex_unlock(&qp_ctx->req_lock); - atomic64_inc(&ctx->sec->debug.dfx.send_cnt); + do { + ret = qp_send_message(req); + } while (ret == -EBUSY && ctr++ < SEC_RETRY_MAX_CNT); + + return ret; +} - if (unlikely(ret == -EBUSY)) - return -ENOBUFS; +static int sec_alg_try_enqueue(struct sec_req *req) +{ + /* Check if any request is already backlogged */ + if (!list_empty(&req->backlog->list)) + return -EBUSY; - if (!ret) { - if (req->fake_busy) { - atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt); - ret = -EBUSY; - } else { - ret = -EINPROGRESS; - } - } + /* Try to enqueue to HW ring */ + return qp_send_message(req); +} + + +static int sec_alg_send_message_maybacklog(struct sec_req *req) +{ + int ret; + + ret = sec_alg_try_enqueue(req); + if (ret != -EBUSY) + return ret; + + spin_lock_bh(&req->backlog->lock); + ret = sec_alg_try_enqueue(req); + if (ret == -EBUSY) + list_add_tail(&req->list, &req->backlog->list); + spin_unlock_bh(&req->backlog->lock); return ret; } -/* Get DMA memory resources */ +static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) +{ + if (req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG) + return sec_alg_send_message_maybacklog(req); + + return sec_alg_send_message_retry(req); +} + static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res) { + u16 q_depth = 
res->depth; int i; - res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ, + res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth), &res->c_ivin_dma, GFP_KERNEL); if (!res->c_ivin) return -ENOMEM; - for (i = 1; i < QM_Q_DEPTH; i++) { + for (i = 1; i < q_depth; i++) { res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE; res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE; } @@ -239,20 +403,46 @@ static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res) static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res) { if (res->c_ivin) - dma_free_coherent(dev, SEC_TOTAL_IV_SZ, + dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth), res->c_ivin, res->c_ivin_dma); } +static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res) +{ + u16 q_depth = res->depth; + int i; + + res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth), + &res->a_ivin_dma, GFP_KERNEL); + if (!res->a_ivin) + return -ENOMEM; + + for (i = 1; i < q_depth; i++) { + res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE; + res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE; + } + + return 0; +} + +static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res) +{ + if (res->a_ivin) + dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth), + res->a_ivin, res->a_ivin_dma); +} + static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res) { + u16 q_depth = res->depth; int i; - res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1, + res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1, &res->out_mac_dma, GFP_KERNEL); if (!res->out_mac) return -ENOMEM; - for (i = 1; i < QM_Q_DEPTH; i++) { + for (i = 1; i < q_depth; i++) { res[i].out_mac_dma = res->out_mac_dma + i * (SEC_MAX_MAC_LEN << 1); res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1); @@ -264,14 +454,14 @@ static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res) static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res) { if (res->out_mac) - dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1, + dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1, res->out_mac, res->out_mac_dma); } static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res) { if (res->pbuf) - dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ, + dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth), res->pbuf, res->pbuf_dma); } @@ -281,10 +471,12 @@ static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res) */ static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res) { + u16 q_depth = res->depth; + int size = SEC_PBUF_PAGE_NUM(q_depth); int pbuf_page_offset; int i, j, k; - res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ, + res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth), &res->pbuf_dma, GFP_KERNEL); if (!res->pbuf) return -ENOMEM; @@ -297,11 +489,11 @@ static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res) * So we need SEC_PBUF_PAGE_NUM numbers of PAGE * for the SEC_TOTAL_PBUF_SZ */ - for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) { + for (i = 0; i <= size; i++) { pbuf_page_offset = PAGE_SIZE * i; for (j = 0; j < SEC_PBUF_NUM; j++) { k = i * SEC_PBUF_NUM + j; - if (k == QM_Q_DEPTH) + if (k == q_depth) break; res[k].pbuf = res->pbuf + j * SEC_PBUF_PKG + pbuf_page_offset; @@ -309,14 +501,15 @@ static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res) j * SEC_PBUF_PKG + pbuf_page_offset; } } + return 0; } static int 
sec_alg_resource_alloc(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { - struct device *dev = SEC_CTX_DEV(ctx); struct sec_alg_res *res = qp_ctx->res; + struct device *dev = ctx->dev; int ret; ret = sec_alloc_civ_resource(dev, res); @@ -324,67 +517,72 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx, return ret; if (ctx->alg_type == SEC_AEAD) { + ret = sec_alloc_aiv_resource(dev, res); + if (ret) + goto alloc_aiv_fail; + ret = sec_alloc_mac_resource(dev, res); if (ret) - goto alloc_fail; + goto alloc_mac_fail; } if (ctx->pbuf_supported) { ret = sec_alloc_pbuf_resource(dev, res); if (ret) { dev_err(dev, "fail to alloc pbuf dma resource!\n"); - goto alloc_fail; + goto alloc_pbuf_fail; } } return 0; -alloc_fail: - sec_free_civ_resource(dev, res); +alloc_pbuf_fail: + if (ctx->alg_type == SEC_AEAD) + sec_free_mac_resource(dev, qp_ctx->res); +alloc_mac_fail: + if (ctx->alg_type == SEC_AEAD) + sec_free_aiv_resource(dev, res); +alloc_aiv_fail: + sec_free_civ_resource(dev, res); return ret; } static void sec_alg_resource_free(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; sec_free_civ_resource(dev, qp_ctx->res); if (ctx->pbuf_supported) sec_free_pbuf_resource(dev, qp_ctx->res); - if (ctx->alg_type == SEC_AEAD) + if (ctx->alg_type == SEC_AEAD) { sec_free_mac_resource(dev, qp_ctx->res); + sec_free_aiv_resource(dev, qp_ctx->res); + } } -static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, - int qp_ctx_id, int alg_type) +static int sec_alloc_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { - struct device *dev = SEC_CTX_DEV(ctx); - struct sec_qp_ctx *qp_ctx; - struct hisi_qp *qp; + u16 q_depth = qp_ctx->qp->sq_depth; + struct device *dev = ctx->dev; int ret = -ENOMEM; - qp_ctx = &ctx->qp_ctx[qp_ctx_id]; - qp = ctx->qps[qp_ctx_id]; - qp->req_type = 0; - qp->qp_ctx = qp_ctx; - qp->req_cb = sec_req_cb; - qp_ctx->qp = qp; - qp_ctx->ctx = ctx; + qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL); + if (!qp_ctx->req_list) + return ret; - mutex_init(&qp_ctx->req_lock); - atomic_set(&qp_ctx->pending_reqs, 0); - idr_init(&qp_ctx->req_idr); + qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL); + if (!qp_ctx->res) + goto err_free_req_list; + qp_ctx->res->depth = q_depth; - qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH, - SEC_SGL_SGE_NR); + qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR); if (IS_ERR(qp_ctx->c_in_pool)) { dev_err(dev, "fail to create sgl pool for input!\n"); - goto err_destroy_idr; + goto err_free_res; } - qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH, - SEC_SGL_SGE_NR); + qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR); if (IS_ERR(qp_ctx->c_out_pool)) { dev_err(dev, "fail to create sgl pool for output!\n"); goto err_free_c_in_pool; @@ -394,35 +592,74 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, if (ret) goto err_free_c_out_pool; - ret = hisi_qm_start_qp(qp, 0); - if (ret < 0) - goto err_queue_free; - return 0; -err_queue_free: - sec_alg_resource_free(ctx, qp_ctx); err_free_c_out_pool: hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); err_free_c_in_pool: hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); -err_destroy_idr: - idr_destroy(&qp_ctx->req_idr); - +err_free_res: + kfree(qp_ctx->res); +err_free_req_list: + kfree(qp_ctx->req_list); return ret; } -static void sec_release_qp_ctx(struct sec_ctx *ctx, - struct sec_qp_ctx 
*qp_ctx) +static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; - hisi_qm_stop_qp(qp_ctx->qp); sec_alg_resource_free(ctx, qp_ctx); - hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); + kfree(qp_ctx->res); + kfree(qp_ctx->req_list); +} + +static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id) +{ + struct sec_qp_ctx *qp_ctx; + struct hisi_qp *qp; + int ret; + qp_ctx = &ctx->qp_ctx[qp_ctx_id]; + qp = ctx->qps[qp_ctx_id]; + qp->req_type = 0; + qp->qp_ctx = qp_ctx; + qp_ctx->qp = qp; + qp_ctx->ctx = ctx; + + qp->req_cb = sec_req_cb; + + spin_lock_init(&qp_ctx->req_lock); + idr_init(&qp_ctx->req_idr); + spin_lock_init(&qp_ctx->backlog.lock); + spin_lock_init(&qp_ctx->id_lock); + INIT_LIST_HEAD(&qp_ctx->backlog.list); + qp_ctx->send_head = 0; + + ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx); + if (ret) + goto err_destroy_idr; + + ret = hisi_qm_start_qp(qp, 0); + if (ret < 0) + goto err_resource_free; + + return 0; + +err_resource_free: + sec_free_qp_ctx_resource(ctx, qp_ctx); +err_destroy_idr: + idr_destroy(&qp_ctx->req_idr); + return ret; +} + +static void sec_release_qp_ctx(struct sec_ctx *ctx, + struct sec_qp_ctx *qp_ctx) +{ + hisi_qm_stop_qp(qp_ctx->qp); + sec_free_qp_ctx_resource(ctx, qp_ctx); idr_destroy(&qp_ctx->req_idr); } @@ -439,30 +676,31 @@ static int sec_ctx_base_init(struct sec_ctx *ctx) sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm); ctx->sec = sec; + ctx->dev = &sec->qm.pdev->dev; ctx->hlf_q_num = sec->ctx_q_num >> 1; ctx->pbuf_supported = ctx->sec->iommu_used; - - /* Half of queue depth is taken as fake requests limit in the queue. */ - ctx->fake_req_limit = QM_Q_DEPTH >> 1; ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx), GFP_KERNEL); - if (!ctx->qp_ctx) - return -ENOMEM; + if (!ctx->qp_ctx) { + ret = -ENOMEM; + goto err_destroy_qps; + } for (i = 0; i < sec->ctx_q_num; i++) { - ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0); + ret = sec_create_qp_ctx(ctx, i); if (ret) goto err_sec_release_qp_ctx; } return 0; + err_sec_release_qp_ctx: for (i = i - 1; i >= 0; i--) sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); - - sec_destroy_qps(ctx->qps, sec->ctx_q_num); kfree(ctx->qp_ctx); +err_destroy_qps: + sec_destroy_qps(ctx->qps, sec->ctx_q_num); return ret; } @@ -481,7 +719,7 @@ static int sec_cipher_init(struct sec_ctx *ctx) { struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; - c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE, &c_ctx->c_key_dma, GFP_KERNEL); if (!c_ctx->c_key) return -ENOMEM; @@ -494,7 +732,7 @@ static void sec_cipher_uninit(struct sec_ctx *ctx) struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE); - dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE, c_ctx->c_key, c_ctx->c_key_dma); } @@ -502,7 +740,7 @@ static int sec_auth_init(struct sec_ctx *ctx) { struct sec_auth_ctx *a_ctx = &ctx->a_ctx; - a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE, &a_ctx->a_key_dma, GFP_KERNEL); if (!a_ctx->a_key) return -ENOMEM; @@ -514,21 +752,39 @@ static void sec_auth_uninit(struct sec_ctx *ctx) { struct sec_auth_ctx *a_ctx = &ctx->a_ctx; - memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE); - dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + 
memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE); + dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE, a_ctx->a_key, a_ctx->a_key_dma); } +static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm) +{ + const char *alg = crypto_tfm_alg_name(&tfm->base); + struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + + c_ctx->fallback = false; + + c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(c_ctx->fbtfm)) { + pr_err("failed to alloc fallback tfm for %s!\n", alg); + return PTR_ERR(c_ctx->fbtfm); + } + + return 0; +} + static int sec_skcipher_init(struct crypto_skcipher *tfm) { struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); int ret; ctx->alg_type = SEC_SKCIPHER; - crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req)); + crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct sec_req)); ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm); if (ctx->c_ctx.ivsize > SEC_IV_SIZE) { - dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n"); + pr_err("get error skcipher iv size!\n"); return -EINVAL; } @@ -540,10 +796,16 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm) if (ret) goto err_cipher_init; + ret = sec_skcipher_fbtfm_init(tfm); + if (ret) + goto err_fbtfm_init; + return 0; + +err_fbtfm_init: + sec_cipher_uninit(ctx); err_cipher_init: sec_ctx_base_uninit(ctx); - return ret; } @@ -551,14 +813,23 @@ static void sec_skcipher_uninit(struct crypto_skcipher *tfm) { struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + if (ctx->c_ctx.fbtfm) + crypto_free_sync_skcipher(ctx->c_ctx.fbtfm); + sec_cipher_uninit(ctx); sec_ctx_base_uninit(ctx); } -static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx, - const u32 keylen, - const enum sec_cmode c_mode) +static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key, const u32 keylen) { + struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + int ret; + + ret = verify_skcipher_des3_key(tfm, key); + if (ret) + return ret; + switch (keylen) { case SEC_DES3_2KEY_SIZE: c_ctx->c_key_len = SEC_CKEY_3DES_2KEY; @@ -582,6 +853,9 @@ static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx, case SEC_XTS_MIN_KEY_SIZE: c_ctx->c_key_len = SEC_CKEY_128BIT; break; + case SEC_XTS_MID_KEY_SIZE: + c_ctx->fallback = true; + break; case SEC_XTS_MAX_KEY_SIZE: c_ctx->c_key_len = SEC_CKEY_256BIT; break; @@ -590,19 +864,25 @@ static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx, return -EINVAL; } } else { - switch (keylen) { - case AES_KEYSIZE_128: - c_ctx->c_key_len = SEC_CKEY_128BIT; - break; - case AES_KEYSIZE_192: - c_ctx->c_key_len = SEC_CKEY_192BIT; - break; - case AES_KEYSIZE_256: - c_ctx->c_key_len = SEC_CKEY_256BIT; - break; - default: - pr_err("hisi_sec2: aes key error!\n"); + if (c_ctx->c_alg == SEC_CALG_SM4 && + keylen != AES_KEYSIZE_128) { + pr_err("hisi_sec2: sm4 key error!\n"); return -EINVAL; + } else { + switch (keylen) { + case AES_KEYSIZE_128: + c_ctx->c_key_len = SEC_CKEY_128BIT; + break; + case AES_KEYSIZE_192: + c_ctx->c_key_len = SEC_CKEY_192BIT; + break; + case AES_KEYSIZE_256: + c_ctx->c_key_len = SEC_CKEY_256BIT; + break; + default: + pr_err("hisi_sec2: aes key error!\n"); + return -EINVAL; + } } } @@ -615,12 +895,13 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, { struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + struct device *dev = ctx->dev; int ret; if (c_mode == SEC_CMODE_XTS) { ret = 
xts_verify_key(tfm, key, keylen); if (ret) { - dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n"); + dev_err(dev, "xts mode key err!\n"); return ret; } } @@ -630,23 +911,30 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, switch (c_alg) { case SEC_CALG_3DES: - ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode); + ret = sec_skcipher_3des_setkey(tfm, key, keylen); break; case SEC_CALG_AES: case SEC_CALG_SM4: ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode); break; default: + dev_err(dev, "sec c_alg err!\n"); return -EINVAL; } if (ret) { - dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n"); + dev_err(dev, "set sec key err!\n"); return ret; } memcpy(c_ctx->c_key, key, keylen); - + if (c_ctx->fbtfm) { + ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen); + if (ret) { + dev_err(dev, "failed to set fallback skcipher key!\n"); + return ret; + } + } return 0; } @@ -660,12 +948,12 @@ static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\ GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB) GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC) GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS) - +GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR) GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB) GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC) - GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS) GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC) +GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR) static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req, struct scatterlist *src) @@ -673,32 +961,44 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req, struct aead_request *aead_req = req->aead_req.aead_req; struct sec_cipher_req *c_req = &req->c_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; - struct device *dev = SEC_CTX_DEV(ctx); + struct sec_request_buf *buf = &req->buf; + struct device *dev = ctx->dev; int copy_size, pbuf_length; int req_id = req->req_id; + struct crypto_aead *tfm; + u8 *mac_offset, *pbuf; + size_t authsize; if (ctx->alg_type == SEC_AEAD) copy_size = aead_req->cryptlen + aead_req->assoclen; else copy_size = c_req->c_len; - pbuf_length = sg_copy_to_buffer(src, sg_nents(src), - qp_ctx->res[req_id].pbuf, - copy_size); + pbuf = req->req_id < 0 ? 
buf->pbuf : qp_ctx->res[req_id].pbuf; + pbuf_length = sg_copy_to_buffer(src, sg_nents(src), pbuf, copy_size); if (unlikely(pbuf_length != copy_size)) { dev_err(dev, "copy src data to pbuf error!\n"); return -EINVAL; } + if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) { + tfm = crypto_aead_reqtfm(aead_req); + authsize = crypto_aead_authsize(tfm); + mac_offset = pbuf + copy_size - authsize; + memcpy(req->aead_req.out_mac, mac_offset, authsize); + } - c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma; + if (req->req_id < 0) { + buf->in_dma = dma_map_single(dev, buf->pbuf, SEC_PBUF_SZ, DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, buf->in_dma))) + return -ENOMEM; - if (!c_req->c_in_dma) { - dev_err(dev, "fail to set pbuffer address!\n"); - return -ENOMEM; + buf->out_dma = buf->in_dma; + return 0; } - c_req->c_out_dma = c_req->c_in_dma; + req->in_dma = qp_ctx->res[req_id].pbuf_dma; + c_req->c_out_dma = req->in_dma; return 0; } @@ -709,7 +1009,7 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req, struct aead_request *aead_req = req->aead_req.aead_req; struct sec_cipher_req *c_req = &req->c_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; - struct device *dev = SEC_CTX_DEV(ctx); + struct sec_request_buf *buf = &req->buf; int copy_size, pbuf_length; int req_id = req->req_id; @@ -718,66 +1018,182 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req, else copy_size = c_req->c_len; - pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), - qp_ctx->res[req_id].pbuf, - copy_size); - + if (req->req_id < 0) + pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), buf->pbuf, copy_size); + else + pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), qp_ctx->res[req_id].pbuf, + copy_size); if (unlikely(pbuf_length != copy_size)) - dev_err(dev, "copy pbuf data to dst error!\n"); + dev_err(ctx->dev, "copy pbuf data to dst error!\n"); + if (req->req_id < 0) + dma_unmap_single(ctx->dev, buf->in_dma, SEC_PBUF_SZ, DMA_BIDIRECTIONAL); } -static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req, - struct scatterlist *src, struct scatterlist *dst) +static int sec_aead_mac_init(struct sec_aead_req *req) +{ + struct aead_request *aead_req = req->aead_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req); + size_t authsize = crypto_aead_authsize(tfm); + struct scatterlist *sgl = aead_req->src; + u8 *mac_out = req->out_mac; + size_t copy_size; + off_t skip_size; + + /* Copy input mac */ + skip_size = aead_req->assoclen + aead_req->cryptlen - authsize; + copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size); + if (unlikely(copy_size != authsize)) + return -EINVAL; + + return 0; +} + +static void fill_sg_to_hw_sge(struct scatterlist *sgl, struct sec_hw_sge *hw_sge) +{ + hw_sge->buf = sg_dma_address(sgl); + hw_sge->len = cpu_to_le32(sg_dma_len(sgl)); + hw_sge->page_ctrl = sg_virt(sgl); +} + +static int sec_cipher_to_hw_sgl(struct device *dev, struct scatterlist *src, + struct sec_hw_sgl *src_in, dma_addr_t *hw_sgl_dma, + int dma_dir) +{ + struct sec_hw_sge *curr_hw_sge = src_in->sge_entries; + u32 i, sg_n, sg_n_mapped; + struct scatterlist *sg; + u32 sge_var = 0; + + sg_n = sg_nents(src); + sg_n_mapped = dma_map_sg(dev, src, sg_n, dma_dir); + if (unlikely(!sg_n_mapped)) { + dev_err(dev, "dma mapping for SG error!\n"); + return -EINVAL; + } else if (unlikely(sg_n_mapped > SEC_SGE_NR_NUM)) { + dev_err(dev, "the number of entries in input scatterlist error!\n"); + dma_unmap_sg(dev, src, sg_n, dma_dir); + return -EINVAL; + } 
+ + for_each_sg(src, sg, sg_n_mapped, i) { + fill_sg_to_hw_sge(sg, curr_hw_sge); + curr_hw_sge++; + sge_var++; + } + + src_in->entry_sum_in_sgl = cpu_to_le16(sge_var); + src_in->entry_sum_in_chain = cpu_to_le16(SEC_SGE_NR_NUM); + src_in->entry_length_in_sgl = cpu_to_le16(SEC_SGE_NR_NUM); + *hw_sgl_dma = dma_map_single(dev, src_in, sizeof(struct sec_hw_sgl), dma_dir); + if (unlikely(dma_mapping_error(dev, *hw_sgl_dma))) { + dma_unmap_sg(dev, src, sg_n, dma_dir); + return -ENOMEM; + } + + return 0; +} + +static void sec_cipher_put_hw_sgl(struct device *dev, struct scatterlist *src, + dma_addr_t src_in, int dma_dir) +{ + dma_unmap_single(dev, src_in, sizeof(struct sec_hw_sgl), dma_dir); + dma_unmap_sg(dev, src, sg_nents(src), dma_dir); +} + +static int sec_cipher_map_sgl(struct device *dev, struct sec_req *req, + struct scatterlist *src, struct scatterlist *dst) +{ + struct sec_hw_sgl *src_in = &req->buf.data_buf.in; + struct sec_hw_sgl *dst_out = &req->buf.data_buf.out; + int ret; + + if (dst == src) { + ret = sec_cipher_to_hw_sgl(dev, src, src_in, &req->buf.in_dma, + DMA_BIDIRECTIONAL); + req->buf.out_dma = req->buf.in_dma; + return ret; + } + + ret = sec_cipher_to_hw_sgl(dev, src, src_in, &req->buf.in_dma, DMA_TO_DEVICE); + if (unlikely(ret)) + return ret; + + ret = sec_cipher_to_hw_sgl(dev, dst, dst_out, &req->buf.out_dma, + DMA_FROM_DEVICE); + if (unlikely(ret)) { + sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_TO_DEVICE); + return ret; + } + + return 0; +} + +static int sec_cipher_map_inner(struct sec_ctx *ctx, struct sec_req *req, + struct scatterlist *src, struct scatterlist *dst) { struct sec_cipher_req *c_req = &req->c_req; struct sec_aead_req *a_req = &req->aead_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; struct sec_alg_res *res = &qp_ctx->res[req->req_id]; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; + enum dma_data_direction src_direction; int ret; if (req->use_pbuf) { - ret = sec_cipher_pbuf_map(ctx, req, src); c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET; c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET; if (ctx->alg_type == SEC_AEAD) { + a_req->a_ivin = res->a_ivin; + a_req->a_ivin_dma = res->a_ivin_dma; a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET; a_req->out_mac_dma = res->pbuf_dma + SEC_PBUF_MAC_OFFSET; } - - return ret; + return sec_cipher_pbuf_map(ctx, req, src); } + c_req->c_ivin = res->c_ivin; c_req->c_ivin_dma = res->c_ivin_dma; if (ctx->alg_type == SEC_AEAD) { + a_req->a_ivin = res->a_ivin; + a_req->a_ivin_dma = res->a_ivin_dma; a_req->out_mac = res->out_mac; a_req->out_mac_dma = res->out_mac_dma; } - c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src, - qp_ctx->c_in_pool, - req->req_id, - &c_req->c_in_dma); - - if (IS_ERR(c_req->c_in)) { + src_direction = dst == src ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src, + qp_ctx->c_in_pool, + req->req_id, + &req->in_dma, src_direction); + if (IS_ERR(req->in)) { dev_err(dev, "fail to dma map input sgl buffers!\n"); - return PTR_ERR(c_req->c_in); + return PTR_ERR(req->in); + } + + if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) { + ret = sec_aead_mac_init(a_req); + if (unlikely(ret)) { + dev_err(dev, "fail to init mac data for ICV!\n"); + hisi_acc_sg_buf_unmap(dev, src, req->in, src_direction); + return ret; + } } if (dst == src) { - c_req->c_out = c_req->c_in; - c_req->c_out_dma = c_req->c_in_dma; + c_req->c_out = req->in; + c_req->c_out_dma = req->in_dma; } else { c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst, qp_ctx->c_out_pool, req->req_id, - &c_req->c_out_dma); + &c_req->c_out_dma, + DMA_FROM_DEVICE); if (IS_ERR(c_req->c_out)) { dev_err(dev, "fail to dma map output sgl buffers!\n"); - hisi_acc_sg_buf_unmap(dev, src, c_req->c_in); + hisi_acc_sg_buf_unmap(dev, src, req->in, src_direction); return PTR_ERR(c_req->c_out); } } @@ -785,19 +1201,108 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req, return 0; } +static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req, + struct scatterlist *src, struct scatterlist *dst) +{ + struct sec_aead_req *a_req = &req->aead_req; + struct sec_cipher_req *c_req = &req->c_req; + bool is_aead = (ctx->alg_type == SEC_AEAD); + struct device *dev = ctx->dev; + int ret = -ENOMEM; + + if (req->req_id >= 0) + return sec_cipher_map_inner(ctx, req, src, dst); + + c_req->c_ivin = c_req->c_ivin_buf; + c_req->c_ivin_dma = dma_map_single(dev, c_req->c_ivin, + SEC_IV_SIZE, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, c_req->c_ivin_dma))) + return -ENOMEM; + + if (is_aead) { + a_req->a_ivin = a_req->a_ivin_buf; + a_req->out_mac = a_req->out_mac_buf; + a_req->a_ivin_dma = dma_map_single(dev, a_req->a_ivin, + SEC_IV_SIZE, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, a_req->a_ivin_dma))) + goto free_c_ivin_dma; + + a_req->out_mac_dma = dma_map_single(dev, a_req->out_mac, + SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, a_req->out_mac_dma))) + goto free_a_ivin_dma; + } + if (req->use_pbuf) { + ret = sec_cipher_pbuf_map(ctx, req, src); + if (unlikely(ret)) + goto free_out_mac_dma; + + return 0; + } + + if (!c_req->encrypt && is_aead) { + ret = sec_aead_mac_init(a_req); + if (unlikely(ret)) { + dev_err(dev, "fail to init mac data for ICV!\n"); + goto free_out_mac_dma; + } + } + + ret = sec_cipher_map_sgl(dev, req, src, dst); + if (unlikely(ret)) { + dev_err(dev, "fail to dma map input sgl buffers!\n"); + goto free_out_mac_dma; + } + + return 0; + +free_out_mac_dma: + if (is_aead) + dma_unmap_single(dev, a_req->out_mac_dma, SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL); +free_a_ivin_dma: + if (is_aead) + dma_unmap_single(dev, a_req->a_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE); +free_c_ivin_dma: + dma_unmap_single(dev, c_req->c_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE); + return ret; +} + static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req, struct scatterlist *src, struct scatterlist *dst) { + struct sec_aead_req *a_req = &req->aead_req; struct sec_cipher_req *c_req = &req->c_req; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; + + if (req->req_id >= 0) { + if (req->use_pbuf) { + sec_cipher_pbuf_unmap(ctx, req, dst); + } else { + if (dst != src) { + hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out, DMA_FROM_DEVICE); + hisi_acc_sg_buf_unmap(dev, src, 
req->in, DMA_TO_DEVICE); + } else { + hisi_acc_sg_buf_unmap(dev, src, req->in, DMA_BIDIRECTIONAL); + } + } + return; + } if (req->use_pbuf) { sec_cipher_pbuf_unmap(ctx, req, dst); } else { - if (dst != src) - hisi_acc_sg_buf_unmap(dev, src, c_req->c_in); + if (dst != src) { + sec_cipher_put_hw_sgl(dev, dst, req->buf.out_dma, DMA_FROM_DEVICE); + sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_TO_DEVICE); + } else { + sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_BIDIRECTIONAL); + } + } - hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out); + dma_unmap_single(dev, c_req->c_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE); + if (ctx->alg_type == SEC_AEAD) { + dma_unmap_single(dev, a_req->a_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE); + dma_unmap_single(dev, a_req->out_mac_dma, SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL); } } @@ -841,14 +1346,10 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, struct crypto_authenc_keys *keys) { struct crypto_shash *hash_tfm = ctx->hash_tfm; - int blocksize, ret; - - if (!keys->authkeylen) { - pr_err("hisi_sec2: aead auth key error!\n"); - return -EINVAL; - } + int blocksize, digestsize, ret; blocksize = crypto_shash_blocksize(hash_tfm); + digestsize = crypto_shash_digestsize(hash_tfm); if (keys->authkeylen > blocksize) { ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey, keys->authkeylen, ctx->a_key); @@ -856,67 +1357,107 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, pr_err("hisi_sec2: aead auth digest error!\n"); return -EINVAL; } - ctx->a_key_len = blocksize; + ctx->a_key_len = digestsize; } else { - memcpy(ctx->a_key, keys->authkey, keys->authkeylen); + if (keys->authkeylen) + memcpy(ctx->a_key, keys->authkey, keys->authkeylen); ctx->a_key_len = keys->authkeylen; } return 0; } +static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(aead); + struct sec_ctx *ctx = crypto_tfm_ctx(tfm); + struct sec_auth_ctx *a_ctx = &ctx->a_ctx; + + return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize); +} + +static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx, + struct crypto_aead *tfm, const u8 *key, + unsigned int keylen) +{ + crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK); + crypto_aead_set_flags(a_ctx->fallback_aead_tfm, + crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); + return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen); +} + static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, const u32 keylen, const enum sec_hash_alg a_alg, const enum sec_calg c_alg, - const enum sec_mac_len mac_len, const enum sec_cmode c_mode) { struct sec_ctx *ctx = crypto_aead_ctx(tfm); struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + struct sec_auth_ctx *a_ctx = &ctx->a_ctx; + struct device *dev = ctx->dev; struct crypto_authenc_keys keys; int ret; ctx->a_ctx.a_alg = a_alg; ctx->c_ctx.c_alg = c_alg; - ctx->a_ctx.mac_len = mac_len; c_ctx->c_mode = c_mode; - if (crypto_authenc_extractkeys(&keys, key, keylen)) + if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) { + ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode); + if (ret) { + dev_err(dev, "set sec aes ccm cipher key err!\n"); + return ret; + } + memcpy(c_ctx->c_key, key, keylen); + + return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen); + } + + ret = crypto_authenc_extractkeys(&keys, key, keylen); + if (ret) { + dev_err(dev, "sec extract aead keys err!\n"); goto bad_key; + } ret = sec_aead_aes_set_key(c_ctx, &keys); if (ret) { - dev_err(SEC_CTX_DEV(ctx), "set sec 
cipher key err!\n"); + dev_err(dev, "set sec cipher key err!\n"); goto bad_key; } ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys); if (ret) { - dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n"); + dev_err(dev, "set sec auth key err!\n"); + goto bad_key; + } + + ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen); + if (ret) { + dev_err(dev, "set sec fallback key err!\n"); goto bad_key; } return 0; + bad_key: memzero_explicit(&keys, sizeof(struct crypto_authenc_keys)); - - return -EINVAL; + return ret; } -#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode) \ -static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, \ - u32 keylen) \ -{ \ - return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\ +#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode) \ +static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen) \ +{ \ + return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode); \ } -GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, - SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC) -GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, - SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC) -GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, - SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC) +GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC) +GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC) +GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC) +GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM) +GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM) +GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM) +GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM) static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req) { @@ -950,7 +1491,6 @@ static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req) unmap_req_buf: ctx->req_op->buf_unmap(ctx, req); - return ret; } @@ -980,8 +1520,15 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req) sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma); sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma); - sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma); - sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma); + if (req->req_id < 0) { + sec_sqe->type2.data_src_addr = cpu_to_le64(req->buf.in_dma); + sec_sqe->type2.data_dst_addr = cpu_to_le64(req->buf.out_dma); + } else { + sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma); + sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma); + } + if (sec_sqe->type2.data_src_addr != sec_sqe->type2.data_dst_addr) + de = 0x1 << SEC_DE_OFFSET; sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) << SEC_CMODE_OFFSET); @@ -996,29 +1543,92 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req) cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET; sec_sqe->type_cipher_auth = bd_type | cipher; - if (req->use_pbuf) + /* Set destination and source address type */ + if (req->use_pbuf) { sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET; - else + da_type = SEC_PBUF << SEC_DST_SGL_OFFSET; + } else { sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET; + da_type = SEC_SGL << SEC_DST_SGL_OFFSET; + } + + sec_sqe->sdm_addr_type |= da_type; scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET; - if (c_req->c_in_dma != c_req->c_out_dma) - de = 0x1 << SEC_DE_OFFSET; sec_sqe->sds_sa_type = (de | scene | sa_type); 
- /* Just set DST address type */ - if (req->use_pbuf) - da_type = SEC_PBUF << SEC_DST_SGL_OFFSET; + sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len); + + return 0; +} + +static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req) +{ + struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3; + struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + struct sec_cipher_req *c_req = &req->c_req; + u32 bd_param = 0; + u16 cipher; + + memset(sec_sqe3, 0, sizeof(struct sec_sqe3)); + + sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma); + sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma); + if (req->req_id < 0) { + sec_sqe3->data_src_addr = cpu_to_le64(req->buf.in_dma); + sec_sqe3->data_dst_addr = cpu_to_le64(req->buf.out_dma); + } else { + sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma); + sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma); + } + if (sec_sqe3->data_src_addr != sec_sqe3->data_dst_addr) + bd_param |= 0x1 << SEC_DE_OFFSET_V3; + + sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) | + c_ctx->c_mode; + sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) << + SEC_CKEY_OFFSET_V3); + + if (c_req->encrypt) + cipher = SEC_CIPHER_ENC; else - da_type = SEC_SGL << SEC_DST_SGL_OFFSET; - sec_sqe->sdm_addr_type |= da_type; + cipher = SEC_CIPHER_DEC; + sec_sqe3->c_icv_key |= cpu_to_le16(cipher); - sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len); - sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id); + /* Set the CTR counter mode is 128bit rollover */ + sec_sqe3->auth_mac_key = cpu_to_le32((u32)SEC_CTR_CNT_ROLLOVER << + SEC_CTR_CNT_OFFSET); + + if (req->use_pbuf) { + bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3; + bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3; + } else { + bd_param |= SEC_SGL << SEC_SRC_SGL_OFFSET_V3; + bd_param |= SEC_SGL << SEC_DST_SGL_OFFSET_V3; + } + + bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3; + + bd_param |= SEC_BD_TYPE3; + sec_sqe3->bd_param = cpu_to_le32(bd_param); + + sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len); + sec_sqe3->tag = cpu_to_le64((unsigned long)req); return 0; } +/* increment counter (128-bit int) */ +static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums) +{ + do { + --bits; + nums += counter[bits]; + counter[bits] = nums & BITS_MASK; + nums >>= BYTE_BITS; + } while (bits && nums); +} + static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type) { struct aead_request *aead_req = req->aead_req.aead_req; @@ -1029,50 +1639,162 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type) size_t sz; u8 *iv; - if (req->c_req.encrypt) - sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst; - else - sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src; - if (alg_type == SEC_SKCIPHER) { + sgl = req->c_req.encrypt ? sk_req->dst : sk_req->src; iv = sk_req->iv; cryptlen = sk_req->cryptlen; } else { + sgl = req->c_req.encrypt ? 
aead_req->dst : aead_req->src; iv = aead_req->iv; cryptlen = aead_req->cryptlen; } - sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size, - cryptlen - iv_size); - if (unlikely(sz != iv_size)) - dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n"); + if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) { + sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size, + cryptlen - iv_size); + if (unlikely(sz != iv_size)) + dev_err(req->ctx->dev, "copy output iv error!\n"); + } else { + sz = (cryptlen + iv_size - 1) / iv_size; + ctr_iv_inc(iv, iv_size, sz); + } } static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req, int err) { - struct skcipher_request *sk_req = req->c_req.sk_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; - atomic_dec(&qp_ctx->pending_reqs); - sec_free_req_id(req); + if (req->req_id >= 0) + sec_free_req_id(req); - /* IV output at encrypto of CBC mode */ - if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt) + /* IV output at encrypto of CBC/CTR mode */ + if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC || + ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt) sec_update_iv(req, SEC_SKCIPHER); - if (req->fake_busy) - sk_req->base.complete(&sk_req->base, -EINPROGRESS); + crypto_request_complete(req->base, err); + sec_alg_send_backlog(ctx, qp_ctx); +} + +static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req) +{ + struct aead_request *aead_req = req->aead_req.aead_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req); + size_t authsize = crypto_aead_authsize(tfm); + struct sec_aead_req *a_req = &req->aead_req; + struct sec_cipher_req *c_req = &req->c_req; + u32 data_size = aead_req->cryptlen; + u8 flage = 0; + u8 cm, cl; + + /* the specification has been checked in aead_iv_demension_check() */ + cl = c_req->c_ivin[0] + 1; + c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00; + memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl); + c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT; + + /* the last 3bit is L' */ + flage |= c_req->c_ivin[0] & IV_CL_MASK; + + /* the M' is bit3~bit5, the Flags is bit6 */ + cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM; + flage |= cm << IV_CM_OFFSET; + if (aead_req->assoclen) + flage |= 0x01 << IV_FLAGS_OFFSET; - sk_req->base.complete(&sk_req->base, err); + memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize); + a_req->a_ivin[0] = flage; + + /* + * the last 32bit is counter's initial number, + * but the nonce uses the first 16bit + * the tail 16bit fill with the cipher length + */ + if (!c_req->encrypt) + data_size = aead_req->cryptlen - authsize; + + a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = + data_size & IV_LAST_BYTE_MASK; + data_size >>= IV_BYTE_OFFSET; + a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] = + data_size & IV_LAST_BYTE_MASK; } -static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req) +static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req) { struct aead_request *aead_req = req->aead_req.aead_req; + struct sec_aead_req *a_req = &req->aead_req; struct sec_cipher_req *c_req = &req->c_req; memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize); + + if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) { + /* + * CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter}, + * the counter must set to 0x01 + * CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} + */ + set_aead_auth_iv(ctx, req); + } else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) { + /* GCM 12Byte Cipher_IV == Auth_IV */ + memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE); + } +} + 
+static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir, + struct sec_req *req, struct sec_sqe *sec_sqe) +{ + struct sec_aead_req *a_req = &req->aead_req; + struct aead_request *aq = a_req->aead_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(aq); + size_t authsize = crypto_aead_authsize(tfm); + + /* C_ICV_Len is MAC size, 0x4 ~ 0x10 */ + sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize); + + /* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */ + sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr; + sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma); + sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET; + + if (dir) + sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH; + else + sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER; + + sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen); + sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0); + sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen); + + sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma); +} + +static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir, + struct sec_req *req, struct sec_sqe3 *sqe3) +{ + struct sec_aead_req *a_req = &req->aead_req; + struct aead_request *aq = a_req->aead_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(aq); + size_t authsize = crypto_aead_authsize(tfm); + + /* C_ICV_Len is MAC size, 0x4 ~ 0x10 */ + sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3); + + /* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */ + sqe3->a_key_addr = sqe3->c_key_addr; + sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma); + sqe3->auth_mac_key |= SEC_NO_AUTH; + + if (dir) + sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3; + else + sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3; + + sqe3->a_len_key = cpu_to_le32(aq->assoclen); + sqe3->auth_src_offset = cpu_to_le16(0x0); + sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen); + sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma); } static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir, @@ -1081,26 +1803,26 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir, struct sec_aead_req *a_req = &req->aead_req; struct sec_cipher_req *c_req = &req->c_req; struct aead_request *aq = a_req->aead_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(aq); + size_t authsize = crypto_aead_authsize(tfm); sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma); - sec_sqe->type2.mac_key_alg = - cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE); + sec_sqe->type2.mac_key_alg = cpu_to_le32(BYTES_TO_WORDS(authsize)); sec_sqe->type2.mac_key_alg |= - cpu_to_le32((u32)((ctx->a_key_len) / - SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET); + cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET); sec_sqe->type2.mac_key_alg |= cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET); - sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET; - - if (dir) + if (dir) { + sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET; sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH; - else + } else { + sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET; sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER; - + } sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen); sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen); @@ -1116,11 +1838,72 @@ static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req) ret = sec_skcipher_bd_fill(ctx, req); if (unlikely(ret)) { - dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill is error!\n"); + dev_err(ctx->dev, "skcipher bd fill is 
error!\n"); return ret; } - sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe); + if (ctx->c_ctx.c_mode == SEC_CMODE_CCM || + ctx->c_ctx.c_mode == SEC_CMODE_GCM) + sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe); + else + sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe); + + return 0; +} + +static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir, + struct sec_req *req, struct sec_sqe3 *sqe3) +{ + struct sec_aead_req *a_req = &req->aead_req; + struct sec_cipher_req *c_req = &req->c_req; + struct aead_request *aq = a_req->aead_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(aq); + size_t authsize = crypto_aead_authsize(tfm); + + sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma); + + sqe3->auth_mac_key |= + cpu_to_le32(BYTES_TO_WORDS(authsize) << SEC_MAC_OFFSET_V3); + + sqe3->auth_mac_key |= + cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET_V3); + + sqe3->auth_mac_key |= + cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3); + + if (dir) { + sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1); + sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3; + } else { + sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2); + sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3; + } + sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen); + + sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen); + + sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma); +} + +static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req) +{ + struct sec_auth_ctx *auth_ctx = &ctx->a_ctx; + struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3; + int ret; + + ret = sec_skcipher_bd_fill_v3(ctx, req); + if (unlikely(ret)) { + dev_err(ctx->dev, "skcipher bd3 fill is error!\n"); + return ret; + } + + if (ctx->c_ctx.c_mode == SEC_CMODE_CCM || + ctx->c_ctx.c_mode == SEC_CMODE_GCM) + sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt, + req, sec_sqe3); + else + sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt, + req, sec_sqe3); return 0; } @@ -1129,75 +1912,53 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) { struct aead_request *a_req = req->aead_req.aead_req; struct crypto_aead *tfm = crypto_aead_reqtfm(a_req); - struct sec_aead_req *aead_req = &req->aead_req; - struct sec_cipher_req *c_req = &req->c_req; size_t authsize = crypto_aead_authsize(tfm); struct sec_qp_ctx *qp_ctx = req->qp_ctx; size_t sz; - atomic_dec(&qp_ctx->pending_reqs); - - if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt) - sec_update_iv(req, SEC_AEAD); - - /* Copy output mac */ - if (!err && c_req->encrypt) { - struct scatterlist *sgl = a_req->dst; - - sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), - aead_req->out_mac, - authsize, a_req->cryptlen + - a_req->assoclen); + if (!err && req->c_req.encrypt) { + if (c->c_ctx.c_mode == SEC_CMODE_CBC) + sec_update_iv(req, SEC_AEAD); + sz = sg_pcopy_from_buffer(a_req->dst, sg_nents(a_req->dst), req->aead_req.out_mac, + authsize, a_req->cryptlen + a_req->assoclen); if (unlikely(sz != authsize)) { - dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n"); + dev_err(c->dev, "copy out mac err!\n"); err = -EINVAL; } } - sec_free_req_id(req); + if (req->req_id >= 0) + sec_free_req_id(req); - if (req->fake_busy) - a_req->base.complete(&a_req->base, -EINPROGRESS); - - a_req->base.complete(&a_req->base, err); + crypto_request_complete(req->base, err); + sec_alg_send_backlog(c, qp_ctx); } -static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req) +static void sec_request_uninit(struct sec_req *req) { 
- struct sec_qp_ctx *qp_ctx = req->qp_ctx; - - atomic_dec(&qp_ctx->pending_reqs); - sec_free_req_id(req); - sec_free_queue_id(ctx, req); + if (req->req_id >= 0) + sec_free_req_id(req); } static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req) { struct sec_qp_ctx *qp_ctx; - int queue_id; + int i = 0; - /* To load balance */ - queue_id = sec_alloc_queue_id(ctx, req); - qp_ctx = &ctx->qp_ctx[queue_id]; + do { + qp_ctx = &ctx->qp_ctx[i]; + req->req_id = sec_alloc_req_id(req, qp_ctx); + } while (req->req_id < 0 && ++i < ctx->sec->ctx_q_num); - req->req_id = sec_alloc_req_id(req, qp_ctx); - if (unlikely(req->req_id < 0)) { - sec_free_queue_id(ctx, req); - return req->req_id; - } - - if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs)) - req->fake_busy = true; - else - req->fake_busy = false; + req->qp_ctx = qp_ctx; + req->backlog = &qp_ctx->backlog; return 0; } static int sec_process(struct sec_ctx *ctx, struct sec_req *req) { - struct sec_cipher_req *c_req = &req->c_req; int ret; ret = sec_request_init(ctx, req); @@ -1209,12 +1970,13 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req) goto err_uninit_req; /* Output IV as decrypto */ - if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) + if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC || + ctx->c_ctx.c_mode == SEC_CMODE_CTR)) sec_update_iv(req, ctx->alg_type); ret = ctx->req_op->bd_send(ctx, req); - if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) { - dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n"); + if (unlikely((ret != -EBUSY && ret != -EINPROGRESS))) { + dev_err_ratelimited(ctx->dev, "send sec request failed!\n"); goto err_send_req; } @@ -1224,17 +1986,23 @@ err_send_req: /* As failing, restore the IV from user */ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) { if (ctx->alg_type == SEC_SKCIPHER) - memcpy(req->c_req.sk_req->iv, c_req->c_ivin, + memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin, ctx->c_ctx.ivsize); else - memcpy(req->aead_req.aead_req->iv, c_req->c_ivin, + memcpy(req->aead_req.aead_req->iv, req->c_req.c_ivin, ctx->c_ctx.ivsize); } sec_request_untransfer(ctx, req); -err_uninit_req: - sec_request_uninit(ctx, req); +err_uninit_req: + sec_request_uninit(req); + if (ctx->alg_type == SEC_AEAD) + ret = sec_aead_soft_crypto(ctx, req->aead_req.aead_req, + req->c_req.encrypt); + else + ret = sec_skcipher_soft_crypto(ctx, req->c_req.sk_req, + req->c_req.encrypt); return ret; } @@ -1251,20 +2019,51 @@ static const struct sec_req_op sec_skcipher_req_ops = { static const struct sec_req_op sec_aead_req_ops = { .buf_map = sec_aead_sgl_map, .buf_unmap = sec_aead_sgl_unmap, - .do_transfer = sec_aead_copy_iv, + .do_transfer = sec_aead_set_iv, .bd_fill = sec_aead_bd_fill, .bd_send = sec_bd_send, .callback = sec_aead_callback, .process = sec_process, }; +static const struct sec_req_op sec_skcipher_req_ops_v3 = { + .buf_map = sec_skcipher_sgl_map, + .buf_unmap = sec_skcipher_sgl_unmap, + .do_transfer = sec_skcipher_copy_iv, + .bd_fill = sec_skcipher_bd_fill_v3, + .bd_send = sec_bd_send, + .callback = sec_skcipher_callback, + .process = sec_process, +}; + +static const struct sec_req_op sec_aead_req_ops_v3 = { + .buf_map = sec_aead_sgl_map, + .buf_unmap = sec_aead_sgl_unmap, + .do_transfer = sec_aead_set_iv, + .bd_fill = sec_aead_bd_fill_v3, + .bd_send = sec_bd_send, + .callback = sec_aead_callback, + .process = sec_process, +}; + static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm) { struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); 
+ int ret; - ctx->req_op = &sec_skcipher_req_ops; + ret = sec_skcipher_init(tfm); + if (ret) + return ret; + + if (ctx->sec->qm.ver < QM_HW_V3) { + ctx->type_supported = SEC_BD_TYPE2; + ctx->req_op = &sec_skcipher_req_ops; + } else { + ctx->type_supported = SEC_BD_TYPE3; + ctx->req_op = &sec_skcipher_req_ops_v3; + } - return sec_skcipher_init(tfm); + return ret; } static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm) @@ -1277,18 +2076,25 @@ static int sec_aead_init(struct crypto_aead *tfm) struct sec_ctx *ctx = crypto_aead_ctx(tfm); int ret; - crypto_aead_set_reqsize(tfm, sizeof(struct sec_req)); + crypto_aead_set_reqsize_dma(tfm, sizeof(struct sec_req)); ctx->alg_type = SEC_AEAD; ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm); - if (ctx->c_ctx.ivsize > SEC_IV_SIZE) { - dev_err(SEC_CTX_DEV(ctx), "get error aead iv size!\n"); + if (ctx->c_ctx.ivsize < SEC_AIV_SIZE || + ctx->c_ctx.ivsize > SEC_IV_SIZE) { + pr_err("get error aead iv size!\n"); return -EINVAL; } - ctx->req_op = &sec_aead_req_ops; ret = sec_ctx_base_init(ctx); if (ret) return ret; + if (ctx->sec->qm.ver < QM_HW_V3) { + ctx->type_supported = SEC_BD_TYPE2; + ctx->req_op = &sec_aead_req_ops; + } else { + ctx->type_supported = SEC_BD_TYPE3; + ctx->req_op = &sec_aead_req_ops_v3; + } ret = sec_auth_init(ctx); if (ret) @@ -1304,7 +2110,6 @@ err_cipher_init: sec_auth_uninit(ctx); err_auth_init: sec_ctx_base_uninit(ctx); - return ret; } @@ -1319,8 +2124,10 @@ static void sec_aead_exit(struct crypto_aead *tfm) static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name) { + struct aead_alg *alg = crypto_aead_alg(tfm); struct sec_ctx *ctx = crypto_aead_ctx(tfm); - struct sec_auth_ctx *auth_ctx = &ctx->a_ctx; + struct sec_auth_ctx *a_ctx = &ctx->a_ctx; + const char *aead_name = alg->base.cra_name; int ret; ret = sec_aead_init(tfm); @@ -1329,11 +2136,20 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name) return ret; } - auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); - if (IS_ERR(auth_ctx->hash_tfm)) { - dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n"); + a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); + if (IS_ERR(a_ctx->hash_tfm)) { + dev_err(ctx->dev, "aead alloc shash error!\n"); + sec_aead_exit(tfm); + return PTR_ERR(a_ctx->hash_tfm); + } + + a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0, + CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC); + if (IS_ERR(a_ctx->fallback_aead_tfm)) { + dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n"); + crypto_free_shash(ctx->a_ctx.hash_tfm); sec_aead_exit(tfm); - return PTR_ERR(auth_ctx->hash_tfm); + return PTR_ERR(a_ctx->fallback_aead_tfm); } return 0; @@ -1343,10 +2159,45 @@ static void sec_aead_ctx_exit(struct crypto_aead *tfm) { struct sec_ctx *ctx = crypto_aead_ctx(tfm); + crypto_free_aead(ctx->a_ctx.fallback_aead_tfm); crypto_free_shash(ctx->a_ctx.hash_tfm); sec_aead_exit(tfm); } +static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm) +{ + struct aead_alg *alg = crypto_aead_alg(tfm); + struct sec_ctx *ctx = crypto_aead_ctx(tfm); + struct sec_auth_ctx *a_ctx = &ctx->a_ctx; + const char *aead_name = alg->base.cra_name; + int ret; + + ret = sec_aead_init(tfm); + if (ret) { + dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n"); + return ret; + } + + a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0, + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC); + if (IS_ERR(a_ctx->fallback_aead_tfm)) { + dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n"); + sec_aead_exit(tfm); + return 
PTR_ERR(a_ctx->fallback_aead_tfm); + } + + return 0; +} + +static void sec_aead_xcm_ctx_exit(struct crypto_aead *tfm) +{ + struct sec_ctx *ctx = crypto_aead_ctx(tfm); + + crypto_free_aead(ctx->a_ctx.fallback_aead_tfm); + sec_aead_exit(tfm); +} + static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm) { return sec_aead_ctx_init(tfm, "sha1"); @@ -1362,16 +2213,51 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm) return sec_aead_ctx_init(tfm, "sha512"); } -static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq) +static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, struct sec_req *sreq) +{ + u32 cryptlen = sreq->c_req.sk_req->cryptlen; + struct device *dev = ctx->dev; + u8 c_mode = ctx->c_ctx.c_mode; + int ret = 0; + + switch (c_mode) { + case SEC_CMODE_XTS: + if (unlikely(cryptlen < AES_BLOCK_SIZE)) { + dev_err(dev, "skcipher XTS mode input length error!\n"); + ret = -EINVAL; + } + break; + case SEC_CMODE_ECB: + case SEC_CMODE_CBC: + if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) { + dev_err(dev, "skcipher AES input length error!\n"); + ret = -EINVAL; + } + break; + case SEC_CMODE_CTR: + break; + default: + ret = -EINVAL; + } + + return ret; +} + +static int sec_skcipher_param_check(struct sec_ctx *ctx, + struct sec_req *sreq, bool *need_fallback) { struct skcipher_request *sk_req = sreq->c_req.sk_req; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; u8 c_alg = ctx->c_ctx.c_alg; if (unlikely(!sk_req->src || !sk_req->dst)) { dev_err(dev, "skcipher input param error!\n"); return -EINVAL; } + + if (sk_req->cryptlen > MAX_INPUT_DATA_LEN) + *need_fallback = true; + sreq->c_req.c_len = sk_req->cryptlen; if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ) @@ -1386,35 +2272,71 @@ static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq) } return 0; } else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) { - if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) { - dev_err(dev, "skcipher aes input length error!\n"); - return -EINVAL; - } - return 0; + return sec_skcipher_cryptlen_check(ctx, sreq); } dev_err(dev, "skcipher algorithm error!\n"); + return -EINVAL; } +static int sec_skcipher_soft_crypto(struct sec_ctx *ctx, + struct skcipher_request *sreq, bool encrypt) +{ + struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm); + struct device *dev = ctx->dev; + int ret; + + if (!c_ctx->fbtfm) { + dev_err_ratelimited(dev, "the soft tfm isn't supported in the current system.\n"); + return -EINVAL; + } + + skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm); + + /* software need sync mode to do crypto */ + skcipher_request_set_callback(subreq, sreq->base.flags, + NULL, NULL); + skcipher_request_set_crypt(subreq, sreq->src, sreq->dst, + sreq->cryptlen, sreq->iv); + if (encrypt) + ret = crypto_skcipher_encrypt(subreq); + else + ret = crypto_skcipher_decrypt(subreq); + + skcipher_request_zero(subreq); + + return ret; +} + static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req); - struct sec_req *req = skcipher_request_ctx(sk_req); + struct sec_req *req = skcipher_request_ctx_dma(sk_req); struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + bool need_fallback = false; int ret; - if (!sk_req->cryptlen) + if (!sk_req->cryptlen) { + if (ctx->c_ctx.c_mode == SEC_CMODE_XTS) + return -EINVAL; return 0; + } + req->flag = sk_req->base.flags; req->c_req.sk_req = sk_req; req->c_req.encrypt = 
encrypt; req->ctx = ctx; + req->base = &sk_req->base; - ret = sec_skcipher_param_check(ctx, req); + ret = sec_skcipher_param_check(ctx, req, &need_fallback); if (unlikely(ret)) return -EINVAL; + if (unlikely(ctx->c_ctx.fallback || need_fallback)) + return sec_skcipher_soft_crypto(ctx, sk_req, encrypt); + return ctx->req_op->process(ctx, req); } @@ -1428,73 +2350,152 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req) return sec_skcipher_crypto(sk_req, false); } -#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \ - sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\ +#define SEC_SKCIPHER_ALG(sec_cra_name, sec_set_key, \ + sec_min_key_size, sec_max_key_size, blk_size, iv_size)\ {\ .base = {\ .cra_name = sec_cra_name,\ .cra_driver_name = "hisi_sec_"sec_cra_name,\ .cra_priority = SEC_PRIORITY,\ - .cra_flags = CRYPTO_ALG_ASYNC,\ + .cra_flags = CRYPTO_ALG_ASYNC |\ + CRYPTO_ALG_NEED_FALLBACK,\ .cra_blocksize = blk_size,\ .cra_ctxsize = sizeof(struct sec_ctx),\ .cra_module = THIS_MODULE,\ },\ - .init = ctx_init,\ - .exit = ctx_exit,\ + .init = sec_skcipher_ctx_init,\ + .exit = sec_skcipher_ctx_exit,\ .setkey = sec_set_key,\ .decrypt = sec_skcipher_decrypt,\ .encrypt = sec_skcipher_encrypt,\ .min_keysize = sec_min_key_size,\ .max_keysize = sec_max_key_size,\ .ivsize = iv_size,\ -}, +} -#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \ - max_key_size, blk_size, iv_size) \ - SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \ - sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size) +static struct sec_skcipher sec_skciphers[] = { + { + .alg_msk = BIT(0), + .alg = SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE, + AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0), + }, + { + .alg_msk = BIT(1), + .alg = SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE, + AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(2), + .alg = SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr, AES_MIN_KEY_SIZE, + AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(3), + .alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, SEC_XTS_MIN_KEY_SIZE, + SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(12), + .alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, AES_MIN_KEY_SIZE, + AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(13), + .alg = SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, AES_MIN_KEY_SIZE, + AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(14), + .alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, SEC_XTS_MIN_KEY_SIZE, + SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(23), + .alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE, + SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0), + }, + { + .alg_msk = BIT(24), + .alg = SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, SEC_DES3_3KEY_SIZE, + SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, + DES3_EDE_BLOCK_SIZE), + }, +}; -static struct skcipher_alg sec_skciphers[] = { - SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, - AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, - AES_BLOCK_SIZE, 0) +static int aead_iv_demension_check(struct aead_request *aead_req) +{ + u8 cl; + + cl = aead_req->iv[0] + 1; + if (cl < IV_CL_MIN || cl > IV_CL_MAX) + return -EINVAL; - SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, - AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, - AES_BLOCK_SIZE, AES_BLOCK_SIZE) + if (cl < 
IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl)) + return -EOVERFLOW; - SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, - SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE, - AES_BLOCK_SIZE, AES_BLOCK_SIZE) + return 0; +} - SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, - SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE, - DES3_EDE_BLOCK_SIZE, 0) +static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq) +{ + struct aead_request *req = sreq->aead_req.aead_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + size_t sz = crypto_aead_authsize(tfm); + u8 c_mode = ctx->c_ctx.c_mode; + int ret; - SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, - SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE, - DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE) + if (unlikely(ctx->sec->qm.ver == QM_HW_V2 && !sreq->c_req.c_len)) + return -EINVAL; - SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, - SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE, - AES_BLOCK_SIZE, AES_BLOCK_SIZE) + if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN || + req->assoclen > SEC_MAX_AAD_LEN)) + return -EINVAL; - SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, - AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, - AES_BLOCK_SIZE, AES_BLOCK_SIZE) -}; + if (c_mode == SEC_CMODE_CCM) { + if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) + return -EINVAL; -static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) + ret = aead_iv_demension_check(req); + if (unlikely(ret)) + return -EINVAL; + } else if (c_mode == SEC_CMODE_CBC) { + if (unlikely(sz & WORD_MASK)) + return -EINVAL; + if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK)) + return -EINVAL; + } else if (c_mode == SEC_CMODE_GCM) { + if (unlikely(sz < SEC_GCM_MIN_AUTH_SZ)) + return -EINVAL; + } + + return 0; +} + +static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq, bool *need_fallback) { - u8 c_alg = ctx->c_ctx.c_alg; struct aead_request *req = sreq->aead_req.aead_req; - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - size_t authsize = crypto_aead_authsize(tfm); + struct device *dev = ctx->dev; + u8 c_alg = ctx->c_ctx.c_alg; + + if (unlikely(!req->src || !req->dst)) { + dev_err(dev, "aead input param error!\n"); + return -EINVAL; + } - if (unlikely(!req->src || !req->dst || !req->cryptlen || - req->assoclen > SEC_MAX_AAD_LEN)) { - dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n"); + if (unlikely(ctx->c_ctx.c_mode == SEC_CMODE_CBC && + sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { + dev_err(dev, "aead cbc mode input data length error!\n"); + return -EINVAL; + } + + /* Support AES or SM4 */ + if (unlikely(c_alg != SEC_CALG_AES && c_alg != SEC_CALG_SM4)) { + dev_err(dev, "aead crypto alg error!\n"); + return -EINVAL; + } + + if (unlikely(sec_aead_spec_check(ctx, sreq))) { + *need_fallback = true; return -EINVAL; } @@ -1504,39 +2505,59 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) else sreq->use_pbuf = false; - /* Support AES only */ - if (unlikely(c_alg != SEC_CALG_AES)) { - dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n"); - return -EINVAL; + return 0; +} - } - if (sreq->c_req.encrypt) - sreq->c_req.c_len = req->cryptlen; - else - sreq->c_req.c_len = req->cryptlen - authsize; +static int sec_aead_soft_crypto(struct sec_ctx *ctx, + struct aead_request *aead_req, + bool encrypt) +{ + struct sec_auth_ctx *a_ctx = &ctx->a_ctx; + struct aead_request *subreq; + int ret; - if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { - dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n"); - return -EINVAL; - } 
+ subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL); + if (!subreq) + return -ENOMEM; - return 0; + aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm); + aead_request_set_callback(subreq, aead_req->base.flags, + aead_req->base.complete, aead_req->base.data); + aead_request_set_crypt(subreq, aead_req->src, aead_req->dst, + aead_req->cryptlen, aead_req->iv); + aead_request_set_ad(subreq, aead_req->assoclen); + + if (encrypt) + ret = crypto_aead_encrypt(subreq); + else + ret = crypto_aead_decrypt(subreq); + aead_request_free(subreq); + + return ret; } static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) { struct crypto_aead *tfm = crypto_aead_reqtfm(a_req); - struct sec_req *req = aead_request_ctx(a_req); + struct sec_req *req = aead_request_ctx_dma(a_req); struct sec_ctx *ctx = crypto_aead_ctx(tfm); + size_t sz = crypto_aead_authsize(tfm); + bool need_fallback = false; int ret; + req->flag = a_req->base.flags; req->aead_req.aead_req = a_req; req->c_req.encrypt = encrypt; req->ctx = ctx; + req->base = &a_req->base; + req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz); - ret = sec_aead_param_check(ctx, req); - if (unlikely(ret)) + ret = sec_aead_param_check(ctx, req, &need_fallback); + if (unlikely(ret)) { + if (need_fallback) + return sec_aead_soft_crypto(ctx, a_req, encrypt); return -EINVAL; + } return ctx->req_op->process(ctx, req); } @@ -1551,14 +2572,15 @@ static int sec_aead_decrypt(struct aead_request *a_req) return sec_aead_crypto(a_req, false); } -#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\ +#define SEC_AEAD_ALG(sec_cra_name, sec_set_key, ctx_init,\ ctx_exit, blk_size, iv_size, max_authsize)\ {\ .base = {\ .cra_name = sec_cra_name,\ .cra_driver_name = "hisi_sec_"sec_cra_name,\ .cra_priority = SEC_PRIORITY,\ - .cra_flags = CRYPTO_ALG_ASYNC,\ + .cra_flags = CRYPTO_ALG_ASYNC |\ + CRYPTO_ALG_NEED_FALLBACK,\ .cra_blocksize = blk_size,\ .cra_ctxsize = sizeof(struct sec_ctx),\ .cra_module = THIS_MODULE,\ @@ -1566,59 +2588,170 @@ static int sec_aead_decrypt(struct aead_request *a_req) .init = ctx_init,\ .exit = ctx_exit,\ .setkey = sec_set_key,\ + .setauthsize = sec_aead_setauthsize,\ .decrypt = sec_aead_decrypt,\ .encrypt = sec_aead_encrypt,\ .ivsize = iv_size,\ .maxauthsize = max_authsize,\ } -#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\ - SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\ - sec_aead_ctx_exit, blksize, ivsize, authsize) - -static struct aead_alg sec_aeads[] = { - SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", - sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init, - AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE), +static struct sec_aead sec_aeads[] = { + { + .alg_msk = BIT(6), + .alg = SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init, + sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE, + AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(7), + .alg = SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init, + sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE, + AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(17), + .alg = SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init, + sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE, + AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(18), + .alg = SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init, + sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE, + AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(43), + .alg = SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1, 
+ sec_aead_sha1_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE, + AES_BLOCK_SIZE, SHA1_DIGEST_SIZE), + }, + { + .alg_msk = BIT(44), + .alg = SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256, + sec_aead_sha256_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE, + AES_BLOCK_SIZE, SHA256_DIGEST_SIZE), + }, + { + .alg_msk = BIT(45), + .alg = SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512, + sec_aead_sha512_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE, + AES_BLOCK_SIZE, SHA512_DIGEST_SIZE), + }, +}; - SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", - sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init, - AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE), +static void sec_unregister_skcipher(u64 alg_mask, int end) +{ + int i; - SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", - sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init, - AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE), -}; + for (i = 0; i < end; i++) + if (sec_skciphers[i].alg_msk & alg_mask) + crypto_unregister_skcipher(&sec_skciphers[i].alg); +} -int sec_register_to_crypto(void) +static int sec_register_skcipher(u64 alg_mask) { - int ret = 0; + int i, ret, count; - /* To avoid repeat register */ - if (atomic_add_return(1, &sec_active_devs) == 1) { - ret = crypto_register_skciphers(sec_skciphers, - ARRAY_SIZE(sec_skciphers)); - if (ret) - return ret; + count = ARRAY_SIZE(sec_skciphers); + + for (i = 0; i < count; i++) { + if (!(sec_skciphers[i].alg_msk & alg_mask)) + continue; - ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads)); + ret = crypto_register_skcipher(&sec_skciphers[i].alg); if (ret) - goto reg_aead_fail; + goto err; } + return 0; + +err: + sec_unregister_skcipher(alg_mask, i); + return ret; +} + +static void sec_unregister_aead(u64 alg_mask, int end) +{ + int i; + + for (i = 0; i < end; i++) + if (sec_aeads[i].alg_msk & alg_mask) + crypto_unregister_aead(&sec_aeads[i].alg); +} + +static int sec_register_aead(u64 alg_mask) +{ + int i, ret, count; -reg_aead_fail: - crypto_unregister_skciphers(sec_skciphers, ARRAY_SIZE(sec_skciphers)); + count = ARRAY_SIZE(sec_aeads); + + for (i = 0; i < count; i++) { + if (!(sec_aeads[i].alg_msk & alg_mask)) + continue; + + ret = crypto_register_aead(&sec_aeads[i].alg); + if (ret) + goto err; + } + + return 0; + +err: + sec_unregister_aead(alg_mask, i); return ret; } -void sec_unregister_from_crypto(void) +int sec_register_to_crypto(struct hisi_qm *qm) { - if (atomic_sub_return(1, &sec_active_devs) == 0) { - crypto_unregister_skciphers(sec_skciphers, - ARRAY_SIZE(sec_skciphers)); - crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads)); + u64 alg_mask; + int ret = 0; + + alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB, + SEC_DRV_ALG_BITMAP_LOW_TB); + + mutex_lock(&sec_algs_lock); + if (sec_available_devs) { + sec_available_devs++; + goto unlock; } + + ret = sec_register_skcipher(alg_mask); + if (ret) + goto unlock; + + ret = sec_register_aead(alg_mask); + if (ret) + goto unreg_skcipher; + + sec_available_devs++; + mutex_unlock(&sec_algs_lock); + + return 0; + +unreg_skcipher: + sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers)); +unlock: + mutex_unlock(&sec_algs_lock); + return ret; +} + +void sec_unregister_from_crypto(struct hisi_qm *qm) +{ + u64 alg_mask; + + alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB, + SEC_DRV_ALG_BITMAP_LOW_TB); + + mutex_lock(&sec_algs_lock); + if (--sec_available_devs) + goto unlock; + + sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads)); + 
sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
+
+unlock:
+	mutex_unlock(&sec_algs_lock);
 }
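/*
 * The sec_register_to_crypto()/sec_unregister_from_crypto() pair above keeps
 * the algorithms registered only while at least one SEC device is bound: the
 * first device registers them, the last one unregisters them, all under
 * sec_algs_lock. A minimal sketch of that first-in/last-out pattern with
 * generic names — example_lock, example_dev_cnt and the
 * do_register_algs()/do_unregister_algs() helpers are placeholders, not
 * driver symbols.
 */
#include <linux/mutex.h>

int do_register_algs(void);		/* placeholder for the real registration */
void do_unregister_algs(void);		/* placeholder for the real teardown */

static DEFINE_MUTEX(example_lock);
static unsigned int example_dev_cnt;

int example_register(void)
{
	int ret = 0;

	mutex_lock(&example_lock);
	if (!example_dev_cnt) {
		/* only the first device actually registers the algorithms */
		ret = do_register_algs();
		if (ret)
			goto unlock;
	}
	example_dev_cnt++;
unlock:
	mutex_unlock(&example_lock);
	return ret;
}

void example_unregister(void)
{
	mutex_lock(&example_lock);
	/* only the last device going away tears the algorithms down */
	if (!--example_dev_cnt)
		do_unregister_algs();
	mutex_unlock(&example_lock);
}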
