author    | Linus Torvalds <torvalds@linux-foundation.org> | 2025-01-24 07:48:10 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-01-24 07:48:10 -0800
commit    | 454cb97726fe62a04b187a0d631ec0a69f6b713a (patch)
tree      | 16dae08192b97960398b9cb9cc92439fb1b7e4a6 /drivers
parent    | ae2d4fc540cd27d667d10597b6ad8cc4c6ce622a (diff)
parent    | 9d4f8e54cef2c42e23ef258833dbd06a1eaff89b (diff)
Merge tag 'v6.14-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"API:
- Remove physical address skcipher walking
- Fix boot-up self-test race
Algorithms:
- Optimisations for x86/aes-gcm
- Optimisations for x86/aes-xts
- Remove VMAC
- Remove keywrap
Drivers:
- Remove n2
Others:
- Fixes for padata UAF
- Fix potential rhashtable deadlock by moving schedule_work outside
lock"
* tag 'v6.14-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (75 commits)
rhashtable: Fix rhashtable_try_insert test
dt-bindings: crypto: qcom,inline-crypto-engine: Document the SM8750 ICE
dt-bindings: crypto: qcom,prng: Document SM8750 RNG
dt-bindings: crypto: qcom-qce: Document the SM8750 crypto engine
crypto: asymmetric_keys - Remove unused key_being_used_for[]
padata: avoid UAF for reorder_work
padata: fix UAF in padata_reorder
padata: add pd get/put refcnt helper
crypto: skcipher - call cond_resched() directly
crypto: skcipher - optimize initializing skcipher_walk fields
crypto: skcipher - clean up initialization of skcipher_walk::flags
crypto: skcipher - fold skcipher_walk_skcipher() into skcipher_walk_virt()
crypto: skcipher - remove redundant check for SKCIPHER_WALK_SLOW
crypto: skcipher - remove redundant clamping to page size
crypto: skcipher - remove unnecessary page alignment of bounce buffer
crypto: skcipher - document skcipher_walk_done() and rename some vars
crypto: omap - switch from scatter_walk to plain offset
crypto: powerpc/p10-aes-gcm - simplify handling of linear associated data
crypto: bcm - Drop unused setting of local 'ptr' variable
crypto: hisilicon/qm - support new function communication
...
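Among the commits listed above, "padata: add pd get/put refcnt helper" supports the UAF fixes by centralising reference counting. The sketch below shows the generic get/put helper pattern it alludes to, assuming a refcount_t-backed object; the struct and helper names are illustrative and differ from the real padata code.

```c
/* Hypothetical get/put refcount helpers; not the actual padata code. */
#include <linux/refcount.h>
#include <linux/slab.h>

struct pdemo {
	refcount_t refcnt;
	/* ... payload ... */
};

static void pdemo_get(struct pdemo *pd)
{
	refcount_inc(&pd->refcnt);
}

static void pdemo_put(struct pdemo *pd)
{
	/* Free only when the last reference is dropped, preventing use-after-free. */
	if (refcount_dec_and_test(&pd->refcnt))
		kfree(pd);
}
```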
Diffstat (limited to 'drivers')
32 files changed, 767 insertions, 2895 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 0a9cdd31cbd9..19ab145f912e 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -200,23 +200,6 @@ config S390_PRNG It is available as of z9. -config CRYPTO_DEV_NIAGARA2 - tristate "Niagara2 Stream Processing Unit driver" - select CRYPTO_LIB_DES - select CRYPTO_SKCIPHER - select CRYPTO_HASH - select CRYPTO_MD5 - select CRYPTO_SHA1 - select CRYPTO_SHA256 - depends on SPARC64 - help - Each core of a Niagara2 processor contains a Stream - Processing Unit, which itself contains several cryptographic - sub-units. One set provides the Modular Arithmetic Unit, - used for SSL offload. The other set provides the Cipher - Group, which can perform encryption, decryption, hashing, - checksumming, and raw copies. - config CRYPTO_DEV_SL3516 tristate "Storlink SL3516 crypto offloader" depends on ARCH_GEMINI || COMPILE_TEST diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index ad4ccef67d12..fef18ffdb128 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -21,8 +21,6 @@ obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/ obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o -obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o -n2_crypto-y := n2_core.o n2_asm.o obj-$(CONFIG_CRYPTO_DEV_NX) += nx/ obj-$(CONFIG_CRYPTO_DEV_OMAP) += omap-crypto.o obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes-driver.o diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c index 6283e8c6d51d..86c227caa722 100644 --- a/drivers/crypto/bcm/spu.c +++ b/drivers/crypto/bcm/spu.c @@ -836,7 +836,6 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms) u32 cipher_bits = 0; u32 ecf_bits = 0; u8 sctx_words = 0; - u8 *ptr = spu_hdr; flow_log("%s()\n", __func__); flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg, @@ -847,7 +846,6 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms) /* starting out: zero the header (plus some) */ memset(spu_hdr, 0, sizeof(struct SPUHEADER)); - ptr += sizeof(struct SPUHEADER); /* format master header word */ /* Do not set the next bit even though the datasheet says to */ @@ -861,10 +859,8 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms) /* copy the encryption keys in the SAD entry */ if (cipher_parms->alg) { - if (cipher_parms->key_len) { - ptr += cipher_parms->key_len; + if (cipher_parms->key_len) sctx_words += cipher_parms->key_len / 4; - } /* * if encrypting then set IV size, use SCTX IV unless no IV @@ -873,7 +869,6 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms) if (cipher_parms->iv_len) { /* Use SCTX IV */ ecf_bits |= SCTX_IV; - ptr += cipher_parms->iv_len; sctx_words += cipher_parms->iv_len / 4; } } diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c index 87781c1534ee..079a22cc9f02 100644 --- a/drivers/crypto/caam/blob_gen.c +++ b/drivers/crypto/caam/blob_gen.c @@ -2,6 +2,7 @@ /* * Copyright (C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de> * Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de> + * Copyright 2024 NXP */ #define pr_fmt(fmt) "caam blob_gen: " fmt @@ -104,7 +105,7 @@ int caam_process_blob(struct caam_blob_priv *priv, } ctrlpriv = dev_get_drvdata(jrdev->parent); - moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->ctrl->perfmon.status)); + moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status)); if 
(moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED) dev_warn(jrdev, "using insecure test key, enable HAB to use unique device key!\n"); diff --git a/drivers/crypto/ccp/dbc.c b/drivers/crypto/ccp/dbc.c index 5b105a23f699..410084a9039c 100644 --- a/drivers/crypto/ccp/dbc.c +++ b/drivers/crypto/ccp/dbc.c @@ -7,6 +7,8 @@ * Author: Mario Limonciello <mario.limonciello@amd.com> */ +#include <linux/mutex.h> + #include "dbc.h" #define DBC_DEFAULT_TIMEOUT (10 * MSEC_PER_SEC) @@ -137,64 +139,49 @@ static long dbc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return -ENODEV; dbc_dev = psp_master->dbc_data; - mutex_lock(&dbc_dev->ioctl_mutex); + guard(mutex)(&dbc_dev->ioctl_mutex); switch (cmd) { case DBCIOCNONCE: - if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_nonce))) { - ret = -EFAULT; - goto unlock; - } + if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_nonce))) + return -EFAULT; ret = send_dbc_nonce(dbc_dev); if (ret) - goto unlock; + return ret; - if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_nonce))) { - ret = -EFAULT; - goto unlock; - } + if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_nonce))) + return -EFAULT; break; case DBCIOCUID: - if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_setuid))) { - ret = -EFAULT; - goto unlock; - } + if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_setuid))) + return -EFAULT; *dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_setuid); ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_UID); if (ret) - goto unlock; + return ret; - if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_setuid))) { - ret = -EFAULT; - goto unlock; - } + if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_setuid))) + return -EFAULT; break; case DBCIOCPARAM: - if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_param))) { - ret = -EFAULT; - goto unlock; - } + if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_param))) + return -EFAULT; *dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_param); ret = send_dbc_parameter(dbc_dev); if (ret) - goto unlock; + return ret; - if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_param))) { - ret = -EFAULT; - goto unlock; - } + if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_param))) + return -EFAULT; break; default: - ret = -EINVAL; - + return -EINVAL; } -unlock: - mutex_unlock(&dbc_dev->ioctl_mutex); - return ret; + return 0; } static const struct file_operations dbc_fops = { diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 96fde9437b4b..f5b47e5ff48a 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -1209,7 +1209,6 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->mode = uacce_mode; qm->pdev = pdev; - qm->ver = pdev->revision; qm->sqe_size = HPRE_SQE_SIZE; qm->dev_name = hpre_name; @@ -1396,6 +1395,17 @@ static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm) return ACC_ERR_RECOVERED; } +static bool hpre_dev_is_abnormal(struct hisi_qm *qm) +{ + u32 err_status; + + err_status = hpre_get_hw_err_status(qm); + if (err_status & qm->err_info.dev_shutdown_mask) + return true; + + return false; +} + static void hpre_err_info_init(struct hisi_qm *qm) { struct hisi_qm_err_info *err_info = &qm->err_info; @@ -1428,6 +1438,7 @@ static const struct hisi_qm_err_ini hpre_err_ini = 
{ .show_last_dfx_regs = hpre_show_last_dfx_regs, .err_info_init = hpre_err_info_init, .get_err_result = hpre_get_err_result, + .dev_is_abnormal = hpre_dev_is_abnormal, }; static int hpre_pf_probe_init(struct hpre *hpre) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 19c1b5d3c954..d3f5d108b898 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -30,8 +30,6 @@ /* mailbox */ #define QM_MB_PING_ALL_VFS 0xffff -#define QM_MB_CMD_DATA_SHIFT 32 -#define QM_MB_CMD_DATA_MASK GENMASK(31, 0) #define QM_MB_STATUS_MASK GENMASK(12, 9) /* sqc shift */ @@ -102,6 +100,8 @@ #define QM_PM_CTRL 0x100148 #define QM_IDLE_DISABLE BIT(9) +#define QM_SUB_VERSION_ID 0x210 + #define QM_VFT_CFG_DATA_L 0x100064 #define QM_VFT_CFG_DATA_H 0x100068 #define QM_SQC_VFT_BUF_SIZE (7ULL << 8) @@ -119,6 +119,7 @@ #define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0) #define QM_SQC_VFT_NUM_SHIFT_V2 45 #define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0) +#define QM_MAX_QC_TYPE 2 #define QM_ABNORMAL_INT_SOURCE 0x100000 #define QM_ABNORMAL_INT_MASK 0x100004 @@ -176,6 +177,10 @@ #define QM_IFC_INT_MASK 0x0024 #define QM_IFC_INT_STATUS 0x0028 #define QM_IFC_INT_SET_V 0x002C +#define QM_PF2VF_PF_W 0x104700 +#define QM_VF2PF_PF_R 0x104800 +#define QM_VF2PF_VF_W 0x320 +#define QM_PF2VF_VF_R 0x380 #define QM_IFC_SEND_ALL_VFS GENMASK(6, 0) #define QM_IFC_INT_SOURCE_CLR GENMASK(63, 0) #define QM_IFC_INT_SOURCE_MASK BIT(0) @@ -185,8 +190,11 @@ #define QM_WAIT_DST_ACK 10 #define QM_MAX_PF_WAIT_COUNT 10 #define QM_MAX_VF_WAIT_COUNT 40 -#define QM_VF_RESET_WAIT_US 20000 -#define QM_VF_RESET_WAIT_CNT 3000 +#define QM_VF_RESET_WAIT_US 20000 +#define QM_VF_RESET_WAIT_CNT 3000 +#define QM_VF2PF_REG_SIZE 4 +#define QM_IFC_CMD_MASK GENMASK(31, 0) +#define QM_IFC_DATA_SHIFT 32 #define QM_VF_RESET_WAIT_TIMEOUT_US \ (QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT) @@ -234,8 +242,6 @@ #define QM_QOS_MAX_CIR_U 6 #define QM_AUTOSUSPEND_DELAY 3000 -#define QM_DEV_ALG_MAX_LEN 256 - /* abnormal status value for stopping queue */ #define QM_STOP_QUEUE_FAIL 1 #define QM_DUMP_SQC_FAIL 3 @@ -276,7 +282,7 @@ enum qm_alg_type { ALG_TYPE_1, }; -enum qm_mb_cmd { +enum qm_ifc_cmd { QM_PF_FLR_PREPARE = 0x01, QM_PF_SRST_PREPARE, QM_PF_RESET_DONE, @@ -333,6 +339,7 @@ static const struct hisi_qm_cap_info qm_cap_info_comm[] = { {QM_SUPPORT_STOP_FUNC, 0x3100, 0, BIT(10), 0x0, 0x0, 0x1}, {QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1}, {QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1}, + {QM_SUPPORT_DAE, 0x3100, 0, BIT(15), 0x0, 0x0, 0x0}, }; static const struct hisi_qm_cap_info qm_cap_info_pf[] = { @@ -396,6 +403,11 @@ struct hisi_qm_hw_ops { void (*hw_error_uninit)(struct hisi_qm *qm); enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm); int (*set_msi)(struct hisi_qm *qm, bool set); + + /* (u64)msg = (u32)data << 32 | (enum qm_ifc_cmd)cmd */ + int (*set_ifc_begin)(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num); + void (*set_ifc_end)(struct hisi_qm *qm); + int (*get_ifc)(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num); }; struct hisi_qm_hw_error { @@ -501,15 +513,20 @@ static u32 qm_get_dev_err_status(struct hisi_qm *qm) /* Check if the error causes the master ooo block */ static bool qm_check_dev_error(struct hisi_qm *qm) { - u32 val, dev_val; + struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); + u32 err_status; - if (qm->fun_type == QM_HW_VF) + if (pf_qm->fun_type == QM_HW_VF) return false; - val = qm_get_hw_error_status(qm) & 
qm->err_info.qm_shutdown_mask; - dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask; + err_status = qm_get_hw_error_status(pf_qm); + if (err_status & pf_qm->err_info.qm_shutdown_mask) + return true; + + if (pf_qm->err_ini->dev_is_abnormal) + return pf_qm->err_ini->dev_is_abnormal(pf_qm); - return val || dev_val; + return false; } static int qm_wait_reset_finish(struct hisi_qm *qm) @@ -654,7 +671,6 @@ EXPORT_SYMBOL_GPL(hisi_qm_mb); /* op 0: set xqc information to hardware, 1: get xqc information from hardware. */ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op) { - struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); struct qm_mailbox mailbox; dma_addr_t xqc_dma; void *tmp_xqc; @@ -688,7 +704,7 @@ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op } /* Setting xqc will fail if master OOO is blocked. */ - if (qm_check_dev_error(pf_qm)) { + if (qm_check_dev_error(qm)) { dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stop!\n"); return -EIO; } @@ -855,10 +871,10 @@ int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *d strcat(algs, dev_algs[i].alg); ptr = strrchr(algs, '\n'); - if (ptr) { + if (ptr) *ptr = '\0'; - qm->uacce->algs = algs; - } + + qm->uacce->algs = algs; return 0; } @@ -1052,11 +1068,10 @@ static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id) static void qm_reset_function(struct hisi_qm *qm) { - struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); struct device *dev = &qm->pdev->dev; int ret; - if (qm_check_dev_error(pf_qm)) + if (qm_check_dev_error(qm)) return; ret = qm_reset_prepare_ready(qm); @@ -1540,17 +1555,15 @@ static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask) static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id) { struct device *dev = &qm->pdev->dev; - u32 cmd; - u64 msg; + enum qm_ifc_cmd cmd; int ret; - ret = qm_get_mb_cmd(qm, &msg, vf_id); + ret = qm->ops->get_ifc(qm, &cmd, NULL, vf_id); if (ret) { - dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id); + dev_err(dev, "failed to get command from VF(%u)!\n", vf_id); return; } - cmd = msg & QM_MB_CMD_DATA_MASK; switch (cmd) { case QM_VF_PREPARE_FAIL: dev_err(dev, "failed to stop VF(%u)!\n", vf_id); @@ -1562,7 +1575,7 @@ static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id) case QM_VF_START_DONE: break; default: - dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id); + dev_err(dev, "unsupported command(0x%x) sent by VF(%u)!\n", cmd, vf_id); break; } } @@ -1630,17 +1643,14 @@ static void qm_trigger_pf_interrupt(struct hisi_qm *qm) writel(val, qm->io_base + QM_IFC_INT_SET_V); } -static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num) +static int qm_ping_single_vf(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num) { struct device *dev = &qm->pdev->dev; - struct qm_mailbox mailbox; int cnt = 0; u64 val; int ret; - qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0); - mutex_lock(&qm->mailbox_lock); - ret = qm_mb_nolock(qm, &mailbox); + ret = qm->ops->set_ifc_begin(qm, cmd, data, fun_num); if (ret) { dev_err(dev, "failed to send command to vf(%u)!\n", fun_num); goto err_unlock; @@ -1662,27 +1672,23 @@ static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num) } err_unlock: - mutex_unlock(&qm->mailbox_lock); + qm->ops->set_ifc_end(qm); return ret; } -static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd) +static int qm_ping_all_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd) { struct device *dev = 
&qm->pdev->dev; u32 vfs_num = qm->vfs_num; - struct qm_mailbox mailbox; u64 val = 0; int cnt = 0; int ret; u32 i; - qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0); - mutex_lock(&qm->mailbox_lock); - /* PF sends command to all VFs by mailbox */ - ret = qm_mb_nolock(qm, &mailbox); + ret = qm->ops->set_ifc_begin(qm, cmd, 0, QM_MB_PING_ALL_VFS); if (ret) { - dev_err(dev, "failed to send command to VFs!\n"); - mutex_unlock(&qm->mailbox_lock); + dev_err(dev, "failed to send command(0x%x) to all vfs!\n", cmd); + qm->ops->set_ifc_end(qm); return ret; } @@ -1692,7 +1698,7 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd) val = readq(qm->io_base + QM_IFC_READY_STATUS); /* If all VFs acked, PF notifies VFs successfully. */ if (!(val & GENMASK(vfs_num, 1))) { - mutex_unlock(&qm->mailbox_lock); + qm->ops->set_ifc_end(qm); return 0; } @@ -1700,7 +1706,7 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd) break; } - mutex_unlock(&qm->mailbox_lock); + qm->ops->set_ifc_end(qm); /* Check which vf respond timeout. */ for (i = 1; i <= vfs_num; i++) { @@ -1711,18 +1717,15 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd) return -ETIMEDOUT; } -static int qm_ping_pf(struct hisi_qm *qm, u64 cmd) +static int qm_ping_pf(struct hisi_qm *qm, enum qm_ifc_cmd cmd) { - struct qm_mailbox mailbox; int cnt = 0; u32 val; int ret; - qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0); - mutex_lock(&qm->mailbox_lock); - ret = qm_mb_nolock(qm, &mailbox); + ret = qm->ops->set_ifc_begin(qm, cmd, 0, 0); if (ret) { - dev_err(&qm->pdev->dev, "failed to send command to PF!\n"); + dev_err(&qm->pdev->dev, "failed to send command(0x%x) to PF!\n", cmd); goto unlock; } @@ -1741,7 +1744,8 @@ static int qm_ping_pf(struct hisi_qm *qm, u64 cmd) } unlock: - mutex_unlock(&qm->mailbox_lock); + qm->ops->set_ifc_end(qm); + return ret; } @@ -1842,6 +1846,94 @@ static int qm_set_msi_v3(struct hisi_qm *qm, bool set) return ret; } +static int qm_set_ifc_begin_v3(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num) +{ + struct qm_mailbox mailbox; + u64 msg; + + msg = cmd | (u64)data << QM_IFC_DATA_SHIFT; + + qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, msg, fun_num, 0); + mutex_lock(&qm->mailbox_lock); + return qm_mb_nolock(qm, &mailbox); +} + +static void qm_set_ifc_end_v3(struct hisi_qm *qm) +{ + mutex_unlock(&qm->mailbox_lock); +} + +static int qm_get_ifc_v3(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num) +{ + u64 msg; + int ret; + + ret = qm_get_mb_cmd(qm, &msg, fun_num); + if (ret) + return ret; + + *cmd = msg & QM_IFC_CMD_MASK; + + if (data) + *data = msg >> QM_IFC_DATA_SHIFT; + + return 0; +} + +static int qm_set_ifc_begin_v4(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num) +{ + uintptr_t offset; + u64 msg; + + if (qm->fun_type == QM_HW_PF) + offset = QM_PF2VF_PF_W; + else + offset = QM_VF2PF_VF_W; + + msg = cmd | (u64)data << QM_IFC_DATA_SHIFT; + + mutex_lock(&qm->ifc_lock); + writeq(msg, qm->io_base + offset); + + return 0; +} + +static void qm_set_ifc_end_v4(struct hisi_qm *qm) +{ + mutex_unlock(&qm->ifc_lock); +} + +static u64 qm_get_ifc_pf(struct hisi_qm *qm, u32 fun_num) +{ + uintptr_t offset; + + offset = QM_VF2PF_PF_R + QM_VF2PF_REG_SIZE * fun_num; + + return (u64)readl(qm->io_base + offset); +} + +static u64 qm_get_ifc_vf(struct hisi_qm *qm) +{ + return readq(qm->io_base + QM_PF2VF_VF_R); +} + +static int qm_get_ifc_v4(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num) +{ + u64 msg; + + if (qm->fun_type == QM_HW_PF) + msg = 
qm_get_ifc_pf(qm, fun_num); + else + msg = qm_get_ifc_vf(qm); + + *cmd = msg & QM_IFC_CMD_MASK; + + if (data) + *data = msg >> QM_IFC_DATA_SHIFT; + + return 0; +} + static const struct hisi_qm_hw_ops qm_hw_ops_v1 = { .qm_db = qm_db_v1, .hw_error_init = qm_hw_error_init_v1, @@ -1864,6 +1956,21 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v3 = { .hw_error_uninit = qm_hw_error_uninit_v3, .hw_error_handle = qm_hw_error_handle_v2, .set_msi = qm_set_msi_v3, + .set_ifc_begin = qm_set_ifc_begin_v3, + .set_ifc_end = qm_set_ifc_end_v3, + .get_ifc = qm_get_ifc_v3, +}; + +static const struct hisi_qm_hw_ops qm_hw_ops_v4 = { + .get_vft = qm_get_vft_v2, + .qm_db = qm_db_v2, + .hw_error_init = qm_hw_error_init_v3, + .hw_error_uninit = qm_hw_error_uninit_v3, + .hw_error_handle = qm_hw_error_handle_v2, + .set_msi = qm_set_msi_v3, + .set_ifc_begin = qm_set_ifc_begin_v4, + .set_ifc_end = qm_set_ifc_end_v4, + .get_ifc = qm_get_ifc_v4, }; static void *qm_get_avail_sqe(struct hisi_qp *qp) @@ -2156,12 +2263,11 @@ static int qm_wait_qp_empty(struct hisi_qm *qm, u32 *state, u32 qp_id) static int qm_drain_qp(struct hisi_qp *qp) { struct hisi_qm *qm = qp->qm; - struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); u32 state = 0; int ret; /* No need to judge if master OOO is blocked. */ - if (qm_check_dev_error(pf_qm)) + if (qm_check_dev_error(qm)) return 0; /* HW V3 supports drain qp by device */ @@ -2475,7 +2581,7 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd, sizeof(struct hisi_qp_ctx))) return -EFAULT; - if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1) + if (qp_ctx.qc_type > QM_MAX_QC_TYPE) return -EINVAL; qm_set_sqctype(q, qp_ctx.qc_type); @@ -2843,11 +2949,14 @@ static void hisi_qm_pre_init(struct hisi_qm *qm) qm->ops = &qm_hw_ops_v1; else if (qm->ver == QM_HW_V2) qm->ops = &qm_hw_ops_v2; - else + else if (qm->ver == QM_HW_V3) qm->ops = &qm_hw_ops_v3; + else + qm->ops = &qm_hw_ops_v4; pci_set_drvdata(pdev, qm); mutex_init(&qm->mailbox_lock); + mutex_init(&qm->ifc_lock); init_rwsem(&qm->qps_lock); qm->qp_in_used = 0; if (test_bit(QM_SUPPORT_RPM, &qm->caps)) { @@ -3607,7 +3716,6 @@ static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index) static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num) { struct device *dev = &qm->pdev->dev; - u64 mb_cmd; u32 qos; int ret; @@ -3617,10 +3725,9 @@ static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num) return; } - mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT; - ret = qm_ping_single_vf(qm, mb_cmd, fun_num); + ret = qm_ping_single_vf(qm, QM_PF_SET_QOS, qos, fun_num); if (ret) - dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num); + dev_err(dev, "failed to send command(0x%x) to VF(%u)!\n", QM_PF_SET_QOS, fun_num); } static int qm_vf_read_qos(struct hisi_qm *qm) @@ -4109,7 +4216,7 @@ stop_fail: return ret; } -static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd, +static int qm_try_stop_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd, enum qm_stop_reason stop_reason) { struct pci_dev *pdev = qm->pdev; @@ -4122,7 +4229,7 @@ static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd, if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { ret = qm_ping_all_vfs(qm, cmd); if (ret) - pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n"); + pci_err(pdev, "failed to send command to all VFs before PF reset!\n"); } else { ret = qm_vf_reset_prepare(qm, stop_reason); if (ret) @@ -4137,6 +4244,12 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm) struct pci_dev *pdev = qm->pdev; int ret; + if 
(qm->err_ini->set_priv_status) { + ret = qm->err_ini->set_priv_status(qm); + if (ret) + return ret; + } + ret = qm_reset_prepare_ready(qm); if (ret) { pci_err(pdev, "Controller reset not ready!\n"); @@ -4298,7 +4411,7 @@ restart_fail: return ret; } -static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd) +static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd) { struct pci_dev *pdev = qm->pdev; int ret; @@ -4527,7 +4640,7 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev) * Check whether there is an ECC mbit error, If it occurs, need to * wait for soft reset to fix it. */ - while (qm_check_dev_error(pf_qm)) { + while (qm_check_dev_error(qm)) { msleep(++delay); if (delay > QM_RESET_WAIT_TIMEOUT) return; @@ -4675,7 +4788,7 @@ static void hisi_qm_controller_reset(struct work_struct *rst_work) static void qm_pf_reset_vf_prepare(struct hisi_qm *qm, enum qm_stop_reason stop_reason) { - enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE; + enum qm_ifc_cmd cmd = QM_VF_PREPARE_DONE; struct pci_dev *pdev = qm->pdev; int ret; @@ -4709,7 +4822,7 @@ out: static void qm_pf_reset_vf_done(struct hisi_qm *qm) { - enum qm_mb_cmd cmd = QM_VF_START_DONE; + enum qm_ifc_cmd cmd = QM_VF_START_DONE; struct pci_dev *pdev = qm->pdev; int ret; @@ -4732,7 +4845,6 @@ static int qm_wait_pf_reset_finish(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; u32 val, cmd; - u64 msg; int ret; /* Wait for reset to finish */ @@ -4749,16 +4861,15 @@ static int qm_wait_pf_reset_finish(struct hisi_qm *qm) * Whether message is got successfully, * VF needs to ack PF by clearing the interrupt. */ - ret = qm_get_mb_cmd(qm, &msg, 0); + ret = qm->ops->get_ifc(qm, &cmd, NULL, 0); qm_clear_cmd_interrupt(qm, 0); if (ret) { - dev_err(dev, "failed to get msg from PF in reset done!\n"); + dev_err(dev, "failed to get command from PF in reset done!\n"); return ret; } - cmd = msg & QM_MB_CMD_DATA_MASK; if (cmd != QM_PF_RESET_DONE) { - dev_err(dev, "the cmd(%u) is not reset done!\n", cmd); + dev_err(dev, "the command(0x%x) is not reset done!\n", cmd); ret = -EINVAL; } @@ -4795,22 +4906,21 @@ err_get_status: static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num) { struct device *dev = &qm->pdev->dev; - u64 msg; - u32 cmd; + enum qm_ifc_cmd cmd; + u32 data; int ret; /* * Get the msg from source by sending mailbox. Whether message is got * successfully, destination needs to ack source by clearing the interrupt. 
*/ - ret = qm_get_mb_cmd(qm, &msg, fun_num); + ret = qm->ops->get_ifc(qm, &cmd, &data, fun_num); qm_clear_cmd_interrupt(qm, BIT(fun_num)); if (ret) { - dev_err(dev, "failed to get msg from source!\n"); + dev_err(dev, "failed to get command from source!\n"); return; } - cmd = msg & QM_MB_CMD_DATA_MASK; switch (cmd) { case QM_PF_FLR_PREPARE: qm_pf_reset_vf_process(qm, QM_DOWN); @@ -4822,10 +4932,10 @@ static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num) qm_vf_get_qos(qm, fun_num); break; case QM_PF_SET_QOS: - qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT; + qm->mb_qos = data; break; default: - dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num); + dev_err(dev, "unsupported command(0x%x) sent by function(%u)!\n", cmd, fun_num); break; } } @@ -5167,6 +5277,20 @@ static int qm_get_hw_caps(struct hisi_qm *qm) return qm_pre_store_caps(qm); } +static void qm_get_version(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + u32 sub_version_id; + + qm->ver = pdev->revision; + + if (pdev->revision == QM_HW_V3) { + sub_version_id = readl(qm->io_base + QM_SUB_VERSION_ID); + if (sub_version_id) + qm->ver = sub_version_id; + } +} + static int qm_get_pci_res(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -5186,6 +5310,8 @@ static int qm_get_pci_res(struct hisi_qm *qm) goto err_request_mem_regions; } + qm_get_version(qm); + ret = qm_get_hw_caps(qm); if (ret) goto err_ioremap; @@ -5205,6 +5331,7 @@ static int qm_get_pci_res(struct hisi_qm *qm) qm->db_interval = 0; } + hisi_qm_pre_init(qm); ret = qm_get_qp_num(qm); if (ret) goto err_db_ioremap; @@ -5247,6 +5374,14 @@ static int qm_clear_device(struct hisi_qm *qm) return ret; } + if (qm->err_ini->set_priv_status) { + ret = qm->err_ini->set_priv_status(qm); + if (ret) { + writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL); + return ret; + } + } + return qm_reset_device(qm); } @@ -5461,8 +5596,6 @@ int hisi_qm_init(struct hisi_qm *qm) struct device *dev = &pdev->dev; int ret; - hisi_qm_pre_init(qm); - ret = hisi_qm_pci_init(qm); if (ret) return ret; @@ -5598,6 +5731,12 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm) if (ret) return ret; + if (qm->err_ini->set_priv_status) { + ret = qm->err_ini->set_priv_status(qm); + if (ret) + return ret; + } + ret = qm_set_pf_mse(qm, false); if (ret) pci_err(pdev, "failed to disable MSE before suspending!\n"); diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 356188bee6fb..4b9970230822 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -37,6 +37,7 @@ struct sec_aead_req { u8 *a_ivin; dma_addr_t a_ivin_dma; struct aead_request *aead_req; + bool fallback; }; /* SEC request of Crypto */ @@ -90,9 +91,7 @@ struct sec_auth_ctx { dma_addr_t a_key_dma; u8 *a_key; u8 a_key_len; - u8 mac_len; u8 a_alg; - bool fallback; struct crypto_shash *hash_tfm; struct crypto_aead *fallback_aead_tfm; }; diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index ae9ebbb4103d..66bc07da9eb6 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -948,15 +948,14 @@ static int sec_aead_mac_init(struct sec_aead_req *req) struct aead_request *aead_req = req->aead_req; struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req); size_t authsize = crypto_aead_authsize(tfm); - u8 *mac_out = req->out_mac; struct scatterlist *sgl = aead_req->src; + u8 *mac_out = req->out_mac; size_t copy_size; off_t skip_size; /* Copy input mac */ 
skip_size = aead_req->assoclen + aead_req->cryptlen - authsize; - copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, - authsize, skip_size); + copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size); if (unlikely(copy_size != authsize)) return -EINVAL; @@ -1120,10 +1119,7 @@ static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize) struct sec_ctx *ctx = crypto_tfm_ctx(tfm); struct sec_auth_ctx *a_ctx = &ctx->a_ctx; - if (unlikely(a_ctx->fallback_aead_tfm)) - return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize); - - return 0; + return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize); } static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx, @@ -1139,7 +1135,6 @@ static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx, static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, const u32 keylen, const enum sec_hash_alg a_alg, const enum sec_calg c_alg, - const enum sec_mac_len mac_len, const enum sec_cmode c_mode) { struct sec_ctx *ctx = crypto_aead_ctx(tfm); @@ -1151,7 +1146,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, ctx->a_ctx.a_alg = a_alg; ctx->c_ctx.c_alg = c_alg; - ctx->a_ctx.mac_len = mac_len; c_ctx->c_mode = c_mode; if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) { @@ -1162,13 +1156,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, } memcpy(c_ctx->c_key, key, keylen); - if (unlikely(a_ctx->fallback_aead_tfm)) { - ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen); - if (ret) - return ret; - } - - return 0; + return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen); } ret = crypto_authenc_extractkeys(&keys, key, keylen); @@ -1187,10 +1175,15 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, goto bad_key; } - if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) || - (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) { + if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) { ret = -EINVAL; - dev_err(dev, "MAC or AUTH key length error!\n"); + dev_err(dev, "AUTH key length error!\n"); + goto bad_key; + } + + ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen); + if (ret) { + dev_err(dev, "set sec fallback key err!\n"); goto bad_key; } @@ -1202,27 +1195,19 @@ bad_key: } -#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode) \ -static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, \ - u32 keylen) \ -{ \ - return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\ -} - -GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, - SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC) -GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, - SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC) -GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, - SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC) -GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, - SEC_HMAC_CCM_MAC, SEC_CMODE_CCM) -GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, - SEC_HMAC_GCM_MAC, SEC_CMODE_GCM) -GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, - SEC_HMAC_CCM_MAC, SEC_CMODE_CCM) -GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, - SEC_HMAC_GCM_MAC, SEC_CMODE_GCM) +#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode) \ +static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen) \ +{ \ + return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode); \ +} + +GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC) 
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC) +GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC) +GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM) +GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM) +GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM) +GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM) static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req) { @@ -1470,9 +1455,10 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req, static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req) { struct aead_request *aead_req = req->aead_req.aead_req; - struct sec_cipher_req *c_req = &req->c_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req); + size_t authsize = crypto_aead_authsize(tfm); struct sec_aead_req *a_req = &req->aead_req; - size_t authsize = ctx->a_ctx.mac_len; + struct sec_cipher_req *c_req = &req->c_req; u32 data_size = aead_req->cryptlen; u8 flage = 0; u8 cm, cl; @@ -1513,10 +1499,8 @@ static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req) static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req) { struct aead_request *aead_req = req->aead_req.aead_req; - struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req); - size_t authsize = crypto_aead_authsize(tfm); - struct sec_cipher_req *c_req = &req->c_req; struct sec_aead_req *a_req = &req->aead_req; + struct sec_cipher_req *c_req = &req->c_req; memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize); @@ -1524,15 +1508,11 @@ static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req) /* * CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter}, * the counter must set to 0x01 + * CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */ - ctx->a_ctx.mac_len = authsize; - /* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */ set_aead_auth_iv(ctx, req); - } - - /* GCM 12Byte Cipher_IV == Auth_IV */ - if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) { - ctx->a_ctx.mac_len = authsize; + } else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) { + /* GCM 12Byte Cipher_IV == Auth_IV */ memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE); } } @@ -1542,9 +1522,11 @@ static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir, { struct sec_aead_req *a_req = &req->aead_req; struct aead_request *aq = a_req->aead_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(aq); + size_t authsize = crypto_aead_authsize(tfm); /* C_ICV_Len is MAC size, 0x4 ~ 0x10 */ - sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len); + sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize); /* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */ sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr; @@ -1568,9 +1550,11 @@ static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir, { struct sec_aead_req *a_req = &req->aead_req; struct aead_request *aq = a_req->aead_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(aq); + size_t authsize = crypto_aead_authsize(tfm); /* C_ICV_Len is MAC size, 0x4 ~ 0x10 */ - sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3); + sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3); /* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */ sqe3->a_key_addr = sqe3->c_key_addr; @@ -1594,11 +1578,12 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir, struct sec_aead_req *a_req = &req->aead_req; struct sec_cipher_req *c_req = 
&req->c_req; struct aead_request *aq = a_req->aead_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(aq); + size_t authsize = crypto_aead_authsize(tfm); sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma); - sec_sqe->type2.mac_key_alg = - cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE); + sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE); sec_sqe->type2.mac_key_alg |= cpu_to_le32((u32)((ctx->a_key_len) / @@ -1648,11 +1633,13 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir, struct sec_aead_req *a_req = &req->aead_req; struct sec_cipher_req *c_req = &req->c_req; struct aead_request *aq = a_req->aead_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(aq); + size_t authsize = crypto_aead_authsize(tfm); sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma); sqe3->auth_mac_key |= - cpu_to_le32((u32)(ctx->mac_len / + cpu_to_le32((u32)(authsize / SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3); sqe3->auth_mac_key |= @@ -1703,9 +1690,9 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) { struct aead_request *a_req = req->aead_req.aead_req; struct crypto_aead *tfm = crypto_aead_reqtfm(a_req); + size_t authsize = crypto_aead_authsize(tfm); struct sec_aead_req *aead_req = &req->aead_req; struct sec_cipher_req *c_req = &req->c_req; - size_t authsize = crypto_aead_authsize(tfm); struct sec_qp_ctx *qp_ctx = req->qp_ctx; struct aead_request *backlog_aead_req; struct sec_req *backlog_req; @@ -1718,10 +1705,8 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) if (!err && c_req->encrypt) { struct scatterlist *sgl = a_req->dst; - sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), - aead_req->out_mac, - authsize, a_req->cryptlen + - a_req->assoclen); + sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac, + authsize, a_req->cryptlen + a_req->assoclen); if (unlikely(sz != authsize)) { dev_err(c->dev, "copy out mac err!\n"); err = -EINVAL; @@ -1929,8 +1914,10 @@ static void sec_aead_exit(struct crypto_aead *tfm) static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name) { + struct aead_alg *alg = crypto_aead_alg(tfm); struct sec_ctx *ctx = crypto_aead_ctx(tfm); - struct sec_auth_ctx *auth_ctx = &ctx->a_ctx; + struct sec_auth_ctx *a_ctx = &ctx->a_ctx; + const char *aead_name = alg->base.cra_name; int ret; ret = sec_aead_init(tfm); @@ -1939,11 +1926,20 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name) return ret; } - auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); - if (IS_ERR(auth_ctx->hash_tfm)) { + a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); + if (IS_ERR(a_ctx->hash_tfm)) { dev_err(ctx->dev, "aead alloc shash error!\n"); sec_aead_exit(tfm); - return PTR_ERR(auth_ctx->hash_tfm); + return PTR_ERR(a_ctx->hash_tfm); + } + + a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0, + CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC); + if (IS_ERR(a_ctx->fallback_aead_tfm)) { + dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n"); + crypto_free_shash(ctx->a_ctx.hash_tfm); + sec_aead_exit(tfm); + return PTR_ERR(a_ctx->fallback_aead_tfm); } return 0; @@ -1953,6 +1949,7 @@ static void sec_aead_ctx_exit(struct crypto_aead *tfm) { struct sec_ctx *ctx = crypto_aead_ctx(tfm); + crypto_free_aead(ctx->a_ctx.fallback_aead_tfm); crypto_free_shash(ctx->a_ctx.hash_tfm); sec_aead_exit(tfm); } @@ -1979,7 +1976,6 @@ static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm) sec_aead_exit(tfm); return PTR_ERR(a_ctx->fallback_aead_tfm); } - a_ctx->fallback = false; 
return 0; } @@ -2233,21 +2229,20 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq) { struct aead_request *req = sreq->aead_req.aead_req; struct crypto_aead *tfm = crypto_aead_reqtfm(req); - size_t authsize = crypto_aead_authsize(tfm); + size_t sz = crypto_aead_authsize(tfm); u8 c_mode = ctx->c_ctx.c_mode; struct device *dev = ctx->dev; int ret; - if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN || - req->assoclen > SEC_MAX_AAD_LEN)) { - dev_err(dev, "aead input spec error!\n"); + /* Hardware does not handle cases where authsize is less than 4 bytes */ + if (unlikely(sz < MIN_MAC_LEN)) { + sreq->aead_req.fallback = true; return -EINVAL; } - if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) || - (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN || - authsize & MAC_LEN_MASK)))) { - dev_err(dev, "aead input mac length error!\n"); + if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN || + req->assoclen > SEC_MAX_AAD_LEN)) { + dev_err(dev, "aead input spec error!\n"); return -EINVAL; } @@ -2266,7 +2261,7 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq) if (sreq->c_req.encrypt) sreq->c_req.c_len = req->cryptlen; else - sreq->c_req.c_len = req->cryptlen - authsize; + sreq->c_req.c_len = req->cryptlen - sz; if (c_mode == SEC_CMODE_CBC) { if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { dev_err(dev, "aead crypto length error!\n"); @@ -2292,8 +2287,8 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) if (ctx->sec->qm.ver == QM_HW_V2) { if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt && - req->cryptlen <= authsize))) { - ctx->a_ctx.fallback = true; + req->cryptlen <= authsize))) { + sreq->aead_req.fallback = true; return -EINVAL; } } @@ -2321,16 +2316,9 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx, bool encrypt) { struct sec_auth_ctx *a_ctx = &ctx->a_ctx; - struct device *dev = ctx->dev; struct aead_request *subreq; int ret; - /* Kunpeng920 aead mode not support input 0 size */ - if (!a_ctx->fallback_aead_tfm) { - dev_err(dev, "aead fallback tfm is NULL!\n"); - return -EINVAL; - } - subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL); if (!subreq) return -ENOMEM; @@ -2362,10 +2350,11 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) req->aead_req.aead_req = a_req; req->c_req.encrypt = encrypt; req->ctx = ctx; + req->aead_req.fallback = false; ret = sec_aead_param_check(ctx, req); if (unlikely(ret)) { - if (ctx->a_ctx.fallback) + if (req->aead_req.fallback) return sec_aead_soft_crypto(ctx, a_req, encrypt); return -EINVAL; } diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h index 27a0ee5ad913..04725b514382 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.h +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h @@ -23,17 +23,6 @@ enum sec_hash_alg { SEC_A_HMAC_SHA512 = 0x15, }; -enum sec_mac_len { - SEC_HMAC_CCM_MAC = 16, - SEC_HMAC_GCM_MAC = 16, - SEC_SM3_MAC = 32, - SEC_HMAC_SM3_MAC = 32, - SEC_HMAC_MD5_MAC = 16, - SEC_HMAC_SHA1_MAC = 20, - SEC_HMAC_SHA256_MAC = 32, - SEC_HMAC_SHA512_MAC = 64, -}; - enum sec_cmode { SEC_CMODE_ECB = 0x0, SEC_CMODE_CBC = 0x1, diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 8ec5333bb5aa..72cf48d1f3ab 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -1097,6 +1097,17 @@ static enum acc_err_result sec_get_err_result(struct hisi_qm 
*qm) return ACC_ERR_RECOVERED; } +static bool sec_dev_is_abnormal(struct hisi_qm *qm) +{ + u32 err_status; + + err_status = sec_get_hw_err_status(qm); + if (err_status & qm->err_info.dev_shutdown_mask) + return true; + + return false; +} + static void sec_err_info_init(struct hisi_qm *qm) { struct hisi_qm_err_info *err_info = &qm->err_info; @@ -1129,6 +1140,7 @@ static const struct hisi_qm_err_ini sec_err_ini = { .show_last_dfx_regs = sec_show_last_dfx_regs, .err_info_init = sec_err_info_init, .get_err_result = sec_get_err_result, + .dev_is_abnormal = sec_dev_is_abnormal, }; static int sec_pf_probe_init(struct sec_dev *sec) @@ -1180,7 +1192,6 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) int ret; qm->pdev = pdev; - qm->ver = pdev->revision; qm->mode = uacce_mode; qm->sqe_size = SEC_SQE_SIZE; qm->dev_name = sec_name; diff --git a/drivers/crypto/hisilicon/zip/Makefile b/drivers/crypto/hisilicon/zip/Makefile index a936f099ee22..13de020b77d6 100644 --- a/drivers/crypto/hisilicon/zip/Makefile +++ b/drivers/crypto/hisilicon/zip/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += hisi_zip.o -hisi_zip-objs = zip_main.o zip_crypto.o +hisi_zip-objs = zip_main.o zip_crypto.o dae_main.o diff --git a/drivers/crypto/hisilicon/zip/dae_main.c b/drivers/crypto/hisilicon/zip/dae_main.c new file mode 100644 index 000000000000..6f22e4c36e49 --- /dev/null +++ b/drivers/crypto/hisilicon/zip/dae_main.c @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 HiSilicon Limited. */ + +#include <linux/bitops.h> +#include <linux/io.h> +#include <linux/uacce.h> +#include "zip.h" + +/* memory */ +#define DAE_MEM_START_OFFSET 0x331040 +#define DAE_MEM_DONE_OFFSET 0x331044 +#define DAE_MEM_START_MASK 0x1 +#define DAE_MEM_DONE_MASK 0x1 +#define DAE_REG_RD_INTVRL_US 10 +#define DAE_REG_RD_TMOUT_US USEC_PER_SEC + +#define DAE_ALG_NAME "hashagg" + +/* error */ +#define DAE_AXI_CFG_OFFSET 0x331000 +#define DAE_AXI_SHUTDOWN_MASK (BIT(0) | BIT(5)) +#define DAE_ERR_SOURCE_OFFSET 0x331C84 +#define DAE_ERR_STATUS_OFFSET 0x331C88 +#define DAE_ERR_CE_OFFSET 0x331CA0 +#define DAE_ERR_CE_MASK BIT(3) +#define DAE_ERR_NFE_OFFSET 0x331CA4 +#define DAE_ERR_NFE_MASK 0x17 +#define DAE_ERR_FE_OFFSET 0x331CA8 +#define DAE_ERR_FE_MASK 0 +#define DAE_ECC_MBIT_MASK BIT(2) +#define DAE_ECC_INFO_OFFSET 0x33400C +#define DAE_ERR_SHUTDOWN_OFFSET 0x331CAC +#define DAE_ERR_SHUTDOWN_MASK 0x17 +#define DAE_ERR_ENABLE_OFFSET 0x331C80 +#define DAE_ERR_ENABLE_MASK (DAE_ERR_FE_MASK | DAE_ERR_NFE_MASK | DAE_ERR_CE_MASK) +#define DAE_AM_CTRL_GLOBAL_OFFSET 0x330000 +#define DAE_AM_RETURN_OFFSET 0x330150 +#define DAE_AM_RETURN_MASK 0x3 +#define DAE_AXI_CFG_OFFSET 0x331000 +#define DAE_AXI_SHUTDOWN_EN_MASK (BIT(0) | BIT(5)) + +struct hisi_dae_hw_error { + u32 int_msk; + const char *msg; +}; + +static const struct hisi_dae_hw_error dae_hw_error[] = { + { .int_msk = BIT(0), .msg = "dae_axi_bus_err" }, + { .int_msk = BIT(1), .msg = "dae_axi_poison_err" }, + { .int_msk = BIT(2), .msg = "dae_ecc_2bit_err" }, + { .int_msk = BIT(3), .msg = "dae_ecc_1bit_err" }, + { .int_msk = BIT(4), .msg = "dae_fsm_hbeat_err" }, +}; + +static inline bool dae_is_support(struct hisi_qm *qm) +{ + if (test_bit(QM_SUPPORT_DAE, &qm->caps)) + return true; + + return false; +} + +int hisi_dae_set_user_domain(struct hisi_qm *qm) +{ + u32 val; + int ret; + + if (!dae_is_support(qm)) + return 0; + + val = readl(qm->io_base + DAE_MEM_START_OFFSET); + val |= DAE_MEM_START_MASK; + writel(val, qm->io_base + DAE_MEM_START_OFFSET); + ret = 
readl_relaxed_poll_timeout(qm->io_base + DAE_MEM_DONE_OFFSET, val, + val & DAE_MEM_DONE_MASK, + DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US); + if (ret) + pci_err(qm->pdev, "failed to init dae memory!\n"); + + return ret; +} + +int hisi_dae_set_alg(struct hisi_qm *qm) +{ + size_t len; + + if (!dae_is_support(qm)) + return 0; + + if (!qm->uacce) + return 0; + + len = strlen(qm->uacce->algs); + /* A line break may be required */ + if (len + strlen(DAE_ALG_NAME) + 1 >= QM_DEV_ALG_MAX_LEN) { + pci_err(qm->pdev, "algorithm name is too long!\n"); + return -EINVAL; + } + + if (len) + strcat((char *)qm->uacce->algs, "\n"); + + strcat((char *)qm->uacce->algs, DAE_ALG_NAME); + + return 0; +} + +static void hisi_dae_master_ooo_ctrl(struct hisi_qm *qm, bool enable) +{ + u32 axi_val, err_val; + + axi_val = readl(qm->io_base + DAE_AXI_CFG_OFFSET); + if (enable) { + axi_val |= DAE_AXI_SHUTDOWN_MASK; + err_val = DAE_ERR_SHUTDOWN_MASK; + } else { + axi_val &= ~DAE_AXI_SHUTDOWN_MASK; + err_val = 0; + } + + writel(axi_val, qm->io_base + DAE_AXI_CFG_OFFSET); + writel(err_val, qm->io_base + DAE_ERR_SHUTDOWN_OFFSET); +} + +void hisi_dae_hw_error_enable(struct hisi_qm *qm) +{ + if (!dae_is_support(qm)) + return; + + /* clear dae hw error source if having */ + writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_SOURCE_OFFSET); + + /* configure error type */ + writel(DAE_ERR_CE_MASK, qm->io_base + DAE_ERR_CE_OFFSET); + writel(DAE_ERR_NFE_MASK, qm->io_base + DAE_ERR_NFE_OFFSET); + writel(DAE_ERR_FE_MASK, qm->io_base + DAE_ERR_FE_OFFSET); + + hisi_dae_master_ooo_ctrl(qm, true); + + /* enable dae hw error interrupts */ + writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_ENABLE_OFFSET); +} + +void hisi_dae_hw_error_disable(struct hisi_qm *qm) +{ + if (!dae_is_support(qm)) + return; + + writel(0, qm->io_base + DAE_ERR_ENABLE_OFFSET); + hisi_dae_master_ooo_ctrl(qm, false); +} + +static u32 hisi_dae_get_hw_err_status(struct hisi_qm *qm) +{ + return readl(qm->io_base + DAE_ERR_STATUS_OFFSET); +} + +static void hisi_dae_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) +{ + if (!dae_is_support(qm)) + return; + + writel(err_sts, qm->io_base + DAE_ERR_SOURCE_OFFSET); +} + +static void hisi_dae_disable_error_report(struct hisi_qm *qm, u32 err_type) +{ + writel(DAE_ERR_NFE_MASK & (~err_type), qm->io_base + DAE_ERR_NFE_OFFSET); +} + +static void hisi_dae_log_hw_error(struct hisi_qm *qm, u32 err_type) +{ + const struct hisi_dae_hw_error *err = dae_hw_error; + struct device *dev = &qm->pdev->dev; + u32 ecc_info; + size_t i; + + for (i = 0; i < ARRAY_SIZE(dae_hw_error); i++) { + err = &dae_hw_error[i]; + if (!(err->int_msk & err_type)) + continue; + + dev_err(dev, "%s [error status=0x%x] found\n", + err->msg, err->int_msk); + + if (err->int_msk & DAE_ECC_MBIT_MASK) { + ecc_info = readl(qm->io_base + DAE_ECC_INFO_OFFSET); + dev_err(dev, "dae multi ecc sram info 0x%x\n", ecc_info); + } + } +} + +enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm) +{ + u32 err_status; + + if (!dae_is_support(qm)) + return ACC_ERR_NONE; + + err_status = hisi_dae_get_hw_err_status(qm); + if (!err_status) + return ACC_ERR_NONE; + + hisi_dae_log_hw_error(qm, err_status); + + if (err_status & DAE_ERR_NFE_MASK) { + /* Disable the same error reporting until device is recovered. 
*/ + hisi_dae_disable_error_report(qm, err_status); + return ACC_ERR_NEED_RESET; + } + hisi_dae_clear_hw_err_status(qm, err_status); + + return ACC_ERR_RECOVERED; +} + +bool hisi_dae_dev_is_abnormal(struct hisi_qm *qm) +{ + u32 err_status; + + if (!dae_is_support(qm)) + return false; + + err_status = hisi_dae_get_hw_err_status(qm); + if (err_status & DAE_ERR_NFE_MASK) + return true; + + return false; +} + +int hisi_dae_close_axi_master_ooo(struct hisi_qm *qm) +{ + u32 val; + int ret; + + if (!dae_is_support(qm)) + return 0; + + val = readl(qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET); + val |= BIT(0); + writel(val, qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET); + + ret = readl_relaxed_poll_timeout(qm->io_base + DAE_AM_RETURN_OFFSET, + val, (val == DAE_AM_RETURN_MASK), + DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US); + if (ret) + dev_err(&qm->pdev->dev, "failed to close dae axi ooo!\n"); + + return ret; +} + +void hisi_dae_open_axi_master_ooo(struct hisi_qm *qm) +{ + u32 val; + + if (!dae_is_support(qm)) + return; + + val = readl(qm->io_base + DAE_AXI_CFG_OFFSET); + + writel(val & ~DAE_AXI_SHUTDOWN_EN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET); + writel(val | DAE_AXI_SHUTDOWN_EN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET); +} diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h index 2fecf346c3c9..9fb2a9c01132 100644 --- a/drivers/crypto/hisilicon/zip/zip.h +++ b/drivers/crypto/hisilicon/zip/zip.h @@ -103,4 +103,12 @@ int zip_create_qps(struct hisi_qp **qps, int qp_num, int node); int hisi_zip_register_to_crypto(struct hisi_qm *qm); void hisi_zip_unregister_from_crypto(struct hisi_qm *qm); bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg); +int hisi_dae_set_user_domain(struct hisi_qm *qm); +int hisi_dae_set_alg(struct hisi_qm *qm); +void hisi_dae_hw_error_disable(struct hisi_qm *qm); +void hisi_dae_hw_error_enable(struct hisi_qm *qm); +void hisi_dae_open_axi_master_ooo(struct hisi_qm *qm); +int hisi_dae_close_axi_master_ooo(struct hisi_qm *qm); +bool hisi_dae_dev_is_abnormal(struct hisi_qm *qm); +enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm); #endif diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 9239b251c2d7..d8ba23b7cc7d 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -582,7 +582,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm) hisi_zip_enable_clock_gate(qm); - return 0; + return hisi_dae_set_user_domain(qm); } static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable) @@ -631,6 +631,8 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm) /* enable ZIP hw error interrupts */ writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG); + + hisi_dae_hw_error_enable(qm); } static void hisi_zip_hw_error_disable(struct hisi_qm *qm) @@ -643,6 +645,8 @@ static void hisi_zip_hw_error_disable(struct hisi_qm *qm) writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG); hisi_zip_master_ooo_ctrl(qm, false); + + hisi_dae_hw_error_disable(qm); } static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file) @@ -1129,6 +1133,8 @@ static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm) writel(val | HZIP_AXI_SHUTDOWN_ENABLE, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); + + hisi_dae_open_axi_master_ooo(qm); } static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm) @@ -1147,8 +1153,11 @@ static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm) static enum acc_err_result 
hisi_zip_get_err_result(struct hisi_qm *qm) { + enum acc_err_result zip_result = ACC_ERR_NONE; + enum acc_err_result dae_result; u32 err_status; + /* Get device hardware new error status */ err_status = hisi_zip_get_hw_err_status(qm); if (err_status) { if (err_status & qm->err_info.ecc_2bits_mask) @@ -1159,11 +1168,32 @@ static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm) /* Disable the same error reporting until device is recovered. */ hisi_zip_disable_error_report(qm, err_status); return ACC_ERR_NEED_RESET; + } else { + hisi_zip_clear_hw_err_status(qm, err_status); } - hisi_zip_clear_hw_err_status(qm, err_status); } - return ACC_ERR_RECOVERED; + dae_result = hisi_dae_get_err_result(qm); + + return (zip_result == ACC_ERR_NEED_RESET || + dae_result == ACC_ERR_NEED_RESET) ? + ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED; +} + +static bool hisi_zip_dev_is_abnormal(struct hisi_qm *qm) +{ + u32 err_status; + + err_status = hisi_zip_get_hw_err_status(qm); + if (err_status & qm->err_info.dev_shutdown_mask) + return true; + + return hisi_dae_dev_is_abnormal(qm); +} + +static int hisi_zip_set_priv_status(struct hisi_qm *qm) +{ + return hisi_dae_close_axi_master_ooo(qm); } static void hisi_zip_err_info_init(struct hisi_qm *qm) @@ -1200,6 +1230,8 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = { .show_last_dfx_regs = hisi_zip_show_last_dfx_regs, .err_info_init = hisi_zip_err_info_init, .get_err_result = hisi_zip_get_err_result, + .set_priv_status = hisi_zip_set_priv_status, + .dev_is_abnormal = hisi_zip_dev_is_abnormal, }; static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) @@ -1264,7 +1296,6 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) int ret; qm->pdev = pdev; - qm->ver = pdev->revision; qm->mode = uacce_mode; qm->sqe_size = HZIP_SQE_SIZE; qm->dev_name = hisi_zip_name; @@ -1301,17 +1332,24 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) ret = zip_pre_store_cap_reg(qm); if (ret) { pci_err(qm->pdev, "Failed to pre-store capability registers!\n"); - hisi_qm_uninit(qm); - return ret; + goto err_qm_uninit; } alg_msk = qm->cap_tables.dev_cap_table[ZIP_ALG_BITMAP].cap_val; ret = hisi_qm_set_algs(qm, alg_msk, zip_dev_algs, ARRAY_SIZE(zip_dev_algs)); if (ret) { pci_err(qm->pdev, "Failed to set zip algs!\n"); - hisi_qm_uninit(qm); + goto err_qm_uninit; } + ret = hisi_dae_set_alg(qm); + if (ret) + goto err_qm_uninit; + + return 0; + +err_qm_uninit: + hisi_qm_uninit(qm); return ret; } diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index 9e557649e5d0..c3776b0de51d 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -173,7 +173,7 @@ static int set_iaa_sync_mode(const char *name) async_mode = false; use_irq = false; } else if (sysfs_streq(name, "async")) { - async_mode = true; + async_mode = false; use_irq = false; } else if (sysfs_streq(name, "async_irq")) { async_mode = true; diff --git a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c index 449c6d3ab2db..fcc0cf4df637 100644 --- a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c +++ b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c @@ -471,6 +471,7 @@ static int init_ixp_crypto(struct device *dev) return -ENODEV; } npe_id = npe_spec.args[0]; + of_node_put(npe_spec.np); ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0, &queue_spec); @@ -479,6 +480,7 @@ static int init_ixp_crypto(struct device *dev) return -ENODEV; } recv_qid = 
queue_spec.args[0]; + of_node_put(queue_spec.np); ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0, &queue_spec); @@ -487,6 +489,7 @@ static int init_ixp_crypto(struct device *dev) return -ENODEV; } send_qid = queue_spec.args[0]; + of_node_put(queue_spec.np); } else { /* * Hardcoded engine when using platform data, this goes away diff --git a/drivers/crypto/n2_asm.S b/drivers/crypto/n2_asm.S deleted file mode 100644 index 9a67dbf340f4..000000000000 --- a/drivers/crypto/n2_asm.S +++ /dev/null @@ -1,96 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* n2_asm.S: Hypervisor calls for NCS support. - * - * Copyright (C) 2009 David S. Miller <davem@davemloft.net> - */ - -#include <linux/linkage.h> -#include <asm/hypervisor.h> -#include "n2_core.h" - - /* o0: queue type - * o1: RA of queue - * o2: num entries in queue - * o3: address of queue handle return - */ -ENTRY(sun4v_ncs_qconf) - mov HV_FAST_NCS_QCONF, %o5 - ta HV_FAST_TRAP - stx %o1, [%o3] - retl - nop -ENDPROC(sun4v_ncs_qconf) - - /* %o0: queue handle - * %o1: address of queue type return - * %o2: address of queue base address return - * %o3: address of queue num entries return - */ -ENTRY(sun4v_ncs_qinfo) - mov %o1, %g1 - mov %o2, %g2 - mov %o3, %g3 - mov HV_FAST_NCS_QINFO, %o5 - ta HV_FAST_TRAP - stx %o1, [%g1] - stx %o2, [%g2] - stx %o3, [%g3] - retl - nop -ENDPROC(sun4v_ncs_qinfo) - - /* %o0: queue handle - * %o1: address of head offset return - */ -ENTRY(sun4v_ncs_gethead) - mov %o1, %o2 - mov HV_FAST_NCS_GETHEAD, %o5 - ta HV_FAST_TRAP - stx %o1, [%o2] - retl - nop -ENDPROC(sun4v_ncs_gethead) - - /* %o0: queue handle - * %o1: address of tail offset return - */ -ENTRY(sun4v_ncs_gettail) - mov %o1, %o2 - mov HV_FAST_NCS_GETTAIL, %o5 - ta HV_FAST_TRAP - stx %o1, [%o2] - retl - nop -ENDPROC(sun4v_ncs_gettail) - - /* %o0: queue handle - * %o1: new tail offset - */ -ENTRY(sun4v_ncs_settail) - mov HV_FAST_NCS_SETTAIL, %o5 - ta HV_FAST_TRAP - retl - nop -ENDPROC(sun4v_ncs_settail) - - /* %o0: queue handle - * %o1: address of devino return - */ -ENTRY(sun4v_ncs_qhandle_to_devino) - mov %o1, %o2 - mov HV_FAST_NCS_QHANDLE_TO_DEVINO, %o5 - ta HV_FAST_TRAP - stx %o1, [%o2] - retl - nop -ENDPROC(sun4v_ncs_qhandle_to_devino) - - /* %o0: queue handle - * %o1: new head offset - */ -ENTRY(sun4v_ncs_sethead_marker) - mov HV_FAST_NCS_SETHEAD_MARKER, %o5 - ta HV_FAST_TRAP - retl - nop -ENDPROC(sun4v_ncs_sethead_marker) diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c deleted file mode 100644 index 14c302d2db79..000000000000 --- a/drivers/crypto/n2_core.c +++ /dev/null @@ -1,2168 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support. - * - * Copyright (C) 2010, 2011 David S. 
Miller <davem@davemloft.net> - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/of.h> -#include <linux/of_address.h> -#include <linux/platform_device.h> -#include <linux/cpumask.h> -#include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/crypto.h> -#include <crypto/md5.h> -#include <crypto/sha1.h> -#include <crypto/sha2.h> -#include <crypto/aes.h> -#include <crypto/internal/des.h> -#include <linux/mutex.h> -#include <linux/delay.h> -#include <linux/sched.h> - -#include <crypto/internal/hash.h> -#include <crypto/internal/skcipher.h> -#include <crypto/scatterwalk.h> -#include <crypto/algapi.h> - -#include <asm/hypervisor.h> -#include <asm/mdesc.h> - -#include "n2_core.h" - -#define DRV_MODULE_NAME "n2_crypto" -#define DRV_MODULE_VERSION "0.2" -#define DRV_MODULE_RELDATE "July 28, 2011" - -static const char version[] = - DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; - -MODULE_AUTHOR("David S. Miller <davem@davemloft.net>"); -MODULE_DESCRIPTION("Niagara2 Crypto driver"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_MODULE_VERSION); - -#define N2_CRA_PRIORITY 200 - -static DEFINE_MUTEX(spu_lock); - -struct spu_queue { - cpumask_t sharing; - unsigned long qhandle; - - spinlock_t lock; - u8 q_type; - void *q; - unsigned long head; - unsigned long tail; - struct list_head jobs; - - unsigned long devino; - - char irq_name[32]; - unsigned int irq; - - struct list_head list; -}; - -struct spu_qreg { - struct spu_queue *queue; - unsigned long type; -}; - -static struct spu_queue **cpu_to_cwq; -static struct spu_queue **cpu_to_mau; - -static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off) -{ - if (q->q_type == HV_NCS_QTYPE_MAU) { - off += MAU_ENTRY_SIZE; - if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES)) - off = 0; - } else { - off += CWQ_ENTRY_SIZE; - if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES)) - off = 0; - } - return off; -} - -struct n2_request_common { - struct list_head entry; - unsigned int offset; -}; -#define OFFSET_NOT_RUNNING (~(unsigned int)0) - -/* An async job request records the final tail value it used in - * n2_request_common->offset, test to see if that offset is in - * the range old_head, new_head, inclusive. - */ -static inline bool job_finished(struct spu_queue *q, unsigned int offset, - unsigned long old_head, unsigned long new_head) -{ - if (old_head <= new_head) { - if (offset > old_head && offset <= new_head) - return true; - } else { - if (offset > old_head || offset <= new_head) - return true; - } - return false; -} - -/* When the HEAD marker is unequal to the actual HEAD, we get - * a virtual device INO interrupt. We should process the - * completed CWQ entries and adjust the HEAD marker to clear - * the IRQ. - */ -static irqreturn_t cwq_intr(int irq, void *dev_id) -{ - unsigned long off, new_head, hv_ret; - struct spu_queue *q = dev_id; - - pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n", - smp_processor_id(), q->qhandle); - - spin_lock(&q->lock); - - hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head); - - pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n", - smp_processor_id(), new_head, hv_ret); - - for (off = q->head; off != new_head; off = spu_next_offset(q, off)) { - /* XXX ... 
XXX */ - } - - hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head); - if (hv_ret == HV_EOK) - q->head = new_head; - - spin_unlock(&q->lock); - - return IRQ_HANDLED; -} - -static irqreturn_t mau_intr(int irq, void *dev_id) -{ - struct spu_queue *q = dev_id; - unsigned long head, hv_ret; - - spin_lock(&q->lock); - - pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n", - smp_processor_id(), q->qhandle); - - hv_ret = sun4v_ncs_gethead(q->qhandle, &head); - - pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n", - smp_processor_id(), head, hv_ret); - - sun4v_ncs_sethead_marker(q->qhandle, head); - - spin_unlock(&q->lock); - - return IRQ_HANDLED; -} - -static void *spu_queue_next(struct spu_queue *q, void *cur) -{ - return q->q + spu_next_offset(q, cur - q->q); -} - -static int spu_queue_num_free(struct spu_queue *q) -{ - unsigned long head = q->head; - unsigned long tail = q->tail; - unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES); - unsigned long diff; - - if (head > tail) - diff = head - tail; - else - diff = (end - tail) + head; - - return (diff / CWQ_ENTRY_SIZE) - 1; -} - -static void *spu_queue_alloc(struct spu_queue *q, int num_entries) -{ - int avail = spu_queue_num_free(q); - - if (avail >= num_entries) - return q->q + q->tail; - - return NULL; -} - -static unsigned long spu_queue_submit(struct spu_queue *q, void *last) -{ - unsigned long hv_ret, new_tail; - - new_tail = spu_next_offset(q, last - q->q); - - hv_ret = sun4v_ncs_settail(q->qhandle, new_tail); - if (hv_ret == HV_EOK) - q->tail = new_tail; - return hv_ret; -} - -static u64 control_word_base(unsigned int len, unsigned int hmac_key_len, - int enc_type, int auth_type, - unsigned int hash_len, - bool sfas, bool sob, bool eob, bool encrypt, - int opcode) -{ - u64 word = (len - 1) & CONTROL_LEN; - - word |= ((u64) opcode << CONTROL_OPCODE_SHIFT); - word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT); - word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT); - if (sfas) - word |= CONTROL_STORE_FINAL_AUTH_STATE; - if (sob) - word |= CONTROL_START_OF_BLOCK; - if (eob) - word |= CONTROL_END_OF_BLOCK; - if (encrypt) - word |= CONTROL_ENCRYPT; - if (hmac_key_len) - word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT; - if (hash_len) - word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT; - - return word; -} - -#if 0 -static inline bool n2_should_run_async(struct spu_queue *qp, int this_len) -{ - if (this_len >= 64 || - qp->head != qp->tail) - return true; - return false; -} -#endif - -struct n2_ahash_alg { - struct list_head entry; - const u8 *hash_zero; - const u8 *hash_init; - u8 hw_op_hashsz; - u8 digest_size; - u8 auth_type; - u8 hmac_type; - struct ahash_alg alg; -}; - -static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm) -{ - struct crypto_alg *alg = tfm->__crt_alg; - struct ahash_alg *ahash_alg; - - ahash_alg = container_of(alg, struct ahash_alg, halg.base); - - return container_of(ahash_alg, struct n2_ahash_alg, alg); -} - -struct n2_hmac_alg { - const char *child_alg; - struct n2_ahash_alg derived; -}; - -static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm) -{ - struct crypto_alg *alg = tfm->__crt_alg; - struct ahash_alg *ahash_alg; - - ahash_alg = container_of(alg, struct ahash_alg, halg.base); - - return container_of(ahash_alg, struct n2_hmac_alg, derived.alg); -} - -struct n2_hash_ctx { - struct crypto_ahash *fallback_tfm; -}; - -#define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */ - -struct n2_hmac_ctx { - struct n2_hash_ctx base; - - struct crypto_shash 
*child_shash; - - int hash_key_len; - unsigned char hash_key[N2_HASH_KEY_MAX]; -}; - -struct n2_hash_req_ctx { - union { - struct md5_state md5; - struct sha1_state sha1; - struct sha256_state sha256; - } u; - - struct ahash_request fallback_req; -}; - -static int n2_hash_async_init(struct ahash_request *req) -{ - struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); - - ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; - - return crypto_ahash_init(&rctx->fallback_req); -} - -static int n2_hash_async_update(struct ahash_request *req) -{ - struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); - - ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; - - return crypto_ahash_update(&rctx->fallback_req); -} - -static int n2_hash_async_final(struct ahash_request *req) -{ - struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); - - ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.result = req->result; - - return crypto_ahash_final(&rctx->fallback_req); -} - -static int n2_hash_async_finup(struct ahash_request *req) -{ - struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); - - ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; - rctx->fallback_req.result = req->result; - - return crypto_ahash_finup(&rctx->fallback_req); -} - -static int n2_hash_async_noimport(struct ahash_request *req, const void *in) -{ - return -ENOSYS; -} - -static int n2_hash_async_noexport(struct ahash_request *req, void *out) -{ - return -ENOSYS; -} - -static int n2_hash_cra_init(struct crypto_tfm *tfm) -{ - const char *fallback_driver_name = crypto_tfm_alg_name(tfm); - struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); - struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct crypto_ahash *fallback_tfm; - int err; - - fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0, - CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(fallback_tfm)) { - pr_warn("Fallback driver '%s' could not be loaded!\n", - fallback_driver_name); - err = PTR_ERR(fallback_tfm); - goto out; - } - - crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) + - crypto_ahash_reqsize(fallback_tfm))); - - ctx->fallback_tfm = fallback_tfm; - return 0; - -out: - return err; -} - -static void n2_hash_cra_exit(struct crypto_tfm *tfm) -{ - struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); - struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash); - - crypto_free_ahash(ctx->fallback_tfm); -} - -static int n2_hmac_cra_init(struct crypto_tfm *tfm) -{ - const char *fallback_driver_name = crypto_tfm_alg_name(tfm); - struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); - struct n2_hmac_ctx *ctx = 
crypto_ahash_ctx(ahash); - struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm); - struct crypto_ahash *fallback_tfm; - struct crypto_shash *child_shash; - int err; - - fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0, - CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(fallback_tfm)) { - pr_warn("Fallback driver '%s' could not be loaded!\n", - fallback_driver_name); - err = PTR_ERR(fallback_tfm); - goto out; - } - - child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0); - if (IS_ERR(child_shash)) { - pr_warn("Child shash '%s' could not be loaded!\n", - n2alg->child_alg); - err = PTR_ERR(child_shash); - goto out_free_fallback; - } - - crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) + - crypto_ahash_reqsize(fallback_tfm))); - - ctx->child_shash = child_shash; - ctx->base.fallback_tfm = fallback_tfm; - return 0; - -out_free_fallback: - crypto_free_ahash(fallback_tfm); - -out: - return err; -} - -static void n2_hmac_cra_exit(struct crypto_tfm *tfm) -{ - struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); - struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash); - - crypto_free_ahash(ctx->base.fallback_tfm); - crypto_free_shash(ctx->child_shash); -} - -static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen) -{ - struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm); - struct crypto_shash *child_shash = ctx->child_shash; - struct crypto_ahash *fallback_tfm; - int err, bs, ds; - - fallback_tfm = ctx->base.fallback_tfm; - err = crypto_ahash_setkey(fallback_tfm, key, keylen); - if (err) - return err; - - bs = crypto_shash_blocksize(child_shash); - ds = crypto_shash_digestsize(child_shash); - BUG_ON(ds > N2_HASH_KEY_MAX); - if (keylen > bs) { - err = crypto_shash_tfm_digest(child_shash, key, keylen, - ctx->hash_key); - if (err) - return err; - keylen = ds; - } else if (keylen <= N2_HASH_KEY_MAX) - memcpy(ctx->hash_key, key, keylen); - - ctx->hash_key_len = keylen; - - return err; -} - -static unsigned long wait_for_tail(struct spu_queue *qp) -{ - unsigned long head, hv_ret; - - do { - hv_ret = sun4v_ncs_gethead(qp->qhandle, &head); - if (hv_ret != HV_EOK) { - pr_err("Hypervisor error on gethead\n"); - break; - } - if (head == qp->tail) { - qp->head = head; - break; - } - } while (1); - return hv_ret; -} - -static unsigned long submit_and_wait_for_tail(struct spu_queue *qp, - struct cwq_initial_entry *ent) -{ - unsigned long hv_ret = spu_queue_submit(qp, ent); - - if (hv_ret == HV_EOK) - hv_ret = wait_for_tail(qp); - - return hv_ret; -} - -static int n2_do_async_digest(struct ahash_request *req, - unsigned int auth_type, unsigned int digest_size, - unsigned int result_size, void *hash_loc, - unsigned long auth_key, unsigned int auth_key_len) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct cwq_initial_entry *ent; - struct crypto_hash_walk walk; - struct spu_queue *qp; - unsigned long flags; - int err = -ENODEV; - int nbytes, cpu; - - /* The total effective length of the operation may not - * exceed 2^16. 
- */ - if (unlikely(req->nbytes > (1 << 16))) { - struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); - struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); - - ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = - req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; - rctx->fallback_req.result = req->result; - - return crypto_ahash_digest(&rctx->fallback_req); - } - - nbytes = crypto_hash_walk_first(req, &walk); - - cpu = get_cpu(); - qp = cpu_to_cwq[cpu]; - if (!qp) - goto out; - - spin_lock_irqsave(&qp->lock, flags); - - /* XXX can do better, improve this later by doing a by-hand scatterlist - * XXX walk, etc. - */ - ent = qp->q + qp->tail; - - ent->control = control_word_base(nbytes, auth_key_len, 0, - auth_type, digest_size, - false, true, false, false, - OPCODE_INPLACE_BIT | - OPCODE_AUTH_MAC); - ent->src_addr = __pa(walk.data); - ent->auth_key_addr = auth_key; - ent->auth_iv_addr = __pa(hash_loc); - ent->final_auth_state_addr = 0UL; - ent->enc_key_addr = 0UL; - ent->enc_iv_addr = 0UL; - ent->dest_addr = __pa(hash_loc); - - nbytes = crypto_hash_walk_done(&walk, 0); - while (nbytes > 0) { - ent = spu_queue_next(qp, ent); - - ent->control = (nbytes - 1); - ent->src_addr = __pa(walk.data); - ent->auth_key_addr = 0UL; - ent->auth_iv_addr = 0UL; - ent->final_auth_state_addr = 0UL; - ent->enc_key_addr = 0UL; - ent->enc_iv_addr = 0UL; - ent->dest_addr = 0UL; - - nbytes = crypto_hash_walk_done(&walk, 0); - } - ent->control |= CONTROL_END_OF_BLOCK; - - if (submit_and_wait_for_tail(qp, ent) != HV_EOK) - err = -EINVAL; - else - err = 0; - - spin_unlock_irqrestore(&qp->lock, flags); - - if (!err) - memcpy(req->result, hash_loc, result_size); -out: - put_cpu(); - - return err; -} - -static int n2_hash_async_digest(struct ahash_request *req) -{ - struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm); - struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); - int ds; - - ds = n2alg->digest_size; - if (unlikely(req->nbytes == 0)) { - memcpy(req->result, n2alg->hash_zero, ds); - return 0; - } - memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz); - - return n2_do_async_digest(req, n2alg->auth_type, - n2alg->hw_op_hashsz, ds, - &rctx->u, 0UL, 0); -} - -static int n2_hmac_async_digest(struct ahash_request *req) -{ - struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm); - struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm); - int ds; - - ds = n2alg->derived.digest_size; - if (unlikely(req->nbytes == 0) || - unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) { - struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); - struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); - - ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = - req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; - rctx->fallback_req.result = req->result; - - return crypto_ahash_digest(&rctx->fallback_req); - } - memcpy(&rctx->u, n2alg->derived.hash_init, - n2alg->derived.hw_op_hashsz); - - return n2_do_async_digest(req, n2alg->derived.hmac_type, - n2alg->derived.hw_op_hashsz, ds, - &rctx->u, - __pa(&ctx->hash_key), - ctx->hash_key_len); -} - -struct n2_skcipher_context { - int key_len; - int enc_type; - union { - u8 aes[AES_MAX_KEY_SIZE]; - u8 des[DES_KEY_SIZE]; - u8 des3[3 * DES_KEY_SIZE]; - } key; -}; 
- -#define N2_CHUNK_ARR_LEN 16 - -struct n2_crypto_chunk { - struct list_head entry; - unsigned long iv_paddr : 44; - unsigned long arr_len : 20; - unsigned long dest_paddr; - unsigned long dest_final; - struct { - unsigned long src_paddr : 44; - unsigned long src_len : 20; - } arr[N2_CHUNK_ARR_LEN]; -}; - -struct n2_request_context { - struct skcipher_walk walk; - struct list_head chunk_list; - struct n2_crypto_chunk chunk; - u8 temp_iv[16]; -}; - -/* The SPU allows some level of flexibility for partial cipher blocks - * being specified in a descriptor. - * - * It merely requires that every descriptor's length field is at least - * as large as the cipher block size. This means that a cipher block - * can span at most 2 descriptors. However, this does not allow a - * partial block to span into the final descriptor as that would - * violate the rule (since every descriptor's length must be at lest - * the block size). So, for example, assuming an 8 byte block size: - * - * 0xe --> 0xa --> 0x8 - * - * is a valid length sequence, whereas: - * - * 0xe --> 0xb --> 0x7 - * - * is not a valid sequence. - */ - -struct n2_skcipher_alg { - struct list_head entry; - u8 enc_type; - struct skcipher_alg skcipher; -}; - -static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm) -{ - struct skcipher_alg *alg = crypto_skcipher_alg(tfm); - - return container_of(alg, struct n2_skcipher_alg, skcipher); -} - -static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key, - unsigned int keylen) -{ - struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); - struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm); - struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher); - - ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK); - - switch (keylen) { - case AES_KEYSIZE_128: - ctx->enc_type |= ENC_TYPE_ALG_AES128; - break; - case AES_KEYSIZE_192: - ctx->enc_type |= ENC_TYPE_ALG_AES192; - break; - case AES_KEYSIZE_256: - ctx->enc_type |= ENC_TYPE_ALG_AES256; - break; - default: - return -EINVAL; - } - - ctx->key_len = keylen; - memcpy(ctx->key.aes, key, keylen); - return 0; -} - -static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key, - unsigned int keylen) -{ - struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); - struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm); - struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher); - int err; - - err = verify_skcipher_des_key(skcipher, key); - if (err) - return err; - - ctx->enc_type = n2alg->enc_type; - - ctx->key_len = keylen; - memcpy(ctx->key.des, key, keylen); - return 0; -} - -static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key, - unsigned int keylen) -{ - struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); - struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm); - struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher); - int err; - - err = verify_skcipher_des3_key(skcipher, key); - if (err) - return err; - - ctx->enc_type = n2alg->enc_type; - - ctx->key_len = keylen; - memcpy(ctx->key.des3, key, keylen); - return 0; -} - -static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size) -{ - int this_len = nbytes; - - this_len -= (nbytes & (block_size - 1)); - return this_len > (1 << 16) ? 
(1 << 16) : this_len; -} - -static int __n2_crypt_chunk(struct crypto_skcipher *skcipher, - struct n2_crypto_chunk *cp, - struct spu_queue *qp, bool encrypt) -{ - struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher); - struct cwq_initial_entry *ent; - bool in_place; - int i; - - ent = spu_queue_alloc(qp, cp->arr_len); - if (!ent) { - pr_info("queue_alloc() of %d fails\n", - cp->arr_len); - return -EBUSY; - } - - in_place = (cp->dest_paddr == cp->arr[0].src_paddr); - - ent->control = control_word_base(cp->arr[0].src_len, - 0, ctx->enc_type, 0, 0, - false, true, false, encrypt, - OPCODE_ENCRYPT | - (in_place ? OPCODE_INPLACE_BIT : 0)); - ent->src_addr = cp->arr[0].src_paddr; - ent->auth_key_addr = 0UL; - ent->auth_iv_addr = 0UL; - ent->final_auth_state_addr = 0UL; - ent->enc_key_addr = __pa(&ctx->key); - ent->enc_iv_addr = cp->iv_paddr; - ent->dest_addr = (in_place ? 0UL : cp->dest_paddr); - - for (i = 1; i < cp->arr_len; i++) { - ent = spu_queue_next(qp, ent); - - ent->control = cp->arr[i].src_len - 1; - ent->src_addr = cp->arr[i].src_paddr; - ent->auth_key_addr = 0UL; - ent->auth_iv_addr = 0UL; - ent->final_auth_state_addr = 0UL; - ent->enc_key_addr = 0UL; - ent->enc_iv_addr = 0UL; - ent->dest_addr = 0UL; - } - ent->control |= CONTROL_END_OF_BLOCK; - - return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0; -} - -static int n2_compute_chunks(struct skcipher_request *req) -{ - struct n2_request_context *rctx = skcipher_request_ctx(req); - struct skcipher_walk *walk = &rctx->walk; - struct n2_crypto_chunk *chunk; - unsigned long dest_prev; - unsigned int tot_len; - bool prev_in_place; - int err, nbytes; - - err = skcipher_walk_async(walk, req); - if (err) - return err; - - INIT_LIST_HEAD(&rctx->chunk_list); - - chunk = &rctx->chunk; - INIT_LIST_HEAD(&chunk->entry); - - chunk->iv_paddr = 0UL; - chunk->arr_len = 0; - chunk->dest_paddr = 0UL; - - prev_in_place = false; - dest_prev = ~0UL; - tot_len = 0; - - while ((nbytes = walk->nbytes) != 0) { - unsigned long dest_paddr, src_paddr; - bool in_place; - int this_len; - - src_paddr = (page_to_phys(walk->src.phys.page) + - walk->src.phys.offset); - dest_paddr = (page_to_phys(walk->dst.phys.page) + - walk->dst.phys.offset); - in_place = (src_paddr == dest_paddr); - this_len = skcipher_descriptor_len(nbytes, walk->blocksize); - - if (chunk->arr_len != 0) { - if (in_place != prev_in_place || - (!prev_in_place && - dest_paddr != dest_prev) || - chunk->arr_len == N2_CHUNK_ARR_LEN || - tot_len + this_len > (1 << 16)) { - chunk->dest_final = dest_prev; - list_add_tail(&chunk->entry, - &rctx->chunk_list); - chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC); - if (!chunk) { - err = -ENOMEM; - break; - } - INIT_LIST_HEAD(&chunk->entry); - } - } - if (chunk->arr_len == 0) { - chunk->dest_paddr = dest_paddr; - tot_len = 0; - } - chunk->arr[chunk->arr_len].src_paddr = src_paddr; - chunk->arr[chunk->arr_len].src_len = this_len; - chunk->arr_len++; - - dest_prev = dest_paddr + this_len; - prev_in_place = in_place; - tot_len += this_len; - - err = skcipher_walk_done(walk, nbytes - this_len); - if (err) - break; - } - if (!err && chunk->arr_len != 0) { - chunk->dest_final = dest_prev; - list_add_tail(&chunk->entry, &rctx->chunk_list); - } - - return err; -} - -static void n2_chunk_complete(struct skcipher_request *req, void *final_iv) -{ - struct n2_request_context *rctx = skcipher_request_ctx(req); - struct n2_crypto_chunk *c, *tmp; - - if (final_iv) - memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize); - - list_for_each_entry_safe(c, tmp, 
&rctx->chunk_list, entry) { - list_del(&c->entry); - if (unlikely(c != &rctx->chunk)) - kfree(c); - } - -} - -static int n2_do_ecb(struct skcipher_request *req, bool encrypt) -{ - struct n2_request_context *rctx = skcipher_request_ctx(req); - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - int err = n2_compute_chunks(req); - struct n2_crypto_chunk *c, *tmp; - unsigned long flags, hv_ret; - struct spu_queue *qp; - - if (err) - return err; - - qp = cpu_to_cwq[get_cpu()]; - err = -ENODEV; - if (!qp) - goto out; - - spin_lock_irqsave(&qp->lock, flags); - - list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) { - err = __n2_crypt_chunk(tfm, c, qp, encrypt); - if (err) - break; - list_del(&c->entry); - if (unlikely(c != &rctx->chunk)) - kfree(c); - } - if (!err) { - hv_ret = wait_for_tail(qp); - if (hv_ret != HV_EOK) - err = -EINVAL; - } - - spin_unlock_irqrestore(&qp->lock, flags); - -out: - put_cpu(); - - n2_chunk_complete(req, NULL); - return err; -} - -static int n2_encrypt_ecb(struct skcipher_request *req) -{ - return n2_do_ecb(req, true); -} - -static int n2_decrypt_ecb(struct skcipher_request *req) -{ - return n2_do_ecb(req, false); -} - -static int n2_do_chaining(struct skcipher_request *req, bool encrypt) -{ - struct n2_request_context *rctx = skcipher_request_ctx(req); - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - unsigned long flags, hv_ret, iv_paddr; - int err = n2_compute_chunks(req); - struct n2_crypto_chunk *c, *tmp; - struct spu_queue *qp; - void *final_iv_addr; - - final_iv_addr = NULL; - - if (err) - return err; - - qp = cpu_to_cwq[get_cpu()]; - err = -ENODEV; - if (!qp) - goto out; - - spin_lock_irqsave(&qp->lock, flags); - - if (encrypt) { - iv_paddr = __pa(rctx->walk.iv); - list_for_each_entry_safe(c, tmp, &rctx->chunk_list, - entry) { - c->iv_paddr = iv_paddr; - err = __n2_crypt_chunk(tfm, c, qp, true); - if (err) - break; - iv_paddr = c->dest_final - rctx->walk.blocksize; - list_del(&c->entry); - if (unlikely(c != &rctx->chunk)) - kfree(c); - } - final_iv_addr = __va(iv_paddr); - } else { - list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list, - entry) { - if (c == &rctx->chunk) { - iv_paddr = __pa(rctx->walk.iv); - } else { - iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr + - tmp->arr[tmp->arr_len-1].src_len - - rctx->walk.blocksize); - } - if (!final_iv_addr) { - unsigned long pa; - - pa = (c->arr[c->arr_len-1].src_paddr + - c->arr[c->arr_len-1].src_len - - rctx->walk.blocksize); - final_iv_addr = rctx->temp_iv; - memcpy(rctx->temp_iv, __va(pa), - rctx->walk.blocksize); - } - c->iv_paddr = iv_paddr; - err = __n2_crypt_chunk(tfm, c, qp, false); - if (err) - break; - list_del(&c->entry); - if (unlikely(c != &rctx->chunk)) - kfree(c); - } - } - if (!err) { - hv_ret = wait_for_tail(qp); - if (hv_ret != HV_EOK) - err = -EINVAL; - } - - spin_unlock_irqrestore(&qp->lock, flags); - -out: - put_cpu(); - - n2_chunk_complete(req, err ? 
NULL : final_iv_addr); - return err; -} - -static int n2_encrypt_chaining(struct skcipher_request *req) -{ - return n2_do_chaining(req, true); -} - -static int n2_decrypt_chaining(struct skcipher_request *req) -{ - return n2_do_chaining(req, false); -} - -struct n2_skcipher_tmpl { - const char *name; - const char *drv_name; - u8 block_size; - u8 enc_type; - struct skcipher_alg skcipher; -}; - -static const struct n2_skcipher_tmpl skcipher_tmpls[] = { - /* DES: ECB CBC and CFB are supported */ - { .name = "ecb(des)", - .drv_name = "ecb-des", - .block_size = DES_BLOCK_SIZE, - .enc_type = (ENC_TYPE_ALG_DES | - ENC_TYPE_CHAINING_ECB), - .skcipher = { - .min_keysize = DES_KEY_SIZE, - .max_keysize = DES_KEY_SIZE, - .setkey = n2_des_setkey, - .encrypt = n2_encrypt_ecb, - .decrypt = n2_decrypt_ecb, - }, - }, - { .name = "cbc(des)", - .drv_name = "cbc-des", - .block_size = DES_BLOCK_SIZE, - .enc_type = (ENC_TYPE_ALG_DES | - ENC_TYPE_CHAINING_CBC), - .skcipher = { - .ivsize = DES_BLOCK_SIZE, - .min_keysize = DES_KEY_SIZE, - .max_keysize = DES_KEY_SIZE, - .setkey = n2_des_setkey, - .encrypt = n2_encrypt_chaining, - .decrypt = n2_decrypt_chaining, - }, - }, - - /* 3DES: ECB CBC and CFB are supported */ - { .name = "ecb(des3_ede)", - .drv_name = "ecb-3des", - .block_size = DES_BLOCK_SIZE, - .enc_type = (ENC_TYPE_ALG_3DES | - ENC_TYPE_CHAINING_ECB), - .skcipher = { - .min_keysize = 3 * DES_KEY_SIZE, - .max_keysize = 3 * DES_KEY_SIZE, - .setkey = n2_3des_setkey, - .encrypt = n2_encrypt_ecb, - .decrypt = n2_decrypt_ecb, - }, - }, - { .name = "cbc(des3_ede)", - .drv_name = "cbc-3des", - .block_size = DES_BLOCK_SIZE, - .enc_type = (ENC_TYPE_ALG_3DES | - ENC_TYPE_CHAINING_CBC), - .skcipher = { - .ivsize = DES_BLOCK_SIZE, - .min_keysize = 3 * DES_KEY_SIZE, - .max_keysize = 3 * DES_KEY_SIZE, - .setkey = n2_3des_setkey, - .encrypt = n2_encrypt_chaining, - .decrypt = n2_decrypt_chaining, - }, - }, - - /* AES: ECB CBC and CTR are supported */ - { .name = "ecb(aes)", - .drv_name = "ecb-aes", - .block_size = AES_BLOCK_SIZE, - .enc_type = (ENC_TYPE_ALG_AES128 | - ENC_TYPE_CHAINING_ECB), - .skcipher = { - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .setkey = n2_aes_setkey, - .encrypt = n2_encrypt_ecb, - .decrypt = n2_decrypt_ecb, - }, - }, - { .name = "cbc(aes)", - .drv_name = "cbc-aes", - .block_size = AES_BLOCK_SIZE, - .enc_type = (ENC_TYPE_ALG_AES128 | - ENC_TYPE_CHAINING_CBC), - .skcipher = { - .ivsize = AES_BLOCK_SIZE, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .setkey = n2_aes_setkey, - .encrypt = n2_encrypt_chaining, - .decrypt = n2_decrypt_chaining, - }, - }, - { .name = "ctr(aes)", - .drv_name = "ctr-aes", - .block_size = AES_BLOCK_SIZE, - .enc_type = (ENC_TYPE_ALG_AES128 | - ENC_TYPE_CHAINING_COUNTER), - .skcipher = { - .ivsize = AES_BLOCK_SIZE, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .setkey = n2_aes_setkey, - .encrypt = n2_encrypt_chaining, - .decrypt = n2_encrypt_chaining, - }, - }, - -}; -#define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls) - -static LIST_HEAD(skcipher_algs); - -struct n2_hash_tmpl { - const char *name; - const u8 *hash_zero; - const u8 *hash_init; - u8 hw_op_hashsz; - u8 digest_size; - u8 statesize; - u8 block_size; - u8 auth_type; - u8 hmac_type; -}; - -static const __le32 n2_md5_init[MD5_HASH_WORDS] = { - cpu_to_le32(MD5_H0), - cpu_to_le32(MD5_H1), - cpu_to_le32(MD5_H2), - cpu_to_le32(MD5_H3), -}; -static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = { - SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 
-}; -static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = { - SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, - SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, -}; -static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = { - SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, - SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, -}; - -static const struct n2_hash_tmpl hash_tmpls[] = { - { .name = "md5", - .hash_zero = md5_zero_message_hash, - .hash_init = (u8 *)n2_md5_init, - .auth_type = AUTH_TYPE_MD5, - .hmac_type = AUTH_TYPE_HMAC_MD5, - .hw_op_hashsz = MD5_DIGEST_SIZE, - .digest_size = MD5_DIGEST_SIZE, - .statesize = sizeof(struct md5_state), - .block_size = MD5_HMAC_BLOCK_SIZE }, - { .name = "sha1", - .hash_zero = sha1_zero_message_hash, - .hash_init = (u8 *)n2_sha1_init, - .auth_type = AUTH_TYPE_SHA1, - .hmac_type = AUTH_TYPE_HMAC_SHA1, - .hw_op_hashsz = SHA1_DIGEST_SIZE, - .digest_size = SHA1_DIGEST_SIZE, - .statesize = sizeof(struct sha1_state), - .block_size = SHA1_BLOCK_SIZE }, - { .name = "sha256", - .hash_zero = sha256_zero_message_hash, - .hash_init = (u8 *)n2_sha256_init, - .auth_type = AUTH_TYPE_SHA256, - .hmac_type = AUTH_TYPE_HMAC_SHA256, - .hw_op_hashsz = SHA256_DIGEST_SIZE, - .digest_size = SHA256_DIGEST_SIZE, - .statesize = sizeof(struct sha256_state), - .block_size = SHA256_BLOCK_SIZE }, - { .name = "sha224", - .hash_zero = sha224_zero_message_hash, - .hash_init = (u8 *)n2_sha224_init, - .auth_type = AUTH_TYPE_SHA256, - .hmac_type = AUTH_TYPE_RESERVED, - .hw_op_hashsz = SHA256_DIGEST_SIZE, - .digest_size = SHA224_DIGEST_SIZE, - .statesize = sizeof(struct sha256_state), - .block_size = SHA224_BLOCK_SIZE }, -}; -#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls) - -static LIST_HEAD(ahash_algs); -static LIST_HEAD(hmac_algs); - -static int algs_registered; - -static void __n2_unregister_algs(void) -{ - struct n2_skcipher_alg *skcipher, *skcipher_tmp; - struct n2_ahash_alg *alg, *alg_tmp; - struct n2_hmac_alg *hmac, *hmac_tmp; - - list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) { - crypto_unregister_skcipher(&skcipher->skcipher); - list_del(&skcipher->entry); - kfree(skcipher); - } - list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) { - crypto_unregister_ahash(&hmac->derived.alg); - list_del(&hmac->derived.entry); - kfree(hmac); - } - list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) { - crypto_unregister_ahash(&alg->alg); - list_del(&alg->entry); - kfree(alg); - } -} - -static int n2_skcipher_init_tfm(struct crypto_skcipher *tfm) -{ - crypto_skcipher_set_reqsize(tfm, sizeof(struct n2_request_context)); - return 0; -} - -static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl) -{ - struct n2_skcipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); - struct skcipher_alg *alg; - int err; - - if (!p) - return -ENOMEM; - - alg = &p->skcipher; - *alg = tmpl->skcipher; - - snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); - snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name); - alg->base.cra_priority = N2_CRA_PRIORITY; - alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | - CRYPTO_ALG_ALLOCATES_MEMORY; - alg->base.cra_blocksize = tmpl->block_size; - p->enc_type = tmpl->enc_type; - alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context); - alg->base.cra_module = THIS_MODULE; - alg->init = n2_skcipher_init_tfm; - - list_add(&p->entry, &skcipher_algs); - err = crypto_register_skcipher(alg); - if (err) { - pr_err("%s alg registration failed\n", alg->base.cra_name); - 
list_del(&p->entry); - kfree(p); - } else { - pr_info("%s alg registered\n", alg->base.cra_name); - } - return err; -} - -static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash) -{ - struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); - struct ahash_alg *ahash; - struct crypto_alg *base; - int err; - - if (!p) - return -ENOMEM; - - p->child_alg = n2ahash->alg.halg.base.cra_name; - memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg)); - INIT_LIST_HEAD(&p->derived.entry); - - ahash = &p->derived.alg; - ahash->digest = n2_hmac_async_digest; - ahash->setkey = n2_hmac_async_setkey; - - base = &ahash->halg.base; - err = -EINVAL; - if (snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", - p->child_alg) >= CRYPTO_MAX_ALG_NAME) - goto out_free_p; - if (snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", - p->child_alg) >= CRYPTO_MAX_ALG_NAME) - goto out_free_p; - - base->cra_ctxsize = sizeof(struct n2_hmac_ctx); - base->cra_init = n2_hmac_cra_init; - base->cra_exit = n2_hmac_cra_exit; - - list_add(&p->derived.entry, &hmac_algs); - err = crypto_register_ahash(ahash); - if (err) { - pr_err("%s alg registration failed\n", base->cra_name); - list_del(&p->derived.entry); -out_free_p: - kfree(p); - } else { - pr_info("%s alg registered\n", base->cra_name); - } - return err; -} - -static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl) -{ - struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); - struct hash_alg_common *halg; - struct crypto_alg *base; - struct ahash_alg *ahash; - int err; - - if (!p) - return -ENOMEM; - - p->hash_zero = tmpl->hash_zero; - p->hash_init = tmpl->hash_init; - p->auth_type = tmpl->auth_type; - p->hmac_type = tmpl->hmac_type; - p->hw_op_hashsz = tmpl->hw_op_hashsz; - p->digest_size = tmpl->digest_size; - - ahash = &p->alg; - ahash->init = n2_hash_async_init; - ahash->update = n2_hash_async_update; - ahash->final = n2_hash_async_final; - ahash->finup = n2_hash_async_finup; - ahash->digest = n2_hash_async_digest; - ahash->export = n2_hash_async_noexport; - ahash->import = n2_hash_async_noimport; - - halg = &ahash->halg; - halg->digestsize = tmpl->digest_size; - halg->statesize = tmpl->statesize; - - base = &halg->base; - snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); - snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name); - base->cra_priority = N2_CRA_PRIORITY; - base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_NEED_FALLBACK; - base->cra_blocksize = tmpl->block_size; - base->cra_ctxsize = sizeof(struct n2_hash_ctx); - base->cra_module = THIS_MODULE; - base->cra_init = n2_hash_cra_init; - base->cra_exit = n2_hash_cra_exit; - - list_add(&p->entry, &ahash_algs); - err = crypto_register_ahash(ahash); - if (err) { - pr_err("%s alg registration failed\n", base->cra_name); - list_del(&p->entry); - kfree(p); - } else { - pr_info("%s alg registered\n", base->cra_name); - } - if (!err && p->hmac_type != AUTH_TYPE_RESERVED) - err = __n2_register_one_hmac(p); - return err; -} - -static int n2_register_algs(void) -{ - int i, err = 0; - - mutex_lock(&spu_lock); - if (algs_registered++) - goto out; - - for (i = 0; i < NUM_HASH_TMPLS; i++) { - err = __n2_register_one_ahash(&hash_tmpls[i]); - if (err) { - __n2_unregister_algs(); - goto out; - } - } - for (i = 0; i < NUM_CIPHER_TMPLS; i++) { - err = __n2_register_one_skcipher(&skcipher_tmpls[i]); - if (err) { - __n2_unregister_algs(); - goto out; - } - } - -out: - mutex_unlock(&spu_lock); - return err; -} - -static void 
n2_unregister_algs(void) -{ - mutex_lock(&spu_lock); - if (!--algs_registered) - __n2_unregister_algs(); - mutex_unlock(&spu_lock); -} - -/* To map CWQ queues to interrupt sources, the hypervisor API provides - * a devino. This isn't very useful to us because all of the - * interrupts listed in the device_node have been translated to - * Linux virtual IRQ cookie numbers. - * - * So we have to back-translate, going through the 'intr' and 'ino' - * property tables of the n2cp MDESC node, matching it with the OF - * 'interrupts' property entries, in order to figure out which - * devino goes to which already-translated IRQ. - */ -static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip, - unsigned long dev_ino) -{ - const unsigned int *dev_intrs; - unsigned int intr; - int i; - - for (i = 0; i < ip->num_intrs; i++) { - if (ip->ino_table[i].ino == dev_ino) - break; - } - if (i == ip->num_intrs) - return -ENODEV; - - intr = ip->ino_table[i].intr; - - dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL); - if (!dev_intrs) - return -ENODEV; - - for (i = 0; i < dev->archdata.num_irqs; i++) { - if (dev_intrs[i] == intr) - return i; - } - - return -ENODEV; -} - -static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip, - const char *irq_name, struct spu_queue *p, - irq_handler_t handler) -{ - unsigned long herr; - int index; - - herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino); - if (herr) - return -EINVAL; - - index = find_devino_index(dev, ip, p->devino); - if (index < 0) - return index; - - p->irq = dev->archdata.irqs[index]; - - sprintf(p->irq_name, "%s-%d", irq_name, index); - - return request_irq(p->irq, handler, 0, p->irq_name, p); -} - -static struct kmem_cache *queue_cache[2]; - -static void *new_queue(unsigned long q_type) -{ - return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL); -} - -static void free_queue(void *p, unsigned long q_type) -{ - kmem_cache_free(queue_cache[q_type - 1], p); -} - -static int queue_cache_init(void) -{ - if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) - queue_cache[HV_NCS_QTYPE_MAU - 1] = - kmem_cache_create("mau_queue", - (MAU_NUM_ENTRIES * - MAU_ENTRY_SIZE), - MAU_ENTRY_SIZE, 0, NULL); - if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) - return -ENOMEM; - - if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) - queue_cache[HV_NCS_QTYPE_CWQ - 1] = - kmem_cache_create("cwq_queue", - (CWQ_NUM_ENTRIES * - CWQ_ENTRY_SIZE), - CWQ_ENTRY_SIZE, 0, NULL); - if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) { - kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); - queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL; - return -ENOMEM; - } - return 0; -} - -static void queue_cache_destroy(void) -{ - kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); - kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]); - queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL; - queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL; -} - -static long spu_queue_register_workfn(void *arg) -{ - struct spu_qreg *qr = arg; - struct spu_queue *p = qr->queue; - unsigned long q_type = qr->type; - unsigned long hv_ret; - - hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q), - CWQ_NUM_ENTRIES, &p->qhandle); - if (!hv_ret) - sun4v_ncs_sethead_marker(p->qhandle, 0); - - return hv_ret ? 
-EINVAL : 0; -} - -static int spu_queue_register(struct spu_queue *p, unsigned long q_type) -{ - int cpu = cpumask_any_and(&p->sharing, cpu_online_mask); - struct spu_qreg qr = { .queue = p, .type = q_type }; - - return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr); -} - -static int spu_queue_setup(struct spu_queue *p) -{ - int err; - - p->q = new_queue(p->q_type); - if (!p->q) - return -ENOMEM; - - err = spu_queue_register(p, p->q_type); - if (err) { - free_queue(p->q, p->q_type); - p->q = NULL; - } - - return err; -} - -static void spu_queue_destroy(struct spu_queue *p) -{ - unsigned long hv_ret; - - if (!p->q) - return; - - hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle); - - if (!hv_ret) - free_queue(p->q, p->q_type); -} - -static void spu_list_destroy(struct list_head *list) -{ - struct spu_queue *p, *n; - - list_for_each_entry_safe(p, n, list, list) { - int i; - - for (i = 0; i < NR_CPUS; i++) { - if (cpu_to_cwq[i] == p) - cpu_to_cwq[i] = NULL; - } - - if (p->irq) { - free_irq(p->irq, p); - p->irq = 0; - } - spu_queue_destroy(p); - list_del(&p->list); - kfree(p); - } -} - -/* Walk the backward arcs of a CWQ 'exec-unit' node, - * gathering cpu membership information. - */ -static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc, - struct platform_device *dev, - u64 node, struct spu_queue *p, - struct spu_queue **table) -{ - u64 arc; - - mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) { - u64 tgt = mdesc_arc_target(mdesc, arc); - const char *name = mdesc_node_name(mdesc, tgt); - const u64 *id; - - if (strcmp(name, "cpu")) - continue; - id = mdesc_get_property(mdesc, tgt, "id", NULL); - if (table[*id] != NULL) { - dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n", - dev->dev.of_node); - return -EINVAL; - } - cpumask_set_cpu(*id, &p->sharing); - table[*id] = p; - } - return 0; -} - -/* Process an 'exec-unit' MDESC node of type 'cwq'. 
*/ -static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list, - struct platform_device *dev, struct mdesc_handle *mdesc, - u64 node, const char *iname, unsigned long q_type, - irq_handler_t handler, struct spu_queue **table) -{ - struct spu_queue *p; - int err; - - p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL); - if (!p) { - dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n", - dev->dev.of_node); - return -ENOMEM; - } - - cpumask_clear(&p->sharing); - spin_lock_init(&p->lock); - p->q_type = q_type; - INIT_LIST_HEAD(&p->jobs); - list_add(&p->list, list); - - err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table); - if (err) - return err; - - err = spu_queue_setup(p); - if (err) - return err; - - return spu_map_ino(dev, ip, iname, p, handler); -} - -static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev, - struct spu_mdesc_info *ip, struct list_head *list, - const char *exec_name, unsigned long q_type, - irq_handler_t handler, struct spu_queue **table) -{ - int err = 0; - u64 node; - - mdesc_for_each_node_by_name(mdesc, node, "exec-unit") { - const char *type; - - type = mdesc_get_property(mdesc, node, "type", NULL); - if (!type || strcmp(type, exec_name)) - continue; - - err = handle_exec_unit(ip, list, dev, mdesc, node, - exec_name, q_type, handler, table); - if (err) { - spu_list_destroy(list); - break; - } - } - - return err; -} - -static int get_irq_props(struct mdesc_handle *mdesc, u64 node, - struct spu_mdesc_info *ip) -{ - const u64 *ino; - int ino_len; - int i; - - ino = mdesc_get_property(mdesc, node, "ino", &ino_len); - if (!ino) { - printk("NO 'ino'\n"); - return -ENODEV; - } - - ip->num_intrs = ino_len / sizeof(u64); - ip->ino_table = kzalloc((sizeof(struct ino_blob) * - ip->num_intrs), - GFP_KERNEL); - if (!ip->ino_table) - return -ENOMEM; - - for (i = 0; i < ip->num_intrs; i++) { - struct ino_blob *b = &ip->ino_table[i]; - b->intr = i + 1; - b->ino = ino[i]; - } - - return 0; -} - -static int grab_mdesc_irq_props(struct mdesc_handle *mdesc, - struct platform_device *dev, - struct spu_mdesc_info *ip, - const char *node_name) -{ - u64 node, reg; - - if (of_property_read_reg(dev->dev.of_node, 0, &reg, NULL) < 0) - return -ENODEV; - - mdesc_for_each_node_by_name(mdesc, node, "virtual-device") { - const char *name; - const u64 *chdl; - - name = mdesc_get_property(mdesc, node, "name", NULL); - if (!name || strcmp(name, node_name)) - continue; - chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL); - if (!chdl || (*chdl != reg)) - continue; - ip->cfg_handle = *chdl; - return get_irq_props(mdesc, node, ip); - } - - return -ENODEV; -} - -static unsigned long n2_spu_hvapi_major; -static unsigned long n2_spu_hvapi_minor; - -static int n2_spu_hvapi_register(void) -{ - int err; - - n2_spu_hvapi_major = 2; - n2_spu_hvapi_minor = 0; - - err = sun4v_hvapi_register(HV_GRP_NCS, - n2_spu_hvapi_major, - &n2_spu_hvapi_minor); - - if (!err) - pr_info("Registered NCS HVAPI version %lu.%lu\n", - n2_spu_hvapi_major, - n2_spu_hvapi_minor); - - return err; -} - -static void n2_spu_hvapi_unregister(void) -{ - sun4v_hvapi_unregister(HV_GRP_NCS); -} - -static int global_ref; - -static int grab_global_resources(void) -{ - int err = 0; - - mutex_lock(&spu_lock); - - if (global_ref++) - goto out; - - err = n2_spu_hvapi_register(); - if (err) - goto out; - - err = queue_cache_init(); - if (err) - goto out_hvapi_release; - - err = -ENOMEM; - cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *), - GFP_KERNEL); - if (!cpu_to_cwq) - goto 
out_queue_cache_destroy; - - cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *), - GFP_KERNEL); - if (!cpu_to_mau) - goto out_free_cwq_table; - - err = 0; - -out: - if (err) - global_ref--; - mutex_unlock(&spu_lock); - return err; - -out_free_cwq_table: - kfree(cpu_to_cwq); - cpu_to_cwq = NULL; - -out_queue_cache_destroy: - queue_cache_destroy(); - -out_hvapi_release: - n2_spu_hvapi_unregister(); - goto out; -} - -static void release_global_resources(void) -{ - mutex_lock(&spu_lock); - if (!--global_ref) { - kfree(cpu_to_cwq); - cpu_to_cwq = NULL; - - kfree(cpu_to_mau); - cpu_to_mau = NULL; - - queue_cache_destroy(); - n2_spu_hvapi_unregister(); - } - mutex_unlock(&spu_lock); -} - -static struct n2_crypto *alloc_n2cp(void) -{ - struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL); - - if (np) - INIT_LIST_HEAD(&np->cwq_list); - - return np; -} - -static void free_n2cp(struct n2_crypto *np) -{ - kfree(np->cwq_info.ino_table); - np->cwq_info.ino_table = NULL; - - kfree(np); -} - -static void n2_spu_driver_version(void) -{ - static int n2_spu_version_printed; - - if (n2_spu_version_printed++ == 0) - pr_info("%s", version); -} - -static int n2_crypto_probe(struct platform_device *dev) -{ - struct mdesc_handle *mdesc; - struct n2_crypto *np; - int err; - - n2_spu_driver_version(); - - pr_info("Found N2CP at %pOF\n", dev->dev.of_node); - - np = alloc_n2cp(); - if (!np) { - dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n", - dev->dev.of_node); - return -ENOMEM; - } - - err = grab_global_resources(); - if (err) { - dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n", - dev->dev.of_node); - goto out_free_n2cp; - } - - mdesc = mdesc_grab(); - - if (!mdesc) { - dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n", - dev->dev.of_node); - err = -ENODEV; - goto out_free_global; - } - err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp"); - if (err) { - dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n", - dev->dev.of_node); - mdesc_release(mdesc); - goto out_free_global; - } - - err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list, - "cwq", HV_NCS_QTYPE_CWQ, cwq_intr, - cpu_to_cwq); - mdesc_release(mdesc); - - if (err) { - dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n", - dev->dev.of_node); - goto out_free_global; - } - - err = n2_register_algs(); - if (err) { - dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n", - dev->dev.of_node); - goto out_free_spu_list; - } - - dev_set_drvdata(&dev->dev, np); - - return 0; - -out_free_spu_list: - spu_list_destroy(&np->cwq_list); - -out_free_global: - release_global_resources(); - -out_free_n2cp: - free_n2cp(np); - - return err; -} - -static void n2_crypto_remove(struct platform_device *dev) -{ - struct n2_crypto *np = dev_get_drvdata(&dev->dev); - - n2_unregister_algs(); - - spu_list_destroy(&np->cwq_list); - - release_global_resources(); - - free_n2cp(np); -} - -static struct n2_mau *alloc_ncp(void) -{ - struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL); - - if (mp) - INIT_LIST_HEAD(&mp->mau_list); - - return mp; -} - -static void free_ncp(struct n2_mau *mp) -{ - kfree(mp->mau_info.ino_table); - mp->mau_info.ino_table = NULL; - - kfree(mp); -} - -static int n2_mau_probe(struct platform_device *dev) -{ - struct mdesc_handle *mdesc; - struct n2_mau *mp; - int err; - - n2_spu_driver_version(); - - pr_info("Found NCP at %pOF\n", dev->dev.of_node); - - mp = alloc_ncp(); - if (!mp) { - dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n", - dev->dev.of_node); - return -ENOMEM; - } - - err 
= grab_global_resources(); - if (err) { - dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n", - dev->dev.of_node); - goto out_free_ncp; - } - - mdesc = mdesc_grab(); - - if (!mdesc) { - dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n", - dev->dev.of_node); - err = -ENODEV; - goto out_free_global; - } - - err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp"); - if (err) { - dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n", - dev->dev.of_node); - mdesc_release(mdesc); - goto out_free_global; - } - - err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list, - "mau", HV_NCS_QTYPE_MAU, mau_intr, - cpu_to_mau); - mdesc_release(mdesc); - - if (err) { - dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n", - dev->dev.of_node); - goto out_free_global; - } - - dev_set_drvdata(&dev->dev, mp); - - return 0; - -out_free_global: - release_global_resources(); - -out_free_ncp: - free_ncp(mp); - - return err; -} - -static void n2_mau_remove(struct platform_device *dev) -{ - struct n2_mau *mp = dev_get_drvdata(&dev->dev); - - spu_list_destroy(&mp->mau_list); - - release_global_resources(); - - free_ncp(mp); -} - -static const struct of_device_id n2_crypto_match[] = { - { - .name = "n2cp", - .compatible = "SUNW,n2-cwq", - }, - { - .name = "n2cp", - .compatible = "SUNW,vf-cwq", - }, - { - .name = "n2cp", - .compatible = "SUNW,kt-cwq", - }, - {}, -}; - -MODULE_DEVICE_TABLE(of, n2_crypto_match); - -static struct platform_driver n2_crypto_driver = { - .driver = { - .name = "n2cp", - .of_match_table = n2_crypto_match, - }, - .probe = n2_crypto_probe, - .remove = n2_crypto_remove, -}; - -static const struct of_device_id n2_mau_match[] = { - { - .name = "ncp", - .compatible = "SUNW,n2-mau", - }, - { - .name = "ncp", - .compatible = "SUNW,vf-mau", - }, - { - .name = "ncp", - .compatible = "SUNW,kt-mau", - }, - {}, -}; - -MODULE_DEVICE_TABLE(of, n2_mau_match); - -static struct platform_driver n2_mau_driver = { - .driver = { - .name = "ncp", - .of_match_table = n2_mau_match, - }, - .probe = n2_mau_probe, - .remove = n2_mau_remove, -}; - -static struct platform_driver * const drivers[] = { - &n2_crypto_driver, - &n2_mau_driver, -}; - -static int __init n2_init(void) -{ - return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); -} - -static void __exit n2_exit(void) -{ - platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); -} - -module_init(n2_init); -module_exit(n2_exit); diff --git a/drivers/crypto/n2_core.h b/drivers/crypto/n2_core.h deleted file mode 100644 index 2406763b0306..000000000000 --- a/drivers/crypto/n2_core.h +++ /dev/null @@ -1,232 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _N2_CORE_H -#define _N2_CORE_H - -#ifndef __ASSEMBLY__ - -struct ino_blob { - u64 intr; - u64 ino; -}; - -struct spu_mdesc_info { - u64 cfg_handle; - struct ino_blob *ino_table; - int num_intrs; -}; - -struct n2_crypto { - struct spu_mdesc_info cwq_info; - struct list_head cwq_list; -}; - -struct n2_mau { - struct spu_mdesc_info mau_info; - struct list_head mau_list; -}; - -#define CWQ_ENTRY_SIZE 64 -#define CWQ_NUM_ENTRIES 64 - -#define MAU_ENTRY_SIZE 64 -#define MAU_NUM_ENTRIES 64 - -struct cwq_initial_entry { - u64 control; - u64 src_addr; - u64 auth_key_addr; - u64 auth_iv_addr; - u64 final_auth_state_addr; - u64 enc_key_addr; - u64 enc_iv_addr; - u64 dest_addr; -}; - -struct cwq_ext_entry { - u64 len; - u64 src_addr; - u64 resv1; - u64 resv2; - u64 resv3; - u64 resv4; - u64 resv5; - u64 resv6; -}; - -struct cwq_final_entry { - u64 control; - u64 src_addr; - u64 resv1; - u64 
resv2; - u64 resv3; - u64 resv4; - u64 resv5; - u64 resv6; -}; - -#define CONTROL_LEN 0x000000000000ffffULL -#define CONTROL_LEN_SHIFT 0 -#define CONTROL_HMAC_KEY_LEN 0x0000000000ff0000ULL -#define CONTROL_HMAC_KEY_LEN_SHIFT 16 -#define CONTROL_ENC_TYPE 0x00000000ff000000ULL -#define CONTROL_ENC_TYPE_SHIFT 24 -#define ENC_TYPE_ALG_RC4_STREAM 0x00ULL -#define ENC_TYPE_ALG_RC4_NOSTREAM 0x04ULL -#define ENC_TYPE_ALG_DES 0x08ULL -#define ENC_TYPE_ALG_3DES 0x0cULL -#define ENC_TYPE_ALG_AES128 0x10ULL -#define ENC_TYPE_ALG_AES192 0x14ULL -#define ENC_TYPE_ALG_AES256 0x18ULL -#define ENC_TYPE_ALG_RESERVED 0x1cULL -#define ENC_TYPE_ALG_MASK 0x1cULL -#define ENC_TYPE_CHAINING_ECB 0x00ULL -#define ENC_TYPE_CHAINING_CBC 0x01ULL -#define ENC_TYPE_CHAINING_CFB 0x02ULL -#define ENC_TYPE_CHAINING_COUNTER 0x03ULL -#define ENC_TYPE_CHAINING_MASK 0x03ULL -#define CONTROL_AUTH_TYPE 0x0000001f00000000ULL -#define CONTROL_AUTH_TYPE_SHIFT 32 -#define AUTH_TYPE_RESERVED 0x00ULL -#define AUTH_TYPE_MD5 0x01ULL -#define AUTH_TYPE_SHA1 0x02ULL -#define AUTH_TYPE_SHA256 0x03ULL -#define AUTH_TYPE_CRC32 0x04ULL -#define AUTH_TYPE_HMAC_MD5 0x05ULL -#define AUTH_TYPE_HMAC_SHA1 0x06ULL -#define AUTH_TYPE_HMAC_SHA256 0x07ULL -#define AUTH_TYPE_TCP_CHECKSUM 0x08ULL -#define AUTH_TYPE_SSL_HMAC_MD5 0x09ULL -#define AUTH_TYPE_SSL_HMAC_SHA1 0x0aULL -#define AUTH_TYPE_SSL_HMAC_SHA256 0x0bULL -#define CONTROL_STRAND 0x000000e000000000ULL -#define CONTROL_STRAND_SHIFT 37 -#define CONTROL_HASH_LEN 0x0000ff0000000000ULL -#define CONTROL_HASH_LEN_SHIFT 40 -#define CONTROL_INTERRUPT 0x0001000000000000ULL -#define CONTROL_STORE_FINAL_AUTH_STATE 0x0002000000000000ULL -#define CONTROL_RESERVED 0x001c000000000000ULL -#define CONTROL_HV_DONE 0x0004000000000000ULL -#define CONTROL_HV_PROTOCOL_ERROR 0x0008000000000000ULL -#define CONTROL_HV_HARDWARE_ERROR 0x0010000000000000ULL -#define CONTROL_END_OF_BLOCK 0x0020000000000000ULL -#define CONTROL_START_OF_BLOCK 0x0040000000000000ULL -#define CONTROL_ENCRYPT 0x0080000000000000ULL -#define CONTROL_OPCODE 0xff00000000000000ULL -#define CONTROL_OPCODE_SHIFT 56 -#define OPCODE_INPLACE_BIT 0x80ULL -#define OPCODE_SSL_KEYBLOCK 0x10ULL -#define OPCODE_COPY 0x20ULL -#define OPCODE_ENCRYPT 0x40ULL -#define OPCODE_AUTH_MAC 0x41ULL - -#endif /* !(__ASSEMBLY__) */ - -/* NCS v2.0 hypervisor interfaces */ -#define HV_NCS_QTYPE_MAU 0x01 -#define HV_NCS_QTYPE_CWQ 0x02 - -/* ncs_qconf() - * TRAP: HV_FAST_TRAP - * FUNCTION: HV_FAST_NCS_QCONF - * ARG0: Queue type (HV_NCS_QTYPE_{MAU,CWQ}) - * ARG1: Real address of queue, or handle for unconfigure - * ARG2: Number of entries in queue, zero for unconfigure - * RET0: status - * RET1: queue handle - * - * Configure a queue in the stream processing unit. - * - * The real address given as the base must be 64-byte - * aligned. - * - * The queue size can range from a minimum of 2 to a maximum - * of 64. The queue size must be a power of two. - * - * To unconfigure a queue, specify a length of zero and place - * the queue handle into ARG1. - * - * On configure success the hypervisor will set the FIRST, HEAD, - * and TAIL registers to the address of the first entry in the - * queue. The LAST register will be set to point to the last - * entry in the queue. 
- */ -#define HV_FAST_NCS_QCONF 0x111 - -/* ncs_qinfo() - * TRAP: HV_FAST_TRAP - * FUNCTION: HV_FAST_NCS_QINFO - * ARG0: Queue handle - * RET0: status - * RET1: Queue type (HV_NCS_QTYPE_{MAU,CWQ}) - * RET2: Queue base address - * RET3: Number of entries - */ -#define HV_FAST_NCS_QINFO 0x112 - -/* ncs_gethead() - * TRAP: HV_FAST_TRAP - * FUNCTION: HV_FAST_NCS_GETHEAD - * ARG0: Queue handle - * RET0: status - * RET1: queue head offset - */ -#define HV_FAST_NCS_GETHEAD 0x113 - -/* ncs_gettail() - * TRAP: HV_FAST_TRAP - * FUNCTION: HV_FAST_NCS_GETTAIL - * ARG0: Queue handle - * RET0: status - * RET1: queue tail offset - */ -#define HV_FAST_NCS_GETTAIL 0x114 - -/* ncs_settail() - * TRAP: HV_FAST_TRAP - * FUNCTION: HV_FAST_NCS_SETTAIL - * ARG0: Queue handle - * ARG1: New tail offset - * RET0: status - */ -#define HV_FAST_NCS_SETTAIL 0x115 - -/* ncs_qhandle_to_devino() - * TRAP: HV_FAST_TRAP - * FUNCTION: HV_FAST_NCS_QHANDLE_TO_DEVINO - * ARG0: Queue handle - * RET0: status - * RET1: devino - */ -#define HV_FAST_NCS_QHANDLE_TO_DEVINO 0x116 - -/* ncs_sethead_marker() - * TRAP: HV_FAST_TRAP - * FUNCTION: HV_FAST_NCS_SETHEAD_MARKER - * ARG0: Queue handle - * ARG1: New head offset - * RET0: status - */ -#define HV_FAST_NCS_SETHEAD_MARKER 0x117 - -#ifndef __ASSEMBLY__ -extern unsigned long sun4v_ncs_qconf(unsigned long queue_type, - unsigned long queue_ra, - unsigned long num_entries, - unsigned long *qhandle); -extern unsigned long sun4v_ncs_qinfo(unsigned long qhandle, - unsigned long *queue_type, - unsigned long *queue_ra, - unsigned long *num_entries); -extern unsigned long sun4v_ncs_gethead(unsigned long qhandle, - unsigned long *head); -extern unsigned long sun4v_ncs_gettail(unsigned long qhandle, - unsigned long *tail); -extern unsigned long sun4v_ncs_settail(unsigned long qhandle, - unsigned long tail); -extern unsigned long sun4v_ncs_qhandle_to_devino(unsigned long qhandle, - unsigned long *devino); -extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle, - unsigned long head); -#endif /* !(__ASSEMBLY__) */ - -#endif /* _N2_CORE_H */ diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index e27b84616743..551dd32a8db0 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -18,7 +18,6 @@ #include <crypto/internal/aead.h> #include <crypto/internal/engine.h> #include <crypto/internal/skcipher.h> -#include <crypto/scatterwalk.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/err.h> @@ -272,9 +271,9 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd, int ret; if (dd->pio_only) { - scatterwalk_start(&dd->in_walk, dd->in_sg); + dd->in_sg_offset = 0; if (out_sg_len) - scatterwalk_start(&dd->out_walk, dd->out_sg); + dd->out_sg_offset = 0; /* Enable DATAIN interrupt and let it take care of the rest */ @@ -871,21 +870,18 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id) BUG_ON(!dd->in_sg); - BUG_ON(_calc_walked(in) > dd->in_sg->length); + BUG_ON(dd->in_sg_offset > dd->in_sg->length); - src = sg_virt(dd->in_sg) + _calc_walked(in); + src = sg_virt(dd->in_sg) + dd->in_sg_offset; for (i = 0; i < AES_BLOCK_WORDS; i++) { omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src); - - scatterwalk_advance(&dd->in_walk, 4); - if (dd->in_sg->length == _calc_walked(in)) { + dd->in_sg_offset += 4; + if (dd->in_sg_offset == dd->in_sg->length) { dd->in_sg = sg_next(dd->in_sg); if (dd->in_sg) { - scatterwalk_start(&dd->in_walk, - dd->in_sg); - src = sg_virt(dd->in_sg) + - _calc_walked(in); + dd->in_sg_offset = 0; + src = sg_virt(dd->in_sg); 
} } else { src++; @@ -904,20 +900,18 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id) BUG_ON(!dd->out_sg); - BUG_ON(_calc_walked(out) > dd->out_sg->length); + BUG_ON(dd->out_sg_offset > dd->out_sg->length); - dst = sg_virt(dd->out_sg) + _calc_walked(out); + dst = sg_virt(dd->out_sg) + dd->out_sg_offset; for (i = 0; i < AES_BLOCK_WORDS; i++) { *dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i)); - scatterwalk_advance(&dd->out_walk, 4); - if (dd->out_sg->length == _calc_walked(out)) { + dd->out_sg_offset += 4; + if (dd->out_sg_offset == dd->out_sg->length) { dd->out_sg = sg_next(dd->out_sg); if (dd->out_sg) { - scatterwalk_start(&dd->out_walk, - dd->out_sg); - dst = sg_virt(dd->out_sg) + - _calc_walked(out); + dd->out_sg_offset = 0; + dst = sg_virt(dd->out_sg); } } else { dst++; diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h index 0f35c9164764..41d67780fd45 100644 --- a/drivers/crypto/omap-aes.h +++ b/drivers/crypto/omap-aes.h @@ -14,8 +14,6 @@ #define DST_MAXBURST 4 #define DMA_MIN (DST_MAXBURST * sizeof(u32)) -#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset) - /* * OMAP TRM gives bitfields as start:end, where start is the higher bit * number. For example 7:0 @@ -186,8 +184,8 @@ struct omap_aes_dev { struct scatterlist out_sgl; struct scatterlist *orig_out; - struct scatter_walk in_walk; - struct scatter_walk out_walk; + unsigned int in_sg_offset; + unsigned int out_sg_offset; struct dma_chan *dma_lch_in; struct dma_chan *dma_lch_out; int in_sg_len; diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 498cbd585ed1..a099460d5f21 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -19,7 +19,6 @@ #include <crypto/engine.h> #include <crypto/internal/des.h> #include <crypto/internal/skcipher.h> -#include <crypto/scatterwalk.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/err.h> @@ -40,8 +39,6 @@ #define DES_BLOCK_WORDS (DES_BLOCK_SIZE >> 2) -#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset) - #define DES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \ ((x ^ 0x01) * 0x04)) @@ -152,8 +149,8 @@ struct omap_des_dev { struct scatterlist out_sgl; struct scatterlist *orig_out; - struct scatter_walk in_walk; - struct scatter_walk out_walk; + unsigned int in_sg_offset; + unsigned int out_sg_offset; struct dma_chan *dma_lch_in; struct dma_chan *dma_lch_out; int in_sg_len; @@ -379,8 +376,8 @@ static int omap_des_crypt_dma(struct crypto_tfm *tfm, int ret; if (dd->pio_only) { - scatterwalk_start(&dd->in_walk, dd->in_sg); - scatterwalk_start(&dd->out_walk, dd->out_sg); + dd->in_sg_offset = 0; + dd->out_sg_offset = 0; /* Enable DATAIN interrupt and let it take care of the rest */ @@ -836,21 +833,18 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id) BUG_ON(!dd->in_sg); - BUG_ON(_calc_walked(in) > dd->in_sg->length); + BUG_ON(dd->in_sg_offset > dd->in_sg->length); - src = sg_virt(dd->in_sg) + _calc_walked(in); + src = sg_virt(dd->in_sg) + dd->in_sg_offset; for (i = 0; i < DES_BLOCK_WORDS; i++) { omap_des_write(dd, DES_REG_DATA_N(dd, i), *src); - - scatterwalk_advance(&dd->in_walk, 4); - if (dd->in_sg->length == _calc_walked(in)) { + dd->in_sg_offset += 4; + if (dd->in_sg_offset == dd->in_sg->length) { dd->in_sg = sg_next(dd->in_sg); if (dd->in_sg) { - scatterwalk_start(&dd->in_walk, - dd->in_sg); - src = sg_virt(dd->in_sg) + - _calc_walked(in); + dd->in_sg_offset = 0; + src = sg_virt(dd->in_sg); } } else { src++; @@ -869,20 +863,18 @@ static irqreturn_t 
omap_des_irq(int irq, void *dev_id) BUG_ON(!dd->out_sg); - BUG_ON(_calc_walked(out) > dd->out_sg->length); + BUG_ON(dd->out_sg_offset > dd->out_sg->length); - dst = sg_virt(dd->out_sg) + _calc_walked(out); + dst = sg_virt(dd->out_sg) + dd->out_sg_offset; for (i = 0; i < DES_BLOCK_WORDS; i++) { *dst = omap_des_read(dd, DES_REG_DATA_N(dd, i)); - scatterwalk_advance(&dd->out_walk, 4); - if (dd->out_sg->length == _calc_walked(out)) { + dd->out_sg_offset += 4; + if (dd->out_sg_offset == dd->out_sg->length) { dd->out_sg = sg_next(dd->out_sg); if (dd->out_sg) { - scatterwalk_start(&dd->out_walk, - dd->out_sg); - dst = sg_virt(dd->out_sg) + - _calc_walked(out); + dd->out_sg_offset = 0; + dst = sg_virt(dd->out_sg); } } else { dst++; diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c index 7d811728f047..97b56e92ea33 100644 --- a/drivers/crypto/qce/aead.c +++ b/drivers/crypto/qce/aead.c @@ -786,7 +786,7 @@ static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_devi alg->init = qce_aead_init; alg->exit = qce_aead_exit; - alg->base.cra_priority = 300; + alg->base.cra_priority = 275; alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c index e228a31fe28d..e95e84486d9a 100644 --- a/drivers/crypto/qce/core.c +++ b/drivers/crypto/qce/core.c @@ -3,14 +3,15 @@ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. */ +#include <linux/cleanup.h> #include <linux/clk.h> +#include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/interconnect.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/platform_device.h> -#include <linux/spinlock.h> #include <linux/types.h> #include <crypto/algapi.h> #include <crypto/internal/hash.h> @@ -37,9 +38,10 @@ static const struct qce_algo_ops *qce_ops[] = { #endif }; -static void qce_unregister_algs(struct qce_device *qce) +static void qce_unregister_algs(void *data) { const struct qce_algo_ops *ops; + struct qce_device *qce = data; int i; for (i = 0; i < ARRAY_SIZE(qce_ops); i++) { @@ -48,19 +50,22 @@ static void qce_unregister_algs(struct qce_device *qce) } } -static int qce_register_algs(struct qce_device *qce) +static int devm_qce_register_algs(struct qce_device *qce) { const struct qce_algo_ops *ops; - int i, ret = -ENODEV; + int i, j, ret = -ENODEV; for (i = 0; i < ARRAY_SIZE(qce_ops); i++) { ops = qce_ops[i]; ret = ops->register_algs(qce); - if (ret) - break; + if (ret) { + for (j = i - 1; j >= 0; j--) + ops->unregister_algs(qce); + return ret; + } } - return ret; + return devm_add_action_or_reset(qce->dev, qce_unregister_algs, qce); } static int qce_handle_request(struct crypto_async_request *async_req) @@ -84,55 +89,49 @@ static int qce_handle_queue(struct qce_device *qce, struct crypto_async_request *req) { struct crypto_async_request *async_req, *backlog; - unsigned long flags; int ret = 0, err; - spin_lock_irqsave(&qce->lock, flags); + scoped_guard(mutex, &qce->lock) { + if (req) + ret = crypto_enqueue_request(&qce->queue, req); - if (req) - ret = crypto_enqueue_request(&qce->queue, req); + /* busy, do not dequeue request */ + if (qce->req) + return ret; - /* busy, do not dequeue request */ - if (qce->req) { - spin_unlock_irqrestore(&qce->lock, flags); - return ret; + backlog = crypto_get_backlog(&qce->queue); + async_req = crypto_dequeue_request(&qce->queue); + if (async_req) + qce->req = async_req; } - backlog = 
crypto_get_backlog(&qce->queue); - async_req = crypto_dequeue_request(&qce->queue); - if (async_req) - qce->req = async_req; - - spin_unlock_irqrestore(&qce->lock, flags); - if (!async_req) return ret; if (backlog) { - spin_lock_bh(&qce->lock); - crypto_request_complete(backlog, -EINPROGRESS); - spin_unlock_bh(&qce->lock); + scoped_guard(mutex, &qce->lock) + crypto_request_complete(backlog, -EINPROGRESS); } err = qce_handle_request(async_req); if (err) { qce->result = err; - tasklet_schedule(&qce->done_tasklet); + schedule_work(&qce->done_work); } return ret; } -static void qce_tasklet_req_done(unsigned long data) +static void qce_req_done_work(struct work_struct *work) { - struct qce_device *qce = (struct qce_device *)data; + struct qce_device *qce = container_of(work, struct qce_device, + done_work); struct crypto_async_request *req; - unsigned long flags; - spin_lock_irqsave(&qce->lock, flags); - req = qce->req; - qce->req = NULL; - spin_unlock_irqrestore(&qce->lock, flags); + scoped_guard(mutex, &qce->lock) { + req = qce->req; + qce->req = NULL; + } if (req) crypto_request_complete(req, qce->result); @@ -149,7 +148,7 @@ static int qce_async_request_enqueue(struct qce_device *qce, static void qce_async_request_done(struct qce_device *qce, int ret) { qce->result = ret; - tasklet_schedule(&qce->done_tasklet); + schedule_work(&qce->done_work); } static int qce_check_version(struct qce_device *qce) @@ -209,15 +208,15 @@ static int qce_crypto_probe(struct platform_device *pdev) if (ret < 0) return ret; - qce->core = devm_clk_get_optional(qce->dev, "core"); + qce->core = devm_clk_get_optional_enabled(qce->dev, "core"); if (IS_ERR(qce->core)) return PTR_ERR(qce->core); - qce->iface = devm_clk_get_optional(qce->dev, "iface"); + qce->iface = devm_clk_get_optional_enabled(qce->dev, "iface"); if (IS_ERR(qce->iface)) return PTR_ERR(qce->iface); - qce->bus = devm_clk_get_optional(qce->dev, "bus"); + qce->bus = devm_clk_get_optional_enabled(qce->dev, "bus"); if (IS_ERR(qce->bus)) return PTR_ERR(qce->bus); @@ -229,64 +228,25 @@ static int qce_crypto_probe(struct platform_device *pdev) if (ret) return ret; - ret = clk_prepare_enable(qce->core); + ret = devm_qce_dma_request(qce->dev, &qce->dma); if (ret) - goto err_mem_path_disable; - - ret = clk_prepare_enable(qce->iface); - if (ret) - goto err_clks_core; - - ret = clk_prepare_enable(qce->bus); - if (ret) - goto err_clks_iface; + return ret; - ret = qce_dma_request(qce->dev, &qce->dma); + ret = qce_check_version(qce); if (ret) - goto err_clks; + return ret; - ret = qce_check_version(qce); + ret = devm_mutex_init(qce->dev, &qce->lock); if (ret) - goto err_clks; + return ret; - spin_lock_init(&qce->lock); - tasklet_init(&qce->done_tasklet, qce_tasklet_req_done, - (unsigned long)qce); + INIT_WORK(&qce->done_work, qce_req_done_work); crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH); qce->async_req_enqueue = qce_async_request_enqueue; qce->async_req_done = qce_async_request_done; - ret = qce_register_algs(qce); - if (ret) - goto err_dma; - - return 0; - -err_dma: - qce_dma_release(&qce->dma); -err_clks: - clk_disable_unprepare(qce->bus); -err_clks_iface: - clk_disable_unprepare(qce->iface); -err_clks_core: - clk_disable_unprepare(qce->core); -err_mem_path_disable: - icc_set_bw(qce->mem_path, 0, 0); - - return ret; -} - -static void qce_crypto_remove(struct platform_device *pdev) -{ - struct qce_device *qce = platform_get_drvdata(pdev); - - tasklet_kill(&qce->done_tasklet); - qce_unregister_algs(qce); - qce_dma_release(&qce->dma); - 
clk_disable_unprepare(qce->bus); - clk_disable_unprepare(qce->iface); - clk_disable_unprepare(qce->core); + return devm_qce_register_algs(qce); } static const struct of_device_id qce_crypto_of_match[] = { @@ -299,7 +259,6 @@ MODULE_DEVICE_TABLE(of, qce_crypto_of_match); static struct platform_driver qce_crypto_driver = { .probe = qce_crypto_probe, - .remove = qce_crypto_remove, .driver = { .name = KBUILD_MODNAME, .of_match_table = qce_crypto_of_match, diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h index 228fcd69ec51..eb6fa7a8b64a 100644 --- a/drivers/crypto/qce/core.h +++ b/drivers/crypto/qce/core.h @@ -6,13 +6,16 @@ #ifndef _CORE_H_ #define _CORE_H_ +#include <linux/mutex.h> +#include <linux/workqueue.h> + #include "dma.h" /** * struct qce_device - crypto engine device structure * @queue: crypto request queue * @lock: the lock protects queue and req - * @done_tasklet: done tasklet object + * @done_work: workqueue context * @req: current active request * @result: result of current transform * @base: virtual IO base @@ -28,8 +31,8 @@ */ struct qce_device { struct crypto_queue queue; - spinlock_t lock; - struct tasklet_struct done_tasklet; + struct mutex lock; + struct work_struct done_work; struct crypto_async_request *req; int result; void __iomem *base; diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c index 46db5bf366b4..1dec7aea852d 100644 --- a/drivers/crypto/qce/dma.c +++ b/drivers/crypto/qce/dma.c @@ -3,12 +3,22 @@ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. */ +#include <linux/device.h> #include <linux/dmaengine.h> #include <crypto/scatterwalk.h> #include "dma.h" -int qce_dma_request(struct device *dev, struct qce_dma_data *dma) +static void qce_dma_release(void *data) +{ + struct qce_dma_data *dma = data; + + dma_release_channel(dma->txchan); + dma_release_channel(dma->rxchan); + kfree(dma->result_buf); +} + +int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma) { int ret; @@ -31,7 +41,8 @@ int qce_dma_request(struct device *dev, struct qce_dma_data *dma) dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ; - return 0; + return devm_add_action_or_reset(dev, qce_dma_release, dma); + error_nomem: dma_release_channel(dma->rxchan); error_rx: @@ -39,13 +50,6 @@ error_rx: return ret; } -void qce_dma_release(struct qce_dma_data *dma) -{ - dma_release_channel(dma->txchan); - dma_release_channel(dma->rxchan); - kfree(dma->result_buf); -} - struct scatterlist * qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl, unsigned int max_len) diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h index 786402169360..31629185000e 100644 --- a/drivers/crypto/qce/dma.h +++ b/drivers/crypto/qce/dma.h @@ -34,8 +34,7 @@ struct qce_dma_data { void *ignore_buf; }; -int qce_dma_request(struct device *dev, struct qce_dma_data *dma); -void qce_dma_release(struct qce_dma_data *dma); +int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma); int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in, int in_ents, struct scatterlist *sg_out, int out_ents, dma_async_tx_callback cb, void *cb_param); diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index fc72af8aa9a7..71b748183cfa 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -482,7 +482,7 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def, base = &alg->halg.base; base->cra_blocksize = def->blocksize; - base->cra_priority = 300; + base->cra_priority = 175; base->cra_flags = 
CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; base->cra_ctxsize = sizeof(struct qce_sha_ctx); base->cra_alignmask = 0; diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c index 5b493fdc1e74..ffb334eb5b34 100644 --- a/drivers/crypto/qce/skcipher.c +++ b/drivers/crypto/qce/skcipher.c @@ -461,7 +461,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def, alg->encrypt = qce_skcipher_encrypt; alg->decrypt = qce_skcipher_decrypt; - alg->base.cra_priority = 300; + alg->base.cra_priority = 275; alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY; diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c index 9d130592cc0a..d734c9a56786 100644 --- a/drivers/crypto/tegra/tegra-se-aes.c +++ b/drivers/crypto/tegra/tegra-se-aes.c @@ -1750,10 +1750,13 @@ static int tegra_cmac_digest(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); + int ret; - tegra_cmac_init(req); - rctx->task |= SHA_UPDATE | SHA_FINAL; + ret = tegra_cmac_init(req); + if (ret) + return ret; + rctx->task |= SHA_UPDATE | SHA_FINAL; return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); } diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c index 4d4bd727f498..0b5cdd5676b1 100644 --- a/drivers/crypto/tegra/tegra-se-hash.c +++ b/drivers/crypto/tegra/tegra-se-hash.c @@ -615,13 +615,16 @@ static int tegra_sha_digest(struct ahash_request *req) struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + int ret; if (ctx->fallback) return tegra_sha_fallback_digest(req); - tegra_sha_init(req); - rctx->task |= SHA_UPDATE | SHA_FINAL; + ret = tegra_sha_init(req); + if (ret) + return ret; + rctx->task |= SHA_UPDATE | SHA_FINAL; return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); } |
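
Editor's note: the omap-aes and omap-des PIO interrupt paths above replace struct scatter_walk with a plain byte offset into the current scatterlist entry. Below is a minimal sketch of that walking pattern, not the drivers' code; it assumes linearly mapped scatterlist entries (as the PIO path does) and uses a hypothetical helper name.

#include <linux/scatterlist.h>

/* Illustrative only: hand out the next 32-bit word of an sg chain,
 * tracking progress with a plain offset instead of a scatter_walk.
 */
static u32 *pio_next_word(struct scatterlist **sg, unsigned int *offset)
{
	u32 *word;

	if (!*sg)
		return NULL;

	word = sg_virt(*sg) + *offset;	/* entry assumed lowmem-mapped */
	*offset += 4;
	if (*offset == (*sg)->length) {	/* finished this entry, move on */
		*sg = sg_next(*sg);
		*offset = 0;
	}
	return word;
}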
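
Editor's note: the qce rework above swaps the spinlock/tasklet completion path for a mutex plus work_struct and moves teardown to devm actions, which is why the driver loses its .remove callback. A condensed sketch of that shape follows; all demo_* names are hypothetical, the queue handling is stripped out, and the work-cancel devm action is an illustration of devm_add_action_or_reset (which the diff uses for DMA and algorithm cleanup), not something the qce driver registers itself.

#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct mutex lock;		/* protects cur */
	struct work_struct done_work;	/* completion runs in process context */
	void *cur;			/* request currently on the hardware */
};

static void demo_done_work(struct work_struct *work)
{
	struct demo_dev *dd = container_of(work, struct demo_dev, done_work);
	void *req;

	scoped_guard(mutex, &dd->lock) {	/* dropped at end of block */
		req = dd->cur;
		dd->cur = NULL;
	}
	/* hand 'req' back to its submitter here */
}

static void demo_teardown(void *data)
{
	struct demo_dev *dd = data;

	cancel_work_sync(&dd->done_work);
}

static int demo_probe(struct platform_device *pdev)
{
	struct demo_dev *dd;
	int ret;

	dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	ret = devm_mutex_init(&pdev->dev, &dd->lock);
	if (ret)
		return ret;

	INIT_WORK(&dd->done_work, demo_done_work);

	/* teardown runs automatically on unbind or on later probe failure */
	return devm_add_action_or_reset(&pdev->dev, demo_teardown, dd);
}

Moving completion into a work item trades the tasklet's softirq context for process context, which is what allows the lock protecting the queue and current request to become a mutex.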