Diffstat (limited to 'drivers/crypto/hisilicon')
-rw-r--r--  drivers/crypto/hisilicon/debugfs.c            65
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre.h          23
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_crypto.c   11
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c    284
-rw-r--r--  drivers/crypto/hisilicon/qm.c                629
-rw-r--r--  drivers/crypto/hisilicon/sec/sec_drv.c         4
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec.h           28
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c   264
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.h    11
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c     169
-rw-r--r--  drivers/crypto/hisilicon/sgl.c                19
-rw-r--r--  drivers/crypto/hisilicon/trng/trng.c           6
-rw-r--r--  drivers/crypto/hisilicon/zip/Makefile          2
-rw-r--r--  drivers/crypto/hisilicon/zip/dae_main.c      262
-rw-r--r--  drivers/crypto/hisilicon/zip/zip.h            26
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_crypto.c     17
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_main.c      292
17 files changed, 1471 insertions(+), 641 deletions(-)
diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
index cd67fa348ca7..45e130b901eb 100644
--- a/drivers/crypto/hisilicon/debugfs.c
+++ b/drivers/crypto/hisilicon/debugfs.c
@@ -13,6 +13,7 @@
#define QM_DFX_COMMON_LEN 0xC3
#define QM_DFX_REGS_LEN 4UL
#define QM_DBG_TMP_BUF_LEN 22
+#define QM_XQC_ADDR_MASK GENMASK(31, 0)
#define CURRENT_FUN_MASK GENMASK(5, 0)
#define CURRENT_Q_MASK GENMASK(31, 16)
#define QM_SQE_ADDR_MASK GENMASK(7, 0)
@@ -167,7 +168,6 @@ static void dump_show(struct hisi_qm *qm, void *info,
static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
{
struct device *dev = &qm->pdev->dev;
- struct qm_sqc *sqc_curr;
struct qm_sqc sqc;
u32 qp_id;
int ret;
@@ -183,6 +183,8 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1);
if (!ret) {
+ sqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
+ sqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
dump_show(qm, &sqc, sizeof(struct qm_sqc), name);
return 0;
@@ -190,9 +192,10 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
down_read(&qm->qps_lock);
if (qm->sqc) {
- sqc_curr = qm->sqc + qp_id;
-
- dump_show(qm, sqc_curr, sizeof(*sqc_curr), "SOFT SQC");
+ memcpy(&sqc, qm->sqc + qp_id, sizeof(struct qm_sqc));
+ sqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
+ sqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
+ dump_show(qm, &sqc, sizeof(struct qm_sqc), "SOFT SQC");
}
up_read(&qm->qps_lock);
@@ -202,7 +205,6 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
{
struct device *dev = &qm->pdev->dev;
- struct qm_cqc *cqc_curr;
struct qm_cqc cqc;
u32 qp_id;
int ret;
@@ -218,6 +220,8 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1);
if (!ret) {
+ cqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
+ cqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
dump_show(qm, &cqc, sizeof(struct qm_cqc), name);
return 0;
@@ -225,9 +229,10 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
down_read(&qm->qps_lock);
if (qm->cqc) {
- cqc_curr = qm->cqc + qp_id;
-
- dump_show(qm, cqc_curr, sizeof(*cqc_curr), "SOFT CQC");
+ memcpy(&cqc, qm->cqc + qp_id, sizeof(struct qm_cqc));
+ cqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
+ cqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
+ dump_show(qm, &cqc, sizeof(struct qm_cqc), "SOFT CQC");
}
up_read(&qm->qps_lock);
@@ -263,6 +268,10 @@ static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name)
if (ret)
return ret;
+ aeqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
+ aeqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
+ eqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
+ eqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
dump_show(qm, xeqc, size, name);
return ret;
@@ -310,27 +319,26 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
static int qm_sq_dump(struct hisi_qm *qm, char *s, char *name)
{
- u16 sq_depth = qm->qp_array->cq_depth;
- void *sqe, *sqe_curr;
+ u16 sq_depth = qm->qp_array->sq_depth;
struct hisi_qp *qp;
u32 qp_id, sqe_id;
+ void *sqe;
int ret;
ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth);
if (ret)
return ret;
- sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL);
+ sqe = kzalloc(qm->sqe_size, GFP_KERNEL);
if (!sqe)
return -ENOMEM;
qp = &qm->qp_array[qp_id];
- memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth);
- sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
- memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
+ memcpy(sqe, qp->sqe + sqe_id * qm->sqe_size, qm->sqe_size);
+ memset(sqe + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
qm->debug.sqe_mask_len);
- dump_show(qm, sqe_curr, qm->sqe_size, name);
+ dump_show(qm, sqe, qm->sqe_size, name);
kfree(sqe);
@@ -809,8 +817,14 @@ static void dfx_regs_uninit(struct hisi_qm *qm,
{
int i;
+ if (!dregs)
+ return;
+
/* Setting the pointer is NULL to prevent double free */
for (i = 0; i < reg_len; i++) {
+ if (!dregs[i].regs)
+ continue;
+
kfree(dregs[i].regs);
dregs[i].regs = NULL;
}
@@ -860,14 +874,21 @@ alloc_error:
static int qm_diff_regs_init(struct hisi_qm *qm,
struct dfx_diff_registers *dregs, u32 reg_len)
{
+ int ret;
+
qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
- if (IS_ERR(qm->debug.qm_diff_regs))
- return PTR_ERR(qm->debug.qm_diff_regs);
+ if (IS_ERR(qm->debug.qm_diff_regs)) {
+ ret = PTR_ERR(qm->debug.qm_diff_regs);
+ qm->debug.qm_diff_regs = NULL;
+ return ret;
+ }
qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len);
if (IS_ERR(qm->debug.acc_diff_regs)) {
dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
- return PTR_ERR(qm->debug.acc_diff_regs);
+ ret = PTR_ERR(qm->debug.acc_diff_regs);
+ qm->debug.acc_diff_regs = NULL;
+ return ret;
}
return 0;
@@ -908,7 +929,9 @@ static int qm_last_regs_init(struct hisi_qm *qm)
static void qm_diff_regs_uninit(struct hisi_qm *qm, u32 reg_len)
{
dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len);
+ qm->debug.acc_diff_regs = NULL;
dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
+ qm->debug.qm_diff_regs = NULL;
}
/**
@@ -1075,12 +1098,12 @@ static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
{
struct debugfs_file *file = qm->debug.files + index;
- debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
- &qm_debug_fops);
-
file->index = index;
mutex_init(&file->lock);
file->debug = &qm->debug;
+
+ debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
+ &qm_debug_fops);
}
static int qm_debugfs_atomic64_set(void *data, u64 val)
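Note: the debugfs.c hunks above all follow one pattern — copy the hardware context into a local buffer, overwrite the base_h/base_l address words with the all-ones QM_XQC_ADDR_MASK, and dump only the copy, so queue DMA addresses never leak through debugfs and the live context is never modified. A standalone sketch of that mask-before-dump idea (the struct layout and names here are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the driver's queue context; not the real layout. */
struct xqc_stub {
        uint32_t base_l;   /* low 32 bits of the queue's DMA address */
        uint32_t base_h;   /* high 32 bits of the queue's DMA address */
        uint32_t depth;
};

#define XQC_ADDR_MASK 0xffffffffu   /* mirrors QM_XQC_ADDR_MASK = GENMASK(31, 0) */

/* Dump a *copy* with the address words blanked, never the live context. */
static void dump_masked(const struct xqc_stub *hw)
{
        struct xqc_stub tmp;

        memcpy(&tmp, hw, sizeof(tmp));
        tmp.base_l = XQC_ADDR_MASK;
        tmp.base_h = XQC_ADDR_MASK;
        printf("base_h=0x%x base_l=0x%x depth=%u\n",
               tmp.base_h, tmp.base_l, tmp.depth);
}

int main(void)
{
        struct xqc_stub ctx = { .base_l = 0x1000, .base_h = 0x2, .depth = 1024 };

        dump_masked(&ctx);
        return 0;
}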
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 9f0b94c8e03d..0f3ddbadbcf9 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -100,6 +100,29 @@ struct hpre_sqe {
__le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
};
+enum hpre_cap_table_type {
+ QM_RAS_NFE_TYPE = 0x0,
+ QM_RAS_NFE_RESET,
+ QM_RAS_CE_TYPE,
+ HPRE_RAS_NFE_TYPE,
+ HPRE_RAS_NFE_RESET,
+ HPRE_RAS_CE_TYPE,
+ HPRE_CORE_INFO,
+ HPRE_CORE_EN,
+ HPRE_DRV_ALG_BITMAP,
+ HPRE_ALG_BITMAP,
+ HPRE_CORE1_BITMAP_CAP,
+ HPRE_CORE2_BITMAP_CAP,
+ HPRE_CORE3_BITMAP_CAP,
+ HPRE_CORE4_BITMAP_CAP,
+ HPRE_CORE5_BITMAP_CAP,
+ HPRE_CORE6_BITMAP_CAP,
+ HPRE_CORE7_BITMAP_CAP,
+ HPRE_CORE8_BITMAP_CAP,
+ HPRE_CORE9_BITMAP_CAP,
+ HPRE_CORE10_BITMAP_CAP,
+};
+
struct hisi_qp *hpre_create_qp(u8 type);
int hpre_algs_register(struct hisi_qm *qm);
void hpre_algs_unregister(struct hisi_qm *qm);
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 764532a6ca82..61b5e1c5d019 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -39,6 +39,8 @@ struct hpre_ctx;
#define HPRE_DFX_SEC_TO_US 1000000
#define HPRE_DFX_US_TO_NS 1000
+#define HPRE_ENABLE_HPCORE_SHIFT 7
+
/* due to nist p521 */
#define HPRE_ECC_MAX_KSZ 66
@@ -131,6 +133,8 @@ struct hpre_ctx {
};
/* for ecc algorithms */
unsigned int curve_id;
+ /* for high performance core */
+ u8 enable_hpcore;
};
struct hpre_asym_request {
@@ -575,7 +579,9 @@ static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
do {
atomic64_inc(&dfx[HPRE_SEND_CNT].value);
+ spin_lock_bh(&ctx->req_lock);
ret = hisi_qp_send(ctx->qp, msg);
+ spin_unlock_bh(&ctx->req_lock);
if (ret != -EBUSY)
break;
atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
@@ -1617,6 +1623,8 @@ static int hpre_ecdh_compute_value(struct kpp_request *req)
}
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
+ msg->resv1 = ctx->enable_hpcore << HPRE_ENABLE_HPCORE_SHIFT;
+
ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
@@ -1651,6 +1659,7 @@ static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
ctx->curve_id = ECC_CURVE_NIST_P256;
+ ctx->enable_hpcore = 1;
kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
@@ -2004,8 +2013,6 @@ static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
}
static struct akcipher_alg rsa = {
- .sign = hpre_rsa_dec,
- .verify = hpre_rsa_enc,
.encrypt = hpre_rsa_enc,
.decrypt = hpre_rsa_dec,
.set_pub_key = hpre_rsa_setpubkey,
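Note: the hpre_crypto.c hunks above route NIST P-256 ECDH requests to a high performance core by setting ctx->enable_hpcore at tfm init and folding it into bit 7 of the SQE's resv1 field when the request is built. A minimal sketch of that flag packing (the stub struct is illustrative; only the shift value mirrors HPRE_ENABLE_HPCORE_SHIFT):

#include <stdint.h>
#include <stdio.h>

#define ENABLE_HPCORE_SHIFT 7   /* mirrors HPRE_ENABLE_HPCORE_SHIFT */

struct sqe_stub {
        uint8_t resv1;   /* illustrative stand-in for the SQE's reserved byte */
};

int main(void)
{
        struct sqe_stub sqe = { 0 };
        uint8_t enable_hpcore = 1;   /* set at tfm init for NIST P-256 in the patch */

        /* Fold the flag into bit 7 of the reserved field, as the hunk does. */
        sqe.resv1 = (uint8_t)(enable_hpcore << ENABLE_HPCORE_SHIFT);
        printf("resv1 = 0x%x\n", sqe.resv1);   /* prints 0x80 */
        return 0;
}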
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index d93aa6630a57..f5b47e5ff48a 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -13,9 +13,8 @@
#include <linux/uacce.h>
#include "hpre.h"
-#define HPRE_QM_ABNML_INT_MASK 0x100004
+#define CAP_FILE_PERMISSION 0444
#define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0)
-#define HPRE_COMM_CNT_CLR_CE 0x0
#define HPRE_CTRL_CNT_CLR_CE 0x301000
#define HPRE_FSM_MAX_CNT 0x301008
#define HPRE_VFG_AXQOS 0x30100c
@@ -42,7 +41,6 @@
#define HPRE_HAC_INT_SET 0x301500
#define HPRE_RNG_TIMEOUT_NUM 0x301A34
#define HPRE_CORE_INT_ENABLE 0
-#define HPRE_CORE_INT_DISABLE GENMASK(21, 0)
#define HPRE_RDCHN_INI_ST 0x301a00
#define HPRE_CLSTR_BASE 0x302000
#define HPRE_CORE_EN_OFFSET 0x04
@@ -66,7 +64,6 @@
#define HPRE_CLSTR_ADDR_INTRVL 0x1000
#define HPRE_CLUSTER_INQURY 0x100
#define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104
-#define HPRE_TIMEOUT_ABNML_BIT 6
#define HPRE_PASID_EN_BIT 9
#define HPRE_REG_RD_INTVRL_US 10
#define HPRE_REG_RD_TMOUT_US 1000
@@ -106,7 +103,7 @@
#define HPRE_SHAPER_TYPE_RATE 640
#define HPRE_VIA_MSI_DSM 1
#define HPRE_SQE_MASK_OFFSET 8
-#define HPRE_SQE_MASK_LEN 24
+#define HPRE_SQE_MASK_LEN 44
#define HPRE_CTX_Q_NUM_DEF 1
#define HPRE_DFX_BASE 0x301000
@@ -203,11 +200,11 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = {
{HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37},
{HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37},
{HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
- {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFFFE},
- {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE},
- {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE},
+ {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFC3E},
+ {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFC3E},
+ {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFC3E},
{HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
- {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1},
+ {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1},
{HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2},
{HPRE_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x0, 0x8, 0xA},
{HPRE_CLUSTER_CORE_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x0, 0x2, 0xA},
@@ -226,18 +223,27 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = {
{HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}
};
-enum hpre_pre_store_cap_idx {
- HPRE_CLUSTER_NUM_CAP_IDX = 0x0,
- HPRE_CORE_ENABLE_BITMAP_CAP_IDX,
- HPRE_DRV_ALG_BITMAP_CAP_IDX,
- HPRE_DEV_ALG_BITMAP_CAP_IDX,
-};
-
-static const u32 hpre_pre_store_caps[] = {
- HPRE_CLUSTER_NUM_CAP,
- HPRE_CORE_ENABLE_BITMAP_CAP,
- HPRE_DRV_ALG_BITMAP_CAP,
- HPRE_DEV_ALG_BITMAP_CAP,
+static const struct hisi_qm_cap_query_info hpre_cap_query_info[] = {
+ {QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE ", 0x3124, 0x0, 0x1C37, 0x7C37},
+ {QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET ", 0x3128, 0x0, 0xC77, 0x6C77},
+ {QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE ", 0x312C, 0x0, 0x8, 0x8},
+ {HPRE_RAS_NFE_TYPE, "HPRE_RAS_NFE_TYPE ", 0x3130, 0x0, 0x3FFFFE, 0x1FFFC3E},
+ {HPRE_RAS_NFE_RESET, "HPRE_RAS_NFE_RESET ", 0x3134, 0x0, 0x3FFFFE, 0xBFFC3E},
+ {HPRE_RAS_CE_TYPE, "HPRE_RAS_CE_TYPE ", 0x3138, 0x0, 0x1, 0x1},
+ {HPRE_CORE_INFO, "HPRE_CORE_INFO ", 0x313c, 0x0, 0x420802, 0x120A0A},
+ {HPRE_CORE_EN, "HPRE_CORE_EN ", 0x3140, 0x0, 0xF, 0x3FF},
+ {HPRE_DRV_ALG_BITMAP, "HPRE_DRV_ALG_BITMAP ", 0x3144, 0x0, 0x03, 0x27},
+ {HPRE_ALG_BITMAP, "HPRE_ALG_BITMAP ", 0x3148, 0x0, 0x03, 0x7F},
+ {HPRE_CORE1_BITMAP_CAP, "HPRE_CORE1_BITMAP_CAP ", 0x314c, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE2_BITMAP_CAP, "HPRE_CORE2_BITMAP_CAP ", 0x3150, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE3_BITMAP_CAP, "HPRE_CORE3_BITMAP_CAP ", 0x3154, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE4_BITMAP_CAP, "HPRE_CORE4_BITMAP_CAP ", 0x3158, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE5_BITMAP_CAP, "HPRE_CORE5_BITMAP_CAP ", 0x315c, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE6_BITMAP_CAP, "HPRE_CORE6_BITMAP_CAP ", 0x3160, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE7_BITMAP_CAP, "HPRE_CORE7_BITMAP_CAP ", 0x3164, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE8_BITMAP_CAP, "HPRE_CORE8_BITMAP_CAP ", 0x3168, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE9_BITMAP_CAP, "HPRE_CORE9_BITMAP_CAP ", 0x316c, 0x0, 0x10, 0x10},
+ {HPRE_CORE10_BITMAP_CAP, "HPRE_CORE10_BITMAP_CAP ", 0x3170, 0x0, 0x10, 0x10},
};
static const struct hpre_hw_error hpre_hw_errors[] = {
@@ -358,11 +364,13 @@ static struct dfx_diff_registers hpre_diff_regs[] = {
},
};
+static const struct hisi_qm_err_ini hpre_err_ini;
+
bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
{
u32 cap_val;
- cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP_CAP_IDX].cap_val;
+ cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP].cap_val;
if (alg & cap_val)
return true;
@@ -417,7 +425,7 @@ static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
pf_q_num_flag = true;
- return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
+ return hisi_qm_q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
}
static const struct kernel_param_ops hpre_pf_q_num_ops = {
@@ -505,14 +513,17 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)
static int hpre_set_cluster(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- unsigned long offset;
u32 cluster_core_mask;
+ unsigned long offset;
+ u32 hpre_core_info;
u8 clusters_num;
u32 val = 0;
int ret, i;
- cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_ENABLE_BITMAP_CAP_IDX].cap_val;
- clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_EN].cap_val;
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
offset = i * HPRE_CLSTR_ADDR_INTRVL;
@@ -595,6 +606,9 @@ static void hpre_close_sva_prefetch(struct hisi_qm *qm)
static void hpre_enable_clock_gate(struct hisi_qm *qm)
{
+ unsigned long offset;
+ u8 clusters_num, i;
+ u32 hpre_core_info;
u32 val;
if (qm->ver < QM_HW_V3)
@@ -608,17 +622,26 @@ static void hpre_enable_clock_gate(struct hisi_qm *qm)
val |= HPRE_PEH_CFG_AUTO_GATE_EN;
writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
- val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
- val |= HPRE_CLUSTER_DYN_CTL_EN;
- writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);
-
- val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
- val |= HPRE_CORE_GATE_EN;
- writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
+ for (i = 0; i < clusters_num; i++) {
+ offset = (unsigned long)i * HPRE_CLSTR_ADDR_INTRVL;
+ val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
+ val |= HPRE_CLUSTER_DYN_CTL_EN;
+ writel(val, qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
+
+ val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG);
+ val |= HPRE_CORE_GATE_EN;
+ writel(val, qm->io_base + offset + HPRE_CORE_SHB_CFG);
+ }
}
static void hpre_disable_clock_gate(struct hisi_qm *qm)
{
+ unsigned long offset;
+ u8 clusters_num, i;
+ u32 hpre_core_info;
u32 val;
if (qm->ver < QM_HW_V3)
@@ -632,13 +655,19 @@ static void hpre_disable_clock_gate(struct hisi_qm *qm)
val &= ~HPRE_PEH_CFG_AUTO_GATE_EN;
writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
- val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
- val &= ~HPRE_CLUSTER_DYN_CTL_EN;
- writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);
-
- val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
- val &= ~HPRE_CORE_GATE_EN;
- writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
+ for (i = 0; i < clusters_num; i++) {
+ offset = (unsigned long)i * HPRE_CLSTR_ADDR_INTRVL;
+ val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
+ val &= ~HPRE_CLUSTER_DYN_CTL_EN;
+ writel(val, qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
+
+ val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG);
+ val &= ~HPRE_CORE_GATE_EN;
+ writel(val, qm->io_base + offset + HPRE_CORE_SHB_CFG);
+ }
}
static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
@@ -654,11 +683,6 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);
- /* HPRE need more time, we close this interrupt */
- val = readl_relaxed(qm->io_base + HPRE_QM_ABNML_INT_MASK);
- val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
- writel_relaxed(val, qm->io_base + HPRE_QM_ABNML_INT_MASK);
-
if (qm->ver >= QM_HW_V3)
writel(HPRE_RSA_ENB | HPRE_ECC_ENB,
qm->io_base + HPRE_TYPES_ENB);
@@ -667,9 +691,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE);
writel(0x0, qm->io_base + HPRE_BD_ENDIAN);
- writel(0x0, qm->io_base + HPRE_INT_MASK);
writel(0x0, qm->io_base + HPRE_POISON_BYPASS);
- writel(0x0, qm->io_base + HPRE_COMM_CNT_CLR_CE);
writel(0x0, qm->io_base + HPRE_ECC_BYPASS);
writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG);
@@ -708,11 +730,14 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
unsigned long offset;
+ u32 hpre_core_info;
u8 clusters_num;
int i;
/* clear clusterX/cluster_ctrl */
- clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
@@ -759,7 +784,7 @@ static void hpre_hw_error_disable(struct hisi_qm *qm)
static void hpre_hw_error_enable(struct hisi_qm *qm)
{
- u32 ce, nfe;
+ u32 ce, nfe, err_en;
ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
@@ -776,7 +801,8 @@ static void hpre_hw_error_enable(struct hisi_qm *qm)
hpre_master_ooo_ctrl(qm, true);
/* enable hpre hw error interrupts */
- writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
+ err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE;
+ writel(~err_en, qm->io_base + HPRE_INT_MASK);
}
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
@@ -1003,10 +1029,13 @@ static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
+ u32 hpre_core_info;
u8 clusters_num;
int i, ret;
- clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
if (ret >= HPRE_DBGFS_VAL_MAX_LEN)
@@ -1049,6 +1078,26 @@ static int hpre_ctrl_debug_init(struct hisi_qm *qm)
return hpre_cluster_debugfs_init(qm);
}
+static int hpre_cap_regs_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+ u32 i, size;
+
+ size = qm->cap_tables.qm_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name,
+ qm->cap_tables.qm_cap_table[i].cap_val);
+
+ size = qm->cap_tables.dev_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name,
+ qm->cap_tables.dev_cap_table[i].cap_val);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hpre_cap_regs);
+
static void hpre_dfx_debug_init(struct hisi_qm *qm)
{
struct dfx_diff_registers *hpre_regs = qm->debug.acc_diff_regs;
@@ -1067,6 +1116,9 @@ static void hpre_dfx_debug_init(struct hisi_qm *qm)
if (qm->fun_type == QM_HW_PF && hpre_regs)
debugfs_create_file("diff_regs", 0444, parent,
qm, &hpre_diff_regs_fops);
+
+ debugfs_create_file("cap_regs", CAP_FILE_PERMISSION,
+ qm->debug.debug_root, qm, &hpre_cap_regs_fops);
}
static int hpre_debugfs_init(struct hisi_qm *qm)
@@ -1074,67 +1126,73 @@ static int hpre_debugfs_init(struct hisi_qm *qm)
struct device *dev = &qm->pdev->dev;
int ret;
- qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
- hpre_debugfs_root);
-
- qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
- qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
ret = hisi_qm_regs_debugfs_init(qm, hpre_diff_regs, ARRAY_SIZE(hpre_diff_regs));
if (ret) {
dev_warn(dev, "Failed to init HPRE diff regs!\n");
- goto debugfs_remove;
+ return ret;
}
+ qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
+ hpre_debugfs_root);
+ qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
+ qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
+
hisi_qm_debug_init(qm);
if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) {
ret = hpre_ctrl_debug_init(qm);
if (ret)
- goto failed_to_create;
+ goto debugfs_remove;
}
hpre_dfx_debug_init(qm);
return 0;
-failed_to_create:
- hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
debugfs_remove:
debugfs_remove_recursive(qm->debug.debug_root);
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
return ret;
}
static void hpre_debugfs_exit(struct hisi_qm *qm)
{
- hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
-
debugfs_remove_recursive(qm->debug.debug_root);
+
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
}
static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
{
struct hisi_qm_cap_record *hpre_cap;
struct device *dev = &qm->pdev->dev;
+ u32 hpre_core_info;
+ u8 clusters_num;
size_t i, size;
- size = ARRAY_SIZE(hpre_pre_store_caps);
+ size = ARRAY_SIZE(hpre_cap_query_info);
hpre_cap = devm_kzalloc(dev, sizeof(*hpre_cap) * size, GFP_KERNEL);
if (!hpre_cap)
return -ENOMEM;
for (i = 0; i < size; i++) {
- hpre_cap[i].type = hpre_pre_store_caps[i];
- hpre_cap[i].cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info,
- hpre_pre_store_caps[i], qm->cap_ver);
+ hpre_cap[i].type = hpre_cap_query_info[i].type;
+ hpre_cap[i].name = hpre_cap_query_info[i].name;
+ hpre_cap[i].cap_val = hisi_qm_get_cap_value(qm, hpre_cap_query_info,
+ i, qm->cap_ver);
}
- if (hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val > HPRE_CLUSTERS_NUM_MAX) {
+ hpre_core_info = hpre_cap[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
+ if (clusters_num > HPRE_CLUSTERS_NUM_MAX) {
dev_err(dev, "Device cluster num %u is out of range for driver supports %d!\n",
- hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val, HPRE_CLUSTERS_NUM_MAX);
+ clusters_num, HPRE_CLUSTERS_NUM_MAX);
return -EINVAL;
}
qm->cap_tables.dev_cap_table = hpre_cap;
+ qm->cap_tables.dev_cap_size = size;
return 0;
}
@@ -1151,7 +1209,6 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->mode = uacce_mode;
qm->pdev = pdev;
- qm->ver = pdev->revision;
qm->sqe_size = HPRE_SQE_SIZE;
qm->dev_name = hpre_name;
@@ -1162,6 +1219,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qp_num = pf_q_num;
qm->debug.curr_qm_qp_num = pf_q_num;
qm->qm_list = &hpre_devices;
+ qm->err_ini = &hpre_err_ini;
if (pf_q_num_flag)
set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
}
@@ -1180,7 +1238,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return ret;
}
- alg_msk = qm->cap_tables.dev_cap_table[HPRE_DEV_ALG_BITMAP_CAP_IDX].cap_val;
+ alg_msk = qm->cap_tables.dev_cap_table[HPRE_ALG_BITMAP].cap_val;
ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs));
if (ret) {
pci_err(pdev, "Failed to set hpre algs!\n");
@@ -1196,10 +1254,13 @@ static int hpre_show_last_regs_init(struct hisi_qm *qm)
int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
struct qm_debug *debug = &qm->debug;
void __iomem *io_base;
+ u32 hpre_core_info;
u8 clusters_num;
int i, j, idx;
- clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num +
com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
if (!debug->last_words)
@@ -1239,6 +1300,7 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
struct qm_debug *debug = &qm->debug;
struct pci_dev *pdev = qm->pdev;
void __iomem *io_base;
+ u32 hpre_core_info;
u8 clusters_num;
int i, j, idx;
u32 val;
@@ -1254,7 +1316,9 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
hpre_com_dfx_regs[i].name, debug->last_words[i], val);
}
- clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
io_base = qm->io_base + hpre_cluster_offsets[i];
for (j = 0; j < cluster_dfx_regs_num; j++) {
@@ -1288,11 +1352,15 @@ static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
- u32 nfe;
-
writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
- nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
- writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
+}
+
+static void hpre_disable_error_report(struct hisi_qm *qm, u32 err_type)
+{
+ u32 nfe_mask;
+
+ nfe_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ writel(nfe_mask & (~err_type), qm->io_base + HPRE_RAS_NFE_ENB);
}
static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
@@ -1306,6 +1374,38 @@ static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
+static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = hpre_get_hw_err_status(qm);
+ if (err_status) {
+ if (err_status & qm->err_info.ecc_2bits_mask)
+ qm->err_status.is_dev_ecc_mbit = true;
+ hpre_log_hw_error(qm, err_status);
+
+ if (err_status & qm->err_info.dev_reset_mask) {
+ /* Disable the same error reporting until device is recovered. */
+ hpre_disable_error_report(qm, err_status);
+ return ACC_ERR_NEED_RESET;
+ }
+ hpre_clear_hw_err_status(qm, err_status);
+ }
+
+ return ACC_ERR_RECOVERED;
+}
+
+static bool hpre_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = hpre_get_hw_err_status(qm);
+ if (err_status & qm->err_info.dev_shutdown_mask)
+ return true;
+
+ return false;
+}
+
static void hpre_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
@@ -1332,12 +1432,13 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.hw_err_disable = hpre_hw_error_disable,
.get_dev_hw_err_status = hpre_get_hw_err_status,
.clear_dev_hw_err_status = hpre_clear_hw_err_status,
- .log_dev_hw_err = hpre_log_hw_error,
.open_axi_master_ooo = hpre_open_axi_master_ooo,
.open_sva_prefetch = hpre_open_sva_prefetch,
.close_sva_prefetch = hpre_close_sva_prefetch,
.show_last_dfx_regs = hpre_show_last_dfx_regs,
.err_info_init = hpre_err_info_init,
+ .get_err_result = hpre_get_err_result,
+ .dev_is_abnormal = hpre_dev_is_abnormal,
};
static int hpre_pf_probe_init(struct hpre *hpre)
@@ -1351,8 +1452,6 @@ static int hpre_pf_probe_init(struct hpre *hpre)
hpre_open_sva_prefetch(qm);
- qm->err_ini = &hpre_err_ini;
- qm->err_ini->err_info_init(qm);
hisi_qm_dev_err_init(qm);
ret = hpre_show_last_regs_init(qm);
if (ret)
@@ -1381,6 +1480,18 @@ static int hpre_probe_init(struct hpre *hpre)
return 0;
}
+static void hpre_probe_uninit(struct hisi_qm *qm)
+{
+ if (qm->fun_type == QM_HW_VF)
+ return;
+
+ hpre_cnt_regs_clear(qm);
+ qm->debug.curr_qm_qp_num = 0;
+ hpre_show_last_regs_uninit(qm);
+ hpre_close_sva_prefetch(qm);
+ hisi_qm_dev_err_uninit(qm);
+}
+
static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hisi_qm *qm;
@@ -1406,7 +1517,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = hisi_qm_start(qm);
if (ret)
- goto err_with_err_init;
+ goto err_with_probe_init;
ret = hpre_debugfs_init(qm);
if (ret)
@@ -1445,9 +1556,8 @@ err_qm_del_list:
hpre_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
-err_with_err_init:
- hpre_show_last_regs_uninit(qm);
- hisi_qm_dev_err_uninit(qm);
+err_with_probe_init:
+ hpre_probe_uninit(qm);
err_with_qm_init:
hisi_qm_uninit(qm);
@@ -1469,13 +1579,7 @@ static void hpre_remove(struct pci_dev *pdev)
hpre_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
- if (qm->fun_type == QM_HW_PF) {
- hpre_cnt_regs_clear(qm);
- qm->debug.curr_qm_qp_num = 0;
- hpre_show_last_regs_uninit(qm);
- hisi_qm_dev_err_uninit(qm);
- }
-
+ hpre_probe_uninit(qm);
hisi_qm_uninit(qm);
}
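Note: with the pre-stored HPRE_CLUSTER_NUM_CAP entry gone, every hpre_main.c site above re-derives the cluster count from the packed HPRE_CORE_INFO capability word using the shift (20) and mask (GENMASK(3, 0)) taken from the hpre_basic_info table. A self-contained sketch of that extraction, using the v2 table value as sample input:

#include <stdint.h>
#include <stdio.h>

/* Shift/mask of HPRE_CLUSTER_NUM_CAP in hpre_basic_info: reg 0x313c, bit 20, 4 bits. */
#define CLUSTER_NUM_SHIFT 20
#define CLUSTER_NUM_MASK  0xfu   /* GENMASK(3, 0) */

static uint8_t cluster_num(uint32_t core_info)
{
        return (uint8_t)((core_info >> CLUSTER_NUM_SHIFT) & CLUSTER_NUM_MASK);
}

int main(void)
{
        /* 0x420802 is the v2 HPRE_CORE_INFO value from the new query table. */
        printf("clusters = %u\n", cluster_num(0x420802));   /* -> 4 */
        return 0;
}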
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 92f0a1d9b4a6..7c41f9593d03 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -30,8 +30,6 @@
/* mailbox */
#define QM_MB_PING_ALL_VFS 0xffff
-#define QM_MB_CMD_DATA_SHIFT 32
-#define QM_MB_CMD_DATA_MASK GENMASK(31, 0)
#define QM_MB_STATUS_MASK GENMASK(12, 9)
/* sqc shift */
@@ -102,6 +100,8 @@
#define QM_PM_CTRL 0x100148
#define QM_IDLE_DISABLE BIT(9)
+#define QM_SUB_VERSION_ID 0x210
+
#define QM_VFT_CFG_DATA_L 0x100064
#define QM_VFT_CFG_DATA_H 0x100068
#define QM_SQC_VFT_BUF_SIZE (7ULL << 8)
@@ -119,6 +119,7 @@
#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2 45
#define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0)
+#define QM_MAX_QC_TYPE 2
#define QM_ABNORMAL_INT_SOURCE 0x100000
#define QM_ABNORMAL_INT_MASK 0x100004
@@ -176,6 +177,10 @@
#define QM_IFC_INT_MASK 0x0024
#define QM_IFC_INT_STATUS 0x0028
#define QM_IFC_INT_SET_V 0x002C
+#define QM_PF2VF_PF_W 0x104700
+#define QM_VF2PF_PF_R 0x104800
+#define QM_VF2PF_VF_W 0x320
+#define QM_PF2VF_VF_R 0x380
#define QM_IFC_SEND_ALL_VFS GENMASK(6, 0)
#define QM_IFC_INT_SOURCE_CLR GENMASK(63, 0)
#define QM_IFC_INT_SOURCE_MASK BIT(0)
@@ -185,8 +190,11 @@
#define QM_WAIT_DST_ACK 10
#define QM_MAX_PF_WAIT_COUNT 10
#define QM_MAX_VF_WAIT_COUNT 40
-#define QM_VF_RESET_WAIT_US 20000
-#define QM_VF_RESET_WAIT_CNT 3000
+#define QM_VF_RESET_WAIT_US 20000
+#define QM_VF_RESET_WAIT_CNT 3000
+#define QM_VF2PF_REG_SIZE 4
+#define QM_IFC_CMD_MASK GENMASK(31, 0)
+#define QM_IFC_DATA_SHIFT 32
#define QM_VF_RESET_WAIT_TIMEOUT_US \
(QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)
@@ -234,8 +242,6 @@
#define QM_QOS_MAX_CIR_U 6
#define QM_AUTOSUSPEND_DELAY 3000
-#define QM_DEV_ALG_MAX_LEN 256
-
/* abnormal status value for stopping queue */
#define QM_STOP_QUEUE_FAIL 1
#define QM_DUMP_SQC_FAIL 3
@@ -271,18 +277,12 @@ enum vft_type {
SHAPER_VFT,
};
-enum acc_err_result {
- ACC_ERR_NONE,
- ACC_ERR_NEED_RESET,
- ACC_ERR_RECOVERED,
-};
-
enum qm_alg_type {
ALG_TYPE_0,
ALG_TYPE_1,
};
-enum qm_mb_cmd {
+enum qm_ifc_cmd {
QM_PF_FLR_PREPARE = 0x01,
QM_PF_SRST_PREPARE,
QM_PF_RESET_DONE,
@@ -307,11 +307,29 @@ enum qm_basic_type {
QM_VF_IRQ_NUM_CAP,
};
-enum qm_pre_store_cap_idx {
- QM_EQ_IRQ_TYPE_CAP_IDX = 0x0,
- QM_AEQ_IRQ_TYPE_CAP_IDX,
- QM_ABN_IRQ_TYPE_CAP_IDX,
- QM_PF2VF_IRQ_TYPE_CAP_IDX,
+enum qm_cap_table_type {
+ QM_CAP_VF = 0x0,
+ QM_AEQE_NUM,
+ QM_SCQE_NUM,
+ QM_EQ_IRQ,
+ QM_AEQ_IRQ,
+ QM_ABNORMAL_IRQ,
+ QM_MB_IRQ,
+ MAX_IRQ_NUM,
+ EXT_BAR_INDEX,
+};
+
+static const struct hisi_qm_cap_query_info qm_cap_query_info[] = {
+ {QM_CAP_VF, "QM_CAP_VF ", 0x3100, 0x0, 0x0, 0x6F01},
+ {QM_AEQE_NUM, "QM_AEQE_NUM ", 0x3104, 0x800, 0x4000800, 0x4000800},
+ {QM_SCQE_NUM, "QM_SCQE_NUM ",
+ 0x3108, 0x4000400, 0x4000400, 0x4000400},
+ {QM_EQ_IRQ, "QM_EQ_IRQ ", 0x310c, 0x10000, 0x10000, 0x10000},
+ {QM_AEQ_IRQ, "QM_AEQ_IRQ ", 0x3110, 0x0, 0x10001, 0x10001},
+ {QM_ABNORMAL_IRQ, "QM_ABNORMAL_IRQ ", 0x3114, 0x0, 0x10003, 0x10003},
+ {QM_MB_IRQ, "QM_MB_IRQ ", 0x3118, 0x0, 0x0, 0x10002},
+ {MAX_IRQ_NUM, "MAX_IRQ_NUM ", 0x311c, 0x10001, 0x40002, 0x40003},
+ {EXT_BAR_INDEX, "EXT_BAR_INDEX ", 0x3120, 0x0, 0x0, 0x14},
};
static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
@@ -321,6 +339,7 @@ static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
{QM_SUPPORT_STOP_FUNC, 0x3100, 0, BIT(10), 0x0, 0x0, 0x1},
{QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
{QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
+ {QM_SUPPORT_DAE, 0x3100, 0, BIT(15), 0x0, 0x0, 0x0},
};
static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
@@ -344,13 +363,6 @@ static const struct hisi_qm_cap_info qm_basic_info[] = {
{QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3},
};
-static const u32 qm_pre_store_caps[] = {
- QM_EQ_IRQ_TYPE_CAP,
- QM_AEQ_IRQ_TYPE_CAP,
- QM_ABN_IRQ_TYPE_CAP,
- QM_PF2VF_IRQ_TYPE_CAP,
-};
-
struct qm_mailbox {
__le16 w0;
__le16 queue_num;
@@ -391,6 +403,11 @@ struct hisi_qm_hw_ops {
void (*hw_error_uninit)(struct hisi_qm *qm);
enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
int (*set_msi)(struct hisi_qm *qm, bool set);
+
+ /* (u64)msg = (u32)data << 32 | (enum qm_ifc_cmd)cmd */
+ int (*set_ifc_begin)(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num);
+ void (*set_ifc_end)(struct hisi_qm *qm);
+ int (*get_ifc)(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num);
};
struct hisi_qm_hw_error {
@@ -450,6 +467,38 @@ static struct qm_typical_qos_table shaper_cbs_s[] = {
};
static void qm_irqs_unregister(struct hisi_qm *qm);
+static int qm_reset_device(struct hisi_qm *qm);
+
+int hisi_qm_q_num_set(const char *val, const struct kernel_param *kp,
+ unsigned int device)
+{
+ struct pci_dev *pdev;
+ u32 n, q_num;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL);
+ if (!pdev) {
+ q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
+ pr_info("No device found currently, suppose queue number is %u\n",
+ q_num);
+ } else {
+ if (pdev->revision == QM_HW_V1)
+ q_num = QM_QNUM_V1;
+ else
+ q_num = QM_QNUM_V2;
+
+ pci_dev_put(pdev);
+ }
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret || n < QM_MIN_QNUM || n > q_num)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_q_num_set);
static u32 qm_get_hw_error_status(struct hisi_qm *qm)
{
@@ -464,15 +513,20 @@ static u32 qm_get_dev_err_status(struct hisi_qm *qm)
/* Check if the error causes the master ooo block */
static bool qm_check_dev_error(struct hisi_qm *qm)
{
- u32 val, dev_val;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
+ u32 err_status;
- if (qm->fun_type == QM_HW_VF)
+ if (pf_qm->fun_type == QM_HW_VF)
return false;
- val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
- dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;
+ err_status = qm_get_hw_error_status(pf_qm);
+ if (err_status & pf_qm->err_info.qm_shutdown_mask)
+ return true;
+
+ if (pf_qm->err_ini->dev_is_abnormal)
+ return pf_qm->err_ini->dev_is_abnormal(pf_qm);
- return val || dev_val;
+ return false;
}
static int qm_wait_reset_finish(struct hisi_qm *qm)
@@ -617,7 +671,6 @@ EXPORT_SYMBOL_GPL(hisi_qm_mb);
/* op 0: set xqc information to hardware, 1: get xqc information from hardware. */
int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op)
{
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
struct qm_mailbox mailbox;
dma_addr_t xqc_dma;
void *tmp_xqc;
@@ -645,10 +698,13 @@ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op
tmp_xqc = qm->xqc_buf.aeqc;
xqc_dma = qm->xqc_buf.aeqc_dma;
break;
+ default:
+ dev_err(&qm->pdev->dev, "unknown mailbox cmd %u\n", cmd);
+ return -EINVAL;
}
/* Setting xqc will fail if master OOO is blocked. */
- if (qm_check_dev_error(pf_qm)) {
+ if (qm_check_dev_error(qm)) {
dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stop!\n");
return -EIO;
}
@@ -759,6 +815,27 @@ u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
}
EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info);
+u32 hisi_qm_get_cap_value(struct hisi_qm *qm,
+ const struct hisi_qm_cap_query_info *info_table,
+ u32 index, bool is_read)
+{
+ u32 val;
+
+ switch (qm->ver) {
+ case QM_HW_V1:
+ return info_table[index].v1_val;
+ case QM_HW_V2:
+ return info_table[index].v2_val;
+ default:
+ if (!is_read)
+ return info_table[index].v3_val;
+
+ val = readl(qm->io_base + info_table[index].offset);
+ return val;
+ }
+}
+EXPORT_SYMBOL_GPL(hisi_qm_get_cap_value);
+
static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
u16 *high_bits, enum qm_basic_type type)
{
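Note: hisi_qm_get_cap_value() above resolves a capability three ways — fixed table values for HW v1/v2 (which lack capability registers), a register read on newer hardware when reads are allowed, and the v3 table value otherwise. A compact userspace model of that dispatch (reg_read() is only a stand-in for the driver's readl() on the mapped BAR):

#include <stdint.h>
#include <stdio.h>

/* Trimmed mirror of struct hisi_qm_cap_query_info. */
struct cap_query_stub {
        uint32_t offset;
        uint32_t v1_val, v2_val, v3_val;
};

enum hw_ver { HW_V1 = 1, HW_V2, HW_V3 };

/* Stand-in for readl() on the mapped BAR; returns a pretend hardware value. */
static uint32_t reg_read(uint32_t offset)
{
        (void)offset;
        return 0x10001;
}

static uint32_t get_cap_value(enum hw_ver ver, const struct cap_query_stub *info,
                              int is_read)
{
        switch (ver) {
        case HW_V1:
                return info->v1_val;           /* no capability registers on v1 */
        case HW_V2:
                return info->v2_val;           /* nor on v2: use the fixed table */
        default:
                if (!is_read)
                        return info->v3_val;   /* reads disallowed: table fallback */
                return reg_read(info->offset); /* v3+: trust the hardware */
        }
}

int main(void)
{
        struct cap_query_stub q = { 0x3110, 0x0, 0x10001, 0x10001 }; /* QM_AEQ_IRQ row */

        printf("v2=0x%x v3=0x%x\n", get_cap_value(HW_V2, &q, 1),
               get_cap_value(HW_V3, &q, 1));
        return 0;
}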
@@ -785,7 +862,7 @@ int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *d
return -EINVAL;
}
- algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+ algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN, GFP_KERNEL);
if (!algs)
return -ENOMEM;
@@ -794,10 +871,10 @@ int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *d
strcat(algs, dev_algs[i].alg);
ptr = strrchr(algs, '\n');
- if (ptr) {
+ if (ptr)
*ptr = '\0';
- qm->uacce->algs = algs;
- }
+
+ qm->uacce->algs = algs;
return 0;
}
@@ -991,11 +1068,10 @@ static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
static void qm_reset_function(struct hisi_qm *qm)
{
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
struct device *dev = &qm->pdev->dev;
int ret;
- if (qm_check_dev_error(pf_qm))
+ if (qm_check_dev_error(qm))
return;
ret = qm_reset_prepare_ready(qm);
@@ -1421,22 +1497,25 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
- u32 error_status, tmp;
+ u32 error_status;
- /* read err sts */
- tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
- error_status = qm->error_mask & tmp;
-
- if (error_status) {
+ error_status = qm_get_hw_error_status(qm);
+ if (error_status & qm->error_mask) {
if (error_status & QM_ECC_MBIT)
qm->err_status.is_qm_ecc_mbit = true;
qm_log_hw_error(qm, error_status);
- if (error_status & qm->err_info.qm_reset_mask)
+ if (error_status & qm->err_info.qm_reset_mask) {
+ /* Disable the same error reporting until device is recovered. */
+ writel(qm->err_info.nfe & (~error_status),
+ qm->io_base + QM_RAS_NFE_ENABLE);
return ACC_ERR_NEED_RESET;
+ }
+ /* Clear error source if not need reset. */
writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(qm->err_info.ce, qm->io_base + QM_RAS_CE_ENABLE);
}
return ACC_ERR_RECOVERED;
@@ -1476,17 +1555,15 @@ static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
{
struct device *dev = &qm->pdev->dev;
- u32 cmd;
- u64 msg;
+ enum qm_ifc_cmd cmd;
int ret;
- ret = qm_get_mb_cmd(qm, &msg, vf_id);
+ ret = qm->ops->get_ifc(qm, &cmd, NULL, vf_id);
if (ret) {
- dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
+ dev_err(dev, "failed to get command from VF(%u)!\n", vf_id);
return;
}
- cmd = msg & QM_MB_CMD_DATA_MASK;
switch (cmd) {
case QM_VF_PREPARE_FAIL:
dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
@@ -1498,7 +1575,7 @@ static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
case QM_VF_START_DONE:
break;
default:
- dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id);
+ dev_err(dev, "unsupported command(0x%x) sent by VF(%u)!\n", cmd, vf_id);
break;
}
}
@@ -1566,17 +1643,14 @@ static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
writel(val, qm->io_base + QM_IFC_INT_SET_V);
}
-static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
+static int qm_ping_single_vf(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
{
struct device *dev = &qm->pdev->dev;
- struct qm_mailbox mailbox;
int cnt = 0;
u64 val;
int ret;
- qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0);
- mutex_lock(&qm->mailbox_lock);
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm->ops->set_ifc_begin(qm, cmd, data, fun_num);
if (ret) {
dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
goto err_unlock;
@@ -1598,27 +1672,23 @@ static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
}
err_unlock:
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
return ret;
}
-static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
+static int qm_ping_all_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
struct device *dev = &qm->pdev->dev;
u32 vfs_num = qm->vfs_num;
- struct qm_mailbox mailbox;
u64 val = 0;
int cnt = 0;
int ret;
u32 i;
- qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0);
- mutex_lock(&qm->mailbox_lock);
- /* PF sends command to all VFs by mailbox */
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm->ops->set_ifc_begin(qm, cmd, 0, QM_MB_PING_ALL_VFS);
if (ret) {
- dev_err(dev, "failed to send command to VFs!\n");
- mutex_unlock(&qm->mailbox_lock);
+ dev_err(dev, "failed to send command(0x%x) to all vfs!\n", cmd);
+ qm->ops->set_ifc_end(qm);
return ret;
}
@@ -1628,7 +1698,7 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
val = readq(qm->io_base + QM_IFC_READY_STATUS);
/* If all VFs acked, PF notifies VFs successfully. */
if (!(val & GENMASK(vfs_num, 1))) {
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
return 0;
}
@@ -1636,7 +1706,7 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
break;
}
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
/* Check which vf respond timeout. */
for (i = 1; i <= vfs_num; i++) {
@@ -1647,18 +1717,15 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
return -ETIMEDOUT;
}
-static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
+static int qm_ping_pf(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
- struct qm_mailbox mailbox;
int cnt = 0;
u32 val;
int ret;
- qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0);
- mutex_lock(&qm->mailbox_lock);
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm->ops->set_ifc_begin(qm, cmd, 0, 0);
if (ret) {
- dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
+ dev_err(&qm->pdev->dev, "failed to send command(0x%x) to PF!\n", cmd);
goto unlock;
}
@@ -1677,7 +1744,8 @@ static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
}
unlock:
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
+
return ret;
}
@@ -1778,6 +1846,94 @@ static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
return ret;
}
+static int qm_set_ifc_begin_v3(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
+{
+ struct qm_mailbox mailbox;
+ u64 msg;
+
+ msg = cmd | (u64)data << QM_IFC_DATA_SHIFT;
+
+ qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, msg, fun_num, 0);
+ mutex_lock(&qm->mailbox_lock);
+ return qm_mb_nolock(qm, &mailbox);
+}
+
+static void qm_set_ifc_end_v3(struct hisi_qm *qm)
+{
+ mutex_unlock(&qm->mailbox_lock);
+}
+
+static int qm_get_ifc_v3(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num)
+{
+ u64 msg;
+ int ret;
+
+ ret = qm_get_mb_cmd(qm, &msg, fun_num);
+ if (ret)
+ return ret;
+
+ *cmd = msg & QM_IFC_CMD_MASK;
+
+ if (data)
+ *data = msg >> QM_IFC_DATA_SHIFT;
+
+ return 0;
+}
+
+static int qm_set_ifc_begin_v4(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
+{
+ uintptr_t offset;
+ u64 msg;
+
+ if (qm->fun_type == QM_HW_PF)
+ offset = QM_PF2VF_PF_W;
+ else
+ offset = QM_VF2PF_VF_W;
+
+ msg = cmd | (u64)data << QM_IFC_DATA_SHIFT;
+
+ mutex_lock(&qm->ifc_lock);
+ writeq(msg, qm->io_base + offset);
+
+ return 0;
+}
+
+static void qm_set_ifc_end_v4(struct hisi_qm *qm)
+{
+ mutex_unlock(&qm->ifc_lock);
+}
+
+static u64 qm_get_ifc_pf(struct hisi_qm *qm, u32 fun_num)
+{
+ uintptr_t offset;
+
+ offset = QM_VF2PF_PF_R + QM_VF2PF_REG_SIZE * fun_num;
+
+ return (u64)readl(qm->io_base + offset);
+}
+
+static u64 qm_get_ifc_vf(struct hisi_qm *qm)
+{
+ return readq(qm->io_base + QM_PF2VF_VF_R);
+}
+
+static int qm_get_ifc_v4(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num)
+{
+ u64 msg;
+
+ if (qm->fun_type == QM_HW_PF)
+ msg = qm_get_ifc_pf(qm, fun_num);
+ else
+ msg = qm_get_ifc_vf(qm);
+
+ *cmd = msg & QM_IFC_CMD_MASK;
+
+ if (data)
+ *data = msg >> QM_IFC_DATA_SHIFT;
+
+ return 0;
+}
+
static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
.qm_db = qm_db_v1,
.hw_error_init = qm_hw_error_init_v1,
@@ -1800,6 +1956,21 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
.hw_error_uninit = qm_hw_error_uninit_v3,
.hw_error_handle = qm_hw_error_handle_v2,
.set_msi = qm_set_msi_v3,
+ .set_ifc_begin = qm_set_ifc_begin_v3,
+ .set_ifc_end = qm_set_ifc_end_v3,
+ .get_ifc = qm_get_ifc_v3,
+};
+
+static const struct hisi_qm_hw_ops qm_hw_ops_v4 = {
+ .get_vft = qm_get_vft_v2,
+ .qm_db = qm_db_v2,
+ .hw_error_init = qm_hw_error_init_v3,
+ .hw_error_uninit = qm_hw_error_uninit_v3,
+ .hw_error_handle = qm_hw_error_handle_v2,
+ .set_msi = qm_set_msi_v3,
+ .set_ifc_begin = qm_set_ifc_begin_v4,
+ .set_ifc_end = qm_set_ifc_end_v4,
+ .get_ifc = qm_get_ifc_v4,
};
static void *qm_get_avail_sqe(struct hisi_qp *qp)
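Note: both the v3 (mailbox-based) and v4 (dedicated-register) ifc ops above move a 64-bit message whose low 32 bits carry the command and whose high 32 bits carry optional data, per the '(u64)msg = (u32)data << 32 | (enum qm_ifc_cmd)cmd' comment. A minimal pack/unpack sketch (the 0x8 command id below is a placeholder, not the real QM_PF_SET_QOS encoding):

#include <stdint.h>
#include <stdio.h>

#define IFC_CMD_MASK   0xffffffffull   /* mirrors QM_IFC_CMD_MASK = GENMASK(31, 0) */
#define IFC_DATA_SHIFT 32              /* mirrors QM_IFC_DATA_SHIFT */

static uint64_t ifc_pack(uint32_t cmd, uint32_t data)
{
        return (uint64_t)cmd | ((uint64_t)data << IFC_DATA_SHIFT);
}

static void ifc_unpack(uint64_t msg, uint32_t *cmd, uint32_t *data)
{
        *cmd = (uint32_t)(msg & IFC_CMD_MASK);
        if (data)   /* callers that only want the command pass NULL, as get_ifc allows */
                *data = (uint32_t)(msg >> IFC_DATA_SHIFT);
}

int main(void)
{
        uint32_t cmd, qos;
        uint64_t msg = ifc_pack(0x8 /* placeholder command id */, 25 /* qos */);

        ifc_unpack(msg, &cmd, &qos);
        printf("cmd=0x%x qos=%u\n", cmd, qos);
        return 0;
}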
@@ -2092,12 +2263,11 @@ static int qm_wait_qp_empty(struct hisi_qm *qm, u32 *state, u32 qp_id)
static int qm_drain_qp(struct hisi_qp *qp)
{
struct hisi_qm *qm = qp->qm;
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
u32 state = 0;
int ret;
/* No need to judge if master OOO is blocked. */
- if (qm_check_dev_error(pf_qm))
+ if (qm_check_dev_error(qm))
return 0;
/* HW V3 supports drain qp by device */
@@ -2411,7 +2581,7 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
sizeof(struct hisi_qp_ctx)))
return -EFAULT;
- if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
+ if (qp_ctx.qc_type > QM_MAX_QC_TYPE)
return -EINVAL;
qm_set_sqctype(q, qp_ctx.qc_type);
@@ -2779,11 +2949,14 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
qm->ops = &qm_hw_ops_v1;
else if (qm->ver == QM_HW_V2)
qm->ops = &qm_hw_ops_v2;
- else
+ else if (qm->ver == QM_HW_V3)
qm->ops = &qm_hw_ops_v3;
+ else
+ qm->ops = &qm_hw_ops_v4;
pci_set_drvdata(pdev, qm);
mutex_init(&qm->mailbox_lock);
+ mutex_init(&qm->ifc_lock);
init_rwsem(&qm->qps_lock);
qm->qp_in_used = 0;
if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
@@ -2893,12 +3066,9 @@ void hisi_qm_uninit(struct hisi_qm *qm)
hisi_qm_set_state(qm, QM_NOT_READY);
up_write(&qm->qps_lock);
+ qm_remove_uacce(qm);
qm_irqs_unregister(qm);
hisi_qm_pci_uninit(qm);
- if (qm->use_sva) {
- uacce_remove(qm->uacce);
- qm->uacce = NULL;
- }
}
EXPORT_SYMBOL_GPL(hisi_qm_uninit);
@@ -3546,7 +3716,6 @@ static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
{
struct device *dev = &qm->pdev->dev;
- u64 mb_cmd;
u32 qos;
int ret;
@@ -3556,10 +3725,9 @@ static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
return;
}
- mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT;
- ret = qm_ping_single_vf(qm, mb_cmd, fun_num);
+ ret = qm_ping_single_vf(qm, QM_PF_SET_QOS, qos, fun_num);
if (ret)
- dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num);
+ dev_err(dev, "failed to send command(0x%x) to VF(%u)!\n", QM_PF_SET_QOS, fun_num);
}
static int qm_vf_read_qos(struct hisi_qm *qm)
@@ -3793,14 +3961,13 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
goto err_put_sync;
}
- qm->vfs_num = num_vfs;
-
ret = pci_enable_sriov(pdev, num_vfs);
if (ret) {
pci_err(pdev, "Can't enable VF!\n");
qm_clear_vft_config(qm);
goto err_put_sync;
}
+ qm->vfs_num = num_vfs;
pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
@@ -3822,7 +3989,6 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
- int ret;
if (pci_vfs_assigned(pdev)) {
pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
@@ -3837,13 +4003,10 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
pci_disable_sriov(pdev);
- ret = qm_clear_vft_config(qm);
- if (ret)
- return ret;
-
+ qm->vfs_num = 0;
qm_pm_put_sync(qm);
- return 0;
+ return qm_clear_vft_config(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
@@ -3865,30 +4028,12 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
{
- u32 err_sts;
-
- if (!qm->err_ini->get_dev_hw_err_status) {
- dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n");
+ if (!qm->err_ini->get_err_result) {
+ dev_err(&qm->pdev->dev, "Device doesn't support reset!\n");
return ACC_ERR_NONE;
}
- /* get device hardware error status */
- err_sts = qm->err_ini->get_dev_hw_err_status(qm);
- if (err_sts) {
- if (err_sts & qm->err_info.ecc_2bits_mask)
- qm->err_status.is_dev_ecc_mbit = true;
-
- if (qm->err_ini->log_dev_hw_err)
- qm->err_ini->log_dev_hw_err(qm, err_sts);
-
- if (err_sts & qm->err_info.dev_reset_mask)
- return ACC_ERR_NEED_RESET;
-
- if (qm->err_ini->clear_dev_hw_err_status)
- qm->err_ini->clear_dev_hw_err_status(qm, err_sts);
- }
-
- return ACC_ERR_RECOVERED;
+ return qm->err_ini->get_err_result(qm);
}
static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
@@ -4019,6 +4164,28 @@ static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
return -ETIMEDOUT;
}
+static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
+{
+ u32 nfe_enb = 0;
+
+ /* Kunpeng930 hardware automatically close master ooo when NFE occurs */
+ if (qm->ver >= QM_HW_V3)
+ return;
+
+ if (!qm->err_status.is_dev_ecc_mbit &&
+ qm->err_status.is_qm_ecc_mbit &&
+ qm->err_ini->close_axi_master_ooo) {
+ qm->err_ini->close_axi_master_ooo(qm);
+ } else if (qm->err_status.is_dev_ecc_mbit &&
+ !qm->err_status.is_qm_ecc_mbit &&
+ !qm->err_ini->close_axi_master_ooo) {
+ nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
+ qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
+ }
+}
+
static int qm_vf_reset_prepare(struct hisi_qm *qm,
enum qm_stop_reason stop_reason)
{
@@ -4049,7 +4216,7 @@ stop_fail:
return ret;
}
-static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
+static int qm_try_stop_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd,
enum qm_stop_reason stop_reason)
{
struct pci_dev *pdev = qm->pdev;
@@ -4062,7 +4229,7 @@ static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
ret = qm_ping_all_vfs(qm, cmd);
if (ret)
- pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
+ pci_err(pdev, "failed to send command to all VFs before PF reset!\n");
} else {
ret = qm_vf_reset_prepare(qm, stop_reason);
if (ret)
@@ -4077,12 +4244,20 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
int ret;
+ if (qm->err_ini->set_priv_status) {
+ ret = qm->err_ini->set_priv_status(qm);
+ if (ret)
+ return ret;
+ }
+
ret = qm_reset_prepare_ready(qm);
if (ret) {
pci_err(pdev, "Controller reset not ready!\n");
return ret;
}
+ qm_dev_ecc_mbit_handle(qm);
+
/* PF obtains the information of VF by querying the register. */
qm_cmd_uninit(qm);
@@ -4113,33 +4288,26 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
return 0;
}
-static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
+static int qm_master_ooo_check(struct hisi_qm *qm)
{
- u32 nfe_enb = 0;
+ u32 val;
+ int ret;
- /* Kunpeng930 hardware automatically close master ooo when NFE occurs */
- if (qm->ver >= QM_HW_V3)
- return;
+ /* Check the ooo register of the device before resetting the device. */
+ writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
+ ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
+ val, (val == ACC_MASTER_TRANS_RETURN_RW),
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret)
+ pci_warn(qm->pdev, "Bus lock! Please reset system.\n");
- if (!qm->err_status.is_dev_ecc_mbit &&
- qm->err_status.is_qm_ecc_mbit &&
- qm->err_ini->close_axi_master_ooo) {
- qm->err_ini->close_axi_master_ooo(qm);
- } else if (qm->err_status.is_dev_ecc_mbit &&
- !qm->err_status.is_qm_ecc_mbit &&
- !qm->err_ini->close_axi_master_ooo) {
- nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
- writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
- qm->io_base + QM_RAS_NFE_ENABLE);
- writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
- }
+ return ret;
}
-static int qm_soft_reset(struct hisi_qm *qm)
+static int qm_soft_reset_prepare(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
int ret;
- u32 val;
/* Ensure all doorbells and mailboxes received by QM */
ret = qm_check_req_recv(qm);
@@ -4160,30 +4328,23 @@ static int qm_soft_reset(struct hisi_qm *qm)
return ret;
}
- qm_dev_ecc_mbit_handle(qm);
-
- /* OOO register set and check */
- writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
- qm->io_base + ACC_MASTER_GLOBAL_CTRL);
-
- /* If bus lock, reset chip */
- ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
- val,
- (val == ACC_MASTER_TRANS_RETURN_RW),
- POLL_PERIOD, POLL_TIMEOUT);
- if (ret) {
- pci_emerg(pdev, "Bus lock! Please reset system.\n");
+ ret = qm_master_ooo_check(qm);
+ if (ret)
return ret;
- }
if (qm->err_ini->close_sva_prefetch)
qm->err_ini->close_sva_prefetch(qm);
ret = qm_set_pf_mse(qm, false);
- if (ret) {
+ if (ret)
pci_err(pdev, "Fails to disable pf MSE bit.\n");
- return ret;
- }
+
+ return ret;
+}
+
+static int qm_reset_device(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
/* The reset related sub-control registers are not in PCI BAR */
if (ACPI_HANDLE(&pdev->dev)) {
@@ -4202,12 +4363,23 @@ static int qm_soft_reset(struct hisi_qm *qm)
pci_err(pdev, "Reset step %llu failed!\n", value);
return -EIO;
}
- } else {
- pci_err(pdev, "No reset method!\n");
- return -EINVAL;
+
+ return 0;
}
- return 0;
+ pci_err(pdev, "No reset method!\n");
+ return -EINVAL;
+}
+
+static int qm_soft_reset(struct hisi_qm *qm)
+{
+ int ret;
+
+ ret = qm_soft_reset_prepare(qm);
+ if (ret)
+ return ret;
+
+ return qm_reset_device(qm);
}
static int qm_vf_reset_done(struct hisi_qm *qm)
@@ -4239,7 +4411,7 @@ restart_fail:
return ret;
}
-static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
+static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -4468,7 +4640,7 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
* Check whether there is an ECC mbit error, If it occurs, need to
* wait for soft reset to fix it.
*/
- while (qm_check_dev_error(pf_qm)) {
+ while (qm_check_dev_error(qm)) {
msleep(++delay);
if (delay > QM_RESET_WAIT_TIMEOUT)
return;
@@ -4616,7 +4788,7 @@ static void hisi_qm_controller_reset(struct work_struct *rst_work)
static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
enum qm_stop_reason stop_reason)
{
- enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE;
+ enum qm_ifc_cmd cmd = QM_VF_PREPARE_DONE;
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -4650,7 +4822,7 @@ out:
static void qm_pf_reset_vf_done(struct hisi_qm *qm)
{
- enum qm_mb_cmd cmd = QM_VF_START_DONE;
+ enum qm_ifc_cmd cmd = QM_VF_START_DONE;
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -4673,7 +4845,6 @@ static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
u32 val, cmd;
- u64 msg;
int ret;
/* Wait for reset to finish */
@@ -4690,16 +4861,15 @@ static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
* Whether message is got successfully,
* VF needs to ack PF by clearing the interrupt.
*/
- ret = qm_get_mb_cmd(qm, &msg, 0);
+ ret = qm->ops->get_ifc(qm, &cmd, NULL, 0);
qm_clear_cmd_interrupt(qm, 0);
if (ret) {
- dev_err(dev, "failed to get msg from PF in reset done!\n");
+ dev_err(dev, "failed to get command from PF in reset done!\n");
return ret;
}
- cmd = msg & QM_MB_CMD_DATA_MASK;
if (cmd != QM_PF_RESET_DONE) {
- dev_err(dev, "the cmd(%u) is not reset done!\n", cmd);
+ dev_err(dev, "the command(0x%x) is not reset done!\n", cmd);
ret = -EINVAL;
}
@@ -4736,22 +4906,21 @@ err_get_status:
static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
{
struct device *dev = &qm->pdev->dev;
- u64 msg;
- u32 cmd;
+ enum qm_ifc_cmd cmd;
+ u32 data;
int ret;
/*
* Get the msg from source by sending mailbox. Whether message is got
* successfully, destination needs to ack source by clearing the interrupt.
*/
- ret = qm_get_mb_cmd(qm, &msg, fun_num);
+ ret = qm->ops->get_ifc(qm, &cmd, &data, fun_num);
qm_clear_cmd_interrupt(qm, BIT(fun_num));
if (ret) {
- dev_err(dev, "failed to get msg from source!\n");
+ dev_err(dev, "failed to get command from source!\n");
return;
}
- cmd = msg & QM_MB_CMD_DATA_MASK;
switch (cmd) {
case QM_PF_FLR_PREPARE:
qm_pf_reset_vf_process(qm, QM_DOWN);
@@ -4763,10 +4932,10 @@ static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
qm_vf_get_qos(qm, fun_num);
break;
case QM_PF_SET_QOS:
- qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT;
+ qm->mb_qos = data;
break;
default:
- dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num);
+ dev_err(dev, "unsupported command(0x%x) sent by function(%u)!\n", cmd, fun_num);
break;
}
}
@@ -4849,7 +5018,7 @@ static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
if (qm->fun_type == QM_HW_VF)
return;
- val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
return;
@@ -4866,7 +5035,7 @@ static int qm_register_abnormal_irq(struct hisi_qm *qm)
if (qm->fun_type == QM_HW_VF)
return 0;
- val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
return 0;
@@ -4883,7 +5052,7 @@ static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
u32 irq_vector, val;
- val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return;
@@ -4897,7 +5066,7 @@ static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
u32 irq_vector, val;
int ret;
- val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return 0;
@@ -4914,7 +5083,7 @@ static void qm_unregister_aeq_irq(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
u32 irq_vector, val;
- val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return;
@@ -4928,7 +5097,7 @@ static int qm_register_aeq_irq(struct hisi_qm *qm)
u32 irq_vector, val;
int ret;
- val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return 0;
@@ -4946,7 +5115,7 @@ static void qm_unregister_eq_irq(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
u32 irq_vector, val;
- val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return;
@@ -4960,7 +5129,7 @@ static int qm_register_eq_irq(struct hisi_qm *qm)
u32 irq_vector, val;
int ret;
- val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return 0;
@@ -5048,24 +5217,26 @@ static int qm_get_qp_num(struct hisi_qm *qm)
return 0;
}
-static int qm_pre_store_irq_type_caps(struct hisi_qm *qm)
+static int qm_pre_store_caps(struct hisi_qm *qm)
{
struct hisi_qm_cap_record *qm_cap;
struct pci_dev *pdev = qm->pdev;
size_t i, size;
- size = ARRAY_SIZE(qm_pre_store_caps);
- qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL);
+ size = ARRAY_SIZE(qm_cap_query_info);
+	qm_cap = devm_kcalloc(&pdev->dev, size, sizeof(*qm_cap), GFP_KERNEL);
if (!qm_cap)
return -ENOMEM;
for (i = 0; i < size; i++) {
- qm_cap[i].type = qm_pre_store_caps[i];
- qm_cap[i].cap_val = hisi_qm_get_hw_info(qm, qm_basic_info,
- qm_pre_store_caps[i], qm->cap_ver);
+ qm_cap[i].type = qm_cap_query_info[i].type;
+ qm_cap[i].name = qm_cap_query_info[i].name;
+ qm_cap[i].cap_val = hisi_qm_get_cap_value(qm, qm_cap_query_info,
+ i, qm->cap_ver);
}
qm->cap_tables.qm_cap_table = qm_cap;
+ qm->cap_tables.qm_cap_size = size;
return 0;
}
@@ -5102,8 +5273,22 @@ static int qm_get_hw_caps(struct hisi_qm *qm)
set_bit(cap_info[i].type, &qm->caps);
}
- /* Fetch and save the value of irq type related capability registers */
- return qm_pre_store_irq_type_caps(qm);
+ /* Fetch and save the value of qm capability registers */
+ return qm_pre_store_caps(qm);
+}
+
+static void qm_get_version(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 sub_version_id;
+
+ qm->ver = pdev->revision;
+
+ if (pdev->revision == QM_HW_V3) {
+ sub_version_id = readl(qm->io_base + QM_SUB_VERSION_ID);
+ if (sub_version_id)
+ qm->ver = sub_version_id;
+ }
}
static int qm_get_pci_res(struct hisi_qm *qm)
@@ -5125,6 +5310,8 @@ static int qm_get_pci_res(struct hisi_qm *qm)
goto err_request_mem_regions;
}
+ qm_get_version(qm);
+
ret = qm_get_hw_caps(qm);
if (ret)
goto err_ioremap;
@@ -5144,6 +5331,7 @@ static int qm_get_pci_res(struct hisi_qm *qm)
qm->db_interval = 0;
}
+ hisi_qm_pre_init(qm);
ret = qm_get_qp_num(qm);
if (ret)
goto err_db_ioremap;
@@ -5160,6 +5348,43 @@ err_request_mem_regions:
return ret;
}
+static int qm_clear_device(struct hisi_qm *qm)
+{
+ acpi_handle handle = ACPI_HANDLE(&qm->pdev->dev);
+ int ret;
+
+ if (qm->fun_type == QM_HW_VF)
+ return 0;
+
+ /* Device does not support reset, return */
+ if (!qm->err_ini->err_info_init)
+ return 0;
+ qm->err_ini->err_info_init(qm);
+
+ if (!handle)
+ return 0;
+
+ /* No reset method, return */
+ if (!acpi_has_method(handle, qm->err_info.acpi_rst))
+ return 0;
+
+ ret = qm_master_ooo_check(qm);
+ if (ret) {
+ writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
+ return ret;
+ }
+
+ if (qm->err_ini->set_priv_status) {
+ ret = qm->err_ini->set_priv_status(qm);
+ if (ret) {
+ writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
+ return ret;
+ }
+ }
+
+ return qm_reset_device(qm);
+}
+
static int hisi_qm_pci_init(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -5189,8 +5414,14 @@ static int hisi_qm_pci_init(struct hisi_qm *qm)
goto err_get_pci_res;
}
+ ret = qm_clear_device(qm);
+ if (ret)
+ goto err_free_vectors;
+
return 0;
+err_free_vectors:
+ pci_free_irq_vectors(pdev);
err_get_pci_res:
qm_put_pci_res(qm);
err_disable_pcidev:
@@ -5365,8 +5596,6 @@ int hisi_qm_init(struct hisi_qm *qm)
struct device *dev = &pdev->dev;
int ret;
- hisi_qm_pre_init(qm);
-
ret = hisi_qm_pci_init(qm);
if (ret)
return ret;
@@ -5491,7 +5720,6 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
int ret;
- u32 val;
ret = qm->ops->set_msi(qm, false);
if (ret) {
@@ -5499,17 +5727,14 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm)
return ret;
}
- /* shutdown OOO register */
- writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
- qm->io_base + ACC_MASTER_GLOBAL_CTRL);
-
- ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
- val,
- (val == ACC_MASTER_TRANS_RETURN_RW),
- POLL_PERIOD, POLL_TIMEOUT);
- if (ret) {
- pci_emerg(pdev, "Bus lock! Please reset system.\n");
+ ret = qm_master_ooo_check(qm);
+ if (ret)
return ret;
+
+ if (qm->err_ini->set_priv_status) {
+ ret = qm->err_ini->set_priv_status(qm);
+ if (ret)
+ return ret;
}
ret = qm_set_pf_mse(qm, false);
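
The hunks above replace two open-coded copies of the AXI master OOO shutdown-and-drain sequence (one in qm_soft_reset(), one in qm_prepare_for_suspend()) with calls to qm_master_ooo_check(), of which only the tail is visible in the first hunk. Reassembled from the removed copies, the full helper presumably reads as follows; this is a sketch for orientation, not text quoted from the patch:

	static int qm_master_ooo_check(struct hisi_qm *qm)
	{
		u32 val;
		int ret;

		/* Shut down the AXI master OOO port, then wait for in-flight
		 * transactions to drain; a timeout means the bus is locked.
		 */
		writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
		       qm->io_base + ACC_MASTER_GLOBAL_CTRL);
		ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
						 val, (val == ACC_MASTER_TRANS_RETURN_RW),
						 POLL_PERIOD, POLL_TIMEOUT);
		if (ret)
			pci_warn(qm->pdev, "Bus lock! Please reset system.\n");

		return ret;
	}
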
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
index afdddf87cc34..ef0cb733c92c 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.c
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -458,7 +458,7 @@ static void sec_ipv6_hashmask(struct sec_dev_info *info, u32 hash_mask[])
static int sec_ipv4_hashmask(struct sec_dev_info *info, u32 hash_mask)
{
if (hash_mask & SEC_HASH_IPV4_MASK) {
- dev_err(info->dev, "Sec Ipv4 Hash Mask Input Error!\n ");
+ dev_err(info->dev, "Sec Ipv4 Hash Mask Input Error!\n");
return -EINVAL;
}
@@ -1304,7 +1304,7 @@ MODULE_DEVICE_TABLE(acpi, sec_acpi_match);
static struct platform_driver sec_driver = {
.probe = sec_probe,
- .remove_new = sec_remove,
+ .remove = sec_remove,
.driver = {
.name = "hisi_sec_platform_driver",
.of_match_table = sec_match,
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 410c83712e28..703920b49c7c 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -90,9 +90,7 @@ struct sec_auth_ctx {
dma_addr_t a_key_dma;
u8 *a_key;
u8 a_key_len;
- u8 mac_len;
u8 a_alg;
- bool fallback;
struct crypto_shash *hash_tfm;
struct crypto_aead *fallback_aead_tfm;
};
@@ -220,11 +218,27 @@ enum sec_cap_type {
SEC_CORE4_ALG_BITMAP_HIGH,
};
-enum sec_cap_reg_record_idx {
- SEC_DRV_ALG_BITMAP_LOW_IDX = 0x0,
- SEC_DRV_ALG_BITMAP_HIGH_IDX,
- SEC_DEV_ALG_BITMAP_LOW_IDX,
- SEC_DEV_ALG_BITMAP_HIGH_IDX,
+enum sec_cap_table_type {
+ QM_RAS_NFE_TYPE = 0x0,
+ QM_RAS_NFE_RESET,
+ QM_RAS_CE_TYPE,
+ SEC_RAS_NFE_TYPE,
+ SEC_RAS_NFE_RESET,
+ SEC_RAS_CE_TYPE,
+ SEC_CORE_INFO,
+ SEC_CORE_EN,
+ SEC_DRV_ALG_BITMAP_LOW_TB,
+ SEC_DRV_ALG_BITMAP_HIGH_TB,
+ SEC_ALG_BITMAP_LOW,
+ SEC_ALG_BITMAP_HIGH,
+ SEC_CORE1_BITMAP_LOW,
+ SEC_CORE1_BITMAP_HIGH,
+ SEC_CORE2_BITMAP_LOW,
+ SEC_CORE2_BITMAP_HIGH,
+ SEC_CORE3_BITMAP_LOW,
+ SEC_CORE3_BITMAP_HIGH,
+ SEC_CORE4_BITMAP_LOW,
+ SEC_CORE4_BITMAP_HIGH,
};
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
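
The sec_cap_table_type values above index sec_cap_query_info[] (added in sec_main.c further down), so the enum and the table must stay in lockstep. A compile-time guard along these lines could catch drift; it is illustrative only and not part of this patch, and would sit in sec_main.c next to the table:

	static_assert(ARRAY_SIZE(sec_cap_query_info) == SEC_CORE4_BITMAP_HIGH + 1,
		      "sec_cap_table_type and sec_cap_query_info[] out of sync");
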
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 93a972fcbf63..8ea5305bc320 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -57,7 +57,6 @@
#define SEC_TYPE_MASK 0x0F
#define SEC_DONE_MASK 0x0001
#define SEC_ICV_MASK 0x000E
-#define SEC_SQE_LEN_RATE_MASK 0x3
#define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth))
#define SEC_SGL_SGE_NR 128
@@ -80,16 +79,16 @@
#define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \
SEC_PBUF_LEFT_SZ(depth))
-#define SEC_SQE_LEN_RATE 4
#define SEC_SQE_CFLAG 2
#define SEC_SQE_AEAD_FLAG 3
#define SEC_SQE_DONE 0x1
#define SEC_ICV_ERR 0x2
-#define MIN_MAC_LEN 4
#define MAC_LEN_MASK 0x1U
#define MAX_INPUT_DATA_LEN 0xFFFE00
#define BITS_MASK 0xFF
+#define WORD_MASK 0x3
#define BYTE_BITS 0x8
+#define BYTES_TO_WORDS(bcount) ((bcount) >> 2)
#define SEC_XTS_NAME_SZ 0x3
#define IV_CM_CAL_NUM 2
#define IV_CL_MASK 0x7
@@ -481,8 +480,10 @@ static void sec_alg_resource_free(struct sec_ctx *ctx,
if (ctx->pbuf_supported)
sec_free_pbuf_resource(dev, qp_ctx->res);
- if (ctx->alg_type == SEC_AEAD)
+ if (ctx->alg_type == SEC_AEAD) {
sec_free_mac_resource(dev, qp_ctx->res);
+ sec_free_aiv_resource(dev, qp_ctx->res);
+ }
}
static int sec_alloc_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
@@ -689,14 +690,10 @@ static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
c_ctx->fallback = false;
- /* Currently, only XTS mode need fallback tfm when using 192bit key */
- if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
- return 0;
-
c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(c_ctx->fbtfm)) {
- pr_err("failed to alloc xts mode fallback tfm!\n");
+ pr_err("failed to alloc fallback tfm for %s!\n", alg);
return PTR_ERR(c_ctx->fbtfm);
}
@@ -856,7 +853,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
memcpy(c_ctx->c_key, key, keylen);
- if (c_ctx->fallback && c_ctx->fbtfm) {
+ if (c_ctx->fbtfm) {
ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
if (ret) {
dev_err(dev, "failed to set fallback skcipher key!\n");
@@ -946,15 +943,14 @@ static int sec_aead_mac_init(struct sec_aead_req *req)
struct aead_request *aead_req = req->aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
size_t authsize = crypto_aead_authsize(tfm);
- u8 *mac_out = req->out_mac;
struct scatterlist *sgl = aead_req->src;
+ u8 *mac_out = req->out_mac;
size_t copy_size;
off_t skip_size;
/* Copy input mac */
skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
- copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
- authsize, skip_size);
+ copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size);
if (unlikely(copy_size != authsize))
return -EINVAL;
@@ -1089,11 +1085,6 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
struct crypto_shash *hash_tfm = ctx->hash_tfm;
int blocksize, digestsize, ret;
- if (!keys->authkeylen) {
- pr_err("hisi_sec2: aead auth key error!\n");
- return -EINVAL;
- }
-
blocksize = crypto_shash_blocksize(hash_tfm);
digestsize = crypto_shash_digestsize(hash_tfm);
if (keys->authkeylen > blocksize) {
@@ -1105,7 +1096,8 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
}
ctx->a_key_len = digestsize;
} else {
- memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
+ if (keys->authkeylen)
+ memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
ctx->a_key_len = keys->authkeylen;
}
@@ -1118,10 +1110,7 @@ static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
- if (unlikely(a_ctx->fallback_aead_tfm))
- return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
-
- return 0;
+ return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
}
static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
@@ -1137,7 +1126,6 @@ static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
const u32 keylen, const enum sec_hash_alg a_alg,
const enum sec_calg c_alg,
- const enum sec_mac_len mac_len,
const enum sec_cmode c_mode)
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
@@ -1149,7 +1137,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
ctx->a_ctx.a_alg = a_alg;
ctx->c_ctx.c_alg = c_alg;
- ctx->a_ctx.mac_len = mac_len;
c_ctx->c_mode = c_mode;
if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
@@ -1160,18 +1147,14 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
}
memcpy(c_ctx->c_key, key, keylen);
- if (unlikely(a_ctx->fallback_aead_tfm)) {
- ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
- if (ret)
- return ret;
- }
-
- return 0;
+ return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
}
ret = crypto_authenc_extractkeys(&keys, key, keylen);
- if (ret)
+ if (ret) {
+ dev_err(dev, "sec extract aead keys err!\n");
goto bad_key;
+ }
ret = sec_aead_aes_set_key(c_ctx, &keys);
if (ret) {
@@ -1185,10 +1168,9 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
goto bad_key;
}
- if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) ||
- (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
- ret = -EINVAL;
- dev_err(dev, "MAC or AUTH key length error!\n");
+ ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
+ if (ret) {
+ dev_err(dev, "set sec fallback key err!\n");
goto bad_key;
}
@@ -1200,27 +1182,19 @@ bad_key:
}
-#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode) \
-static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, \
- u32 keylen) \
-{ \
- return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
-}
-
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
- SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
- SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
- SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
- SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
- SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
-GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
- SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
-GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
- SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
+#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode) \
+static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen) \
+{ \
+ return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode); \
+}
+
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM)
static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
@@ -1468,9 +1442,10 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
- struct sec_cipher_req *c_req = &req->c_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+ size_t authsize = crypto_aead_authsize(tfm);
struct sec_aead_req *a_req = &req->aead_req;
- size_t authsize = ctx->a_ctx.mac_len;
+ struct sec_cipher_req *c_req = &req->c_req;
u32 data_size = aead_req->cryptlen;
u8 flage = 0;
u8 cm, cl;
@@ -1511,10 +1486,8 @@ static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
- struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
- size_t authsize = crypto_aead_authsize(tfm);
- struct sec_cipher_req *c_req = &req->c_req;
struct sec_aead_req *a_req = &req->aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
@@ -1522,15 +1495,11 @@ static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
/*
	 * CCM 16Byte Cipher_IV: {1B_Flag,13B_IV,2B_counter},
	 * the counter must be set to 0x01
+	 * CCM 16Byte Auth_IV: {1B_AFlag,13B_IV,2B_Ptext_length}
*/
- ctx->a_ctx.mac_len = authsize;
- /* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */
set_aead_auth_iv(ctx, req);
- }
-
- /* GCM 12Byte Cipher_IV == Auth_IV */
- if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
- ctx->a_ctx.mac_len = authsize;
+ } else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
+ /* GCM 12Byte Cipher_IV == Auth_IV */
memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
}
}
@@ -1540,9 +1509,11 @@ static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
{
struct sec_aead_req *a_req = &req->aead_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
- sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize);
/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
@@ -1566,9 +1537,11 @@ static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
{
struct sec_aead_req *a_req = &req->aead_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
- sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);
+ sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3);
/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
sqe3->a_key_addr = sqe3->c_key_addr;
@@ -1592,15 +1565,15 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
struct sec_aead_req *a_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
- sec_sqe->type2.mac_key_alg =
- cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);
+ sec_sqe->type2.mac_key_alg = cpu_to_le32(BYTES_TO_WORDS(authsize));
sec_sqe->type2.mac_key_alg |=
- cpu_to_le32((u32)((ctx->a_key_len) /
- SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);
+ cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET);
sec_sqe->type2.mac_key_alg |=
cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
@@ -1646,16 +1619,16 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
struct sec_aead_req *a_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
sqe3->auth_mac_key |=
- cpu_to_le32((u32)(ctx->mac_len /
- SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
+ cpu_to_le32(BYTES_TO_WORDS(authsize) << SEC_MAC_OFFSET_V3);
sqe3->auth_mac_key |=
- cpu_to_le32((u32)(ctx->a_key_len /
- SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3);
+ cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET_V3);
sqe3->auth_mac_key |=
cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);
@@ -1701,9 +1674,9 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
struct aead_request *a_req = req->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+ size_t authsize = crypto_aead_authsize(tfm);
struct sec_aead_req *aead_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
- size_t authsize = crypto_aead_authsize(tfm);
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
struct aead_request *backlog_aead_req;
struct sec_req *backlog_req;
@@ -1716,10 +1689,8 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
if (!err && c_req->encrypt) {
struct scatterlist *sgl = a_req->dst;
- sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
- aead_req->out_mac,
- authsize, a_req->cryptlen +
- a_req->assoclen);
+ sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac,
+ authsize, a_req->cryptlen + a_req->assoclen);
if (unlikely(sz != authsize)) {
dev_err(c->dev, "copy out mac err!\n");
err = -EINVAL;
@@ -1927,8 +1898,10 @@ static void sec_aead_exit(struct crypto_aead *tfm)
static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
- struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+ const char *aead_name = alg->base.cra_name;
int ret;
ret = sec_aead_init(tfm);
@@ -1937,11 +1910,20 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
return ret;
}
- auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
- if (IS_ERR(auth_ctx->hash_tfm)) {
+ a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(a_ctx->hash_tfm)) {
dev_err(ctx->dev, "aead alloc shash error!\n");
sec_aead_exit(tfm);
- return PTR_ERR(auth_ctx->hash_tfm);
+ return PTR_ERR(a_ctx->hash_tfm);
+ }
+
+ a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
+ if (IS_ERR(a_ctx->fallback_aead_tfm)) {
+ dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
+ crypto_free_shash(ctx->a_ctx.hash_tfm);
+ sec_aead_exit(tfm);
+ return PTR_ERR(a_ctx->fallback_aead_tfm);
}
return 0;
@@ -1951,6 +1933,7 @@ static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
crypto_free_shash(ctx->a_ctx.hash_tfm);
sec_aead_exit(tfm);
}
@@ -1977,7 +1960,6 @@ static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
sec_aead_exit(tfm);
return PTR_ERR(a_ctx->fallback_aead_tfm);
}
- a_ctx->fallback = false;
return 0;
}
@@ -2005,8 +1987,7 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
return sec_aead_ctx_init(tfm, "sha512");
}
-static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
- struct sec_req *sreq)
+static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
u32 cryptlen = sreq->c_req.sk_req->cryptlen;
struct device *dev = ctx->dev;
@@ -2028,10 +2009,6 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
}
break;
case SEC_CMODE_CTR:
- if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
- dev_err(dev, "skcipher HW version error!\n");
- ret = -EINVAL;
- }
break;
default:
ret = -EINVAL;
@@ -2040,17 +2017,21 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
return ret;
}
-static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+static int sec_skcipher_param_check(struct sec_ctx *ctx,
+ struct sec_req *sreq, bool *need_fallback)
{
struct skcipher_request *sk_req = sreq->c_req.sk_req;
struct device *dev = ctx->dev;
u8 c_alg = ctx->c_ctx.c_alg;
- if (unlikely(!sk_req->src || !sk_req->dst ||
- sk_req->cryptlen > MAX_INPUT_DATA_LEN)) {
+ if (unlikely(!sk_req->src || !sk_req->dst)) {
dev_err(dev, "skcipher input param error!\n");
return -EINVAL;
}
+
+ if (sk_req->cryptlen > MAX_INPUT_DATA_LEN)
+ *need_fallback = true;
+
sreq->c_req.c_len = sk_req->cryptlen;
if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
@@ -2108,6 +2089,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
struct sec_req *req = skcipher_request_ctx(sk_req);
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ bool need_fallback = false;
int ret;
if (!sk_req->cryptlen) {
@@ -2121,11 +2103,11 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
req->c_req.encrypt = encrypt;
req->ctx = ctx;
- ret = sec_skcipher_param_check(ctx, req);
+ ret = sec_skcipher_param_check(ctx, req, &need_fallback);
if (unlikely(ret))
return -EINVAL;
- if (unlikely(ctx->c_ctx.fallback))
+ if (unlikely(ctx->c_ctx.fallback || need_fallback))
return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
return ctx->req_op->process(ctx, req);
@@ -2231,55 +2213,37 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
struct aead_request *req = sreq->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- size_t authsize = crypto_aead_authsize(tfm);
+ size_t sz = crypto_aead_authsize(tfm);
u8 c_mode = ctx->c_ctx.c_mode;
- struct device *dev = ctx->dev;
int ret;
- if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
- req->assoclen > SEC_MAX_AAD_LEN)) {
- dev_err(dev, "aead input spec error!\n");
+ if (unlikely(ctx->sec->qm.ver == QM_HW_V2 && !sreq->c_req.c_len))
return -EINVAL;
- }
- if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
- (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
- authsize & MAC_LEN_MASK)))) {
- dev_err(dev, "aead input mac length error!\n");
+ if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
+ req->assoclen > SEC_MAX_AAD_LEN))
return -EINVAL;
- }
if (c_mode == SEC_CMODE_CCM) {
- if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) {
- dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n");
+ if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN))
return -EINVAL;
- }
- ret = aead_iv_demension_check(req);
- if (ret) {
- dev_err(dev, "aead input iv param error!\n");
- return ret;
- }
- }
- if (sreq->c_req.encrypt)
- sreq->c_req.c_len = req->cryptlen;
- else
- sreq->c_req.c_len = req->cryptlen - authsize;
- if (c_mode == SEC_CMODE_CBC) {
- if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
- dev_err(dev, "aead crypto length error!\n");
+ ret = aead_iv_demension_check(req);
+ if (unlikely(ret))
+ return -EINVAL;
+ } else if (c_mode == SEC_CMODE_CBC) {
+ if (unlikely(sz & WORD_MASK))
+ return -EINVAL;
+ if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK))
return -EINVAL;
- }
}
return 0;
}
-static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq, bool *need_fallback)
{
struct aead_request *req = sreq->aead_req.aead_req;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- size_t authsize = crypto_aead_authsize(tfm);
struct device *dev = ctx->dev;
u8 c_alg = ctx->c_ctx.c_alg;
@@ -2288,12 +2252,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
return -EINVAL;
}
- if (ctx->sec->qm.ver == QM_HW_V2) {
- if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
- req->cryptlen <= authsize))) {
- ctx->a_ctx.fallback = true;
- return -EINVAL;
- }
+ if (unlikely(ctx->c_ctx.c_mode == SEC_CMODE_CBC &&
+ sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
+ dev_err(dev, "aead cbc mode input data length error!\n");
+ return -EINVAL;
}
/* Support AES or SM4 */
@@ -2302,8 +2264,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
return -EINVAL;
}
- if (unlikely(sec_aead_spec_check(ctx, sreq)))
+ if (unlikely(sec_aead_spec_check(ctx, sreq))) {
+ *need_fallback = true;
return -EINVAL;
+ }
if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
SEC_PBUF_SZ)
@@ -2319,16 +2283,9 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
bool encrypt)
{
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
- struct device *dev = ctx->dev;
struct aead_request *subreq;
int ret;
- /* Kunpeng920 aead mode not support input 0 size */
- if (!a_ctx->fallback_aead_tfm) {
- dev_err(dev, "aead fallback tfm is NULL!\n");
- return -EINVAL;
- }
-
subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
if (!subreq)
return -ENOMEM;
@@ -2354,16 +2311,19 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
struct sec_req *req = aead_request_ctx(a_req);
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ size_t sz = crypto_aead_authsize(tfm);
+ bool need_fallback = false;
int ret;
req->flag = a_req->base.flags;
req->aead_req.aead_req = a_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
+ req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz);
- ret = sec_aead_param_check(ctx, req);
+ ret = sec_aead_param_check(ctx, req, &need_fallback);
if (unlikely(ret)) {
- if (ctx->a_ctx.fallback)
+ if (need_fallback)
return sec_aead_soft_crypto(ctx, a_req, encrypt);
return -EINVAL;
}
@@ -2518,8 +2478,8 @@ int sec_register_to_crypto(struct hisi_qm *qm)
u64 alg_mask;
int ret = 0;
- alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
- SEC_DRV_ALG_BITMAP_LOW_IDX);
+ alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB,
+ SEC_DRV_ALG_BITMAP_LOW_TB);
mutex_lock(&sec_algs_lock);
if (sec_available_devs) {
@@ -2551,8 +2511,8 @@ void sec_unregister_from_crypto(struct hisi_qm *qm)
{
u64 alg_mask;
- alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
- SEC_DRV_ALG_BITMAP_LOW_IDX);
+ alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB,
+ SEC_DRV_ALG_BITMAP_LOW_TB);
mutex_lock(&sec_algs_lock);
if (--sec_available_devs)
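
Several hunks above replace ctx->mac_len / SEC_SQE_LEN_RATE with BYTES_TO_WORDS(crypto_aead_authsize(tfm)). Since the removed SEC_SQE_LEN_RATE was 4 and BYTES_TO_WORDS() shifts right by two, the two encodings agree for every word-aligned MAC size, and sec_aead_spec_check() now rejects CBC authsizes that are not word-aligned (the sz & WORD_MASK test). An illustrative spot check, not part of the patch, using sizes from the removed sec_mac_len enum:

	static_assert(BYTES_TO_WORDS(16) == 16 / 4);	/* CCM/GCM MAC */
	static_assert(BYTES_TO_WORDS(20) == 20 / 4);	/* HMAC-SHA1 MAC */
	static_assert(BYTES_TO_WORDS(64) == 64 / 4);	/* HMAC-SHA512 MAC */
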
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index 27a0ee5ad913..04725b514382 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -23,17 +23,6 @@ enum sec_hash_alg {
SEC_A_HMAC_SHA512 = 0x15,
};
-enum sec_mac_len {
- SEC_HMAC_CCM_MAC = 16,
- SEC_HMAC_GCM_MAC = 16,
- SEC_SM3_MAC = 32,
- SEC_HMAC_SM3_MAC = 32,
- SEC_HMAC_MD5_MAC = 16,
- SEC_HMAC_SHA1_MAC = 20,
- SEC_HMAC_SHA256_MAC = 32,
- SEC_HMAC_SHA512_MAC = 64,
-};
-
enum sec_cmode {
SEC_CMODE_ECB = 0x0,
SEC_CMODE_CBC = 0x1,
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index c290d8937b19..72cf48d1f3ab 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -14,9 +14,9 @@
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>
-
#include "sec.h"
+#define CAP_FILE_PERMISSION 0444
#define SEC_VF_NUM 63
#define SEC_QUEUE_NUM_V1 4096
#define PCI_DEVICE_ID_HUAWEI_SEC_PF 0xa255
@@ -99,8 +99,8 @@
#define SEC_DBGFS_VAL_MAX_LEN 20
#define SEC_SINGLE_PORT_MAX_TRANS 0x2060
-#define SEC_SQE_MASK_OFFSET 64
-#define SEC_SQE_MASK_LEN 48
+#define SEC_SQE_MASK_OFFSET 16
+#define SEC_SQE_MASK_LEN 108
#define SEC_SHAPER_TYPE_RATE 400
#define SEC_DFX_BASE 0x301000
@@ -152,7 +152,7 @@ static const struct hisi_qm_cap_info sec_basic_info[] = {
{SEC_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x1, 0x1, 0x1},
{SEC_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x4, 0x4, 0x4},
{SEC_CORES_PER_CLUSTER_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x4, 0x4, 0x4},
- {SEC_CORE_ENABLE_BITMAP, 0x3140, 32, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
+ {SEC_CORE_ENABLE_BITMAP, 0x3140, 0, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
{SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x18670CF},
{SEC_DRV_ALG_BITMAP_HIGH, 0x3148, 0, GENMASK(31, 0), 0x395C, 0x395C, 0x395C},
{SEC_DEV_ALG_BITMAP_LOW, 0x314c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
@@ -167,11 +167,34 @@ static const struct hisi_qm_cap_info sec_basic_info[] = {
{SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
};
-static const u32 sec_pre_store_caps[] = {
- SEC_DRV_ALG_BITMAP_LOW,
- SEC_DRV_ALG_BITMAP_HIGH,
- SEC_DEV_ALG_BITMAP_LOW,
- SEC_DEV_ALG_BITMAP_HIGH,
+static const struct hisi_qm_cap_query_info sec_cap_query_info[] = {
+ {QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE ", 0x3124, 0x0, 0x1C77, 0x7C77},
+ {QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET ", 0x3128, 0x0, 0xC77, 0x6C77},
+ {QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE ", 0x312C, 0x0, 0x8, 0x8},
+ {SEC_RAS_NFE_TYPE, "SEC_RAS_NFE_TYPE ", 0x3130, 0x0, 0x177, 0x60177},
+ {SEC_RAS_NFE_RESET, "SEC_RAS_NFE_RESET ", 0x3134, 0x0, 0x177, 0x177},
+ {SEC_RAS_CE_TYPE, "SEC_RAS_CE_TYPE ", 0x3138, 0x0, 0x88, 0xC088},
+ {SEC_CORE_INFO, "SEC_CORE_INFO ", 0x313c, 0x110404, 0x110404, 0x110404},
+ {SEC_CORE_EN, "SEC_CORE_EN ", 0x3140, 0x17F, 0x17F, 0xF},
+ {SEC_DRV_ALG_BITMAP_LOW_TB, "SEC_DRV_ALG_BITMAP_LOW ",
+ 0x3144, 0x18050CB, 0x18050CB, 0x18670CF},
+ {SEC_DRV_ALG_BITMAP_HIGH_TB, "SEC_DRV_ALG_BITMAP_HIGH ",
+ 0x3148, 0x395C, 0x395C, 0x395C},
+ {SEC_ALG_BITMAP_LOW, "SEC_ALG_BITMAP_LOW ",
+ 0x314c, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_ALG_BITMAP_HIGH, "SEC_ALG_BITMAP_HIGH ", 0x3150, 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE1_BITMAP_LOW, "SEC_CORE1_BITMAP_LOW ",
+ 0x3154, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE1_BITMAP_HIGH, "SEC_CORE1_BITMAP_HIGH ", 0x3158, 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE2_BITMAP_LOW, "SEC_CORE2_BITMAP_LOW ",
+ 0x315c, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE2_BITMAP_HIGH, "SEC_CORE2_BITMAP_HIGH ", 0x3160, 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE3_BITMAP_LOW, "SEC_CORE3_BITMAP_LOW ",
+ 0x3164, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE3_BITMAP_HIGH, "SEC_CORE3_BITMAP_HIGH ", 0x3168, 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE4_BITMAP_LOW, "SEC_CORE4_BITMAP_LOW ",
+ 0x316c, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE4_BITMAP_HIGH, "SEC_CORE4_BITMAP_HIGH ", 0x3170, 0x3FFF, 0x3FFF, 0x3FFF},
};
static const struct qm_dev_alg sec_dev_algs[] = { {
@@ -322,7 +345,7 @@ static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
pf_q_num_flag = true;
- return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
+ return hisi_qm_q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
}
static const struct kernel_param_ops sec_pf_q_num_ops = {
@@ -838,6 +861,26 @@ static int sec_regs_show(struct seq_file *s, void *unused)
DEFINE_SHOW_ATTRIBUTE(sec_regs);
+static int sec_cap_regs_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+ u32 i, size;
+
+ size = qm->cap_tables.qm_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name,
+ qm->cap_tables.qm_cap_table[i].cap_val);
+
+ size = qm->cap_tables.dev_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name,
+ qm->cap_tables.dev_cap_table[i].cap_val);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(sec_cap_regs);
+
static int sec_core_debug_init(struct hisi_qm *qm)
{
struct dfx_diff_registers *sec_regs = qm->debug.acc_diff_regs;
@@ -872,6 +915,9 @@ static int sec_core_debug_init(struct hisi_qm *qm)
tmp_d, data, &sec_atomic64_ops);
}
+ debugfs_create_file("cap_regs", CAP_FILE_PERMISSION,
+ qm->debug.debug_root, qm, &sec_cap_regs_fops);
+
return 0;
}
@@ -901,37 +947,36 @@ static int sec_debugfs_init(struct hisi_qm *qm)
struct device *dev = &qm->pdev->dev;
int ret;
- qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
- sec_debugfs_root);
- qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
- qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
-
ret = hisi_qm_regs_debugfs_init(qm, sec_diff_regs, ARRAY_SIZE(sec_diff_regs));
if (ret) {
dev_warn(dev, "Failed to init SEC diff regs!\n");
- goto debugfs_remove;
+ return ret;
}
+ qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
+ sec_debugfs_root);
+ qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
+ qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
+
hisi_qm_debug_init(qm);
ret = sec_debug_init(qm);
if (ret)
- goto failed_to_create;
+ goto debugfs_remove;
return 0;
-failed_to_create:
- hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
debugfs_remove:
- debugfs_remove_recursive(sec_debugfs_root);
+ debugfs_remove_recursive(qm->debug.debug_root);
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
return ret;
}
static void sec_debugfs_exit(struct hisi_qm *qm)
{
- hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
-
debugfs_remove_recursive(qm->debug.debug_root);
+
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
}
static int sec_show_last_regs_init(struct hisi_qm *qm)
@@ -1011,11 +1056,15 @@ static u32 sec_get_hw_err_status(struct hisi_qm *qm)
static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
- u32 nfe;
-
writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
- nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
- writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
+}
+
+static void sec_disable_error_report(struct hisi_qm *qm, u32 err_type)
+{
+ u32 nfe_mask;
+
+ nfe_mask = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
+ writel(nfe_mask & (~err_type), qm->io_base + SEC_RAS_NFE_REG);
}
static void sec_open_axi_master_ooo(struct hisi_qm *qm)
@@ -1027,6 +1076,38 @@ static void sec_open_axi_master_ooo(struct hisi_qm *qm)
writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG);
}
+static enum acc_err_result sec_get_err_result(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = sec_get_hw_err_status(qm);
+ if (err_status) {
+ if (err_status & qm->err_info.ecc_2bits_mask)
+ qm->err_status.is_dev_ecc_mbit = true;
+ sec_log_hw_error(qm, err_status);
+
+ if (err_status & qm->err_info.dev_reset_mask) {
+ /* Disable the same error reporting until device is recovered. */
+ sec_disable_error_report(qm, err_status);
+ return ACC_ERR_NEED_RESET;
+ }
+ sec_clear_hw_err_status(qm, err_status);
+ }
+
+ return ACC_ERR_RECOVERED;
+}
+
+static bool sec_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = sec_get_hw_err_status(qm);
+ if (err_status & qm->err_info.dev_shutdown_mask)
+ return true;
+
+ return false;
+}
+
static void sec_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
@@ -1053,12 +1134,13 @@ static const struct hisi_qm_err_ini sec_err_ini = {
.hw_err_disable = sec_hw_error_disable,
.get_dev_hw_err_status = sec_get_hw_err_status,
.clear_dev_hw_err_status = sec_clear_hw_err_status,
- .log_dev_hw_err = sec_log_hw_error,
.open_axi_master_ooo = sec_open_axi_master_ooo,
.open_sva_prefetch = sec_open_sva_prefetch,
.close_sva_prefetch = sec_close_sva_prefetch,
.show_last_dfx_regs = sec_show_last_dfx_regs,
.err_info_init = sec_err_info_init,
+ .get_err_result = sec_get_err_result,
+ .dev_is_abnormal = sec_dev_is_abnormal,
};
static int sec_pf_probe_init(struct sec_dev *sec)
@@ -1066,9 +1148,6 @@ static int sec_pf_probe_init(struct sec_dev *sec)
struct hisi_qm *qm = &sec->qm;
int ret;
- qm->err_ini = &sec_err_ini;
- qm->err_ini->err_info_init(qm);
-
ret = sec_set_user_domain_and_cache(qm);
if (ret)
return ret;
@@ -1089,18 +1168,20 @@ static int sec_pre_store_cap_reg(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
size_t i, size;
- size = ARRAY_SIZE(sec_pre_store_caps);
+ size = ARRAY_SIZE(sec_cap_query_info);
sec_cap = devm_kzalloc(&pdev->dev, sizeof(*sec_cap) * size, GFP_KERNEL);
if (!sec_cap)
return -ENOMEM;
for (i = 0; i < size; i++) {
- sec_cap[i].type = sec_pre_store_caps[i];
- sec_cap[i].cap_val = hisi_qm_get_hw_info(qm, sec_basic_info,
- sec_pre_store_caps[i], qm->cap_ver);
+ sec_cap[i].type = sec_cap_query_info[i].type;
+ sec_cap[i].name = sec_cap_query_info[i].name;
+ sec_cap[i].cap_val = hisi_qm_get_cap_value(qm, sec_cap_query_info,
+ i, qm->cap_ver);
}
qm->cap_tables.dev_cap_table = sec_cap;
+ qm->cap_tables.dev_cap_size = size;
return 0;
}
@@ -1111,7 +1192,6 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
int ret;
qm->pdev = pdev;
- qm->ver = pdev->revision;
qm->mode = uacce_mode;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
@@ -1123,6 +1203,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qp_num = pf_q_num;
qm->debug.curr_qm_qp_num = pf_q_num;
qm->qm_list = &sec_devices;
+ qm->err_ini = &sec_err_ini;
if (pf_q_num_flag)
set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
@@ -1149,8 +1230,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
hisi_qm_uninit(qm);
return ret;
}
-
- alg_msk = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH_IDX, SEC_DEV_ALG_BITMAP_LOW_IDX);
+ alg_msk = sec_get_alg_bitmap(qm, SEC_ALG_BITMAP_HIGH, SEC_ALG_BITMAP_LOW);
ret = hisi_qm_set_algs(qm, alg_msk, sec_dev_algs, ARRAY_SIZE(sec_dev_algs));
if (ret) {
pci_err(qm->pdev, "Failed to set sec algs!\n");
@@ -1187,6 +1267,12 @@ static int sec_probe_init(struct sec_dev *sec)
static void sec_probe_uninit(struct hisi_qm *qm)
{
+ if (qm->fun_type == QM_HW_VF)
+ return;
+
+ sec_debug_regs_clear(qm);
+ sec_show_last_regs_uninit(qm);
+ sec_close_sva_prefetch(qm);
hisi_qm_dev_err_uninit(qm);
}
@@ -1275,7 +1361,6 @@ err_qm_del_list:
sec_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
err_probe_uninit:
- sec_show_last_regs_uninit(qm);
sec_probe_uninit(qm);
err_qm_uninit:
sec_qm_uninit(qm);
@@ -1297,11 +1382,6 @@ static void sec_remove(struct pci_dev *pdev)
sec_debugfs_exit(qm);
(void)hisi_qm_stop(qm, QM_NORMAL);
-
- if (qm->fun_type == QM_HW_PF)
- sec_debug_regs_clear(qm);
- sec_show_last_regs_uninit(qm);
-
sec_probe_uninit(qm);
sec_qm_uninit(qm);
@@ -1324,7 +1404,8 @@ static struct pci_driver sec_pci_driver = {
.probe = sec_probe,
.remove = sec_remove,
.err_handler = &sec_err_handler,
- .sriov_configure = hisi_qm_sriov_configure,
+ .sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
+ hisi_qm_sriov_configure : NULL,
.shutdown = hisi_qm_dev_shutdown,
.driver.pm = &sec_pm_ops,
};
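
For reference, sec_cap_regs_show() walks both capability tables and prints one "<name>= 0x%08x" line per entry, so reading the new node yields output of the following shape. The debugfs path and the values are illustrative (values taken from the sec_cap_query_info[] defaults above, column alignment from the padded name strings), not captured from hardware:

$ cat /sys/kernel/debug/hisi_sec2/<bdf>/cap_regs
QM_RAS_NFE_TYPE         = 0x00001c77
QM_RAS_NFE_RESET        = 0x00000c77
SEC_DRV_ALG_BITMAP_LOW  = 0x018050cb
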
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 0beca257c20b..c974f95cd126 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -161,9 +161,6 @@ static struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool,
struct mem_block *block;
u32 block_index, offset;
- if (!pool || !hw_sgl_dma || index >= pool->count)
- return ERR_PTR(-EINVAL);
-
block = pool->mem_block;
block_index = index / pool->sgl_num_per_block;
offset = index % pool->sgl_num_per_block;
@@ -228,9 +225,9 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
dma_addr_t curr_sgl_dma = 0;
struct acc_hw_sge *curr_hw_sge;
struct scatterlist *sg;
- int sg_n;
+ int sg_n, ret;
- if (!dev || !sgl || !pool || !hw_sgl_dma)
+ if (!dev || !sgl || !pool || !hw_sgl_dma || index >= pool->count)
return ERR_PTR(-EINVAL);
sg_n = sg_nents(sgl);
@@ -243,14 +240,15 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
if (sg_n_mapped > pool->sge_nr) {
dev_err(dev, "the number of entries in input scatterlist is bigger than SGL pool setting.\n");
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto err_unmap;
}
curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
if (IS_ERR(curr_hw_sgl)) {
dev_err(dev, "Get SGL error!\n");
- dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto err_unmap;
}
curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
curr_hw_sge = curr_hw_sgl->sge_entries;
@@ -265,6 +263,11 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
*hw_sgl_dma = curr_sgl_dma;
return curr_hw_sgl;
+
+err_unmap:
+ dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
+
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl);
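
With the index bounds check hoisted into hisi_acc_sg_buf_map_to_hw_sgl() and the unmap consolidated under err_unmap, the caller-visible contract is: pass a valid pool slot up front, and on any failure after dma_map_sg() the scatterlist has already been unmapped. A minimal hedged usage sketch (a_req->src and req->req_id are illustrative names, mirroring how zip_crypto.c consumes the pool):

	struct hisi_acc_hw_sgl *hw_sgl;
	dma_addr_t hw_sgl_dma;

	hw_sgl = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
					       req->req_id, &hw_sgl_dma);
	if (IS_ERR(hw_sgl))	/* sgl already unmapped on post-map failures */
		return PTR_ERR(hw_sgl);
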
diff --git a/drivers/crypto/hisilicon/trng/trng.c b/drivers/crypto/hisilicon/trng/trng.c
index 451b167bcc73..ac74df4a9471 100644
--- a/drivers/crypto/hisilicon/trng/trng.c
+++ b/drivers/crypto/hisilicon/trng/trng.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
+#include <crypto/internal/rng.h>
#include <linux/acpi.h>
#include <linux/crypto.h>
#include <linux/err.h>
@@ -13,7 +14,6 @@
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/random.h>
-#include <crypto/internal/rng.h>
#define HISI_TRNG_REG 0x00F0
#define HISI_TRNG_BYTES 4
@@ -121,7 +121,7 @@ static int hisi_trng_generate(struct crypto_rng *tfm, const u8 *src,
u32 i;
if (dlen > SW_DRBG_BLOCKS_NUM * SW_DRBG_BYTES || dlen == 0) {
- pr_err("dlen(%d) exceeds limit(%d)!\n", dlen,
+ pr_err("dlen(%u) exceeds limit(%d)!\n", dlen,
SW_DRBG_BLOCKS_NUM * SW_DRBG_BYTES);
return -EINVAL;
}
@@ -324,7 +324,7 @@ MODULE_DEVICE_TABLE(acpi, hisi_trng_acpi_match);
static struct platform_driver hisi_trng_driver = {
.probe = hisi_trng_probe,
- .remove_new = hisi_trng_remove,
+ .remove = hisi_trng_remove,
.driver = {
.name = "hisi-trng-v2",
.acpi_match_table = ACPI_PTR(hisi_trng_acpi_match),
diff --git a/drivers/crypto/hisilicon/zip/Makefile b/drivers/crypto/hisilicon/zip/Makefile
index a936f099ee22..13de020b77d6 100644
--- a/drivers/crypto/hisilicon/zip/Makefile
+++ b/drivers/crypto/hisilicon/zip/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += hisi_zip.o
-hisi_zip-objs = zip_main.o zip_crypto.o
+hisi_zip-objs = zip_main.o zip_crypto.o dae_main.o
diff --git a/drivers/crypto/hisilicon/zip/dae_main.c b/drivers/crypto/hisilicon/zip/dae_main.c
new file mode 100644
index 000000000000..6f22e4c36e49
--- /dev/null
+++ b/drivers/crypto/hisilicon/zip/dae_main.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 HiSilicon Limited. */
+
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/uacce.h>
+#include "zip.h"
+
+/* memory */
+#define DAE_MEM_START_OFFSET 0x331040
+#define DAE_MEM_DONE_OFFSET 0x331044
+#define DAE_MEM_START_MASK 0x1
+#define DAE_MEM_DONE_MASK 0x1
+#define DAE_REG_RD_INTVRL_US 10
+#define DAE_REG_RD_TMOUT_US USEC_PER_SEC
+
+#define DAE_ALG_NAME "hashagg"
+
+/* error */
+#define DAE_AXI_CFG_OFFSET 0x331000
+#define DAE_AXI_SHUTDOWN_MASK (BIT(0) | BIT(5))
+#define DAE_ERR_SOURCE_OFFSET 0x331C84
+#define DAE_ERR_STATUS_OFFSET 0x331C88
+#define DAE_ERR_CE_OFFSET 0x331CA0
+#define DAE_ERR_CE_MASK BIT(3)
+#define DAE_ERR_NFE_OFFSET 0x331CA4
+#define DAE_ERR_NFE_MASK 0x17
+#define DAE_ERR_FE_OFFSET 0x331CA8
+#define DAE_ERR_FE_MASK 0
+#define DAE_ECC_MBIT_MASK BIT(2)
+#define DAE_ECC_INFO_OFFSET 0x33400C
+#define DAE_ERR_SHUTDOWN_OFFSET 0x331CAC
+#define DAE_ERR_SHUTDOWN_MASK 0x17
+#define DAE_ERR_ENABLE_OFFSET 0x331C80
+#define DAE_ERR_ENABLE_MASK (DAE_ERR_FE_MASK | DAE_ERR_NFE_MASK | DAE_ERR_CE_MASK)
+#define DAE_AM_CTRL_GLOBAL_OFFSET 0x330000
+#define DAE_AM_RETURN_OFFSET 0x330150
+#define DAE_AM_RETURN_MASK 0x3
+#define DAE_AXI_SHUTDOWN_EN_MASK (BIT(0) | BIT(5))
+
+struct hisi_dae_hw_error {
+ u32 int_msk;
+ const char *msg;
+};
+
+static const struct hisi_dae_hw_error dae_hw_error[] = {
+ { .int_msk = BIT(0), .msg = "dae_axi_bus_err" },
+ { .int_msk = BIT(1), .msg = "dae_axi_poison_err" },
+ { .int_msk = BIT(2), .msg = "dae_ecc_2bit_err" },
+ { .int_msk = BIT(3), .msg = "dae_ecc_1bit_err" },
+ { .int_msk = BIT(4), .msg = "dae_fsm_hbeat_err" },
+};
+
+static inline bool dae_is_support(struct hisi_qm *qm)
+{
+	return test_bit(QM_SUPPORT_DAE, &qm->caps);
+}
+
+int hisi_dae_set_user_domain(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!dae_is_support(qm))
+ return 0;
+
+ val = readl(qm->io_base + DAE_MEM_START_OFFSET);
+ val |= DAE_MEM_START_MASK;
+ writel(val, qm->io_base + DAE_MEM_START_OFFSET);
+ ret = readl_relaxed_poll_timeout(qm->io_base + DAE_MEM_DONE_OFFSET, val,
+ val & DAE_MEM_DONE_MASK,
+ DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to init dae memory!\n");
+
+ return ret;
+}
+
+int hisi_dae_set_alg(struct hisi_qm *qm)
+{
+ size_t len;
+
+ if (!dae_is_support(qm))
+ return 0;
+
+ if (!qm->uacce)
+ return 0;
+
+ len = strlen(qm->uacce->algs);
+ /* A line break may be required */
+ if (len + strlen(DAE_ALG_NAME) + 1 >= QM_DEV_ALG_MAX_LEN) {
+ pci_err(qm->pdev, "algorithm name is too long!\n");
+ return -EINVAL;
+ }
+
+ if (len)
+ strcat((char *)qm->uacce->algs, "\n");
+
+ strcat((char *)qm->uacce->algs, DAE_ALG_NAME);
+
+ return 0;
+}
+
+static void hisi_dae_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
+{
+ u32 axi_val, err_val;
+
+ axi_val = readl(qm->io_base + DAE_AXI_CFG_OFFSET);
+ if (enable) {
+ axi_val |= DAE_AXI_SHUTDOWN_MASK;
+ err_val = DAE_ERR_SHUTDOWN_MASK;
+ } else {
+ axi_val &= ~DAE_AXI_SHUTDOWN_MASK;
+ err_val = 0;
+ }
+
+ writel(axi_val, qm->io_base + DAE_AXI_CFG_OFFSET);
+ writel(err_val, qm->io_base + DAE_ERR_SHUTDOWN_OFFSET);
+}
+
+void hisi_dae_hw_error_enable(struct hisi_qm *qm)
+{
+ if (!dae_is_support(qm))
+ return;
+
+	/* clear dae hw error source if any */
+ writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_SOURCE_OFFSET);
+
+ /* configure error type */
+ writel(DAE_ERR_CE_MASK, qm->io_base + DAE_ERR_CE_OFFSET);
+ writel(DAE_ERR_NFE_MASK, qm->io_base + DAE_ERR_NFE_OFFSET);
+ writel(DAE_ERR_FE_MASK, qm->io_base + DAE_ERR_FE_OFFSET);
+
+ hisi_dae_master_ooo_ctrl(qm, true);
+
+ /* enable dae hw error interrupts */
+ writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_ENABLE_OFFSET);
+}
+
+void hisi_dae_hw_error_disable(struct hisi_qm *qm)
+{
+ if (!dae_is_support(qm))
+ return;
+
+ writel(0, qm->io_base + DAE_ERR_ENABLE_OFFSET);
+ hisi_dae_master_ooo_ctrl(qm, false);
+}
+
+static u32 hisi_dae_get_hw_err_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + DAE_ERR_STATUS_OFFSET);
+}
+
+static void hisi_dae_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+{
+ if (!dae_is_support(qm))
+ return;
+
+ writel(err_sts, qm->io_base + DAE_ERR_SOURCE_OFFSET);
+}
+
+static void hisi_dae_disable_error_report(struct hisi_qm *qm, u32 err_type)
+{
+ writel(DAE_ERR_NFE_MASK & (~err_type), qm->io_base + DAE_ERR_NFE_OFFSET);
+}
+
+static void hisi_dae_log_hw_error(struct hisi_qm *qm, u32 err_type)
+{
+ const struct hisi_dae_hw_error *err = dae_hw_error;
+ struct device *dev = &qm->pdev->dev;
+ u32 ecc_info;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(dae_hw_error); i++) {
+ err = &dae_hw_error[i];
+ if (!(err->int_msk & err_type))
+ continue;
+
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ err->msg, err->int_msk);
+
+ if (err->int_msk & DAE_ECC_MBIT_MASK) {
+ ecc_info = readl(qm->io_base + DAE_ECC_INFO_OFFSET);
+ dev_err(dev, "dae multi ecc sram info 0x%x\n", ecc_info);
+ }
+ }
+}
+
+enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ if (!dae_is_support(qm))
+ return ACC_ERR_NONE;
+
+ err_status = hisi_dae_get_hw_err_status(qm);
+ if (!err_status)
+ return ACC_ERR_NONE;
+
+ hisi_dae_log_hw_error(qm, err_status);
+
+ if (err_status & DAE_ERR_NFE_MASK) {
+ /* Disable the same error reporting until device is recovered. */
+ hisi_dae_disable_error_report(qm, err_status);
+ return ACC_ERR_NEED_RESET;
+ }
+ hisi_dae_clear_hw_err_status(qm, err_status);
+
+ return ACC_ERR_RECOVERED;
+}
+
+bool hisi_dae_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ if (!dae_is_support(qm))
+ return false;
+
+ err_status = hisi_dae_get_hw_err_status(qm);
+ if (err_status & DAE_ERR_NFE_MASK)
+ return true;
+
+ return false;
+}
+
+int hisi_dae_close_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!dae_is_support(qm))
+ return 0;
+
+ val = readl(qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET);
+ val |= BIT(0);
+ writel(val, qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + DAE_AM_RETURN_OFFSET,
+ val, (val == DAE_AM_RETURN_MASK),
+ DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US);
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to close dae axi ooo!\n");
+
+ return ret;
+}
+
+void hisi_dae_open_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+
+ if (!dae_is_support(qm))
+ return;
+
+ val = readl(qm->io_base + DAE_AXI_CFG_OFFSET);
+
+ writel(val & ~DAE_AXI_SHUTDOWN_EN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET);
+ writel(val | DAE_AXI_SHUTDOWN_EN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET);
+}
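
zip_main.c (next) consumes these DAE hooks from its QM error callbacks. The wiring itself falls outside this excerpt, but by analogy with sec_get_err_result() above it presumably combines the engine and DAE verdicts along these lines; the hisi_zip_* helper names are assumed counterparts of the SEC ones, not quoted from the patch:

	static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
	{
		enum acc_err_result zip_result = ACC_ERR_RECOVERED;
		enum acc_err_result dae_result;
		u32 err_status;

		/* Handle ZIP engine errors first, as the SEC driver does. */
		err_status = hisi_zip_get_hw_err_status(qm);
		if (err_status) {
			if (err_status & qm->err_info.ecc_2bits_mask)
				qm->err_status.is_dev_ecc_mbit = true;
			hisi_zip_log_hw_error(qm, err_status);

			if (err_status & qm->err_info.dev_reset_mask) {
				/* Mute this error until the device recovers. */
				hisi_zip_disable_error_report(qm, err_status);
				zip_result = ACC_ERR_NEED_RESET;
			} else {
				hisi_zip_clear_hw_err_status(qm, err_status);
			}
		}

		/* A reset request from either the engine or the DAE wins. */
		dae_result = hisi_dae_get_err_result(qm);

		return (zip_result == ACC_ERR_NEED_RESET ||
			dae_result == ACC_ERR_NEED_RESET) ?
		       ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
	}
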
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index f2e6da3240ae..9fb2a9c01132 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -81,8 +81,34 @@ struct hisi_zip_sqe {
u32 rsvd1[4];
};
+enum zip_cap_table_type {
+ QM_RAS_NFE_TYPE,
+ QM_RAS_NFE_RESET,
+ QM_RAS_CE_TYPE,
+ ZIP_RAS_NFE_TYPE,
+ ZIP_RAS_NFE_RESET,
+ ZIP_RAS_CE_TYPE,
+ ZIP_CORE_INFO,
+ ZIP_CORE_EN,
+ ZIP_DRV_ALG_BITMAP_TB,
+ ZIP_ALG_BITMAP,
+ ZIP_CORE1_BITMAP,
+ ZIP_CORE2_BITMAP,
+ ZIP_CORE3_BITMAP,
+ ZIP_CORE4_BITMAP,
+ ZIP_CORE5_BITMAP,
+};
+
int zip_create_qps(struct hisi_qp **qps, int qp_num, int node);
int hisi_zip_register_to_crypto(struct hisi_qm *qm);
void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg);
+int hisi_dae_set_user_domain(struct hisi_qm *qm);
+int hisi_dae_set_alg(struct hisi_qm *qm);
+void hisi_dae_hw_error_disable(struct hisi_qm *qm);
+void hisi_dae_hw_error_enable(struct hisi_qm *qm);
+void hisi_dae_open_axi_master_ooo(struct hisi_qm *qm);
+int hisi_dae_close_axi_master_ooo(struct hisi_qm *qm);
+bool hisi_dae_dev_is_abnormal(struct hisi_qm *qm);
+enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 94e2d66b04b6..7327f8f29b01 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -54,7 +54,7 @@ struct hisi_zip_req {
struct hisi_zip_req_q {
struct hisi_zip_req *q;
unsigned long *req_bitmap;
- rwlock_t req_lock;
+ spinlock_t req_lock;
u16 size;
};
@@ -116,17 +116,17 @@ static struct hisi_zip_req *hisi_zip_create_req(struct hisi_zip_qp_ctx *qp_ctx,
struct hisi_zip_req *req_cache;
int req_id;
- write_lock(&req_q->req_lock);
+ spin_lock(&req_q->req_lock);
req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size);
if (req_id >= req_q->size) {
- write_unlock(&req_q->req_lock);
+ spin_unlock(&req_q->req_lock);
dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
return ERR_PTR(-EAGAIN);
}
set_bit(req_id, req_q->req_bitmap);
- write_unlock(&req_q->req_lock);
+ spin_unlock(&req_q->req_lock);
req_cache = q + req_id;
req_cache->req_id = req_id;
@@ -140,9 +140,9 @@ static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
{
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
- write_lock(&req_q->req_lock);
+ spin_lock(&req_q->req_lock);
clear_bit(req->req_id, req_q->req_bitmap);
- write_unlock(&req_q->req_lock);
+ spin_unlock(&req_q->req_lock);
}
static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
@@ -213,6 +213,7 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
{
struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
+ struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
struct acomp_req *a_req = req->req;
struct hisi_qp *qp = qp_ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
@@ -244,7 +245,9 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
/* send command to start a task */
atomic64_inc(&dfx->send_cnt);
+ spin_lock_bh(&req_q->req_lock);
ret = hisi_qp_send(qp, &zip_sqe);
+ spin_unlock_bh(&req_q->req_lock);
if (unlikely(ret < 0)) {
atomic64_inc(&dfx->send_busy_cnt);
ret = -EAGAIN;
@@ -456,7 +459,7 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
goto err_free_comp_q;
}
- rwlock_init(&req_q->req_lock);
+ spin_lock_init(&req_q->req_lock);
req_q->q = kcalloc(req_q->size, sizeof(struct hisi_zip_req),
GFP_KERNEL);
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index c065fd867161..d8ba23b7cc7d 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -14,6 +14,7 @@
#include <linux/uacce.h>
#include "zip.h"
+#define CAP_FILE_PERMISSION 0444
#define PCI_DEVICE_ID_HUAWEI_ZIP_PF 0xa250
#define HZIP_QUEUE_NUM_V1 4096
@@ -37,7 +38,7 @@
#define HZIP_QM_IDEL_STATUS 0x3040e4
#define HZIP_CORE_DFX_BASE 0x301000
-#define HZIP_CLOCK_GATED_CONTL 0X301004
+#define HZIP_CORE_DFX_DECOMP_BASE 0x304000
#define HZIP_CORE_DFX_COMP_0 0x302000
#define HZIP_CORE_DFX_COMP_1 0x303000
#define HZIP_CORE_DFX_DECOMP_0 0x304000
@@ -48,6 +49,7 @@
#define HZIP_CORE_DFX_DECOMP_5 0x309000
#define HZIP_CORE_REGS_BASE_LEN 0xB0
#define HZIP_CORE_REGS_DFX_LEN 0x28
+#define HZIP_CORE_ADDR_INTRVL 0x1000
#define HZIP_CORE_INT_SOURCE 0x3010A0
#define HZIP_CORE_INT_MASK_REG 0x3010A4
@@ -249,46 +251,22 @@ static struct hisi_qm_cap_info zip_basic_cap_info[] = {
{ZIP_CAP_MAX, 0x317c, 0, GENMASK(0, 0), 0x0, 0x0, 0x0}
};
-enum zip_pre_store_cap_idx {
- ZIP_CORE_NUM_CAP_IDX = 0x0,
- ZIP_CLUSTER_COMP_NUM_CAP_IDX,
- ZIP_CLUSTER_DECOMP_NUM_CAP_IDX,
- ZIP_DECOMP_ENABLE_BITMAP_IDX,
- ZIP_COMP_ENABLE_BITMAP_IDX,
- ZIP_DRV_ALG_BITMAP_IDX,
- ZIP_DEV_ALG_BITMAP_IDX,
-};
-
-static const u32 zip_pre_store_caps[] = {
- ZIP_CORE_NUM_CAP,
- ZIP_CLUSTER_COMP_NUM_CAP,
- ZIP_CLUSTER_DECOMP_NUM_CAP,
- ZIP_DECOMP_ENABLE_BITMAP,
- ZIP_COMP_ENABLE_BITMAP,
- ZIP_DRV_ALG_BITMAP,
- ZIP_DEV_ALG_BITMAP,
-};
-
-enum {
- HZIP_COMP_CORE0,
- HZIP_COMP_CORE1,
- HZIP_DECOMP_CORE0,
- HZIP_DECOMP_CORE1,
- HZIP_DECOMP_CORE2,
- HZIP_DECOMP_CORE3,
- HZIP_DECOMP_CORE4,
- HZIP_DECOMP_CORE5,
-};
-
-static const u64 core_offsets[] = {
- [HZIP_COMP_CORE0] = 0x302000,
- [HZIP_COMP_CORE1] = 0x303000,
- [HZIP_DECOMP_CORE0] = 0x304000,
- [HZIP_DECOMP_CORE1] = 0x305000,
- [HZIP_DECOMP_CORE2] = 0x306000,
- [HZIP_DECOMP_CORE3] = 0x307000,
- [HZIP_DECOMP_CORE4] = 0x308000,
- [HZIP_DECOMP_CORE5] = 0x309000,
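+/* Capability name, register offset, and the default value per hardware version. */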
+static const struct hisi_qm_cap_query_info zip_cap_query_info[] = {
+ {QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE ", 0x3124, 0x0, 0x1C57, 0x7C77},
+ {QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET ", 0x3128, 0x0, 0xC57, 0x6C77},
+ {QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE ", 0x312C, 0x0, 0x8, 0x8},
+ {ZIP_RAS_NFE_TYPE, "ZIP_RAS_NFE_TYPE ", 0x3130, 0x0, 0x7FE, 0x1FFE},
+ {ZIP_RAS_NFE_RESET, "ZIP_RAS_NFE_RESET ", 0x3134, 0x0, 0x7FE, 0x7FE},
+ {ZIP_RAS_CE_TYPE, "ZIP_RAS_CE_TYPE ", 0x3138, 0x0, 0x1, 0x1},
+ {ZIP_CORE_INFO, "ZIP_CORE_INFO ", 0x313C, 0x12080206, 0x12080206, 0x12050203},
+ {ZIP_CORE_EN, "ZIP_CORE_EN ", 0x3140, 0xFC0003, 0xFC0003, 0x1C0003},
+ {ZIP_DRV_ALG_BITMAP_TB, "ZIP_DRV_ALG_BITMAP ", 0x3144, 0x0, 0x0, 0x30},
+ {ZIP_ALG_BITMAP, "ZIP_ALG_BITMAP ", 0x3148, 0xF, 0xF, 0x3F},
+ {ZIP_CORE1_BITMAP, "ZIP_CORE1_BITMAP ", 0x314C, 0x5, 0x5, 0xD5},
+ {ZIP_CORE2_BITMAP, "ZIP_CORE2_BITMAP ", 0x3150, 0x5, 0x5, 0xD5},
+ {ZIP_CORE3_BITMAP, "ZIP_CORE3_BITMAP ", 0x3154, 0xA, 0xA, 0x2A},
+ {ZIP_CORE4_BITMAP, "ZIP_CORE4_BITMAP ", 0x3158, 0xA, 0xA, 0x2A},
+ {ZIP_CORE5_BITMAP, "ZIP_CORE5_BITMAP ", 0x315C, 0xA, 0xA, 0x2A},
};
static const struct debugfs_reg32 hzip_dfx_regs[] = {
@@ -423,7 +401,7 @@ static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
pf_q_num_flag = true;
- return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF);
+ return hisi_qm_q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF);
}
static const struct kernel_param_ops pf_q_num_ops = {
@@ -463,7 +441,7 @@ bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
{
u32 cap_val;
- cap_val = qm->cap_tables.dev_cap_table[ZIP_DRV_ALG_BITMAP_IDX].cap_val;
+ cap_val = qm->cap_tables.dev_cap_table[ZIP_DRV_ALG_BITMAP_TB].cap_val;
if ((alg & cap_val) == alg)
return true;
@@ -551,6 +529,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
void __iomem *base = qm->io_base;
u32 dcomp_bm, comp_bm;
+ u32 zip_core_en;
/* qm user domain */
writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
@@ -588,8 +567,12 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
}
/* let's open all compression/decompression cores */
- dcomp_bm = qm->cap_tables.dev_cap_table[ZIP_DECOMP_ENABLE_BITMAP_IDX].cap_val;
- comp_bm = qm->cap_tables.dev_cap_table[ZIP_COMP_ENABLE_BITMAP_IDX].cap_val;
+ zip_core_en = qm->cap_tables.dev_cap_table[ZIP_CORE_EN].cap_val;
+ dcomp_bm = (zip_core_en >> zip_basic_cap_info[ZIP_DECOMP_ENABLE_BITMAP].shift) &
+ zip_basic_cap_info[ZIP_DECOMP_ENABLE_BITMAP].mask;
+ comp_bm = (zip_core_en >> zip_basic_cap_info[ZIP_COMP_ENABLE_BITMAP].shift) &
+ zip_basic_cap_info[ZIP_COMP_ENABLE_BITMAP].mask;
writel(HZIP_DECOMP_CHECK_ENABLE | dcomp_bm | comp_bm, base + HZIP_CLOCK_GATE_CTRL);
/* enable sqc,cqc writeback */
@@ -599,7 +582,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
hisi_zip_enable_clock_gate(qm);
- return 0;
+ return hisi_dae_set_user_domain(qm);
}
static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
@@ -648,6 +631,8 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
/* enable ZIP hw error interrupts */
writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+ hisi_dae_hw_error_enable(qm);
}
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
@@ -660,6 +645,8 @@ static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG);
hisi_zip_master_ooo_ctrl(qm, false);
+
+ hisi_dae_hw_error_disable(qm);
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@@ -807,17 +794,38 @@ static int hisi_zip_regs_show(struct seq_file *s, void *unused)
DEFINE_SHOW_ATTRIBUTE(hisi_zip_regs);
+static void __iomem *get_zip_core_addr(struct hisi_qm *qm, int core_num)
+{
+ u8 zip_comp_core_num;
+ u32 zip_core_info;
+
+ zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
+ zip_comp_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].mask;
+
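+	/* Compression core windows follow the DFX base; decompression cores start at their own base. */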
+ if (core_num < zip_comp_core_num)
+ return qm->io_base + HZIP_CORE_DFX_BASE +
+ (core_num + 1) * HZIP_CORE_ADDR_INTRVL;
+
+ return qm->io_base + HZIP_CORE_DFX_DECOMP_BASE +
+ (core_num - zip_comp_core_num) * HZIP_CORE_ADDR_INTRVL;
+}
+
static int hisi_zip_core_debug_init(struct hisi_qm *qm)
{
u32 zip_core_num, zip_comp_core_num;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
+ u32 zip_core_info;
struct dentry *tmp_d;
char buf[HZIP_BUF_SIZE];
int i;
- zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
- zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val;
+ zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
+ zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask;
+ zip_comp_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].mask;
for (i = 0; i < zip_core_num; i++) {
if (i < zip_comp_core_num)
@@ -832,7 +840,7 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
regset->regs = hzip_dfx_regs;
regset->nregs = ARRAY_SIZE(hzip_dfx_regs);
- regset->base = qm->io_base + core_offsets[i];
+ regset->base = get_zip_core_addr(qm, i);
regset->dev = dev;
tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
@@ -843,6 +851,26 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
return 0;
}
+static int zip_cap_regs_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+ u32 i, size;
+
+ size = qm->cap_tables.qm_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name,
+ qm->cap_tables.qm_cap_table[i].cap_val);
+
+ size = qm->cap_tables.dev_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name,
+ qm->cap_tables.dev_cap_table[i].cap_val);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(zip_cap_regs);
+
static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
{
struct dfx_diff_registers *hzip_regs = qm->debug.acc_diff_regs;
@@ -863,6 +891,9 @@ static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
if (qm->fun_type == QM_HW_PF && hzip_regs)
debugfs_create_file("diff_regs", 0444, tmp_dir,
qm, &hzip_diff_regs_fops);
+
+ debugfs_create_file("cap_regs", CAP_FILE_PERMISSION,
+ qm->debug.debug_root, qm, &zip_cap_regs_fops);
}
static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm)
@@ -887,49 +918,53 @@ static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm)
static int hisi_zip_debugfs_init(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- struct dentry *dev_d;
int ret;
- dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root);
-
- qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET;
- qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN;
- qm->debug.debug_root = dev_d;
ret = hisi_qm_regs_debugfs_init(qm, hzip_diff_regs, ARRAY_SIZE(hzip_diff_regs));
if (ret) {
dev_warn(dev, "Failed to init ZIP diff regs!\n");
- goto debugfs_remove;
+ return ret;
}
+ qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET;
+ qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN;
+ qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
+ hzip_debugfs_root);
+
hisi_qm_debug_init(qm);
if (qm->fun_type == QM_HW_PF) {
ret = hisi_zip_ctrl_debug_init(qm);
if (ret)
- goto failed_to_create;
+ goto debugfs_remove;
}
hisi_zip_dfx_debug_init(qm);
return 0;
-failed_to_create:
- hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
debugfs_remove:
- debugfs_remove_recursive(hzip_debugfs_root);
+ debugfs_remove_recursive(qm->debug.debug_root);
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
return ret;
}
/* hisi_zip_debug_regs_clear() - clear the zip debug regs */
static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
{
+ u32 zip_core_info;
+ u8 zip_core_num;
int i, j;
+ zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
+ zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask;
+
/* enable register read_clear bit */
writel(HZIP_RD_CNT_CLR_CE_EN, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
- for (i = 0; i < ARRAY_SIZE(core_offsets); i++)
+ for (i = 0; i < zip_core_num; i++)
for (j = 0; j < ARRAY_SIZE(hzip_dfx_regs); j++)
- readl(qm->io_base + core_offsets[i] +
+ readl(get_zip_core_addr(qm, i) +
hzip_dfx_regs[j].offset);
/* disable register read_clear bit */
@@ -940,10 +975,10 @@ static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
static void hisi_zip_debugfs_exit(struct hisi_qm *qm)
{
- hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
-
debugfs_remove_recursive(qm->debug.debug_root);
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
+
if (qm->fun_type == QM_HW_PF) {
hisi_zip_debug_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
@@ -956,10 +991,13 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
struct qm_debug *debug = &qm->debug;
void __iomem *io_base;
+ u32 zip_core_info;
u32 zip_core_num;
int i, j, idx;
- zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
+ zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
+ zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask;
debug->last_words = kcalloc(core_dfx_regs_num * zip_core_num + com_dfx_regs_num,
sizeof(unsigned int), GFP_KERNEL);
@@ -972,7 +1010,7 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
}
for (i = 0; i < zip_core_num; i++) {
- io_base = qm->io_base + core_offsets[i];
+ io_base = get_zip_core_addr(qm, i);
for (j = 0; j < core_dfx_regs_num; j++) {
idx = com_dfx_regs_num + i * core_dfx_regs_num + j;
debug->last_words[idx] = readl_relaxed(
@@ -1001,6 +1039,7 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
u32 zip_core_num, zip_comp_core_num;
struct qm_debug *debug = &qm->debug;
char buf[HZIP_BUF_SIZE];
+ u32 zip_core_info;
void __iomem *base;
int i, j, idx;
u32 val;
@@ -1015,8 +1054,11 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
hzip_com_dfx_regs[i].name, debug->last_words[i], val);
}
- zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
- zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val;
+ zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
+ zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask;
+ zip_comp_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].mask;
for (i = 0; i < zip_core_num; i++) {
if (i < zip_comp_core_num)
@@ -1024,7 +1066,7 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
else
scnprintf(buf, sizeof(buf), "Decomp_core-%d",
i - zip_comp_core_num);
- base = qm->io_base + core_offsets[i];
+ base = get_zip_core_addr(qm, i);
pci_info(qm->pdev, "==>%s:\n", buf);
/* dump last word for dfx regs during control resetting */
@@ -1069,11 +1111,15 @@ static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
- u32 nfe;
-
writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
- nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
- writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+}
+
+static void hisi_zip_disable_error_report(struct hisi_qm *qm, u32 err_type)
+{
+ u32 nfe_mask;
+
+ nfe_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+ writel(nfe_mask & (~err_type), qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
}
static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
@@ -1087,6 +1133,8 @@ static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
writel(val | HZIP_AXI_SHUTDOWN_ENABLE,
qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
+ hisi_dae_open_axi_master_ooo(qm);
}
static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
@@ -1103,6 +1151,51 @@ static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
qm->io_base + HZIP_CORE_INT_SET);
}
+static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
+{
+ enum acc_err_result zip_result = ACC_ERR_NONE;
+ enum acc_err_result dae_result;
+ u32 err_status;
+
+ /* Get device hardware new error status */
+ err_status = hisi_zip_get_hw_err_status(qm);
+ if (err_status) {
+ if (err_status & qm->err_info.ecc_2bits_mask)
+ qm->err_status.is_dev_ecc_mbit = true;
+ hisi_zip_log_hw_error(qm, err_status);
+
+		if (err_status & qm->err_info.dev_reset_mask) {
+			/* Disable reporting of the same error until the device is recovered. */
+			hisi_zip_disable_error_report(qm, err_status);
+			zip_result = ACC_ERR_NEED_RESET;
+		} else {
+			hisi_zip_clear_hw_err_status(qm, err_status);
+		}
+ }
+
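+	/* The DAE module is reset together with ZIP, so its error state is checked as well. */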
+ dae_result = hisi_dae_get_err_result(qm);
+
+ return (zip_result == ACC_ERR_NEED_RESET ||
+ dae_result == ACC_ERR_NEED_RESET) ?
+ ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
+}
+
+static bool hisi_zip_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = hisi_zip_get_hw_err_status(qm);
+ if (err_status & qm->err_info.dev_shutdown_mask)
+ return true;
+
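+	/* No fatal ZIP error; the device may still be abnormal because of the DAE module. */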
+ return hisi_dae_dev_is_abnormal(qm);
+}
+
+static int hisi_zip_set_priv_status(struct hisi_qm *qm)
+{
+ return hisi_dae_close_axi_master_ooo(qm);
+}
+
static void hisi_zip_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
@@ -1130,13 +1223,15 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.hw_err_disable = hisi_zip_hw_error_disable,
.get_dev_hw_err_status = hisi_zip_get_hw_err_status,
.clear_dev_hw_err_status = hisi_zip_clear_hw_err_status,
- .log_dev_hw_err = hisi_zip_log_hw_error,
.open_axi_master_ooo = hisi_zip_open_axi_master_ooo,
.close_axi_master_ooo = hisi_zip_close_axi_master_ooo,
.open_sva_prefetch = hisi_zip_open_sva_prefetch,
.close_sva_prefetch = hisi_zip_close_sva_prefetch,
.show_last_dfx_regs = hisi_zip_show_last_dfx_regs,
.err_info_init = hisi_zip_err_info_init,
+ .get_err_result = hisi_zip_get_err_result,
+ .set_priv_status = hisi_zip_set_priv_status,
+ .dev_is_abnormal = hisi_zip_dev_is_abnormal,
};
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
@@ -1151,8 +1246,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
hisi_zip->ctrl = ctrl;
ctrl->hisi_zip = hisi_zip;
- qm->err_ini = &hisi_zip_err_ini;
- qm->err_ini->err_info_init(qm);
ret = hisi_zip_set_user_domain_and_cache(qm);
if (ret)
@@ -1179,18 +1272,20 @@ static int zip_pre_store_cap_reg(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
size_t i, size;
- size = ARRAY_SIZE(zip_pre_store_caps);
+ size = ARRAY_SIZE(zip_cap_query_info);
zip_cap = devm_kzalloc(&pdev->dev, sizeof(*zip_cap) * size, GFP_KERNEL);
if (!zip_cap)
return -ENOMEM;
for (i = 0; i < size; i++) {
- zip_cap[i].type = zip_pre_store_caps[i];
- zip_cap[i].cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- zip_pre_store_caps[i], qm->cap_ver);
+ zip_cap[i].type = zip_cap_query_info[i].type;
+ zip_cap[i].name = zip_cap_query_info[i].name;
+ zip_cap[i].cap_val = hisi_qm_get_cap_value(qm, zip_cap_query_info,
+ i, qm->cap_ver);
}
qm->cap_tables.dev_cap_table = zip_cap;
+ qm->cap_tables.dev_cap_size = size;
return 0;
}
@@ -1201,7 +1296,6 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
int ret;
qm->pdev = pdev;
- qm->ver = pdev->revision;
qm->mode = uacce_mode;
qm->sqe_size = HZIP_SQE_SIZE;
qm->dev_name = hisi_zip_name;
@@ -1213,6 +1307,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qp_num = pf_q_num;
qm->debug.curr_qm_qp_num = pf_q_num;
qm->qm_list = &zip_devices;
+ qm->err_ini = &hisi_zip_err_ini;
if (pf_q_num_flag)
set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
@@ -1237,17 +1332,24 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
ret = zip_pre_store_cap_reg(qm);
if (ret) {
pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
- hisi_qm_uninit(qm);
- return ret;
+ goto err_qm_uninit;
}
- alg_msk = qm->cap_tables.dev_cap_table[ZIP_DEV_ALG_BITMAP_IDX].cap_val;
+ alg_msk = qm->cap_tables.dev_cap_table[ZIP_ALG_BITMAP].cap_val;
ret = hisi_qm_set_algs(qm, alg_msk, zip_dev_algs, ARRAY_SIZE(zip_dev_algs));
if (ret) {
pci_err(qm->pdev, "Failed to set zip algs!\n");
- hisi_qm_uninit(qm);
+ goto err_qm_uninit;
}
+ ret = hisi_dae_set_alg(qm);
+ if (ret)
+ goto err_qm_uninit;
+
+ return 0;
+
+err_qm_uninit:
+ hisi_qm_uninit(qm);
return ret;
}
@@ -1279,6 +1381,16 @@ static int hisi_zip_probe_init(struct hisi_zip *hisi_zip)
return 0;
}
+static void hisi_zip_probe_uninit(struct hisi_qm *qm)
+{
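+	/* These resources are set up only on the PF, so VFs have nothing to undo here. */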
+ if (qm->fun_type == QM_HW_VF)
+ return;
+
+ hisi_zip_show_last_regs_uninit(qm);
+ hisi_zip_close_sva_prefetch(qm);
+ hisi_qm_dev_err_uninit(qm);
+}
+
static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hisi_zip *hisi_zip;
@@ -1305,7 +1417,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = hisi_qm_start(qm);
if (ret)
- goto err_dev_err_uninit;
+ goto err_probe_uninit;
ret = hisi_zip_debugfs_init(qm);
if (ret)
@@ -1344,9 +1456,8 @@ err_qm_del_list:
hisi_zip_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
-err_dev_err_uninit:
- hisi_zip_show_last_regs_uninit(qm);
- hisi_qm_dev_err_uninit(qm);
+err_probe_uninit:
+ hisi_zip_probe_uninit(qm);
err_qm_uninit:
hisi_zip_qm_uninit(qm);
@@ -1368,8 +1479,7 @@ static void hisi_zip_remove(struct pci_dev *pdev)
hisi_zip_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
- hisi_zip_show_last_regs_uninit(qm);
- hisi_qm_dev_err_uninit(qm);
+ hisi_zip_probe_uninit(qm);
hisi_zip_qm_uninit(qm);
}