Diffstat (limited to 'drivers/crypto/intel/qat')
-rw-r--r--  drivers/crypto/intel/qat/Kconfig                      |   7
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c     |  40
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c     | 112
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_telemetry.c   |  19
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_telemetry.h   |   5
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c  |  52
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h  |   5
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_algs.c        | 191
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_uclo.c        |   2
9 files changed, 269 insertions, 164 deletions
diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig
index 359c61f0c8a1..4b4861460dd4 100644
--- a/drivers/crypto/intel/qat/Kconfig
+++ b/drivers/crypto/intel/qat/Kconfig
@@ -6,12 +6,11 @@ config CRYPTO_DEV_QAT
select CRYPTO_SKCIPHER
select CRYPTO_AKCIPHER
select CRYPTO_DH
- select CRYPTO_HMAC
select CRYPTO_RSA
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- select CRYPTO_SHA512
select CRYPTO_LIB_AES
+ select CRYPTO_LIB_SHA1
+ select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_SHA512
select FW_LOADER
select CRC8
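
The Kconfig change above drops the shash algorithm selections (CRYPTO_HMAC, CRYPTO_SHA1/256/512) in favor of the SHA library options that qat_algs.c below calls directly, with no transform allocation. A minimal sketch of the library interface, assuming a one-shot digest; example_digest is a hypothetical helper, not part of this patch:

#include <crypto/sha2.h>
#include <linux/types.h>

/* Hypothetical helper: one-shot SHA-256 via the library interface. */
static void example_digest(const u8 *data, size_t len,
			   u8 out[SHA256_DIGEST_SIZE])
{
	sha256(data, len, out);	/* direct call, no crypto_alloc_shash() */
}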
diff --git a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
index 48c62a14a6a7..c2e6f0cb7480 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
@@ -89,26 +89,14 @@ err_chrdev_unreg:
return -EFAULT;
}
-static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
- unsigned long arg)
+static struct adf_user_cfg_ctl_data *adf_ctl_alloc_resources(unsigned long arg)
{
struct adf_user_cfg_ctl_data *cfg_data;
- cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
- if (!cfg_data)
- return -ENOMEM;
-
- /* Initialize device id to NO DEVICE as 0 is a valid device id */
- cfg_data->device_id = ADF_CFG_NO_DEVICE;
-
- if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
+ cfg_data = memdup_user((void __user *)arg, sizeof(*cfg_data));
+ if (IS_ERR(cfg_data))
pr_err("QAT: failed to copy from user cfg_data.\n");
- kfree(cfg_data);
- return -EIO;
- }
-
- *ctl_data = cfg_data;
- return 0;
+ return cfg_data;
}
static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
@@ -188,13 +176,13 @@ out_err:
static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
unsigned long arg)
{
- int ret;
struct adf_user_cfg_ctl_data *ctl_data;
struct adf_accel_dev *accel_dev;
+ int ret = 0;
- ret = adf_ctl_alloc_resources(&ctl_data, arg);
- if (ret)
- return ret;
+ ctl_data = adf_ctl_alloc_resources(arg);
+ if (IS_ERR(ctl_data))
+ return PTR_ERR(ctl_data);
accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
if (!accel_dev) {
@@ -267,9 +255,9 @@ static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
int ret;
struct adf_user_cfg_ctl_data *ctl_data;
- ret = adf_ctl_alloc_resources(&ctl_data, arg);
- if (ret)
- return ret;
+ ctl_data = adf_ctl_alloc_resources(arg);
+ if (IS_ERR(ctl_data))
+ return PTR_ERR(ctl_data);
if (adf_devmgr_verify_id(ctl_data->device_id)) {
pr_err("QAT: Device %d not found\n", ctl_data->device_id);
@@ -301,9 +289,9 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
struct adf_user_cfg_ctl_data *ctl_data;
struct adf_accel_dev *accel_dev;
- ret = adf_ctl_alloc_resources(&ctl_data, arg);
- if (ret)
- return ret;
+ ctl_data = adf_ctl_alloc_resources(arg);
+ if (IS_ERR(ctl_data))
+ return PTR_ERR(ctl_data);
ret = -ENODEV;
accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
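
The refactor above folds kzalloc() plus copy_from_user() into memdup_user(), which returns either the copy or an ERR_PTR (-ENOMEM on allocation failure, -EFAULT on a faulting copy, where the old code returned -EIO), so callers drop the out-parameter and test with IS_ERR()/PTR_ERR(). The deleted ADF_CFG_NO_DEVICE initialization was dead code, since copy_from_user() overwrote the whole struct anyway. A minimal sketch of the idiom; struct foo and foo_ioctl are hypothetical:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct foo { int id; };	/* hypothetical payload */

static int foo_ioctl(unsigned long arg)
{
	struct foo *f;

	/* One call allocates and copies; failures come back as ERR_PTR. */
	f = memdup_user((const void __user *)arg, sizeof(*f));
	if (IS_ERR(f))
		return PTR_ERR(f);	/* -ENOMEM or -EFAULT */

	/* ... use f->id ... */
	kfree(f);
	return 0;
}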
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c
index cf804f95838a..faa60b04c406 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c
@@ -21,6 +21,25 @@
#define SLICE_IDX(sl) offsetof(struct icp_qat_fw_init_admin_slice_cnt, sl##_cnt)
+#define ADF_GEN6_TL_CMDQ_WAIT_COUNTER(_name) \
+ ADF_TL_COUNTER("cmdq_wait_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_CMDQ_REG_OFF(_name, reg_tm_cmdq_wait_cnt, gen6))
+#define ADF_GEN6_TL_CMDQ_EXEC_COUNTER(_name) \
+ ADF_TL_COUNTER("cmdq_exec_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_CMDQ_REG_OFF(_name, reg_tm_cmdq_exec_cnt, gen6))
+#define ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(_name) \
+ ADF_TL_COUNTER("cmdq_drain_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_CMDQ_REG_OFF(_name, reg_tm_cmdq_drain_cnt, \
+ gen6))
+
+#define CPR_QUEUE_COUNT 5
+#define DCPR_QUEUE_COUNT 3
+#define PKE_QUEUE_COUNT 1
+#define WAT_QUEUE_COUNT 7
+#define WCP_QUEUE_COUNT 7
+#define UCS_QUEUE_COUNT 3
+#define ATH_QUEUE_COUNT 2
+
/* Device level counters. */
static const struct adf_tl_dbg_counter dev_counters[] = {
/* PCIe partial transactions. */
@@ -57,6 +76,10 @@ static const struct adf_tl_dbg_counter dev_counters[] = {
/* Maximum uTLB used. */
ADF_TL_COUNTER(AT_MAX_UTLB_USED_NAME, ADF_TL_SIMPLE_COUNT,
ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_max_utlb_used)),
+ /* Ring Empty average[ns] across all rings. */
+ ADF_TL_COUNTER_LATENCY(RE_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_re_acc),
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_re_cnt)),
};
/* Accelerator utilization counters */
@@ -95,6 +118,80 @@ static const struct adf_tl_dbg_counter sl_exec_counters[ADF_TL_SL_CNT_COUNT] = {
[SLICE_IDX(ath)] = ADF_GEN6_TL_SL_EXEC_COUNTER(ath),
};
+static const struct adf_tl_dbg_counter cnv_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(cnv),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(cnv),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(cnv)
+};
+
+#define NUM_CMDQ_COUNTERS ARRAY_SIZE(cnv_cmdq_counters)
+
+static const struct adf_tl_dbg_counter dcprz_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(dcprz),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(dcprz),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(dcprz)
+};
+
+static_assert(ARRAY_SIZE(dcprz_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter pke_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(pke),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(pke),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(pke)
+};
+
+static_assert(ARRAY_SIZE(pke_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter wat_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(wat),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(wat),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(wat)
+};
+
+static_assert(ARRAY_SIZE(wat_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter wcp_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(wcp),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(wcp),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(wcp)
+};
+
+static_assert(ARRAY_SIZE(wcp_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter ucs_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(ucs),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(ucs),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(ucs)
+};
+
+static_assert(ARRAY_SIZE(ucs_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter ath_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(ath),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(ath),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(ath)
+};
+
+static_assert(ARRAY_SIZE(ath_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+/* Command queue counters, per slice type. */
+static const struct adf_tl_dbg_counter *cmdq_counters[ADF_TL_SL_CNT_COUNT] = {
+ /* Compression accelerator command queue counters. */
+ [SLICE_IDX(cpr)] = cnv_cmdq_counters,
+ /* Decompression accelerator command queue counters. */
+ [SLICE_IDX(dcpr)] = dcprz_cmdq_counters,
+ /* PKE command queue counters. */
+ [SLICE_IDX(pke)] = pke_cmdq_counters,
+ /* Wireless Authentication accelerator command queue counters. */
+ [SLICE_IDX(wat)] = wat_cmdq_counters,
+ /* Wireless Cipher accelerator command queue counters. */
+ [SLICE_IDX(wcp)] = wcp_cmdq_counters,
+ /* UCS accelerator command queue counters. */
+ [SLICE_IDX(ucs)] = ucs_cmdq_counters,
+ /* Authentication accelerator command queue counters. */
+ [SLICE_IDX(ath)] = ath_cmdq_counters,
+};
+
/* Ring pair counters. */
static const struct adf_tl_dbg_counter rp_counters[] = {
/* PCIe partial transactions. */
@@ -122,12 +219,17 @@ static const struct adf_tl_dbg_counter rp_counters[] = {
/* Payload DevTLB miss rate. */
ADF_TL_COUNTER(AT_PAYLD_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT,
ADF_GEN6_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_miss)),
+ /* Ring Empty average[ns]. */
+ ADF_TL_COUNTER_LATENCY(RE_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_re_acc),
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_re_cnt)),
};
void adf_gen6_init_tl_data(struct adf_tl_hw_data *tl_data)
{
tl_data->layout_sz = ADF_GEN6_TL_LAYOUT_SZ;
tl_data->slice_reg_sz = ADF_GEN6_TL_SLICE_REG_SZ;
+ tl_data->cmdq_reg_sz = ADF_GEN6_TL_CMDQ_REG_SZ;
tl_data->rp_reg_sz = ADF_GEN6_TL_RP_REG_SZ;
tl_data->num_hbuff = ADF_GEN6_TL_NUM_HIST_BUFFS;
tl_data->max_rp = ADF_GEN6_TL_MAX_RP_NUM;
@@ -139,8 +241,18 @@ void adf_gen6_init_tl_data(struct adf_tl_hw_data *tl_data)
tl_data->num_dev_counters = ARRAY_SIZE(dev_counters);
tl_data->sl_util_counters = sl_util_counters;
tl_data->sl_exec_counters = sl_exec_counters;
+ tl_data->cmdq_counters = cmdq_counters;
+ tl_data->num_cmdq_counters = NUM_CMDQ_COUNTERS;
tl_data->rp_counters = rp_counters;
tl_data->num_rp_counters = ARRAY_SIZE(rp_counters);
tl_data->max_sl_cnt = ADF_GEN6_TL_MAX_SLICES_PER_TYPE;
+
+ tl_data->multiplier.cpr_cnt = CPR_QUEUE_COUNT;
+ tl_data->multiplier.dcpr_cnt = DCPR_QUEUE_COUNT;
+ tl_data->multiplier.pke_cnt = PKE_QUEUE_COUNT;
+ tl_data->multiplier.wat_cnt = WAT_QUEUE_COUNT;
+ tl_data->multiplier.wcp_cnt = WCP_QUEUE_COUNT;
+ tl_data->multiplier.ucs_cnt = UCS_QUEUE_COUNT;
+ tl_data->multiplier.ath_cnt = ATH_QUEUE_COUNT;
}
EXPORT_SYMBOL_GPL(adf_gen6_init_tl_data);
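
All of the per-slice cmdq tables above must stay the same length, because the debugfs walker indexes them with the shared num_cmdq_counters; NUM_CMDQ_COUNTERS is taken from the first table and every other table is pinned to it with static_assert(), turning a mismatched table into a build failure instead of an out-of-bounds read. A stripped-down sketch of the pattern, with hypothetical tables:

#include <linux/array_size.h>
#include <linux/build_bug.h>

static const int alpha_counters[] = { 1, 2, 3 };
#define NUM_COUNTERS ARRAY_SIZE(alpha_counters)

static const int beta_counters[] = { 4, 5, 6 };
static_assert(ARRAY_SIZE(beta_counters) == NUM_COUNTERS);	/* build-time length check */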
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
index 74fb0c2ed241..b64142db1f0d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
@@ -212,6 +212,23 @@ int adf_tl_halt(struct adf_accel_dev *accel_dev)
return ret;
}
+static void adf_set_cmdq_cnt(struct adf_accel_dev *accel_dev,
+ struct adf_tl_hw_data *tl_data)
+{
+ struct icp_qat_fw_init_admin_slice_cnt *slice_cnt, *cmdq_cnt;
+
+ slice_cnt = &accel_dev->telemetry->slice_cnt;
+ cmdq_cnt = &accel_dev->telemetry->cmdq_cnt;
+
+ cmdq_cnt->cpr_cnt = slice_cnt->cpr_cnt * tl_data->multiplier.cpr_cnt;
+ cmdq_cnt->dcpr_cnt = slice_cnt->dcpr_cnt * tl_data->multiplier.dcpr_cnt;
+ cmdq_cnt->pke_cnt = slice_cnt->pke_cnt * tl_data->multiplier.pke_cnt;
+ cmdq_cnt->wat_cnt = slice_cnt->wat_cnt * tl_data->multiplier.wat_cnt;
+ cmdq_cnt->wcp_cnt = slice_cnt->wcp_cnt * tl_data->multiplier.wcp_cnt;
+ cmdq_cnt->ucs_cnt = slice_cnt->ucs_cnt * tl_data->multiplier.ucs_cnt;
+ cmdq_cnt->ath_cnt = slice_cnt->ath_cnt * tl_data->multiplier.ath_cnt;
+}
+
int adf_tl_run(struct adf_accel_dev *accel_dev, int state)
{
struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
@@ -235,6 +252,8 @@ int adf_tl_run(struct adf_accel_dev *accel_dev, int state)
return ret;
}
+ adf_set_cmdq_cnt(accel_dev, tl_data);
+
telemetry->hbuffs = state;
atomic_set(&telemetry->state, state);
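
adf_set_cmdq_cnt() scales the firmware-reported slice counts by the per-generation queue multipliers to get the number of command queue register blocks per slice type. A worked example under assumed counts:

/*
 * Illustration (counts assumed): with slice_cnt->cpr_cnt = 2 compression
 * slices and the gen6 multiplier CPR_QUEUE_COUNT = 5,
 * cmdq_cnt->cpr_cnt = 2 * 5 = 10, so the debugfs dump prints ten sets
 * of cpr cmdq wait/exec/drain counters.
 */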
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
index e54a406cc1b4..02d75c3c214a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
@@ -28,19 +28,23 @@ struct dentry;
struct adf_tl_hw_data {
size_t layout_sz;
size_t slice_reg_sz;
+ size_t cmdq_reg_sz;
size_t rp_reg_sz;
size_t msg_cnt_off;
const struct adf_tl_dbg_counter *dev_counters;
const struct adf_tl_dbg_counter *sl_util_counters;
const struct adf_tl_dbg_counter *sl_exec_counters;
+ const struct adf_tl_dbg_counter **cmdq_counters;
const struct adf_tl_dbg_counter *rp_counters;
u8 num_hbuff;
u8 cpp_ns_per_cycle;
u8 bw_units_to_bytes;
u8 num_dev_counters;
u8 num_rp_counters;
+ u8 num_cmdq_counters;
u8 max_rp;
u8 max_sl_cnt;
+ struct icp_qat_fw_init_admin_slice_cnt multiplier;
};
struct adf_telemetry {
@@ -69,6 +73,7 @@ struct adf_telemetry {
struct mutex wr_lock;
struct delayed_work work_ctx;
struct icp_qat_fw_init_admin_slice_cnt slice_cnt;
+ struct icp_qat_fw_init_admin_slice_cnt cmdq_cnt;
};
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
index a32db273842a..b81f70576683 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
@@ -339,6 +339,48 @@ static int tl_calc_and_print_sl_counters(struct adf_accel_dev *accel_dev,
return 0;
}
+static int tl_print_cmdq_counter(struct adf_telemetry *telemetry,
+ const struct adf_tl_dbg_counter *ctr,
+ struct seq_file *s, u8 cnt_id, u8 counter)
+{
+ size_t cmdq_regs_sz = GET_TL_DATA(telemetry->accel_dev).cmdq_reg_sz;
+ size_t offset_inc = cnt_id * cmdq_regs_sz;
+ struct adf_tl_dbg_counter slice_ctr;
+ char cnt_name[MAX_COUNT_NAME_SIZE];
+
+ slice_ctr = *(ctr + counter);
+ slice_ctr.offset1 += offset_inc;
+ snprintf(cnt_name, MAX_COUNT_NAME_SIZE, "%s%d", slice_ctr.name, cnt_id);
+
+ return tl_calc_and_print_counter(telemetry, s, &slice_ctr, cnt_name);
+}
+
+static int tl_calc_and_print_cmdq_counters(struct adf_accel_dev *accel_dev,
+ struct seq_file *s, u8 cnt_type,
+ u8 cnt_id)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ const struct adf_tl_dbg_counter **cmdq_tl_counters;
+ const struct adf_tl_dbg_counter *ctr;
+ u8 counter;
+ int ret;
+
+ cmdq_tl_counters = tl_data->cmdq_counters;
+ ctr = cmdq_tl_counters[cnt_type];
+
+ for (counter = 0; counter < tl_data->num_cmdq_counters; counter++) {
+ ret = tl_print_cmdq_counter(telemetry, ctr, s, cnt_id, counter);
+ if (ret) {
+ dev_notice(&GET_DEV(accel_dev),
+ "invalid slice utilization counter type\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static void tl_print_msg_cnt(struct seq_file *s, u32 msg_cnt)
{
seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, SNAPSHOT_CNT_MSG);
@@ -352,6 +394,7 @@ static int tl_print_dev_data(struct adf_accel_dev *accel_dev,
struct adf_telemetry *telemetry = accel_dev->telemetry;
const struct adf_tl_dbg_counter *dev_tl_counters;
u8 num_dev_counters = tl_data->num_dev_counters;
+ u8 *cmdq_cnt = (u8 *)&telemetry->cmdq_cnt;
u8 *sl_cnt = (u8 *)&telemetry->slice_cnt;
const struct adf_tl_dbg_counter *ctr;
unsigned int i;
@@ -387,6 +430,15 @@ static int tl_print_dev_data(struct adf_accel_dev *accel_dev,
}
}
+ /* Print per command queue telemetry. */
+ for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) {
+ for (j = 0; j < cmdq_cnt[i]; j++) {
+ ret = tl_calc_and_print_cmdq_counters(accel_dev, s, i, j);
+ if (ret)
+ return ret;
+ }
+ }
+
return 0;
}
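
tl_print_dev_data() walks each slice type and, per type, each of its command queues; tl_print_cmdq_counter() then shifts the counter's base offset by cnt_id * cmdq_reg_sz, since the queue register blocks sit back to back in the telemetry layout. A hedged sketch of that offset arithmetic as a standalone helper (not in the patch):

#include <linux/types.h>

/* Queue N's registers start N fixed-size blocks past queue 0's. */
static size_t cmdq_reg_offset(size_t base_off, size_t cmdq_reg_sz, u8 cnt_id)
{
	return base_off + (size_t)cnt_id * cmdq_reg_sz;
}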
diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h
index 11cc9eae19b3..97c5eeaa1b17 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h
@@ -17,6 +17,7 @@ struct adf_accel_dev;
#define LAT_ACC_NAME "gp_lat_acc_avg"
#define BW_IN_NAME "bw_in"
#define BW_OUT_NAME "bw_out"
+#define RE_ACC_NAME "re_acc_avg"
#define PAGE_REQ_LAT_NAME "at_page_req_lat_avg"
#define AT_TRANS_LAT_NAME "at_trans_lat_avg"
#define AT_MAX_UTLB_USED_NAME "at_max_tlb_used"
@@ -43,6 +44,10 @@ struct adf_accel_dev;
(ADF_TL_DEV_REG_OFF(slice##_slices[0], qat_gen) + \
offsetof(struct adf_##qat_gen##_tl_slice_data_regs, reg))
+#define ADF_TL_CMDQ_REG_OFF(slice, reg, qat_gen) \
+ (ADF_TL_DEV_REG_OFF(slice##_cmdq[0], qat_gen) + \
+ offsetof(struct adf_##qat_gen##_tl_cmdq_data_regs, reg))
+
#define ADF_TL_RP_REG_OFF(reg, qat_gen) \
(ADF_TL_DATA_REG_OFF(tl_ring_pairs_data_regs[0], qat_gen) + \
offsetof(struct adf_##qat_gen##_tl_ring_pair_data_regs, reg))
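
ADF_TL_CMDQ_REG_OFF() composes two offsetof() terms: the offset of a slice type's first cmdq block inside the device layout, plus the offset of the register within one block. A reduced mock of the same composition; the mock_* types are hypothetical stand-ins for the real layout structs:

#include <linux/stddef.h>
#include <linux/types.h>

struct mock_cmdq_regs {
	u64 reg_tm_cmdq_wait_cnt;
	u64 reg_tm_cmdq_exec_cnt;
	u64 reg_tm_cmdq_drain_cnt;
};

struct mock_tl_layout {
	struct mock_cmdq_regs pke_cmdq[4];
};

/* Byte offset of pke queue 0's exec counter within the layout. */
#define MOCK_PKE_EXEC_OFF					\
	(offsetof(struct mock_tl_layout, pke_cmdq[0]) +		\
	 offsetof(struct mock_cmdq_regs, reg_tm_cmdq_exec_cnt))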
diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c
index 43e6dd9b77b7..7f638a62e3ad 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_algs.c
@@ -5,12 +5,10 @@
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
-#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
-#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
@@ -68,16 +66,10 @@ struct qat_alg_aead_ctx {
dma_addr_t dec_cd_paddr;
struct icp_qat_fw_la_bulk_req enc_fw_req;
struct icp_qat_fw_la_bulk_req dec_fw_req;
- struct crypto_shash *hash_tfm;
enum icp_qat_hw_auth_algo qat_hash_alg;
+ unsigned int hash_digestsize;
+ unsigned int hash_blocksize;
struct qat_crypto_instance *inst;
- union {
- struct sha1_state sha1;
- struct sha256_state sha256;
- struct sha512_state sha512;
- };
- char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
- char opad[SHA512_BLOCK_SIZE];
};
struct qat_alg_skcipher_ctx {
@@ -94,125 +86,57 @@ struct qat_alg_skcipher_ctx {
int mode;
};
-static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
-{
- switch (qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- return ICP_QAT_HW_SHA1_STATE1_SZ;
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- return ICP_QAT_HW_SHA256_STATE1_SZ;
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- return ICP_QAT_HW_SHA512_STATE1_SZ;
- default:
- return -EFAULT;
- }
-}
-
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
struct qat_alg_aead_ctx *ctx,
const u8 *auth_key,
unsigned int auth_keylen)
{
- SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
- int block_size = crypto_shash_blocksize(ctx->hash_tfm);
- int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
- __be32 *hash_state_out;
- __be64 *hash512_state_out;
- int i, offset;
-
- memset(ctx->ipad, 0, block_size);
- memset(ctx->opad, 0, block_size);
- shash->tfm = ctx->hash_tfm;
-
- if (auth_keylen > block_size) {
- int ret = crypto_shash_digest(shash, auth_key,
- auth_keylen, ctx->ipad);
- if (ret)
- return ret;
-
- memcpy(ctx->opad, ctx->ipad, digest_size);
- } else {
- memcpy(ctx->ipad, auth_key, auth_keylen);
- memcpy(ctx->opad, auth_key, auth_keylen);
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1: {
+ struct hmac_sha1_key key;
+ __be32 *istate = (__be32 *)hash->sha.state1;
+ __be32 *ostate = (__be32 *)(hash->sha.state1 +
+ round_up(sizeof(key.istate.h), 8));
+
+ hmac_sha1_preparekey(&key, auth_key, auth_keylen);
+ for (int i = 0; i < ARRAY_SIZE(key.istate.h); i++) {
+ istate[i] = cpu_to_be32(key.istate.h[i]);
+ ostate[i] = cpu_to_be32(key.ostate.h[i]);
+ }
+ memzero_explicit(&key, sizeof(key));
+ return 0;
}
-
- for (i = 0; i < block_size; i++) {
- char *ipad_ptr = ctx->ipad + i;
- char *opad_ptr = ctx->opad + i;
- *ipad_ptr ^= HMAC_IPAD_VALUE;
- *opad_ptr ^= HMAC_OPAD_VALUE;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256: {
+ struct hmac_sha256_key key;
+ __be32 *istate = (__be32 *)hash->sha.state1;
+ __be32 *ostate = (__be32 *)(hash->sha.state1 +
+ sizeof(key.key.istate.h));
+
+ hmac_sha256_preparekey(&key, auth_key, auth_keylen);
+ for (int i = 0; i < ARRAY_SIZE(key.key.istate.h); i++) {
+ istate[i] = cpu_to_be32(key.key.istate.h[i]);
+ ostate[i] = cpu_to_be32(key.key.ostate.h[i]);
+ }
+ memzero_explicit(&key, sizeof(key));
+ return 0;
}
-
- if (crypto_shash_init(shash))
- return -EFAULT;
-
- if (crypto_shash_update(shash, ctx->ipad, block_size))
- return -EFAULT;
-
- hash_state_out = (__be32 *)hash->sha.state1;
- hash512_state_out = (__be64 *)hash_state_out;
-
- switch (ctx->qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export_core(shash, &ctx->sha1))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export_core(shash, &ctx->sha256))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export_core(shash, &ctx->sha512))
- return -EFAULT;
- for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
- *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
- break;
- default:
- return -EFAULT;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512: {
+ struct hmac_sha512_key key;
+ __be64 *istate = (__be64 *)hash->sha.state1;
+ __be64 *ostate = (__be64 *)(hash->sha.state1 +
+ sizeof(key.key.istate.h));
+
+ hmac_sha512_preparekey(&key, auth_key, auth_keylen);
+ for (int i = 0; i < ARRAY_SIZE(key.key.istate.h); i++) {
+ istate[i] = cpu_to_be64(key.key.istate.h[i]);
+ ostate[i] = cpu_to_be64(key.key.ostate.h[i]);
+ }
+ memzero_explicit(&key, sizeof(key));
+ return 0;
}
-
- if (crypto_shash_init(shash))
- return -EFAULT;
-
- if (crypto_shash_update(shash, ctx->opad, block_size))
- return -EFAULT;
-
- offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
- if (offset < 0)
- return -EFAULT;
-
- hash_state_out = (__be32 *)(hash->sha.state1 + offset);
- hash512_state_out = (__be64 *)hash_state_out;
-
- switch (ctx->qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export_core(shash, &ctx->sha1))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export_core(shash, &ctx->sha256))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export_core(shash, &ctx->sha512))
- return -EFAULT;
- for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
- *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
- break;
default:
return -EFAULT;
}
- memzero_explicit(ctx->ipad, block_size);
- memzero_explicit(ctx->opad, block_size);
- return 0;
}
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
@@ -259,7 +183,7 @@ static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
ctx->qat_hash_alg, digestsize);
hash->sha.inner_setup.auth_counter.counter =
- cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+ cpu_to_be32(ctx->hash_blocksize);
if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
return -EFAULT;
@@ -326,7 +250,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
struct icp_qat_hw_cipher_algo_blk *cipher =
(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
sizeof(struct icp_qat_hw_auth_setup) +
- roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
+ roundup(ctx->hash_digestsize, 8) * 2);
struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
@@ -346,7 +270,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
ctx->qat_hash_alg,
digestsize);
hash->sha.inner_setup.auth_counter.counter =
- cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+ cpu_to_be32(ctx->hash_blocksize);
if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
return -EFAULT;
@@ -368,7 +292,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
cipher_cd_ctrl->cipher_cfg_offset =
(sizeof(struct icp_qat_hw_auth_setup) +
- roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
+ roundup(ctx->hash_digestsize, 8) * 2) >> 3;
ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
@@ -1150,32 +1074,35 @@ static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
}
static int qat_alg_aead_init(struct crypto_aead *tfm,
- enum icp_qat_hw_auth_algo hash,
- const char *hash_name)
+ enum icp_qat_hw_auth_algo hash_alg,
+ unsigned int hash_digestsize,
+ unsigned int hash_blocksize)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
- ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
- if (IS_ERR(ctx->hash_tfm))
- return PTR_ERR(ctx->hash_tfm);
- ctx->qat_hash_alg = hash;
+ ctx->qat_hash_alg = hash_alg;
+ ctx->hash_digestsize = hash_digestsize;
+ ctx->hash_blocksize = hash_blocksize;
crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
return 0;
}
static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
- return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1,
+ SHA1_DIGEST_SIZE, SHA1_BLOCK_SIZE);
}
static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
- return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256,
+ SHA256_DIGEST_SIZE, SHA256_BLOCK_SIZE);
}
static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
- return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512,
+ SHA512_DIGEST_SIZE, SHA512_BLOCK_SIZE);
}
static void qat_alg_aead_exit(struct crypto_aead *tfm)
@@ -1184,8 +1111,6 @@ static void qat_alg_aead_exit(struct crypto_aead *tfm)
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev;
- crypto_free_shash(ctx->hash_tfm);
-
if (!inst)
return;
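
qat_alg_do_precomputes() now derives the HMAC inner and outer states with the library's *_preparekey() helpers instead of hand-rolling the ipad/opad rounds through an shash transform; the hardware still receives both states serialized big-endian into state1, the outer state following the (8-byte-aligned) inner one. A sketch distilling the SHA-256 branch above into a standalone helper; example_hmac_precompute is hypothetical:

#include <crypto/sha2.h>
#include <linux/kernel.h>
#include <linux/string.h>

static void example_hmac_precompute(const u8 *auth_key, size_t auth_keylen,
				    __be32 *istate, __be32 *ostate)
{
	struct hmac_sha256_key key;
	int i;

	/* The library computes the H(K ^ ipad) and H(K ^ opad) states once. */
	hmac_sha256_preparekey(&key, auth_key, auth_keylen);
	for (i = 0; i < ARRAY_SIZE(key.key.istate.h); i++) {
		istate[i] = cpu_to_be32(key.key.istate.h[i]);
		ostate[i] = cpu_to_be32(key.key.ostate.h[i]);
	}
	/* The expanded key is as sensitive as the raw key; wipe it. */
	memzero_explicit(&key, sizeof(key));
}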
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index 21d652a1c8ef..18c3e4416dc5 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -1900,7 +1900,7 @@ static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
if (sobj_hdr)
sobj_chunk_num = sobj_hdr->num_chunks;
- mobj_hdr = kzalloc((uobj_chunk_num + sobj_chunk_num) *
+ mobj_hdr = kcalloc(size_add(uobj_chunk_num, sobj_chunk_num),
sizeof(*mobj_hdr), GFP_KERNEL);
if (!mobj_hdr)
return -ENOMEM;
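
The qat_uclo.c fix converts an unchecked (a + b) * size allocation into kcalloc() with size_add() from <linux/overflow.h>: size_add() saturates at SIZE_MAX on overflow and kcalloc() rejects a count * size product that would wrap, so an oversized chunk count degrades into a failed allocation rather than an undersized buffer. A hedged sketch of the same pattern in isolation; alloc_hdrs is hypothetical:

#include <linux/overflow.h>
#include <linux/slab.h>

static void *alloc_hdrs(size_t uobj_chunks, size_t sobj_chunks, size_t hdr_sz)
{
	/*
	 * size_add() saturates to SIZE_MAX on overflow; kcalloc() then
	 * refuses the oversized request and returns NULL instead of a
	 * short buffer.
	 */
	return kcalloc(size_add(uobj_chunks, sobj_chunks), hdr_sz, GFP_KERNEL);
}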