author | Linus Torvalds <torvalds@linux-foundation.org> | 2024-07-19 10:56:58 -0700
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2024-07-19 10:56:58 -0700
commit | 4305ca0087dd99c3c3e0e2ac8a228b7e53a21c78 (patch) |
tree | 064a870f63d8a579e3d1a5193cc266ea326b32a7 | /drivers/ufs/core
parent | 661fb4e68cf62bf52eacfcd9b3b0d93fe4260c5b (diff) |
parent | 23cef42d17413d099f44ea42b622fbf23b04646f (diff) |
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
"Updates to the usual drivers (ufs, lpfc, qla2xxx, mpi3mr) plus some
misc small fixes.
The only core changes are to both bsg and scsi to pass in the device
instead of setting it afterwards as q->queuedata, so no functional
change"
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (69 commits)
scsi: aha152x: Use DECLARE_COMPLETION_ONSTACK for non-constant completion
scsi: qla2xxx: Convert comma to semicolon
scsi: qla2xxx: Update version to 10.02.09.300-k
scsi: qla2xxx: Use QP lock to search for bsg
scsi: qla2xxx: Reduce fabric scan duplicate code
scsi: qla2xxx: Fix optrom version displayed in FDMI
scsi: qla2xxx: During vport delete send async logout explicitly
scsi: qla2xxx: Complete command early within lock
scsi: qla2xxx: Fix flash read failure
scsi: qla2xxx: Return ENOBUFS if sg_cnt is more than one for ELS cmds
scsi: qla2xxx: Fix for possible memory corruption
scsi: qla2xxx: validate nvme_local_port correctly
scsi: qla2xxx: Unable to act on RSCN for port online
scsi: ufs: exynos: Add support for Flash Memory Protector (FMP)
scsi: ufs: core: Add UFSHCD_QUIRK_KEYS_IN_PRDT
scsi: ufs: core: Add fill_crypto_prdt variant op
scsi: ufs: core: Add UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE
scsi: ufs: core: fold ufshcd_clear_keyslot() into its caller
scsi: ufs: core: Add UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE
scsi: ufs: mcq: Make .get_hba_mac() optional
...
Diffstat (limited to 'drivers/ufs/core')
-rw-r--r-- | drivers/ufs/core/ufs-mcq.c | 89
-rw-r--r-- | drivers/ufs/core/ufs-sysfs.c | 73
-rw-r--r-- | drivers/ufs/core/ufshcd-crypto.c | 34
-rw-r--r-- | drivers/ufs/core/ufshcd-crypto.h | 36
-rw-r--r-- | drivers/ufs/core/ufshcd-priv.h | 15
-rw-r--r-- | drivers/ufs/core/ufshcd.c | 163
6 files changed, 295 insertions, 115 deletions
```diff
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index c532416aec22..5891cdacd0b3 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -18,6 +18,7 @@
 #include <linux/iopoll.h>
 
 #define MAX_QUEUE_SUP GENMASK(7, 0)
+#define QCFGPTR GENMASK(23, 16)
 #define UFS_MCQ_MIN_RW_QUEUES 2
 #define UFS_MCQ_MIN_READ_QUEUES 0
 #define UFS_MCQ_MIN_POLL_QUEUES 0
@@ -25,7 +26,6 @@
 #define QUEUE_ID_OFFSET 16
 
 #define MCQ_CFG_MAC_MASK GENMASK(16, 8)
-#define MCQ_QCFG_SIZE 0x40
 #define MCQ_ENTRY_SIZE_IN_DWORD 8
 #define CQE_UCD_BA GENMASK_ULL(63, 7)
@@ -117,6 +117,19 @@ struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
 }
 
 /**
+ * ufshcd_mcq_queue_cfg_addr - get an start address of the MCQ Queue Config
+ * Registers.
+ * @hba: per adapter instance
+ *
+ * Return: Start address of MCQ Queue Config Registers in HCI
+ */
+unsigned int ufshcd_mcq_queue_cfg_addr(struct ufs_hba *hba)
+{
+        return FIELD_GET(QCFGPTR, hba->mcq_capabilities) * 0x200;
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_queue_cfg_addr);
+
+/**
  * ufshcd_mcq_decide_queue_depth - decide the queue depth
  * @hba: per adapter instance
  *
@@ -124,7 +137,6 @@ struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
  *
  * MAC - Max. Active Command of the Host Controller (HC)
  * HC wouldn't send more than this commands to the device.
- * It is mandatory to implement get_hba_mac() to enable MCQ mode.
  * Calculates and adjusts the queue depth based on the depth
  * supported by the HC and ufs device.
  */
@@ -132,12 +144,21 @@ int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
 {
         int mac;
 
-        /* Mandatory to implement get_hba_mac() */
-        mac = ufshcd_mcq_vops_get_hba_mac(hba);
-        if (mac < 0) {
-                dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
-                return mac;
+        if (!hba->vops || !hba->vops->get_hba_mac) {
+                /*
+                 * Extract the maximum number of active transfer tasks value
+                 * from the host controller capabilities register. This value is
+                 * 0-based.
+                 */
+                hba->capabilities =
+                        ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
+                mac = hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_MCQ;
+                mac++;
+        } else {
+                mac = hba->vops->get_hba_mac(hba);
         }
+        if (mac < 0)
+                goto err;
 
         WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
         /*
@@ -146,6 +167,10 @@ int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
          * shared queuing architecture is enabled.
          */
         return min_t(int, mac, hba->dev_info.bqueuedepth);
+
+err:
+        dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
+        return mac;
 }
 
 static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
@@ -165,6 +190,15 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
                 return -EOPNOTSUPP;
         }
 
+        /*
+         * Device should support at least one I/O queue to handle device
+         * commands via hba->dev_cmd_queue.
+         */
+        if (hba_maxq == poll_queues) {
+                dev_err(hba->dev, "At least one non-poll queue required\n");
+                return -EOPNOTSUPP;
+        }
+
         rem = hba_maxq;
 
         if (rw_queues) {
@@ -227,12 +261,6 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
 
         return 0;
 }
-
-/* Operation and runtime registers configuration */
-#define MCQ_CFG_n(r, i) ((r) + MCQ_QCFG_SIZE * (i))
-#define MCQ_OPR_OFFSET_n(p, i) \
-        (hba->mcq_opr[(p)].offset + hba->mcq_opr[(p)].stride * (i))
-
 static void __iomem *mcq_opr_base(struct ufs_hba *hba,
                                   enum ufshcd_mcq_opr n, int i)
 {
@@ -337,29 +365,29 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
 
                 /* Submission Queue Lower Base Address */
                 ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr),
-                              MCQ_CFG_n(REG_SQLBA, i));
+                              ufshcd_mcq_cfg_offset(REG_SQLBA, i));
                 /* Submission Queue Upper Base Address */
                 ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
-                              MCQ_CFG_n(REG_SQUBA, i));
+                              ufshcd_mcq_cfg_offset(REG_SQUBA, i));
                 /* Submission Queue Doorbell Address Offset */
-                ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i),
-                              MCQ_CFG_n(REG_SQDAO, i));
+                ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQD, i),
+                              ufshcd_mcq_cfg_offset(REG_SQDAO, i));
                 /* Submission Queue Interrupt Status Address Offset */
-                ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i),
-                              MCQ_CFG_n(REG_SQISAO, i));
+                ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQIS, i),
+                              ufshcd_mcq_cfg_offset(REG_SQISAO, i));
 
                 /* Completion Queue Lower Base Address */
                 ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr),
-                              MCQ_CFG_n(REG_CQLBA, i));
+                              ufshcd_mcq_cfg_offset(REG_CQLBA, i));
                 /* Completion Queue Upper Base Address */
                 ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr),
-                              MCQ_CFG_n(REG_CQUBA, i));
+                              ufshcd_mcq_cfg_offset(REG_CQUBA, i));
                 /* Completion Queue Doorbell Address Offset */
-                ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQD, i),
-                              MCQ_CFG_n(REG_CQDAO, i));
+                ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQD, i),
+                              ufshcd_mcq_cfg_offset(REG_CQDAO, i));
                 /* Completion Queue Interrupt Status Address Offset */
-                ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQIS, i),
-                              MCQ_CFG_n(REG_CQISAO, i));
+                ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQIS, i),
+                              ufshcd_mcq_cfg_offset(REG_CQISAO, i));
 
                 /* Save the base addresses for quicker access */
                 hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP;
@@ -376,7 +404,7 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
 
                 /* Completion Queue Enable|Size to Completion Queue Attribute */
                 ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize,
-                              MCQ_CFG_n(REG_CQATTR, i));
+                              ufshcd_mcq_cfg_offset(REG_CQATTR, i));
 
                 /*
                  * Submission Qeueue Enable|Size|Completion Queue ID to
@@ -384,7 +412,7 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
                  */
                 ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize |
                               (i << QUEUE_ID_OFFSET),
-                              MCQ_CFG_n(REG_SQATTR, i));
+                              ufshcd_mcq_cfg_offset(REG_SQATTR, i));
         }
 }
 EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational);
@@ -399,9 +427,16 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);
 
 void ufshcd_mcq_enable(struct ufs_hba *hba)
 {
         ufshcd_rmwl(hba, MCQ_MODE_SELECT, MCQ_MODE_SELECT, REG_UFS_MEM_CFG);
+        hba->mcq_enabled = true;
 }
 EXPORT_SYMBOL_GPL(ufshcd_mcq_enable);
 
+void ufshcd_mcq_disable(struct ufs_hba *hba)
+{
+        ufshcd_rmwl(hba, MCQ_MODE_SELECT, 0, REG_UFS_MEM_CFG);
+        hba->mcq_enabled = false;
+}
+
 void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
 {
         ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);
```
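The new `ufshcd_mcq_queue_cfg_addr()` derives the queue-config register base from bits 23:16 (QCFGPTR) of the MCQ capabilities register, scaled by 0x200-byte units, instead of assuming a fixed `MCQ_QCFG_SIZE` stride from offset zero. A standalone sketch of just that arithmetic; the sample capabilities value is invented:

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors GENMASK(23, 16) and FIELD_GET() for this one field. */
#define QCFGPTR_SHIFT 16
#define QCFGPTR_MASK  (0xffu << QCFGPTR_SHIFT)

static unsigned int mcq_queue_cfg_addr(uint32_t mcq_capabilities)
{
	/* QCFGPTR counts 0x200-byte units from the start of the HCI space. */
	return ((mcq_capabilities & QCFGPTR_MASK) >> QCFGPTR_SHIFT) * 0x200;
}

int main(void)
{
	uint32_t caps = 0x00AB0000;	/* made-up register value: QCFGPTR = 0xAB */

	printf("queue config registers start at 0x%x\n",
	       mcq_queue_cfg_addr(caps));	/* 0xab * 0x200 = 0x15600 */
	return 0;
}
```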
```diff
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 3d049967f6bc..e80a32421a8c 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -1340,6 +1340,78 @@ static const struct attribute_group ufs_sysfs_flags_group = {
         .attrs = ufs_sysfs_device_flags,
 };
 
+static ssize_t max_number_of_rtt_show(struct device *dev,
+                                      struct device_attribute *attr, char *buf)
+{
+        struct ufs_hba *hba = dev_get_drvdata(dev);
+        u32 rtt;
+        int ret;
+
+        down(&hba->host_sem);
+        if (!ufshcd_is_user_access_allowed(hba)) {
+                up(&hba->host_sem);
+                return -EBUSY;
+        }
+
+        ufshcd_rpm_get_sync(hba);
+        ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+                                QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt);
+        ufshcd_rpm_put_sync(hba);
+
+        if (ret)
+                goto out;
+
+        ret = sysfs_emit(buf, "0x%08X\n", rtt);
+
+out:
+        up(&hba->host_sem);
+        return ret;
+}
+
+static ssize_t max_number_of_rtt_store(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t count)
+{
+        struct ufs_hba *hba = dev_get_drvdata(dev);
+        struct ufs_dev_info *dev_info = &hba->dev_info;
+        struct scsi_device *sdev;
+        unsigned int rtt;
+        int ret;
+
+        if (kstrtouint(buf, 0, &rtt))
+                return -EINVAL;
+
+        if (rtt > dev_info->rtt_cap) {
+                dev_err(dev, "rtt can be at most bDeviceRTTCap\n");
+                return -EINVAL;
+        }
+
+        down(&hba->host_sem);
+        if (!ufshcd_is_user_access_allowed(hba)) {
+                ret = -EBUSY;
+                goto out;
+        }
+
+        ufshcd_rpm_get_sync(hba);
+
+        shost_for_each_device(sdev, hba->host)
+                blk_mq_freeze_queue(sdev->request_queue);
+
+        ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+                                QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt);
+
+        shost_for_each_device(sdev, hba->host)
+                blk_mq_unfreeze_queue(sdev->request_queue);
+
+        ufshcd_rpm_put_sync(hba);
+
+out:
+        up(&hba->host_sem);
+        return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR_RW(max_number_of_rtt);
+
 static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
 {
         return idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS &&
@@ -1387,7 +1459,6 @@ UFS_ATTRIBUTE(max_data_in_size, _MAX_DATA_IN);
 UFS_ATTRIBUTE(max_data_out_size, _MAX_DATA_OUT);
 UFS_ATTRIBUTE(reference_clock_frequency, _REF_CLK_FREQ);
 UFS_ATTRIBUTE(configuration_descriptor_lock, _CONF_DESC_LOCK);
-UFS_ATTRIBUTE(max_number_of_rtt, _MAX_NUM_OF_RTT);
 UFS_ATTRIBUTE(exception_event_control, _EE_CONTROL);
 UFS_ATTRIBUTE(exception_event_status, _EE_STATUS);
 UFS_ATTRIBUTE(ffu_status, _FFU_STATUS);
```
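This hunk promotes `max_number_of_rtt` from a read-only `UFS_ATTRIBUTE()` to a full read-write attribute that freezes every request queue while `bMaxNumOfRTT` is rewritten. From user space it is an ordinary sysfs file; a small sketch that reads and optionally writes it, taking the (platform-specific) attribute path as an argument rather than guessing it:

```c
/* Usage: ./rtt <sysfs-attr-path> [new-value]
 * The attribute lives under the UFS host's device directory; the exact
 * path varies by platform, so it is passed in here. Writing needs root. */
#include <stdio.h>

int main(int argc, char **argv)
{
	int rtt;
	FILE *f;

	if (argc < 2)
		return 1;

	f = fopen(argv[1], "r");
	if (!f || fscanf(f, "%i", &rtt) != 1) {	/* show() emits "0x%08X" */
		perror(argv[1]);
		return 1;
	}
	fclose(f);
	printf("bMaxNumOfRTT = %d\n", rtt);

	if (argc == 3) {			/* store() bounds-checks against bDeviceRTTCap */
		f = fopen(argv[1], "w");
		if (!f || fprintf(f, "%s\n", argv[2]) < 0) {
			perror(argv[1]);
			return 1;
		}
		fclose(f);
	}
	return 0;
}
```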
```diff
diff --git a/drivers/ufs/core/ufshcd-crypto.c b/drivers/ufs/core/ufshcd-crypto.c
index f2c4422cab86..a714dad82cd1 100644
--- a/drivers/ufs/core/ufshcd-crypto.c
+++ b/drivers/ufs/core/ufshcd-crypto.c
@@ -95,8 +95,12 @@ static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile,
         return err;
 }
 
-static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot)
+static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile,
+                                       const struct blk_crypto_key *key,
+                                       unsigned int slot)
 {
+        struct ufs_hba *hba =
+                container_of(profile, struct ufs_hba, crypto_profile);
         /*
          * Clear the crypto cfg on the device. Clearing CFGE
          * might not be sufficient, so just clear the entire cfg.
@@ -106,16 +110,10 @@ static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot)
         return ufshcd_program_key(hba, &cfg, slot);
 }
 
-static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile,
-                                       const struct blk_crypto_key *key,
-                                       unsigned int slot)
-{
-        struct ufs_hba *hba =
-                container_of(profile, struct ufs_hba, crypto_profile);
-
-        return ufshcd_clear_keyslot(hba, slot);
-}
-
+/*
+ * Reprogram the keyslots if needed, and return true if CRYPTO_GENERAL_ENABLE
+ * should be used in the host controller initialization sequence.
+ */
 bool ufshcd_crypto_enable(struct ufs_hba *hba)
 {
         if (!(hba->caps & UFSHCD_CAP_CRYPTO))
@@ -123,6 +121,10 @@ bool ufshcd_crypto_enable(struct ufs_hba *hba)
 
         /* Reset might clear all keys, so reprogram all the keys. */
         blk_crypto_reprogram_all_keys(&hba->crypto_profile);
+
+        if (hba->quirks & UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE)
+                return false;
+
         return true;
 }
 
@@ -159,6 +161,9 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
         int err = 0;
         enum blk_crypto_mode_num blk_mode_num;
 
+        if (hba->quirks & UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE)
+                return 0;
+
         /*
          * Don't use crypto if either the hardware doesn't advertise the
          * standard crypto capability bit *or* if the vendor specific driver
@@ -228,9 +233,10 @@ void ufshcd_init_crypto(struct ufs_hba *hba)
         if (!(hba->caps & UFSHCD_CAP_CRYPTO))
                 return;
 
-        /* Clear all keyslots - the number of keyslots is (CFGC + 1) */
-        for (slot = 0; slot < hba->crypto_capabilities.config_count + 1; slot++)
-                ufshcd_clear_keyslot(hba, slot);
+        /* Clear all keyslots. */
+        for (slot = 0; slot < hba->crypto_profile.num_slots; slot++)
+                hba->crypto_profile.ll_ops.keyslot_evict(&hba->crypto_profile,
+                                                         NULL, slot);
 }
 
 void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q)
```
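The evict callback now recovers the hba directly from the embedded `crypto_profile` via `container_of()`, which is what lets the separate `ufshcd_clear_keyslot()` wrapper be folded away, and `ufshcd_init_crypto()` now clears slots through the profile's own ops table. A self-contained illustration of that recovery pattern, with simplified stand-in structs rather than the real ones:

```c
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct profile {
	int num_slots;
};

struct hba {
	const char *name;
	struct profile crypto_profile;	/* embedded, like in struct ufs_hba */
};

/* The callback receives only the profile, yet can reach the owning hba. */
static void keyslot_evict(struct profile *p, unsigned int slot)
{
	struct hba *hba = container_of(p, struct hba, crypto_profile);

	printf("%s: evict slot %u\n", hba->name, slot);
}

int main(void)
{
	struct hba hba = {
		.name = "ufshcd",
		.crypto_profile = { .num_slots = 4 },
	};

	/* like ufshcd_init_crypto(): evict every slot through the callback */
	for (int slot = 0; slot < hba.crypto_profile.num_slots; slot++)
		keyslot_evict(&hba.crypto_profile, slot);
	return 0;
}
```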
```diff
diff --git a/drivers/ufs/core/ufshcd-crypto.h b/drivers/ufs/core/ufshcd-crypto.h
index be8596f20ba2..89bb97c14c15 100644
--- a/drivers/ufs/core/ufshcd-crypto.h
+++ b/drivers/ufs/core/ufshcd-crypto.h
@@ -37,6 +37,33 @@ ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp,
         h->dunu = cpu_to_le32(upper_32_bits(lrbp->data_unit_num));
 }
 
+static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba,
+                                          struct ufshcd_lrb *lrbp)
+{
+        struct scsi_cmnd *cmd = lrbp->cmd;
+        const struct bio_crypt_ctx *crypt_ctx = scsi_cmd_to_rq(cmd)->crypt_ctx;
+
+        if (crypt_ctx && hba->vops && hba->vops->fill_crypto_prdt)
+                return hba->vops->fill_crypto_prdt(hba, crypt_ctx,
+                                                   lrbp->ucd_prdt_ptr,
+                                                   scsi_sg_count(cmd));
+        return 0;
+}
+
+static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba,
+                                            struct ufshcd_lrb *lrbp)
+{
+        if (!(hba->quirks & UFSHCD_QUIRK_KEYS_IN_PRDT))
+                return;
+
+        if (!(scsi_cmd_to_rq(lrbp->cmd)->crypt_ctx))
+                return;
+
+        /* Zeroize the PRDT because it can contain cryptographic keys. */
+        memzero_explicit(lrbp->ucd_prdt_ptr,
+                         ufshcd_sg_entry_size(hba) * scsi_sg_count(lrbp->cmd));
+}
+
 bool ufshcd_crypto_enable(struct ufs_hba *hba);
 
 int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba);
@@ -54,6 +81,15 @@ static inline void ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp,
                                 struct request_desc_header *h) { }
 
+static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba,
+                                          struct ufshcd_lrb *lrbp)
+{
+        return 0;
+}
+
+static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba,
+                                            struct ufshcd_lrb *lrbp) { }
+
 static inline bool ufshcd_crypto_enable(struct ufs_hba *hba)
 {
         return false;
```
```diff
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index f42d99ce5bf1..ce36154ce963 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -64,16 +64,11 @@ void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
 void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
                           struct cq_entry *cqe);
 int ufshcd_mcq_init(struct ufs_hba *hba);
+void ufshcd_mcq_disable(struct ufs_hba *hba);
 int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
 int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
-void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
-void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
-u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
-void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
 struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
                                            struct request *req);
-unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
-                                       struct ufs_hw_queue *hwq);
 void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
                                     struct ufs_hw_queue *hwq);
 bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd);
@@ -255,14 +250,6 @@ static inline int ufshcd_vops_mcq_config_resource(struct ufs_hba *hba)
         return -EOPNOTSUPP;
 }
 
-static inline int ufshcd_mcq_vops_get_hba_mac(struct ufs_hba *hba)
-{
-        if (hba->vops && hba->vops->get_hba_mac)
-                return hba->vops->get_hba_mac(hba);
-
-        return -EOPNOTSUPP;
-}
-
 static inline int ufshcd_mcq_vops_op_runtime_config(struct ufs_hba *hba)
 {
         if (hba->vops && hba->vops->op_runtime_config)
```
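`ufshcd_crypto_clear_prdt()` uses `memzero_explicit()` rather than a plain `memset()` so the compiler cannot optimize away the scrub of key material as a dead store. In user space, glibc's `explicit_bzero()` plays the same role; a short sketch under that assumption, with an arbitrary stand-in for the PRDT entry size:

```c
#define _DEFAULT_SOURCE		/* for explicit_bzero() on glibc >= 2.25 */
#include <stdlib.h>
#include <string.h>

#define SG_ENTRY_SIZE 32	/* invented stand-in for ufshcd_sg_entry_size() */

int main(void)
{
	int sg_count = 8;
	unsigned char *prdt = malloc((size_t)SG_ENTRY_SIZE * sg_count);

	if (!prdt)
		return 1;
	/* ... PRDT filled with per-request key material here ... */

	/*
	 * A memset() right before free() may be elided as a dead store;
	 * explicit_bzero()/memzero_explicit() guarantee the write happens.
	 */
	explicit_bzero(prdt, (size_t)SG_ENTRY_SIZE * sg_count);
	free(prdt);
	return 0;
}
```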
```diff
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 702fa1996992..dc757ba47522 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -102,6 +102,9 @@
 /* Default RTC update every 10 seconds */
 #define UFS_RTC_UPDATE_INTERVAL_MS (10 * MSEC_PER_SEC)
 
+/* bMaxNumOfRTT is equal to two after device manufacturing */
+#define DEFAULT_MAX_NUM_RTT 2
+
 /* UFSHC 4.0 compliant HC support this mode. */
 static bool use_mcq_mode = true;
@@ -161,8 +164,6 @@ EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
 enum {
         UFSHCD_MAX_CHANNEL = 0,
         UFSHCD_MAX_ID = 1,
-        UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
-        UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
 };
 
 static const char *const ufshcd_state_name[] = {
@@ -452,7 +453,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
 
         intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 
-        if (is_mcq_enabled(hba)) {
+        if (hba->mcq_enabled) {
                 struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
 
                 hwq_id = hwq->id;
@@ -1560,7 +1561,8 @@ static int ufshcd_devfreq_target(struct device *dev,
                 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 
 out:
-        if (sched_clk_scaling_suspend_work && !scale_up)
+        if (sched_clk_scaling_suspend_work &&
+            (!scale_up || hba->clk_scaling.suspend_on_no_request))
                 queue_work(hba->clk_scaling.workq,
                            &hba->clk_scaling.suspend_work);
 
@@ -2300,7 +2302,7 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
         if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
                 ufshcd_start_monitor(hba, lrbp);
 
-        if (is_mcq_enabled(hba)) {
+        if (hba->mcq_enabled) {
                 int utrd_size = sizeof(struct utp_transfer_req_desc);
                 struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr;
                 struct utp_transfer_req_desc *dest;
@@ -2400,11 +2402,13 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
                 hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;
 
         /* nutrs and nutmrs are 0 based values */
-        hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
+        hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1;
         hba->nutmrs =
         ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
         hba->reserved_slot = hba->nutrs - 1;
 
+        hba->nortt = FIELD_GET(MASK_NUMBER_OUTSTANDING_RTT, hba->capabilities) + 1;
+
         /* Read crypto capabilities */
         err = ufshcd_hba_init_crypto_capabilities(hba);
         if (err) {
@@ -2636,7 +2640,7 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 
         ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
 
-        return 0;
+        return ufshcd_crypto_fill_prdt(hba, lrbp);
 }
 
 /**
@@ -2997,7 +3001,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
                 goto out;
         }
 
-        if (is_mcq_enabled(hba))
+        if (hba->mcq_enabled)
                 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
 
         ufshcd_send_command(hba, tag, hwq);
@@ -3056,7 +3060,7 @@ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
         unsigned long flags;
         int err;
 
-        if (is_mcq_enabled(hba)) {
+        if (hba->mcq_enabled) {
                 /*
                  * MCQ mode. Clean up the MCQ resources similar to
                  * what the ufshcd_utrl_clear() does for SDB mode.
```
```diff
@@ -3166,7 +3170,7 @@ retry:
                         __func__, lrbp->task_tag);
 
                 /* MCQ mode */
-                if (is_mcq_enabled(hba)) {
+                if (hba->mcq_enabled) {
                         /* successfully cleared the command, retry if needed */
                         if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
                                 err = -EAGAIN;
@@ -3988,11 +3992,11 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
  */
 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
 {
-        struct uic_command uic_cmd = {0};
+        struct uic_command uic_cmd = {
+                .command = UIC_CMD_DME_LINK_STARTUP,
+        };
         int ret;
 
-        uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
-
         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
         if (ret)
                 dev_dbg(hba->dev,
@@ -4010,11 +4014,11 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
  */
 static int ufshcd_dme_reset(struct ufs_hba *hba)
 {
-        struct uic_command uic_cmd = {0};
+        struct uic_command uic_cmd = {
+                .command = UIC_CMD_DME_RESET,
+        };
         int ret;
 
-        uic_cmd.command = UIC_CMD_DME_RESET;
-
         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
         if (ret)
                 dev_err(hba->dev,
@@ -4049,11 +4053,11 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
  */
 static int ufshcd_dme_enable(struct ufs_hba *hba)
 {
-        struct uic_command uic_cmd = {0};
+        struct uic_command uic_cmd = {
+                .command = UIC_CMD_DME_ENABLE,
+        };
         int ret;
 
-        uic_cmd.command = UIC_CMD_DME_ENABLE;
-
         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
         if (ret)
                 dev_err(hba->dev,
@@ -4106,7 +4110,12 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
                         u8 attr_set, u32 mib_val, u8 peer)
 {
-        struct uic_command uic_cmd = {0};
+        struct uic_command uic_cmd = {
+                .command = peer ? UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET,
+                .argument1 = attr_sel,
+                .argument2 = UIC_ARG_ATTR_TYPE(attr_set),
+                .argument3 = mib_val,
+        };
         static const char *const action[] = {
                 "dme-set",
                 "dme-peer-set"
@@ -4115,12 +4124,6 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
         int ret;
         int retries = UFS_UIC_COMMAND_RETRIES;
 
-        uic_cmd.command = peer ?
-                UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
-        uic_cmd.argument1 = attr_sel;
-        uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
-        uic_cmd.argument3 = mib_val;
-
         do {
                 /* for peer attributes we retry upon failure */
                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
@@ -4150,7 +4153,10 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
                         u32 *mib_val, u8 peer)
 {
-        struct uic_command uic_cmd = {0};
+        struct uic_command uic_cmd = {
+                .command = peer ? UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET,
+                .argument1 = attr_sel,
+        };
         static const char *const action[] = {
                 "dme-get",
                 "dme-peer-get"
@@ -4184,10 +4190,6 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
                 }
         }
 
-        uic_cmd.command = peer ?
-                UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
-        uic_cmd.argument1 = attr_sel;
-
         do {
                 /* for peer attributes we retry upon failure */
                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
```
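The `uic_command` hunks above replace `= {0}` followed by field-by-field assignment with designated initializers. Both forms zero every member not named, so the conversion is behavior-preserving; a compact demonstration with a toy struct (not the kernel's, and the opcode values are invented):

```c
#include <assert.h>
#include <string.h>

struct uic_command_like {
	unsigned int command;
	unsigned int argument1, argument2, argument3;
};

int main(void)
{
	/* old style: zero-initialize, then assign */
	struct uic_command_like a = {0};
	a.command = 0x16;	/* made-up opcode */
	a.argument1 = 0x1568;

	/* new style: designated initializers; unnamed fields become 0 */
	struct uic_command_like b = {
		.command = 0x16,
		.argument1 = 0x1568,
	};

	assert(memcmp(&a, &b, sizeof(a)) == 0);
	return 0;
}
```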
```diff
@@ -4320,7 +4322,11 @@ out_unlock:
  */
 int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
 {
-        struct uic_command uic_cmd = {0};
+        struct uic_command uic_cmd = {
+                .command = UIC_CMD_DME_SET,
+                .argument1 = UIC_ARG_MIB(PA_PWRMODE),
+                .argument3 = mode,
+        };
         int ret;
 
         if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
@@ -4333,9 +4339,6 @@ int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
                 }
         }
 
-        uic_cmd.command = UIC_CMD_DME_SET;
-        uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
-        uic_cmd.argument3 = mode;
         ufshcd_hold(hba);
         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
         ufshcd_release(hba);
@@ -4376,13 +4379,14 @@ EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
 
 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 {
-        int ret;
-        struct uic_command uic_cmd = {0};
+        struct uic_command uic_cmd = {
+                .command = UIC_CMD_DME_HIBER_ENTER,
+        };
         ktime_t start = ktime_get();
+        int ret;
 
         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
 
-        uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
@@ -4400,13 +4404,14 @@ EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
 
 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 {
-        struct uic_command uic_cmd = {0};
+        struct uic_command uic_cmd = {
+                .command = UIC_CMD_DME_HIBER_EXIT,
+        };
         int ret;
         ktime_t start = ktime_get();
 
         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
 
-        uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
@@ -5476,6 +5481,7 @@ void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
         struct scsi_cmnd *cmd = lrbp->cmd;
 
         scsi_dma_unmap(cmd);
+        ufshcd_crypto_clear_prdt(hba, lrbp);
         ufshcd_release(hba);
         ufshcd_clk_scaling_update_busy(hba);
 }
@@ -5558,7 +5564,7 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
         u32 tr_doorbell;
         struct ufs_hw_queue *hwq;
 
-        if (is_mcq_enabled(hba)) {
+        if (hba->mcq_enabled) {
                 hwq = &hba->uhq[queue_num];
 
                 return ufshcd_mcq_poll_cqe_lock(hba, hwq);
@@ -6199,7 +6205,7 @@ out:
 /* Complete requests that have door-bell cleared */
 static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
 {
-        if (is_mcq_enabled(hba))
+        if (hba->mcq_enabled)
                 ufshcd_mcq_compl_pending_transfer(hba, force_compl);
         else
                 ufshcd_transfer_req_compl(hba);
@@ -6456,7 +6462,7 @@ static bool ufshcd_abort_one(struct request *rq, void *priv)
                 *ret ? "failed" : "succeeded");
 
         /* Release cmd in MCQ mode if abort succeeds */
-        if (is_mcq_enabled(hba) && (*ret == 0)) {
+        if (hba->mcq_enabled && (*ret == 0)) {
                 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
                 if (!hwq)
                         return 0;
@@ -7389,7 +7395,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
                 goto out;
         }
 
-        if (is_mcq_enabled(hba)) {
+        if (hba->mcq_enabled) {
                 for (pos = 0; pos < hba->nutrs; pos++) {
                         lrbp = &hba->lrb[pos];
                         if (ufshcd_cmd_inflight(lrbp->cmd) &&
@@ -7485,7 +7491,7 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
                  */
                 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
                         __func__, tag);
-                if (is_mcq_enabled(hba)) {
+                if (hba->mcq_enabled) {
                         /* MCQ mode */
                         if (ufshcd_cmd_inflight(lrbp->cmd)) {
                                 /* sleep for max. 200us same delay as in SDB mode */
```
```diff
@@ -7563,7 +7569,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 
         ufshcd_hold(hba);
 
-        if (!is_mcq_enabled(hba)) {
+        if (!hba->mcq_enabled) {
                 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
                 if (!test_bit(tag, &hba->outstanding_reqs)) {
                         /* If command is already aborted/completed, return FAILED. */
@@ -7596,7 +7602,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
         }
 
         hba->req_abort_count++;
-        if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) {
+        if (!hba->mcq_enabled && !(reg & (1 << tag))) {
                 /* only execute this code in single doorbell mode */
                 dev_err(hba->dev,
                         "%s: cmd was completed, but without a notifying intr, tag = %d",
@@ -7623,7 +7629,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
                 goto release;
         }
 
-        if (is_mcq_enabled(hba)) {
+        if (hba->mcq_enabled) {
                 /* MCQ mode. Branch off to handle abort for mcq mode */
                 err = ufshcd_mcq_abort(cmd);
                 goto release;
@@ -8125,6 +8131,38 @@ out:
         dev_info->b_ext_iid_en = ext_iid_en;
 }
 
+static void ufshcd_set_rtt(struct ufs_hba *hba)
+{
+        struct ufs_dev_info *dev_info = &hba->dev_info;
+        u32 rtt = 0;
+        u32 dev_rtt = 0;
+        int host_rtt_cap = hba->vops && hba->vops->max_num_rtt ?
+                            hba->vops->max_num_rtt : hba->nortt;
+
+        /* RTT override makes sense only for UFS-4.0 and above */
+        if (dev_info->wspecversion < 0x400)
+                return;
+
+        if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+                                    QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &dev_rtt)) {
+                dev_err(hba->dev, "failed reading bMaxNumOfRTT\n");
+                return;
+        }
+
+        /* do not override if it was already written */
+        if (dev_rtt != DEFAULT_MAX_NUM_RTT)
+                return;
+
+        rtt = min_t(int, dev_info->rtt_cap, host_rtt_cap);
+
+        if (rtt == dev_rtt)
+                return;
+
+        if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+                                    QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt))
+                dev_err(hba->dev, "failed writing bMaxNumOfRTT\n");
+}
+
 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
                              const struct ufs_dev_quirk *fixups)
 {
@@ -8260,6 +8298,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
                 desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
         dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
 
+        dev_info->rtt_cap = desc_buf[DEVICE_DESC_PARAM_RTT_CAP];
+
         model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
 
         err = ufshcd_read_string_desc(hba, model_index,
@@ -8512,6 +8552,8 @@ static int ufshcd_device_params_init(struct ufs_hba *hba)
                 goto out;
         }
 
+        ufshcd_set_rtt(hba);
+
         ufshcd_get_ref_clk_gating_wait(hba);
 
         if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
@@ -8642,6 +8684,9 @@ static int ufshcd_alloc_mcq(struct ufs_hba *hba)
         if (ret)
                 goto err;
 
+        hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
+        hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
+
         return 0;
 err:
         hba->nutrs = old_nutrs;
@@ -8663,12 +8708,6 @@ static void ufshcd_config_mcq(struct ufs_hba *hba)
         ufshcd_mcq_make_queues_operational(hba);
         ufshcd_mcq_config_mac(hba, hba->nutrs);
 
-        hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
-        hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
-
-        ufshcd_mcq_enable(hba);
-        hba->mcq_enabled = true;
-
         dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
                  hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
                  hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
```
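`ufshcd_set_rtt()` above only overrides `bMaxNumOfRTT` when the device still reports the manufacturing default (2), and then clamps the new value to the smaller of the device's `bDeviceRTTCap` and the host's outstanding-RTT capability. That decision logic in isolation, with invented sample values:

```c
#include <stdio.h>

#define DEFAULT_MAX_NUM_RTT 2

/* Returns the value to write, or 0 if nothing should be written. */
static unsigned int pick_rtt(unsigned int dev_rtt,	/* current bMaxNumOfRTT */
			     unsigned int dev_rtt_cap,	/* bDeviceRTTCap */
			     unsigned int host_cap)	/* host's nortt */
{
	if (dev_rtt != DEFAULT_MAX_NUM_RTT)
		return 0;	/* already tuned elsewhere: leave it alone */

	unsigned int rtt = dev_rtt_cap < host_cap ? dev_rtt_cap : host_cap;

	return rtt == dev_rtt ? 0 : rtt;
}

int main(void)
{
	printf("%u\n", pick_rtt(2, 32, 16));	/* -> 16, min of the two caps */
	printf("%u\n", pick_rtt(9, 32, 16));	/* -> 0, already overridden */
	printf("%u\n", pick_rtt(2, 2, 16));	/* -> 0, no change needed */
	return 0;
}
```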
```diff
@@ -8696,8 +8735,10 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
                 ufshcd_set_link_active(hba);
 
                 /* Reconfigure MCQ upon reset */
-                if (is_mcq_enabled(hba) && !init_dev_params)
+                if (hba->mcq_enabled && !init_dev_params) {
                         ufshcd_config_mcq(hba);
+                        ufshcd_mcq_enable(hba);
+                }
 
         /* Verify device initialization by sending NOP OUT UPIU */
         ret = ufshcd_verify_dev_init(hba);
@@ -8718,11 +8759,13 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
                 if (ret)
                         return ret;
                 if (is_mcq_supported(hba) && !hba->scsi_host_added) {
+                        ufshcd_mcq_enable(hba);
                         ret = ufshcd_alloc_mcq(hba);
                         if (!ret) {
                                 ufshcd_config_mcq(hba);
                         } else {
                                 /* Continue with SDB mode */
+                                ufshcd_mcq_disable(hba);
                                 use_mcq_mode = false;
                                 dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
                                         ret);
@@ -8736,6 +8779,7 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
                 } else if (is_mcq_supported(hba)) {
                         /* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */
                         ufshcd_config_mcq(hba);
+                        ufshcd_mcq_enable(hba);
                 }
         }
 
@@ -8921,8 +8965,6 @@ static const struct scsi_host_template ufshcd_driver_template = {
         .eh_timed_out = ufshcd_eh_timed_out,
         .this_id = -1,
         .sg_tablesize = SG_ALL,
-        .cmd_per_lun = UFSHCD_CMD_PER_LUN,
-        .can_queue = UFSHCD_CAN_QUEUE,
         .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
         .max_sectors = SZ_1M / SECTOR_SIZE,
         .max_host_blocked = 1,
@@ -10179,7 +10221,8 @@ void ufshcd_remove(struct ufs_hba *hba)
         blk_mq_destroy_queue(hba->tmf_queue);
         blk_put_queue(hba->tmf_queue);
         blk_mq_free_tag_set(&hba->tmf_tag_set);
-        scsi_remove_host(hba->host);
+        if (hba->scsi_host_added)
+                scsi_remove_host(hba->host);
         /* disable interrupts */
         ufshcd_disable_intr(hba, hba->intr_mask);
         ufshcd_hba_stop(hba);
@@ -10458,6 +10501,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
                         dev_err(hba->dev, "scsi_add_host failed\n");
                         goto out_disable;
                 }
+                hba->scsi_host_added = true;
         }
 
         hba->tmf_tag_set = (struct blk_mq_tag_set) {
@@ -10540,7 +10584,8 @@ free_tmf_queue:
 free_tmf_tag_set:
         blk_mq_free_tag_set(&hba->tmf_tag_set);
 out_remove_scsi_host:
-        scsi_remove_host(hba->host);
+        if (hba->scsi_host_added)
+                scsi_remove_host(hba->host);
 out_disable:
         hba->is_irq_enabled = false;
         ufshcd_hba_exit(hba);
```
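The new `ufshcd_mcq_enable()`/`ufshcd_mcq_disable()` pair is a read-modify-write of the MCQ_MODE_SELECT bit in REG_UFS_MEM_CFG, mirrored into `hba->mcq_enabled` so hot paths can test a cached flag instead of reading the register. A sketch of that read-modify-write helper against a mock register; the bit position is invented here:

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t mock_reg;		/* stands in for REG_UFS_MEM_CFG */
#define MCQ_MODE_SELECT (1u << 0)	/* bit position invented for the sketch */

/* Like ufshcd_rmwl(): clear the bits in 'mask', then or-in 'val'. */
static void rmwl(uint32_t mask, uint32_t val)
{
	mock_reg = (mock_reg & ~mask) | (val & mask);
}

int main(void)
{
	int mcq_enabled;

	rmwl(MCQ_MODE_SELECT, MCQ_MODE_SELECT);	/* ufshcd_mcq_enable() */
	mcq_enabled = 1;
	printf("reg=%#x enabled=%d\n", mock_reg, mcq_enabled);

	rmwl(MCQ_MODE_SELECT, 0);		/* ufshcd_mcq_disable() */
	mcq_enabled = 0;
	printf("reg=%#x enabled=%d\n", mock_reg, mcq_enabled);
	return 0;
}
```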