Diffstat (limited to 'drivers/ufs')
26 files changed, 2735 insertions, 1358 deletions
diff --git a/drivers/ufs/core/ufs-fault-injection.c b/drivers/ufs/core/ufs-fault-injection.c index 169540417079..55db38e75cc4 100644 --- a/drivers/ufs/core/ufs-fault-injection.c +++ b/drivers/ufs/core/ufs-fault-injection.c @@ -3,6 +3,7 @@ #include <linux/kconfig.h> #include <linux/types.h> #include <linux/fault-inject.h> +#include <linux/debugfs.h> #include <linux/module.h> #include <ufs/ufshcd.h> #include "ufs-fault-injection.h" diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 0787456c2b89..240ce135bbfb 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -7,7 +7,7 @@ * Can Guo <quic_cang@quicinc.com> */ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/platform_device.h> @@ -18,6 +18,7 @@ #include <linux/iopoll.h> #define MAX_QUEUE_SUP GENMASK(7, 0) +#define QCFGPTR GENMASK(23, 16) #define UFS_MCQ_MIN_RW_QUEUES 2 #define UFS_MCQ_MIN_READ_QUEUES 0 #define UFS_MCQ_MIN_POLL_QUEUES 0 @@ -25,7 +26,6 @@ #define QUEUE_ID_OFFSET 16 #define MCQ_CFG_MAC_MASK GENMASK(16, 8) -#define MCQ_QCFG_SIZE 0x40 #define MCQ_ENTRY_SIZE_IN_DWORD 8 #define CQE_UCD_BA GENMASK_ULL(63, 7) @@ -94,7 +94,7 @@ void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds) val = ufshcd_readl(hba, REG_UFS_MCQ_CFG); val &= ~MCQ_CFG_MAC_MASK; - val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds); + val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds - 1); ufshcd_writel(hba, val, REG_UFS_MCQ_CFG); } EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac); @@ -105,19 +105,31 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac); * @hba: per adapter instance * @req: pointer to the request to be issued * - * Return: the hardware queue instance on which the request would - * be queued. + * Return: the hardware queue instance on which the request will be or has + * been queued. %NULL if the request has already been freed. */ struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba, struct request *req) { - u32 utag = blk_mq_unique_tag(req); - u32 hwq = blk_mq_unique_tag_to_hwq(utag); + struct blk_mq_hw_ctx *hctx = READ_ONCE(req->mq_hctx); - return &hba->uhq[hwq]; + return hctx ? &hba->uhq[hctx->queue_num] : NULL; } /** + * ufshcd_mcq_queue_cfg_addr - get an start address of the MCQ Queue Config + * Registers. + * @hba: per adapter instance + * + * Return: Start address of MCQ Queue Config Registers in HCI + */ +unsigned int ufshcd_mcq_queue_cfg_addr(struct ufs_hba *hba) +{ + return FIELD_GET(QCFGPTR, hba->mcq_capabilities) * 0x200; +} +EXPORT_SYMBOL_GPL(ufshcd_mcq_queue_cfg_addr); + +/** * ufshcd_mcq_decide_queue_depth - decide the queue depth * @hba: per adapter instance * @@ -125,7 +137,6 @@ struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba, * * MAC - Max. Active Command of the Host Controller (HC) * HC wouldn't send more than this commands to the device. - * It is mandatory to implement get_hba_mac() to enable MCQ mode. * Calculates and adjusts the queue depth based on the depth * supported by the HC and ufs device. */ @@ -133,12 +144,21 @@ int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba) { int mac; - /* Mandatory to implement get_hba_mac() */ - mac = ufshcd_mcq_vops_get_hba_mac(hba); - if (mac < 0) { - dev_err(hba->dev, "Failed to get mac, err=%d\n", mac); - return mac; + if (!hba->vops || !hba->vops->get_hba_mac) { + /* + * Extract the maximum number of active transfer tasks value + * from the host controller capabilities register. This value is + * 0-based. 
+ */ + hba->capabilities = + ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); + mac = hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_MCQ; + mac++; + } else { + mac = hba->vops->get_hba_mac(hba); } + if (mac < 0) + goto err; WARN_ON_ONCE(!hba->dev_info.bqueuedepth); /* @@ -147,6 +167,10 @@ int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba) * shared queuing architecture is enabled. */ return min_t(int, mac, hba->dev_info.bqueuedepth); + +err: + dev_err(hba->dev, "Failed to get mac, err=%d\n", mac); + return mac; } static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba) @@ -166,6 +190,15 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba) return -EOPNOTSUPP; } + /* + * Device should support at least one I/O queue to handle device + * commands via hba->dev_cmd_queue. + */ + if (hba_maxq == poll_queues) { + dev_err(hba->dev, "At least one non-poll queue required\n"); + return -EOPNOTSUPP; + } + rem = hba_maxq; if (rw_queues) { @@ -228,12 +261,6 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba) return 0; } - -/* Operation and runtime registers configuration */ -#define MCQ_CFG_n(r, i) ((r) + MCQ_QCFG_SIZE * (i)) -#define MCQ_OPR_OFFSET_n(p, i) \ - (hba->mcq_opr[(p)].offset + hba->mcq_opr[(p)].stride * (i)) - static void __iomem *mcq_opr_base(struct ufs_hba *hba, enum ufshcd_mcq_opr n, int i) { @@ -258,9 +285,7 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_write_cqis); * Current MCQ specification doesn't provide a Task Tag or its equivalent in * the Completion Queue Entry. Find the Task Tag using an indirect method. */ -static int ufshcd_mcq_get_tag(struct ufs_hba *hba, - struct ufs_hw_queue *hwq, - struct cq_entry *cqe) +static int ufshcd_mcq_get_tag(struct ufs_hba *hba, struct cq_entry *cqe) { u64 addr; @@ -278,7 +303,7 @@ static void ufshcd_mcq_process_cqe(struct ufs_hba *hba, struct ufs_hw_queue *hwq) { struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq); - int tag = ufshcd_mcq_get_tag(hba, hwq, cqe); + int tag = ufshcd_mcq_get_tag(hba, cqe); if (cqe->command_desc_base_addr) { ufshcd_compl_one_cqe(hba, tag, cqe); @@ -340,29 +365,29 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba) /* Submission Queue Lower Base Address */ ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr), - MCQ_CFG_n(REG_SQLBA, i)); + ufshcd_mcq_cfg_offset(REG_SQLBA, i)); /* Submission Queue Upper Base Address */ ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr), - MCQ_CFG_n(REG_SQUBA, i)); + ufshcd_mcq_cfg_offset(REG_SQUBA, i)); /* Submission Queue Doorbell Address Offset */ - ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i), - MCQ_CFG_n(REG_SQDAO, i)); + ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQD, i), + ufshcd_mcq_cfg_offset(REG_SQDAO, i)); /* Submission Queue Interrupt Status Address Offset */ - ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i), - MCQ_CFG_n(REG_SQISAO, i)); + ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQIS, i), + ufshcd_mcq_cfg_offset(REG_SQISAO, i)); /* Completion Queue Lower Base Address */ ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr), - MCQ_CFG_n(REG_CQLBA, i)); + ufshcd_mcq_cfg_offset(REG_CQLBA, i)); /* Completion Queue Upper Base Address */ ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr), - MCQ_CFG_n(REG_CQUBA, i)); + ufshcd_mcq_cfg_offset(REG_CQUBA, i)); /* Completion Queue Doorbell Address Offset */ - ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQD, i), - MCQ_CFG_n(REG_CQDAO, i)); + ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQD, i), + ufshcd_mcq_cfg_offset(REG_CQDAO, i)); /* Completion Queue Interrupt Status Address Offset */ - 
ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQIS, i), - MCQ_CFG_n(REG_CQISAO, i)); + ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQIS, i), + ufshcd_mcq_cfg_offset(REG_CQISAO, i)); /* Save the base addresses for quicker access */ hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP; @@ -379,7 +404,7 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba) /* Completion Queue Enable|Size to Completion Queue Attribute */ ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize, - MCQ_CFG_n(REG_CQATTR, i)); + ufshcd_mcq_cfg_offset(REG_CQATTR, i)); /* * Submission Qeueue Enable|Size|Completion Queue ID to @@ -387,11 +412,24 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba) */ ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize | (i << QUEUE_ID_OFFSET), - MCQ_CFG_n(REG_SQATTR, i)); + ufshcd_mcq_cfg_offset(REG_SQATTR, i)); } } EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational); +void ufshcd_mcq_enable(struct ufs_hba *hba) +{ + ufshcd_rmwl(hba, MCQ_MODE_SELECT, MCQ_MODE_SELECT, REG_UFS_MEM_CFG); + hba->mcq_enabled = true; +} +EXPORT_SYMBOL_GPL(ufshcd_mcq_enable); + +void ufshcd_mcq_disable(struct ufs_hba *hba) +{ + ufshcd_rmwl(hba, MCQ_MODE_SELECT, 0, REG_UFS_MEM_CFG); + hba->mcq_enabled = false; +} + void ufshcd_mcq_enable_esi(struct ufs_hba *hba) { ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2, @@ -511,6 +549,8 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag) if (!cmd) return -EINVAL; hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); + if (!hwq) + return 0; } else { hwq = hba->dev_cmd_queue; } @@ -529,17 +569,22 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag) opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id); writel(nexus, opr_sqd_base + REG_SQCTI); - /* SQRTCy.ICU = 1 */ - writel(SQ_ICU, opr_sqd_base + REG_SQRTC); + /* Initiate Cleanup */ + writel(readl(opr_sqd_base + REG_SQRTC) | SQ_ICU, + opr_sqd_base + REG_SQRTC); - /* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */ + /* Wait until SQRTSy.CUS = 1. Report SQRTSy.RTC. */ reg = opr_sqd_base + REG_SQRTS; err = read_poll_timeout(readl, val, val & SQ_CUS, 20, MCQ_POLL_US, false, reg); if (err) - dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%ld\n", - __func__, id, task_tag, - FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg))); + dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%d\n", + __func__, id, task_tag, err); + else + dev_info(hba->dev, + "%s, hwq %d: cleanup return code (RTC) %ld\n", + __func__, id, + FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg))); if (ufshcd_mcq_sq_start(hba, hwq)) err = -ETIMEDOUT; @@ -597,8 +642,7 @@ static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba, addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA; while (sq_head_slot != hwq->sq_tail_slot) { - utrd = hwq->sqe_base_addr + - sq_head_slot * sizeof(struct utp_transfer_req_desc); + utrd = hwq->sqe_base_addr + sq_head_slot; match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA; if (addr == match) { ufshcd_mcq_nullify_sqe(utrd); @@ -631,20 +675,20 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) struct ufshcd_lrb *lrbp = &hba->lrb[tag]; struct ufs_hw_queue *hwq; unsigned long flags; - int err = FAILED; + int err; if (!ufshcd_cmd_inflight(lrbp->cmd)) { dev_err(hba->dev, "%s: skip abort. cmd at tag %d already completed.\n", __func__, tag); - goto out; + return FAILED; } /* Skip task abort in case previous aborts failed and report failure */ if (lrbp->req_abort_skip) { dev_err(hba->dev, "%s: skip abort. 
tag %d failed earlier\n", __func__, tag); - goto out; + return FAILED; } hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); @@ -656,7 +700,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) */ dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n", __func__, hwq->id, tag); - goto out; + return FAILED; } /* @@ -664,18 +708,17 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) * in the completion queue either. Query the device to see if * the command is being processed in the device. */ - if (ufshcd_try_to_abort_task(hba, tag)) { + err = ufshcd_try_to_abort_task(hba, tag); + if (err) { dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err); lrbp->req_abort_skip = true; - goto out; + return FAILED; } - err = SUCCESS; spin_lock_irqsave(&hwq->cq_lock, flags); if (ufshcd_cmd_inflight(lrbp->cmd)) ufshcd_release_scsi_cmd(hba, lrbp); spin_unlock_irqrestore(&hwq->cq_lock, flags); -out: - return err; + return SUCCESS; } diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c index e6d12289e017..3438269a5440 100644 --- a/drivers/ufs/core/ufs-sysfs.c +++ b/drivers/ufs/core/ufs-sysfs.c @@ -4,7 +4,7 @@ #include <linux/err.h> #include <linux/string.h> #include <linux/bitfield.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <ufs/ufs.h> #include <ufs/unipro.h> @@ -198,6 +198,24 @@ static u32 ufshcd_us_to_ahit(unsigned int timer) FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale); } +static int ufshcd_read_hci_reg(struct ufs_hba *hba, u32 *val, unsigned int reg) +{ + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + up(&hba->host_sem); + return -EBUSY; + } + + ufshcd_rpm_get_sync(hba); + ufshcd_hold(hba); + *val = ufshcd_readl(hba, reg); + ufshcd_release(hba); + ufshcd_rpm_put_sync(hba); + + up(&hba->host_sem); + return 0; +} + static ssize_t auto_hibern8_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -208,23 +226,11 @@ static ssize_t auto_hibern8_show(struct device *dev, if (!ufshcd_is_auto_hibern8_supported(hba)) return -EOPNOTSUPP; - down(&hba->host_sem); - if (!ufshcd_is_user_access_allowed(hba)) { - ret = -EBUSY; - goto out; - } - - pm_runtime_get_sync(hba->dev); - ufshcd_hold(hba); - ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER); - ufshcd_release(hba); - pm_runtime_put_sync(hba->dev); - - ret = sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit)); + ret = ufshcd_read_hci_reg(hba, &ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); + if (ret) + return ret; -out: - up(&hba->host_sem); - return ret; + return sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit)); } static ssize_t auto_hibern8_store(struct device *dev, @@ -405,6 +411,53 @@ static ssize_t wb_flush_threshold_store(struct device *dev, return count; } +/** + * pm_qos_enable_show - sysfs handler to show pm qos enable value + * @dev: device associated with the UFS controller + * @attr: sysfs attribute handle + * @buf: buffer for sysfs file + * + * Print 1 if PM QoS feature is enabled, 0 if disabled. + * + * Returns number of characters written to @buf. + */ +static ssize_t pm_qos_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%d\n", hba->pm_qos_enabled); +} + +/** + * pm_qos_enable_store - sysfs handler to store value + * @dev: device associated with the UFS controller + * @attr: sysfs attribute handle + * @buf: buffer for sysfs file + * @count: stores buffer characters count + * + * Input 0 to disable PM QoS and 1 value to enable. 
+ * Default state: 1 + * + * Return: number of characters written to @buf on success, < 0 upon failure. + */ +static ssize_t pm_qos_enable_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + bool value; + + if (kstrtobool(buf, &value)) + return -EINVAL; + + if (value) + ufshcd_pm_qos_init(hba); + else + ufshcd_pm_qos_exit(hba); + + return count; +} + static DEVICE_ATTR_RW(rpm_lvl); static DEVICE_ATTR_RO(rpm_target_dev_state); static DEVICE_ATTR_RO(rpm_target_link_state); @@ -416,6 +469,7 @@ static DEVICE_ATTR_RW(wb_on); static DEVICE_ATTR_RW(enable_wb_buf_flush); static DEVICE_ATTR_RW(wb_flush_threshold); static DEVICE_ATTR_RW(rtc_update_ms); +static DEVICE_ATTR_RW(pm_qos_enable); static struct attribute *ufs_sysfs_ufshcd_attrs[] = { &dev_attr_rpm_lvl.attr, @@ -429,6 +483,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = { &dev_attr_enable_wb_buf_flush.attr, &dev_attr_wb_flush_threshold.attr, &dev_attr_rtc_update_ms.attr, + &dev_attr_pm_qos_enable.attr, NULL }; @@ -470,6 +525,58 @@ static const struct attribute_group ufs_sysfs_capabilities_group = { .attrs = ufs_sysfs_capabilities_attrs, }; +static ssize_t version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "0x%x\n", hba->ufs_version); +} + +static ssize_t product_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret; + u32 val; + struct ufs_hba *hba = dev_get_drvdata(dev); + + ret = ufshcd_read_hci_reg(hba, &val, REG_CONTROLLER_PID); + if (ret) + return ret; + + return sysfs_emit(buf, "0x%x\n", val); +} + +static ssize_t man_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret; + u32 val; + struct ufs_hba *hba = dev_get_drvdata(dev); + + ret = ufshcd_read_hci_reg(hba, &val, REG_CONTROLLER_MID); + if (ret) + return ret; + + return sysfs_emit(buf, "0x%x\n", val); +} + +static DEVICE_ATTR_RO(version); +static DEVICE_ATTR_RO(product_id); +static DEVICE_ATTR_RO(man_id); + +static struct attribute *ufs_sysfs_ufshci_cap_attrs[] = { + &dev_attr_version.attr, + &dev_attr_product_id.attr, + &dev_attr_man_id.attr, + NULL +}; + +static const struct attribute_group ufs_sysfs_ufshci_group = { + .name = "ufshci_capabilities", + .attrs = ufs_sysfs_ufshci_cap_attrs, +}; + static ssize_t monitor_enable_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -563,6 +670,9 @@ static ssize_t read_req_latency_avg_show(struct device *dev, struct ufs_hba *hba = dev_get_drvdata(dev); struct ufs_hba_monitor *m = &hba->monitor; + if (!m->nr_req[READ]) + return sysfs_emit(buf, "0\n"); + return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[READ]), m->nr_req[READ])); } @@ -630,6 +740,9 @@ static ssize_t write_req_latency_avg_show(struct device *dev, struct ufs_hba *hba = dev_get_drvdata(dev); struct ufs_hba_monitor *m = &hba->monitor; + if (!m->nr_req[WRITE]) + return sysfs_emit(buf, "0\n"); + return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[WRITE]), m->nr_req[WRITE])); } @@ -1291,6 +1404,81 @@ static const struct attribute_group ufs_sysfs_flags_group = { .attrs = ufs_sysfs_device_flags, }; +static ssize_t max_number_of_rtt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + u32 rtt; + int ret; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + up(&hba->host_sem); + return -EBUSY; + 
} + + ufshcd_rpm_get_sync(hba); + ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt); + ufshcd_rpm_put_sync(hba); + + if (ret) + goto out; + + ret = sysfs_emit(buf, "0x%08X\n", rtt); + +out: + up(&hba->host_sem); + return ret; +} + +static ssize_t max_number_of_rtt_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + struct ufs_dev_info *dev_info = &hba->dev_info; + struct scsi_device *sdev; + unsigned int memflags; + unsigned int rtt; + int ret; + + if (kstrtouint(buf, 0, &rtt)) + return -EINVAL; + + if (rtt > dev_info->rtt_cap) { + dev_err(dev, "rtt can be at most bDeviceRTTCap\n"); + return -EINVAL; + } + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + ret = -EBUSY; + goto out; + } + + ufshcd_rpm_get_sync(hba); + + memflags = memalloc_noio_save(); + shost_for_each_device(sdev, hba->host) + blk_mq_freeze_queue_nomemsave(sdev->request_queue); + + ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt); + + shost_for_each_device(sdev, hba->host) + blk_mq_unfreeze_queue_nomemrestore(sdev->request_queue); + memalloc_noio_restore(memflags); + + ufshcd_rpm_put_sync(hba); + +out: + up(&hba->host_sem); + return ret < 0 ? ret : count; +} + +static DEVICE_ATTR_RW(max_number_of_rtt); + static inline bool ufshcd_is_wb_attrs(enum attr_idn idn) { return idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS && @@ -1338,7 +1526,6 @@ UFS_ATTRIBUTE(max_data_in_size, _MAX_DATA_IN); UFS_ATTRIBUTE(max_data_out_size, _MAX_DATA_OUT); UFS_ATTRIBUTE(reference_clock_frequency, _REF_CLK_FREQ); UFS_ATTRIBUTE(configuration_descriptor_lock, _CONF_DESC_LOCK); -UFS_ATTRIBUTE(max_number_of_rtt, _MAX_NUM_OF_RTT); UFS_ATTRIBUTE(exception_event_control, _EE_CONTROL); UFS_ATTRIBUTE(exception_event_status, _EE_STATUS); UFS_ATTRIBUTE(ffu_status, _FFU_STATUS); @@ -1382,6 +1569,7 @@ static const struct attribute_group ufs_sysfs_attributes_group = { static const struct attribute_group *ufs_sysfs_groups[] = { &ufs_sysfs_default_group, &ufs_sysfs_capabilities_group, + &ufs_sysfs_ufshci_group, &ufs_sysfs_monitor_group, &ufs_sysfs_power_info_group, &ufs_sysfs_device_descriptor_group, diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c index 374e5aae4e7e..252186124669 100644 --- a/drivers/ufs/core/ufs_bsg.c +++ b/drivers/ufs/core/ufs_bsg.c @@ -170,7 +170,7 @@ static int ufs_bsg_request(struct bsg_job *job) break; case UPIU_TRANSACTION_UIC_CMD: memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE); - ret = ufshcd_send_uic_cmd(hba, &uc); + ret = ufshcd_send_bsg_uic_cmd(hba, &uc); if (ret) dev_err(hba->dev, "send uic cmd: error code %d\n", ret); @@ -194,10 +194,12 @@ out: ufshcd_rpm_put_sync(hba); kfree(buff); bsg_reply->result = ret; - job->reply_len = !rpmb ? sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply); /* complete the job here only if no error */ - if (ret == 0) + if (ret == 0) { + job->reply_len = rpmb ? 
sizeof(struct ufs_rpmb_reply) : + sizeof(struct ufs_bsg_reply); bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len); + } return ret; } @@ -216,6 +218,7 @@ void ufs_bsg_remove(struct ufs_hba *hba) return; bsg_remove_queue(hba->bsg_queue); + hba->bsg_queue = NULL; device_del(bsg_dev); put_device(bsg_dev); @@ -253,9 +256,11 @@ int ufs_bsg_probe(struct ufs_hba *hba) if (ret) goto out; - q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, NULL, 0); + q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), NULL, ufs_bsg_request, + NULL, 0); if (IS_ERR(q)) { ret = PTR_ERR(q); + device_del(bsg_dev); goto out; } diff --git a/drivers/ufs/core/ufs_trace.h b/drivers/ufs/core/ufs_trace.h new file mode 100644 index 000000000000..84deca2b841d --- /dev/null +++ b/drivers/ufs/core/ufs_trace.h @@ -0,0 +1,405 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ufs + +#if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_UFS_H + +#include <ufs/ufs.h> +#include <linux/tracepoint.h> + +#define str_opcode(opcode) \ + __print_symbolic(opcode, \ + { WRITE_16, "WRITE_16" }, \ + { WRITE_10, "WRITE_10" }, \ + { READ_16, "READ_16" }, \ + { READ_10, "READ_10" }, \ + { SYNCHRONIZE_CACHE, "SYNC" }, \ + { UNMAP, "UNMAP" }) + +#define UFS_LINK_STATES \ + EM(UIC_LINK_OFF_STATE, "UIC_LINK_OFF_STATE") \ + EM(UIC_LINK_ACTIVE_STATE, "UIC_LINK_ACTIVE_STATE") \ + EMe(UIC_LINK_HIBERN8_STATE, "UIC_LINK_HIBERN8_STATE") + +#define UFS_PWR_MODES \ + EM(UFS_ACTIVE_PWR_MODE, "UFS_ACTIVE_PWR_MODE") \ + EM(UFS_SLEEP_PWR_MODE, "UFS_SLEEP_PWR_MODE") \ + EM(UFS_POWERDOWN_PWR_MODE, "UFS_POWERDOWN_PWR_MODE") \ + EMe(UFS_DEEPSLEEP_PWR_MODE, "UFS_DEEPSLEEP_PWR_MODE") + +#define UFSCHD_CLK_GATING_STATES \ + EM(CLKS_OFF, "CLKS_OFF") \ + EM(CLKS_ON, "CLKS_ON") \ + EM(REQ_CLKS_OFF, "REQ_CLKS_OFF") \ + EMe(REQ_CLKS_ON, "REQ_CLKS_ON") + +#define UFS_CMD_TRACE_STRINGS \ + EM(UFS_CMD_SEND, "send_req") \ + EM(UFS_CMD_COMP, "complete_rsp") \ + EM(UFS_DEV_COMP, "dev_complete") \ + EM(UFS_QUERY_SEND, "query_send") \ + EM(UFS_QUERY_COMP, "query_complete") \ + EM(UFS_QUERY_ERR, "query_complete_err") \ + EM(UFS_TM_SEND, "tm_send") \ + EM(UFS_TM_COMP, "tm_complete") \ + EMe(UFS_TM_ERR, "tm_complete_err") + +#define UFS_CMD_TRACE_TSF_TYPES \ + EM(UFS_TSF_CDB, "CDB") \ + EM(UFS_TSF_OSF, "OSF") \ + EM(UFS_TSF_TM_INPUT, "TM_INPUT") \ + EMe(UFS_TSF_TM_OUTPUT, "TM_OUTPUT") + +/* Enums require being exported to userspace, for user tool parsing */ +#undef EM +#undef EMe +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define EMe(a, b) TRACE_DEFINE_ENUM(a); + +UFS_LINK_STATES; +UFS_PWR_MODES; +UFSCHD_CLK_GATING_STATES; +UFS_CMD_TRACE_STRINGS +UFS_CMD_TRACE_TSF_TYPES + +/* + * Now redefine the EM() and EMe() macros to map the enums to the strings + * that will be printed in the output. 
+ */ +#undef EM +#undef EMe +#define EM(a, b) {a, b}, +#define EMe(a, b) {a, b} + +#define show_ufs_cmd_trace_str(str_t) \ + __print_symbolic(str_t, UFS_CMD_TRACE_STRINGS) +#define show_ufs_cmd_trace_tsf(tsf) \ + __print_symbolic(tsf, UFS_CMD_TRACE_TSF_TYPES) + +TRACE_EVENT(ufshcd_clk_gating, + + TP_PROTO(const char *dev_name, int state), + + TP_ARGS(dev_name, state), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __field(int, state) + ), + + TP_fast_assign( + __assign_str(dev_name); + __entry->state = state; + ), + + TP_printk("%s: gating state changed to %s", + __get_str(dev_name), + __print_symbolic(__entry->state, UFSCHD_CLK_GATING_STATES)) +); + +TRACE_EVENT(ufshcd_clk_scaling, + + TP_PROTO(const char *dev_name, const char *state, const char *clk, + u32 prev_state, u32 curr_state), + + TP_ARGS(dev_name, state, clk, prev_state, curr_state), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __string(state, state) + __string(clk, clk) + __field(u32, prev_state) + __field(u32, curr_state) + ), + + TP_fast_assign( + __assign_str(dev_name); + __assign_str(state); + __assign_str(clk); + __entry->prev_state = prev_state; + __entry->curr_state = curr_state; + ), + + TP_printk("%s: %s %s from %u to %u Hz", + __get_str(dev_name), __get_str(state), __get_str(clk), + __entry->prev_state, __entry->curr_state) +); + +TRACE_EVENT(ufshcd_auto_bkops_state, + + TP_PROTO(const char *dev_name, const char *state), + + TP_ARGS(dev_name, state), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __string(state, state) + ), + + TP_fast_assign( + __assign_str(dev_name); + __assign_str(state); + ), + + TP_printk("%s: auto bkops - %s", + __get_str(dev_name), __get_str(state)) +); + +DECLARE_EVENT_CLASS(ufshcd_profiling_template, + TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us, + int err), + + TP_ARGS(dev_name, profile_info, time_us, err), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __string(profile_info, profile_info) + __field(s64, time_us) + __field(int, err) + ), + + TP_fast_assign( + __assign_str(dev_name); + __assign_str(profile_info); + __entry->time_us = time_us; + __entry->err = err; + ), + + TP_printk("%s: %s: took %lld usecs, err %d", + __get_str(dev_name), __get_str(profile_info), + __entry->time_us, __entry->err) +); + +DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_hibern8, + TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us, + int err), + TP_ARGS(dev_name, profile_info, time_us, err)); + +DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_gating, + TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us, + int err), + TP_ARGS(dev_name, profile_info, time_us, err)); + +DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_scaling, + TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us, + int err), + TP_ARGS(dev_name, profile_info, time_us, err)); + +DECLARE_EVENT_CLASS(ufshcd_template, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + + TP_ARGS(dev_name, err, usecs, dev_state, link_state), + + TP_STRUCT__entry( + __field(s64, usecs) + __field(int, err) + __string(dev_name, dev_name) + __field(int, dev_state) + __field(int, link_state) + ), + + TP_fast_assign( + __entry->usecs = usecs; + __entry->err = err; + __assign_str(dev_name); + __entry->dev_state = dev_state; + __entry->link_state = link_state; + ), + + TP_printk( + "%s: took %lld usecs, dev_state: %s, link_state: %s, err %d", + __get_str(dev_name), + __entry->usecs, + 
__print_symbolic(__entry->dev_state, UFS_PWR_MODES), + __print_symbolic(__entry->link_state, UFS_LINK_STATES), + __entry->err + ) +); + +DEFINE_EVENT(ufshcd_template, ufshcd_system_suspend, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_system_resume, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_runtime_suspend, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_runtime_resume, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_init, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_wl_suspend, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_wl_resume, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_suspend, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +TRACE_EVENT(ufshcd_command, + TP_PROTO(struct scsi_device *sdev, enum ufs_trace_str_t str_t, + unsigned int tag, u32 doorbell, u32 hwq_id, int transfer_len, + u32 intr, u64 lba, u8 opcode, u8 group_id), + + TP_ARGS(sdev, str_t, tag, doorbell, hwq_id, transfer_len, intr, lba, + opcode, group_id), + + TP_STRUCT__entry( + __field(struct scsi_device *, sdev) + __field(enum ufs_trace_str_t, str_t) + __field(unsigned int, tag) + __field(u32, doorbell) + __field(u32, hwq_id) + __field(u32, intr) + __field(u64, lba) + __field(int, transfer_len) + __field(u8, opcode) + __field(u8, group_id) + ), + + TP_fast_assign( + __entry->sdev = sdev; + __entry->str_t = str_t; + __entry->tag = tag; + __entry->doorbell = doorbell; + __entry->hwq_id = hwq_id; + __entry->intr = intr; + __entry->lba = lba; + __entry->transfer_len = transfer_len; + __entry->opcode = opcode; + __entry->group_id = group_id; + ), + + TP_printk( + "%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x (%s), group_id: 0x%x, hwq_id: %d", + show_ufs_cmd_trace_str(__entry->str_t), + dev_name(&__entry->sdev->sdev_dev), __entry->tag, + __entry->doorbell, __entry->transfer_len, __entry->intr, + __entry->lba, (u32)__entry->opcode, str_opcode(__entry->opcode), + (u32)__entry->group_id, __entry->hwq_id + ) +); + +TRACE_EVENT(ufshcd_uic_command, + TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, u32 cmd, + u32 arg1, u32 arg2, u32 arg3), + + TP_ARGS(dev_name, str_t, cmd, arg1, arg2, arg3), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __field(enum ufs_trace_str_t, str_t) + __field(u32, cmd) + __field(u32, arg1) + __field(u32, 
arg2) + __field(u32, arg3) + ), + + TP_fast_assign( + __assign_str(dev_name); + __entry->str_t = str_t; + __entry->cmd = cmd; + __entry->arg1 = arg1; + __entry->arg2 = arg2; + __entry->arg3 = arg3; + ), + + TP_printk( + "%s: %s: cmd: 0x%x, arg1: 0x%x, arg2: 0x%x, arg3: 0x%x", + show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name), + __entry->cmd, __entry->arg1, __entry->arg2, __entry->arg3 + ) +); + +TRACE_EVENT(ufshcd_upiu, + TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, void *hdr, + void *tsf, enum ufs_trace_tsf_t tsf_t), + + TP_ARGS(dev_name, str_t, hdr, tsf, tsf_t), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __field(enum ufs_trace_str_t, str_t) + __array(unsigned char, hdr, 12) + __array(unsigned char, tsf, 16) + __field(enum ufs_trace_tsf_t, tsf_t) + ), + + TP_fast_assign( + __assign_str(dev_name); + __entry->str_t = str_t; + memcpy(__entry->hdr, hdr, sizeof(__entry->hdr)); + memcpy(__entry->tsf, tsf, sizeof(__entry->tsf)); + __entry->tsf_t = tsf_t; + ), + + TP_printk( + "%s: %s: HDR:%s, %s:%s", + show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name), + __print_hex(__entry->hdr, sizeof(__entry->hdr)), + show_ufs_cmd_trace_tsf(__entry->tsf_t), + __print_hex(__entry->tsf, sizeof(__entry->tsf)) + ) +); + +TRACE_EVENT(ufshcd_exception_event, + + TP_PROTO(const char *dev_name, u16 status), + + TP_ARGS(dev_name, status), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __field(u16, status) + ), + + TP_fast_assign( + __assign_str(dev_name); + __entry->status = status; + ), + + TP_printk("%s: status 0x%x", + __get_str(dev_name), __entry->status + ) +); + +#endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../drivers/ufs/core +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE ufs_trace + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/drivers/ufs/core/ufshcd-crypto.c b/drivers/ufs/core/ufshcd-crypto.c index f2c4422cab86..694ff7578fc1 100644 --- a/drivers/ufs/core/ufshcd-crypto.c +++ b/drivers/ufs/core/ufshcd-crypto.c @@ -17,20 +17,14 @@ static const struct ufs_crypto_alg_entry { }, }; -static int ufshcd_program_key(struct ufs_hba *hba, - const union ufs_crypto_cfg_entry *cfg, int slot) +static void ufshcd_program_key(struct ufs_hba *hba, + const union ufs_crypto_cfg_entry *cfg, int slot) { int i; u32 slot_offset = hba->crypto_cfg_register + slot * sizeof(*cfg); - int err = 0; ufshcd_hold(hba); - if (hba->vops && hba->vops->program_key) { - err = hba->vops->program_key(hba, cfg, slot); - goto out; - } - /* Ensure that CFGE is cleared before programming the key */ ufshcd_writel(hba, 0, slot_offset + 16 * sizeof(cfg->reg_val[0])); for (i = 0; i < 16; i++) { @@ -43,17 +37,14 @@ static int ufshcd_program_key(struct ufs_hba *hba, /* Dword 16 must be written last */ ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[16]), slot_offset + 16 * sizeof(cfg->reg_val[0])); -out: ufshcd_release(hba); - return err; } static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile, const struct blk_crypto_key *key, unsigned int slot) { - struct ufs_hba *hba = - container_of(profile, struct ufs_hba, crypto_profile); + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); const union ufs_crypto_cap_entry *ccap_array = hba->crypto_cap_array; const struct ufs_crypto_alg_entry *alg = &ufs_crypto_algs[key->crypto_cfg.crypto_mode]; @@ -61,7 +52,6 @@ static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile, int i; int 
cap_idx = -1; union ufs_crypto_cfg_entry cfg = {}; - int err; BUILD_BUG_ON(UFS_CRYPTO_KEY_SIZE_INVALID != 0); for (i = 0; i < hba->crypto_capabilities.num_crypto_cap; i++) { @@ -89,33 +79,31 @@ static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile, memcpy(cfg.crypto_key, key->raw, key->size); } - err = ufshcd_program_key(hba, &cfg, slot); + ufshcd_program_key(hba, &cfg, slot); memzero_explicit(&cfg, sizeof(cfg)); - return err; + return 0; } -static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot) +static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile, + const struct blk_crypto_key *key, + unsigned int slot) { + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); /* * Clear the crypto cfg on the device. Clearing CFGE * might not be sufficient, so just clear the entire cfg. */ union ufs_crypto_cfg_entry cfg = {}; - return ufshcd_program_key(hba, &cfg, slot); -} - -static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile, - const struct blk_crypto_key *key, - unsigned int slot) -{ - struct ufs_hba *hba = - container_of(profile, struct ufs_hba, crypto_profile); - - return ufshcd_clear_keyslot(hba, slot); + ufshcd_program_key(hba, &cfg, slot); + return 0; } +/* + * Reprogram the keyslots if needed, and return true if CRYPTO_GENERAL_ENABLE + * should be used in the host controller initialization sequence. + */ bool ufshcd_crypto_enable(struct ufs_hba *hba) { if (!(hba->caps & UFSHCD_CAP_CRYPTO)) @@ -123,6 +111,10 @@ bool ufshcd_crypto_enable(struct ufs_hba *hba) /* Reset might clear all keys, so reprogram all the keys. */ blk_crypto_reprogram_all_keys(&hba->crypto_profile); + + if (hba->quirks & UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE) + return false; + return true; } @@ -159,6 +151,9 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba) int err = 0; enum blk_crypto_mode_num blk_mode_num; + if (hba->quirks & UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE) + return 0; + /* * Don't use crypto if either the hardware doesn't advertise the * standard crypto capability bit *or* if the vendor specific driver @@ -228,9 +223,10 @@ void ufshcd_init_crypto(struct ufs_hba *hba) if (!(hba->caps & UFSHCD_CAP_CRYPTO)) return; - /* Clear all keyslots - the number of keyslots is (CFGC + 1) */ - for (slot = 0; slot < hba->crypto_capabilities.config_count + 1; slot++) - ufshcd_clear_keyslot(hba, slot); + /* Clear all keyslots. 
*/ + for (slot = 0; slot < hba->crypto_profile.num_slots; slot++) + hba->crypto_profile.ll_ops.keyslot_evict(&hba->crypto_profile, + NULL, slot); } void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q) diff --git a/drivers/ufs/core/ufshcd-crypto.h b/drivers/ufs/core/ufshcd-crypto.h index be8596f20ba2..89bb97c14c15 100644 --- a/drivers/ufs/core/ufshcd-crypto.h +++ b/drivers/ufs/core/ufshcd-crypto.h @@ -37,6 +37,33 @@ ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp, h->dunu = cpu_to_le32(upper_32_bits(lrbp->data_unit_num)); } +static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp) +{ + struct scsi_cmnd *cmd = lrbp->cmd; + const struct bio_crypt_ctx *crypt_ctx = scsi_cmd_to_rq(cmd)->crypt_ctx; + + if (crypt_ctx && hba->vops && hba->vops->fill_crypto_prdt) + return hba->vops->fill_crypto_prdt(hba, crypt_ctx, + lrbp->ucd_prdt_ptr, + scsi_sg_count(cmd)); + return 0; +} + +static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp) +{ + if (!(hba->quirks & UFSHCD_QUIRK_KEYS_IN_PRDT)) + return; + + if (!(scsi_cmd_to_rq(lrbp->cmd)->crypt_ctx)) + return; + + /* Zeroize the PRDT because it can contain cryptographic keys. */ + memzero_explicit(lrbp->ucd_prdt_ptr, + ufshcd_sg_entry_size(hba) * scsi_sg_count(lrbp->cmd)); +} + bool ufshcd_crypto_enable(struct ufs_hba *hba); int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba); @@ -54,6 +81,15 @@ static inline void ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp, struct request_desc_header *h) { } +static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp) +{ + return 0; +} + +static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp) { } + static inline bool ufshcd_crypto_enable(struct ufs_hba *hba) { return false; diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index f42d99ce5bf1..786f20ef2238 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -64,16 +64,11 @@ void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit); void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag, struct cq_entry *cqe); int ufshcd_mcq_init(struct ufs_hba *hba); +void ufshcd_mcq_disable(struct ufs_hba *hba); int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba); int ufshcd_mcq_memory_alloc(struct ufs_hba *hba); -void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba); -void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds); -u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i); -void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i); struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba, struct request *req); -unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba, - struct ufs_hw_queue *hwq); void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba, struct ufs_hw_queue *hwq); bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd); @@ -89,6 +84,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, u8 **buf, bool ascii); int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd); +int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd); int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, struct utp_upiu_req *req_upiu, @@ -241,12 +237,6 @@ static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba, hba->vops->config_scaling_param(hba, p, data); } -static inline void ufshcd_vops_reinit_notify(struct ufs_hba *hba) -{ - if (hba->vops && 
hba->vops->reinit_notify) - hba->vops->reinit_notify(hba); -} - static inline int ufshcd_vops_mcq_config_resource(struct ufs_hba *hba) { if (hba->vops && hba->vops->mcq_config_resource) @@ -255,14 +245,6 @@ static inline int ufshcd_vops_mcq_config_resource(struct ufs_hba *hba) return -EOPNOTSUPP; } -static inline int ufshcd_mcq_vops_get_hba_mac(struct ufs_hba *hba) -{ - if (hba->vops && hba->vops->get_hba_mac) - return hba->vops->get_hba_mac(hba); - - return -EOPNOTSUPP; -} - static inline int ufshcd_mcq_vops_op_runtime_config(struct ufs_hba *hba) { if (hba->vops && hba->vops->op_runtime_config) @@ -329,6 +311,11 @@ static inline int ufshcd_rpm_get_sync(struct ufs_hba *hba) return pm_runtime_get_sync(&hba->ufs_device_wlun->sdev_gendev); } +static inline int ufshcd_rpm_get_if_active(struct ufs_hba *hba) +{ + return pm_runtime_get_if_active(&hba->ufs_device_wlun->sdev_gendev); +} + static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba) { return pm_runtime_put_sync(&hba->ufs_device_wlun->sdev_gendev); diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 3b89c9d4aa40..464f13da259a 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -36,10 +36,10 @@ #include "ufs-fault-injection.h" #include "ufs_bsg.h" #include "ufshcd-crypto.h" -#include <asm/unaligned.h> +#include <linux/unaligned.h> #define CREATE_TRACE_POINTS -#include <trace/events/ufs.h> +#include "ufs_trace.h" #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ UTP_TASK_REQ_COMPL |\ @@ -51,8 +51,10 @@ /* UIC command timeout, unit: ms */ -#define UIC_CMD_TIMEOUT 500 - +enum { + UIC_CMD_TIMEOUT_DEFAULT = 500, + UIC_CMD_TIMEOUT_MAX = 2000, +}; /* NOP OUT retries waiting for NOP IN response */ #define NOP_OUT_RETRIES 10 /* Timeout after 50 msecs if NOP OUT hangs without response */ @@ -102,6 +104,9 @@ /* Default RTC update every 10 seconds */ #define UFS_RTC_UPDATE_INTERVAL_MS (10 * MSEC_PER_SEC) +/* bMaxNumOfRTT is equal to two after device manufacturing */ +#define DEFAULT_MAX_NUM_RTT 2 + /* UFSHC 4.0 compliant HC support this mode. */ static bool use_mcq_mode = true; @@ -113,6 +118,23 @@ static bool is_mcq_supported(struct ufs_hba *hba) module_param(use_mcq_mode, bool, 0644); MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default"); +static unsigned int uic_cmd_timeout = UIC_CMD_TIMEOUT_DEFAULT; + +static int uic_cmd_timeout_set(const char *val, const struct kernel_param *kp) +{ + return param_set_uint_minmax(val, kp, UIC_CMD_TIMEOUT_DEFAULT, + UIC_CMD_TIMEOUT_MAX); +} + +static const struct kernel_param_ops uic_cmd_timeout_ops = { + .set = uic_cmd_timeout_set, + .get = param_get_uint, +}; + +module_param_cb(uic_cmd_timeout, &uic_cmd_timeout_ops, &uic_cmd_timeout, 0644); +MODULE_PARM_DESC(uic_cmd_timeout, + "UFS UIC command timeout in milliseconds. Defaults to 500ms. 
Supported values range from 500ms to 2 seconds inclusively"); + #define ufshcd_toggle_vreg(_dev, _vreg, _on) \ ({ \ int _ret; \ @@ -161,8 +183,6 @@ EXPORT_SYMBOL_GPL(ufshcd_dump_regs); enum { UFSHCD_MAX_CHANNEL = 0, UFSHCD_MAX_ID = 1, - UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED, - UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED, }; static const char *const ufshcd_state_name[] = { @@ -238,10 +258,15 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state, return UFS_PM_LVL_0; } +static bool ufshcd_has_pending_tasks(struct ufs_hba *hba) +{ + return hba->outstanding_tasks || hba->active_uic_cmd || + hba->uic_async_done; +} + static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba) { - return (hba->clk_gating.active_reqs || hba->outstanding_reqs || hba->outstanding_tasks || - hba->active_uic_cmd || hba->uic_async_done); + return scsi_host_busy(hba->host) || ufshcd_has_pending_tasks(hba); } static const struct ufs_dev_quirk ufs_fixups[] = { @@ -278,6 +303,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba); static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd); static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); static void ufshcd_hba_exit(struct ufs_hba *hba); +static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params); static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params); static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); @@ -329,18 +355,6 @@ static void ufshcd_configure_wb(struct ufs_hba *hba) ufshcd_wb_toggle_buf_flush(hba, true); } -static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba) -{ - if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt)) - scsi_unblock_requests(hba->host); -} - -static void ufshcd_scsi_block_requests(struct ufs_hba *hba) -{ - if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1) - scsi_block_requests(hba->host); -} - static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag, enum ufs_trace_str_t str_t) { @@ -452,7 +466,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS); - if (is_mcq_enabled(hba)) { + if (hba->mcq_enabled) { struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq); hwq_id = hwq->id; @@ -614,8 +628,8 @@ static void ufshcd_print_host_state(struct ufs_hba *hba) const struct scsi_device *sdev_ufs = hba->ufs_device_wlun; dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state); - dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n", - hba->outstanding_reqs, hba->outstanding_tasks); + dev_err(hba->dev, "%d outstanding reqs, tasks=0x%lx\n", + scsi_host_busy(hba->host), hba->outstanding_tasks); dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n", hba->saved_err, hba->saved_uic_err); dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n", @@ -719,25 +733,15 @@ EXPORT_SYMBOL_GPL(ufshcd_delay_us); * Return: -ETIMEDOUT on error, zero on success. 
*/ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, - u32 val, unsigned long interval_us, - unsigned long timeout_ms) + u32 val, unsigned long interval_us, + unsigned long timeout_ms) { - int err = 0; - unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); - - /* ignore bits that we don't intend to wait on */ - val = val & mask; + u32 v; - while ((ufshcd_readl(hba, reg) & mask) != val) { - usleep_range(interval_us, interval_us + 50); - if (time_after(jiffies, timeout)) { - if ((ufshcd_readl(hba, reg) & mask) != val) - err = -ETIMEDOUT; - break; - } - } + val &= mask; /* ignore bits that we don't intend to wait on */ - return err; + return read_poll_timeout(ufshcd_readl, v, (v & mask) == val, + interval_us, timeout_ms * 1000, false, hba, reg); } /** @@ -748,8 +752,6 @@ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, */ static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) { - if (hba->ufs_version == ufshci_version(1, 0)) - return INTERRUPT_MASK_ALL_VER_10; if (hba->ufs_version <= ufshci_version(2, 0)) return INTERRUPT_MASK_ALL_VER_11; @@ -990,28 +992,46 @@ bool ufshcd_is_hba_active(struct ufs_hba *hba) } EXPORT_SYMBOL_GPL(ufshcd_is_hba_active); -u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba) +/** + * ufshcd_pm_qos_init - initialize PM QoS request + * @hba: per adapter instance + */ +void ufshcd_pm_qos_init(struct ufs_hba *hba) { - /* HCI version 1.0 and 1.1 supports UniPro 1.41 */ - if (hba->ufs_version <= ufshci_version(1, 1)) - return UFS_UNIPRO_VER_1_41; - else - return UFS_UNIPRO_VER_1_6; + + if (hba->pm_qos_enabled) + return; + + cpu_latency_qos_add_request(&hba->pm_qos_req, PM_QOS_DEFAULT_VALUE); + + if (cpu_latency_qos_request_active(&hba->pm_qos_req)) + hba->pm_qos_enabled = true; } -EXPORT_SYMBOL(ufshcd_get_local_unipro_ver); -static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba) +/** + * ufshcd_pm_qos_exit - remove request from PM QoS + * @hba: per adapter instance + */ +void ufshcd_pm_qos_exit(struct ufs_hba *hba) { - /* - * If both host and device support UniPro ver1.6 or later, PA layer - * parameters tuning happens during link startup itself. - * - * We can manually tune PA layer parameters if either host or device - * doesn't support UniPro ver 1.6 or later. But to keep manual tuning - * logic simple, we will only do manual tuning if local unipro version - * doesn't support ver1.6 or later. - */ - return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6; + if (!hba->pm_qos_enabled) + return; + + cpu_latency_qos_remove_request(&hba->pm_qos_req); + hba->pm_qos_enabled = false; +} + +/** + * ufshcd_pm_qos_update - update PM QoS request + * @hba: per adapter instance + * @on: If True, vote for perf PM QoS mode otherwise power save mode + */ +static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on) +{ + if (!hba->pm_qos_enabled) + return; + + cpu_latency_qos_update_request(&hba->pm_qos_req, on ? 0 : PM_QOS_DEFAULT_VALUE); } /** @@ -1160,8 +1180,11 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq, hba->devfreq->previous_freq); else ufshcd_set_clk_freq(hba, !scale_up); + goto out; } + ufshcd_pm_qos_update(hba, scale_up); + out: trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), (scale_up ? 
"up" : "down"), @@ -1216,11 +1239,13 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba, static u32 ufshcd_pending_cmds(struct ufs_hba *hba) { const struct scsi_device *sdev; + unsigned long flags; u32 pending = 0; - lockdep_assert_held(hba->host->host_lock); + spin_lock_irqsave(hba->host->host_lock, flags); __shost_for_each_device(sdev, hba->host) pending += sbitmap_weight(&sdev->budget_map); + spin_unlock_irqrestore(hba->host->host_lock, flags); return pending; } @@ -1234,7 +1259,6 @@ static u32 ufshcd_pending_cmds(struct ufs_hba *hba) static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us) { - unsigned long flags; int ret = 0; u32 tm_doorbell; u32 tr_pending; @@ -1242,7 +1266,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, ktime_t start; ufshcd_hold(hba); - spin_lock_irqsave(hba->host->host_lock, flags); /* * Wait for all the outstanding tasks/transfer requests. * Verify by checking the doorbell registers are clear. @@ -1263,7 +1286,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, break; } - spin_unlock_irqrestore(hba->host->host_lock, flags); io_schedule_timeout(msecs_to_jiffies(20)); if (ktime_to_us(ktime_sub(ktime_get(), start)) > wait_timeout_us) { @@ -1275,7 +1297,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, */ do_last_check = true; } - spin_lock_irqsave(hba->host->host_lock, flags); } while (tm_doorbell || tr_pending); if (timeout) { @@ -1285,7 +1306,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, ret = -EBUSY; } out: - spin_unlock_irqrestore(hba->host->host_lock, flags); ufshcd_release(hba); return ret; } @@ -1347,7 +1367,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us) * make sure that there are no outstanding requests when * clock scaling is in progress */ - ufshcd_scsi_block_requests(hba); + blk_mq_quiesce_tagset(&hba->host->tag_set); mutex_lock(&hba->wb_mutex); down_write(&hba->clk_scaling_lock); @@ -1356,7 +1376,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us) ret = -EBUSY; up_write(&hba->clk_scaling_lock); mutex_unlock(&hba->wb_mutex); - ufshcd_scsi_unblock_requests(hba); + blk_mq_unquiesce_tagset(&hba->host->tag_set); goto out; } @@ -1377,7 +1397,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc mutex_unlock(&hba->wb_mutex); - ufshcd_scsi_unblock_requests(hba); + blk_mq_unquiesce_tagset(&hba->host->tag_set); ufshcd_release(hba); } @@ -1432,16 +1452,16 @@ static void ufshcd_clk_scaling_suspend_work(struct work_struct *work) { struct ufs_hba *hba = container_of(work, struct ufs_hba, clk_scaling.suspend_work); - unsigned long irq_flags; - spin_lock_irqsave(hba->host->host_lock, irq_flags); - if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) { - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); - return; + scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock) + { + if (hba->clk_scaling.active_reqs || + hba->clk_scaling.is_suspended) + return; + + hba->clk_scaling.is_suspended = true; + hba->clk_scaling.window_start_t = 0; } - hba->clk_scaling.is_suspended = true; - hba->clk_scaling.window_start_t = 0; - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); devfreq_suspend_device(hba->devfreq); } @@ -1450,15 +1470,13 @@ static void ufshcd_clk_scaling_resume_work(struct work_struct *work) { struct ufs_hba *hba = container_of(work, struct ufs_hba, clk_scaling.resume_work); - unsigned long irq_flags; - 
spin_lock_irqsave(hba->host->host_lock, irq_flags); - if (!hba->clk_scaling.is_suspended) { - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); - return; + scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock) + { + if (!hba->clk_scaling.is_suspended) + return; + hba->clk_scaling.is_suspended = false; } - hba->clk_scaling.is_suspended = false; - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); devfreq_resume_device(hba->devfreq); } @@ -1472,7 +1490,6 @@ static int ufshcd_devfreq_target(struct device *dev, bool scale_up = false, sched_clk_scaling_suspend_work = false; struct list_head *clk_list = &hba->clk_list_head; struct ufs_clk_info *clki; - unsigned long irq_flags; if (!ufshcd_is_clkscaling_supported(hba)) return -EINVAL; @@ -1493,43 +1510,38 @@ static int ufshcd_devfreq_target(struct device *dev, *freq = (unsigned long) clk_round_rate(clki->clk, *freq); } - spin_lock_irqsave(hba->host->host_lock, irq_flags); - if (ufshcd_eh_in_progress(hba)) { - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); - return 0; - } + scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock) + { + if (ufshcd_eh_in_progress(hba)) + return 0; - /* Skip scaling clock when clock scaling is suspended */ - if (hba->clk_scaling.is_suspended) { - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); - dev_warn(hba->dev, "clock scaling is suspended, skip"); - return 0; - } + /* Skip scaling clock when clock scaling is suspended */ + if (hba->clk_scaling.is_suspended) { + dev_warn(hba->dev, "clock scaling is suspended, skip"); + return 0; + } - if (!hba->clk_scaling.active_reqs) - sched_clk_scaling_suspend_work = true; + if (!hba->clk_scaling.active_reqs) + sched_clk_scaling_suspend_work = true; - if (list_empty(clk_list)) { - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); - goto out; - } + if (list_empty(clk_list)) + goto out; - /* Decide based on the target or rounded-off frequency and update */ - if (hba->use_pm_opp) - scale_up = *freq > hba->clk_scaling.target_freq; - else - scale_up = *freq == clki->max_freq; + /* Decide based on the target or rounded-off frequency and update */ + if (hba->use_pm_opp) + scale_up = *freq > hba->clk_scaling.target_freq; + else + scale_up = *freq == clki->max_freq; - if (!hba->use_pm_opp && !scale_up) - *freq = clki->min_freq; + if (!hba->use_pm_opp && !scale_up) + *freq = clki->min_freq; - /* Update the frequency */ - if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) { - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); - ret = 0; - goto out; /* no state change required */ + /* Update the frequency */ + if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) { + ret = 0; + goto out; /* no state change required */ + } } - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); start = ktime_get(); ret = ufshcd_devfreq_scale(hba, *freq, scale_up); @@ -1541,7 +1553,8 @@ static int ufshcd_devfreq_target(struct device *dev, ktime_to_us(ktime_sub(ktime_get(), start)), ret); out: - if (sched_clk_scaling_suspend_work && !scale_up) + if (sched_clk_scaling_suspend_work && + (!scale_up || hba->clk_scaling.suspend_on_no_request)) queue_work(hba->clk_scaling.workq, &hba->clk_scaling.suspend_work); @@ -1553,7 +1566,6 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev, { struct ufs_hba *hba = dev_get_drvdata(dev); struct ufs_clk_scaling *scaling = &hba->clk_scaling; - unsigned long flags; ktime_t curr_t; if (!ufshcd_is_clkscaling_supported(hba)) @@ -1561,7 +1573,8 @@ static int ufshcd_devfreq_get_dev_status(struct device 
*dev, memset(stat, 0, sizeof(*stat)); - spin_lock_irqsave(hba->host->host_lock, flags); + guard(spinlock_irqsave)(&hba->clk_scaling.lock); + curr_t = ktime_get(); if (!scaling->window_start_t) goto start_window; @@ -1597,7 +1610,7 @@ start_window: scaling->busy_start_t = 0; scaling->is_busy_started = false; } - spin_unlock_irqrestore(hba->host->host_lock, flags); + return 0; } @@ -1661,19 +1674,19 @@ static void ufshcd_devfreq_remove(struct ufs_hba *hba) static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) { - unsigned long flags; bool suspend = false; cancel_work_sync(&hba->clk_scaling.suspend_work); cancel_work_sync(&hba->clk_scaling.resume_work); - spin_lock_irqsave(hba->host->host_lock, flags); - if (!hba->clk_scaling.is_suspended) { - suspend = true; - hba->clk_scaling.is_suspended = true; - hba->clk_scaling.window_start_t = 0; + scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock) + { + if (!hba->clk_scaling.is_suspended) { + suspend = true; + hba->clk_scaling.is_suspended = true; + hba->clk_scaling.window_start_t = 0; + } } - spin_unlock_irqrestore(hba->host->host_lock, flags); if (suspend) devfreq_suspend_device(hba->devfreq); @@ -1681,15 +1694,15 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) static void ufshcd_resume_clkscaling(struct ufs_hba *hba) { - unsigned long flags; bool resume = false; - spin_lock_irqsave(hba->host->host_lock, flags); - if (hba->clk_scaling.is_suspended) { - resume = true; - hba->clk_scaling.is_suspended = false; + scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock) + { + if (hba->clk_scaling.is_suspended) { + resume = true; + hba->clk_scaling.is_suspended = false; + } } - spin_unlock_irqrestore(hba->host->host_lock, flags); if (resume) devfreq_resume_device(hba->devfreq); @@ -1764,8 +1777,6 @@ static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba) static void ufshcd_init_clk_scaling(struct ufs_hba *hba) { - char wq_name[sizeof("ufs_clkscaling_00")]; - if (!ufshcd_is_clkscaling_supported(hba)) return; @@ -1777,9 +1788,10 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba) INIT_WORK(&hba->clk_scaling.resume_work, ufshcd_clk_scaling_resume_work); - snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d", - hba->host->host_no); - hba->clk_scaling.workq = create_singlethread_workqueue(wq_name); + spin_lock_init(&hba->clk_scaling.lock); + + hba->clk_scaling.workq = alloc_ordered_workqueue( + "ufs_clkscaling_%d", WQ_MEM_RECLAIM, hba->host->host_no); hba->clk_scaling.is_initialized = true; } @@ -1798,19 +1810,16 @@ static void ufshcd_exit_clk_scaling(struct ufs_hba *hba) static void ufshcd_ungate_work(struct work_struct *work) { int ret; - unsigned long flags; struct ufs_hba *hba = container_of(work, struct ufs_hba, clk_gating.ungate_work); cancel_delayed_work_sync(&hba->clk_gating.gate_work); - spin_lock_irqsave(hba->host->host_lock, flags); - if (hba->clk_gating.state == CLKS_ON) { - spin_unlock_irqrestore(hba->host->host_lock, flags); - return; + scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) { + if (hba->clk_gating.state == CLKS_ON) + return; } - spin_unlock_irqrestore(hba->host->host_lock, flags); ufshcd_hba_vreg_set_hpm(hba); ufshcd_setup_clocks(hba, true); @@ -1845,7 +1854,7 @@ void ufshcd_hold(struct ufs_hba *hba) if (!ufshcd_is_clkgating_allowed(hba) || !hba->clk_gating.is_initialized) return; - spin_lock_irqsave(hba->host->host_lock, flags); + spin_lock_irqsave(&hba->clk_gating.lock, flags); hba->clk_gating.active_reqs++; start: @@ -1861,11 +1870,11 @@ start: */ if (ufshcd_can_hibern8_during_gating(hba) 
&& ufshcd_is_link_hibern8(hba)) { - spin_unlock_irqrestore(hba->host->host_lock, flags); + spin_unlock_irqrestore(&hba->clk_gating.lock, flags); flush_result = flush_work(&hba->clk_gating.ungate_work); if (hba->clk_gating.is_suspended && !flush_result) return; - spin_lock_irqsave(hba->host->host_lock, flags); + spin_lock_irqsave(&hba->clk_gating.lock, flags); goto start; } break; @@ -1894,17 +1903,17 @@ start: */ fallthrough; case REQ_CLKS_ON: - spin_unlock_irqrestore(hba->host->host_lock, flags); + spin_unlock_irqrestore(&hba->clk_gating.lock, flags); flush_work(&hba->clk_gating.ungate_work); /* Make sure state is CLKS_ON before returning */ - spin_lock_irqsave(hba->host->host_lock, flags); + spin_lock_irqsave(&hba->clk_gating.lock, flags); goto start; default: dev_err(hba->dev, "%s: clk gating is in invalid state %d\n", __func__, hba->clk_gating.state); break; } - spin_unlock_irqrestore(hba->host->host_lock, flags); + spin_unlock_irqrestore(&hba->clk_gating.lock, flags); } EXPORT_SYMBOL_GPL(ufshcd_hold); @@ -1912,28 +1921,32 @@ static void ufshcd_gate_work(struct work_struct *work) { struct ufs_hba *hba = container_of(work, struct ufs_hba, clk_gating.gate_work.work); - unsigned long flags; int ret; - spin_lock_irqsave(hba->host->host_lock, flags); - /* - * In case you are here to cancel this work the gating state - * would be marked as REQ_CLKS_ON. In this case save time by - * skipping the gating work and exit after changing the clock - * state to CLKS_ON. - */ - if (hba->clk_gating.is_suspended || - (hba->clk_gating.state != REQ_CLKS_OFF)) { - hba->clk_gating.state = CLKS_ON; - trace_ufshcd_clk_gating(dev_name(hba->dev), - hba->clk_gating.state); - goto rel_lock; - } + scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) { + /* + * In case you are here to cancel this work the gating state + * would be marked as REQ_CLKS_ON. In this case save time by + * skipping the gating work and exit after changing the clock + * state to CLKS_ON. + */ + if (hba->clk_gating.is_suspended || + hba->clk_gating.state != REQ_CLKS_OFF) { + hba->clk_gating.state = CLKS_ON; + trace_ufshcd_clk_gating(dev_name(hba->dev), + hba->clk_gating.state); + return; + } - if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) - goto rel_lock; + if (hba->clk_gating.active_reqs) + return; + } - spin_unlock_irqrestore(hba->host->host_lock, flags); + scoped_guard(spinlock_irqsave, hba->host->host_lock) { + if (ufshcd_is_ufs_dev_busy(hba) || + hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) + return; + } /* put the link into hibern8 mode before turning off clocks */ if (ufshcd_can_hibern8_during_gating(hba)) { @@ -1944,7 +1957,7 @@ static void ufshcd_gate_work(struct work_struct *work) __func__, ret); trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); - goto out; + return; } ufshcd_set_link_hibern8(hba); } @@ -1964,33 +1977,34 @@ static void ufshcd_gate_work(struct work_struct *work) * prevent from doing cancel work multiple times when there are * new requests arriving before the current cancel work is done. 
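/*
 * A minimal sketch (not part of the patch) of the <linux/cleanup.h> guard
 * pattern that the clock gating/scaling paths above are converted to:
 * scoped_guard() releases the lock when its block is left, and guard()
 * releases it at function exit, so early returns need no explicit unlock.
 * All example_* names below are illustrative only.
 */
#include <linux/cleanup.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static bool example_mark_suspended(spinlock_t *lock, bool *is_suspended)
{
	bool suspended = false;

	scoped_guard(spinlock_irqsave, lock) {
		if (!*is_suspended) {
			*is_suspended = true;
			suspended = true;
		}
	}	/* lock dropped here, before any slow follow-up work */

	return suspended;
}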
*/ - spin_lock_irqsave(hba->host->host_lock, flags); + guard(spinlock_irqsave)(&hba->clk_gating.lock); if (hba->clk_gating.state == REQ_CLKS_OFF) { hba->clk_gating.state = CLKS_OFF; trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); } -rel_lock: - spin_unlock_irqrestore(hba->host->host_lock, flags); -out: - return; } -/* host lock must be held before calling this variant */ static void __ufshcd_release(struct ufs_hba *hba) { + lockdep_assert_held(&hba->clk_gating.lock); + if (!ufshcd_is_clkgating_allowed(hba)) return; hba->clk_gating.active_reqs--; if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended || - hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL || - hba->outstanding_tasks || !hba->clk_gating.is_initialized || - hba->active_uic_cmd || hba->uic_async_done || + !hba->clk_gating.is_initialized || hba->clk_gating.state == CLKS_OFF) return; + scoped_guard(spinlock_irqsave, hba->host->host_lock) { + if (ufshcd_has_pending_tasks(hba) || + hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) + return; + } + hba->clk_gating.state = REQ_CLKS_OFF; trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); queue_delayed_work(hba->clk_gating.clk_gating_workq, @@ -2000,11 +2014,8 @@ static void __ufshcd_release(struct ufs_hba *hba) void ufshcd_release(struct ufs_hba *hba) { - unsigned long flags; - - spin_lock_irqsave(hba->host->host_lock, flags); + guard(spinlock_irqsave)(&hba->clk_gating.lock); __ufshcd_release(hba); - spin_unlock_irqrestore(hba->host->host_lock, flags); } EXPORT_SYMBOL_GPL(ufshcd_release); @@ -2019,11 +2030,9 @@ static ssize_t ufshcd_clkgate_delay_show(struct device *dev, void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value) { struct ufs_hba *hba = dev_get_drvdata(dev); - unsigned long flags; - spin_lock_irqsave(hba->host->host_lock, flags); + guard(spinlock_irqsave)(&hba->clk_gating.lock); hba->clk_gating.delay_ms = value; - spin_unlock_irqrestore(hba->host->host_lock, flags); } EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set); @@ -2051,7 +2060,6 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ufs_hba *hba = dev_get_drvdata(dev); - unsigned long flags; u32 value; if (kstrtou32(buf, 0, &value)) @@ -2059,9 +2067,10 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev, value = !!value; - spin_lock_irqsave(hba->host->host_lock, flags); + guard(spinlock_irqsave)(&hba->clk_gating.lock); + if (value == hba->clk_gating.is_enabled) - goto out; + return count; if (value) __ufshcd_release(hba); @@ -2069,8 +2078,7 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev, hba->clk_gating.active_reqs++; hba->clk_gating.is_enabled = value; -out: - spin_unlock_irqrestore(hba->host->host_lock, flags); + return count; } @@ -2103,8 +2111,6 @@ static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba) static void ufshcd_init_clk_gating(struct ufs_hba *hba) { - char wq_name[sizeof("ufs_clk_gating_00")]; - if (!ufshcd_is_clkgating_allowed(hba)) return; @@ -2114,10 +2120,9 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba) INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); - snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d", - hba->host->host_no); - hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name, - WQ_MEM_RECLAIM | WQ_HIGHPRI); + hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue( + "ufs_clk_gating_%d", WQ_MEM_RECLAIM | 
WQ_HIGHPRI, + hba->host->host_no); ufshcd_init_clk_gating_sysfs(hba); @@ -2144,19 +2149,17 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) { bool queue_resume_work = false; ktime_t curr_t = ktime_get(); - unsigned long flags; if (!ufshcd_is_clkscaling_supported(hba)) return; - spin_lock_irqsave(hba->host->host_lock, flags); + guard(spinlock_irqsave)(&hba->clk_scaling.lock); + if (!hba->clk_scaling.active_reqs++) queue_resume_work = true; - if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) { - spin_unlock_irqrestore(hba->host->host_lock, flags); + if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) return; - } if (queue_resume_work) queue_work(hba->clk_scaling.workq, @@ -2172,18 +2175,17 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) hba->clk_scaling.busy_start_t = curr_t; hba->clk_scaling.is_busy_started = true; } - spin_unlock_irqrestore(hba->host->host_lock, flags); } static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) { struct ufs_clk_scaling *scaling = &hba->clk_scaling; - unsigned long flags; if (!ufshcd_is_clkscaling_supported(hba)) return; - spin_lock_irqsave(hba->host->host_lock, flags); + guard(spinlock_irqsave)(&hba->clk_scaling.lock); + hba->clk_scaling.active_reqs--; if (!scaling->active_reqs && scaling->is_busy_started) { scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), @@ -2191,7 +2193,6 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) scaling->busy_start_t = 0; scaling->is_busy_started = false; } - spin_unlock_irqrestore(hba->host->host_lock, flags); } static inline int ufshcd_monitor_opcode2dir(u8 opcode) @@ -2281,7 +2282,7 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag, if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) ufshcd_start_monitor(hba, lrbp); - if (is_mcq_enabled(hba)) { + if (hba->mcq_enabled) { int utrd_size = sizeof(struct utp_transfer_req_desc); struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr; struct utp_transfer_req_desc *dest; @@ -2377,15 +2378,15 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) int err; hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); - if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS) - hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT; /* nutrs and nutmrs are 0 based values */ - hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; + hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1; hba->nutmrs = ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; hba->reserved_slot = hba->nutrs - 1; + hba->nortt = FIELD_GET(MASK_NUMBER_OUTSTANDING_RTT, hba->capabilities) + 1; + /* Read crypto capabilities */ err = ufshcd_hba_init_crypto_capabilities(hba); if (err) { @@ -2393,13 +2394,22 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) return err; } + /* + * The UFSHCI 3.0 specification does not define MCQ_SUPPORT and + * LSDB_SUPPORT, but [31:29] as reserved bits with reset value 0s, which + * means we can simply read values regardless of version. 
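/*
 * Illustrative sketch of how 0-based capability fields such as the ones
 * parsed above are decoded with GENMASK()/FIELD_GET(). The mask layout is
 * invented for the example and does not match the real UFSHCI register.
 */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_CAP_NUTRS	GENMASK(4, 0)	/* "number of slots - 1" */
#define EX_CAP_NORTT	GENMASK(15, 8)	/* "outstanding RTTs - 1" */

static void example_decode_caps(u32 cap, u32 *nutrs, u32 *nortt)
{
	*nutrs = FIELD_GET(EX_CAP_NUTRS, cap) + 1;	/* back to 1-based */
	*nortt = FIELD_GET(EX_CAP_NORTT, cap) + 1;
}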
+ */ hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities); - if (!hba->mcq_sup) - return 0; + /* + * 0h: legacy single doorbell support is available + * 1h: indicate that legacy single doorbell support has been removed + */ + if (!(hba->quirks & UFSHCD_QUIRK_BROKEN_LSDBS_CAP)) + hba->lsdb_sup = !FIELD_GET(MASK_LSDB_SUPPORT, hba->capabilities); + else + hba->lsdb_sup = true; hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP); - hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT, - hba->mcq_capabilities); return 0; } @@ -2415,7 +2425,7 @@ static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) { u32 val; int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY, - 500, UIC_CMD_TIMEOUT * 1000, false, hba, + 500, uic_cmd_timeout * 1000, false, hba, REG_CONTROLLER_STATUS); return ret == 0; } @@ -2475,7 +2485,7 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) lockdep_assert_held(&hba->uic_cmd_mutex); if (wait_for_completion_timeout(&uic_cmd->done, - msecs_to_jiffies(UIC_CMD_TIMEOUT))) { + msecs_to_jiffies(uic_cmd_timeout))) { ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT; } else { ret = -ETIMEDOUT; @@ -2501,13 +2511,11 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result * @hba: per adapter instance * @uic_cmd: UIC command - * @completion: initialize the completion only if this is set to true * * Return: 0 only if success. */ static int -__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, - bool completion) +__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) { lockdep_assert_held(&hba->uic_cmd_mutex); @@ -2517,8 +2525,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, return -EIO; } - if (completion) - init_completion(&uic_cmd->done); + init_completion(&uic_cmd->done); uic_cmd->cmd_active = 1; ufshcd_dispatch_uic_cmd(hba, uic_cmd); @@ -2544,7 +2551,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) mutex_lock(&hba->uic_cmd_mutex); ufshcd_add_delay_before_dme_cmd(hba); - ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true); + ret = __ufshcd_send_uic_cmd(hba, uic_cmd); if (!ret) ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); @@ -2617,7 +2624,7 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd)); - return 0; + return ufshcd_crypto_fill_prdt(hba, lrbp); } /** @@ -2629,14 +2636,7 @@ static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) { u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); - if (hba->ufs_version == ufshci_version(1, 0)) { - u32 rw; - rw = set & INTERRUPT_MASK_RW_VER_10; - set = rw | ((set ^ intrs) & intrs); - } else { - set |= intrs; - } - + set |= intrs; ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); } @@ -2649,34 +2649,30 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) { u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); - if (hba->ufs_version == ufshci_version(1, 0)) { - u32 rw; - rw = (set & INTERRUPT_MASK_RW_VER_10) & - ~(intrs & INTERRUPT_MASK_RW_VER_10); - set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10); - - } else { - set &= ~intrs; - } - + set &= ~intrs; ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); } /** * ufshcd_prepare_req_desc_hdr - Fill UTP Transfer request descriptor header according to request * descriptor according to request + * @hba: per adapter instance * @lrbp: pointer to local reference block * 
@upiu_flags: flags required in the header * @cmd_dir: requests data direction * @ehs_length: Total EHS Length (in 32‐bytes units of all Extra Header Segments) */ -static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags, - enum dma_data_direction cmd_dir, int ehs_length) +static void +ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, + u8 *upiu_flags, enum dma_data_direction cmd_dir, + int ehs_length) { struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr; struct request_desc_header *h = &req_desc->header; enum utp_data_direction data_direction; + lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; + *h = (typeof(*h)){ }; if (cmd_dir == DMA_FROM_DEVICE) { @@ -2736,7 +2732,6 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags) ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length); cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE); - memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE); memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len); memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); @@ -2809,12 +2804,8 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba, u8 upiu_flags; int ret = 0; - if (hba->ufs_version <= ufshci_version(1, 1)) - lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE; - else - lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; + ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0); - ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0); if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags); else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) @@ -2837,18 +2828,32 @@ static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) unsigned int ioprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); u8 upiu_flags; - if (hba->ufs_version <= ufshci_version(1, 1)) - lrbp->command_type = UTP_CMD_TYPE_SCSI; - else - lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; - - ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, - lrbp->cmd->sc_data_direction, 0); + ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0); if (ioprio_class == IOPRIO_CLASS_RT) upiu_flags |= UPIU_CMD_FLAGS_CP; ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags); } +static void __ufshcd_setup_cmd(struct ufshcd_lrb *lrbp, struct scsi_cmnd *cmd, u8 lun, int tag) +{ + memset(lrbp->ucd_req_ptr, 0, sizeof(*lrbp->ucd_req_ptr)); + + lrbp->cmd = cmd; + lrbp->task_tag = tag; + lrbp->lun = lun; + ufshcd_prepare_lrbp_crypto(cmd ? 
scsi_cmd_to_rq(cmd) : NULL, lrbp); +} + +static void ufshcd_setup_scsi_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, + struct scsi_cmnd *cmd, u8 lun, int tag) +{ + __ufshcd_setup_cmd(lrbp, cmd, lun, tag); + lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba); + lrbp->req_abort_skip = false; + + ufshcd_comp_scsi_upiu(hba, lrbp); +} + /** * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID * @upiu_wlun_id: UPIU W-LUN id @@ -2904,9 +2909,8 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i) struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr; dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr + i * ufshcd_get_ucd_size(hba); - u16 response_offset = offsetof(struct utp_transfer_cmd_desc, - response_upiu); - u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table); + u16 response_offset = le16_to_cpu(utrdlp[i].response_upiu_offset); + u16 prdt_offset = le16_to_cpu(utrdlp[i].prd_table_offset); lrb->utr_descriptor_ptr = utrdlp + i; lrb->utrd_dma_addr = hba->utrdl_dma_addr + @@ -2982,16 +2986,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) ufshcd_hold(hba); lrbp = &hba->lrb[tag]; - lrbp->cmd = cmd; - lrbp->task_tag = tag; - lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); - lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba); - - ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp); - - lrbp->req_abort_skip = false; - ufshcd_comp_scsi_upiu(hba, lrbp); + ufshcd_setup_scsi_cmd(hba, lrbp, cmd, ufshcd_scsi_to_upiu_lun(cmd->device->lun), tag); err = ufshcd_map_sg(hba, lrbp); if (err) { @@ -2999,7 +2995,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) goto out; } - if (is_mcq_enabled(hba)) + if (hba->mcq_enabled) hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); ufshcd_send_command(hba, tag, hwq); @@ -3016,15 +3012,18 @@ out: return err; } -static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, - struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag) +static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, + enum dev_cmd_type cmd_type, u8 lun, int tag) { - lrbp->cmd = NULL; - lrbp->task_tag = tag; - lrbp->lun = 0; /* device management cmd is not specific to any LUN */ + __ufshcd_setup_cmd(lrbp, NULL, lun, tag); lrbp->intr_cmd = true; /* No interrupt aggregation */ - ufshcd_prepare_lrbp_crypto(NULL, lrbp); hba->dev_cmd.type = cmd_type; +} + +static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag) +{ + ufshcd_setup_dev_cmd(hba, lrbp, cmd_type, 0, tag); return ufshcd_compose_devman_upiu(hba, lrbp); } @@ -3037,16 +3036,7 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, */ bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd) { - struct request *rq; - - if (!cmd) - return false; - - rq = scsi_cmd_to_rq(cmd); - if (!blk_mq_request_started(rq)) - return false; - - return true; + return cmd && blk_mq_rq_state(scsi_cmd_to_rq(cmd)) == MQ_RQ_IN_FLIGHT; } /* @@ -3058,10 +3048,9 @@ bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd) static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag) { u32 mask; - unsigned long flags; int err; - if (is_mcq_enabled(hba)) { + if (hba->mcq_enabled) { /* * MCQ mode. Clean up the MCQ resources similar to * what the ufshcd_utrl_clear() does for SDB mode. 
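/*
 * Sketch of the on-stack completion pairing that the reworked device
 * management submission path (ufshcd_issue_dev_cmd() further below)
 * centralizes: the submitter waits with a timeout and the completion
 * handler calls complete(). Everything named example_* is illustrative.
 */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int example_send_and_wait(void (*submit)(struct completion *done),
				 unsigned int timeout_ms)
{
	DECLARE_COMPLETION_ONSTACK(done);

	submit(&done);	/* the IRQ/completion path calls complete(&done) */

	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(timeout_ms)))
		return -ETIMEDOUT;

	return 0;
}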
@@ -3078,9 +3067,7 @@ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag) mask = 1U << task_tag; /* clear outstanding transaction before retry */ - spin_lock_irqsave(hba->host->host_lock, flags); ufshcd_utrl_clear(hba, mask); - spin_unlock_irqrestore(hba->host->host_lock, flags); /* * wait for h/w to clear corresponding bit in door-bell. @@ -3117,8 +3104,13 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) case UPIU_TRANSACTION_QUERY_RSP: { u8 response = lrbp->ucd_rsp_ptr->header.response; - if (response == 0) + if (response == 0) { err = ufshcd_copy_query_response(hba, lrbp); + } else { + err = -EINVAL; + dev_err(hba->dev, "%s: unexpected response in Query RSP: %x\n", + __func__, response); + } break; } case UPIU_TRANSACTION_REJECT_UPIU: @@ -3171,8 +3163,10 @@ retry: __func__, lrbp->task_tag); /* MCQ mode */ - if (is_mcq_enabled(hba)) { - err = ufshcd_clear_cmd(hba, lrbp->task_tag); + if (hba->mcq_enabled) { + /* successfully cleared the command, retry if needed */ + if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) + err = -EAGAIN; hba->dev_cmd.complete = NULL; return err; } @@ -3229,6 +3223,39 @@ retry: return err; } +static void ufshcd_dev_man_lock(struct ufs_hba *hba) +{ + ufshcd_hold(hba); + mutex_lock(&hba->dev_cmd.lock); + down_read(&hba->clk_scaling_lock); +} + +static void ufshcd_dev_man_unlock(struct ufs_hba *hba) +{ + up_read(&hba->clk_scaling_lock); + mutex_unlock(&hba->dev_cmd.lock); + ufshcd_release(hba); +} + +static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, + const u32 tag, int timeout) +{ + DECLARE_COMPLETION_ONSTACK(wait); + int err; + + hba->dev_cmd.complete = &wait; + + ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); + + ufshcd_send_command(hba, tag, hba->dev_cmd_queue); + err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); + + ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP, + (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); + + return err; +} + /** * ufshcd_exec_dev_cmd - API for sending device management requests * @hba: UFS hba @@ -3243,34 +3270,18 @@ retry: static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type, int timeout) { - DECLARE_COMPLETION_ONSTACK(wait); const u32 tag = hba->reserved_slot; - struct ufshcd_lrb *lrbp; + struct ufshcd_lrb *lrbp = &hba->lrb[tag]; int err; /* Protects use of hba->reserved_slot. */ lockdep_assert_held(&hba->dev_cmd.lock); - down_read(&hba->clk_scaling_lock); - - lrbp = &hba->lrb[tag]; - lrbp->cmd = NULL; err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag); if (unlikely(err)) - goto out; - - hba->dev_cmd.complete = &wait; - - ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); - - ufshcd_send_command(hba, tag, hba->dev_cmd_queue); - err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); - ufshcd_add_query_upiu_trace(hba, err ? 
UFS_QUERY_ERR : UFS_QUERY_COMP, - (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); + return err; -out: - up_read(&hba->clk_scaling_lock); - return err; + return ufshcd_issue_dev_cmd(hba, lrbp, tag, timeout); } /** @@ -3340,8 +3351,8 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, BUG_ON(!hba); - ufshcd_hold(hba); - mutex_lock(&hba->dev_cmd.lock); + ufshcd_dev_man_lock(hba); + ufshcd_init_query(hba, &request, &response, opcode, idn, index, selector); @@ -3383,8 +3394,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, MASK_QUERY_UPIU_FLAG_LOC) & 0x1; out_unlock: - mutex_unlock(&hba->dev_cmd.lock); - ufshcd_release(hba); + ufshcd_dev_man_unlock(hba); return err; } @@ -3414,9 +3424,8 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, return -EINVAL; } - ufshcd_hold(hba); + ufshcd_dev_man_lock(hba); - mutex_lock(&hba->dev_cmd.lock); ufshcd_init_query(hba, &request, &response, opcode, idn, index, selector); @@ -3446,8 +3455,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, *attr_val = be32_to_cpu(response->upiu_res.value); out_unlock: - mutex_unlock(&hba->dev_cmd.lock); - ufshcd_release(hba); + ufshcd_dev_man_unlock(hba); return err; } @@ -3510,9 +3518,8 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba, return -EINVAL; } - ufshcd_hold(hba); + ufshcd_dev_man_lock(hba); - mutex_lock(&hba->dev_cmd.lock); ufshcd_init_query(hba, &request, &response, opcode, idn, index, selector); hba->dev_cmd.query.descriptor = desc_buf; @@ -3545,8 +3552,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba, out_unlock: hba->dev_cmd.query.descriptor = NULL; - mutex_unlock(&hba->dev_cmd.lock); - ufshcd_release(hba); + ufshcd_dev_man_unlock(hba); return err; } @@ -3979,11 +3985,11 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba) */ static int ufshcd_dme_link_startup(struct ufs_hba *hba) { - struct uic_command uic_cmd = {0}; + struct uic_command uic_cmd = { + .command = UIC_CMD_DME_LINK_STARTUP, + }; int ret; - uic_cmd.command = UIC_CMD_DME_LINK_STARTUP; - ret = ufshcd_send_uic_cmd(hba, &uic_cmd); if (ret) dev_dbg(hba->dev, @@ -4001,11 +4007,11 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba) */ static int ufshcd_dme_reset(struct ufs_hba *hba) { - struct uic_command uic_cmd = {0}; + struct uic_command uic_cmd = { + .command = UIC_CMD_DME_RESET, + }; int ret; - uic_cmd.command = UIC_CMD_DME_RESET; - ret = ufshcd_send_uic_cmd(hba, &uic_cmd); if (ret) dev_err(hba->dev, @@ -4040,11 +4046,11 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt); */ static int ufshcd_dme_enable(struct ufs_hba *hba) { - struct uic_command uic_cmd = {0}; + struct uic_command uic_cmd = { + .command = UIC_CMD_DME_ENABLE, + }; int ret; - uic_cmd.command = UIC_CMD_DME_ENABLE; - ret = ufshcd_send_uic_cmd(hba, &uic_cmd); if (ret) dev_err(hba->dev, @@ -4077,11 +4083,16 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US - delta; else - return; /* no more delay required */ + min_sleep_time_us = 0; /* no more delay required */ } - /* allow sleep for extra 50us if needed */ - usleep_range(min_sleep_time_us, min_sleep_time_us + 50); + if (min_sleep_time_us > 0) { + /* allow sleep for extra 50us if needed */ + usleep_range(min_sleep_time_us, min_sleep_time_us + 50); + } + + /* update the last_dme_cmd_tstamp */ + hba->last_dme_cmd_tstamp = ktime_get(); } /** @@ -4097,7 +4108,12 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) int 
ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set, u32 mib_val, u8 peer) { - struct uic_command uic_cmd = {0}; + struct uic_command uic_cmd = { + .command = peer ? UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET, + .argument1 = attr_sel, + .argument2 = UIC_ARG_ATTR_TYPE(attr_set), + .argument3 = mib_val, + }; static const char *const action[] = { "dme-set", "dme-peer-set" @@ -4106,12 +4122,6 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, int ret; int retries = UFS_UIC_COMMAND_RETRIES; - uic_cmd.command = peer ? - UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET; - uic_cmd.argument1 = attr_sel; - uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set); - uic_cmd.argument3 = mib_val; - do { /* for peer attributes we retry upon failure */ ret = ufshcd_send_uic_cmd(hba, &uic_cmd); @@ -4141,7 +4151,10 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr); int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, u32 *mib_val, u8 peer) { - struct uic_command uic_cmd = {0}; + struct uic_command uic_cmd = { + .command = peer ? UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET, + .argument1 = attr_sel, + }; static const char *const action[] = { "dme-get", "dme-peer-get" @@ -4175,10 +4188,6 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, } } - uic_cmd.command = peer ? - UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET; - uic_cmd.argument1 = attr_sel; - do { /* for peer attributes we retry upon failure */ ret = ufshcd_send_uic_cmd(hba, &uic_cmd); @@ -4242,11 +4251,11 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) * Make sure UIC command completion interrupt is disabled before * issuing UIC command. */ - wmb(); + ufshcd_readl(hba, REG_INTERRUPT_ENABLE); reenable_intr = true; } spin_unlock_irqrestore(hba->host->host_lock, flags); - ret = __ufshcd_send_uic_cmd(hba, cmd, false); + ret = __ufshcd_send_uic_cmd(hba, cmd); if (ret) { dev_err(hba->dev, "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", @@ -4255,7 +4264,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) } if (!wait_for_completion_timeout(hba->uic_async_done, - msecs_to_jiffies(UIC_CMD_TIMEOUT))) { + msecs_to_jiffies(uic_cmd_timeout))) { dev_err(hba->dev, "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n", cmd->command, cmd->argument3); @@ -4302,6 +4311,42 @@ out_unlock: } /** + * ufshcd_send_bsg_uic_cmd - Send UIC commands requested via BSG layer and retrieve the result + * @hba: per adapter instance + * @uic_cmd: UIC command + * + * Return: 0 only if success. + */ +int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) +{ + int ret; + + if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD) + return 0; + + ufshcd_hold(hba); + + if (uic_cmd->argument1 == UIC_ARG_MIB(PA_PWRMODE) && + uic_cmd->command == UIC_CMD_DME_SET) { + ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd); + goto out; + } + + mutex_lock(&hba->uic_cmd_mutex); + ufshcd_add_delay_before_dme_cmd(hba); + + ret = __ufshcd_send_uic_cmd(hba, uic_cmd); + if (!ret) + ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); + + mutex_unlock(&hba->uic_cmd_mutex); + +out: + ufshcd_release(hba); + return ret; +} + +/** * ufshcd_uic_change_pwr_mode - Perform the UIC power mode chage * using DME_SET primitives. 
* @hba: per adapter instance @@ -4311,7 +4356,11 @@ out_unlock: */ int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) { - struct uic_command uic_cmd = {0}; + struct uic_command uic_cmd = { + .command = UIC_CMD_DME_SET, + .argument1 = UIC_ARG_MIB(PA_PWRMODE), + .argument3 = mode, + }; int ret; if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) { @@ -4324,9 +4373,6 @@ int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) } } - uic_cmd.command = UIC_CMD_DME_SET; - uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); - uic_cmd.argument3 = mode; ufshcd_hold(hba); ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); ufshcd_release(hba); @@ -4367,13 +4413,14 @@ EXPORT_SYMBOL_GPL(ufshcd_link_recovery); int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) { - int ret; - struct uic_command uic_cmd = {0}; + struct uic_command uic_cmd = { + .command = UIC_CMD_DME_HIBER_ENTER, + }; ktime_t start = ktime_get(); + int ret; ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE); - uic_cmd.command = UIC_CMD_DME_HIBER_ENTER; ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter", ktime_to_us(ktime_sub(ktime_get(), start)), ret); @@ -4391,13 +4438,14 @@ EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter); int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) { - struct uic_command uic_cmd = {0}; + struct uic_command uic_cmd = { + .command = UIC_CMD_DME_HIBER_EXIT, + }; int ret; ktime_t start = ktime_get(); ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE); - uic_cmd.command = UIC_CMD_DME_HIBER_EXIT; ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit", ktime_to_us(ktime_sub(ktime_get(), start)), ret); @@ -4494,6 +4542,14 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) return -EINVAL; } + if (pwr_info->lane_rx != pwr_info->lane_tx) { + dev_err(hba->dev, "%s: asymmetric connected lanes. rx=%d, tx=%d\n", + __func__, + pwr_info->lane_rx, + pwr_info->lane_tx); + return -EINVAL; + } + /* * First, get the maximum gears of HS speed. * If a zero value, it means there is no HSGEAR capability. @@ -4606,9 +4662,6 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba, dev_err(hba->dev, "%s: power mode change failed %d\n", __func__, ret); } else { - ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL, - pwr_mode); - memcpy(&hba->pwr_info, pwr_mode, sizeof(struct ufs_pa_layer_attr)); } @@ -4637,6 +4690,10 @@ int ufshcd_config_pwr_mode(struct ufs_hba *hba, ret = ufshcd_change_power_mode(hba, &final_params); + if (!ret) + ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL, + &final_params); + return ret; } EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode); @@ -4725,12 +4782,6 @@ int ufshcd_make_hba_operational(struct ufs_hba *hba) REG_UTP_TASK_REQ_LIST_BASE_H); /* - * Make sure base address and interrupt setup are updated before - * enabling the run/stop registers below. - */ - wmb(); - - /* * UCRDY, UTMRLDY and UTRLRDY bits must be 1 */ reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); @@ -4752,20 +4803,14 @@ EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational); */ void ufshcd_hba_stop(struct ufs_hba *hba) { - unsigned long flags; int err; - /* - * Obtain the host lock to prevent that the controller is disabled - * while the UFS interrupt handler is active on another CPU. 
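/*
 * Sketch of the ordering idiom the patch switches to above: instead of a
 * wmb() after an MMIO write, reading a register back from the same device
 * forces the posted write out before execution continues. The offset below
 * is a placeholder, not a UFSHCI register.
 */
#include <linux/io.h>
#include <linux/types.h>

#define EX_REG_IRQ_ENABLE	0x20

static void example_mask_irqs_and_flush(void __iomem *mmio)
{
	writel(0, mmio + EX_REG_IRQ_ENABLE);
	/* The read cannot complete until the write above reached the device. */
	readl(mmio + EX_REG_IRQ_ENABLE);
}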
- */ - spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_disable_irq(hba); ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); - spin_unlock_irqrestore(hba->host->host_lock, flags); - err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, CONTROLLER_ENABLE, CONTROLLER_DISABLE, 10, 1); + ufshcd_enable_irq(hba); if (err) dev_err(hba->dev, "%s: Controller disable failed\n", __func__); } @@ -4783,51 +4828,44 @@ EXPORT_SYMBOL_GPL(ufshcd_hba_stop); */ static int ufshcd_hba_execute_hce(struct ufs_hba *hba) { - int retry_outer = 3; - int retry_inner; + int retry; -start: - if (ufshcd_is_hba_active(hba)) - /* change controller state to "reset state" */ - ufshcd_hba_stop(hba); + for (retry = 3; retry > 0; retry--) { + if (ufshcd_is_hba_active(hba)) + /* change controller state to "reset state" */ + ufshcd_hba_stop(hba); + + /* UniPro link is disabled at this point */ + ufshcd_set_link_off(hba); - /* UniPro link is disabled at this point */ - ufshcd_set_link_off(hba); + ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); - ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); + /* start controller initialization sequence */ + ufshcd_hba_start(hba); - /* start controller initialization sequence */ - ufshcd_hba_start(hba); + /* + * To initialize a UFS host controller HCE bit must be set to 1. + * During initialization the HCE bit value changes from 1->0->1. + * When the host controller completes initialization sequence + * it sets the value of HCE bit to 1. The same HCE bit is read back + * to check if the controller has completed initialization sequence. + * So without this delay the value HCE = 1, set in the previous + * instruction might be read back. + * This delay can be changed based on the controller. + */ + ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100); - /* - * To initialize a UFS host controller HCE bit must be set to 1. - * During initialization the HCE bit value changes from 1->0->1. - * When the host controller completes initialization sequence - * it sets the value of HCE bit to 1. The same HCE bit is read back - * to check if the controller has completed initialization sequence. - * So without this delay the value HCE = 1, set in the previous - * instruction might be read back. - * This delay can be changed based on the controller. 
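/*
 * Sketch of the bounded register polling that the rewritten HCE enable loop
 * relies on, expressed with readl_poll_timeout(); the register offset, bit
 * and timings are invented for the example.
 */
#include <linux/iopoll.h>
#include <linux/io.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_REG_HCE	0x34
#define EX_HCE_ENABLE	BIT(0)

static int example_wait_controller_ready(void __iomem *mmio)
{
	u32 val;

	/* Poll every 1000 us, give up after 50 ms. */
	return readl_poll_timeout(mmio + EX_REG_HCE, val,
				  val & EX_HCE_ENABLE, 1000, 50 * 1000);
}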
- */ - ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100); + /* wait for the host controller to complete initialization */ + if (!ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, CONTROLLER_ENABLE, + CONTROLLER_ENABLE, 1000, 50)) + break; - /* wait for the host controller to complete initialization */ - retry_inner = 50; - while (!ufshcd_is_hba_active(hba)) { - if (retry_inner) { - retry_inner--; - } else { - dev_err(hba->dev, - "Controller enable failed\n"); - if (retry_outer) { - retry_outer--; - goto start; - } - return -EIO; - } - usleep_range(1000, 1100); + dev_err(hba->dev, "Enabling the controller failed\n"); } + if (!retry) + return -EIO; + /* enable UIC related interrupts */ ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); @@ -5027,8 +5065,8 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba) int err = 0; int retries; - ufshcd_hold(hba); - mutex_lock(&hba->dev_cmd.lock); + ufshcd_dev_man_lock(hba); + for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, hba->nop_out_timeout); @@ -5038,8 +5076,8 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba) dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); } - mutex_unlock(&hba->dev_cmd.lock); - ufshcd_release(hba); + + ufshcd_dev_man_unlock(hba); if (err) dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); @@ -5142,12 +5180,12 @@ set_qdepth: } /** - * ufshcd_slave_alloc - handle initial SCSI device configurations + * ufshcd_sdev_init - handle initial SCSI device configurations * @sdev: pointer to SCSI device * * Return: success. */ -static int ufshcd_slave_alloc(struct scsi_device *sdev) +static int ufshcd_sdev_init(struct scsi_device *sdev) { struct ufs_hba *hba; @@ -5190,17 +5228,19 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth) } /** - * ufshcd_slave_configure - adjust SCSI device configurations + * ufshcd_sdev_configure - adjust SCSI device configurations * @sdev: pointer to SCSI device + * @lim: queue limits * * Return: 0 (success). */ -static int ufshcd_slave_configure(struct scsi_device *sdev) +static int ufshcd_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct ufs_hba *hba = shost_priv(sdev->host); struct request_queue *q = sdev->request_queue; - blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); + lim->dma_pad_mask = PRDT_DATA_BYTE_COUNT_PAD - 1; /* * Block runtime-pm until all consumers are added. @@ -5226,10 +5266,10 @@ static int ufshcd_slave_configure(struct scsi_device *sdev) } /** - * ufshcd_slave_destroy - remove SCSI device configurations + * ufshcd_sdev_destroy - remove SCSI device configurations * @sdev: pointer to SCSI device */ -static void ufshcd_slave_destroy(struct scsi_device *sdev) +static void ufshcd_sdev_destroy(struct scsi_device *sdev) { struct ufs_hba *hba; unsigned long flags; @@ -5378,10 +5418,12 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, } break; case OCS_ABORTED: - result |= DID_ABORT << 16; - break; case OCS_INVALID_COMMAND_STATUS: result |= DID_REQUEUE << 16; + dev_warn(hba->dev, + "OCS %s from controller for tag %d\n", + (ocs == OCS_ABORTED ? 
"aborted" : "invalid"), + lrbp->task_tag); break; case OCS_INVALID_CMD_TABLE_ATTR: case OCS_INVALID_PRDT_ATTR: @@ -5438,32 +5480,37 @@ static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) { irqreturn_t retval = IRQ_NONE; + struct uic_command *cmd; spin_lock(hba->host->host_lock); + cmd = hba->active_uic_cmd; + if (WARN_ON_ONCE(!cmd)) + goto unlock; + if (ufshcd_is_auto_hibern8_error(hba, intr_status)) hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status); - if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { - hba->active_uic_cmd->argument2 |= - ufshcd_get_uic_cmd_result(hba); - hba->active_uic_cmd->argument3 = - ufshcd_get_dme_attr_val(hba); + if (intr_status & UIC_COMMAND_COMPL) { + cmd->argument2 |= ufshcd_get_uic_cmd_result(hba); + cmd->argument3 = ufshcd_get_dme_attr_val(hba); if (!hba->uic_async_done) - hba->active_uic_cmd->cmd_active = 0; - complete(&hba->active_uic_cmd->done); + cmd->cmd_active = 0; + complete(&cmd->done); retval = IRQ_HANDLED; } - if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) { - hba->active_uic_cmd->cmd_active = 0; + if (intr_status & UFSHCD_UIC_PWR_MASK && hba->uic_async_done) { + cmd->cmd_active = 0; complete(hba->uic_async_done); retval = IRQ_HANDLED; } if (retval == IRQ_HANDLED) - ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd, - UFS_CMD_COMP); + ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP); + +unlock: spin_unlock(hba->host->host_lock); + return retval; } @@ -5474,6 +5521,7 @@ void ufshcd_release_scsi_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd = lrbp->cmd; scsi_dma_unmap(cmd); + ufshcd_crypto_clear_prdt(hba, lrbp); ufshcd_release(hba); ufshcd_clk_scaling_update_busy(hba); } @@ -5493,6 +5541,7 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag, lrbp = &hba->lrb[task_tag]; lrbp->compl_time_stamp = ktime_get(); + lrbp->compl_time_stamp_local_clock = local_clock(); cmd = lrbp->cmd; if (cmd) { if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) @@ -5502,15 +5551,12 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag, ufshcd_release_scsi_cmd(hba, lrbp); /* Do not touch lrbp after scsi done */ scsi_done(cmd); - } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE || - lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) { - if (hba->dev_cmd.complete) { - if (cqe) { - ocs = le32_to_cpu(cqe->status) & MASK_OCS; - lrbp->utr_descriptor_ptr->header.ocs = ocs; - } - complete(hba->dev_cmd.complete); + } else if (hba->dev_cmd.complete) { + if (cqe) { + ocs = le32_to_cpu(cqe->status) & MASK_OCS; + lrbp->utr_descriptor_ptr->header.ocs = ocs; } + complete(hba->dev_cmd.complete); } } @@ -5559,7 +5605,7 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num) u32 tr_doorbell; struct ufs_hw_queue *hwq; - if (is_mcq_enabled(hba)) { + if (hba->mcq_enabled) { hwq = &hba->uhq[queue_num]; return ufshcd_mcq_poll_cqe_lock(hba, hwq); @@ -5602,7 +5648,6 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba, struct ufshcd_lrb *lrbp; struct scsi_cmnd *cmd; unsigned long flags; - u32 hwq_num, utag; int tag; for (tag = 0; tag < hba->nutrs; tag++) { @@ -5612,9 +5657,7 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba, test_bit(SCMD_STATE_COMPLETE, &cmd->state)) continue; - utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd)); - hwq_num = blk_mq_unique_tag_to_hwq(utag); - hwq = &hba->uhq[hwq_num]; + hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); if (force_compl) { 
ufshcd_mcq_compl_all_cqes_lock(hba, hwq); @@ -5855,12 +5898,11 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) /** * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status * @hba: per-adapter instance - * @status: bkops_status value * * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn * flag in the device to permit background operations if the device - * bkops_status is greater than or equal to "status" argument passed to - * this function, disable otherwise. + * bkops_status is greater than or equal to the "hba->urgent_bkops_lvl", + * disable otherwise. * * Return: 0 for success, non-zero in case of failure. * @@ -5868,11 +5910,11 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) * to know whether auto bkops is enabled or disabled after this function * returns control to it. */ -static int ufshcd_bkops_ctrl(struct ufs_hba *hba, - enum bkops_status status) +static int ufshcd_bkops_ctrl(struct ufs_hba *hba) { - int err; + enum bkops_status status = hba->urgent_bkops_lvl; u32 curr_status = 0; + int err; err = ufshcd_get_bkops_status(hba, &curr_status); if (err) { @@ -5894,23 +5936,6 @@ out: return err; } -/** - * ufshcd_urgent_bkops - handle urgent bkops exception event - * @hba: per-adapter instance - * - * Enable fBackgroundOpsEn flag in the device to permit background - * operations. - * - * If BKOPs is enabled, this function returns 0, 1 if the bkops in not enabled - * and negative error value for any other failure. - * - * Return: 0 upon success; < 0 upon failure. - */ -static int ufshcd_urgent_bkops(struct ufs_hba *hba) -{ - return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl); -} - static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) { return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, @@ -5954,24 +5979,6 @@ out: __func__, err); } -static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status) -{ - u32 value; - - if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, - QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value)) - return; - - dev_info(hba->dev, "exception Tcase %d\n", value - 80); - - ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP); - - /* - * A placeholder for the platform vendors to add whatever additional - * steps required - */ -} - static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn) { u8 index; @@ -6179,12 +6186,11 @@ static void ufshcd_exception_event_handler(struct work_struct *work) u32 status = 0; hba = container_of(work, struct ufs_hba, eeh_work); - ufshcd_scsi_block_requests(hba); err = ufshcd_get_ee_status(hba, &status); if (err) { dev_err(hba->dev, "%s: failed to get exception status %d\n", __func__, err); - goto out; + return; } trace_ufshcd_exception_event(dev_name(hba->dev), status); @@ -6193,17 +6199,15 @@ static void ufshcd_exception_event_handler(struct work_struct *work) ufshcd_bkops_exception_event_handler(hba); if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP) - ufshcd_temp_exception_event_handler(hba, status); + ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP); ufs_debugfs_exception_event(hba, status); -out: - ufshcd_scsi_unblock_requests(hba); } /* Complete requests that have door-bell cleared */ static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl) { - if (is_mcq_enabled(hba)) + if (hba->mcq_enabled) ufshcd_mcq_compl_pending_transfer(hba, force_compl); else ufshcd_transfer_req_compl(hba); @@ -6363,15 +6367,14 @@ static void 
ufshcd_err_handling_prepare(struct ufs_hba *hba) ufshcd_suspend_clkscaling(hba); ufshcd_clk_scaling_allow(hba, false); } - ufshcd_scsi_block_requests(hba); /* Wait for ongoing ufshcd_queuecommand() calls to finish. */ - blk_mq_wait_quiesce_done(&hba->host->tag_set); + blk_mq_quiesce_tagset(&hba->host->tag_set); cancel_work_sync(&hba->eeh_work); } static void ufshcd_err_handling_unprepare(struct ufs_hba *hba) { - ufshcd_scsi_unblock_requests(hba); + blk_mq_unquiesce_tagset(&hba->host->tag_set); ufshcd_release(hba); if (ufshcd_is_clkscaling_supported(hba)) ufshcd_clk_scaling_suspend(hba, false); @@ -6450,24 +6453,12 @@ static bool ufshcd_abort_one(struct request *rq, void *priv) struct scsi_device *sdev = cmd->device; struct Scsi_Host *shost = sdev->host; struct ufs_hba *hba = shost_priv(shost); - struct ufshcd_lrb *lrbp = &hba->lrb[tag]; - struct ufs_hw_queue *hwq; - unsigned long flags; *ret = ufshcd_try_to_abort_task(hba, tag); dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag, hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1, *ret ? "failed" : "succeeded"); - /* Release cmd in MCQ mode if abort succeeds */ - if (is_mcq_enabled(hba) && (*ret == 0)) { - hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd)); - spin_lock_irqsave(&hwq->cq_lock, flags); - if (ufshcd_cmd_inflight(lrbp->cmd)) - ufshcd_release_scsi_cmd(hba, lrbp); - spin_unlock_irqrestore(&hwq->cq_lock, flags); - } - return *ret == 0; } @@ -6549,7 +6540,8 @@ again: if (ufshcd_err_handling_should_stop(hba)) goto skip_err_handling; - if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { + if ((hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) && + !hba->force_reset) { bool ret; spin_unlock_irqrestore(hba->host->host_lock, flags); @@ -6996,14 +6988,11 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) { int err = 0; u32 mask = 1 << tag; - unsigned long flags; if (!test_bit(tag, &hba->outstanding_tasks)) goto out; - spin_lock_irqsave(hba->host->host_lock, flags); ufshcd_utmrl_clear(hba, tag); - spin_unlock_irqrestore(hba->host->host_lock, flags); /* poll for max. 1 sec to clear door bell register by h/w */ err = ufshcd_wait_for_register(hba, @@ -7046,15 +7035,13 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq)); ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function); - /* send command to the controller */ __set_bit(task_tag, &hba->outstanding_tasks); - ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); - /* Make sure that doorbell is committed immediately */ - wmb(); - spin_unlock_irqrestore(host->host_lock, flags); + /* send command to the controller */ + ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); + ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND); /* wait until the task management command is completed */ @@ -7159,35 +7146,21 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type, enum query_opcode desc_op) { - DECLARE_COMPLETION_ONSTACK(wait); const u32 tag = hba->reserved_slot; - struct ufshcd_lrb *lrbp; + struct ufshcd_lrb *lrbp = &hba->lrb[tag]; int err = 0; u8 upiu_flags; /* Protects use of hba->reserved_slot. 
*/ lockdep_assert_held(&hba->dev_cmd.lock); - down_read(&hba->clk_scaling_lock); + ufshcd_setup_dev_cmd(hba, lrbp, cmd_type, 0, tag); - lrbp = &hba->lrb[tag]; - lrbp->cmd = NULL; - lrbp->task_tag = tag; - lrbp->lun = 0; - lrbp->intr_cmd = true; - ufshcd_prepare_lrbp_crypto(NULL, lrbp); - hba->dev_cmd.type = cmd_type; - - if (hba->ufs_version <= ufshci_version(1, 1)) - lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE; - else - lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; + ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0); /* update the task tag in the request upiu */ req_upiu->header.task_tag = tag; - ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0); - /* just copy the upiu request as it is */ memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr)); if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) { @@ -7201,17 +7174,12 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); - hba->dev_cmd.complete = &wait; - - ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); - - ufshcd_send_command(hba, tag, hba->dev_cmd_queue); /* * ignore the returning value here - ufshcd_check_query_response is * bound to fail since dev_cmd.query and dev_cmd.type were left empty. * read the response directly ignoring all errors. */ - ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT); + ufshcd_issue_dev_cmd(hba, lrbp, tag, QUERY_REQ_TIMEOUT); /* just copy the upiu response as it is */ memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu)); @@ -7234,7 +7202,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP, (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); - up_read(&hba->clk_scaling_lock); return err; } @@ -7273,13 +7240,11 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, cmd_type = DEV_CMD_TYPE_NOP; fallthrough; case UPIU_TRANSACTION_QUERY_REQ: - ufshcd_hold(hba); - mutex_lock(&hba->dev_cmd.lock); + ufshcd_dev_man_lock(hba); err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu, desc_buff, buff_len, cmd_type, desc_op); - mutex_unlock(&hba->dev_cmd.lock); - ufshcd_release(hba); + ufshcd_dev_man_unlock(hba); break; case UPIU_TRANSACTION_TASK_REQ: @@ -7329,41 +7294,21 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list, enum dma_data_direction dir) { - DECLARE_COMPLETION_ONSTACK(wait); const u32 tag = hba->reserved_slot; - struct ufshcd_lrb *lrbp; + struct ufshcd_lrb *lrbp = &hba->lrb[tag]; int err = 0; int result; u8 upiu_flags; u8 *ehs_data; u16 ehs_len; + int ehs = (hba->capabilities & MASK_EHSLUTRD_SUPPORTED) ? 2 : 0; /* Protects use of hba->reserved_slot. */ - ufshcd_hold(hba); - mutex_lock(&hba->dev_cmd.lock); - down_read(&hba->clk_scaling_lock); + ufshcd_dev_man_lock(hba); - lrbp = &hba->lrb[tag]; - lrbp->cmd = NULL; - lrbp->task_tag = tag; - lrbp->lun = UFS_UPIU_RPMB_WLUN; + ufshcd_setup_dev_cmd(hba, lrbp, DEV_CMD_TYPE_RPMB, UFS_UPIU_RPMB_WLUN, tag); - lrbp->intr_cmd = true; - ufshcd_prepare_lrbp_crypto(NULL, lrbp); - hba->dev_cmd.type = DEV_CMD_TYPE_RPMB; - - /* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */ - lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; - - /* - * According to UFSHCI 4.0 specification page 24, if EHSLUTRDS is 0, host controller takes - * EHS length from CMD UPIU, and SW driver use EHS Length field in CMD UPIU. 
if it is 1, - * HW controller takes EHS length from UTRD. - */ - if (hba->capabilities & MASK_EHSLUTRD_SUPPORTED) - ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2); - else - ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 0); + ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, ehs); /* update the task tag */ req_upiu->header.task_tag = tag; @@ -7378,11 +7323,7 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); - hba->dev_cmd.complete = &wait; - - ufshcd_send_command(hba, tag, hba->dev_cmd_queue); - - err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT); + err = ufshcd_issue_dev_cmd(hba, lrbp, tag, ADVANCED_RPMB_REQ_TIMEOUT); if (!err) { /* Just copy the upiu response as it is */ @@ -7407,9 +7348,8 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r } } - up_read(&hba->clk_scaling_lock); - mutex_unlock(&hba->dev_cmd.lock); - ufshcd_release(hba); + ufshcd_dev_man_unlock(hba); + return err ? : result; } @@ -7441,7 +7381,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) goto out; } - if (is_mcq_enabled(hba)) { + if (hba->mcq_enabled) { for (pos = 0; pos < hba->nutrs; pos++) { lrbp = &hba->lrb[pos]; if (ufshcd_cmd_inflight(lrbp->cmd) && @@ -7517,10 +7457,9 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap) int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) { struct ufshcd_lrb *lrbp = &hba->lrb[tag]; - int err = 0; + int err; int poll_cnt; u8 resp = 0xF; - u32 reg; for (poll_cnt = 100; poll_cnt; poll_cnt--) { err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, @@ -7535,46 +7474,27 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) * cmd not pending in the device, check if it is * in transition. */ - dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n", + dev_info( + hba->dev, + "%s: cmd with tag %d not pending in the device.\n", __func__, tag); - if (is_mcq_enabled(hba)) { - /* MCQ mode */ - if (ufshcd_cmd_inflight(lrbp->cmd)) { - /* sleep for max. 200us same delay as in SDB mode */ - usleep_range(100, 200); - continue; - } - /* command completed already */ - dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n", - __func__, tag); - goto out; - } - - /* Single Doorbell Mode */ - reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); - if (reg & (1 << tag)) { - /* sleep for max. 200us to stabilize */ - usleep_range(100, 200); - continue; + if (!ufshcd_cmd_inflight(lrbp->cmd)) { + dev_info(hba->dev, + "%s: cmd with tag=%d completed.\n", + __func__, tag); + return 0; } - /* command completed already */ - dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n", - __func__, tag); - goto out; + usleep_range(100, 200); } else { dev_err(hba->dev, "%s: no response from device. tag = %d, err %d\n", __func__, tag, err); - if (!err) - err = resp; /* service response error */ - goto out; + return err ? : resp; } } - if (!poll_cnt) { - err = -EBUSY; - goto out; - } + if (!poll_cnt) + return -EBUSY; err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, UFS_ABORT_TASK, &resp); @@ -7584,7 +7504,7 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) dev_err(hba->dev, "%s: issued. 
tag = %d, err %d\n", __func__, tag, err); } - goto out; + return err; } err = ufshcd_clear_cmd(hba, tag); @@ -7592,7 +7512,6 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n", __func__, tag, err); -out: return err; } @@ -7615,7 +7534,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) ufshcd_hold(hba); - if (!is_mcq_enabled(hba)) { + if (!hba->mcq_enabled) { reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); if (!test_bit(tag, &hba->outstanding_reqs)) { /* If command is already aborted/completed, return FAILED. */ @@ -7648,7 +7567,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) } hba->req_abort_count++; - if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) { + if (!hba->mcq_enabled && !(reg & (1 << tag))) { /* only execute this code in single doorbell mode */ dev_err(hba->dev, "%s: cmd was completed, but without a notifying intr, tag = %d", @@ -7675,7 +7594,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) goto release; } - if (is_mcq_enabled(hba)) { + if (hba->mcq_enabled) { /* MCQ mode. Branch off to handle abort for mcq mode */ err = ufshcd_mcq_abort(cmd); goto release; @@ -7716,6 +7635,29 @@ release: } /** + * ufshcd_process_probe_result - Process the ufshcd_probe_hba() result. + * @hba: UFS host controller instance. + * @probe_start: time when the ufshcd_probe_hba() call started. + * @ret: ufshcd_probe_hba() return value. + */ +static void ufshcd_process_probe_result(struct ufs_hba *hba, + ktime_t probe_start, int ret) +{ + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (ret) + hba->ufshcd_state = UFSHCD_STATE_ERROR; + else if (hba->ufshcd_state == UFSHCD_STATE_RESET) + hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + spin_unlock_irqrestore(hba->host->host_lock, flags); + + trace_ufshcd_init(dev_name(hba->dev), ret, + ktime_to_us(ktime_sub(ktime_get(), probe_start)), + hba->curr_dev_pwr_mode, hba->uic_link_state); +} + +/** * ufshcd_host_reset_and_restore - reset and restore host controller * @hba: per-adapter instance * @@ -7744,8 +7686,14 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) err = ufshcd_hba_enable(hba); /* Establish the link again and restore the device */ - if (!err) - err = ufshcd_probe_hba(hba, false); + if (!err) { + ktime_t probe_start = ktime_get(); + + err = ufshcd_device_init(hba, /*init_dev_params=*/false); + if (!err) + err = ufshcd_probe_hba(hba, false); + ufshcd_process_probe_result(hba, probe_start, err); + } if (err) dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); @@ -7987,11 +7935,13 @@ out: static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev) { + struct Scsi_Host *shost = sdev->host; + scsi_autopm_get_device(sdev); blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev); if (sdev->rpm_autosuspend) pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev, - RPM_AUTOSUSPEND_DELAY_MS); + shost->rpm_autosuspend_delay); scsi_autopm_put_device(sdev); } @@ -8150,29 +8100,36 @@ static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf) } } -static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf) +static void ufshcd_set_rtt(struct ufs_hba *hba) { struct ufs_dev_info *dev_info = &hba->dev_info; - u32 ext_ufs_feature; - u32 ext_iid_en = 0; - int err; + u32 rtt = 0; + u32 dev_rtt = 0; + int host_rtt_cap = hba->vops && hba->vops->max_num_rtt ? 
+ hba->vops->max_num_rtt : hba->nortt; - /* Only UFS-4.0 and above may support EXT_IID */ + /* RTT override makes sense only for UFS-4.0 and above */ if (dev_info->wspecversion < 0x400) - goto out; + return; - ext_ufs_feature = get_unaligned_be32(desc_buf + - DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP); - if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP)) - goto out; + if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &dev_rtt)) { + dev_err(hba->dev, "failed reading bMaxNumOfRTT\n"); + return; + } - err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, - QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en); - if (err) - dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err); + /* do not override if it was already written */ + if (dev_rtt != DEFAULT_MAX_NUM_RTT) + return; -out: - dev_info->b_ext_iid_en = ext_iid_en; + rtt = min_t(int, dev_info->rtt_cap, host_rtt_cap); + + if (rtt == dev_rtt) + return; + + if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt)) + dev_err(hba->dev, "failed writing bMaxNumOfRTT\n"); } void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, @@ -8223,10 +8180,13 @@ static void ufshcd_update_rtc(struct ufs_hba *hba) */ val = ts64.tv_sec - hba->dev_info.rtc_time_baseline; - ufshcd_rpm_get_sync(hba); + /* Skip update RTC if RPM state is not RPM_ACTIVE */ + if (ufshcd_rpm_get_if_active(hba) <= 0) + return; + err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED, 0, 0, &val); - ufshcd_rpm_put_sync(hba); + ufshcd_rpm_put(hba); if (err) dev_err(hba->dev, "%s: Failed to update rtc %d\n", __func__, err); @@ -8241,7 +8201,9 @@ static void ufshcd_rtc_work(struct work_struct *work) hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work); /* Update RTC only when there are no requests in progress and UFSHCI is operational */ - if (!ufshcd_is_ufs_dev_busy(hba) && hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) + if (!ufshcd_is_ufs_dev_busy(hba) && + hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL && + !hba->clk_gating.active_reqs) ufshcd_update_rtc(hba); if (ufshcd_is_ufs_dev_active(hba) && hba->dev_info.rtc_update_period) @@ -8310,6 +8272,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba) desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1]; dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH]; + dev_info->rtt_cap = desc_buf[DEVICE_DESC_PARAM_RTT_CAP]; + model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; err = ufshcd_read_string_desc(hba, model_index, @@ -8331,9 +8295,6 @@ static int ufs_get_device_desc(struct ufs_hba *hba) ufs_init_rtc(hba, desc_buf); - if (hba->ext_iid_sup) - ufshcd_ext_iid_probe(hba, desc_buf); - /* * ufshcd_read_string_desc returns size of the string * reset the error value @@ -8354,83 +8315,6 @@ static void ufs_put_device_desc(struct ufs_hba *hba) } /** - * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro - * @hba: per-adapter instance - * - * PA_TActivate parameter can be tuned manually if UniPro version is less than - * 1.61. PA_TActivate needs to be greater than or equal to peerM-PHY's - * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce - * the hibern8 exit latency. - * - * Return: zero on success, non-zero error value on failure. 
- */ -static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba) -{ - int ret = 0; - u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate; - - ret = ufshcd_dme_peer_get(hba, - UIC_ARG_MIB_SEL( - RX_MIN_ACTIVATETIME_CAPABILITY, - UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), - &peer_rx_min_activatetime); - if (ret) - goto out; - - /* make sure proper unit conversion is applied */ - tuned_pa_tactivate = - ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US) - / PA_TACTIVATE_TIME_UNIT_US); - ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), - tuned_pa_tactivate); - -out: - return ret; -} - -/** - * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro - * @hba: per-adapter instance - * - * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than - * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's - * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY. - * This optimal value can help reduce the hibern8 exit latency. - * - * Return: zero on success, non-zero error value on failure. - */ -static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba) -{ - int ret = 0; - u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0; - u32 max_hibern8_time, tuned_pa_hibern8time; - - ret = ufshcd_dme_get(hba, - UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY, - UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), - &local_tx_hibern8_time_cap); - if (ret) - goto out; - - ret = ufshcd_dme_peer_get(hba, - UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY, - UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), - &peer_rx_hibern8_time_cap); - if (ret) - goto out; - - max_hibern8_time = max(local_tx_hibern8_time_cap, - peer_rx_hibern8_time_cap); - /* make sure proper unit conversion is applied */ - tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US) - / PA_HIBERN8_TIME_UNIT_US); - ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), - tuned_pa_hibern8time); -out: - return ret; -} - -/** * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is * less than device PA_TACTIVATE time. * @hba: per-adapter instance @@ -8502,11 +8386,6 @@ out: static void ufshcd_tune_unipro_params(struct ufs_hba *hba) { - if (ufshcd_is_unipro_pa_params_tuning_req(hba)) { - ufshcd_tune_pa_tactivate(hba); - ufshcd_tune_pa_hibern8time(hba); - } - ufshcd_vops_apply_dev_quirks(hba); if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE) @@ -8644,6 +8523,8 @@ static int ufshcd_device_params_init(struct ufs_hba *hba) goto out; } + ufshcd_set_rtt(hba); + ufshcd_get_ref_clk_gating_wait(hba); if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, @@ -8670,9 +8551,7 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba) if (dev_info->wspecversion < 0x400) return; - ufshcd_hold(hba); - - mutex_lock(&hba->dev_cmd.lock); + ufshcd_dev_man_lock(hba); ufshcd_init_query(hba, &request, &response, UPIU_QUERY_OPCODE_WRITE_ATTR, @@ -8690,8 +8569,7 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba) dev_err(hba->dev, "%s: failed to set timestamp %d\n", __func__, err); - mutex_unlock(&hba->dev_cmd.lock); - ufshcd_release(hba); + ufshcd_dev_man_unlock(hba); } /** @@ -8724,6 +8602,14 @@ static int ufshcd_add_lus(struct ufs_hba *hba) ufshcd_init_clk_scaling_sysfs(hba); } + /* + * The RTC update code accesses the hba->ufs_device_wlun->sdev_gendev + * pointer and hence must only be started after the WLUN pointer has + * been initialized by ufshcd_scsi_add_wlus(). 
+ */ + schedule_delayed_work(&hba->ufs_rtc_update_work, + msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS)); + ufs_bsg_probe(hba); scsi_scan_host(hba->host); @@ -8777,6 +8663,9 @@ static int ufshcd_alloc_mcq(struct ufs_hba *hba) if (ret) goto err; + hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED; + hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED; + return 0; err: hba->nutrs = old_nutrs; @@ -8798,24 +8687,49 @@ static void ufshcd_config_mcq(struct ufs_hba *hba) ufshcd_mcq_make_queues_operational(hba); ufshcd_mcq_config_mac(hba, hba->nutrs); - hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED; - hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED; - - /* Select MCQ mode */ - ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1, - REG_UFS_MEM_CFG); - hba->mcq_enabled = true; - dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n", hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT], hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL], hba->nutrs); } +static int ufshcd_post_device_init(struct ufs_hba *hba) +{ + int ret; + + ufshcd_tune_unipro_params(hba); + + /* UFS device is also active now */ + ufshcd_set_ufs_dev_active(hba); + ufshcd_force_reset_auto_bkops(hba); + + ufshcd_set_timestamp_attr(hba); + + if (!hba->max_pwr_info.is_valid) + return 0; + + /* + * Set the right value to bRefClkFreq before attempting to + * switch to HS gears. + */ + if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) + ufshcd_set_dev_ref_clk(hba); + /* Gear up to HS gear. */ + ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); + if (ret) { + dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", + __func__, ret); + return ret; + } + + return 0; +} + static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) { int ret; - struct Scsi_Host *host = hba->host; + + WARN_ON_ONCE(!hba->scsi_host_added); hba->ufshcd_state = UFSHCD_STATE_RESET; @@ -8833,8 +8747,10 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) ufshcd_set_link_active(hba); /* Reconfigure MCQ upon reset */ - if (is_mcq_enabled(hba) && !init_dev_params) + if (hba->mcq_enabled && !init_dev_params) { ufshcd_config_mcq(hba); + ufshcd_mcq_enable(hba); + } /* Verify device initialization by sending NOP OUT UPIU */ ret = ufshcd_verify_dev_init(hba); @@ -8854,55 +8770,14 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) ret = ufshcd_device_params_init(hba); if (ret) return ret; - if (is_mcq_supported(hba) && !hba->scsi_host_added) { - ret = ufshcd_alloc_mcq(hba); - if (!ret) { - ufshcd_config_mcq(hba); - } else { - /* Continue with SDB mode */ - use_mcq_mode = false; - dev_err(hba->dev, "MCQ mode is disabled, err=%d\n", - ret); - } - ret = scsi_add_host(host, hba->dev); - if (ret) { - dev_err(hba->dev, "scsi_add_host failed\n"); - return ret; - } - hba->scsi_host_added = true; - } else if (is_mcq_supported(hba)) { - /* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */ + if (is_mcq_supported(hba) && + hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) { ufshcd_config_mcq(hba); + ufshcd_mcq_enable(hba); } } - ufshcd_tune_unipro_params(hba); - - /* UFS device is also active now */ - ufshcd_set_ufs_dev_active(hba); - ufshcd_force_reset_auto_bkops(hba); - - ufshcd_set_timestamp_attr(hba); - schedule_delayed_work(&hba->ufs_rtc_update_work, - msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS)); - - /* Gear up to HS gear if supported */ - if (hba->max_pwr_info.is_valid) { - /* - * Set the right 
value to bRefClkFreq before attempting to - * switch to HS gears. - */ - if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) - ufshcd_set_dev_ref_clk(hba); - ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); - if (ret) { - dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", - __func__, ret); - return ret; - } - } - - return 0; + return ufshcd_post_device_init(hba); } /** @@ -8916,32 +8791,26 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) */ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) { - ktime_t start = ktime_get(); - unsigned long flags; int ret; - ret = ufshcd_device_init(hba, init_dev_params); - if (ret) - goto out; - if (!hba->pm_op_in_progress && (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) { /* Reset the device and controller before doing reinit */ ufshcd_device_reset(hba); + ufs_put_device_desc(hba); ufshcd_hba_stop(hba); - ufshcd_vops_reinit_notify(hba); ret = ufshcd_hba_enable(hba); if (ret) { dev_err(hba->dev, "Host controller enable failed\n"); ufshcd_print_evt_hist(hba); ufshcd_print_host_state(hba); - goto out; + return ret; } /* Reinit the device */ ret = ufshcd_device_init(hba, init_dev_params); if (ret) - goto out; + return ret; } ufshcd_print_pwr_info(hba); @@ -8961,18 +8830,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) ufshcd_write_ee_control(hba); ufshcd_configure_auto_hibern8(hba); -out: - spin_lock_irqsave(hba->host->host_lock, flags); - if (ret) - hba->ufshcd_state = UFSHCD_STATE_ERROR; - else if (hba->ufshcd_state == UFSHCD_STATE_RESET) - hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; - spin_unlock_irqrestore(hba->host->host_lock, flags); - - trace_ufshcd_init(dev_name(hba->dev), ret, - ktime_to_us(ktime_sub(ktime_get(), start)), - hba->curr_dev_pwr_mode, hba->uic_link_state); - return ret; + return 0; } /** @@ -8983,11 +8841,14 @@ out: static void ufshcd_async_scan(void *data, async_cookie_t cookie) { struct ufs_hba *hba = (struct ufs_hba *)data; + ktime_t probe_start; int ret; down(&hba->host_sem); /* Initialize hba, detect and initialize UFS device */ + probe_start = ktime_get(); ret = ufshcd_probe_hba(hba, true); + ufshcd_process_probe_result(hba, probe_start, ret); up(&hba->host_sem); if (ret) goto out; @@ -9021,7 +8882,7 @@ static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd) dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n", __func__, hba->outstanding_tasks); - return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE; + return scsi_host_busy(hba->host) ? 
SCSI_EH_RESET_TIMER : SCSI_EH_DONE; } static const struct attribute_group *ufshcd_driver_groups[] = { @@ -9047,9 +8908,9 @@ static const struct scsi_host_template ufshcd_driver_template = { .map_queues = ufshcd_map_queues, .queuecommand = ufshcd_queuecommand, .mq_poll = ufshcd_poll, - .slave_alloc = ufshcd_slave_alloc, - .slave_configure = ufshcd_slave_configure, - .slave_destroy = ufshcd_slave_destroy, + .sdev_init = ufshcd_sdev_init, + .sdev_configure = ufshcd_sdev_configure, + .sdev_destroy = ufshcd_sdev_destroy, .change_queue_depth = ufshcd_change_queue_depth, .eh_abort_handler = ufshcd_abort, .eh_device_reset_handler = ufshcd_eh_device_reset_handler, @@ -9057,15 +8918,12 @@ static const struct scsi_host_template ufshcd_driver_template = { .eh_timed_out = ufshcd_eh_timed_out, .this_id = -1, .sg_tablesize = SG_ALL, - .cmd_per_lun = UFSHCD_CMD_PER_LUN, - .can_queue = UFSHCD_CAN_QUEUE, .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX, .max_sectors = SZ_1M / SECTOR_SIZE, .max_host_blocked = 1, .track_queue_depth = 1, .skip_settle_delay = 1, .sdev_groups = ufshcd_driver_groups, - .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS, }; static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, @@ -9238,7 +9096,6 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) int ret = 0; struct ufs_clk_info *clki; struct list_head *head = &hba->clk_list_head; - unsigned long flags; ktime_t start = ktime_get(); bool clk_state_changed = false; @@ -9280,18 +9137,19 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) if (ret) return ret; + if (!ufshcd_is_clkscaling_supported(hba)) + ufshcd_pm_qos_update(hba, on); out: if (ret) { list_for_each_entry(clki, head, list) { if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) clk_disable_unprepare(clki->clk); } - } else if (!ret && on) { - spin_lock_irqsave(hba->host->host_lock, flags); - hba->clk_gating.state = CLKS_ON; + } else if (!ret && on && hba->clk_gating.is_initialized) { + scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) + hba->clk_gating.state = CLKS_ON; trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); - spin_unlock_irqrestore(hba->host->host_lock, flags); } if (clk_state_changed) @@ -9457,6 +9315,7 @@ out: static void ufshcd_hba_exit(struct ufs_hba *hba) { if (hba->is_powered) { + ufshcd_pm_qos_exit(hba); ufshcd_exit_clk_scaling(hba); ufshcd_exit_clk_gating(hba); if (hba->eh_wq) @@ -9476,7 +9335,17 @@ static int ufshcd_execute_start_stop(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr) { const unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 }; + struct scsi_failure failure_defs[] = { + { + .allowed = 2, + .result = SCMD_FAILURE_RESULT_ANY, + }, + }; + struct scsi_failures failures = { + .failure_definitions = failure_defs, + }; const struct scsi_exec_args args = { + .failures = &failures, .sshdr = sshdr, .req_flags = BLK_MQ_REQ_PM, .scmd_flags = SCMD_FAIL_IF_RECOVERING, @@ -9502,7 +9371,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, struct scsi_sense_hdr sshdr; struct scsi_device *sdp; unsigned long flags; - int ret, retries; + int ret; spin_lock_irqsave(hba->host->host_lock, flags); sdp = hba->ufs_device_wlun; @@ -9528,15 +9397,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, * callbacks hence set the RQF_PM flag so that it doesn't resume the * already suspended childs. 
*/ - for (retries = 3; retries > 0; --retries) { - ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr); - /* - * scsi_execute() only returns a negative value if the request - * queue is dying. - */ - if (ret <= 0) - break; - } + ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr); if (ret) { sdev_printk(KERN_WARNING, sdp, "START_STOP failed for power mode: %d, result %x\n", @@ -9745,7 +9606,10 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) /* UFS device & link must be active before we enter in this function */ if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { - ret = -EINVAL; + /* Wait err handler finish or trigger err recovery */ + if (!ufshcd_eh_in_progress(hba)) + ufshcd_force_error_recovery(hba); + ret = -EBUSY; goto enable_scaling; } @@ -9756,7 +9620,7 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) * allow background operations if bkops status shows * that performance might be impacted. */ - ret = ufshcd_urgent_bkops(hba); + ret = ufshcd_bkops_ctrl(hba); if (ret) { /* * If return err in suspend flow, IO will hang. @@ -9945,7 +9809,7 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) * If BKOPs operations are urgently needed at this moment then * keep auto-bkops enabled or else disable it. */ - ufshcd_urgent_bkops(hba); + ufshcd_bkops_ctrl(hba); if (hba->ee_usr_mask) ufshcd_write_ee_control(hba); @@ -10109,6 +9973,7 @@ static int ufshcd_suspend(struct ufs_hba *hba) ufshcd_vreg_set_lpm(hba); /* Put the host controller in low power mode if possible */ ufshcd_hba_vreg_set_lpm(hba); + ufshcd_pm_qos_update(hba, false); return ret; } @@ -10278,7 +10143,9 @@ static void ufshcd_wl_shutdown(struct device *dev) shost_for_each_device(sdev, hba->host) { if (sdev == hba->ufs_device_wlun) continue; - scsi_device_quiesce(sdev); + mutex_lock(&sdev->state_mutex); + scsi_device_set_state(sdev, SDEV_OFFLINE); + mutex_unlock(&sdev->state_mutex); } __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM); @@ -10304,10 +10171,12 @@ void ufshcd_remove(struct ufs_hba *hba) ufs_hwmon_remove(hba); ufs_bsg_remove(hba); ufs_sysfs_remove_nodes(hba->dev); + cancel_delayed_work_sync(&hba->ufs_rtc_update_work); blk_mq_destroy_queue(hba->tmf_queue); blk_put_queue(hba->tmf_queue); blk_mq_free_tag_set(&hba->tmf_tag_set); - scsi_remove_host(hba->host); + if (hba->scsi_host_added) + scsi_remove_host(hba->host); /* disable interrupts */ ufshcd_disable_intr(hba, hba->intr_mask); ufshcd_hba_stop(hba); @@ -10348,10 +10217,7 @@ int ufshcd_system_restore(struct device *dev) * are updated with the latest queue addresses. Only after * updating these addresses, we can queue the new commands. 
*/ - mb(); - - /* Resuming from hibernate, assume that link was OFF */ - ufshcd_set_link_off(hba); + ufshcd_readl(hba, REG_UTP_TASK_REQ_LIST_BASE_H); return 0; @@ -10366,16 +10232,6 @@ EXPORT_SYMBOL_GPL(ufshcd_system_thaw); #endif /* CONFIG_PM_SLEEP */ /** - * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA) - * @hba: pointer to Host Bus Adapter (HBA) - */ -void ufshcd_dealloc_host(struct ufs_hba *hba) -{ - scsi_host_put(hba->host); -} -EXPORT_SYMBOL_GPL(ufshcd_dealloc_host); - -/** * ufshcd_set_dma_mask - Set dma mask based on the controller * addressing capability * @hba: per adapter instance @@ -10384,6 +10240,8 @@ EXPORT_SYMBOL_GPL(ufshcd_dealloc_host); */ static int ufshcd_set_dma_mask(struct ufs_hba *hba) { + if (hba->vops && hba->vops->set_dma_mask) + return hba->vops->set_dma_mask(hba); if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) return 0; @@ -10392,11 +10250,25 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba) } /** + * ufshcd_devres_release - devres cleanup handler, invoked during release of + * hba->dev + * @host: pointer to SCSI host + */ +static void ufshcd_devres_release(void *host) +{ + scsi_host_put(host); +} + +/** * ufshcd_alloc_host - allocate Host Bus Adapter (HBA) * @dev: pointer to device handle * @hba_handle: driver private handle * * Return: 0 on success, non-zero value on failure. + * + * NOTE: There is no corresponding ufshcd_dealloc_host() because this function + * keeps track of its allocations using devres and deallocates everything on + * device removal automatically. */ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) { @@ -10418,6 +10290,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) err = -ENOMEM; goto out_error; } + + err = devm_add_action_or_reset(dev, ufshcd_devres_release, + host); + if (err) + return dev_err_probe(dev, err, + "failed to add ufshcd dealloc action\n"); + host->nr_maps = HCTX_TYPE_POLL + 1; hba = shost_priv(host); hba->host = host; @@ -10447,6 +10326,73 @@ static const struct blk_mq_ops ufshcd_tmf_ops = { .queue_rq = ufshcd_queue_tmf, }; +static int ufshcd_add_scsi_host(struct ufs_hba *hba) +{ + int err; + + if (is_mcq_supported(hba)) { + ufshcd_mcq_enable(hba); + err = ufshcd_alloc_mcq(hba); + if (!err) { + ufshcd_config_mcq(hba); + } else { + /* Continue with SDB mode */ + ufshcd_mcq_disable(hba); + use_mcq_mode = false; + dev_err(hba->dev, "MCQ mode is disabled, err=%d\n", + err); + } + } + if (!is_mcq_supported(hba) && !hba->lsdb_sup) { + dev_err(hba->dev, + "%s: failed to initialize (legacy doorbell mode not supported)\n", + __func__); + return -EINVAL; + } + + err = scsi_add_host(hba->host, hba->dev); + if (err) { + dev_err(hba->dev, "scsi_add_host failed\n"); + return err; + } + hba->scsi_host_added = true; + + hba->tmf_tag_set = (struct blk_mq_tag_set) { + .nr_hw_queues = 1, + .queue_depth = hba->nutmrs, + .ops = &ufshcd_tmf_ops, + }; + err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); + if (err < 0) + goto remove_scsi_host; + hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL); + if (IS_ERR(hba->tmf_queue)) { + err = PTR_ERR(hba->tmf_queue); + goto free_tmf_tag_set; + } + hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs, + sizeof(*hba->tmf_rqs), GFP_KERNEL); + if (!hba->tmf_rqs) { + err = -ENOMEM; + goto free_tmf_queue; + } + + return 0; + +free_tmf_queue: + blk_mq_destroy_queue(hba->tmf_queue); + blk_put_queue(hba->tmf_queue); + +free_tmf_tag_set: + 
blk_mq_free_tag_set(&hba->tmf_tag_set); + +remove_scsi_host: + if (hba->scsi_host_added) + scsi_remove_host(hba->host); + + return err; +} + /** * ufshcd_init - Driver initialization routine * @hba: per-adapter instance @@ -10460,7 +10406,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) int err; struct Scsi_Host *host = hba->host; struct device *dev = hba->dev; - char eh_wq_name[sizeof("ufs_eh_wq_00")]; /* * dev_set_drvdata() must be called before any callbacks are registered @@ -10480,6 +10425,27 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) hba->irq = irq; hba->vps = &ufs_hba_vps; + /* + * Initialize clk_gating.lock early since it is being used in + * ufshcd_setup_clocks() + */ + spin_lock_init(&hba->clk_gating.lock); + + /* + * Set the default power management level for runtime and system PM. + * Host controller drivers can override them in their + * 'ufs_hba_variant_ops::init' callback. + * + * Default power saving mode is to keep UFS link in Hibern8 state + * and UFS device in sleep state. + */ + hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( + UFS_SLEEP_PWR_MODE, + UIC_LINK_HIBERN8_STATE); + hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( + UFS_SLEEP_PWR_MODE, + UIC_LINK_HIBERN8_STATE); + err = ufshcd_hba_init(hba); if (err) goto out_error; @@ -10520,12 +10486,15 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) host->max_cmd_len = UFS_CDB_SIZE; host->queuecommand_may_block = !!(hba->caps & UFSHCD_CAP_CLK_GATING); + /* Use default RPM delay if host not set */ + if (host->rpm_autosuspend_delay == 0) + host->rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS; + hba->max_pwr_info.is_valid = false; /* Initialize work queues */ - snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d", - hba->host->host_no); - hba->eh_wq = create_singlethread_workqueue(eh_wq_name); + hba->eh_wq = alloc_ordered_workqueue("ufs_eh_wq_%d", WQ_MEM_RECLAIM, + hba->host->host_no); if (!hba->eh_wq) { dev_err(hba->dev, "%s: failed to create eh workqueue\n", __func__); @@ -10565,7 +10534,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) * Make sure that UFS interrupts are disabled and any pending interrupt * status is cleared before registering UFS interrupt handler. 
*/ - mb(); + ufshcd_readl(hba, REG_INTERRUPT_ENABLE); /* IRQ registration */ err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); @@ -10576,35 +10545,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) hba->is_irq_enabled = true; } - if (!is_mcq_supported(hba)) { - err = scsi_add_host(host, hba->dev); - if (err) { - dev_err(hba->dev, "scsi_add_host failed\n"); - goto out_disable; - } - } - - hba->tmf_tag_set = (struct blk_mq_tag_set) { - .nr_hw_queues = 1, - .queue_depth = hba->nutmrs, - .ops = &ufshcd_tmf_ops, - .flags = BLK_MQ_F_NO_SCHED, - }; - err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); - if (err < 0) - goto out_remove_scsi_host; - hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set); - if (IS_ERR(hba->tmf_queue)) { - err = PTR_ERR(hba->tmf_queue); - goto free_tmf_tag_set; - } - hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs, - sizeof(*hba->tmf_rqs), GFP_KERNEL); - if (!hba->tmf_rqs) { - err = -ENOMEM; - goto free_tmf_queue; - } - /* Reset the attached device */ ufshcd_device_reset(hba); @@ -10616,21 +10556,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) dev_err(hba->dev, "Host controller enable failed\n"); ufshcd_print_evt_hist(hba); ufshcd_print_host_state(hba); - goto free_tmf_queue; + goto out_disable; } - /* - * Set the default power management level for runtime and system PM. - * Default power saving mode is to keep UFS link in Hibern8 state - * and UFS device in sleep state. - */ - hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( - UFS_SLEEP_PWR_MODE, - UIC_LINK_HIBERN8_STATE); - hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( - UFS_SLEEP_PWR_MODE, - UIC_LINK_HIBERN8_STATE); - INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work); INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work); @@ -10642,7 +10570,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) /* Hold auto suspend until async scan completes */ pm_runtime_get_sync(dev); - atomic_set(&hba->scsi_block_reqs_cnt, 0); + /* * We are assuming that device wasn't put in sleep/power-down * state exclusively during the boot stage before kernel. 
@@ -10651,19 +10579,56 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) */ ufshcd_set_ufs_dev_active(hba); + /* Initialize hba, detect and initialize UFS device */ + ktime_t probe_start = ktime_get(); + + hba->ufshcd_state = UFSHCD_STATE_RESET; + + err = ufshcd_link_startup(hba); + if (err) + goto out_disable; + + if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION) + goto initialized; + + /* Debug counters initialization */ + ufshcd_clear_dbg_ufs_stats(hba); + + /* UniPro link is active now */ + ufshcd_set_link_active(hba); + + /* Verify device initialization by sending NOP OUT UPIU */ + err = ufshcd_verify_dev_init(hba); + if (err) + goto out_disable; + + /* Initiate UFS initialization, and waiting until completion */ + err = ufshcd_complete_dev_init(hba); + if (err) + goto out_disable; + + err = ufshcd_device_params_init(hba); + if (err) + goto out_disable; + + err = ufshcd_post_device_init(hba); + +initialized: + ufshcd_process_probe_result(hba, probe_start, err); + if (err) + goto out_disable; + + err = ufshcd_add_scsi_host(hba); + if (err) + goto out_disable; + async_schedule(ufshcd_async_scan, hba); ufs_sysfs_add_nodes(hba->dev); device_enable_async_suspend(dev); + ufshcd_pm_qos_init(hba); return 0; -free_tmf_queue: - blk_mq_destroy_queue(hba->tmf_queue); - blk_put_queue(hba->tmf_queue); -free_tmf_tag_set: - blk_mq_free_tag_set(&hba->tmf_tag_set); -out_remove_scsi_host: - scsi_remove_host(hba->host); out_disable: hba->is_irq_enabled = false; ufshcd_hba_exit(hba); @@ -10844,7 +10809,6 @@ static void ufshcd_check_header_layout(void) static struct scsi_driver ufs_dev_wlun_template = { .gendrv = { .name = "ufs_device_wlun", - .owner = THIS_MODULE, .probe = ufshcd_wl_probe, .remove = ufshcd_wl_remove, .pm = &ufshcd_wl_pm_ops, diff --git a/drivers/ufs/host/cdns-pltfrm.c b/drivers/ufs/host/cdns-pltfrm.c index bb30267da471..e793e3538c48 100644 --- a/drivers/ufs/host/cdns-pltfrm.c +++ b/drivers/ufs/host/cdns-pltfrm.c @@ -136,7 +136,7 @@ static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba) * Make sure the register was updated, * UniPro layer will not work with an incorrect value. 
*/ - mb(); + ufshcd_readl(hba, CDNS_UFS_REG_HCLKDIV); return 0; } @@ -307,9 +307,7 @@ static int cdns_ufs_pltfrm_probe(struct platform_device *pdev) */ static void cdns_ufs_pltfrm_remove(struct platform_device *pdev) { - struct ufs_hba *hba = platform_get_drvdata(pdev); - - ufshcd_remove(hba); + ufshcd_pltfrm_remove(pdev); } static const struct dev_pm_ops cdns_ufs_dev_pm_ops = { @@ -321,7 +319,7 @@ static const struct dev_pm_ops cdns_ufs_dev_pm_ops = { static struct platform_driver cdns_ufs_pltfrm_driver = { .probe = cdns_ufs_pltfrm_probe, - .remove_new = cdns_ufs_pltfrm_remove, + .remove = cdns_ufs_pltfrm_remove, .driver = { .name = "cdns-ufshcd", .pm = &cdns_ufs_dev_pm_ops, diff --git a/drivers/ufs/host/tc-dwc-g210-pci.c b/drivers/ufs/host/tc-dwc-g210-pci.c index 876781fd6861..0167d8bef71a 100644 --- a/drivers/ufs/host/tc-dwc-g210-pci.c +++ b/drivers/ufs/host/tc-dwc-g210-pci.c @@ -80,14 +80,12 @@ tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev); - err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD); - if (err < 0) { + mmio_base = pcim_iomap_region(pdev, 0, UFSHCD); + if (IS_ERR(mmio_base)) { dev_err(&pdev->dev, "request and iomap failed\n"); - return err; + return PTR_ERR(mmio_base); } - mmio_base = pcim_iomap_table(pdev)[0]; - err = ufshcd_alloc_host(&pdev->dev, &hba); if (err) { dev_err(&pdev->dev, "Allocation failed\n"); diff --git a/drivers/ufs/host/tc-dwc-g210-pltfrm.c b/drivers/ufs/host/tc-dwc-g210-pltfrm.c index a3877592604d..454ac88c357d 100644 --- a/drivers/ufs/host/tc-dwc-g210-pltfrm.c +++ b/drivers/ufs/host/tc-dwc-g210-pltfrm.c @@ -76,10 +76,7 @@ static int tc_dwc_g210_pltfm_probe(struct platform_device *pdev) */ static void tc_dwc_g210_pltfm_remove(struct platform_device *pdev) { - struct ufs_hba *hba = platform_get_drvdata(pdev); - - pm_runtime_get_sync(&(pdev)->dev); - ufshcd_remove(hba); + ufshcd_pltfrm_remove(pdev); } static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = { @@ -89,7 +86,7 @@ static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = { static struct platform_driver tc_dwc_g210_pltfm_driver = { .probe = tc_dwc_g210_pltfm_probe, - .remove_new = tc_dwc_g210_pltfm_remove, + .remove = tc_dwc_g210_pltfm_remove, .driver = { .name = "tc-dwc-g210-pltfm", .pm = &tc_dwc_g210_pltfm_pm_ops, diff --git a/drivers/ufs/host/ti-j721e-ufs.c b/drivers/ufs/host/ti-j721e-ufs.c index 250c22df000d..21214e5d5896 100644 --- a/drivers/ufs/host/ti-j721e-ufs.c +++ b/drivers/ufs/host/ti-j721e-ufs.c @@ -83,7 +83,7 @@ MODULE_DEVICE_TABLE(of, ti_j721e_ufs_of_match); static struct platform_driver ti_j721e_ufs_driver = { .probe = ti_j721e_ufs_probe, - .remove_new = ti_j721e_ufs_remove, + .remove = ti_j721e_ufs_remove, .driver = { .name = "ti-j721e-ufs", .of_match_table = ti_j721e_ufs_of_match, diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c index 734d40f99e31..13dd5dfc03eb 100644 --- a/drivers/ufs/host/ufs-exynos.c +++ b/drivers/ufs/host/ufs-exynos.c @@ -8,6 +8,9 @@ * */ +#include <linux/unaligned.h> +#include <crypto/aes.h> +#include <linux/arm-smccc.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/module.h> @@ -25,12 +28,13 @@ #include "ufs-exynos.h" +#define DATA_UNIT_SIZE 4096 + /* * Exynos's Vendor specific registers for UFSHCI */ #define HCI_TXPRDT_ENTRY_SIZE 0x00 #define PRDT_PREFECT_EN BIT(31) -#define PRDT_SET_SIZE(x) ((x) & 0x1F) #define HCI_RXPRDT_ENTRY_SIZE 0x04 #define HCI_1US_TO_CNT_VAL 0x0C #define CNT_VAL_1US_MASK 0x3FF @@ -44,12 +48,16 @@ #define HCI_UNIPRO_APB_CLK_CTRL 0x68 #define 
UNIPRO_APB_CLK(v, x) (((v) & ~0xF) | ((x) & 0xF)) #define HCI_AXIDMA_RWDATA_BURST_LEN 0x6C +#define WLU_EN BIT(31) +#define WLU_BURST_LEN(x) ((x) << 27 | ((x) & 0xF)) #define HCI_GPIO_OUT 0x70 #define HCI_ERR_EN_PA_LAYER 0x78 #define HCI_ERR_EN_DL_LAYER 0x7C #define HCI_ERR_EN_N_LAYER 0x80 #define HCI_ERR_EN_T_LAYER 0x84 #define HCI_ERR_EN_DME_LAYER 0x88 +#define HCI_V2P1_CTRL 0x8C +#define IA_TICK_SEL BIT(16) #define HCI_CLKSTOP_CTRL 0xB0 #define REFCLKOUT_STOP BIT(4) #define MPHY_APBCLK_STOP BIT(3) @@ -59,6 +67,7 @@ #define CLK_STOP_MASK (REFCLKOUT_STOP | REFCLK_STOP |\ UNIPRO_MCLK_STOP | MPHY_APBCLK_STOP|\ UNIPRO_PCLK_STOP) +/* HCI_MISC is also known as HCI_FORCE_HCS */ #define HCI_MISC 0xB4 #define REFCLK_CTRL_EN BIT(7) #define UNIPRO_PCLK_CTRL_EN BIT(6) @@ -67,6 +76,10 @@ #define CLK_CTRL_EN_MASK (REFCLK_CTRL_EN |\ UNIPRO_PCLK_CTRL_EN |\ UNIPRO_MCLK_CTRL_EN) + +#define HCI_IOP_ACG_DISABLE 0x100 +#define HCI_IOP_ACG_DISABLE_EN BIT(0) + /* Device fatal error */ #define DFES_ERR_EN BIT(31) #define DFES_DEF_L2_ERRS (UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF |\ @@ -136,6 +149,9 @@ enum { /* * UNIPRO registers */ +#define UNIPRO_DME_POWERMODE_REQ_LOCALL2TIMER0 0x7888 +#define UNIPRO_DME_POWERMODE_REQ_LOCALL2TIMER1 0x788c +#define UNIPRO_DME_POWERMODE_REQ_LOCALL2TIMER2 0x7890 #define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0 0x78B8 #define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1 0x78BC #define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2 0x78C0 @@ -188,15 +204,8 @@ static inline void exynos_ufs_ungate_clks(struct exynos_ufs *ufs) exynos_ufs_ctrl_clkstop(ufs, false); } -static int exynos7_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs) -{ - return 0; -} - -static int exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs) +static int exynos_ufs_shareability(struct exynos_ufs *ufs) { - struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; - /* IO Coherency setting */ if (ufs->sysreg) { return regmap_update_bits(ufs->sysreg, @@ -204,11 +213,32 @@ static int exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs) UFS_SHARABLE, UFS_SHARABLE); } - attr->tx_dif_p_nsec = 3200000; - return 0; } +static int gs101_ufs_drv_init(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + u32 reg; + + /* Enable WriteBooster */ + hba->caps |= UFSHCD_CAP_WB_EN; + + /* Enable clock gating and hibern8 */ + hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; + + /* set ACG to be controlled by UFS_ACG_DISABLE */ + reg = hci_readl(ufs, HCI_IOP_ACG_DISABLE); + hci_writel(ufs, reg & (~HCI_IOP_ACG_DISABLE_EN), HCI_IOP_ACG_DISABLE); + + return exynos_ufs_shareability(ufs); +} + +static int exynosauto_ufs_drv_init(struct exynos_ufs *ufs) +{ + return exynos_ufs_shareability(ufs); +} + static int exynosauto_ufs_post_hce_enable(struct exynos_ufs *ufs) { struct ufs_hba *hba = ufs->hba; @@ -306,8 +336,9 @@ static int exynosauto_ufs_post_pwr_change(struct exynos_ufs *ufs, static int exynos7_ufs_pre_link(struct exynos_ufs *ufs) { + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + u32 val = attr->pa_dbg_opt_suite1_val; struct ufs_hba *hba = ufs->hba; - u32 val = ufs->drv_data->uic_attr->pa_dbg_option_suite; int i; exynos_ufs_enable_ov_tm(hba); @@ -324,12 +355,13 @@ static int exynos7_ufs_pre_link(struct exynos_ufs *ufs) UIC_ARG_MIB_SEL(TX_HIBERN8_CONTROL, i), 0x0); ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_TXPHY_CFGUPDT), 0x1); udelay(1); - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val | (1 << 12)); + ufshcd_dme_set(hba, 
UIC_ARG_MIB(attr->pa_dbg_opt_suite1_off), + val | (1 << 12)); ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_RESET_PHY), 0x1); ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_LINE_RESET), 0x1); ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_LINE_RESET_REQ), 0x1); udelay(1600); - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val); + ufshcd_dme_set(hba, UIC_ARG_MIB(attr->pa_dbg_opt_suite1_off), val); return 0; } @@ -534,6 +566,9 @@ static void exynos_ufs_specify_phy_time_attr(struct exynos_ufs *ufs) struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg; + if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR) + return; + t_cfg->tx_linereset_p = exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_p_nsec); t_cfg->tx_linereset_n = @@ -712,6 +747,9 @@ static void exynos_ufs_config_smu(struct exynos_ufs *ufs) { u32 reg, val; + if (ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE) + return; + exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val); /* make encryption disabled by default */ @@ -759,6 +797,21 @@ static void exynos_ufs_config_sync_pattern_mask(struct exynos_ufs *ufs, exynos_ufs_disable_ov_tm(hba); } +#define UFS_HW_VER_MAJOR_MASK GENMASK(15, 8) + +static u32 exynos_ufs_get_hs_gear(struct ufs_hba *hba) +{ + u8 major; + + major = FIELD_GET(UFS_HW_VER_MAJOR_MASK, hba->ufs_version); + + if (major >= 3) + return UFS_HS_G4; + + /* Default is HS-G3 */ + return UFS_HS_G3; +} + static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba, struct ufs_pa_layer_attr *dev_max_params, struct ufs_pa_layer_attr *dev_req_params) @@ -776,6 +829,10 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba, ufshcd_init_host_params(&host_params); + /* This driver only support symmetric gear setting e.g. hs_tx_gear == hs_rx_gear */ + host_params.hs_tx_gear = exynos_ufs_get_hs_gear(hba); + host_params.hs_rx_gear = exynos_ufs_get_hs_gear(hba); + ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params); if (ret) { pr_err("%s: failed to determine capabilities\n", __func__); @@ -921,14 +978,23 @@ out_exit_phy: static void exynos_ufs_config_unipro(struct exynos_ufs *ufs) { + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; struct ufs_hba *hba = ufs->hba; - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD), - DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate)); + if (attr->pa_dbg_clk_period_off) + ufshcd_dme_set(hba, UIC_ARG_MIB(attr->pa_dbg_clk_period_off), + DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate)); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTRAILINGCLOCKS), ufs->drv_data->uic_attr->tx_trailingclks); - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), - ufs->drv_data->uic_attr->pa_dbg_option_suite); + + if (attr->pa_dbg_opt_suite1_off) + ufshcd_dme_set(hba, UIC_ARG_MIB(attr->pa_dbg_opt_suite1_off), + attr->pa_dbg_opt_suite1_val); + + if (attr->pa_dbg_opt_suite2_off) + ufshcd_dme_set(hba, UIC_ARG_MIB(attr->pa_dbg_opt_suite2_off), + attr->pa_dbg_opt_suite2_val); } static void exynos_ufs_config_intr(struct exynos_ufs *ufs, u32 errs, u8 index) @@ -1005,6 +1071,13 @@ static void exynos_ufs_fit_aggr_timeout(struct exynos_ufs *ufs) { u32 val; + /* Select function clock (mclk) for timer tick */ + if (ufs->opts & EXYNOS_UFS_OPT_TIMER_TICK_SELECT) { + val = hci_readl(ufs, HCI_V2P1_CTRL); + val |= IA_TICK_SEL; + hci_writel(ufs, val, HCI_V2P1_CTRL); + } + val = exynos_ufs_calc_time_cntr(ufs, IATOVAL_NSEC / CNTR_DIV_VAL); hci_writel(ufs, val & CNT_VAL_1US_MASK, HCI_1US_TO_CNT_VAL); } @@ -1019,8 +1092,8 @@ static int exynos_ufs_post_link(struct ufs_hba *hba) 
exynos_ufs_fit_aggr_timeout(ufs); hci_writel(ufs, 0xa, HCI_DATA_REORDER); - hci_writel(ufs, PRDT_SET_SIZE(12), HCI_TXPRDT_ENTRY_SIZE); - hci_writel(ufs, PRDT_SET_SIZE(12), HCI_RXPRDT_ENTRY_SIZE); + hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_TXPRDT_ENTRY_SIZE); + hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_RXPRDT_ENTRY_SIZE); hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE); hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE); hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN); @@ -1127,6 +1200,230 @@ static inline void exynos_ufs_priv_init(struct ufs_hba *hba, hba->quirks = ufs->drv_data->quirks; } +#ifdef CONFIG_SCSI_UFS_CRYPTO + +/* + * Support for Flash Memory Protector (FMP), which is the inline encryption + * hardware on Exynos and Exynos-based SoCs. The interface to this hardware is + * not compatible with the standard UFS crypto. It requires that encryption be + * configured in the PRDT using a nonstandard extension. + */ + +enum fmp_crypto_algo_mode { + FMP_BYPASS_MODE = 0, + FMP_ALGO_MODE_AES_CBC = 1, + FMP_ALGO_MODE_AES_XTS = 2, +}; +enum fmp_crypto_key_length { + FMP_KEYLEN_256BIT = 1, +}; + +/** + * struct fmp_sg_entry - nonstandard format of PRDT entries when FMP is enabled + * + * @base: The standard PRDT entry, but with nonstandard bitfields in the high + * bits of the 'size' field, i.e. the last 32-bit word. When these + * nonstandard bitfields are zero, the data segment won't be encrypted or + * decrypted. Otherwise they specify the algorithm and key length with + * which the data segment will be encrypted or decrypted. + * @file_iv: The initialization vector (IV) with all bytes reversed + * @file_enckey: The first half of the AES-XTS key with all bytes reserved + * @file_twkey: The second half of the AES-XTS key with all bytes reserved + * @disk_iv: Unused + * @reserved: Unused + */ +struct fmp_sg_entry { + struct ufshcd_sg_entry base; + __be64 file_iv[2]; + __be64 file_enckey[4]; + __be64 file_twkey[4]; + __be64 disk_iv[2]; + __be64 reserved[2]; +}; + +#define SMC_CMD_FMP_SECURITY \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_SIP, 0x1810) +#define SMC_CMD_SMU \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_SIP, 0x1850) +#define SMC_CMD_FMP_SMU_RESUME \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_SIP, 0x1860) +#define SMU_EMBEDDED 0 +#define SMU_INIT 0 +#define CFG_DESCTYPE_3 3 + +static void exynos_ufs_fmp_init(struct ufs_hba *hba, struct exynos_ufs *ufs) +{ + struct blk_crypto_profile *profile = &hba->crypto_profile; + struct arm_smccc_res res; + int err; + + /* + * Check for the standard crypto support bit, since it's available even + * though the rest of the interface to FMP is nonstandard. + * + * This check should have the effect of preventing the driver from + * trying to use FMP on old Exynos SoCs that don't have FMP. + */ + if (!(ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES) & + MASK_CRYPTO_SUPPORT)) + return; + + /* + * The below sequence of SMC calls to enable FMP can be found in the + * downstream driver source for gs101 and other Exynos-based SoCs. It + * is the only way to enable FMP that works on SoCs such as gs101 that + * don't make the FMP registers accessible to Linux. It probably works + * on other Exynos-based SoCs too, and might even still be the only way + * that works. But this hasn't been properly tested, and this code is + * mutually exclusive with exynos_ufs_config_smu(). 
So for now only + * enable FMP support on SoCs with EXYNOS_UFS_OPT_UFSPR_SECURE. + */ + if (!(ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE)) + return; + + /* + * This call (which sets DESCTYPE to 0x3 in the FMPSECURITY0 register) + * is needed to make the hardware use the larger PRDT entry size. + */ + BUILD_BUG_ON(sizeof(struct fmp_sg_entry) != 128); + arm_smccc_smc(SMC_CMD_FMP_SECURITY, 0, SMU_EMBEDDED, CFG_DESCTYPE_3, + 0, 0, 0, 0, &res); + if (res.a0) { + dev_warn(hba->dev, + "SMC_CMD_FMP_SECURITY failed on init: %ld. Disabling FMP support.\n", + res.a0); + return; + } + ufshcd_set_sg_entry_size(hba, sizeof(struct fmp_sg_entry)); + + /* + * This is needed to initialize FMP. Without it, errors occur when + * inline encryption is used. + */ + arm_smccc_smc(SMC_CMD_SMU, SMU_INIT, SMU_EMBEDDED, 0, 0, 0, 0, 0, &res); + if (res.a0) { + dev_err(hba->dev, + "SMC_CMD_SMU(SMU_INIT) failed: %ld. Disabling FMP support.\n", + res.a0); + return; + } + + /* Advertise crypto capabilities to the block layer. */ + err = devm_blk_crypto_profile_init(hba->dev, profile, 0); + if (err) { + /* Only ENOMEM should be possible here. */ + dev_err(hba->dev, "Failed to initialize crypto profile: %d\n", + err); + return; + } + profile->max_dun_bytes_supported = AES_BLOCK_SIZE; + profile->dev = hba->dev; + profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] = + DATA_UNIT_SIZE; + + /* Advertise crypto support to ufshcd-core. */ + hba->caps |= UFSHCD_CAP_CRYPTO; + + /* Advertise crypto quirks to ufshcd-core. */ + hba->quirks |= UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE | + UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE | + UFSHCD_QUIRK_KEYS_IN_PRDT; + +} + +static void exynos_ufs_fmp_resume(struct ufs_hba *hba) +{ + struct arm_smccc_res res; + + if (!(hba->caps & UFSHCD_CAP_CRYPTO)) + return; + + arm_smccc_smc(SMC_CMD_FMP_SECURITY, 0, SMU_EMBEDDED, CFG_DESCTYPE_3, + 0, 0, 0, 0, &res); + if (res.a0) + dev_err(hba->dev, + "SMC_CMD_FMP_SECURITY failed on resume: %ld\n", res.a0); + + arm_smccc_smc(SMC_CMD_FMP_SMU_RESUME, 0, SMU_EMBEDDED, 0, 0, 0, 0, 0, + &res); + if (res.a0) + dev_err(hba->dev, + "SMC_CMD_FMP_SMU_RESUME failed: %ld\n", res.a0); +} + +static inline __be64 fmp_key_word(const u8 *key, int j) +{ + return cpu_to_be64(get_unaligned_le64( + key + AES_KEYSIZE_256 - (j + 1) * sizeof(u64))); +} + +/* Fill the PRDT for a request according to the given encryption context. */ +static int exynos_ufs_fmp_fill_prdt(struct ufs_hba *hba, + const struct bio_crypt_ctx *crypt_ctx, + void *prdt, unsigned int num_segments) +{ + struct fmp_sg_entry *fmp_prdt = prdt; + const u8 *enckey = crypt_ctx->bc_key->raw; + const u8 *twkey = enckey + AES_KEYSIZE_256; + u64 dun_lo = crypt_ctx->bc_dun[0]; + u64 dun_hi = crypt_ctx->bc_dun[1]; + unsigned int i; + + /* If FMP wasn't enabled, we shouldn't get any encrypted requests. */ + if (WARN_ON_ONCE(!(hba->caps & UFSHCD_CAP_CRYPTO))) + return -EIO; + + /* Configure FMP on each segment of the request. */ + for (i = 0; i < num_segments; i++) { + struct fmp_sg_entry *prd = &fmp_prdt[i]; + int j; + + /* Each segment must be exactly one data unit. */ + if (prd->base.size != cpu_to_le32(DATA_UNIT_SIZE - 1)) { + dev_err(hba->dev, + "data segment is misaligned for FMP\n"); + return -EIO; + } + + /* Set the algorithm and key length. */ + prd->base.size |= cpu_to_le32((FMP_ALGO_MODE_AES_XTS << 28) | + (FMP_KEYLEN_256BIT << 26)); + + /* Set the IV. */ + prd->file_iv[0] = cpu_to_be64(dun_hi); + prd->file_iv[1] = cpu_to_be64(dun_lo); + + /* Set the key. 
*/ + for (j = 0; j < AES_KEYSIZE_256 / sizeof(u64); j++) { + prd->file_enckey[j] = fmp_key_word(enckey, j); + prd->file_twkey[j] = fmp_key_word(twkey, j); + } + + /* Increment the data unit number. */ + dun_lo++; + if (dun_lo == 0) + dun_hi++; + } + return 0; +} + +#else /* CONFIG_SCSI_UFS_CRYPTO */ + +static void exynos_ufs_fmp_init(struct ufs_hba *hba, struct exynos_ufs *ufs) +{ +} + +static void exynos_ufs_fmp_resume(struct ufs_hba *hba) +{ +} + +#define exynos_ufs_fmp_fill_prdt NULL + +#endif /* !CONFIG_SCSI_UFS_CRYPTO */ + static int exynos_ufs_init(struct ufs_hba *hba) { struct device *dev = hba->dev; @@ -1174,8 +1471,10 @@ static int exynos_ufs_init(struct ufs_hba *hba) exynos_ufs_priv_init(hba, ufs); + exynos_ufs_fmp_init(hba, ufs); + if (ufs->drv_data->drv_init) { - ret = ufs->drv_data->drv_init(dev, ufs); + ret = ufs->drv_data->drv_init(ufs); if (ret) { dev_err(dev, "failed to init drv-data\n"); goto out; @@ -1186,7 +1485,10 @@ static int exynos_ufs_init(struct ufs_hba *hba) if (ret) goto out; exynos_ufs_specify_phy_time_attr(ufs); + exynos_ufs_config_smu(ufs); + + hba->host->dma_alignment = DATA_UNIT_SIZE - 1; return 0; out: @@ -1227,12 +1529,12 @@ static void exynos_ufs_dev_hw_reset(struct ufs_hba *hba) hci_writel(ufs, 1 << 0, HCI_GPIO_OUT); } -static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter) +static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, enum uic_cmd_dme cmd) { struct exynos_ufs *ufs = ufshcd_get_variant(hba); struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; - if (!enter) { + if (cmd == UIC_CMD_DME_HIBER_EXIT) { if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) exynos_ufs_disable_auto_ctrl_hcc(ufs); exynos_ufs_ungate_clks(ufs); @@ -1260,30 +1562,11 @@ static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter) } } -static void exynos_ufs_post_hibern8(struct ufs_hba *hba, u8 enter) +static void exynos_ufs_post_hibern8(struct ufs_hba *hba, enum uic_cmd_dme cmd) { struct exynos_ufs *ufs = ufshcd_get_variant(hba); - if (!enter) { - u32 cur_mode = 0; - u32 pwrmode; - - if (ufshcd_is_hs_mode(&ufs->dev_req_params)) - pwrmode = FAST_MODE; - else - pwrmode = SLOW_MODE; - - ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &cur_mode); - if (cur_mode != (pwrmode << 4 | pwrmode)) { - dev_warn(hba->dev, "%s: power mode change\n", __func__); - hba->pwr_info.pwr_rx = (cur_mode >> 4) & 0xf; - hba->pwr_info.pwr_tx = cur_mode & 0xf; - ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); - } - - if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)) - exynos_ufs_establish_connt(ufs); - } else { + if (cmd == UIC_CMD_DME_HIBER_ENTER) { ufs->entry_hibern8_t = ktime_get(); exynos_ufs_gate_clks(ufs); if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) @@ -1305,7 +1588,7 @@ static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba, * (ufshcd_async_scan()). Note: this callback may also be called * from other functions than ufshcd_init(). 
*/ - hba->host->max_segment_size = SZ_4K; + hba->host->max_segment_size = DATA_UNIT_SIZE; if (ufs->drv_data->pre_hce_enable) { ret = ufs->drv_data->pre_hce_enable(ufs); @@ -1370,15 +1653,15 @@ static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba, } static void exynos_ufs_hibern8_notify(struct ufs_hba *hba, - enum uic_cmd_dme enter, + enum uic_cmd_dme cmd, enum ufs_notify_change_status notify) { switch ((u8)notify) { case PRE_CHANGE: - exynos_ufs_pre_hibern8(hba, enter); + exynos_ufs_pre_hibern8(hba, cmd); break; case POST_CHANGE: - exynos_ufs_post_hibern8(hba, enter); + exynos_ufs_post_hibern8(hba, cmd); break; } } @@ -1405,7 +1688,7 @@ static int exynos_ufs_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) phy_power_on(ufs->phy); exynos_ufs_config_smu(ufs); - + exynos_ufs_fmp_resume(hba); return 0; } @@ -1475,10 +1758,11 @@ static int exynosauto_ufs_vh_init(struct ufs_hba *hba) static int fsd_ufs_pre_link(struct exynos_ufs *ufs) { - int i; + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; struct ufs_hba *hba = ufs->hba; + int i; - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD), + ufshcd_dme_set(hba, UIC_ARG_MIB(attr->pa_dbg_clk_period_off), DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate)); ufshcd_dme_set(hba, UIC_ARG_MIB(0x201), 0x12); ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40); @@ -1502,7 +1786,9 @@ static int fsd_ufs_pre_link(struct exynos_ufs *ufs) ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0); ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_AUTOMODE_THLD), 0x4E20); - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), 0x2e820183); + + ufshcd_dme_set(hba, UIC_ARG_MIB(attr->pa_dbg_opt_suite1_off), + 0x2e820183); ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0); exynos_ufs_establish_connt(ufs); @@ -1510,11 +1796,6 @@ static int fsd_ufs_pre_link(struct exynos_ufs *ufs) return 0; } -static void exynos_ufs_config_scsi_dev(struct scsi_device *sdev) -{ - blk_queue_update_dma_alignment(sdev->request_queue, SZ_4K - 1); -} - static int fsd_ufs_post_link(struct exynos_ufs *ufs) { int i; @@ -1571,6 +1852,102 @@ static int fsd_ufs_pre_pwr_change(struct exynos_ufs *ufs, return 0; } +static inline u32 get_mclk_period_unipro_18(struct exynos_ufs *ufs) +{ + return (16 * 1000 * 1000000UL / ufs->mclk_rate); +} + +static int gs101_ufs_pre_link(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + int i; + u32 tx_line_reset_period, rx_line_reset_period; + + rx_line_reset_period = (RX_LINE_RESET_TIME * ufs->mclk_rate) + / NSEC_PER_MSEC; + tx_line_reset_period = (TX_LINE_RESET_TIME * ufs->mclk_rate) + / NSEC_PER_MSEC; + + unipro_writel(ufs, get_mclk_period_unipro_18(ufs), COMP_CLK_PERIOD); + + ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40); + + for_each_ufs_rx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD, i), + DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD_EN, i), 0x0); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE2, i), + (rx_line_reset_period >> 16) & 0xFF); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE1, i), + (rx_line_reset_period >> 8) & 0xFF); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE0, i), + (rx_line_reset_period) & 0xFF); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x2f, i), 0x69); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x84, i), 0x1); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x25, i), 0xf6); + } + + for_each_ufs_tx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD, i), + DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate)); + ufshcd_dme_set(hba, 
UIC_ARG_MIB_SEL(VND_TX_CLK_PRD_EN, i), + 0x02); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE2, i), + (tx_line_reset_period >> 16) & 0xFF); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE1, i), + (tx_line_reset_period >> 8) & 0xFF); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE0, i), + (tx_line_reset_period) & 0xFF); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x04, i), 1); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x7F, i), 0); + } + + ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0); + ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID), 0x0); + ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID_VALID), 0x1); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID), 0x1); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_CONNECTED); + ufshcd_dme_set(hba, UIC_ARG_MIB(0xA006), 0x8000); + + return 0; +} + +static int gs101_ufs_post_link(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + + /* + * Enable Write Line Unique. This field has to be 0x3 + * to support Write Line Unique transaction on gs101. + */ + hci_writel(ufs, WLU_EN | WLU_BURST_LEN(3), HCI_AXIDMA_RWDATA_BURST_LEN); + + exynos_ufs_enable_dbg_mode(hba); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0x3e8); + exynos_ufs_disable_dbg_mode(hba); + + return 0; +} + +static int gs101_ufs_pre_pwr_change(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr) +{ + struct ufs_hba *hba = ufs->hba; + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000); + unipro_writel(ufs, 8064, UNIPRO_DME_POWERMODE_REQ_LOCALL2TIMER0); + unipro_writel(ufs, 28224, UNIPRO_DME_POWERMODE_REQ_LOCALL2TIMER1); + unipro_writel(ufs, 20160, UNIPRO_DME_POWERMODE_REQ_LOCALL2TIMER2); + unipro_writel(ufs, 12000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0); + unipro_writel(ufs, 32000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1); + unipro_writel(ufs, 16000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2); + + return 0; +} + static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = { .name = "exynos_ufs", .init = exynos_ufs_init, @@ -1583,7 +1960,7 @@ static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = { .hibern8_notify = exynos_ufs_hibern8_notify, .suspend = exynos_ufs_suspend, .resume = exynos_ufs_resume, - .config_scsi_dev = exynos_ufs_config_scsi_dev, + .fill_crypto_prdt = exynos_ufs_fmp_fill_prdt, }; static struct ufs_hba_variant_ops ufs_hba_exynosauto_vh_ops = { @@ -1615,8 +1992,7 @@ static void exynos_ufs_remove(struct platform_device *pdev) struct ufs_hba *hba = platform_get_drvdata(pdev); struct exynos_ufs *ufs = ufshcd_get_variant(hba); - pm_runtime_get_sync(&(pdev)->dev); - ufshcd_remove(hba); + ufshcd_pltfrm_remove(pdev); phy_power_off(ufs->phy); phy_exit(ufs->phy); @@ -1644,7 +2020,9 @@ static struct exynos_ufs_uic_attr exynos7_uic_attr = { .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf), .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf), .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf), - .pa_dbg_option_suite = 0x30103, + .pa_dbg_clk_period_off = PA_DBG_CLK_PERIOD, + .pa_dbg_opt_suite1_val = 0x30103, + .pa_dbg_opt_suite1_off = PA_DBG_OPTION_SUITE, }; static const struct exynos_ufs_drv_data exynosauto_ufs_drvs = { @@ -1689,13 +2067,20 @@ static const struct exynos_ufs_drv_data exynos_ufs_drvs = { EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX | EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB | EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER, - .drv_init = 
exynos7_ufs_drv_init, .pre_link = exynos7_ufs_pre_link, .post_link = exynos7_ufs_post_link, .pre_pwr_change = exynos7_ufs_pre_pwr_change, .post_pwr_change = exynos7_ufs_post_pwr_change, }; +static struct exynos_ufs_uic_attr gs101_uic_attr = { + .tx_trailingclks = 0xff, + .pa_dbg_opt_suite1_val = 0x90913C1C, + .pa_dbg_opt_suite1_off = PA_GS101_DBG_OPTION_SUITE1, + .pa_dbg_opt_suite2_val = 0xE01C115F, + .pa_dbg_opt_suite2_off = PA_GS101_DBG_OPTION_SUITE2, +}; + static struct exynos_ufs_uic_attr fsd_uic_attr = { .tx_trailingclks = 0x10, .tx_dif_p_nsec = 3000000, /* unit: ns */ @@ -1718,7 +2103,9 @@ static struct exynos_ufs_uic_attr fsd_uic_attr = { .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf), .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf), .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf), - .pa_dbg_option_suite = 0x2E820183, + .pa_dbg_clk_period_off = PA_DBG_CLK_PERIOD, + .pa_dbg_opt_suite1_val = 0x2E820183, + .pa_dbg_opt_suite1_off = PA_DBG_OPTION_SUITE, }; static const struct exynos_ufs_drv_data fsd_ufs_drvs = { @@ -1737,7 +2124,26 @@ static const struct exynos_ufs_drv_data fsd_ufs_drvs = { .pre_pwr_change = fsd_ufs_pre_pwr_change, }; +static const struct exynos_ufs_drv_data gs101_ufs_drvs = { + .uic_attr = &gs101_uic_attr, + .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN | + UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR | + UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR | + UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR | + UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL | + UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING, + .opts = EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR | + EXYNOS_UFS_OPT_UFSPR_SECURE | + EXYNOS_UFS_OPT_TIMER_TICK_SELECT, + .drv_init = gs101_ufs_drv_init, + .pre_link = gs101_ufs_pre_link, + .post_link = gs101_ufs_post_link, + .pre_pwr_change = gs101_ufs_pre_pwr_change, +}; + static const struct of_device_id exynos_ufs_of_match[] = { + { .compatible = "google,gs101-ufs", + .data = &gs101_ufs_drvs }, { .compatible = "samsung,exynos7-ufs", .data = &exynos_ufs_drvs }, { .compatible = "samsung,exynosautov9-ufs", @@ -1748,6 +2154,7 @@ static const struct of_device_id exynos_ufs_of_match[] = { .data = &fsd_ufs_drvs }, {}, }; +MODULE_DEVICE_TABLE(of, exynos_ufs_of_match); static const struct dev_pm_ops exynos_ufs_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) @@ -1758,7 +2165,7 @@ static const struct dev_pm_ops exynos_ufs_pm_ops = { static struct platform_driver exynos_ufs_pltform = { .probe = exynos_ufs_probe, - .remove_new = exynos_ufs_remove, + .remove = exynos_ufs_remove, .driver = { .name = "exynos-ufshc", .pm = &exynos_ufs_pm_ops, diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h index a4bd6646d7f1..9670dc138d1e 100644 --- a/drivers/ufs/host/ufs-exynos.h +++ b/drivers/ufs/host/ufs-exynos.h @@ -10,6 +10,12 @@ #define _UFS_EXYNOS_H_ /* + * Component registers + */ + +#define COMP_CLK_PERIOD 0x44 + +/* * UNIPRO registers */ #define UNIPRO_DBG_FORCE_DME_CTRL_STATE 0x150 @@ -30,6 +36,14 @@ #define PA_DBG_OPTION_SUITE_DYN 0x9565 /* + * Note: GS101_DBG_OPTION offsets below differ from the TRM + * but match the downstream driver. Following the TRM + * results in non-functioning UFS. 
+ */ +#define PA_GS101_DBG_OPTION_SUITE1 0x956a +#define PA_GS101_DBG_OPTION_SUITE2 0x956d + +/* * MIBs for Transport Layer debug registers */ #define T_DBG_SKIP_INIT_HIBERN8_EXIT 0xc001 @@ -116,7 +130,7 @@ struct exynos_ufs; #define PA_HIBERN8TIME_VAL 0x20 #define PCLK_AVAIL_MIN 70000000 -#define PCLK_AVAIL_MAX 167000000 +#define PCLK_AVAIL_MAX 267000000 struct exynos_ufs_uic_attr { /* TX Attributes */ @@ -145,7 +159,11 @@ struct exynos_ufs_uic_attr { /* Common Attributes */ unsigned int cmn_pwm_clk_ctrl; /* Internal Attributes */ - unsigned int pa_dbg_option_suite; + unsigned int pa_dbg_clk_period_off; + unsigned int pa_dbg_opt_suite1_val; + unsigned int pa_dbg_opt_suite1_off; + unsigned int pa_dbg_opt_suite2_val; + unsigned int pa_dbg_opt_suite2_off; /* Changeable Attributes */ unsigned int rx_adv_fine_gran_sup_en; unsigned int rx_adv_fine_gran_step; @@ -164,7 +182,7 @@ struct exynos_ufs_drv_data { unsigned int quirks; unsigned int opts; /* SoC's specific operations */ - int (*drv_init)(struct device *dev, struct exynos_ufs *ufs); + int (*drv_init)(struct exynos_ufs *ufs); int (*pre_link)(struct exynos_ufs *ufs); int (*post_link)(struct exynos_ufs *ufs); int (*pre_pwr_change)(struct exynos_ufs *ufs, @@ -221,6 +239,8 @@ struct exynos_ufs { #define EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX BIT(3) #define EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER BIT(4) #define EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR BIT(5) +#define EXYNOS_UFS_OPT_UFSPR_SECURE BIT(6) +#define EXYNOS_UFS_OPT_TIMER_TICK_SELECT BIT(7) }; #define for_each_ufs_rx_lane(ufs, i) \ diff --git a/drivers/ufs/host/ufs-hisi.c b/drivers/ufs/host/ufs-hisi.c index 5ee73ff05251..6e6569de74d8 100644 --- a/drivers/ufs/host/ufs-hisi.c +++ b/drivers/ufs/host/ufs-hisi.c @@ -576,9 +576,7 @@ static int ufs_hisi_probe(struct platform_device *pdev) static void ufs_hisi_remove(struct platform_device *pdev) { - struct ufs_hba *hba = platform_get_drvdata(pdev); - - ufshcd_remove(hba); + ufshcd_pltfrm_remove(pdev); } static const struct dev_pm_ops ufs_hisi_pm_ops = { @@ -590,7 +588,7 @@ static const struct dev_pm_ops ufs_hisi_pm_ops = { static struct platform_driver ufs_hisi_pltform = { .probe = ufs_hisi_probe, - .remove_new = ufs_hisi_remove, + .remove = ufs_hisi_remove, .driver = { .name = "ufshcd-hisi", .pm = &ufs_hisi_pm_ops, diff --git a/drivers/ufs/host/ufs-mediatek-sip.h b/drivers/ufs/host/ufs-mediatek-sip.h new file mode 100644 index 000000000000..7d17aedf6fb8 --- /dev/null +++ b/drivers/ufs/host/ufs-mediatek-sip.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2022 MediaTek Inc. 
+ */ + +#ifndef _UFS_MEDIATEK_SIP_H +#define _UFS_MEDIATEK_SIP_H + +#include <linux/soc/mediatek/mtk_sip_svc.h> + +/* + * SiP (Slicon Partner) commands + */ +#define MTK_SIP_UFS_CONTROL MTK_SIP_SMC_CMD(0x276) +#define UFS_MTK_SIP_VA09_PWR_CTRL BIT(0) +#define UFS_MTK_SIP_DEVICE_RESET BIT(1) +#define UFS_MTK_SIP_CRYPTO_CTRL BIT(2) +#define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3) +#define UFS_MTK_SIP_SRAM_PWR_CTRL BIT(5) +#define UFS_MTK_SIP_GET_VCC_NUM BIT(6) +#define UFS_MTK_SIP_DEVICE_PWR_CTRL BIT(7) +#define UFS_MTK_SIP_MPHY_CTRL BIT(8) +#define UFS_MTK_SIP_MTCMOS_CTRL BIT(9) + +/* + * Multi-VCC by Numbering + */ +enum ufs_mtk_vcc_num { + UFS_VCC_NONE = 0, + UFS_VCC_1, + UFS_VCC_2, + UFS_VCC_MAX +}; + +enum ufs_mtk_mphy_op { + UFS_MPHY_BACKUP = 0, + UFS_MPHY_RESTORE +}; + +/* + * SMC call wrapper function + */ +struct ufs_mtk_smc_arg { + unsigned long cmd; + struct arm_smccc_res *res; + unsigned long v1; + unsigned long v2; + unsigned long v3; + unsigned long v4; + unsigned long v5; + unsigned long v6; + unsigned long v7; +}; + + +static inline void _ufs_mtk_smc(struct ufs_mtk_smc_arg s) +{ + arm_smccc_smc(MTK_SIP_UFS_CONTROL, + s.cmd, + s.v1, s.v2, s.v3, s.v4, s.v5, s.v6, s.res); +} + +#define ufs_mtk_smc(...) \ + _ufs_mtk_smc((struct ufs_mtk_smc_arg) {__VA_ARGS__}) + +/* Sip kernel interface */ +#define ufs_mtk_va09_pwr_ctrl(res, on) \ + ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, &(res), on) + +#define ufs_mtk_crypto_ctrl(res, enable) \ + ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, &(res), enable) + +#define ufs_mtk_ref_clk_notify(on, stage, res) \ + ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, &(res), on, stage) + +#define ufs_mtk_device_reset_ctrl(high, res) \ + ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, &(res), high) + +#define ufs_mtk_sram_pwr_ctrl(on, res) \ + ufs_mtk_smc(UFS_MTK_SIP_SRAM_PWR_CTRL, &(res), on) + +#define ufs_mtk_get_vcc_num(res) \ + ufs_mtk_smc(UFS_MTK_SIP_GET_VCC_NUM, &(res)) + +#define ufs_mtk_device_pwr_ctrl(on, ufs_version, res) \ + ufs_mtk_smc(UFS_MTK_SIP_DEVICE_PWR_CTRL, &(res), on, ufs_version) + +#define ufs_mtk_mphy_ctrl(op, res) \ + ufs_mtk_smc(UFS_MTK_SIP_MPHY_CTRL, &(res), op) + +#define ufs_mtk_mtcmos_ctrl(op, res) \ + ufs_mtk_smc(UFS_MTK_SIP_MTCMOS_CTRL, &(res), op) + +#endif /* !_UFS_MEDIATEK_SIP_H */ diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c index 776bca4f70c8..135cd78109e2 100644 --- a/drivers/ufs/host/ufs-mediatek.c +++ b/drivers/ufs/host/ufs-mediatek.c @@ -17,16 +17,16 @@ #include <linux/of_platform.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> -#include <linux/pm_qos.h> #include <linux/regulator/consumer.h> #include <linux/reset.h> -#include <linux/soc/mediatek/mtk_sip_svc.h> #include <ufs/ufshcd.h> #include "ufshcd-pltfrm.h" #include <ufs/ufs_quirks.h> #include <ufs/unipro.h> + #include "ufs-mediatek.h" +#include "ufs-mediatek-sip.h" static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq); @@ -52,6 +52,7 @@ static const struct of_device_id ufs_mtk_of_match[] = { { .compatible = "mediatek,mt8183-ufshci" }, {}, }; +MODULE_DEVICE_TABLE(of, ufs_mtk_of_match); /* * Details of UIC Errors @@ -119,6 +120,27 @@ static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba) return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO); } +static bool ufs_mtk_is_tx_skew_fix(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + return (host->caps & UFS_MTK_CAP_TX_SKEW_FIX); +} + +static bool ufs_mtk_is_rtff_mtcmos(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); 
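	/*
	 * Illustrative aside on the ufs_mtk_smc() wrapper introduced in
	 * ufs-mediatek-sip.h above (not part of the patch itself): the macro
	 * packs its arguments into a struct ufs_mtk_smc_arg compound literal,
	 * so any unspecified members are zero-initialized. For example,
	 *
	 *	struct arm_smccc_res res;
	 *
	 *	ufs_mtk_mtcmos_ctrl(true, res);
	 *
	 * expands to
	 *
	 *	_ufs_mtk_smc((struct ufs_mtk_smc_arg)
	 *		     { UFS_MTK_SIP_MTCMOS_CTRL, &(res), true });
	 *
	 * i.e. .cmd, .res and .v1 are set positionally while .v2 through .v7
	 * default to zero before arm_smccc_smc() is invoked.
	 */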
+ + return (host->caps & UFS_MTK_CAP_RTFF_MTCMOS); +} + +static bool ufs_mtk_is_allow_vccqx_lpm(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + return (host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM); +} + static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable) { u32 tmp; @@ -170,16 +192,23 @@ static void ufs_mtk_crypto_enable(struct ufs_hba *hba) static void ufs_mtk_host_reset(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct arm_smccc_res res; reset_control_assert(host->hci_reset); reset_control_assert(host->crypto_reset); reset_control_assert(host->unipro_reset); + reset_control_assert(host->mphy_reset); usleep_range(100, 110); reset_control_deassert(host->unipro_reset); reset_control_deassert(host->crypto_reset); reset_control_deassert(host->hci_reset); + reset_control_deassert(host->mphy_reset); + + /* restore mphy setting aftre mphy reset */ + if (host->mphy_reset) + ufs_mtk_mphy_ctrl(UFS_MPHY_RESTORE, res); } static void ufs_mtk_init_reset_control(struct ufs_hba *hba, @@ -204,6 +233,8 @@ static void ufs_mtk_init_reset(struct ufs_hba *hba) "unipro_rst"); ufs_mtk_init_reset_control(hba, &host->crypto_reset, "crypto_rst"); + ufs_mtk_init_reset_control(hba, &host->mphy_reset, + "mphy_rst"); } static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba, @@ -623,24 +654,21 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba) if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto")) host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO; - dev_info(hba->dev, "caps: 0x%x", host->caps); -} + if (of_property_read_bool(np, "mediatek,ufs-tx-skew-fix")) + host->caps |= UFS_MTK_CAP_TX_SKEW_FIX; -static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost) -{ - struct ufs_mtk_host *host = ufshcd_get_variant(hba); + if (of_property_read_bool(np, "mediatek,ufs-disable-mcq")) + host->caps |= UFS_MTK_CAP_DISABLE_MCQ; - if (!host || !host->pm_qos_init) - return; + if (of_property_read_bool(np, "mediatek,ufs-rtff-mtcmos")) + host->caps |= UFS_MTK_CAP_RTFF_MTCMOS; - cpu_latency_qos_update_request(&host->pm_qos_req, - boost ? 
0 : PM_QOS_DEFAULT_VALUE); + dev_info(hba->dev, "caps: 0x%x", host->caps); } static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up) { ufs_mtk_boost_crypt(hba, scale_up); - ufs_mtk_boost_pm_qos(hba, scale_up); } static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on) @@ -660,6 +688,45 @@ static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on) } } +static void ufs_mtk_mcq_disable_irq(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + u32 irq, i; + + if (!hba->mcq_enabled) + return; + + if (host->mcq_nr_intr == 0) + return; + + for (i = 0; i < host->mcq_nr_intr; i++) { + irq = host->mcq_intr_info[i].irq; + disable_irq(irq); + } + host->is_mcq_intr_enabled = false; +} + +static void ufs_mtk_mcq_enable_irq(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + u32 irq, i; + + if (!hba->mcq_enabled) + return; + + if (host->mcq_nr_intr == 0) + return; + + if (host->is_mcq_intr_enabled == true) + return; + + for (i = 0; i < host->mcq_nr_intr; i++) { + irq = host->mcq_intr_info[i].irq; + enable_irq(irq); + } + host->is_mcq_intr_enabled = true; +} + /** * ufs_mtk_setup_clocks - enables/disable clocks * @hba: host controller instance @@ -703,8 +770,10 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on, if (clk_pwr_off) ufs_mtk_pwr_ctrl(hba, false); + ufs_mtk_mcq_disable_irq(hba); } else if (on && status == POST_CHANGE) { ufs_mtk_pwr_ctrl(hba, true); + ufs_mtk_mcq_enable_irq(hba); } return ret; @@ -857,6 +926,9 @@ static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba) host->mcq_nr_intr = UFSHCD_MAX_Q_NR; pdev = container_of(hba->dev, struct platform_device, dev); + if (host->caps & UFS_MTK_CAP_DISABLE_MCQ) + goto failed; + for (i = 0; i < host->mcq_nr_intr; i++) { /* irq index 0 is legacy irq, sq/cq irq start from index 1 */ irq = platform_get_irq(pdev, i + 1); @@ -893,7 +965,9 @@ static int ufs_mtk_init(struct ufs_hba *hba) const struct of_device_id *id; struct device *dev = hba->dev; struct ufs_mtk_host *host; + struct Scsi_Host *shost = hba->host; int err = 0; + struct arm_smccc_res res; host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); if (!host) { @@ -922,6 +996,10 @@ static int ufs_mtk_init(struct ufs_hba *hba) ufs_mtk_init_reset(hba); + /* backup mphy setting if mphy can reset */ + if (host->mphy_reset) + ufs_mtk_mphy_ctrl(UFS_MPHY_BACKUP, res); + /* Enable runtime autosuspend */ hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; @@ -937,6 +1015,9 @@ static int ufs_mtk_init(struct ufs_hba *hba) /* Enable clk scaling*/ hba->caps |= UFSHCD_CAP_CLK_SCALING; + /* Set runtime pm delay to replace default */ + shost->rpm_autosuspend_delay = MTK_RPM_AUTOSUSPEND_DELAY_MS; + hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL; hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR; hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC; @@ -945,6 +1026,9 @@ static int ufs_mtk_init(struct ufs_hba *hba) if (host->caps & UFS_MTK_CAP_DISABLE_AH8) hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; + if (host->caps & UFS_MTK_CAP_DISABLE_MCQ) + hba->quirks |= UFSHCD_QUIRK_BROKEN_LSDBS_CAP; + ufs_mtk_init_clocks(hba); /* @@ -955,14 +1039,19 @@ static int ufs_mtk_init(struct ufs_hba *hba) * Enable phy clocks specifically here. 
*/ ufs_mtk_mphy_power_on(hba, true); + + if (ufs_mtk_is_rtff_mtcmos(hba)) { + /* First Restore here, to avoid backup unexpected value */ + ufs_mtk_mtcmos_ctrl(false, res); + + /* Power on to init */ + ufs_mtk_mtcmos_ctrl(true, res); + } + ufs_mtk_setup_clocks(hba, true, POST_CHANGE); host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER); - /* Initialize pm-qos request */ - cpu_latency_qos_add_request(&host->pm_qos_req, PM_QOS_DEFAULT_VALUE); - host->pm_qos_init = true; - goto out; out_variant_clear: @@ -1206,25 +1295,29 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba) return err; err = ufshcd_uic_hibern8_exit(hba); - if (!err) - ufshcd_set_link_active(hba); - else + if (err) return err; - if (!hba->mcq_enabled) { - err = ufshcd_make_hba_operational(hba); - } else { - ufs_mtk_config_mcq(hba, false); - ufshcd_mcq_make_queues_operational(hba); - ufshcd_mcq_config_mac(hba, hba->nutrs); - /* Enable MCQ mode */ - ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1, - REG_UFS_MEM_CFG); + /* Check link state to make sure exit h8 success */ + ufs_mtk_wait_idle_state(hba, 5); + err = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100); + if (err) { + dev_warn(hba->dev, "exit h8 state fail, err=%d\n", err); + return err; } + ufshcd_set_link_active(hba); + err = ufshcd_make_hba_operational(hba); if (err) return err; + if (hba->mcq_enabled) { + ufs_mtk_config_mcq(hba, false); + ufshcd_mcq_make_queues_operational(hba); + ufshcd_mcq_config_mac(hba, hba->nutrs); + ufshcd_mcq_enable(hba); + } + return 0; } @@ -1271,27 +1364,37 @@ static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm) static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm) { - if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2) - return; + bool skip_vccqx = false; - /* Skip if VCC is assumed always-on */ - if (!hba->vreg_info.vcc) - return; - - /* Bypass LPM when device is still active */ + /* Prevent entering LPM when device is still active */ if (lpm && ufshcd_is_ufs_dev_active(hba)) return; - /* Bypass LPM if VCC is enabled */ - if (lpm && hba->vreg_info.vcc->enabled) - return; + /* Skip vccqx lpm control and control vsx only */ + if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2) + skip_vccqx = true; + + /* VCC is always-on, control vsx only */ + if (!hba->vreg_info.vcc) + skip_vccqx = true; + + /* Broken vcc keep vcc always on, most case control vsx only */ + if (lpm && hba->vreg_info.vcc && hba->vreg_info.vcc->enabled) { + /* Some device vccqx/vsx can enter lpm */ + if (ufs_mtk_is_allow_vccqx_lpm(hba)) + skip_vccqx = false; + else /* control vsx only */ + skip_vccqx = true; + } if (lpm) { - ufs_mtk_vccqx_set_lpm(hba, lpm); + if (!skip_vccqx) + ufs_mtk_vccqx_set_lpm(hba, lpm); ufs_mtk_vsx_set_lpm(hba, lpm); } else { ufs_mtk_vsx_set_lpm(hba, lpm); - ufs_mtk_vccqx_set_lpm(hba, lpm); + if (!skip_vccqx) + ufs_mtk_vccqx_set_lpm(hba, lpm); } } @@ -1342,7 +1445,7 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, if (ufshcd_is_link_off(hba)) ufs_mtk_device_reset_ctrl(0, res); - ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res); + ufs_mtk_sram_pwr_ctrl(false, res); return 0; fail: @@ -1363,7 +1466,7 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) ufs_mtk_dev_vreg_set_lpm(hba, false); - ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res); + ufs_mtk_sram_pwr_ctrl(true, res); err = ufs_mtk_mphy_power_on(hba, true); if (err) @@ -1406,6 +1509,17 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba) if (mid == 
UFS_VENDOR_SAMSUNG) { ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6); ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10); + } else if (mid == UFS_VENDOR_MICRON) { + /* Only for the host which have TX skew issue */ + if (ufs_mtk_is_tx_skew_fix(hba) && + (STR_PRFX_EQUAL("MT128GBCAV2U31", dev_info->model) || + STR_PRFX_EQUAL("MT256GBCAV4U31", dev_info->model) || + STR_PRFX_EQUAL("MT512GBCAV8U31", dev_info->model) || + STR_PRFX_EQUAL("MT256GBEAX4U40", dev_info->model) || + STR_PRFX_EQUAL("MT512GAYAX4U40", dev_info->model) || + STR_PRFX_EQUAL("MT001TAYAX8U40", dev_info->model))) { + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 8); + } } /* @@ -1547,6 +1661,12 @@ static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up, static int ufs_mtk_get_hba_mac(struct ufs_hba *hba) { + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + /* MCQ operation not permitted */ + if (host->caps & UFS_MTK_CAP_DISABLE_MCQ) + return -EPERM; + return MAX_SUPP_MAC; } @@ -1660,6 +1780,15 @@ static int ufs_mtk_config_esi(struct ufs_hba *hba) return ufs_mtk_config_mcq(hba, true); } +static void ufs_mtk_config_scsi_dev(struct scsi_device *sdev) +{ + struct ufs_hba *hba = shost_priv(sdev->host); + + dev_dbg(hba->dev, "lu %llu scsi device configured", sdev->lun); + if (sdev->lun == 2) + blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, sdev->request_queue); +} + /* * struct ufs_hba_mtk_vops - UFS MTK specific variant operations * @@ -1668,6 +1797,7 @@ static int ufs_mtk_config_esi(struct ufs_hba *hba) */ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = { .name = "mediatek.ufshci", + .max_num_rtt = MTK_MAX_NUM_RTT, .init = ufs_mtk_init, .get_ufs_hci_version = ufs_mtk_get_ufs_hci_version, .setup_clocks = ufs_mtk_setup_clocks, @@ -1688,6 +1818,7 @@ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = { .op_runtime_config = ufs_mtk_op_runtime_config, .mcq_config_resource = ufs_mtk_mcq_config_resource, .config_esi = ufs_mtk_config_esi, + .config_scsi_dev = ufs_mtk_config_scsi_dev, }; /** @@ -1748,16 +1879,14 @@ out: */ static void ufs_mtk_remove(struct platform_device *pdev) { - struct ufs_hba *hba = platform_get_drvdata(pdev); - - pm_runtime_get_sync(&(pdev)->dev); - ufshcd_remove(hba); + ufshcd_pltfrm_remove(pdev); } #ifdef CONFIG_PM_SLEEP static int ufs_mtk_system_suspend(struct device *dev) { struct ufs_hba *hba = dev_get_drvdata(dev); + struct arm_smccc_res res; int ret; ret = ufshcd_system_suspend(dev); @@ -1766,15 +1895,22 @@ static int ufs_mtk_system_suspend(struct device *dev) ufs_mtk_dev_vreg_set_lpm(hba, true); + if (ufs_mtk_is_rtff_mtcmos(hba)) + ufs_mtk_mtcmos_ctrl(false, res); + return 0; } static int ufs_mtk_system_resume(struct device *dev) { struct ufs_hba *hba = dev_get_drvdata(dev); + struct arm_smccc_res res; ufs_mtk_dev_vreg_set_lpm(hba, false); + if (ufs_mtk_is_rtff_mtcmos(hba)) + ufs_mtk_mtcmos_ctrl(true, res); + return ufshcd_system_resume(dev); } #endif @@ -1783,6 +1919,7 @@ static int ufs_mtk_system_resume(struct device *dev) static int ufs_mtk_runtime_suspend(struct device *dev) { struct ufs_hba *hba = dev_get_drvdata(dev); + struct arm_smccc_res res; int ret = 0; ret = ufshcd_runtime_suspend(dev); @@ -1791,12 +1928,19 @@ static int ufs_mtk_runtime_suspend(struct device *dev) ufs_mtk_dev_vreg_set_lpm(hba, true); + if (ufs_mtk_is_rtff_mtcmos(hba)) + ufs_mtk_mtcmos_ctrl(false, res); + return 0; } static int ufs_mtk_runtime_resume(struct device *dev) { struct ufs_hba *hba = dev_get_drvdata(dev); + struct arm_smccc_res res; + + if (ufs_mtk_is_rtff_mtcmos(hba)) + 
ufs_mtk_mtcmos_ctrl(true, res); ufs_mtk_dev_vreg_set_lpm(hba, false); @@ -1815,7 +1959,7 @@ static const struct dev_pm_ops ufs_mtk_pm_ops = { static struct platform_driver ufs_mtk_pltform = { .probe = ufs_mtk_probe, - .remove_new = ufs_mtk_remove, + .remove = ufs_mtk_remove, .driver = { .name = "ufshcd-mtk", .pm = &ufs_mtk_pm_ops, diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h index f76e80d91729..05d76a6bd772 100644 --- a/drivers/ufs/host/ufs-mediatek.h +++ b/drivers/ufs/host/ufs-mediatek.h @@ -7,8 +7,6 @@ #define _UFS_MEDIATEK_H #include <linux/bitops.h> -#include <linux/pm_qos.h> -#include <linux/soc/mediatek/mtk_sip_svc.h> /* * MCQ define and struct @@ -101,18 +99,6 @@ enum { }; /* - * SiP commands - */ -#define MTK_SIP_UFS_CONTROL MTK_SIP_SMC_CMD(0x276) -#define UFS_MTK_SIP_VA09_PWR_CTRL BIT(0) -#define UFS_MTK_SIP_DEVICE_RESET BIT(1) -#define UFS_MTK_SIP_CRYPTO_CTRL BIT(2) -#define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3) -#define UFS_MTK_SIP_HOST_PWR_CTRL BIT(5) -#define UFS_MTK_SIP_GET_VCC_NUM BIT(6) -#define UFS_MTK_SIP_DEVICE_PWR_CTRL BIT(7) - -/* * VS_DEBUGCLOCKENABLE */ enum { @@ -136,7 +122,17 @@ enum ufs_mtk_host_caps { UFS_MTK_CAP_VA09_PWR_CTRL = 1 << 1, UFS_MTK_CAP_DISABLE_AH8 = 1 << 2, UFS_MTK_CAP_BROKEN_VCC = 1 << 3, + + /* + * Override UFS_MTK_CAP_BROKEN_VCC's behavior to + * allow vccqx upstream to enter LPM + */ + UFS_MTK_CAP_ALLOW_VCCQX_LPM = 1 << 5, UFS_MTK_CAP_PMC_VIA_FASTAUTO = 1 << 6, + UFS_MTK_CAP_TX_SKEW_FIX = 1 << 7, + UFS_MTK_CAP_DISABLE_MCQ = 1 << 8, + /* Control MTCMOS with RTFF */ + UFS_MTK_CAP_RTFF_MTCMOS = 1 << 9, }; struct ufs_mtk_crypt_cfg { @@ -167,18 +163,17 @@ struct ufs_mtk_mcq_intr_info { struct ufs_mtk_host { struct phy *mphy; - struct pm_qos_request pm_qos_req; struct regulator *reg_va09; struct reset_control *hci_reset; struct reset_control *unipro_reset; struct reset_control *crypto_reset; + struct reset_control *mphy_reset; struct ufs_hba *hba; struct ufs_mtk_crypt_cfg *crypt; struct ufs_mtk_clk mclk; struct ufs_mtk_hw_ver hw_ver; enum ufs_mtk_host_caps caps; bool mphy_powered_on; - bool pm_qos_init; bool unipro_lpm; bool ref_clk_enabled; u16 ref_clk_ungating_wait_us; @@ -186,74 +181,15 @@ struct ufs_mtk_host { u32 ip_ver; bool mcq_set_intr; + bool is_mcq_intr_enabled; int mcq_nr_intr; struct ufs_mtk_mcq_intr_info mcq_intr_info[UFSHCD_MAX_Q_NR]; }; -/* - * Multi-VCC by Numbering - */ -enum ufs_mtk_vcc_num { - UFS_VCC_NONE = 0, - UFS_VCC_1, - UFS_VCC_2, - UFS_VCC_MAX -}; - -/* - * Host Power Control options - */ -enum { - HOST_PWR_HCI = 0, - HOST_PWR_MPHY -}; - -/* - * SMC call wrapper function - */ -struct ufs_mtk_smc_arg { - unsigned long cmd; - struct arm_smccc_res *res; - unsigned long v1; - unsigned long v2; - unsigned long v3; - unsigned long v4; - unsigned long v5; - unsigned long v6; - unsigned long v7; -}; - -static void _ufs_mtk_smc(struct ufs_mtk_smc_arg s) -{ - arm_smccc_smc(MTK_SIP_UFS_CONTROL, - s.cmd, s.v1, s.v2, s.v3, s.v4, s.v5, s.v6, s.res); -} - -#define ufs_mtk_smc(...) 
\ - _ufs_mtk_smc((struct ufs_mtk_smc_arg) {__VA_ARGS__}) - -/* - * SMC call interface - */ -#define ufs_mtk_va09_pwr_ctrl(res, on) \ - ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, &(res), on) - -#define ufs_mtk_crypto_ctrl(res, enable) \ - ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, &(res), enable) - -#define ufs_mtk_ref_clk_notify(on, stage, res) \ - ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, &(res), on, stage) - -#define ufs_mtk_device_reset_ctrl(high, res) \ - ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, &(res), high) - -#define ufs_mtk_host_pwr_ctrl(opt, on, res) \ - ufs_mtk_smc(UFS_MTK_SIP_HOST_PWR_CTRL, &(res), opt, on) - -#define ufs_mtk_get_vcc_num(res) \ - ufs_mtk_smc(UFS_MTK_SIP_GET_VCC_NUM, &(res)) +/* MTK delay of autosuspend: 500 ms */ +#define MTK_RPM_AUTOSUSPEND_DELAY_MS 500 -#define ufs_mtk_device_pwr_ctrl(on, ufs_ver, res) \ - ufs_mtk_smc(UFS_MTK_SIP_DEVICE_PWR_CTRL, &(res), on, ufs_ver) +/* MTK RTT support number */ +#define MTK_MAX_NUM_RTT 2 #endif /* !_UFS_MEDIATEK_H */ diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 39eef470f8fa..23b9f6efa047 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -47,7 +47,7 @@ enum { TSTBUS_MAX, }; -#define QCOM_UFS_MAX_GEAR 4 +#define QCOM_UFS_MAX_GEAR 5 #define QCOM_UFS_MAX_LANE 2 enum { @@ -67,27 +67,33 @@ static const struct __ufs_qcom_bw_table { [MODE_PWM][UFS_PWM_G2][UFS_LANE_1] = { 1844, 1000 }, [MODE_PWM][UFS_PWM_G3][UFS_LANE_1] = { 3688, 1000 }, [MODE_PWM][UFS_PWM_G4][UFS_LANE_1] = { 7376, 1000 }, + [MODE_PWM][UFS_PWM_G5][UFS_LANE_1] = { 14752, 1000 }, [MODE_PWM][UFS_PWM_G1][UFS_LANE_2] = { 1844, 1000 }, [MODE_PWM][UFS_PWM_G2][UFS_LANE_2] = { 3688, 1000 }, [MODE_PWM][UFS_PWM_G3][UFS_LANE_2] = { 7376, 1000 }, [MODE_PWM][UFS_PWM_G4][UFS_LANE_2] = { 14752, 1000 }, + [MODE_PWM][UFS_PWM_G5][UFS_LANE_2] = { 29504, 1000 }, [MODE_HS_RA][UFS_HS_G1][UFS_LANE_1] = { 127796, 1000 }, [MODE_HS_RA][UFS_HS_G2][UFS_LANE_1] = { 255591, 1000 }, [MODE_HS_RA][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 }, [MODE_HS_RA][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 }, + [MODE_HS_RA][UFS_HS_G5][UFS_LANE_1] = { 5836800, 409600 }, [MODE_HS_RA][UFS_HS_G1][UFS_LANE_2] = { 255591, 1000 }, [MODE_HS_RA][UFS_HS_G2][UFS_LANE_2] = { 511181, 1000 }, [MODE_HS_RA][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 }, [MODE_HS_RA][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 }, + [MODE_HS_RA][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 }, [MODE_HS_RB][UFS_HS_G1][UFS_LANE_1] = { 149422, 1000 }, [MODE_HS_RB][UFS_HS_G2][UFS_LANE_1] = { 298189, 1000 }, [MODE_HS_RB][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 }, [MODE_HS_RB][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 }, + [MODE_HS_RB][UFS_HS_G5][UFS_LANE_1] = { 5836800, 409600 }, [MODE_HS_RB][UFS_HS_G1][UFS_LANE_2] = { 298189, 1000 }, [MODE_HS_RB][UFS_HS_G2][UFS_LANE_2] = { 596378, 1000 }, [MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 }, [MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 }, - [MODE_MAX][0][0] = { 7643136, 307200 }, + [MODE_HS_RB][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 }, + [MODE_MAX][0][0] = { 7643136, 819200 }, }; static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host); @@ -106,11 +112,18 @@ static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host) qcom_ice_enable(host->ice); } +static const struct blk_crypto_ll_ops ufs_qcom_crypto_ops; /* forward decl */ + static int ufs_qcom_ice_init(struct ufs_qcom_host *host) { struct ufs_hba *hba = host->hba; + struct blk_crypto_profile *profile = &hba->crypto_profile; struct device *dev = hba->dev; 
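	/*
	 * Illustrative summary, not part of the upstream patch: the profile
	 * initialization below sizes the keyslot table from the CFGC field of
	 * REG_UFS_CCAP (CFGC + 1 keyslots), hooks up ufs_qcom_crypto_ops as
	 * the profile's ll_ops, and advertises AES-256-XTS with the data-unit
	 * sizes reported by each crypto capability entry (multiplying the
	 * 512-byte-granular sdus_mask by 512 converts it into blk-crypto's
	 * byte-size bitmask). UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE then tells
	 * the UFS core to use this variant-provided profile rather than
	 * building the standard one.
	 */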
struct qcom_ice *ice; + union ufs_crypto_capabilities caps; + union ufs_crypto_cap_entry cap; + int err; + int i; ice = of_qcom_ice_get(dev); if (ice == ERR_PTR(-EOPNOTSUPP)) { @@ -122,8 +135,38 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host) return PTR_ERR_OR_ZERO(ice); host->ice = ice; - hba->caps |= UFSHCD_CAP_CRYPTO; + /* Initialize the blk_crypto_profile */ + + caps.reg_val = cpu_to_le32(ufshcd_readl(hba, REG_UFS_CCAP)); + + /* The number of keyslots supported is (CFGC+1) */ + err = devm_blk_crypto_profile_init(dev, profile, caps.config_count + 1); + if (err) + return err; + + profile->ll_ops = ufs_qcom_crypto_ops; + profile->max_dun_bytes_supported = 8; + profile->dev = dev; + + /* + * Currently this driver only supports AES-256-XTS. All known versions + * of ICE support it, but to be safe make sure it is really declared in + * the crypto capability registers. The crypto capability registers + * also give the supported data unit size(s). + */ + for (i = 0; i < caps.num_crypto_cap; i++) { + cap.reg_val = cpu_to_le32(ufshcd_readl(hba, + REG_UFS_CRYPTOCAP + + i * sizeof(__le32))); + if (cap.algorithm_id == UFS_CRYPTO_ALG_AES_XTS && + cap.key_size == UFS_CRYPTO_KEY_SIZE_256) + profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |= + cap.sdus_mask * 512; + } + + hba->caps |= UFSHCD_CAP_CRYPTO; + hba->quirks |= UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE; return 0; } @@ -143,34 +186,49 @@ static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host) return 0; } -static int ufs_qcom_ice_program_key(struct ufs_hba *hba, - const union ufs_crypto_cfg_entry *cfg, - int slot) +static int ufs_qcom_ice_keyslot_program(struct blk_crypto_profile *profile, + const struct blk_crypto_key *key, + unsigned int slot) { + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); struct ufs_qcom_host *host = ufshcd_get_variant(hba); - union ufs_crypto_cap_entry cap; - bool config_enable = - cfg->config_enable & UFS_CRYPTO_CONFIGURATION_ENABLE; + int err; /* Only AES-256-XTS has been tested so far. 
*/ - cap = hba->crypto_cap_array[cfg->crypto_cap_idx]; - if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS || - cap.key_size != UFS_CRYPTO_KEY_SIZE_256) + if (key->crypto_cfg.crypto_mode != BLK_ENCRYPTION_MODE_AES_256_XTS) return -EOPNOTSUPP; - if (config_enable) - return qcom_ice_program_key(host->ice, - QCOM_ICE_CRYPTO_ALG_AES_XTS, - QCOM_ICE_CRYPTO_KEY_SIZE_256, - cfg->crypto_key, - cfg->data_unit_size, slot); - else - return qcom_ice_evict_key(host->ice, slot); + ufshcd_hold(hba); + err = qcom_ice_program_key(host->ice, + QCOM_ICE_CRYPTO_ALG_AES_XTS, + QCOM_ICE_CRYPTO_KEY_SIZE_256, + key->raw, + key->crypto_cfg.data_unit_size / 512, + slot); + ufshcd_release(hba); + return err; } -#else +static int ufs_qcom_ice_keyslot_evict(struct blk_crypto_profile *profile, + const struct blk_crypto_key *key, + unsigned int slot) +{ + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + int err; -#define ufs_qcom_ice_program_key NULL + ufshcd_hold(hba); + err = qcom_ice_evict_key(host->ice, slot); + ufshcd_release(hba); + return err; +} + +static const struct blk_crypto_ll_ops ufs_qcom_crypto_ops = { + .keyslot_program = ufs_qcom_ice_keyslot_program, + .keyslot_evict = ufs_qcom_ice_keyslot_evict, +}; + +#else static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host) { @@ -278,9 +336,6 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host) if (host->hw_ver.major >= 0x05) ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0); - - /* make sure above configuration is applied before we return */ - mb(); } /* @@ -365,6 +420,11 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) if (ret) return ret; + if (phy->power_count) { + phy_power_off(phy); + phy_exit(phy); + } + /* phy initialization - calibrate the phy */ ret = phy_init(phy); if (ret) { @@ -409,7 +469,7 @@ static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba) REG_UFS_CFG2); /* Ensure that HW clock gating is enabled before next operations */ - mb(); + ufshcd_readl(hba, REG_UFS_CFG2); } static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, @@ -501,7 +561,7 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, * make sure above write gets applied before we return from * this function. */ - mb(); + ufshcd_readl(hba, REG_UFS_SYS1CLK_1US); } return 0; @@ -531,8 +591,7 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, * and device TX LCC are disabled once link startup is * completed. */ - if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41) - err = ufshcd_disable_host_tx_lcc(hba); + err = ufshcd_disable_host_tx_lcc(hba); break; default: @@ -690,6 +749,16 @@ static struct __ufs_qcom_bw_table ufs_qcom_get_bw_table(struct ufs_qcom_host *ho int gear = max_t(u32, p->gear_rx, p->gear_tx); int lane = max_t(u32, p->lane_rx, p->lane_tx); + if (WARN_ONCE(gear > QCOM_UFS_MAX_GEAR, + "ICC scaling for UFS Gear (%d) not supported. Using Gear (%d) bandwidth\n", + gear, QCOM_UFS_MAX_GEAR)) + gear = QCOM_UFS_MAX_GEAR; + + if (WARN_ONCE(lane > QCOM_UFS_MAX_LANE, + "ICC scaling for UFS Lane (%d) not supported. Using Lane (%d) bandwidth\n", + lane, QCOM_UFS_MAX_LANE)) + lane = QCOM_UFS_MAX_LANE; + if (ufshcd_is_hs_mode(p)) { if (p->hs_rate == PA_HS_MODE_B) return ufs_qcom_bw_table[MODE_HS_RB][gear][lane]; @@ -738,8 +807,17 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, * the second init can program the optimal PHY settings. This allows one to start * the first init with either the minimum or the maximum support gear. 
*/ - if (hba->ufshcd_state == UFSHCD_STATE_RESET) - host->phy_gear = dev_req_params->gear_tx; + if (hba->ufshcd_state == UFSHCD_STATE_RESET) { + /* + * Skip REINIT if the negotiated gear matches with the + * initial phy_gear. Otherwise, update the phy_gear to + * program the optimal gear setting during REINIT. + */ + if (host->phy_gear == dev_req_params->gear_tx) + hba->quirks &= ~UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH; + else + host->phy_gear = dev_req_params->gear_tx; + } /* enable the device ref clock before changing to HS mode */ if (!ufshcd_is_hs_mode(&hba->pwr_info) && @@ -807,12 +885,28 @@ static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba) if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME) err = ufs_qcom_quirk_host_pa_saveconfigtime(hba); - if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC) - hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE; - return err; } +/* UFS device-specific quirks */ +static struct ufs_dev_quirk ufs_qcom_dev_fixups[] = { + { .wmanufacturerid = UFS_VENDOR_SKHYNIX, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM }, + { .wmanufacturerid = UFS_VENDOR_TOSHIBA, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM }, + { .wmanufacturerid = UFS_VENDOR_WDC, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE }, + {} +}; + +static void ufs_qcom_fixup_dev_quirks(struct ufs_hba *hba) +{ + ufshcd_fixup_dev_quirks(hba, ufs_qcom_dev_fixups); +} + static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba) { return ufshci_version(2, 0); @@ -829,6 +923,7 @@ static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba) */ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) { + const struct ufs_qcom_drvdata *drvdata = of_device_get_match_data(hba->dev); struct ufs_qcom_host *host = ufshcd_get_variant(hba); if (host->hw_ver.major == 0x2) @@ -836,6 +931,9 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) if (host->hw_ver.major > 0x3) hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH; + + if (drvdata && drvdata->quirks) + hba->quirks |= drvdata->quirks; } static void ufs_qcom_set_phy_gear(struct ufs_qcom_host *host) @@ -843,15 +941,20 @@ static void ufs_qcom_set_phy_gear(struct ufs_qcom_host *host) struct ufs_host_params *host_params = &host->host_params; u32 val, dev_major; + /* + * Default to powering up the PHY to the max gear possible, which is + * backwards compatible with lower gears but not optimal from + * a power usage point of view. After device negotiation, if the + * gear is lower a reinit will be performed to program the PHY + * to the ideal gear for this combo of controller and device. + */ host->phy_gear = host_params->hs_tx_gear; if (host->hw_ver.major < 0x4) { /* - * For controllers whose major HW version is < 4, power up the - * PHY using minimum supported gear (UFS_HS_G2). Switching to - * max gear will be performed during reinit if supported. - * For newer controllers, whose major HW version is >= 4, power - * up the PHY using max supported gear. + * These controllers only have one PHY init sequence, + * let's power up the PHY using that (the minimum supported + * gear, UFS_HS_G2). 
*/ host->phy_gear = UFS_HS_G2; } else if (host->hw_ver.major >= 0x5) { @@ -1018,6 +1121,7 @@ static int ufs_qcom_init(struct ufs_hba *hba) struct device *dev = hba->dev; struct ufs_qcom_host *host; struct ufs_clk_info *clki; + const struct ufs_qcom_drvdata *drvdata = of_device_get_match_data(hba->dev); host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); if (!host) @@ -1097,6 +1201,9 @@ static int ufs_qcom_init(struct ufs_hba *hba) dev_warn(dev, "%s: failed to configure the testbus %d\n", __func__, err); + if (drvdata && drvdata->no_phy_retention) + hba->spm_lvl = UFS_PM_LVL_5; + return 0; out_variant_clear: @@ -1196,8 +1303,10 @@ static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up) list_for_each_entry(clki, head, list) { if (!IS_ERR_OR_NULL(clki->clk) && - !strcmp(clki->name, "core_clk_unipro")) { - if (is_scale_up) + !strcmp(clki->name, "core_clk_unipro")) { + if (!clki->max_freq) + cycles_in_1us = 150; /* default for backwards compatibility */ + else if (is_scale_up) cycles_in_1us = ceil(clki->max_freq, (1000 * 1000)); else cycles_in_1us = ceil(clk_get_rate(clki->clk), (1000 * 1000)); @@ -1429,11 +1538,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host) (u32)host->testbus.select_minor << offset, reg); ufs_qcom_enable_test_bus(host); - /* - * Make sure the test bus configuration is - * committed before returning. - */ - mb(); return 0; } @@ -1525,6 +1629,8 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, p->timer = DEVFREQ_TIMER_DELAYED; d->upthreshold = 70; d->downdifferential = 5; + + hba->clk_scaling.suspend_on_no_request = true; } #else static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, @@ -1534,13 +1640,6 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, } #endif -static void ufs_qcom_reinit_notify(struct ufs_hba *hba) -{ - struct ufs_qcom_host *host = ufshcd_get_variant(hba); - - phy_power_off(host->generic_phy); -} - /* Resources */ static const struct ufshcd_res_info ufs_res_info[RES_MAX] = { {.name = "ufs_mem",}, @@ -1712,8 +1811,8 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba) * 2. Poll queues do not need ESI. 
*/ nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]; - ret = platform_msi_domain_alloc_irqs(hba->dev, nr_irqs, - ufs_qcom_write_msi_msg); + ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs, + ufs_qcom_write_msi_msg); if (ret) { dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret); return ret; @@ -1742,7 +1841,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba) devm_free_irq(hba->dev, desc->irq, hba); } msi_unlock_descs(hba->dev); - platform_msi_domain_free_irqs(hba->dev); + platform_device_msi_free_irqs_all(hba->dev); } else { if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 && host->hw_ver.step == 0) @@ -1773,13 +1872,12 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .link_startup_notify = ufs_qcom_link_startup_notify, .pwr_change_notify = ufs_qcom_pwr_change_notify, .apply_dev_quirks = ufs_qcom_apply_dev_quirks, + .fixup_dev_quirks = ufs_qcom_fixup_dev_quirks, .suspend = ufs_qcom_suspend, .resume = ufs_qcom_resume, .dbg_register_dump = ufs_qcom_dump_dbg_regs, .device_reset = ufs_qcom_device_reset, .config_scaling_param = ufs_qcom_config_scaling_param, - .program_key = ufs_qcom_ice_program_key, - .reinit_notify = ufs_qcom_reinit_notify, .mcq_config_resource = ufs_qcom_mcq_config_resource, .get_hba_mac = ufs_qcom_get_hba_mac, .op_runtime_config = ufs_qcom_op_runtime_config, @@ -1815,14 +1913,22 @@ static int ufs_qcom_probe(struct platform_device *pdev) static void ufs_qcom_remove(struct platform_device *pdev) { struct ufs_hba *hba = platform_get_drvdata(pdev); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); - pm_runtime_get_sync(&(pdev)->dev); - ufshcd_remove(hba); - platform_msi_domain_free_irqs(hba->dev); + ufshcd_pltfrm_remove(pdev); + if (host->esi_enabled) + platform_device_msi_free_irqs_all(hba->dev); } +static const struct ufs_qcom_drvdata ufs_qcom_sm8550_drvdata = { + .quirks = UFSHCD_QUIRK_BROKEN_LSDBS_CAP, + .no_phy_retention = true, +}; + static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = { - { .compatible = "qcom,ufshc"}, + { .compatible = "qcom,ufshc" }, + { .compatible = "qcom,sm8550-ufshc", .data = &ufs_qcom_sm8550_drvdata }, + { .compatible = "qcom,sm8650-ufshc", .data = &ufs_qcom_sm8550_drvdata }, {}, }; MODULE_DEVICE_TABLE(of, ufs_qcom_of_match); @@ -1850,7 +1956,7 @@ static const struct dev_pm_ops ufs_qcom_pm_ops = { static struct platform_driver ufs_qcom_pltform = { .probe = ufs_qcom_probe, - .remove_new = ufs_qcom_remove, + .remove = ufs_qcom_remove, .driver = { .name = "ufshcd-qcom", .pm = &ufs_qcom_pm_ops, @@ -1860,4 +1966,5 @@ static struct platform_driver ufs_qcom_pltform = { }; module_platform_driver(ufs_qcom_pltform); +MODULE_DESCRIPTION("Qualcomm UFS host controller driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h index 9dd9a391ebb7..919f53682beb 100644 --- a/drivers/ufs/host/ufs-qcom.h +++ b/drivers/ufs/host/ufs-qcom.h @@ -151,10 +151,10 @@ static inline void ufs_qcom_assert_reset(struct ufs_hba *hba) ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, UFS_PHY_SOFT_RESET, REG_UFS_CFG1); /* - * Make sure assertion of ufs phy reset is written to - * register before returning + * Dummy read to ensure the write takes effect before doing any sort + * of delay */ - mb(); + ufshcd_readl(hba, REG_UFS_CFG1); } static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba) @@ -162,10 +162,10 @@ static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba) ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, 0, REG_UFS_CFG1); /* - * Make sure 
de-assertion of ufs phy reset is written to - * register before returning + * Dummy read to ensure the write takes effect before doing any sort + * of delay */ - mb(); + ufshcd_readl(hba, REG_UFS_CFG1); } /* Host controller hardware version: major.minor.step */ @@ -217,6 +217,11 @@ struct ufs_qcom_host { bool esi_enabled; }; +struct ufs_qcom_drvdata { + enum ufshcd_quirks quirks; + bool no_phy_retention; +}; + static inline u32 ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg) { diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c index 8711e5cbc968..03cd82db751b 100644 --- a/drivers/ufs/host/ufs-renesas.c +++ b/drivers/ufs/host/ufs-renesas.c @@ -7,6 +7,7 @@ #include <linux/clk.h> #include <linux/delay.h> +#include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/iopoll.h> #include <linux/kernel.h> @@ -364,14 +365,20 @@ static int ufs_renesas_init(struct ufs_hba *hba) return -ENOMEM; ufshcd_set_variant(hba, priv); - hba->quirks |= UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS | UFSHCD_QUIRK_HIBERN_FASTAUTO; + hba->quirks |= UFSHCD_QUIRK_HIBERN_FASTAUTO; return 0; } +static int ufs_renesas_set_dma_mask(struct ufs_hba *hba) +{ + return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); +} + static const struct ufs_hba_variant_ops ufs_renesas_vops = { .name = "renesas", .init = ufs_renesas_init, + .set_dma_mask = ufs_renesas_set_dma_mask, .setup_clocks = ufs_renesas_setup_clocks, .hce_enable_notify = ufs_renesas_hce_enable_notify, .dbg_register_dump = ufs_renesas_dbg_register_dump, @@ -390,14 +397,12 @@ static int ufs_renesas_probe(struct platform_device *pdev) static void ufs_renesas_remove(struct platform_device *pdev) { - struct ufs_hba *hba = platform_get_drvdata(pdev); - - ufshcd_remove(hba); + ufshcd_pltfrm_remove(pdev); } static struct platform_driver ufs_renesas_platform = { .probe = ufs_renesas_probe, - .remove_new = ufs_renesas_remove, + .remove = ufs_renesas_remove, .driver = { .name = "ufshcd-renesas", .of_match_table = of_match_ptr(ufs_renesas_of_match), diff --git a/drivers/ufs/host/ufs-sprd.c b/drivers/ufs/host/ufs-sprd.c index d8b165908809..b1d532363f9d 100644 --- a/drivers/ufs/host/ufs-sprd.c +++ b/drivers/ufs/host/ufs-sprd.c @@ -427,10 +427,7 @@ static int ufs_sprd_probe(struct platform_device *pdev) static void ufs_sprd_remove(struct platform_device *pdev) { - struct ufs_hba *hba = platform_get_drvdata(pdev); - - pm_runtime_get_sync(&(pdev)->dev); - ufshcd_remove(hba); + ufshcd_pltfrm_remove(pdev); } static const struct dev_pm_ops ufs_sprd_pm_ops = { @@ -442,7 +439,7 @@ static const struct dev_pm_ops ufs_sprd_pm_ops = { static struct platform_driver ufs_sprd_pltform = { .probe = ufs_sprd_probe, - .remove_new = ufs_sprd_remove, + .remove = ufs_sprd_remove, .driver = { .name = "ufshcd-sprd", .pm = &ufs_sprd_pm_ops, diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c index 0aca666d2199..9cfcaad23cf9 100644 --- a/drivers/ufs/host/ufshcd-pci.c +++ b/drivers/ufs/host/ufshcd-pci.c @@ -20,6 +20,8 @@ #include <linux/acpi.h> #include <linux/gpio/consumer.h> +#define MAX_SUPP_MAC 64 + struct ufs_host { void (*late_init)(struct ufs_hba *hba); }; @@ -446,6 +448,49 @@ static int ufs_intel_mtl_init(struct ufs_hba *hba) return ufs_intel_common_init(hba); } +static int ufs_qemu_get_hba_mac(struct ufs_hba *hba) +{ + return MAX_SUPP_MAC; +} + +static int ufs_qemu_mcq_config_resource(struct ufs_hba *hba) +{ + hba->mcq_base = hba->mmio_base + ufshcd_mcq_queue_cfg_addr(hba); + + return 0; +} + +static int 
ufs_qemu_op_runtime_config(struct ufs_hba *hba) +{ + struct ufshcd_mcq_opr_info_t *opr; + int i; + + u32 sqdao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_SQDAO, 0)); + u32 sqisao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_SQISAO, 0)); + u32 cqdao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_CQDAO, 0)); + u32 cqisao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_CQISAO, 0)); + + hba->mcq_opr[OPR_SQD].offset = sqdao; + hba->mcq_opr[OPR_SQIS].offset = sqisao; + hba->mcq_opr[OPR_CQD].offset = cqdao; + hba->mcq_opr[OPR_CQIS].offset = cqisao; + + for (i = 0; i < OPR_MAX; i++) { + opr = &hba->mcq_opr[i]; + opr->stride = 48; + opr->base = hba->mmio_base + opr->offset; + } + + return 0; +} + +static struct ufs_hba_variant_ops ufs_qemu_hba_vops = { + .name = "qemu-pci", + .get_hba_mac = ufs_qemu_get_hba_mac, + .mcq_config_resource = ufs_qemu_mcq_config_resource, + .op_runtime_config = ufs_qemu_op_runtime_config, +}; + static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = { .name = "intel-pci", .init = ufs_intel_common_init, @@ -517,7 +562,6 @@ static void ufshcd_pci_remove(struct pci_dev *pdev) pm_runtime_forbid(&pdev->dev); pm_runtime_get_noresume(&pdev->dev); ufshcd_remove(hba); - ufshcd_dealloc_host(hba); } /** @@ -543,14 +587,12 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev); - err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD); - if (err < 0) { + mmio_base = pcim_iomap_region(pdev, 0, UFSHCD); + if (IS_ERR(mmio_base)) { dev_err(&pdev->dev, "request and iomap failed\n"); - return err; + return PTR_ERR(mmio_base); } - mmio_base = pcim_iomap_table(pdev)[0]; - err = ufshcd_alloc_host(&pdev->dev, &hba); if (err) { dev_err(&pdev->dev, "Allocation failed\n"); @@ -562,7 +604,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = ufshcd_init(hba, mmio_base, pdev->irq); if (err) { dev_err(&pdev->dev, "Initialization failed\n"); - ufshcd_dealloc_host(hba); return err; } @@ -591,7 +632,8 @@ static const struct dev_pm_ops ufshcd_pci_pm_ops = { }; static const struct pci_device_id ufshcd_pci_tbl[] = { - { PCI_VENDOR_ID_REDHAT, 0x0013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_REDHAT, 0x0013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + (kernel_ulong_t)&ufs_qemu_hba_vops }, { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops }, { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops }, @@ -602,6 +644,7 @@ static const struct pci_device_id ufshcd_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, { PCI_VDEVICE(INTEL, 0xA847), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, { PCI_VDEVICE(INTEL, 0x7747), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, + { PCI_VDEVICE(INTEL, 0xE447), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, { } /* terminate list */ }; diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c index a3e69ecafd27..ffe5d1d2b215 100644 --- a/drivers/ufs/host/ufshcd-pltfrm.c +++ b/drivers/ufs/host/ufshcd-pltfrm.c @@ -31,8 +31,7 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba) const char *name; u32 *clkfreq = NULL; struct ufs_clk_info *clki; - int len = 0; - size_t sz = 0; + ssize_t sz = 0; if (!np) goto out; @@ -50,15 +49,12 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba) if (cnt <= 0) goto out; - if (!of_get_property(np, "freq-table-hz", &len)) { + sz = of_property_count_u32_elems(np, "freq-table-hz"); + if (sz <= 0) { dev_info(dev, "freq-table-hz 
property not specified\n"); goto out; } - if (len <= 0) - goto out; - - sz = len / sizeof(*clkfreq); if (sz != 2 * cnt) { dev_err(dev, "%s len mismatch\n", "freq-table-hz"); ret = -EINVAL; @@ -272,10 +268,10 @@ static int ufshcd_parse_operating_points(struct ufs_hba *hba) const char **clk_names; int cnt, i, ret; - if (!of_find_property(np, "operating-points-v2", NULL)) + if (!of_property_present(np, "operating-points-v2")) return 0; - if (of_find_property(np, "freq-table-hz", NULL)) { + if (of_property_present(np, "freq-table-hz")) { dev_err(dev, "%s: operating-points and freq-table-hz are incompatible\n", __func__); return -EINVAL; @@ -469,21 +465,17 @@ int ufshcd_pltfrm_init(struct platform_device *pdev, struct device *dev = &pdev->dev; mmio_base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(mmio_base)) { - err = PTR_ERR(mmio_base); - goto out; - } + if (IS_ERR(mmio_base)) + return PTR_ERR(mmio_base); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - err = irq; - goto out; - } + if (irq < 0) + return irq; err = ufshcd_alloc_host(dev, &hba); if (err) { dev_err(dev, "Allocation failed\n"); - goto out; + return err; } hba->vops = vops; @@ -492,13 +484,13 @@ int ufshcd_pltfrm_init(struct platform_device *pdev, if (err) { dev_err(dev, "%s: clock parse failed %d\n", __func__, err); - goto dealloc_host; + return err; } err = ufshcd_parse_regulator_info(hba); if (err) { dev_err(dev, "%s: regulator init failed %d\n", __func__, err); - goto dealloc_host; + return err; } ufshcd_init_lanes_per_dir(hba); @@ -506,28 +498,38 @@ int ufshcd_pltfrm_init(struct platform_device *pdev, err = ufshcd_parse_operating_points(hba); if (err) { dev_err(dev, "%s: OPP parse failed %d\n", __func__, err); - goto dealloc_host; + return err; } err = ufshcd_init(hba, mmio_base, irq); if (err) { dev_err_probe(dev, err, "Initialization failed with error %d\n", err); - goto dealloc_host; + return err; } pm_runtime_set_active(dev); pm_runtime_enable(dev); return 0; - -dealloc_host: - ufshcd_dealloc_host(hba); -out: - return err; } EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init); +/** + * ufshcd_pltfrm_remove - Remove ufshcd platform + * @pdev: pointer to Platform device handle + */ +void ufshcd_pltfrm_remove(struct platform_device *pdev) +{ + struct ufs_hba *hba = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&pdev->dev); + ufshcd_remove(hba); + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); +} +EXPORT_SYMBOL_GPL(ufshcd_pltfrm_remove); + MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>"); MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>"); MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver"); diff --git a/drivers/ufs/host/ufshcd-pltfrm.h b/drivers/ufs/host/ufshcd-pltfrm.h index df387be5216b..3017f8e8f93c 100644 --- a/drivers/ufs/host/ufshcd-pltfrm.h +++ b/drivers/ufs/host/ufshcd-pltfrm.h @@ -31,6 +31,7 @@ int ufshcd_negotiate_pwr_params(const struct ufs_host_params *host_params, void ufshcd_init_host_params(struct ufs_host_params *host_params); int ufshcd_pltfrm_init(struct platform_device *pdev, const struct ufs_hba_variant_ops *vops); +void ufshcd_pltfrm_remove(struct platform_device *pdev); int ufshcd_populate_vreg(struct device *dev, const char *name, struct ufs_vreg **out_vreg, bool skip_current); |
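/*
 * A minimal sketch (hypothetical "ufs_foo" glue driver and variant ops, not
 * taken from the patch) of the probe/remove shape that the conversions above
 * converge on: probe delegates to ufshcd_pltfrm_init() and remove to the new
 * ufshcd_pltfrm_remove(), which resumes the device, calls ufshcd_remove()
 * and drops the runtime PM references; .remove replaces .remove_new.
 */
static int ufs_foo_probe(struct platform_device *pdev)
{
	return ufshcd_pltfrm_init(pdev, &ufs_foo_vops);	/* hypothetical vops */
}

static void ufs_foo_remove(struct platform_device *pdev)
{
	ufshcd_pltfrm_remove(pdev);
}

static struct platform_driver ufs_foo_driver = {
	.probe	= ufs_foo_probe,
	.remove	= ufs_foo_remove,
	.driver	= {
		.name = "ufshcd-foo",
	},
};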
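/*
 * The ufs-qcom hunks above replace mb() barriers after register writes with
 * a read-back of the same register, e.g. (schematic, "val" is a placeholder):
 *
 *	ufshcd_writel(hba, val, REG_UFS_CFG2);
 *	ufshcd_readl(hba, REG_UFS_CFG2);
 *
 * The dummy read forces the preceding MMIO write to reach the controller
 * before any subsequent delay or dependent access, without paying for a
 * full memory barrier.
 */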