Diffstat (limited to 'drivers/ufs')
-rw-r--r--  drivers/ufs/core/ufs-sysfs.c   | 194
-rw-r--r--  drivers/ufs/core/ufshcd.c      |  84
-rw-r--r--  drivers/ufs/host/ufs-exynos.c  |   4
-rw-r--r--  drivers/ufs/host/ufs-qcom.c    | 165
-rw-r--r--  drivers/ufs/host/ufs-qcom.h    |   9
5 files changed, 400 insertions, 56 deletions
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c index de8b6acd4058..00948378a719 100644 --- a/drivers/ufs/core/ufs-sysfs.c +++ b/drivers/ufs/core/ufs-sysfs.c @@ -87,6 +87,23 @@ static const char *ufs_wb_resize_status_to_string(enum wb_resize_status status) } } +static const char * const ufs_hid_states[] = { + [HID_IDLE] = "idle", + [ANALYSIS_IN_PROGRESS] = "analysis_in_progress", + [DEFRAG_REQUIRED] = "defrag_required", + [DEFRAG_IN_PROGRESS] = "defrag_in_progress", + [DEFRAG_COMPLETED] = "defrag_completed", + [DEFRAG_NOT_REQUIRED] = "defrag_not_required", +}; + +static const char *ufs_hid_state_to_string(enum ufs_hid_state state) +{ + if (state < NUM_UFS_HID_STATES) + return ufs_hid_states[state]; + + return "unknown"; +} + static const char *ufshcd_uic_link_state_to_string( enum uic_link_state state) { @@ -1763,6 +1780,178 @@ static const struct attribute_group ufs_sysfs_attributes_group = { .attrs = ufs_sysfs_attributes, }; +static int hid_query_attr(struct ufs_hba *hba, enum query_opcode opcode, + enum attr_idn idn, u32 *attr_val) +{ + int ret; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + up(&hba->host_sem); + return -EBUSY; + } + + ufshcd_rpm_get_sync(hba); + ret = ufshcd_query_attr(hba, opcode, idn, 0, 0, attr_val); + ufshcd_rpm_put_sync(hba); + + up(&hba->host_sem); + return ret; +} + +static ssize_t analysis_trigger_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int mode; + int ret; + + if (sysfs_streq(buf, "enable")) + mode = HID_ANALYSIS_ENABLE; + else if (sysfs_streq(buf, "disable")) + mode = HID_ANALYSIS_AND_DEFRAG_DISABLE; + else + return -EINVAL; + + ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, &mode); + + return ret < 0 ? ret : count; +} + +static DEVICE_ATTR_WO(analysis_trigger); + +static ssize_t defrag_trigger_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int mode; + int ret; + + if (sysfs_streq(buf, "enable")) + mode = HID_ANALYSIS_AND_DEFRAG_ENABLE; + else if (sysfs_streq(buf, "disable")) + mode = HID_ANALYSIS_AND_DEFRAG_DISABLE; + else + return -EINVAL; + + ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, &mode); + + return ret < 0 ? 
ret : count; +} + +static DEVICE_ATTR_WO(defrag_trigger); + +static ssize_t fragmented_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + u32 value; + int ret; + + ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_HID_AVAILABLE_SIZE, &value); + if (ret) + return ret; + + return sysfs_emit(buf, "%u\n", value); +} + +static DEVICE_ATTR_RO(fragmented_size); + +static ssize_t defrag_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + u32 value; + int ret; + + ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_HID_SIZE, &value); + if (ret) + return ret; + + return sysfs_emit(buf, "%u\n", value); +} + +static ssize_t defrag_size_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + u32 value; + int ret; + + if (kstrtou32(buf, 0, &value)) + return -EINVAL; + + ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_HID_SIZE, &value); + + return ret < 0 ? ret : count; +} + +static DEVICE_ATTR_RW(defrag_size); + +static ssize_t progress_ratio_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + u32 value; + int ret; + + ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_HID_PROGRESS_RATIO, &value); + if (ret) + return ret; + + return sysfs_emit(buf, "%u\n", value); +} + +static DEVICE_ATTR_RO(progress_ratio); + +static ssize_t state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + u32 value; + int ret; + + ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_HID_STATE, &value); + if (ret) + return ret; + + return sysfs_emit(buf, "%s\n", ufs_hid_state_to_string(value)); +} + +static DEVICE_ATTR_RO(state); + +static struct attribute *ufs_sysfs_hid[] = { + &dev_attr_analysis_trigger.attr, + &dev_attr_defrag_trigger.attr, + &dev_attr_fragmented_size.attr, + &dev_attr_defrag_size.attr, + &dev_attr_progress_ratio.attr, + &dev_attr_state.attr, + NULL, +}; + +static umode_t ufs_sysfs_hid_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct ufs_hba *hba = dev_get_drvdata(dev); + + return hba->dev_info.hid_sup ? 
attr->mode : 0; +} + +static const struct attribute_group ufs_sysfs_hid_group = { + .name = "hid", + .attrs = ufs_sysfs_hid, + .is_visible = ufs_sysfs_hid_is_visible, +}; + static const struct attribute_group *ufs_sysfs_groups[] = { &ufs_sysfs_default_group, &ufs_sysfs_capabilities_group, @@ -1777,6 +1966,7 @@ static const struct attribute_group *ufs_sysfs_groups[] = { &ufs_sysfs_string_descriptors_group, &ufs_sysfs_flags_group, &ufs_sysfs_attributes_group, + &ufs_sysfs_hid_group, NULL, }; @@ -1808,7 +1998,7 @@ UFS_UNIT_DESC_PARAM(logical_block_size, _LOGICAL_BLK_SIZE, 1); UFS_UNIT_DESC_PARAM(logical_block_count, _LOGICAL_BLK_COUNT, 8); UFS_UNIT_DESC_PARAM(erase_block_size, _ERASE_BLK_SIZE, 4); UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1); -UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8); +UFS_UNIT_DESC_PARAM(physical_memory_resource_count, _PHY_MEM_RSRC_CNT, 8); UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2); UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1); UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4); @@ -1825,7 +2015,7 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = { &dev_attr_logical_block_count.attr, &dev_attr_erase_block_size.attr, &dev_attr_provisioning_type.attr, - &dev_attr_physical_memory_resourse_count.attr, + &dev_attr_physical_memory_resource_count.attr, &dev_attr_context_capabilities.attr, &dev_attr_large_unit_granularity.attr, &dev_attr_wb_buf_alloc_units.attr, diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 76cedd30c274..fd8015ed36a4 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -1397,6 +1397,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us) * make sure that there are no outstanding requests when * clock scaling is in progress */ + mutex_lock(&hba->host->scan_mutex); blk_mq_quiesce_tagset(&hba->host->tag_set); mutex_lock(&hba->wb_mutex); down_write(&hba->clk_scaling_lock); @@ -1407,6 +1408,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us) up_write(&hba->clk_scaling_lock); mutex_unlock(&hba->wb_mutex); blk_mq_unquiesce_tagset(&hba->host->tag_set); + mutex_unlock(&hba->host->scan_mutex); goto out; } @@ -1428,6 +1430,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err) mutex_unlock(&hba->wb_mutex); blk_mq_unquiesce_tagset(&hba->host->tag_set); + mutex_unlock(&hba->host->scan_mutex); ufshcd_release(hba); } @@ -2563,7 +2566,7 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) * @hba: per adapter instance * @uic_cmd: UIC command * - * Return: 0 only if success. + * Return: 0 if successful; < 0 upon failure. 
*/ static int __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) @@ -2823,8 +2826,6 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, /* Copy the Descriptor */ if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) memcpy(ucd_req_ptr + 1, query->descriptor, len); - - memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); } static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) @@ -2837,8 +2838,6 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) .transaction_code = UPIU_TRANSACTION_NOP_OUT, .task_tag = lrbp->task_tag, }; - - memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); } /** @@ -2864,6 +2863,8 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba, else ret = -EINVAL; + memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); + return ret; } @@ -3071,6 +3072,9 @@ static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, hba->dev_cmd.type = cmd_type; } +/* + * Return: 0 upon success; < 0 upon failure. + */ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag) { @@ -3183,9 +3187,13 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) break; } + WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } +/* + * Return: 0 upon success; < 0 upon failure. + */ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int max_timeout) { @@ -3260,6 +3268,7 @@ retry: } } + WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3277,6 +3286,9 @@ static void ufshcd_dev_man_unlock(struct ufs_hba *hba) ufshcd_release(hba); } +/* + * Return: 0 upon success; < 0 upon failure. + */ static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, const u32 tag, int timeout) { @@ -3364,6 +3376,7 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba, dev_err(hba->dev, "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n", __func__, opcode, idn, ret, retries); + WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret); return ret; } @@ -3375,7 +3388,7 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba, * @index: flag index to access * @flag_res: the flag value after the query request completes * - * Return: 0 for success, non-zero in case of failure. + * Return: 0 for success; < 0 upon failure. */ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res) @@ -3431,6 +3444,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, out_unlock: ufshcd_dev_man_unlock(hba); + WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3443,7 +3457,7 @@ out_unlock: * @selector: selector field * @attr_val: the attribute value after the query request completes * - * Return: 0 for success, non-zero in case of failure. + * Return: 0 upon success; < 0 upon failure. */ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) @@ -3492,6 +3506,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, out_unlock: ufshcd_dev_man_unlock(hba); + WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3506,7 +3521,7 @@ out_unlock: * @attr_val: the attribute value after the query request * completes * - * Return: 0 for success, non-zero in case of failure. + * Return: 0 for success; < 0 upon failure. 
*/ int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, @@ -3529,9 +3544,13 @@ int ufshcd_query_attr_retry(struct ufs_hba *hba, dev_err(hba->dev, "%s: query attribute, idn %d, failed with error %d after %d retries\n", __func__, idn, ret, QUERY_REQ_RETRIES); + WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret); return ret; } +/* + * Return: 0 if successful; < 0 upon failure. + */ static int __ufshcd_query_descriptor(struct ufs_hba *hba, enum query_opcode opcode, enum desc_idn idn, u8 index, u8 selector, u8 *desc_buf, int *buf_len) @@ -3589,6 +3608,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba, out_unlock: hba->dev_cmd.query.descriptor = NULL; ufshcd_dev_man_unlock(hba); + WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3605,7 +3625,7 @@ out_unlock: * The buf_len parameter will contain, on return, the length parameter * received on the response. * - * Return: 0 for success, non-zero in case of failure. + * Return: 0 for success; < 0 upon failure. */ int ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode, @@ -3623,6 +3643,7 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba, break; } + WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3635,7 +3656,7 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba, * @param_read_buf: pointer to buffer where parameter would be read * @param_size: sizeof(param_read_buf) * - * Return: 0 in case of success, non-zero otherwise. + * Return: 0 in case of success; < 0 upon failure. */ int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id, @@ -3702,6 +3723,7 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, out: if (is_kmalloc) kfree(desc_buf); + WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret); return ret; } @@ -3815,7 +3837,7 @@ out: * @param_read_buf: pointer to buffer where parameter would be read * @param_size: sizeof(param_read_buf) * - * Return: 0 in case of success, non-zero otherwise. + * Return: 0 in case of success; < 0 upon failure. */ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, int lun, @@ -4251,6 +4273,30 @@ out: EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); /** + * ufshcd_dme_rmw - get modify set a DME attribute + * @hba: per adapter instance + * @mask: indicates which bits to clear from the value that has been read + * @val: actual value to write + * @attr: dme attribute + */ +int ufshcd_dme_rmw(struct ufs_hba *hba, u32 mask, + u32 val, u32 attr) +{ + u32 cfg = 0; + int err; + + err = ufshcd_dme_get(hba, UIC_ARG_MIB(attr), &cfg); + if (err) + return err; + + cfg &= ~mask; + cfg |= (val & mask); + + return ufshcd_dme_set(hba, UIC_ARG_MIB(attr), cfg); +} +EXPORT_SYMBOL_GPL(ufshcd_dme_rmw); + +/** * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power * state) and waits for it to take effect. * @@ -4793,7 +4839,7 @@ out: * 3. Program UTRL and UTMRL base address * 4. Configure run-stop-registers * - * Return: 0 on success, non-zero value on failure. + * Return: 0 if successful; < 0 upon failure. 
*/ int ufshcd_make_hba_operational(struct ufs_hba *hba) { @@ -6620,9 +6666,14 @@ static void ufshcd_err_handler(struct work_struct *work) up(&hba->host_sem); return; } - ufshcd_set_eh_in_progress(hba); spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_err_handling_prepare(hba); + + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_set_eh_in_progress(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + /* Complete requests that have door-bell cleared by h/w */ ufshcd_complete_requests(hba, false); spin_lock_irqsave(hba->host->host_lock, flags); @@ -7799,7 +7850,8 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) hba->silence_err_logs = false; /* scale up clocks to max frequency before full reinitialization */ - ufshcd_scale_clks(hba, ULONG_MAX, true); + if (ufshcd_is_clkscaling_supported(hba)) + ufshcd_scale_clks(hba, ULONG_MAX, true); err = ufshcd_hba_enable(hba); @@ -8411,6 +8463,10 @@ static int ufs_get_device_desc(struct ufs_hba *hba) dev_info->rtt_cap = desc_buf[DEVICE_DESC_PARAM_RTT_CAP]; + dev_info->hid_sup = get_unaligned_be32(desc_buf + + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP) & + UFS_DEV_HID_SUPPORT; + model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; err = ufshcd_read_string_desc(hba, model_index, diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c index 3e545af536e5..f0adcd9dd553 100644 --- a/drivers/ufs/host/ufs-exynos.c +++ b/drivers/ufs/host/ufs-exynos.c @@ -1110,8 +1110,8 @@ static int exynos_ufs_post_link(struct ufs_hba *hba) hci_writel(ufs, val, HCI_TXPRDT_ENTRY_SIZE); hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_RXPRDT_ENTRY_SIZE); - hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE); - hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE); + hci_writel(ufs, BIT(hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE); + hci_writel(ufs, BIT(hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE); hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN); if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB) diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 37887ec68412..2a72e7c1d131 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -122,7 +122,9 @@ static const struct { }; static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host); -static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq); +static unsigned long ufs_qcom_opp_freq_to_clk_freq(struct ufs_hba *hba, + unsigned long freq, char *name); +static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up, unsigned long freq); static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd) { @@ -506,10 +508,9 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) if (ret) return ret; - if (phy->power_count) { + if (phy->power_count) phy_power_off(phy); - phy_exit(phy); - } + /* phy initialization - calibrate the phy */ ret = phy_init(phy); @@ -551,11 +552,32 @@ out_disable_phy: */ static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba) { + int err; + + /* Enable UTP internal clock gating */ ufshcd_rmwl(hba, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2); /* Ensure that HW clock gating is enabled before next operations */ ufshcd_readl(hba, REG_UFS_CFG2); + + /* Enable Unipro internal clock gating */ + err = ufshcd_dme_rmw(hba, DL_VS_CLK_CFG_MASK, + DL_VS_CLK_CFG_MASK, DL_VS_CLK_CFG); + if (err) + goto out; + + err = ufshcd_dme_rmw(hba, PA_VS_CLK_CFG_REG_MASK, + PA_VS_CLK_CFG_REG_MASK, PA_VS_CLK_CFG_REG); + if (err) + goto out; + 
+ err = ufshcd_dme_rmw(hba, DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN, + DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN, + DME_VS_CORE_CLK_CTRL); +out: + if (err) + dev_err(hba->dev, "hw clk gating enabled failed\n"); } static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, @@ -597,13 +619,14 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, * * @hba: host controller instance * @is_pre_scale_up: flag to check if pre scale up condition. + * @freq: target opp freq * Return: zero for success and non-zero in case of a failure. */ -static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up) +static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up, unsigned long freq) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); struct ufs_clk_info *clki; - unsigned long core_clk_rate = 0; + unsigned long clk_freq = 0; u32 core_clk_cycles_per_us; /* @@ -615,22 +638,34 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up) if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba)) return 0; + if (hba->use_pm_opp && freq != ULONG_MAX) { + clk_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk"); + if (clk_freq) + goto cfg_timers; + } + list_for_each_entry(clki, &hba->clk_list_head, list) { if (!strcmp(clki->name, "core_clk")) { + if (freq == ULONG_MAX) { + clk_freq = clki->max_freq; + break; + } + if (is_pre_scale_up) - core_clk_rate = clki->max_freq; + clk_freq = clki->max_freq; else - core_clk_rate = clk_get_rate(clki->clk); + clk_freq = clk_get_rate(clki->clk); break; } } +cfg_timers: /* If frequency is smaller than 1MHz, set to 1MHz */ - if (core_clk_rate < DEFAULT_CLK_RATE_HZ) - core_clk_rate = DEFAULT_CLK_RATE_HZ; + if (clk_freq < DEFAULT_CLK_RATE_HZ) + clk_freq = DEFAULT_CLK_RATE_HZ; - core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC; + core_clk_cycles_per_us = clk_freq / USEC_PER_SEC; if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) { ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US); /* @@ -650,13 +685,13 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, switch (status) { case PRE_CHANGE: - if (ufs_qcom_cfg_timers(hba, false)) { + if (ufs_qcom_cfg_timers(hba, false, ULONG_MAX)) { dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", __func__); return -EINVAL; } - err = ufs_qcom_set_core_clk_ctrl(hba, ULONG_MAX); + err = ufs_qcom_set_core_clk_ctrl(hba, true, ULONG_MAX); if (err) dev_err(hba->dev, "cfg core clk ctrl failed\n"); /* @@ -928,17 +963,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, break; case POST_CHANGE: - if (ufs_qcom_cfg_timers(hba, false)) { - dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", - __func__); - /* - * we return error code at the end of the routine, - * but continue to configure UFS_PHY_TX_LANE_ENABLE - * and bus voting as usual - */ - ret = -EINVAL; - } - /* cache the power mode parameters to use internally */ memcpy(&host->dev_req_params, dev_req_params, sizeof(*dev_req_params)); @@ -1414,29 +1438,46 @@ static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba, return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg); } -static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq) +static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up, unsigned long freq) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); struct list_head *head = &hba->clk_list_head; struct ufs_clk_info *clki; u32 cycles_in_1us = 0; u32 core_clk_ctrl_reg; + unsigned long clk_freq; int err; + if 
(hba->use_pm_opp && freq != ULONG_MAX) { + clk_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk_unipro"); + if (clk_freq) { + cycles_in_1us = ceil(clk_freq, HZ_PER_MHZ); + goto set_core_clk_ctrl; + } + } + list_for_each_entry(clki, head, list) { if (!IS_ERR_OR_NULL(clki->clk) && !strcmp(clki->name, "core_clk_unipro")) { - if (!clki->max_freq) + if (!clki->max_freq) { cycles_in_1us = 150; /* default for backwards compatibility */ - else if (freq == ULONG_MAX) + break; + } + + if (freq == ULONG_MAX) { cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ); - else - cycles_in_1us = ceil(freq, HZ_PER_MHZ); + break; + } + if (is_scale_up) + cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ); + else + cycles_in_1us = ceil(clk_get_rate(clki->clk), HZ_PER_MHZ); break; } } +set_core_clk_ctrl: err = ufshcd_dme_get(hba, UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), &core_clk_ctrl_reg); @@ -1473,13 +1514,13 @@ static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba, unsigned long f { int ret; - ret = ufs_qcom_cfg_timers(hba, true); + ret = ufs_qcom_cfg_timers(hba, true, freq); if (ret) { dev_err(hba->dev, "%s ufs cfg timer failed\n", __func__); return ret; } /* set unipro core clock attributes and clear clock divider */ - return ufs_qcom_set_core_clk_ctrl(hba, freq); + return ufs_qcom_set_core_clk_ctrl(hba, true, freq); } static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba) @@ -1510,8 +1551,15 @@ static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba) static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba, unsigned long freq) { + int ret; + + ret = ufs_qcom_cfg_timers(hba, false, freq); + if (ret) { + dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", __func__); + return ret; + } /* set unipro core clock attributes and clear clock divider */ - return ufs_qcom_set_core_clk_ctrl(hba, freq); + return ufs_qcom_set_core_clk_ctrl(hba, false, freq); } static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, bool scale_up, @@ -2082,8 +2130,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba) retain_and_null_ptr(qi); - if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 && - host->hw_ver.step == 0) { + if (host->hw_ver.major >= 6) { ufshcd_rmwl(hba, ESI_VEC_MASK, FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1), REG_UFS_CFG3); } @@ -2092,11 +2139,53 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba) return 0; } +static unsigned long ufs_qcom_opp_freq_to_clk_freq(struct ufs_hba *hba, + unsigned long freq, char *name) +{ + struct ufs_clk_info *clki; + struct dev_pm_opp *opp; + unsigned long clk_freq; + int idx = 0; + bool found = false; + + opp = dev_pm_opp_find_freq_exact_indexed(hba->dev, freq, 0, true); + if (IS_ERR(opp)) { + dev_err(hba->dev, "Failed to find OPP for exact frequency %lu\n", freq); + return 0; + } + + list_for_each_entry(clki, &hba->clk_list_head, list) { + if (!strcmp(clki->name, name)) { + found = true; + break; + } + + idx++; + } + + if (!found) { + dev_err(hba->dev, "Failed to find clock '%s' in clk list\n", name); + dev_pm_opp_put(opp); + return 0; + } + + clk_freq = dev_pm_opp_get_freq_indexed(opp, idx); + + dev_pm_opp_put(opp); + + return clk_freq; +} + static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq) { - u32 gear = 0; + u32 gear = UFS_HS_DONT_CHANGE; + unsigned long unipro_freq; + + if (!hba->use_pm_opp) + return gear; - switch (freq) { + unipro_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk_unipro"); + switch (unipro_freq) { case 403000000: gear = UFS_HS_G5; break; @@ -2116,10 +2205,10 @@ static u32 
ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
 		break;
 	default:
 		dev_err(hba->dev, "%s: Unsupported clock freq : %lu\n", __func__, freq);
-		break;
+		return UFS_HS_DONT_CHANGE;
 	}
 
-	return gear;
+	return min_t(u32, gear, hba->max_pwr_info.info.gear_rx);
 }
 
 /*
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 0a5cfc2dd4f7..e0e129af7c16 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -24,6 +24,15 @@
 
 #define UFS_QCOM_LIMIT_HS_RATE		PA_HS_MODE_B
 
+/* bit and mask definitions for PA_VS_CLK_CFG_REG attribute */
+#define PA_VS_CLK_CFG_REG		0x9004
+#define PA_VS_CLK_CFG_REG_MASK		GENMASK(8, 0)
+
+/* bit and mask definitions for DL_VS_CLK_CFG attribute */
+#define DL_VS_CLK_CFG			0xA00B
+#define DL_VS_CLK_CFG_MASK		GENMASK(9, 0)
+#define DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN	BIT(9)
+
 /* QCOM UFS host controller vendor specific registers */
 enum {
 	REG_UFS_SYS1CLK_1US	= 0xC0,
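
The new "hid" attribute group added in ufs-sysfs.c is plain sysfs, so the Host Initiated Defrag flow can be driven from userspace with ordinary file I/O. The sketch below is illustrative only: the attribute names and the accepted "enable"/"disable" and state strings come from the patch itself, but the sysfs parent path depends on how the controller is probed on a given platform, and the "1d84000.ufshc" device name is a made-up example. Note also that the group is only visible when the device descriptor advertises HID support (ufs_sysfs_hid_is_visible checks hba->dev_info.hid_sup, populated from the extended UFS feature bits in ufs_get_device_desc).

/*
 * Minimal userspace sketch for the HID sysfs group.
 * HID_DIR is an assumption: the real parent path and device
 * name are platform-specific.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define HID_DIR "/sys/bus/platform/devices/1d84000.ufshc/hid"

static int hid_write(const char *attr, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), HID_DIR "/%s", attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

static int hid_read(const char *attr, char *buf, size_t len)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), HID_DIR "/%s", attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';	/* strip trailing newline */
	return 0;
}

int main(void)
{
	char state[64];

	/* Start fragmentation analysis (maps to HID_ANALYSIS_ENABLE). */
	if (hid_write("analysis_trigger", "enable"))
		return 1;

	/* Poll the device-reported state until analysis finishes. */
	do {
		sleep(1);
		if (hid_read("state", state, sizeof(state)))
			return 1;
	} while (!strcmp(state, "analysis_in_progress"));

	/* If the device asks for it, kick off the actual defrag. */
	if (!strcmp(state, "defrag_required") &&
	    hid_write("defrag_trigger", "enable"))
		return 1;

	printf("HID state: %s\n", state);
	return 0;
}

While a defrag is running, progress_ratio and fragmented_size can be read the same way; all six attributes funnel through the driver's hid_query_attr() helper, which takes host_sem and a runtime-PM reference around each query UPIU.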