Diffstat (limited to 'drivers/ufs/host')
-rw-r--r--  drivers/ufs/host/ufs-exynos.c |   4
-rw-r--r--  drivers/ufs/host/ufs-qcom.c   | 165
-rw-r--r--  drivers/ufs/host/ufs-qcom.h   |   9
3 files changed, 138 insertions, 40 deletions
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index 3e545af536e5..f0adcd9dd553 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -1110,8 +1110,8 @@ static int exynos_ufs_post_link(struct ufs_hba *hba)
hci_writel(ufs, val, HCI_TXPRDT_ENTRY_SIZE);
hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_RXPRDT_ENTRY_SIZE);
- hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
- hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
+ hci_writel(ufs, BIT(hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
+ hci_writel(ufs, BIT(hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN);
if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)
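The switch from an open-coded shift to BIT() is behaviour-preserving: BIT(n) expands to (1UL << (n)), so BIT(hba->nutrs) - 1 still yields a mask with the low nutrs bits set, only in unsigned long arithmetic rather than signed int. A minimal standalone sketch (hypothetical helper and slot count, not part of the patch):

	#include <linux/bits.h>

	/* Mask with the low 'nutrs' bits set, e.g. nutrs = 8 -> 0xff */
	static u32 example_nexus_mask(u32 nutrs)
	{
		return BIT(nutrs) - 1;	/* same value as (1 << nutrs) - 1 */
	}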
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 37887ec68412..2a72e7c1d131 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -122,7 +122,9 @@ static const struct {
};
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
-static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq);
+static unsigned long ufs_qcom_opp_freq_to_clk_freq(struct ufs_hba *hba,
+ unsigned long freq, char *name);
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up, unsigned long freq);
static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
{
@@ -506,10 +508,9 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
if (ret)
return ret;
- if (phy->power_count) {
+ if (phy->power_count)
phy_power_off(phy);
- phy_exit(phy);
- }
+
/* phy initialization - calibrate the phy */
ret = phy_init(phy);
@@ -551,11 +552,32 @@ out_disable_phy:
*/
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
+ int err;
+
+ /* Enable UTP internal clock gating */
ufshcd_rmwl(hba, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2_CGC_EN_ALL,
REG_UFS_CFG2);
/* Ensure that HW clock gating is enabled before next operations */
ufshcd_readl(hba, REG_UFS_CFG2);
+
+ /* Enable Unipro internal clock gating */
+ err = ufshcd_dme_rmw(hba, DL_VS_CLK_CFG_MASK,
+ DL_VS_CLK_CFG_MASK, DL_VS_CLK_CFG);
+ if (err)
+ goto out;
+
+ err = ufshcd_dme_rmw(hba, PA_VS_CLK_CFG_REG_MASK,
+ PA_VS_CLK_CFG_REG_MASK, PA_VS_CLK_CFG_REG);
+ if (err)
+ goto out;
+
+ err = ufshcd_dme_rmw(hba, DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
+ DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
+ DME_VS_CORE_CLK_CTRL);
+out:
+ if (err)
+ dev_err(hba->dev, "Failed to enable HW clk gating\n");
}
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
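The new UniPro clock-gating setup goes through ufshcd_dme_rmw(), a read-modify-write of the DL_VS_CLK_CFG, PA_VS_CLK_CFG_REG and DME_VS_CORE_CLK_CTRL attributes. As a rough sketch of what such a read-modify-write amounts to, expressed with the long-standing ufshcd_dme_get()/ufshcd_dme_set() accessors (example_dme_rmw() is hypothetical, not the in-tree helper):

	#include <ufs/ufshcd.h>
	#include <ufs/ufshci.h>

	/* Hypothetical DME read-modify-write: clear 'mask', then set 'val & mask' */
	static int example_dme_rmw(struct ufs_hba *hba, u32 mask, u32 val, u32 attr)
	{
		u32 reg;
		int err;

		err = ufshcd_dme_get(hba, UIC_ARG_MIB(attr), &reg);
		if (err)
			return err;

		reg &= ~mask;
		reg |= (val & mask);

		return ufshcd_dme_set(hba, UIC_ARG_MIB(attr), reg);
	}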
@@ -597,13 +619,14 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
*
* @hba: host controller instance
* @is_pre_scale_up: flag to check if pre scale up condition.
+ * @freq: target opp freq
* Return: zero for success and non-zero in case of a failure.
*/
-static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up)
+static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up, unsigned long freq)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_clk_info *clki;
- unsigned long core_clk_rate = 0;
+ unsigned long clk_freq = 0;
u32 core_clk_cycles_per_us;
/*
@@ -615,22 +638,34 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up)
if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba))
return 0;
+ if (hba->use_pm_opp && freq != ULONG_MAX) {
+ clk_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk");
+ if (clk_freq)
+ goto cfg_timers;
+ }
+
list_for_each_entry(clki, &hba->clk_list_head, list) {
if (!strcmp(clki->name, "core_clk")) {
+ if (freq == ULONG_MAX) {
+ clk_freq = clki->max_freq;
+ break;
+ }
+
if (is_pre_scale_up)
- core_clk_rate = clki->max_freq;
+ clk_freq = clki->max_freq;
else
- core_clk_rate = clk_get_rate(clki->clk);
+ clk_freq = clk_get_rate(clki->clk);
break;
}
}
+cfg_timers:
/* If frequency is smaller than 1MHz, set to 1MHz */
- if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
- core_clk_rate = DEFAULT_CLK_RATE_HZ;
+ if (clk_freq < DEFAULT_CLK_RATE_HZ)
+ clk_freq = DEFAULT_CLK_RATE_HZ;
- core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
+ core_clk_cycles_per_us = clk_freq / USEC_PER_SEC;
if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
/*
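REG_UFS_SYS1CLK_1US simply holds the number of core_clk cycles per microsecond, hence the divide by USEC_PER_SEC, and rates below DEFAULT_CLK_RATE_HZ (1 MHz) are clamped first so the result is never zero. A worked sketch with hypothetical numbers (201.5 MHz gives 201 cycles per us):

	/* Hypothetical illustration of the cycles-per-us calculation above */
	static u32 example_cycles_per_us(unsigned long clk_freq)
	{
		if (clk_freq < 1000000)		/* DEFAULT_CLK_RATE_HZ */
			clk_freq = 1000000;

		return clk_freq / 1000000;	/* USEC_PER_SEC; 201500000 -> 201 */
	}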
@@ -650,13 +685,13 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
- if (ufs_qcom_cfg_timers(hba, false)) {
+ if (ufs_qcom_cfg_timers(hba, false, ULONG_MAX)) {
dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
__func__);
return -EINVAL;
}
- err = ufs_qcom_set_core_clk_ctrl(hba, ULONG_MAX);
+ err = ufs_qcom_set_core_clk_ctrl(hba, true, ULONG_MAX);
if (err)
dev_err(hba->dev, "cfg core clk ctrl failed\n");
/*
@@ -928,17 +963,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
break;
case POST_CHANGE:
- if (ufs_qcom_cfg_timers(hba, false)) {
- dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
- __func__);
- /*
- * we return error code at the end of the routine,
- * but continue to configure UFS_PHY_TX_LANE_ENABLE
- * and bus voting as usual
- */
- ret = -EINVAL;
- }
-
/* cache the power mode parameters to use internally */
memcpy(&host->dev_req_params,
dev_req_params, sizeof(*dev_req_params));
@@ -1414,29 +1438,46 @@ static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba,
return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg);
}
-static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq)
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up, unsigned long freq)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct list_head *head = &hba->clk_list_head;
struct ufs_clk_info *clki;
u32 cycles_in_1us = 0;
u32 core_clk_ctrl_reg;
+ unsigned long clk_freq;
int err;
+ if (hba->use_pm_opp && freq != ULONG_MAX) {
+ clk_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk_unipro");
+ if (clk_freq) {
+ cycles_in_1us = ceil(clk_freq, HZ_PER_MHZ);
+ goto set_core_clk_ctrl;
+ }
+ }
+
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk) &&
!strcmp(clki->name, "core_clk_unipro")) {
- if (!clki->max_freq)
+ if (!clki->max_freq) {
cycles_in_1us = 150; /* default for backwards compatibility */
- else if (freq == ULONG_MAX)
+ break;
+ }
+
+ if (freq == ULONG_MAX) {
cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ);
- else
- cycles_in_1us = ceil(freq, HZ_PER_MHZ);
+ break;
+ }
+ if (is_scale_up)
+ cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ);
+ else
+ cycles_in_1us = ceil(clk_get_rate(clki->clk), HZ_PER_MHZ);
break;
}
}
+set_core_clk_ctrl:
err = ufshcd_dme_get(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
&core_clk_ctrl_reg);
@@ -1473,13 +1514,13 @@ static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba, unsigned long f
{
int ret;
- ret = ufs_qcom_cfg_timers(hba, true);
+ ret = ufs_qcom_cfg_timers(hba, true, freq);
if (ret) {
dev_err(hba->dev, "%s ufs cfg timer failed\n", __func__);
return ret;
}
/* set unipro core clock attributes and clear clock divider */
- return ufs_qcom_set_core_clk_ctrl(hba, freq);
+ return ufs_qcom_set_core_clk_ctrl(hba, true, freq);
}
static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
@@ -1510,8 +1551,15 @@ static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba, unsigned long freq)
{
+ int ret;
+
+ ret = ufs_qcom_cfg_timers(hba, false, freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", __func__);
+ return ret;
+ }
/* set unipro core clock attributes and clear clock divider */
- return ufs_qcom_set_core_clk_ctrl(hba, freq);
+ return ufs_qcom_set_core_clk_ctrl(hba, false, freq);
}
static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
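Taken together, the timers are now reprogrammed on both sides of a frequency change: before scaling up (ufs_qcom_clk_scale_up_pre_change()) and after scaling down (ufs_qcom_clk_scale_down_post_change()). A simplified sketch of how these helpers are dispatched from the scaling notify path (structure assumed for illustration, not the exact in-tree body):

	/* Assumed dispatch structure, for illustration only */
	static int example_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
					    unsigned long freq,
					    enum ufs_notify_change_status status)
	{
		if (status == PRE_CHANGE)
			return scale_up ? ufs_qcom_clk_scale_up_pre_change(hba, freq)
					: ufs_qcom_clk_scale_down_pre_change(hba);

		return scale_up ? ufs_qcom_clk_scale_up_post_change(hba)
				: ufs_qcom_clk_scale_down_post_change(hba, freq);
	}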
@@ -2082,8 +2130,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
retain_and_null_ptr(qi);
- if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
- host->hw_ver.step == 0) {
+ if (host->hw_ver.major >= 6) {
ufshcd_rmwl(hba, ESI_VEC_MASK, FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
REG_UFS_CFG3);
}
@@ -2092,11 +2139,53 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
return 0;
}
+static unsigned long ufs_qcom_opp_freq_to_clk_freq(struct ufs_hba *hba,
+ unsigned long freq, char *name)
+{
+ struct ufs_clk_info *clki;
+ struct dev_pm_opp *opp;
+ unsigned long clk_freq;
+ int idx = 0;
+ bool found = false;
+
+ opp = dev_pm_opp_find_freq_exact_indexed(hba->dev, freq, 0, true);
+ if (IS_ERR(opp)) {
+ dev_err(hba->dev, "Failed to find OPP for exact frequency %lu\n", freq);
+ return 0;
+ }
+
+ list_for_each_entry(clki, &hba->clk_list_head, list) {
+ if (!strcmp(clki->name, name)) {
+ found = true;
+ break;
+ }
+
+ idx++;
+ }
+
+ if (!found) {
+ dev_err(hba->dev, "Failed to find clock '%s' in clk list\n", name);
+ dev_pm_opp_put(opp);
+ return 0;
+ }
+
+ clk_freq = dev_pm_opp_get_freq_indexed(opp, idx);
+
+ dev_pm_opp_put(opp);
+
+ return clk_freq;
+}
+
static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
{
- u32 gear = 0;
+ u32 gear = UFS_HS_DONT_CHANGE;
+ unsigned long unipro_freq;
+
+ if (!hba->use_pm_opp)
+ return gear;
- switch (freq) {
+ unipro_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk_unipro");
+ switch (unipro_freq) {
case 403000000:
gear = UFS_HS_G5;
break;
@@ -2116,10 +2205,10 @@ static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
break;
default:
dev_err(hba->dev, "%s: Unsupported clock freq : %lu\n", __func__, freq);
- break;
+ return UFS_HS_DONT_CHANGE;
}
- return gear;
+ return min_t(u32, gear, hba->max_pwr_info.info.gear_rx);
}
/*
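The min_t() clamp means the OPP-derived gear can only be lowered, never raised, past what the link actually negotiated, and an unknown frequency now reports UFS_HS_DONT_CHANGE instead of gear 0. A tiny sketch with hypothetical values (example_clamped_gear() is illustrative, not in the driver):

	/* e.g. the OPP maps to UFS_HS_G5 but gear_rx is UFS_HS_G4 -> returns G4 */
	static u32 example_clamped_gear(struct ufs_hba *hba, u32 opp_gear)
	{
		return min_t(u32, opp_gear, hba->max_pwr_info.info.gear_rx);
	}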
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 0a5cfc2dd4f7..e0e129af7c16 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -24,6 +24,15 @@
#define UFS_QCOM_LIMIT_HS_RATE PA_HS_MODE_B
+/* bit and mask definitions for PA_VS_CLK_CFG_REG attribute */
+#define PA_VS_CLK_CFG_REG 0x9004
+#define PA_VS_CLK_CFG_REG_MASK GENMASK(8, 0)
+
+/* bit and mask definitions for DL_VS_CLK_CFG attribute */
+#define DL_VS_CLK_CFG 0xA00B
+#define DL_VS_CLK_CFG_MASK GENMASK(9, 0)
+#define DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN BIT(9)
+
/* QCOM UFS host controller vendor specific registers */
enum {
REG_UFS_SYS1CLK_1US = 0xC0,
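For reference, the new masks span the attribute bits the driver touches: GENMASK(8, 0) is bits 8..0 (0x1ff) for PA_VS_CLK_CFG_REG, GENMASK(9, 0) is bits 9..0 (0x3ff) for DL_VS_CLK_CFG, and the DME HW clock-gating enable is bit 9 of DME_VS_CORE_CLK_CTRL. A minimal standalone check of those values (hypothetical snippet, not part of the header):

	#include <linux/bits.h>
	#include <linux/build_bug.h>

	static_assert(GENMASK(8, 0) == 0x1ff);	/* PA_VS_CLK_CFG_REG_MASK */
	static_assert(GENMASK(9, 0) == 0x3ff);	/* DL_VS_CLK_CFG_MASK */
	static_assert(BIT(9) == 0x200);		/* DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN */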