Diffstat (limited to 'drivers/ufs/host/ufs-mediatek.c')
-rw-r--r--  drivers/ufs/host/ufs-mediatek.c  804
1 file changed, 690 insertions, 114 deletions
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 06ab1e5e8b6f..ecbbf52bf734 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -29,6 +29,7 @@
#include "ufs-mediatek-sip.h"
static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
+static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up);
#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"
@@ -40,8 +41,7 @@ static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
{ .wmanufacturerid = UFS_ANY_VENDOR,
.model = UFS_ANY_MODEL,
- .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
+ .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
.model = "H9HQ21AFAMZDAR",
.quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
@@ -50,6 +50,7 @@ static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
static const struct of_device_id ufs_mtk_of_match[] = {
{ .compatible = "mediatek,mt8183-ufshci" },
+ { .compatible = "mediatek,mt8195-ufshci" },
{},
};
MODULE_DEVICE_TABLE(of, ufs_mtk_of_match);
@@ -96,49 +97,59 @@ static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
+ return host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
}
static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
+ return host->caps & UFS_MTK_CAP_VA09_PWR_CTRL;
}
static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
+ return host->caps & UFS_MTK_CAP_BROKEN_VCC;
}
static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
+ return host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO;
}
static bool ufs_mtk_is_tx_skew_fix(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return (host->caps & UFS_MTK_CAP_TX_SKEW_FIX);
+ return host->caps & UFS_MTK_CAP_TX_SKEW_FIX;
}
static bool ufs_mtk_is_rtff_mtcmos(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return (host->caps & UFS_MTK_CAP_RTFF_MTCMOS);
+ return host->caps & UFS_MTK_CAP_RTFF_MTCMOS;
}
static bool ufs_mtk_is_allow_vccqx_lpm(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return (host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM);
+ return host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM;
+}
+
+static bool ufs_mtk_is_clk_scale_ready(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct ufs_mtk_clk *mclk = &host->mclk;
+
+ return mclk->ufs_sel_clki &&
+ mclk->ufs_sel_max_clki &&
+ mclk->ufs_sel_min_clki;
}
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
@@ -267,6 +278,22 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
ufshcd_writel(hba,
ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
REG_UFS_XOUFS_CTRL);
+
+ if (host->legacy_ip_ver)
+ return 0;
+
+ /* DDR_EN setting */
+ if (host->ip_ver >= IP_VER_MT6989) {
+ ufshcd_rmwl(hba, UFS_MASK(0x7FFF, 8),
+ 0x453000, REG_UFS_MMIO_OPT_CTRL_0);
+ }
+
+ if (host->ip_ver >= IP_VER_MT6991_A0) {
+ /* Enable multi-rtt */
+ ufshcd_rmwl(hba, MRTT_EN, MRTT_EN, REG_UFS_MMIO_OPT_CTRL_0);
+ /* Enable random performance improvement */
+ ufshcd_rmwl(hba, RDN_PFM_IMPV_DIS, 0, REG_UFS_MMIO_OPT_CTRL_0);
+ }
}
return 0;
@@ -344,7 +371,16 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
- ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);
+ /*
+ * If clock-on times out, assume the clock is off and notify TFA to
+ * apply the clock-off settings (keep DIFN disabled, release the resource).
+ * If clock-off times out, assume the clock will eventually turn off,
+ * so clear ref_clk_enabled directly (keep DIFN disabled, keep the resource).
+ */
+ if (on)
+ ufs_mtk_ref_clk_notify(false, POST_CHANGE, res);
+ else
+ host->ref_clk_enabled = false;
return -ETIMEDOUT;
@@ -377,7 +413,7 @@ static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
+ if (!host->legacy_ip_ver && host->ip_ver >= IP_VER_MT6983) {
ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
@@ -388,12 +424,13 @@ static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
}
}
-static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
+static int ufs_mtk_wait_idle_state(struct ufs_hba *hba,
unsigned long retry_ms)
{
u64 timeout, time_checked;
u32 val, sm;
bool wait_idle;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
/* cannot use plain ktime_get() in suspend */
timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;
@@ -404,8 +441,13 @@ static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
do {
time_checked = ktime_get_mono_fast_ns();
- ufs_mtk_dbg_sel(hba);
- val = ufshcd_readl(hba, REG_UFS_PROBE);
+ if (host->legacy_ip_ver || host->ip_ver < IP_VER_MT6899) {
+ ufs_mtk_dbg_sel(hba);
+ val = ufshcd_readl(hba, REG_UFS_PROBE);
+ } else {
+ val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL);
+ val = val >> 16;
+ }
sm = val & 0x1f;
@@ -424,8 +466,12 @@ static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
break;
} while (time_checked < timeout);
- if (wait_idle && sm != VS_HCE_BASE)
+ if (wait_idle && sm != VS_HCE_BASE) {
dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
@@ -433,13 +479,20 @@ static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
{
ktime_t timeout, time_checked;
u32 val;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
timeout = ktime_add_ms(ktime_get(), max_wait_ms);
do {
time_checked = ktime_get();
- ufs_mtk_dbg_sel(hba);
- val = ufshcd_readl(hba, REG_UFS_PROBE);
- val = val >> 28;
+
+ if (host->legacy_ip_ver || host->ip_ver < IP_VER_MT6899) {
+ ufs_mtk_dbg_sel(hba);
+ val = ufshcd_readl(hba, REG_UFS_PROBE);
+ val = val >> 28;
+ } else {
+ val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL);
+ val = val >> 24;
+ }
if (val == state)
return 0;
@@ -663,6 +716,9 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
if (of_property_read_bool(np, "mediatek,ufs-rtff-mtcmos"))
host->caps |= UFS_MTK_CAP_RTFF_MTCMOS;
+ if (of_property_read_bool(np, "mediatek,ufs-broken-rtc"))
+ host->caps |= UFS_MTK_CAP_MCQ_BROKEN_RTC;
+
dev_info(hba->dev, "caps: 0x%x", host->caps);
}
@@ -768,8 +824,14 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
clk_pwr_off = true;
}
- if (clk_pwr_off)
+ if (clk_pwr_off) {
ufs_mtk_pwr_ctrl(hba, false);
+ } else {
+ dev_warn(hba->dev, "Clock is not turned off, hba->ahit = 0x%x, AHIT = 0x%x\n",
+ hba->ahit,
+ ufshcd_readl(hba,
+ REG_AUTO_HIBERNATE_IDLE_TIMER));
+ }
ufs_mtk_mcq_disable_irq(hba);
} else if (on && status == POST_CHANGE) {
ufs_mtk_pwr_ctrl(hba, true);
@@ -779,6 +841,91 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
return ret;
}
+static u32 ufs_mtk_mcq_get_irq(struct ufs_hba *hba, unsigned int cpu)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct blk_mq_tag_set *tag_set = &hba->host->tag_set;
+ struct blk_mq_queue_map *map = &tag_set->map[HCTX_TYPE_DEFAULT];
+ unsigned int nr = map->nr_queues;
+ unsigned int q_index;
+
+ q_index = map->mq_map[cpu];
+ if (q_index >= nr) {
+ dev_err(hba->dev, "hwq index %d exceed %d\n",
+ q_index, nr);
+ return MTK_MCQ_INVALID_IRQ;
+ }
+
+ return host->mcq_intr_info[q_index].irq;
+}
+
+static void ufs_mtk_mcq_set_irq_affinity(struct ufs_hba *hba, unsigned int cpu)
+{
+ unsigned int irq, _cpu;
+ int ret;
+
+ irq = ufs_mtk_mcq_get_irq(hba, cpu);
+ if (irq == MTK_MCQ_INVALID_IRQ) {
+ dev_err(hba->dev, "invalid irq. unable to bind irq to cpu%d", cpu);
+ return;
+ }
+
+ /* force migrate irq of cpu0 to cpu3 */
+ _cpu = (cpu == 0) ? 3 : cpu;
+ ret = irq_set_affinity(irq, cpumask_of(_cpu));
+ if (ret) {
+ dev_err(hba->dev, "set irq %d affinity to CPU %d failed\n",
+ irq, _cpu);
+ return;
+ }
+ dev_info(hba->dev, "set irq %d affinity to CPU: %d\n", irq, _cpu);
+}
+
+static bool ufs_mtk_is_legacy_chipset(struct ufs_hba *hba, u32 hw_ip_ver)
+{
+ bool is_legacy = false;
+
+ switch (hw_ip_ver) {
+ case IP_LEGACY_VER_MT6893:
+ case IP_LEGACY_VER_MT6781:
+ /* can add other legacy chipset ID here accordingly */
+ is_legacy = true;
+ break;
+ default:
+ break;
+ }
+ dev_info(hba->dev, "legacy IP version - 0x%x, is legacy : %d", hw_ip_ver, is_legacy);
+
+ return is_legacy;
+}
+
+/*
+ * The HW version format has been changed from 01MMmmmm to 1MMMmmmm since
+ * project MT6878. To perform correct version comparisons, the version
+ * number is remapped by SW for the following projects:
+ * IP_VER_MT6983 0x00360000 to 0x10360000
+ * IP_VER_MT6897 0x01440000 to 0x10440000
+ * IP_VER_MT6989 0x01450000 to 0x10450000
+ * IP_VER_MT6991 0x01460000 to 0x10460000
+ */
+static void ufs_mtk_get_hw_ip_version(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ u32 hw_ip_ver;
+
+ hw_ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
+
+ if (((hw_ip_ver & (0xFF << 24)) == (0x1 << 24)) ||
+ ((hw_ip_ver & (0xFF << 24)) == 0)) {
+ hw_ip_ver &= ~(0xFF << 24);
+ hw_ip_ver |= (0x1 << 28);
+ }
+
+ host->ip_ver = hw_ip_ver;
+
+ host->legacy_ip_ver = ufs_mtk_is_legacy_chipset(hba, hw_ip_ver);
+}
+
static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
@@ -818,8 +965,10 @@ static void ufs_mtk_init_clocks(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct list_head *head = &hba->clk_list_head;
- struct ufs_mtk_clk *mclk = &host->mclk;
struct ufs_clk_info *clki, *clki_tmp;
+ struct device *dev = hba->dev;
+ struct regulator *reg;
+ u32 volt;
/*
* Find private clocks and store them in struct ufs_mtk_clk.
@@ -837,15 +986,57 @@ static void ufs_mtk_init_clocks(struct ufs_hba *hba)
host->mclk.ufs_sel_min_clki = clki;
clk_disable_unprepare(clki->clk);
list_del(&clki->list);
+ } else if (!strcmp(clki->name, "ufs_fde")) {
+ host->mclk.ufs_fde_clki = clki;
+ } else if (!strcmp(clki->name, "ufs_fde_max_src")) {
+ host->mclk.ufs_fde_max_clki = clki;
+ clk_disable_unprepare(clki->clk);
+ list_del(&clki->list);
+ } else if (!strcmp(clki->name, "ufs_fde_min_src")) {
+ host->mclk.ufs_fde_min_clki = clki;
+ clk_disable_unprepare(clki->clk);
+ list_del(&clki->list);
}
}
- if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
- !mclk->ufs_sel_min_clki) {
+ list_for_each_entry(clki, head, list) {
+ dev_info(hba->dev, "clk \"%s\" present", clki->name);
+ }
+
+ if (!ufs_mtk_is_clk_scale_ready(hba)) {
hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
dev_info(hba->dev,
"%s: Clk-scaling not ready. Feature disabled.",
__func__);
+ return;
+ }
+
+ /*
+ * Get vcore by default if the dts provides these settings, regardless
+ * of whether clock scaling is supported (it may be disabled by the customer).
+ */
+ reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
+ if (IS_ERR(reg)) {
+ dev_info(dev, "failed to get dvfsrc-vcore: %ld",
+ PTR_ERR(reg));
+ return;
+ }
+
+ if (of_property_read_u32(dev->of_node, "clk-scale-up-vcore-min",
+ &volt)) {
+ dev_info(dev, "failed to get clk-scale-up-vcore-min");
+ return;
+ }
+
+ host->mclk.reg_vcore = reg;
+ host->mclk.vcore_volt = volt;
+
+ /* If default boot is max gear, request vcore */
+ if (reg && volt && host->clk_scale_up) {
+ if (regulator_set_voltage(reg, volt, INT_MAX)) {
+ dev_info(hba->dev,
+ "Failed to set vcore to %d\n", volt);
+ }
}
}
@@ -859,7 +1050,7 @@ static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
struct arm_smccc_res res;
int err, ver;
- if (hba->vreg_info.vcc)
+ if (info->vcc)
return 0;
if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
@@ -916,6 +1107,68 @@ static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
}
}
+static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
+{
+ unsigned long flags;
+ u32 ah_ms = 10;
+ u32 ah_scale, ah_timer;
+ u32 scale_us[] = {1, 10, 100, 1000, 10000, 100000};
+
+ if (ufshcd_is_clkgating_allowed(hba)) {
+ if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit) {
+ ah_scale = FIELD_GET(UFSHCI_AHIBERN8_SCALE_MASK,
+ hba->ahit);
+ ah_timer = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
+ hba->ahit);
+ if (ah_scale <= 5)
+ ah_ms = ah_timer * scale_us[ah_scale] / 1000;
+ }
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.delay_ms = max(ah_ms, 10U);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+}
+
+static void ufs_mtk_fix_ahit(struct ufs_hba *hba)
+{
+ unsigned int us;
+
+ if (ufshcd_is_auto_hibern8_supported(hba)) {
+ switch (hba->dev_info.wmanufacturerid) {
+ case UFS_VENDOR_SAMSUNG:
+ /* configure auto-hibern8 timer to 3.5 ms */
+ us = 3500;
+ break;
+
+ case UFS_VENDOR_MICRON:
+ /* configure auto-hibern8 timer to 2 ms */
+ us = 2000;
+ break;
+
+ default:
+ /* configure auto-hibern8 timer to 1 ms */
+ us = 1000;
+ break;
+ }
+
+ hba->ahit = ufshcd_us_to_ahit(us);
+ }
+
+ ufs_mtk_setup_clk_gating(hba);
+}
+
+static void ufs_mtk_fix_clock_scaling(struct ufs_hba *hba)
+{
+ /* Clock scaling is not necessary if the UFS version is below 4.0 */
+ if ((hba->dev_info.wspecversion < 0x0400) &&
+ ufs_mtk_is_clk_scale_ready(hba)) {
+ hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
+
+ _ufs_mtk_clk_scale(hba, false);
+ }
+}
+
static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
@@ -1014,13 +1267,17 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Enable clk scaling*/
hba->caps |= UFSHCD_CAP_CLK_SCALING;
+ host->clk_scale_up = true; /* default is max freq */
/* Set runtime pm delay to replace default */
shost->rpm_autosuspend_delay = MTK_RPM_AUTOSUSPEND_DELAY_MS;
hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
+
hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
- hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
+ if (host->caps & UFS_MTK_CAP_MCQ_BROKEN_RTC)
+ hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
+
hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
@@ -1050,7 +1307,7 @@ static int ufs_mtk_init(struct ufs_hba *hba)
ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
- host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
+ ufs_mtk_get_hw_ip_version(hba);
goto out;
@@ -1077,12 +1334,46 @@ static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
dev_req_params->gear_rx < UFS_HS_G4)
return false;
+ if (dev_req_params->pwr_tx == SLOW_MODE ||
+ dev_req_params->pwr_rx == SLOW_MODE)
+ return false;
+
return true;
}
+static void ufs_mtk_adjust_sync_length(struct ufs_hba *hba)
+{
+ int i;
+ u32 value;
+ u32 cnt, att, min;
+ struct attr_min {
+ u32 attr;
+ u32 min_value;
+ } pa_min_sync_length[] = {
+ {PA_TXHSG1SYNCLENGTH, 0x48},
+ {PA_TXHSG2SYNCLENGTH, 0x48},
+ {PA_TXHSG3SYNCLENGTH, 0x48},
+ {PA_TXHSG4SYNCLENGTH, 0x48},
+ {PA_TXHSG5SYNCLENGTH, 0x48}
+ };
+
+ cnt = sizeof(pa_min_sync_length) / sizeof(struct attr_min);
+ for (i = 0; i < cnt; i++) {
+ att = pa_min_sync_length[i].attr;
+ min = pa_min_sync_length[i].min_value;
+ ufshcd_dme_get(hba, UIC_ARG_MIB(att), &value);
+ if (value < min)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(att), min);
+
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(att), &value);
+ if (value < min)
+ ufshcd_dme_peer_set(hba, UIC_ARG_MIB(att), min);
+ }
+}
+
static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct ufs_host_params host_params;
@@ -1092,6 +1383,10 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
host_params.hs_rx_gear = UFS_HS_G5;
host_params.hs_tx_gear = UFS_HS_G5;
+ if (dev_max_params->pwr_rx == SLOW_MODE ||
+ dev_max_params->pwr_tx == SLOW_MODE)
+ host_params.desired_working_mode = UFS_PWM_MODE;
+
ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
if (ret) {
pr_info("%s: failed to determine capabilities\n",
@@ -1099,6 +1394,8 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
}
if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
+ ufs_mtk_adjust_sync_length(hba);
+
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);
@@ -1115,6 +1412,28 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
PA_NO_ADAPT);
+ if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
+ DL_FC0ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
+ DL_TC0ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
+ DL_AFC0ReqTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
+ DL_FC1ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
+ DL_TC1ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
+ DL_AFC1ReqTimeOutVal_Default);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
+ DL_FC0ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
+ DL_TC0ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
+ DL_AFC0ReqTimeOutVal_Default);
+ }
+
ret = ufshcd_uic_change_pwr_mode(hba,
FASTAUTO_MODE << 4 | FASTAUTO_MODE);
@@ -1124,28 +1443,84 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
}
}
- if (host->hw_ver.major >= 3) {
+ /* if already configured to the requested pwr_mode, skip adapt */
+ if (dev_req_params->gear_rx == hba->pwr_info.gear_rx &&
+ dev_req_params->gear_tx == hba->pwr_info.gear_tx &&
+ dev_req_params->lane_rx == hba->pwr_info.lane_rx &&
+ dev_req_params->lane_tx == hba->pwr_info.lane_tx &&
+ dev_req_params->pwr_rx == hba->pwr_info.pwr_rx &&
+ dev_req_params->pwr_tx == hba->pwr_info.pwr_tx &&
+ dev_req_params->hs_rate == hba->pwr_info.hs_rate) {
+ return ret;
+ }
+
+ if (dev_req_params->pwr_rx == FAST_MODE ||
+ dev_req_params->pwr_rx == FASTAUTO_MODE) {
+ if (host->hw_ver.major >= 3) {
+ ret = ufshcd_dme_configure_adapt(hba,
+ dev_req_params->gear_tx,
+ PA_INITIAL_ADAPT);
+ } else {
+ ret = ufshcd_dme_configure_adapt(hba,
+ dev_req_params->gear_tx,
+ PA_NO_ADAPT);
+ }
+ } else {
ret = ufshcd_dme_configure_adapt(hba,
- dev_req_params->gear_tx,
- PA_INITIAL_ADAPT);
+ dev_req_params->gear_tx,
+ PA_NO_ADAPT);
+ }
+
+ return ret;
+}
+
+static int ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
+{
+ int ret;
+
+ /* disable auto-hibern8 */
+ ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
+
+ /* wait for the host to return to the idle state once auto-hibern8 is off */
+ ret = ufs_mtk_wait_idle_state(hba, 5);
+ if (ret)
+ goto out;
+
+ ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
+
+out:
+ if (ret) {
+ dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
+
+ ufshcd_force_error_recovery(hba);
+
+ /* trigger error handler and break suspend */
+ ret = -EBUSY;
}
return ret;
}
static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status stage,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
+ enum ufs_notify_change_status stage,
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
{
int ret = 0;
+ static u32 reg;
switch (stage) {
case PRE_CHANGE:
+ if (ufshcd_is_auto_hibern8_supported(hba)) {
+ reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ ufs_mtk_auto_hibern8_disable(hba);
+ }
ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
dev_req_params);
break;
case POST_CHANGE:
+ if (ufshcd_is_auto_hibern8_supported(hba))
+ ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);
break;
default:
ret = -EINVAL;
@@ -1179,6 +1554,7 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
int ret;
u32 tmp;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
ufs_mtk_get_controller_version(hba);
@@ -1204,34 +1580,33 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
+ /* Enable the 1144 functions setting */
+ if (host->ip_ver == IP_VER_MT6989) {
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGOMC), &tmp);
+ if (ret)
+ return ret;
+
+ tmp |= 0x10;
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), tmp);
+ }
+
return ret;
}
-static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
+static void ufs_mtk_post_link(struct ufs_hba *hba)
{
- u32 ah_ms;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ u32 tmp;
- if (ufshcd_is_clkgating_allowed(hba)) {
- if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
- ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
- hba->ahit);
- else
- ah_ms = 10;
- ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
+ /* fix device PA_INIT no adapt */
+ if (host->ip_ver >= IP_VER_MT6899) {
+ ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGOMC), &tmp);
+ tmp |= 0x100;
+ ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), tmp);
}
-}
-static void ufs_mtk_post_link(struct ufs_hba *hba)
-{
/* enable unipro clock gating feature */
ufs_mtk_cfg_unipro_cg(hba, true);
-
- /* will be configured during probe hba */
- if (ufshcd_is_auto_hibern8_supported(hba))
- hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
- FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
-
- ufs_mtk_setup_clk_gating(hba);
}
static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
@@ -1258,11 +1633,11 @@ static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
struct arm_smccc_res res;
- /* disable hba before device reset */
- ufshcd_hba_stop(hba);
-
ufs_mtk_device_reset_ctrl(0, res);
+ /* disable hba in the middle of device reset */
+ ufshcd_hba_stop(hba);
+
/*
* The reset signal is active low. UFS devices shall detect
* more than or equal to 1us of positive or negative RST_n
@@ -1285,21 +1660,37 @@ static int ufs_mtk_device_reset(struct ufs_hba *hba)
static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
int err;
+ u32 val;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
err = ufshcd_hba_enable(hba);
if (err)
return err;
err = ufs_mtk_unipro_set_lpm(hba, false);
- if (err)
+ if (err) {
+ if (host->ip_ver < IP_VER_MT6899) {
+ ufs_mtk_dbg_sel(hba);
+ val = ufshcd_readl(hba, REG_UFS_PROBE);
+ } else {
+ val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL);
+ }
+ ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)val);
+ val = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)val);
return err;
+ }
err = ufshcd_uic_hibern8_exit(hba);
if (err)
return err;
/* Check link state to make sure exit h8 success */
- ufs_mtk_wait_idle_state(hba, 5);
+ err = ufs_mtk_wait_idle_state(hba, 5);
+ if (err) {
+ dev_warn(hba->dev, "wait idle fail, err=%d\n", err);
+ return err;
+ }
err = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
if (err) {
dev_warn(hba->dev, "exit h8 state fail, err=%d\n", err);
@@ -1344,6 +1735,9 @@ static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
{
struct ufs_vreg *vccqx = NULL;
+ if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
+ return;
+
if (hba->vreg_info.vccq)
vccqx = hba->vreg_info.vccq;
else
@@ -1398,30 +1792,16 @@ static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
}
}
-static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
-{
- int ret;
-
- /* disable auto-hibern8 */
- ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
-
- /* wait host return to idle state when auto-hibern8 off */
- ufs_mtk_wait_idle_state(hba, 5);
-
- ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
- if (ret)
- dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
-}
-
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
int err;
struct arm_smccc_res res;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
if (status == PRE_CHANGE) {
if (ufshcd_is_auto_hibern8_supported(hba))
- ufs_mtk_auto_hibern8_disable(hba);
+ return ufs_mtk_auto_hibern8_disable(hba);
return 0;
}
@@ -1447,6 +1827,15 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
ufs_mtk_sram_pwr_ctrl(false, res);
+ /* Release pm_qos/clk if in scale-up mode during suspend */
+ if (ufshcd_is_clkscaling_supported(hba) && (host->clk_scale_up)) {
+ ufshcd_pm_qos_update(hba, false);
+ _ufs_mtk_clk_scale(hba, false);
+ } else if ((!ufshcd_is_clkscaling_supported(hba) &&
+ hba->pwr_info.gear_rx >= UFS_HS_G5)) {
+ _ufs_mtk_clk_scale(hba, false);
+ }
+
return 0;
fail:
/*
@@ -1462,6 +1851,7 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int err;
struct arm_smccc_res res;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
ufs_mtk_dev_vreg_set_lpm(hba, false);
@@ -1472,6 +1862,15 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (err)
goto fail;
+ /* Request pm_qos/clk if in scale-up mode after resume */
+ if (ufshcd_is_clkscaling_supported(hba) && (host->clk_scale_up)) {
+ ufshcd_pm_qos_update(hba, true);
+ _ufs_mtk_clk_scale(hba, true);
+ } else if ((!ufshcd_is_clkscaling_supported(hba) &&
+ hba->pwr_info.gear_rx >= UFS_HS_G5)) {
+ _ufs_mtk_clk_scale(hba, true);
+ }
+
if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_hpm(hba);
if (err)
@@ -1479,8 +1878,21 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
}
return 0;
+
fail:
- return ufshcd_link_recovery(hba);
+ /*
+ * Check if the platform (parent) device has resumed, and ensure that
+ * power, clock, and MTCMOS are all turned on.
+ */
+ err = ufshcd_link_recovery(hba);
+ if (err) {
+ dev_err(hba->dev, "Device PM: req=%d, status:%d, err:%d\n",
+ hba->dev->power.request,
+ hba->dev->power.runtime_status,
+ hba->dev->power.runtime_error);
+ }
+
+ return 0; /* Cannot return a failure, otherwise, the I/O will hang. */
}
static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
@@ -1505,6 +1917,13 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
u16 mid = dev_info->wmanufacturerid;
+ unsigned int cpu;
+
+ if (hba->mcq_enabled) {
+ /* Iterate all cpus to set affinity for mcq irqs */
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+ ufs_mtk_mcq_set_irq_affinity(hba, cpu);
+ }
if (mid == UFS_VENDOR_SAMSUNG) {
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
@@ -1543,19 +1962,19 @@ static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
- if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
- (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
+ if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc) {
hba->vreg_info.vcc->always_on = true;
/*
* VCC will be kept always-on thus we don't
- * need any delay during regulator operations
+ * need any delay before putting the device's VCC into LPM mode.
*/
- hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
- UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
+ hba->dev_quirks &= ~UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM;
}
ufs_mtk_vreg_fix_vcc(hba);
ufs_mtk_vreg_fix_vccqx(hba);
+ ufs_mtk_fix_ahit(hba);
+ ufs_mtk_fix_clock_scaling(hba);
}
static void ufs_mtk_event_notify(struct ufs_hba *hba,
@@ -1598,24 +2017,30 @@ static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
hba->vps->ondemand_data.downdifferential = 20;
}
-/**
- * ufs_mtk_clk_scale - Internal clk scaling operation
- *
- * MTK platform supports clk scaling by switching parent of ufs_sel(mux).
- * The ufs_sel downstream to ufs_ck which feeds directly to UFS hardware.
- * Max and min clocks rate of ufs_sel defined in dts should match rate of
- * "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
- * This prevent changing rate of pll clock that is shared between modules.
- *
- * @hba: per adapter instance
- * @scale_up: True for scaling up and false for scaling down
- */
-static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
+static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct ufs_mtk_clk *mclk = &host->mclk;
struct ufs_clk_info *clki = mclk->ufs_sel_clki;
- int ret = 0;
+ struct ufs_clk_info *fde_clki = mclk->ufs_fde_clki;
+ struct regulator *reg;
+ int volt, ret = 0;
+ bool clk_bind_vcore = false;
+ bool clk_fde_scale = false;
+
+ if (!hba->clk_scaling.is_initialized)
+ return;
+
+ if (!clki || !fde_clki)
+ return;
+
+ reg = host->mclk.reg_vcore;
+ volt = host->mclk.vcore_volt;
+ if (reg && volt != 0)
+ clk_bind_vcore = true;
+
+ if (mclk->ufs_fde_max_clki && mclk->ufs_fde_min_clki)
+ clk_fde_scale = true;
ret = clk_prepare_enable(clki->clk);
if (ret) {
@@ -1624,25 +2049,114 @@ static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
return;
}
+ if (clk_fde_scale) {
+ ret = clk_prepare_enable(fde_clki->clk);
+ if (ret) {
+ dev_info(hba->dev,
+ "fde clk_prepare_enable() fail, ret: %d\n", ret);
+ return;
+ }
+ }
+
if (scale_up) {
+ if (clk_bind_vcore) {
+ ret = regulator_set_voltage(reg, volt, INT_MAX);
+ if (ret) {
+ dev_info(hba->dev,
+ "Failed to set vcore to %d\n", volt);
+ goto out;
+ }
+ }
+
ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
- clki->curr_freq = clki->max_freq;
+ if (ret) {
+ dev_info(hba->dev, "Failed to set clk mux, ret = %d\n",
+ ret);
+ }
+
+ if (clk_fde_scale) {
+ ret = clk_set_parent(fde_clki->clk,
+ mclk->ufs_fde_max_clki->clk);
+ if (ret) {
+ dev_info(hba->dev,
+ "Failed to set fde clk mux, ret = %d\n",
+ ret);
+ }
+ }
} else {
+ if (clk_fde_scale) {
+ ret = clk_set_parent(fde_clki->clk,
+ mclk->ufs_fde_min_clki->clk);
+ if (ret) {
+ dev_info(hba->dev,
+ "Failed to set fde clk mux, ret = %d\n",
+ ret);
+ goto out;
+ }
+ }
+
ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
- clki->curr_freq = clki->min_freq;
- }
+ if (ret) {
+ dev_info(hba->dev, "Failed to set clk mux, ret = %d\n",
+ ret);
+ goto out;
+ }
- if (ret) {
- dev_info(hba->dev,
- "Failed to set ufs_sel_clki, ret: %d\n", ret);
+ if (clk_bind_vcore) {
+ ret = regulator_set_voltage(reg, 0, INT_MAX);
+ if (ret) {
+ dev_info(hba->dev,
+ "failed to set vcore to MIN\n");
+ }
+ }
}
+out:
clk_disable_unprepare(clki->clk);
+ if (clk_fde_scale)
+ clk_disable_unprepare(fde_clki->clk);
+}
+
+/**
+ * ufs_mtk_clk_scale - Internal clk scaling operation
+ *
+ * The MTK platform supports clk scaling by switching the parent of ufs_sel (a mux).
+ * ufs_sel feeds downstream into ufs_ck, which drives the UFS hardware directly.
+ * The max and min clock rates of ufs_sel defined in the dts should match the rates
+ * of "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
+ * This prevents changing the rate of a pll clock that is shared between modules.
+ *
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up and false for scaling down
+ */
+static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct ufs_mtk_clk *mclk = &host->mclk;
+ struct ufs_clk_info *clki = mclk->ufs_sel_clki;
+
+ if (host->clk_scale_up == scale_up)
+ goto out;
+
+ if (scale_up)
+ _ufs_mtk_clk_scale(hba, true);
+ else
+ _ufs_mtk_clk_scale(hba, false);
+
+ host->clk_scale_up = scale_up;
+
+ /* Must always be set before clk_set_rate() */
+ if (scale_up)
+ clki->curr_freq = clki->max_freq;
+ else
+ clki->curr_freq = clki->min_freq;
+out:
trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
}
static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
+ unsigned long target_freq,
enum ufs_notify_change_status status)
{
if (!ufshcd_is_clkscaling_supported(hba))
@@ -1747,6 +2261,7 @@ static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
return ret;
}
}
+ host->is_mcq_intr_enabled = true;
return 0;
}
@@ -1830,10 +2345,12 @@ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
static int ufs_mtk_probe(struct platform_device *pdev)
{
int err;
- struct device *dev = &pdev->dev;
- struct device_node *reset_node;
- struct platform_device *reset_pdev;
+ struct device *dev = &pdev->dev, *phy_dev = NULL;
+ struct device_node *reset_node, *phy_node = NULL;
+ struct platform_device *reset_pdev, *phy_pdev = NULL;
struct device_link *link;
+ struct ufs_hba *hba;
+ struct ufs_mtk_host *host;
reset_node = of_find_compatible_node(NULL, NULL,
"ti,syscon-reset");
@@ -1860,13 +2377,51 @@ static int ufs_mtk_probe(struct platform_device *pdev)
}
skip_reset:
+ /* find phy node */
+ phy_node = of_parse_phandle(dev->of_node, "phys", 0);
+
+ if (phy_node) {
+ phy_pdev = of_find_device_by_node(phy_node);
+ if (!phy_pdev)
+ goto skip_phy;
+ phy_dev = &phy_pdev->dev;
+
+ pm_runtime_set_active(phy_dev);
+ pm_runtime_enable(phy_dev);
+ pm_runtime_get_sync(phy_dev);
+
+ put_device(phy_dev);
+ dev_info(dev, "phys node found\n");
+ } else {
+ dev_notice(dev, "phys node not found\n");
+ }
+
+skip_phy:
/* perform generic probe */
err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
-
-out:
- if (err)
+ if (err) {
dev_err(dev, "probe failed %d\n", err);
+ goto out;
+ }
+
+ hba = platform_get_drvdata(pdev);
+ if (!hba)
+ goto out;
+ if (phy_node && phy_dev) {
+ host = ufshcd_get_variant(hba);
+ host->phy_dev = phy_dev;
+ }
+
+ /*
+ * Because the default power setting of VSx (the upper layer of
+ * VCCQ/VCCQ2) is HWLP, we need to prevent VCCQ/VCCQ2 from
+ * entering LPM.
+ */
+ ufs_mtk_dev_vreg_set_lpm(hba, false);
+
+out:
+ of_node_put(phy_node);
of_node_put(reset_node);
return err;
}
@@ -1879,10 +2434,7 @@ out:
*/
static void ufs_mtk_remove(struct platform_device *pdev)
{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
-
- pm_runtime_get_sync(&(pdev)->dev);
- ufshcd_remove(hba);
+ ufshcd_pltfrm_remove(pdev);
}
#ifdef CONFIG_PM_SLEEP
@@ -1892,29 +2444,45 @@ static int ufs_mtk_system_suspend(struct device *dev)
struct arm_smccc_res res;
int ret;
+ if (hba->shutting_down) {
+ ret = -EBUSY;
+ goto out;
+ }
+
ret = ufshcd_system_suspend(dev);
if (ret)
- return ret;
+ goto out;
+
+ if (pm_runtime_suspended(hba->dev))
+ goto out;
ufs_mtk_dev_vreg_set_lpm(hba, true);
if (ufs_mtk_is_rtff_mtcmos(hba))
ufs_mtk_mtcmos_ctrl(false, res);
- return 0;
+out:
+ return ret;
}
static int ufs_mtk_system_resume(struct device *dev)
{
+ int ret = 0;
struct ufs_hba *hba = dev_get_drvdata(dev);
struct arm_smccc_res res;
- ufs_mtk_dev_vreg_set_lpm(hba, false);
+ if (pm_runtime_suspended(hba->dev))
+ goto out;
if (ufs_mtk_is_rtff_mtcmos(hba))
ufs_mtk_mtcmos_ctrl(true, res);
- return ufshcd_system_resume(dev);
+ ufs_mtk_dev_vreg_set_lpm(hba, false);
+
+out:
+ ret = ufshcd_system_resume(dev);
+
+ return ret;
}
#endif
@@ -1922,6 +2490,7 @@ static int ufs_mtk_system_resume(struct device *dev)
static int ufs_mtk_runtime_suspend(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct arm_smccc_res res;
int ret = 0;
@@ -1934,17 +2503,24 @@ static int ufs_mtk_runtime_suspend(struct device *dev)
if (ufs_mtk_is_rtff_mtcmos(hba))
ufs_mtk_mtcmos_ctrl(false, res);
+ if (host->phy_dev)
+ pm_runtime_put_sync(host->phy_dev);
+
return 0;
}
static int ufs_mtk_runtime_resume(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct arm_smccc_res res;
if (ufs_mtk_is_rtff_mtcmos(hba))
ufs_mtk_mtcmos_ctrl(true, res);
+ if (host->phy_dev)
+ pm_runtime_get_sync(host->phy_dev);
+
ufs_mtk_dev_vreg_set_lpm(hba, false);
return ufshcd_runtime_resume(dev);
@@ -1962,7 +2538,7 @@ static const struct dev_pm_ops ufs_mtk_pm_ops = {
static struct platform_driver ufs_mtk_pltform = {
.probe = ufs_mtk_probe,
- .remove_new = ufs_mtk_remove,
+ .remove = ufs_mtk_remove,
.driver = {
.name = "ufshcd-mtk",
.pm = &ufs_mtk_pm_ops,