Diffstat (limited to 'drivers/ufs/host/ufs-mediatek.c')
-rw-r--r--   drivers/ufs/host/ufs-mediatek.c   1200
1 file changed, 1045 insertions, 155 deletions
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c index e68b05976f9e..ecbbf52bf734 100644 --- a/drivers/ufs/host/ufs-mediatek.c +++ b/drivers/ufs/host/ufs-mediatek.c @@ -14,27 +14,34 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_device.h> +#include <linux/of_platform.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> -#include <linux/pm_qos.h> #include <linux/regulator/consumer.h> #include <linux/reset.h> -#include <linux/soc/mediatek/mtk_sip_svc.h> #include <ufs/ufshcd.h> #include "ufshcd-pltfrm.h" #include <ufs/ufs_quirks.h> #include <ufs/unipro.h> + #include "ufs-mediatek.h" +#include "ufs-mediatek-sip.h" + +static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq); +static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up); #define CREATE_TRACE_POINTS #include "ufs-mediatek-trace.h" +#undef CREATE_TRACE_POINTS + +#define MAX_SUPP_MAC 64 +#define MCQ_QUEUE_OFFSET(c) ((((c) >> 16) & 0xFF) * 0x200) static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = { { .wmanufacturerid = UFS_ANY_VENDOR, .model = UFS_ANY_MODEL, - .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM | - UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM }, + .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM }, { .wmanufacturerid = UFS_VENDOR_SKHYNIX, .model = "H9HQ21AFAMZDAR", .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES }, @@ -43,8 +50,10 @@ static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = { static const struct of_device_id ufs_mtk_of_match[] = { { .compatible = "mediatek,mt8183-ufshci" }, + { .compatible = "mediatek,mt8195-ufshci" }, {}, }; +MODULE_DEVICE_TABLE(of, ufs_mtk_of_match); /* * Details of UIC Errors @@ -88,28 +97,59 @@ static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE); + return host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE; } static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL); + return host->caps & UFS_MTK_CAP_VA09_PWR_CTRL; } static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC); + return host->caps & UFS_MTK_CAP_BROKEN_VCC; } static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO); + return host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO; +} + +static bool ufs_mtk_is_tx_skew_fix(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + return host->caps & UFS_MTK_CAP_TX_SKEW_FIX; +} + +static bool ufs_mtk_is_rtff_mtcmos(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + return host->caps & UFS_MTK_CAP_RTFF_MTCMOS; +} + +static bool ufs_mtk_is_allow_vccqx_lpm(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + return host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM; +} + +static bool ufs_mtk_is_clk_scale_ready(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct ufs_mtk_clk *mclk = &host->mclk; + + return mclk->ufs_sel_clki && + mclk->ufs_sel_max_clki && + mclk->ufs_sel_min_clki; } static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable) @@ -163,16 +203,23 @@ static void ufs_mtk_crypto_enable(struct ufs_hba *hba) static void ufs_mtk_host_reset(struct ufs_hba *hba) { struct 
ufs_mtk_host *host = ufshcd_get_variant(hba); + struct arm_smccc_res res; reset_control_assert(host->hci_reset); reset_control_assert(host->crypto_reset); reset_control_assert(host->unipro_reset); + reset_control_assert(host->mphy_reset); usleep_range(100, 110); reset_control_deassert(host->unipro_reset); reset_control_deassert(host->crypto_reset); reset_control_deassert(host->hci_reset); + reset_control_deassert(host->mphy_reset); + + /* restore mphy setting aftre mphy reset */ + if (host->mphy_reset) + ufs_mtk_mphy_ctrl(UFS_MPHY_RESTORE, res); } static void ufs_mtk_init_reset_control(struct ufs_hba *hba, @@ -197,6 +244,8 @@ static void ufs_mtk_init_reset(struct ufs_hba *hba) "unipro_rst"); ufs_mtk_init_reset_control(hba, &host->crypto_reset, "crypto_rst"); + ufs_mtk_init_reset_control(hba, &host->mphy_reset, + "mphy_rst"); } static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba, @@ -229,6 +278,22 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba, ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80, REG_UFS_XOUFS_CTRL); + + if (host->legacy_ip_ver) + return 0; + + /* DDR_EN setting */ + if (host->ip_ver >= IP_VER_MT6989) { + ufshcd_rmwl(hba, UFS_MASK(0x7FFF, 8), + 0x453000, REG_UFS_MMIO_OPT_CTRL_0); + } + + if (host->ip_ver >= IP_VER_MT6991_A0) { + /* Enable multi-rtt */ + ufshcd_rmwl(hba, MRTT_EN, MRTT_EN, REG_UFS_MMIO_OPT_CTRL_0); + /* Enable random performance improvement */ + ufshcd_rmwl(hba, RDN_PFM_IMPV_DIS, 0, REG_UFS_MMIO_OPT_CTRL_0); + } } return 0; @@ -306,7 +371,16 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on) dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value); - ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res); + /* + * If clock on timeout, assume clock is off, notify tfa do clock + * off setting.(keep DIFN disable, release resource) + * If clock off timeout, assume clock will off finally, + * set ref_clk_enabled directly.(keep DIFN disable, keep resource) + */ + if (on) + ufs_mtk_ref_clk_notify(false, POST_CHANGE, res); + else + host->ref_clk_enabled = false; return -ETIMEDOUT; @@ -339,7 +413,7 @@ static void ufs_mtk_dbg_sel(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - if (((host->ip_ver >> 16) & 0xFF) >= 0x36) { + if (!host->legacy_ip_ver && host->ip_ver >= IP_VER_MT6983) { ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL); ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0); ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1); @@ -350,12 +424,13 @@ static void ufs_mtk_dbg_sel(struct ufs_hba *hba) } } -static void ufs_mtk_wait_idle_state(struct ufs_hba *hba, +static int ufs_mtk_wait_idle_state(struct ufs_hba *hba, unsigned long retry_ms) { u64 timeout, time_checked; u32 val, sm; bool wait_idle; + struct ufs_mtk_host *host = ufshcd_get_variant(hba); /* cannot use plain ktime_get() in suspend */ timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL; @@ -366,8 +441,13 @@ static void ufs_mtk_wait_idle_state(struct ufs_hba *hba, do { time_checked = ktime_get_mono_fast_ns(); - ufs_mtk_dbg_sel(hba); - val = ufshcd_readl(hba, REG_UFS_PROBE); + if (host->legacy_ip_ver || host->ip_ver < IP_VER_MT6899) { + ufs_mtk_dbg_sel(hba); + val = ufshcd_readl(hba, REG_UFS_PROBE); + } else { + val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL); + val = val >> 16; + } sm = val & 0x1f; @@ -386,8 +466,12 @@ static void ufs_mtk_wait_idle_state(struct ufs_hba *hba, break; } while (time_checked < timeout); - if (wait_idle && sm != VS_HCE_BASE) + if (wait_idle && sm != VS_HCE_BASE) { 
dev_info(hba->dev, "wait idle tmo: 0x%x\n", val); + return -ETIMEDOUT; + } + + return 0; } static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state, @@ -395,13 +479,20 @@ static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state, { ktime_t timeout, time_checked; u32 val; + struct ufs_mtk_host *host = ufshcd_get_variant(hba); timeout = ktime_add_ms(ktime_get(), max_wait_ms); do { time_checked = ktime_get(); - ufs_mtk_dbg_sel(hba); - val = ufshcd_readl(hba, REG_UFS_PROBE); - val = val >> 28; + + if (host->legacy_ip_ver || host->ip_ver < IP_VER_MT6899) { + ufs_mtk_dbg_sel(hba); + val = ufshcd_readl(hba, REG_UFS_PROBE); + val = val >> 28; + } else { + val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL); + val = val >> 24; + } if (val == state) return 0; @@ -616,24 +707,24 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba) if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto")) host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO; - dev_info(hba->dev, "caps: 0x%x", host->caps); -} + if (of_property_read_bool(np, "mediatek,ufs-tx-skew-fix")) + host->caps |= UFS_MTK_CAP_TX_SKEW_FIX; -static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost) -{ - struct ufs_mtk_host *host = ufshcd_get_variant(hba); + if (of_property_read_bool(np, "mediatek,ufs-disable-mcq")) + host->caps |= UFS_MTK_CAP_DISABLE_MCQ; - if (!host || !host->pm_qos_init) - return; + if (of_property_read_bool(np, "mediatek,ufs-rtff-mtcmos")) + host->caps |= UFS_MTK_CAP_RTFF_MTCMOS; + + if (of_property_read_bool(np, "mediatek,ufs-broken-rtc")) + host->caps |= UFS_MTK_CAP_MCQ_BROKEN_RTC; - cpu_latency_qos_update_request(&host->pm_qos_req, - boost ? 0 : PM_QOS_DEFAULT_VALUE); + dev_info(hba->dev, "caps: 0x%x", host->caps); } static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up) { ufs_mtk_boost_crypt(hba, scale_up); - ufs_mtk_boost_pm_qos(hba, scale_up); } static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on) @@ -653,13 +744,52 @@ static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on) } } +static void ufs_mtk_mcq_disable_irq(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + u32 irq, i; + + if (!hba->mcq_enabled) + return; + + if (host->mcq_nr_intr == 0) + return; + + for (i = 0; i < host->mcq_nr_intr; i++) { + irq = host->mcq_intr_info[i].irq; + disable_irq(irq); + } + host->is_mcq_intr_enabled = false; +} + +static void ufs_mtk_mcq_enable_irq(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + u32 irq, i; + + if (!hba->mcq_enabled) + return; + + if (host->mcq_nr_intr == 0) + return; + + if (host->is_mcq_intr_enabled == true) + return; + + for (i = 0; i < host->mcq_nr_intr; i++) { + irq = host->mcq_intr_info[i].irq; + enable_irq(irq); + } + host->is_mcq_intr_enabled = true; +} + /** * ufs_mtk_setup_clocks - enables/disable clocks * @hba: host controller instance * @on: If true, enable clocks else disable them. * @status: PRE_CHANGE or POST_CHANGE notify * - * Returns 0 on success, non-zero on failure. + * Return: 0 on success, non-zero on failure. 
*/ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on, enum ufs_notify_change_status status) @@ -694,15 +824,108 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on, clk_pwr_off = true; } - if (clk_pwr_off) + if (clk_pwr_off) { ufs_mtk_pwr_ctrl(hba, false); + } else { + dev_warn(hba->dev, "Clock is not turned off, hba->ahit = 0x%x, AHIT = 0x%x\n", + hba->ahit, + ufshcd_readl(hba, + REG_AUTO_HIBERNATE_IDLE_TIMER)); + } + ufs_mtk_mcq_disable_irq(hba); } else if (on && status == POST_CHANGE) { ufs_mtk_pwr_ctrl(hba, true); + ufs_mtk_mcq_enable_irq(hba); } return ret; } +static u32 ufs_mtk_mcq_get_irq(struct ufs_hba *hba, unsigned int cpu) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct blk_mq_tag_set *tag_set = &hba->host->tag_set; + struct blk_mq_queue_map *map = &tag_set->map[HCTX_TYPE_DEFAULT]; + unsigned int nr = map->nr_queues; + unsigned int q_index; + + q_index = map->mq_map[cpu]; + if (q_index >= nr) { + dev_err(hba->dev, "hwq index %d exceed %d\n", + q_index, nr); + return MTK_MCQ_INVALID_IRQ; + } + + return host->mcq_intr_info[q_index].irq; +} + +static void ufs_mtk_mcq_set_irq_affinity(struct ufs_hba *hba, unsigned int cpu) +{ + unsigned int irq, _cpu; + int ret; + + irq = ufs_mtk_mcq_get_irq(hba, cpu); + if (irq == MTK_MCQ_INVALID_IRQ) { + dev_err(hba->dev, "invalid irq. unable to bind irq to cpu%d", cpu); + return; + } + + /* force migrate irq of cpu0 to cpu3 */ + _cpu = (cpu == 0) ? 3 : cpu; + ret = irq_set_affinity(irq, cpumask_of(_cpu)); + if (ret) { + dev_err(hba->dev, "set irq %d affinity to CPU %d failed\n", + irq, _cpu); + return; + } + dev_info(hba->dev, "set irq %d affinity to CPU: %d\n", irq, _cpu); +} + +static bool ufs_mtk_is_legacy_chipset(struct ufs_hba *hba, u32 hw_ip_ver) +{ + bool is_legacy = false; + + switch (hw_ip_ver) { + case IP_LEGACY_VER_MT6893: + case IP_LEGACY_VER_MT6781: + /* can add other legacy chipset ID here accordingly */ + is_legacy = true; + break; + default: + break; + } + dev_info(hba->dev, "legacy IP version - 0x%x, is legacy : %d", hw_ip_ver, is_legacy); + + return is_legacy; +} + +/* + * HW version format has been changed from 01MMmmmm to 1MMMmmmm, since + * project MT6878. In order to perform correct version comparison, + * version number is changed by SW for the following projects. + * IP_VER_MT6983 0x00360000 to 0x10360000 + * IP_VER_MT6897 0x01440000 to 0x10440000 + * IP_VER_MT6989 0x01450000 to 0x10450000 + * IP_VER_MT6991 0x01460000 to 0x10460000 + */ +static void ufs_mtk_get_hw_ip_version(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + u32 hw_ip_ver; + + hw_ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER); + + if (((hw_ip_ver & (0xFF << 24)) == (0x1 << 24)) || + ((hw_ip_ver & (0xFF << 24)) == 0)) { + hw_ip_ver &= ~(0xFF << 24); + hw_ip_ver |= (0x1 << 28); + } + + host->ip_ver = hw_ip_ver; + + host->legacy_ip_ver = ufs_mtk_is_legacy_chipset(hba, hw_ip_ver); +} + static void ufs_mtk_get_controller_version(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); @@ -742,8 +965,10 @@ static void ufs_mtk_init_clocks(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); struct list_head *head = &hba->clk_list_head; - struct ufs_mtk_clk *mclk = &host->mclk; struct ufs_clk_info *clki, *clki_tmp; + struct device *dev = hba->dev; + struct regulator *reg; + u32 volt; /* * Find private clocks and store them in struct ufs_mtk_clk. 
@@ -761,15 +986,57 @@ static void ufs_mtk_init_clocks(struct ufs_hba *hba) host->mclk.ufs_sel_min_clki = clki; clk_disable_unprepare(clki->clk); list_del(&clki->list); + } else if (!strcmp(clki->name, "ufs_fde")) { + host->mclk.ufs_fde_clki = clki; + } else if (!strcmp(clki->name, "ufs_fde_max_src")) { + host->mclk.ufs_fde_max_clki = clki; + clk_disable_unprepare(clki->clk); + list_del(&clki->list); + } else if (!strcmp(clki->name, "ufs_fde_min_src")) { + host->mclk.ufs_fde_min_clki = clki; + clk_disable_unprepare(clki->clk); + list_del(&clki->list); } } - if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki || - !mclk->ufs_sel_min_clki) { + list_for_each_entry(clki, head, list) { + dev_info(hba->dev, "clk \"%s\" present", clki->name); + } + + if (!ufs_mtk_is_clk_scale_ready(hba)) { hba->caps &= ~UFSHCD_CAP_CLK_SCALING; dev_info(hba->dev, "%s: Clk-scaling not ready. Feature disabled.", __func__); + return; + } + + /* + * Default get vcore if dts have these settings. + * No matter clock scaling support or not. (may disable by customer) + */ + reg = devm_regulator_get_optional(dev, "dvfsrc-vcore"); + if (IS_ERR(reg)) { + dev_info(dev, "failed to get dvfsrc-vcore: %ld", + PTR_ERR(reg)); + return; + } + + if (of_property_read_u32(dev->of_node, "clk-scale-up-vcore-min", + &volt)) { + dev_info(dev, "failed to get clk-scale-up-vcore-min"); + return; + } + + host->mclk.reg_vcore = reg; + host->mclk.vcore_volt = volt; + + /* If default boot is max gear, request vcore */ + if (reg && volt && host->clk_scale_up) { + if (regulator_set_voltage(reg, volt, INT_MAX)) { + dev_info(hba->dev, + "Failed to set vcore to %d\n", volt); + } } } @@ -783,7 +1050,7 @@ static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba) struct arm_smccc_res res; int err, ver; - if (hba->vreg_info.vcc) + if (info->vcc) return 0; if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) { @@ -799,7 +1066,7 @@ static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba) return 0; } - err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc); + err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc, false); if (err) return err; @@ -840,6 +1107,102 @@ static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba) } } +static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba) +{ + unsigned long flags; + u32 ah_ms = 10; + u32 ah_scale, ah_timer; + u32 scale_us[] = {1, 10, 100, 1000, 10000, 100000}; + + if (ufshcd_is_clkgating_allowed(hba)) { + if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit) { + ah_scale = FIELD_GET(UFSHCI_AHIBERN8_SCALE_MASK, + hba->ahit); + ah_timer = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, + hba->ahit); + if (ah_scale <= 5) + ah_ms = ah_timer * scale_us[ah_scale] / 1000; + } + + spin_lock_irqsave(hba->host->host_lock, flags); + hba->clk_gating.delay_ms = max(ah_ms, 10U); + spin_unlock_irqrestore(hba->host->host_lock, flags); + } +} + +static void ufs_mtk_fix_ahit(struct ufs_hba *hba) +{ + unsigned int us; + + if (ufshcd_is_auto_hibern8_supported(hba)) { + switch (hba->dev_info.wmanufacturerid) { + case UFS_VENDOR_SAMSUNG: + /* configure auto-hibern8 timer to 3.5 ms */ + us = 3500; + break; + + case UFS_VENDOR_MICRON: + /* configure auto-hibern8 timer to 2 ms */ + us = 2000; + break; + + default: + /* configure auto-hibern8 timer to 1 ms */ + us = 1000; + break; + } + + hba->ahit = ufshcd_us_to_ahit(us); + } + + ufs_mtk_setup_clk_gating(hba); +} + +static void ufs_mtk_fix_clock_scaling(struct ufs_hba *hba) +{ + /* UFS version is below 4.0, clock scaling is not necessary */ + if ((hba->dev_info.wspecversion < 0x0400) && + 
ufs_mtk_is_clk_scale_ready(hba)) { + hba->caps &= ~UFSHCD_CAP_CLK_SCALING; + + _ufs_mtk_clk_scale(hba, false); + } +} + +static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct platform_device *pdev; + int i; + int irq; + + host->mcq_nr_intr = UFSHCD_MAX_Q_NR; + pdev = container_of(hba->dev, struct platform_device, dev); + + if (host->caps & UFS_MTK_CAP_DISABLE_MCQ) + goto failed; + + for (i = 0; i < host->mcq_nr_intr; i++) { + /* irq index 0 is legacy irq, sq/cq irq start from index 1 */ + irq = platform_get_irq(pdev, i + 1); + if (irq < 0) { + host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ; + goto failed; + } + host->mcq_intr_info[i].hba = hba; + host->mcq_intr_info[i].irq = irq; + dev_info(hba->dev, "get platform mcq irq: %d, %d\n", i, irq); + } + + return; +failed: + /* invalidate irq info */ + for (i = 0; i < host->mcq_nr_intr; i++) + host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ; + + host->mcq_nr_intr = 0; +} + /** * ufs_mtk_init - find other essential mmio bases * @hba: host controller instance @@ -847,7 +1210,7 @@ static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba) * Binds PHY with controller and powers up PHY enabling clocks * and regulators. * - * Returns -EPROBE_DEFER if binding fails, returns negative error + * Return: -EPROBE_DEFER if binding fails, returns negative error * on phy power up failure and returns zero on success. */ static int ufs_mtk_init(struct ufs_hba *hba) @@ -855,7 +1218,9 @@ static int ufs_mtk_init(struct ufs_hba *hba) const struct of_device_id *id; struct device *dev = hba->dev; struct ufs_mtk_host *host; + struct Scsi_Host *shost = hba->host; int err = 0; + struct arm_smccc_res res; host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); if (!host) { @@ -876,12 +1241,18 @@ static int ufs_mtk_init(struct ufs_hba *hba) /* Initialize host capability */ ufs_mtk_init_host_caps(hba); + ufs_mtk_init_mcq_irq(hba); + err = ufs_mtk_bind_mphy(hba); if (err) goto out_variant_clear; ufs_mtk_init_reset(hba); + /* backup mphy setting if mphy can reset */ + if (host->mphy_reset) + ufs_mtk_mphy_ctrl(UFS_MPHY_BACKUP, res); + /* Enable runtime autosuspend */ hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; @@ -896,15 +1267,25 @@ static int ufs_mtk_init(struct ufs_hba *hba) /* Enable clk scaling*/ hba->caps |= UFSHCD_CAP_CLK_SCALING; + host->clk_scale_up = true; /* default is max freq */ + + /* Set runtime pm delay to replace default */ + shost->rpm_autosuspend_delay = MTK_RPM_AUTOSUSPEND_DELAY_MS; hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL; + hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR; - hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC; + if (host->caps & UFS_MTK_CAP_MCQ_BROKEN_RTC) + hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC; + hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80); if (host->caps & UFS_MTK_CAP_DISABLE_AH8) hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; + if (host->caps & UFS_MTK_CAP_DISABLE_MCQ) + hba->quirks |= UFSHCD_QUIRK_BROKEN_LSDBS_CAP; + ufs_mtk_init_clocks(hba); /* @@ -915,13 +1296,18 @@ static int ufs_mtk_init(struct ufs_hba *hba) * Enable phy clocks specifically here. 
*/ ufs_mtk_mphy_power_on(hba, true); - ufs_mtk_setup_clocks(hba, true, POST_CHANGE); - host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER); + if (ufs_mtk_is_rtff_mtcmos(hba)) { + /* First Restore here, to avoid backup unexpected value */ + ufs_mtk_mtcmos_ctrl(false, res); + + /* Power on to init */ + ufs_mtk_mtcmos_ctrl(true, res); + } + + ufs_mtk_setup_clocks(hba, true, POST_CHANGE); - /* Initialize pm-qos request */ - cpu_latency_qos_add_request(&host->pm_qos_req, PM_QOS_DEFAULT_VALUE); - host->pm_qos_init = true; + ufs_mtk_get_hw_ip_version(hba); goto out; @@ -948,30 +1334,68 @@ static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba, dev_req_params->gear_rx < UFS_HS_G4) return false; + if (dev_req_params->pwr_tx == SLOW_MODE || + dev_req_params->pwr_rx == SLOW_MODE) + return false; + return true; } +static void ufs_mtk_adjust_sync_length(struct ufs_hba *hba) +{ + int i; + u32 value; + u32 cnt, att, min; + struct attr_min { + u32 attr; + u32 min_value; + } pa_min_sync_length[] = { + {PA_TXHSG1SYNCLENGTH, 0x48}, + {PA_TXHSG2SYNCLENGTH, 0x48}, + {PA_TXHSG3SYNCLENGTH, 0x48}, + {PA_TXHSG4SYNCLENGTH, 0x48}, + {PA_TXHSG5SYNCLENGTH, 0x48} + }; + + cnt = sizeof(pa_min_sync_length) / sizeof(struct attr_min); + for (i = 0; i < cnt; i++) { + att = pa_min_sync_length[i].attr; + min = pa_min_sync_length[i].min_value; + ufshcd_dme_get(hba, UIC_ARG_MIB(att), &value); + if (value < min) + ufshcd_dme_set(hba, UIC_ARG_MIB(att), min); + + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(att), &value); + if (value < min) + ufshcd_dme_peer_set(hba, UIC_ARG_MIB(att), min); + } +} + static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba, - struct ufs_pa_layer_attr *dev_max_params, - struct ufs_pa_layer_attr *dev_req_params) + const struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - struct ufs_dev_params host_cap; + struct ufs_host_params host_params; int ret; - ufshcd_init_pwr_dev_param(&host_cap); - host_cap.hs_rx_gear = UFS_HS_G5; - host_cap.hs_tx_gear = UFS_HS_G5; + ufshcd_init_host_params(&host_params); + host_params.hs_rx_gear = UFS_HS_G5; + host_params.hs_tx_gear = UFS_HS_G5; - ret = ufshcd_get_pwr_dev_param(&host_cap, - dev_max_params, - dev_req_params); + if (dev_max_params->pwr_rx == SLOW_MODE || + dev_max_params->pwr_tx == SLOW_MODE) + host_params.desired_working_mode = UFS_PWM_MODE; + + ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params); if (ret) { pr_info("%s: failed to determine capabilities\n", __func__); } if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) { + ufs_mtk_adjust_sync_length(hba); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true); ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1); @@ -988,6 +1412,28 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba, ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE), PA_NO_ADAPT); + if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) { + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), + DL_FC0ProtectionTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), + DL_TC0ReplayTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), + DL_AFC0ReqTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3), + DL_FC1ProtectionTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4), + DL_TC1ReplayTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5), + DL_AFC1ReqTimeOutVal_Default); + + ufshcd_dme_set(hba, 
UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal), + DL_FC0ProtectionTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal), + DL_TC0ReplayTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal), + DL_AFC0ReqTimeOutVal_Default); + } + ret = ufshcd_uic_change_pwr_mode(hba, FASTAUTO_MODE << 4 | FASTAUTO_MODE); @@ -997,28 +1443,84 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba, } } - if (host->hw_ver.major >= 3) { + /* if already configured to the requested pwr_mode, skip adapt */ + if (dev_req_params->gear_rx == hba->pwr_info.gear_rx && + dev_req_params->gear_tx == hba->pwr_info.gear_tx && + dev_req_params->lane_rx == hba->pwr_info.lane_rx && + dev_req_params->lane_tx == hba->pwr_info.lane_tx && + dev_req_params->pwr_rx == hba->pwr_info.pwr_rx && + dev_req_params->pwr_tx == hba->pwr_info.pwr_tx && + dev_req_params->hs_rate == hba->pwr_info.hs_rate) { + return ret; + } + + if (dev_req_params->pwr_rx == FAST_MODE || + dev_req_params->pwr_rx == FASTAUTO_MODE) { + if (host->hw_ver.major >= 3) { + ret = ufshcd_dme_configure_adapt(hba, + dev_req_params->gear_tx, + PA_INITIAL_ADAPT); + } else { + ret = ufshcd_dme_configure_adapt(hba, + dev_req_params->gear_tx, + PA_NO_ADAPT); + } + } else { ret = ufshcd_dme_configure_adapt(hba, - dev_req_params->gear_tx, - PA_INITIAL_ADAPT); + dev_req_params->gear_tx, + PA_NO_ADAPT); + } + + return ret; +} + +static int ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba) +{ + int ret; + + /* disable auto-hibern8 */ + ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER); + + /* wait host return to idle state when auto-hibern8 off */ + ret = ufs_mtk_wait_idle_state(hba, 5); + if (ret) + goto out; + + ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100); + +out: + if (ret) { + dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret); + + ufshcd_force_error_recovery(hba); + + /* trigger error handler and break suspend */ + ret = -EBUSY; } return ret; } static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba, - enum ufs_notify_change_status stage, - struct ufs_pa_layer_attr *dev_max_params, - struct ufs_pa_layer_attr *dev_req_params) + enum ufs_notify_change_status stage, + const struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) { int ret = 0; + static u32 reg; switch (stage) { case PRE_CHANGE: + if (ufshcd_is_auto_hibern8_supported(hba)) { + reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER); + ufs_mtk_auto_hibern8_disable(hba); + } ret = ufs_mtk_pre_pwr_change(hba, dev_max_params, dev_req_params); break; case POST_CHANGE: + if (ufshcd_is_auto_hibern8_supported(hba)) + ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER); break; default: ret = -EINVAL; @@ -1052,6 +1554,7 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba) { int ret; u32 tmp; + struct ufs_mtk_host *host = ufshcd_get_variant(hba); ufs_mtk_get_controller_version(hba); @@ -1077,34 +1580,33 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba) ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp); + /* Enable the 1144 functions setting */ + if (host->ip_ver == IP_VER_MT6989) { + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGOMC), &tmp); + if (ret) + return ret; + + tmp |= 0x10; + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), tmp); + } + return ret; } -static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba) +static void ufs_mtk_post_link(struct ufs_hba *hba) { - u32 ah_ms; + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + u32 tmp; - if (ufshcd_is_clkgating_allowed(hba)) { - if 
(ufshcd_is_auto_hibern8_supported(hba) && hba->ahit) - ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, - hba->ahit); - else - ah_ms = 10; - ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5); + /* fix device PA_INIT no adapt */ + if (host->ip_ver >= IP_VER_MT6899) { + ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGOMC), &tmp); + tmp |= 0x100; + ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), tmp); } -} -static void ufs_mtk_post_link(struct ufs_hba *hba) -{ /* enable unipro clock gating feature */ ufs_mtk_cfg_unipro_cg(hba, true); - - /* will be configured during probe hba */ - if (ufshcd_is_auto_hibern8_supported(hba)) - hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) | - FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3); - - ufs_mtk_setup_clk_gating(hba); } static int ufs_mtk_link_startup_notify(struct ufs_hba *hba, @@ -1131,11 +1633,11 @@ static int ufs_mtk_device_reset(struct ufs_hba *hba) { struct arm_smccc_res res; - /* disable hba before device reset */ - ufshcd_hba_stop(hba); - ufs_mtk_device_reset_ctrl(0, res); + /* disable hba in middle of device reset */ + ufshcd_hba_stop(hba); + /* * The reset signal is active low. UFS devices shall detect * more than or equal to 1us of positive or negative RST_n @@ -1158,25 +1660,55 @@ static int ufs_mtk_device_reset(struct ufs_hba *hba) static int ufs_mtk_link_set_hpm(struct ufs_hba *hba) { int err; + u32 val; + struct ufs_mtk_host *host = ufshcd_get_variant(hba); err = ufshcd_hba_enable(hba); if (err) return err; err = ufs_mtk_unipro_set_lpm(hba, false); - if (err) + if (err) { + if (host->ip_ver < IP_VER_MT6899) { + ufs_mtk_dbg_sel(hba); + val = ufshcd_readl(hba, REG_UFS_PROBE); + } else { + val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL); + } + ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)val); + val = ufshcd_readl(hba, REG_INTERRUPT_STATUS); + ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)val); return err; + } err = ufshcd_uic_hibern8_exit(hba); - if (!err) - ufshcd_set_link_active(hba); - else + if (err) + return err; + + /* Check link state to make sure exit h8 success */ + err = ufs_mtk_wait_idle_state(hba, 5); + if (err) { + dev_warn(hba->dev, "wait idle fail, err=%d\n", err); + return err; + } + err = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100); + if (err) { + dev_warn(hba->dev, "exit h8 state fail, err=%d\n", err); return err; + } + ufshcd_set_link_active(hba); err = ufshcd_make_hba_operational(hba); if (err) return err; + if (hba->mcq_enabled) { + ufs_mtk_config_mcq(hba, false); + ufshcd_mcq_make_queues_operational(hba); + ufshcd_mcq_config_mac(hba, hba->nutrs); + ufshcd_mcq_enable(hba); + } + return 0; } @@ -1203,6 +1735,9 @@ static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm) { struct ufs_vreg *vccqx = NULL; + if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2) + return; + if (hba->vreg_info.vccq) vccqx = hba->vreg_info.vccq; else @@ -1223,54 +1758,50 @@ static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm) static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm) { - if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2) - return; - - /* Skip if VCC is assumed always-on */ - if (!hba->vreg_info.vcc) - return; + bool skip_vccqx = false; - /* Bypass LPM when device is still active */ + /* Prevent entering LPM when device is still active */ if (lpm && ufshcd_is_ufs_dev_active(hba)) return; - /* Bypass LPM if VCC is enabled */ - if (lpm && hba->vreg_info.vcc->enabled) - return; + /* Skip vccqx lpm control and control vsx only */ + if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2) + skip_vccqx = true; + 
+ /* VCC is always-on, control vsx only */ + if (!hba->vreg_info.vcc) + skip_vccqx = true; + + /* Broken vcc keep vcc always on, most case control vsx only */ + if (lpm && hba->vreg_info.vcc && hba->vreg_info.vcc->enabled) { + /* Some device vccqx/vsx can enter lpm */ + if (ufs_mtk_is_allow_vccqx_lpm(hba)) + skip_vccqx = false; + else /* control vsx only */ + skip_vccqx = true; + } if (lpm) { - ufs_mtk_vccqx_set_lpm(hba, lpm); + if (!skip_vccqx) + ufs_mtk_vccqx_set_lpm(hba, lpm); ufs_mtk_vsx_set_lpm(hba, lpm); } else { ufs_mtk_vsx_set_lpm(hba, lpm); - ufs_mtk_vccqx_set_lpm(hba, lpm); + if (!skip_vccqx) + ufs_mtk_vccqx_set_lpm(hba, lpm); } } -static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba) -{ - int ret; - - /* disable auto-hibern8 */ - ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER); - - /* wait host return to idle state when auto-hibern8 off */ - ufs_mtk_wait_idle_state(hba, 5); - - ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100); - if (ret) - dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret); -} - static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, enum ufs_notify_change_status status) { int err; struct arm_smccc_res res; + struct ufs_mtk_host *host = ufshcd_get_variant(hba); if (status == PRE_CHANGE) { if (ufshcd_is_auto_hibern8_supported(hba)) - ufs_mtk_auto_hibern8_disable(hba); + return ufs_mtk_auto_hibern8_disable(hba); return 0; } @@ -1294,7 +1825,16 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, if (ufshcd_is_link_off(hba)) ufs_mtk_device_reset_ctrl(0, res); - ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res); + ufs_mtk_sram_pwr_ctrl(false, res); + + /* Release pm_qos/clk if in scale-up mode during suspend */ + if (ufshcd_is_clkscaling_supported(hba) && (host->clk_scale_up)) { + ufshcd_pm_qos_update(hba, false); + _ufs_mtk_clk_scale(hba, false); + } else if ((!ufshcd_is_clkscaling_supported(hba) && + hba->pwr_info.gear_rx >= UFS_HS_G5)) { + _ufs_mtk_clk_scale(hba, false); + } return 0; fail: @@ -1311,16 +1851,26 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) { int err; struct arm_smccc_res res; + struct ufs_mtk_host *host = ufshcd_get_variant(hba); if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) ufs_mtk_dev_vreg_set_lpm(hba, false); - ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res); + ufs_mtk_sram_pwr_ctrl(true, res); err = ufs_mtk_mphy_power_on(hba, true); if (err) goto fail; + /* Request pm_qos/clk if in scale-up mode after resume */ + if (ufshcd_is_clkscaling_supported(hba) && (host->clk_scale_up)) { + ufshcd_pm_qos_update(hba, true); + _ufs_mtk_clk_scale(hba, true); + } else if ((!ufshcd_is_clkscaling_supported(hba) && + hba->pwr_info.gear_rx >= UFS_HS_G5)) { + _ufs_mtk_clk_scale(hba, true); + } + if (ufshcd_is_link_hibern8(hba)) { err = ufs_mtk_link_set_hpm(hba); if (err) @@ -1328,8 +1878,21 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) } return 0; + fail: - return ufshcd_link_recovery(hba); + /* + * Check if the platform (parent) device has resumed, and ensure that + * power, clock, and MTCMOS are all turned on. + */ + err = ufshcd_link_recovery(hba); + if (err) { + dev_err(hba->dev, "Device PM: req=%d, status:%d, err:%d\n", + hba->dev->power.request, + hba->dev->power.runtime_status, + hba->dev->power.runtime_error); + } + + return 0; /* Cannot return a failure, otherwise, the I/O will hang. 
*/ } static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba) @@ -1354,10 +1917,28 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba) { struct ufs_dev_info *dev_info = &hba->dev_info; u16 mid = dev_info->wmanufacturerid; + unsigned int cpu; + + if (hba->mcq_enabled) { + /* Iterate all cpus to set affinity for mcq irqs */ + for (cpu = 0; cpu < nr_cpu_ids; cpu++) + ufs_mtk_mcq_set_irq_affinity(hba, cpu); + } if (mid == UFS_VENDOR_SAMSUNG) { ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6); ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10); + } else if (mid == UFS_VENDOR_MICRON) { + /* Only for the host which have TX skew issue */ + if (ufs_mtk_is_tx_skew_fix(hba) && + (STR_PRFX_EQUAL("MT128GBCAV2U31", dev_info->model) || + STR_PRFX_EQUAL("MT256GBCAV4U31", dev_info->model) || + STR_PRFX_EQUAL("MT512GBCAV8U31", dev_info->model) || + STR_PRFX_EQUAL("MT256GBEAX4U40", dev_info->model) || + STR_PRFX_EQUAL("MT512GAYAX4U40", dev_info->model) || + STR_PRFX_EQUAL("MT001TAYAX8U40", dev_info->model))) { + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 8); + } } /* @@ -1381,19 +1962,19 @@ static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba) { ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups); - if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc && - (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) { + if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc) { hba->vreg_info.vcc->always_on = true; /* * VCC will be kept always-on thus we don't - * need any delay during regulator operations + * need any delay before putting device's VCC in LPM mode. */ - hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM | - UFS_DEVICE_QUIRK_DELAY_AFTER_LPM); + hba->dev_quirks &= ~UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM; } ufs_mtk_vreg_fix_vcc(hba); ufs_mtk_vreg_fix_vccqx(hba); + ufs_mtk_fix_ahit(hba); + ufs_mtk_fix_clock_scaling(hba); } static void ufs_mtk_event_notify(struct ufs_hba *hba, @@ -1436,24 +2017,30 @@ static void ufs_mtk_config_scaling_param(struct ufs_hba *hba, hba->vps->ondemand_data.downdifferential = 20; } -/** - * ufs_mtk_clk_scale - Internal clk scaling operation - * - * MTK platform supports clk scaling by switching parent of ufs_sel(mux). - * The ufs_sel downstream to ufs_ck which feeds directly to UFS hardware. - * Max and min clocks rate of ufs_sel defined in dts should match rate of - * "ufs_sel_max_src" and "ufs_sel_min_src" respectively. - * This prevent changing rate of pll clock that is shared between modules. 
- * - * @hba: per adapter instance - * @scale_up: True for scaling up and false for scaling down - */ -static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up) +static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); struct ufs_mtk_clk *mclk = &host->mclk; struct ufs_clk_info *clki = mclk->ufs_sel_clki; - int ret = 0; + struct ufs_clk_info *fde_clki = mclk->ufs_fde_clki; + struct regulator *reg; + int volt, ret = 0; + bool clk_bind_vcore = false; + bool clk_fde_scale = false; + + if (!hba->clk_scaling.is_initialized) + return; + + if (!clki || !fde_clki) + return; + + reg = host->mclk.reg_vcore; + volt = host->mclk.vcore_volt; + if (reg && volt != 0) + clk_bind_vcore = true; + + if (mclk->ufs_fde_max_clki && mclk->ufs_fde_min_clki) + clk_fde_scale = true; ret = clk_prepare_enable(clki->clk); if (ret) { @@ -1462,25 +2049,114 @@ static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up) return; } + if (clk_fde_scale) { + ret = clk_prepare_enable(fde_clki->clk); + if (ret) { + dev_info(hba->dev, + "fde clk_prepare_enable() fail, ret: %d\n", ret); + return; + } + } + if (scale_up) { + if (clk_bind_vcore) { + ret = regulator_set_voltage(reg, volt, INT_MAX); + if (ret) { + dev_info(hba->dev, + "Failed to set vcore to %d\n", volt); + goto out; + } + } + ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk); - clki->curr_freq = clki->max_freq; + if (ret) { + dev_info(hba->dev, "Failed to set clk mux, ret = %d\n", + ret); + } + + if (clk_fde_scale) { + ret = clk_set_parent(fde_clki->clk, + mclk->ufs_fde_max_clki->clk); + if (ret) { + dev_info(hba->dev, + "Failed to set fde clk mux, ret = %d\n", + ret); + } + } } else { + if (clk_fde_scale) { + ret = clk_set_parent(fde_clki->clk, + mclk->ufs_fde_min_clki->clk); + if (ret) { + dev_info(hba->dev, + "Failed to set fde clk mux, ret = %d\n", + ret); + goto out; + } + } + ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk); - clki->curr_freq = clki->min_freq; - } + if (ret) { + dev_info(hba->dev, "Failed to set clk mux, ret = %d\n", + ret); + goto out; + } - if (ret) { - dev_info(hba->dev, - "Failed to set ufs_sel_clki, ret: %d\n", ret); + if (clk_bind_vcore) { + ret = regulator_set_voltage(reg, 0, INT_MAX); + if (ret) { + dev_info(hba->dev, + "failed to set vcore to MIN\n"); + } + } } +out: clk_disable_unprepare(clki->clk); + if (clk_fde_scale) + clk_disable_unprepare(fde_clki->clk); +} + +/** + * ufs_mtk_clk_scale - Internal clk scaling operation + * + * MTK platform supports clk scaling by switching parent of ufs_sel(mux). + * The ufs_sel downstream to ufs_ck which feeds directly to UFS hardware. + * Max and min clocks rate of ufs_sel defined in dts should match rate of + * "ufs_sel_max_src" and "ufs_sel_min_src" respectively. + * This prevent changing rate of pll clock that is shared between modules. 
+ * + * @hba: per adapter instance + * @scale_up: True for scaling up and false for scaling down + */ +static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct ufs_mtk_clk *mclk = &host->mclk; + struct ufs_clk_info *clki = mclk->ufs_sel_clki; + + if (host->clk_scale_up == scale_up) + goto out; + + if (scale_up) + _ufs_mtk_clk_scale(hba, true); + else + _ufs_mtk_clk_scale(hba, false); + + host->clk_scale_up = scale_up; + + /* Must always set before clk_set_rate() */ + if (scale_up) + clki->curr_freq = clki->max_freq; + else + clki->curr_freq = clki->min_freq; +out: trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk)); } static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up, + unsigned long target_freq, enum ufs_notify_change_status status) { if (!ufshcd_is_clkscaling_supported(hba)) @@ -1497,6 +2173,137 @@ static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up, return 0; } +static int ufs_mtk_get_hba_mac(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + /* MCQ operation not permitted */ + if (host->caps & UFS_MTK_CAP_DISABLE_MCQ) + return -EPERM; + + return MAX_SUPP_MAC; +} + +static int ufs_mtk_op_runtime_config(struct ufs_hba *hba) +{ + struct ufshcd_mcq_opr_info_t *opr; + int i; + + hba->mcq_opr[OPR_SQD].offset = REG_UFS_MTK_SQD; + hba->mcq_opr[OPR_SQIS].offset = REG_UFS_MTK_SQIS; + hba->mcq_opr[OPR_CQD].offset = REG_UFS_MTK_CQD; + hba->mcq_opr[OPR_CQIS].offset = REG_UFS_MTK_CQIS; + + for (i = 0; i < OPR_MAX; i++) { + opr = &hba->mcq_opr[i]; + opr->stride = REG_UFS_MCQ_STRIDE; + opr->base = hba->mmio_base + opr->offset; + } + + return 0; +} + +static int ufs_mtk_mcq_config_resource(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + /* fail mcq initialization if interrupt is not filled properly */ + if (!host->mcq_nr_intr) { + dev_info(hba->dev, "IRQs not ready. MCQ disabled."); + return -EINVAL; + } + + hba->mcq_base = hba->mmio_base + MCQ_QUEUE_OFFSET(hba->mcq_capabilities); + return 0; +} + +static irqreturn_t ufs_mtk_mcq_intr(int irq, void *__intr_info) +{ + struct ufs_mtk_mcq_intr_info *mcq_intr_info = __intr_info; + struct ufs_hba *hba = mcq_intr_info->hba; + struct ufs_hw_queue *hwq; + u32 events; + int qid = mcq_intr_info->qid; + + hwq = &hba->uhq[qid]; + + events = ufshcd_mcq_read_cqis(hba, qid); + if (events) + ufshcd_mcq_write_cqis(hba, events, qid); + + if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS) + ufshcd_mcq_poll_cqe_lock(hba, hwq); + + return IRQ_HANDLED; +} + +static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + u32 irq, i; + int ret; + + for (i = 0; i < host->mcq_nr_intr; i++) { + irq = host->mcq_intr_info[i].irq; + if (irq == MTK_MCQ_INVALID_IRQ) { + dev_err(hba->dev, "invalid irq. %d\n", i); + return -ENOPARAM; + } + + host->mcq_intr_info[i].qid = i; + ret = devm_request_irq(hba->dev, irq, ufs_mtk_mcq_intr, 0, UFSHCD, + &host->mcq_intr_info[i]); + + dev_dbg(hba->dev, "request irq %d intr %s\n", irq, ret ? 
"failed" : ""); + + if (ret) { + dev_err(hba->dev, "Cannot request irq %d\n", ret); + return ret; + } + } + host->is_mcq_intr_enabled = true; + + return 0; +} + +static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + int ret = 0; + + if (!host->mcq_set_intr) { + /* Disable irq option register */ + ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, 0, REG_UFS_MMIO_OPT_CTRL_0); + + if (irq) { + ret = ufs_mtk_config_mcq_irq(hba); + if (ret) + return ret; + } + + host->mcq_set_intr = true; + } + + ufshcd_rmwl(hba, MCQ_AH8, MCQ_AH8, REG_UFS_MMIO_OPT_CTRL_0); + ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, MCQ_MULTI_INTR_EN, REG_UFS_MMIO_OPT_CTRL_0); + + return 0; +} + +static int ufs_mtk_config_esi(struct ufs_hba *hba) +{ + return ufs_mtk_config_mcq(hba, true); +} + +static void ufs_mtk_config_scsi_dev(struct scsi_device *sdev) +{ + struct ufs_hba *hba = shost_priv(sdev->host); + + dev_dbg(hba->dev, "lu %llu scsi device configured", sdev->lun); + if (sdev->lun == 2) + blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, sdev->request_queue); +} + /* * struct ufs_hba_mtk_vops - UFS MTK specific variant operations * @@ -1505,6 +2312,7 @@ static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up, */ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = { .name = "mediatek.ufshci", + .max_num_rtt = MTK_MAX_NUM_RTT, .init = ufs_mtk_init, .get_ufs_hci_version = ufs_mtk_get_ufs_hci_version, .setup_clocks = ufs_mtk_setup_clocks, @@ -1520,21 +2328,29 @@ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = { .event_notify = ufs_mtk_event_notify, .config_scaling_param = ufs_mtk_config_scaling_param, .clk_scale_notify = ufs_mtk_clk_scale_notify, + /* mcq vops */ + .get_hba_mac = ufs_mtk_get_hba_mac, + .op_runtime_config = ufs_mtk_op_runtime_config, + .mcq_config_resource = ufs_mtk_mcq_config_resource, + .config_esi = ufs_mtk_config_esi, + .config_scsi_dev = ufs_mtk_config_scsi_dev, }; /** * ufs_mtk_probe - probe routine of the driver * @pdev: pointer to Platform device handle * - * Return zero for success and non-zero for failure + * Return: zero for success and non-zero for failure. 
*/ static int ufs_mtk_probe(struct platform_device *pdev) { int err; - struct device *dev = &pdev->dev; - struct device_node *reset_node; - struct platform_device *reset_pdev; + struct device *dev = &pdev->dev, *phy_dev = NULL; + struct device_node *reset_node, *phy_node = NULL; + struct platform_device *reset_pdev, *phy_pdev = NULL; struct device_link *link; + struct ufs_hba *hba; + struct ufs_mtk_host *host; reset_node = of_find_compatible_node(NULL, NULL, "ti,syscon-reset"); @@ -1561,13 +2377,51 @@ static int ufs_mtk_probe(struct platform_device *pdev) } skip_reset: + /* find phy node */ + phy_node = of_parse_phandle(dev->of_node, "phys", 0); + + if (phy_node) { + phy_pdev = of_find_device_by_node(phy_node); + if (!phy_pdev) + goto skip_phy; + phy_dev = &phy_pdev->dev; + + pm_runtime_set_active(phy_dev); + pm_runtime_enable(phy_dev); + pm_runtime_get_sync(phy_dev); + + put_device(phy_dev); + dev_info(dev, "phys node found\n"); + } else { + dev_notice(dev, "phys node not found\n"); + } + +skip_phy: /* perform generic probe */ err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops); + if (err) { + dev_err(dev, "probe failed %d\n", err); + goto out; + } -out: - if (err) - dev_info(dev, "probe failed %d\n", err); + hba = platform_get_drvdata(pdev); + if (!hba) + goto out; + + if (phy_node && phy_dev) { + host = ufshcd_get_variant(hba); + host->phy_dev = phy_dev; + } + + /* + * Because the default power setting of VSx (the upper layer of + * VCCQ/VCCQ2) is HWLP, we need to prevent VCCQ/VCCQ2 from + * entering LPM. + */ + ufs_mtk_dev_vreg_set_lpm(hba, false); +out: + of_node_put(phy_node); of_node_put(reset_node); return err; } @@ -1578,37 +2432,57 @@ out: * * Always return 0 */ -static int ufs_mtk_remove(struct platform_device *pdev) +static void ufs_mtk_remove(struct platform_device *pdev) { - struct ufs_hba *hba = platform_get_drvdata(pdev); - - pm_runtime_get_sync(&(pdev)->dev); - ufshcd_remove(hba); - return 0; + ufshcd_pltfrm_remove(pdev); } #ifdef CONFIG_PM_SLEEP static int ufs_mtk_system_suspend(struct device *dev) { struct ufs_hba *hba = dev_get_drvdata(dev); + struct arm_smccc_res res; int ret; + if (hba->shutting_down) { + ret = -EBUSY; + goto out; + } + ret = ufshcd_system_suspend(dev); if (ret) - return ret; + goto out; + + if (pm_runtime_suspended(hba->dev)) + goto out; ufs_mtk_dev_vreg_set_lpm(hba, true); - return 0; + if (ufs_mtk_is_rtff_mtcmos(hba)) + ufs_mtk_mtcmos_ctrl(false, res); + +out: + return ret; } static int ufs_mtk_system_resume(struct device *dev) { + int ret = 0; struct ufs_hba *hba = dev_get_drvdata(dev); + struct arm_smccc_res res; + + if (pm_runtime_suspended(hba->dev)) + goto out; + + if (ufs_mtk_is_rtff_mtcmos(hba)) + ufs_mtk_mtcmos_ctrl(true, res); ufs_mtk_dev_vreg_set_lpm(hba, false); - return ufshcd_system_resume(dev); +out: + ret = ufshcd_system_resume(dev); + + return ret; } #endif @@ -1616,6 +2490,8 @@ static int ufs_mtk_system_resume(struct device *dev) static int ufs_mtk_runtime_suspend(struct device *dev) { struct ufs_hba *hba = dev_get_drvdata(dev); + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct arm_smccc_res res; int ret = 0; ret = ufshcd_runtime_suspend(dev); @@ -1624,12 +2500,26 @@ static int ufs_mtk_runtime_suspend(struct device *dev) ufs_mtk_dev_vreg_set_lpm(hba, true); + if (ufs_mtk_is_rtff_mtcmos(hba)) + ufs_mtk_mtcmos_ctrl(false, res); + + if (host->phy_dev) + pm_runtime_put_sync(host->phy_dev); + return 0; } static int ufs_mtk_runtime_resume(struct device *dev) { struct ufs_hba *hba = dev_get_drvdata(dev); + struct ufs_mtk_host 
*host = ufshcd_get_variant(hba); + struct arm_smccc_res res; + + if (ufs_mtk_is_rtff_mtcmos(hba)) + ufs_mtk_mtcmos_ctrl(true, res); + + if (host->phy_dev) + pm_runtime_get_sync(host->phy_dev); ufs_mtk_dev_vreg_set_lpm(hba, false); @@ -1648,7 +2538,7 @@ static const struct dev_pm_ops ufs_mtk_pm_ops = { static struct platform_driver ufs_mtk_pltform = { .probe = ufs_mtk_probe, - .remove = ufs_mtk_remove, + .remove = ufs_mtk_remove, .driver = { .name = "ufshcd-mtk", .pm = &ufs_mtk_pm_ops, |
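
Note on the MCQ_QUEUE_OFFSET() macro introduced by this patch: the driver places the MCQ queue-configuration registers at an offset derived from bits 23:16 of the controller's MCQ capability value, scaled by 0x200 bytes (hba->mcq_base = hba->mmio_base + MCQ_QUEUE_OFFSET(hba->mcq_capabilities)). The stand-alone sketch below is illustrative only and not part of the patch; the capability value used is hypothetical, and the field interpretation simply mirrors the macro.

	/* Illustrative only: mirrors MCQ_QUEUE_OFFSET() from the patch above. */
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t mcq_queue_offset(uint32_t mcq_cap)
	{
		/* bits 23:16 select the queue-config region in 0x200-byte units */
		return ((mcq_cap >> 16) & 0xFF) * 0x200;
	}

	int main(void)
	{
		uint32_t cap = 0x00400000;	/* hypothetical MCQ capability value */

		/* bits 23:16 = 0x40, so the offset is 0x40 * 0x200 = 0x8000 */
		printf("queue config offset: 0x%x\n", (unsigned)mcq_queue_offset(cap));
		return 0;
	}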
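The version-normalization comment in the patch (register format changing from 01MMmmmm, or 00MMmmmm on older parts, to 1MMMmmmm since MT6878) can likewise be checked with the small stand-alone sketch below. It restates the logic of ufs_mtk_get_hw_ip_version() outside the driver for clarity; the example inputs and expected outputs are the ones listed in the patch comment.

	/* Illustrative only: restates the IP-version normalization from the patch. */
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t normalize_ip_ver(uint32_t hw_ip_ver)
	{
		/* old formats 00MMmmmm / 01MMmmmm: clear the top byte, set bit 28 */
		if (((hw_ip_ver & (0xFFu << 24)) == (0x1u << 24)) ||
		    ((hw_ip_ver & (0xFFu << 24)) == 0)) {
			hw_ip_ver &= ~(0xFFu << 24);
			hw_ip_ver |= (0x1u << 28);
		}
		return hw_ip_ver;	/* new-format values pass through unchanged */
	}

	int main(void)
	{
		/* examples from the patch comment */
		printf("0x%08x -> 0x%08x\n", 0x00360000u,
		       (unsigned)normalize_ip_ver(0x00360000u));	/* 0x10360000 */
		printf("0x%08x -> 0x%08x\n", 0x01450000u,
		       (unsigned)normalize_ip_ver(0x01450000u));	/* 0x10450000 */
		return 0;
	}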
