Diffstat (limited to 'drivers/ufs/host/ufs-mediatek.c')
 drivers/ufs/host/ufs-mediatek.c | 330
 1 file changed, 297 insertions(+), 33 deletions(-)
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 182f58d0c9db..86ae73b89d4d 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -50,6 +50,7 @@ static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
static const struct of_device_id ufs_mtk_of_match[] = {
{ .compatible = "mediatek,mt8183-ufshci" },
+ { .compatible = "mediatek,mt8195-ufshci" },
{},
};
MODULE_DEVICE_TABLE(of, ufs_mtk_of_match);
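For context, this match table feeds the platform driver registration at the bottom of this file; a minimal sketch of that wiring (recalled from the driver, so treat the exact fields as illustrative):

    static struct platform_driver ufs_mtk_pltfrm_driver = {
            .probe  = ufs_mtk_probe,
            .driver = {
                    .name           = "ufshcd-mtk",
                    .of_match_table = ufs_mtk_of_match,
            },
    };

With the new entry, boards declaring compatible = "mediatek,mt8195-ufshci" bind to the same driver.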
@@ -96,49 +97,59 @@ static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
+ return host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
}
static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
+ return host->caps & UFS_MTK_CAP_VA09_PWR_CTRL;
}
static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
+ return host->caps & UFS_MTK_CAP_BROKEN_VCC;
}
static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
+ return host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO;
}
static bool ufs_mtk_is_tx_skew_fix(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return (host->caps & UFS_MTK_CAP_TX_SKEW_FIX);
+ return host->caps & UFS_MTK_CAP_TX_SKEW_FIX;
}
static bool ufs_mtk_is_rtff_mtcmos(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return (host->caps & UFS_MTK_CAP_RTFF_MTCMOS);
+ return host->caps & UFS_MTK_CAP_RTFF_MTCMOS;
}
static bool ufs_mtk_is_allow_vccqx_lpm(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return (host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM);
+ return host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM;
+}
+
+static bool ufs_mtk_is_clk_scale_ready(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct ufs_mtk_clk *mclk = &host->mclk;
+
+ return mclk->ufs_sel_clki &&
+ mclk->ufs_sel_max_clki &&
+ mclk->ufs_sel_min_clki;
}
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
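A note on the `!!` cleanups above: these helpers return bool, and C's conversion to _Bool already normalizes any nonzero value to true, so the explicit double negation was redundant. A self-contained sketch of the same idiom:

    #include <stdbool.h>

    static bool has_cap(unsigned int caps, unsigned int flag)
    {
            return caps & flag; /* implicitly converted to true/false */
    }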
@@ -267,6 +278,13 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
ufshcd_writel(hba,
ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
REG_UFS_XOUFS_CTRL);
+
+ /* DDR_EN setting */
+ if (host->ip_ver >= IP_VER_MT6989) {
+ ufshcd_rmwl(hba, UFS_MASK(0x7FFF, 8),
+ 0x453000, REG_UFS_MMIO_OPT_CTRL_0);
+ }
+
}
return 0;
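The DDR_EN write uses ufshcd_rmwl(), the UFS core's read-modify-write helper, with UFS_MASK(m, s) producing `(m) << (s)`. Hand-expanded (illustrative only), the call amounts to:

    u32 val = ufshcd_readl(hba, REG_UFS_MMIO_OPT_CTRL_0);

    val &= ~(0x7FFF << 8);  /* UFS_MASK(0x7FFF, 8): clear bits 8..22 */
    val |= 0x453000;        /* install the new DDR_EN field value    */
    ufshcd_writel(hba, val, REG_UFS_MMIO_OPT_CTRL_0);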
@@ -344,7 +362,16 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
- ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);
+ /*
+ * If a clock-on request times out, assume the clock is off and
+ * notify tfa to apply the clock-off settings (keep DIFN disabled,
+ * release the resource).
+ * If a clock-off request times out, assume the clock will turn off
+ * eventually and clear ref_clk_enabled directly (keep DIFN disabled,
+ * keep the resource).
+ */
+ if (on)
+ ufs_mtk_ref_clk_notify(false, POST_CHANGE, res);
+ else
+ host->ref_clk_enabled = false;
return -ETIMEDOUT;
@@ -663,6 +690,9 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
if (of_property_read_bool(np, "mediatek,ufs-rtff-mtcmos"))
host->caps |= UFS_MTK_CAP_RTFF_MTCMOS;
+ if (of_property_read_bool(np, "mediatek,ufs-broken-rtc"))
+ host->caps |= UFS_MTK_CAP_MCQ_BROKEN_RTC;
+
dev_info(hba->dev, "caps: 0x%x", host->caps);
}
@@ -779,6 +809,91 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
return ret;
}
+static u32 ufs_mtk_mcq_get_irq(struct ufs_hba *hba, unsigned int cpu)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct blk_mq_tag_set *tag_set = &hba->host->tag_set;
+ struct blk_mq_queue_map *map = &tag_set->map[HCTX_TYPE_DEFAULT];
+ unsigned int nr = map->nr_queues;
+ unsigned int q_index;
+
+ q_index = map->mq_map[cpu];
+ if (q_index >= nr) {
+ dev_err(hba->dev, "hwq index %d exceeds %d\n",
+ q_index, nr);
+ return MTK_MCQ_INVALID_IRQ;
+ }
+
+ return host->mcq_intr_info[q_index].irq;
+}
+
+static void ufs_mtk_mcq_set_irq_affinity(struct ufs_hba *hba, unsigned int cpu)
+{
+ unsigned int irq, _cpu;
+ int ret;
+
+ irq = ufs_mtk_mcq_get_irq(hba, cpu);
+ if (irq == MTK_MCQ_INVALID_IRQ) {
+ dev_err(hba->dev, "invalid irq. unable to bind irq to cpu%d", cpu);
+ return;
+ }
+
+ /* Force-migrate the irq of cpu0 to cpu3 */
+ _cpu = (cpu == 0) ? 3 : cpu;
+ ret = irq_set_affinity(irq, cpumask_of(_cpu));
+ if (ret) {
+ dev_err(hba->dev, "set irq %d affinity to CPU %d failed\n",
+ irq, _cpu);
+ return;
+ }
+ dev_info(hba->dev, "set irq %d affinity to CPU: %d\n", irq, _cpu);
+}
+
+static bool ufs_mtk_is_legacy_chipset(struct ufs_hba *hba, u32 hw_ip_ver)
+{
+ bool is_legacy = false;
+
+ switch (hw_ip_ver) {
+ case IP_LEGACY_VER_MT6893:
+ case IP_LEGACY_VER_MT6781:
+ /* Add other legacy chipset IDs here as needed */
+ is_legacy = true;
+ break;
+ default:
+ break;
+ }
+ dev_info(hba->dev, "legacy IP version - 0x%x, is legacy : %d", hw_ip_ver, is_legacy);
+
+ return is_legacy;
+}
+
+/*
+ * The HW version format changed from 01MMmmmm to 1MMMmmmm starting with
+ * project MT6878. To keep version comparisons correct, SW remaps the
+ * version number for the following earlier projects:
+ *   IP_VER_MT6983  0x00360000 -> 0x10360000
+ *   IP_VER_MT6897  0x01440000 -> 0x10440000
+ *   IP_VER_MT6989  0x01450000 -> 0x10450000
+ *   IP_VER_MT6991  0x01460000 -> 0x10460000
+ */
+static void ufs_mtk_get_hw_ip_version(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ u32 hw_ip_ver;
+
+ hw_ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
+
+ if (((hw_ip_ver & (0xFF << 24)) == (0x1 << 24)) ||
+ ((hw_ip_ver & (0xFF << 24)) == 0)) {
+ hw_ip_ver &= ~(0xFF << 24);
+ hw_ip_ver |= (0x1 << 28);
+ }
+
+ host->ip_ver = hw_ip_ver;
+
+ host->legacy_ip_ver = ufs_mtk_is_legacy_chipset(hba, hw_ip_ver);
+}
+
static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
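A worked example of the remap in ufs_mtk_get_hw_ip_version(), using the IP_VER_MT6989 value from the table above (illustrative):

    u32 v = 0x01450000;                      /* raw 01MMmmmm value  */

    if ((v & (0xFF << 24)) == (0x1 << 24) || /* top byte is 0x01... */
        (v & (0xFF << 24)) == 0) {           /* ...or 0x00          */
            v &= ~(0xFF << 24);              /* v = 0x00450000      */
            v |= 0x1 << 28;                  /* v = 0x10450000      */
    }

Values already in the new 1MMMmmmm layout (top byte 0x10 or higher) match neither condition and pass through unchanged, so numeric comparison stays monotonic across old and new parts.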
@@ -818,8 +933,10 @@ static void ufs_mtk_init_clocks(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct list_head *head = &hba->clk_list_head;
- struct ufs_mtk_clk *mclk = &host->mclk;
struct ufs_clk_info *clki, *clki_tmp;
+ struct device *dev = hba->dev;
+ struct regulator *reg;
+ u32 volt;
/*
* Find private clocks and store them in struct ufs_mtk_clk.
@@ -837,15 +954,57 @@ static void ufs_mtk_init_clocks(struct ufs_hba *hba)
host->mclk.ufs_sel_min_clki = clki;
clk_disable_unprepare(clki->clk);
list_del(&clki->list);
+ } else if (!strcmp(clki->name, "ufs_fde")) {
+ host->mclk.ufs_fde_clki = clki;
+ } else if (!strcmp(clki->name, "ufs_fde_max_src")) {
+ host->mclk.ufs_fde_max_clki = clki;
+ clk_disable_unprepare(clki->clk);
+ list_del(&clki->list);
+ } else if (!strcmp(clki->name, "ufs_fde_min_src")) {
+ host->mclk.ufs_fde_min_clki = clki;
+ clk_disable_unprepare(clki->clk);
+ list_del(&clki->list);
}
}
- if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
- !mclk->ufs_sel_min_clki) {
+ list_for_each_entry(clki, head, list) {
+ dev_info(hba->dev, "clk \"%s\" present", clki->name);
+ }
+
+ if (!ufs_mtk_is_clk_scale_ready(hba)) {
hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
dev_info(hba->dev,
"%s: Clk-scaling not ready. Feature disabled.",
__func__);
+ return;
+ }
+
+ /*
+ * Get the vcore regulator by default if the dts provides these
+ * settings, regardless of clock scaling support (scaling may be
+ * disabled by the customer).
+ */
+ reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
+ if (IS_ERR(reg)) {
+ dev_info(dev, "failed to get dvfsrc-vcore: %ld",
+ PTR_ERR(reg));
+ return;
+ }
+
+ if (of_property_read_u32(dev->of_node, "clk-scale-up-vcore-min",
+ &volt)) {
+ dev_info(dev, "failed to get clk-scale-up-vcore-min");
+ return;
+ }
+
+ host->mclk.reg_vcore = reg;
+ host->mclk.vcore_volt = volt;
+
+ /* If the default boot gear is the max gear, request vcore */
+ if (reg && volt && host->clk_scale_up) {
+ if (regulator_set_voltage(reg, volt, INT_MAX)) {
+ dev_info(hba->dev,
+ "Failed to set vcore to %d\n", volt);
+ }
}
}
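The vcore handling here follows the regulator floor-request pattern: passing INT_MAX as the upper bound requests only a minimum voltage, and a later call with a minimum of 0 releases that floor. In sketch form (mirroring the calls this patch makes):

    /* scale up: hold vcore at or above the dts-provided minimum */
    regulator_set_voltage(reg, volt, INT_MAX);

    /* scale down: release the floor; the framework aggregates any
     * constraints still held by other consumers
     */
    regulator_set_voltage(reg, 0, INT_MAX);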
@@ -1014,13 +1173,17 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Enable clk scaling*/
hba->caps |= UFSHCD_CAP_CLK_SCALING;
+ host->clk_scale_up = true; /* default is max freq */
/* Set runtime pm delay to replace default */
shost->rpm_autosuspend_delay = MTK_RPM_AUTOSUSPEND_DELAY_MS;
hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
+
hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
- hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
+ if (host->caps & UFS_MTK_CAP_MCQ_BROKEN_RTC)
+ hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
+
hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
@@ -1050,7 +1213,7 @@ static int ufs_mtk_init(struct ufs_hba *hba)
ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
- host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
+ ufs_mtk_get_hw_ip_version(hba);
goto out;
@@ -1505,6 +1668,13 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
u16 mid = dev_info->wmanufacturerid;
+ unsigned int cpu;
+
+ if (hba->mcq_enabled) {
+ /* Iterate all cpus to set affinity for mcq irqs */
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+ ufs_mtk_mcq_set_irq_affinity(hba, cpu);
+ }
if (mid == UFS_VENDOR_SAMSUNG) {
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
@@ -1598,24 +1768,30 @@ static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
hba->vps->ondemand_data.downdifferential = 20;
}
-/**
- * ufs_mtk_clk_scale - Internal clk scaling operation
- *
- * MTK platform supports clk scaling by switching parent of ufs_sel(mux).
- * The ufs_sel downstream to ufs_ck which feeds directly to UFS hardware.
- * Max and min clocks rate of ufs_sel defined in dts should match rate of
- * "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
- * This prevent changing rate of pll clock that is shared between modules.
- *
- * @hba: per adapter instance
- * @scale_up: True for scaling up and false for scaling down
- */
-static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
+static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct ufs_mtk_clk *mclk = &host->mclk;
struct ufs_clk_info *clki = mclk->ufs_sel_clki;
- int ret = 0;
+ struct ufs_clk_info *fde_clki = mclk->ufs_fde_clki;
+ struct regulator *reg;
+ int volt, ret = 0;
+ bool clk_bind_vcore = false;
+ bool clk_fde_scale = false;
+
+ if (!hba->clk_scaling.is_initialized)
+ return;
+
+ if (!clki || !fde_clki)
+ return;
+
+ reg = host->mclk.reg_vcore;
+ volt = host->mclk.vcore_volt;
+ if (reg && volt != 0)
+ clk_bind_vcore = true;
+
+ if (mclk->ufs_fde_max_clki && mclk->ufs_fde_min_clki)
+ clk_fde_scale = true;
ret = clk_prepare_enable(clki->clk);
if (ret) {
@@ -1624,21 +1800,109 @@ static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
return;
}
+ if (clk_fde_scale) {
+ ret = clk_prepare_enable(fde_clki->clk);
+ if (ret) {
+ dev_info(hba->dev,
+ "fde clk_prepare_enable() fail, ret: %d\n", ret);
+ return;
+ }
+ }
+
if (scale_up) {
+ if (clk_bind_vcore) {
+ ret = regulator_set_voltage(reg, volt, INT_MAX);
+ if (ret) {
+ dev_info(hba->dev,
+ "Failed to set vcore to %d\n", volt);
+ goto out;
+ }
+ }
+
ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
- clki->curr_freq = clki->max_freq;
+ if (ret) {
+ dev_info(hba->dev, "Failed to set clk mux, ret = %d\n",
+ ret);
+ }
+
+ if (clk_fde_scale) {
+ ret = clk_set_parent(fde_clki->clk,
+ mclk->ufs_fde_max_clki->clk);
+ if (ret) {
+ dev_info(hba->dev,
+ "Failed to set fde clk mux, ret = %d\n",
+ ret);
+ }
+ }
} else {
+ if (clk_fde_scale) {
+ ret = clk_set_parent(fde_clki->clk,
+ mclk->ufs_fde_min_clki->clk);
+ if (ret) {
+ dev_info(hba->dev,
+ "Failed to set fde clk mux, ret = %d\n",
+ ret);
+ goto out;
+ }
+ }
+
ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
- clki->curr_freq = clki->min_freq;
- }
+ if (ret) {
+ dev_info(hba->dev, "Failed to set clk mux, ret = %d\n",
+ ret);
+ goto out;
+ }
- if (ret) {
- dev_info(hba->dev,
- "Failed to set ufs_sel_clki, ret: %d\n", ret);
+ if (clk_bind_vcore) {
+ ret = regulator_set_voltage(reg, 0, INT_MAX);
+ if (ret) {
+ dev_info(hba->dev,
+ "failed to set vcore to MIN\n");
+ }
+ }
}
+out:
clk_disable_unprepare(clki->clk);
+ if (clk_fde_scale)
+ clk_disable_unprepare(fde_clki->clk);
+}
+
+/**
+ * ufs_mtk_clk_scale - Internal clk scaling operation
+ *
+ * MTK platforms support clk scaling by switching the parent of ufs_sel,
+ * a mux whose output, ufs_ck, feeds the UFS hardware directly. The max
+ * and min rates of ufs_sel defined in the dts should match the rates of
+ * "ufs_sel_max_src" and "ufs_sel_min_src" respectively. This prevents
+ * changing the rate of a pll clock that is shared between modules.
+ *
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up and false for scaling down
+ */
+static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct ufs_mtk_clk *mclk = &host->mclk;
+ struct ufs_clk_info *clki = mclk->ufs_sel_clki;
+
+ if (host->clk_scale_up == scale_up)
+ goto out;
+
+ if (scale_up)
+ _ufs_mtk_clk_scale(hba, true);
+ else
+ _ufs_mtk_clk_scale(hba, false);
+
+ host->clk_scale_up = scale_up;
+
+ /* Must always be set before clk_set_rate() */
+ if (scale_up)
+ clki->curr_freq = clki->max_freq;
+ else
+ clki->curr_freq = clki->min_freq;
+out:
trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
}