Diffstat (limited to 'drivers/ufs')
-rw-r--r--   drivers/ufs/core/ufs-mcq.c           |  11
-rw-r--r--   drivers/ufs/core/ufs-sysfs.c         |   2
-rw-r--r--   drivers/ufs/core/ufs_trace.h         |   1
-rw-r--r--   drivers/ufs/core/ufs_trace_types.h   |  24
-rw-r--r--   drivers/ufs/core/ufshcd.c            |  60
-rw-r--r--   drivers/ufs/host/ufs-exynos.c        |  10
-rw-r--r--   drivers/ufs/host/ufs-mediatek.c      | 352
-rw-r--r--   drivers/ufs/host/ufs-mediatek.h      |   1
-rw-r--r--   drivers/ufs/host/ufs-qcom.c          | 226
-rw-r--r--   drivers/ufs/host/ufs-qcom.h          |  28
-rw-r--r--   drivers/ufs/host/ufshcd-pltfrm.c     |  33
-rw-r--r--   drivers/ufs/host/ufshcd-pltfrm.h     |   1
12 files changed, 545 insertions, 204 deletions
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index cc88aaa106da..c9bdd4140fd0 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -29,6 +29,10 @@
 #define MCQ_ENTRY_SIZE_IN_DWORD 8
 #define CQE_UCD_BA GENMASK_ULL(63, 7)
 
+#define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
+                                 UFSHCD_ERROR_MASK |\
+                                 MCQ_CQ_EVENT_STATUS)
+
 /* Max mcq register polling time in microseconds */
 #define MCQ_POLL_US 500000
 
@@ -355,9 +359,16 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_lock);
 void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
 {
         struct ufs_hw_queue *hwq;
+        u32 intrs;
         u16 qsize;
         int i;
 
+        /* Enable required interrupts */
+        intrs = UFSHCD_ENABLE_MCQ_INTRS;
+        if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
+                intrs &= ~MCQ_CQ_EVENT_STATUS;
+        ufshcd_enable_intr(hba, intrs);
+
         for (i = 0; i < hba->nr_hw_queues; i++) {
                 hwq = &hba->uhq[i];
                 hwq->id = i;
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 4bd7d491e3c5..0086816b27cd 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -512,6 +512,8 @@ static ssize_t pm_qos_enable_show(struct device *dev,
 {
         struct ufs_hba *hba = dev_get_drvdata(dev);
 
+        guard(mutex)(&hba->pm_qos_mutex);
+
         return sysfs_emit(buf, "%d\n", hba->pm_qos_enabled);
 }
 
diff --git a/drivers/ufs/core/ufs_trace.h b/drivers/ufs/core/ufs_trace.h
index caa32e23ffa5..584c2b5c6ad9 100644
--- a/drivers/ufs/core/ufs_trace.h
+++ b/drivers/ufs/core/ufs_trace.h
@@ -11,6 +11,7 @@
 
 #include <ufs/ufs.h>
 #include <linux/tracepoint.h>
+#include "ufs_trace_types.h"
 
 #define str_opcode(opcode)                      \
         __print_symbolic(opcode,                \
diff --git a/drivers/ufs/core/ufs_trace_types.h b/drivers/ufs/core/ufs_trace_types.h
new file mode 100644
index 000000000000..f2d5ad1d92b9
--- /dev/null
+++ b/drivers/ufs/core/ufs_trace_types.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _UFS_TRACE_TYPES_H_
+#define _UFS_TRACE_TYPES_H_
+
+enum ufs_trace_str_t {
+        UFS_CMD_SEND,
+        UFS_CMD_COMP,
+        UFS_DEV_COMP,
+        UFS_QUERY_SEND,
+        UFS_QUERY_COMP,
+        UFS_QUERY_ERR,
+        UFS_TM_SEND,
+        UFS_TM_COMP,
+        UFS_TM_ERR
+};
+
+enum ufs_trace_tsf_t {
+        UFS_TSF_CDB,
+        UFS_TSF_OSF,
+        UFS_TSF_TM_INPUT,
+        UFS_TSF_TM_OUTPUT
+};
+
+#endif /* _UFS_TRACE_TYPES_H_ */
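Aside on the guard(mutex) pattern used in the ufs-sysfs.c hunk above: guard(mutex)() is the scope-based lock guard from <linux/cleanup.h>, so the mutex is released automatically when the enclosing scope ends. A minimal sketch of the equivalent explicit locking; the _unrolled helper name is hypothetical and only for illustration:

    static ssize_t pm_qos_enable_show_unrolled(struct device *dev,
                                               struct device_attribute *attr,
                                               char *buf)
    {
            struct ufs_hba *hba = dev_get_drvdata(dev);
            ssize_t ret;

            mutex_lock(&hba->pm_qos_mutex);   /* what guard(mutex)() takes */
            ret = sysfs_emit(buf, "%d\n", hba->pm_qos_enabled);
            mutex_unlock(&hba->pm_qos_mutex); /* what the guard drops at scope exit */
            return ret;
    }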
"UPIU[%d] - complete time %lld us\n", - tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000)); + if (hba->monitor.enabled) { + dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n", tag, + div_u64(lrbp->issue_time_stamp_local_clock, 1000)); + dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n", tag, + div_u64(lrbp->compl_time_stamp_local_clock, 1000)); + } dev_err(hba->dev, "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n", tag, (u64)lrbp->utrd_dma_addr); @@ -1045,6 +1045,7 @@ EXPORT_SYMBOL_GPL(ufshcd_is_hba_active); */ void ufshcd_pm_qos_init(struct ufs_hba *hba) { + guard(mutex)(&hba->pm_qos_mutex); if (hba->pm_qos_enabled) return; @@ -1061,6 +1062,8 @@ void ufshcd_pm_qos_init(struct ufs_hba *hba) */ void ufshcd_pm_qos_exit(struct ufs_hba *hba) { + guard(mutex)(&hba->pm_qos_mutex); + if (!hba->pm_qos_enabled) return; @@ -1075,6 +1078,8 @@ void ufshcd_pm_qos_exit(struct ufs_hba *hba) */ static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on) { + guard(mutex)(&hba->pm_qos_mutex); + if (!hba->pm_qos_enabled) return; @@ -2230,11 +2235,13 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba) static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) { bool queue_resume_work = false; - ktime_t curr_t = ktime_get(); + ktime_t curr_t; if (!ufshcd_is_clkscaling_supported(hba)) return; + curr_t = ktime_get(); + guard(spinlock_irqsave)(&hba->clk_scaling.lock); if (!hba->clk_scaling.active_reqs++) @@ -2354,10 +2361,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag, struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; unsigned long flags; - lrbp->issue_time_stamp = ktime_get(); - lrbp->issue_time_stamp_local_clock = local_clock(); - lrbp->compl_time_stamp = ktime_set(0, 0); - lrbp->compl_time_stamp_local_clock = 0; + if (hba->monitor.enabled) { + lrbp->issue_time_stamp = ktime_get(); + lrbp->issue_time_stamp_local_clock = local_clock(); + lrbp->compl_time_stamp = ktime_set(0, 0); + lrbp->compl_time_stamp_local_clock = 0; + } ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND); if (lrbp->cmd) ufshcd_clk_scaling_start_busy(hba); @@ -5622,8 +5631,10 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag, enum utp_ocs ocs; lrbp = &hba->lrb[task_tag]; - lrbp->compl_time_stamp = ktime_get(); - lrbp->compl_time_stamp_local_clock = local_clock(); + if (hba->monitor.enabled) { + lrbp->compl_time_stamp = ktime_get(); + lrbp->compl_time_stamp_local_clock = local_clock(); + } cmd = lrbp->cmd; if (cmd) { if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) @@ -6457,13 +6468,14 @@ void ufshcd_schedule_eh_work(struct ufs_hba *hba) } } -static void ufshcd_force_error_recovery(struct ufs_hba *hba) +void ufshcd_force_error_recovery(struct ufs_hba *hba) { spin_lock_irq(hba->host->host_lock); hba->force_reset = true; ufshcd_schedule_eh_work(hba); spin_unlock_irq(hba->host->host_lock); } +EXPORT_SYMBOL_GPL(ufshcd_force_error_recovery); static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow) { @@ -8786,7 +8798,8 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba) struct ufs_dev_info *dev_info = &hba->dev_info; struct utp_upiu_query_v4_0 *upiu_data; - if (dev_info->wspecversion < 0x400) + if (dev_info->wspecversion < 0x400 || + hba->dev_quirks & UFS_DEVICE_QUIRK_NO_TIMESTAMP_SUPPORT) return; ufshcd_dev_man_lock(hba); @@ -8913,16 +8926,11 @@ err: static void ufshcd_config_mcq(struct ufs_hba *hba) { int ret; - u32 intrs; ret = ufshcd_mcq_vops_config_esi(hba); hba->mcq_esi_enabled = !ret; dev_info(hba->dev, "ESI %sconfigured\n", ret ? 
"is not " : ""); - intrs = UFSHCD_ENABLE_MCQ_INTRS; - if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR) - intrs &= ~MCQ_CQ_EVENT_STATUS; - ufshcd_enable_intr(hba, intrs); ufshcd_mcq_make_queues_operational(hba); ufshcd_mcq_config_mac(hba, hba->nutrs); @@ -10756,6 +10764,10 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) mutex_init(&hba->ee_ctrl_mutex); mutex_init(&hba->wb_mutex); + + /* Initialize mutex for PM QoS request synchronization */ + mutex_init(&hba->pm_qos_mutex); + init_rwsem(&hba->clk_scaling_lock); ufshcd_init_clk_gating(hba); diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c index f0adcd9dd553..70d195179eba 100644 --- a/drivers/ufs/host/ufs-exynos.c +++ b/drivers/ufs/host/ufs-exynos.c @@ -776,7 +776,7 @@ static void exynos_ufs_config_sync_pattern_mask(struct exynos_ufs *ufs, u32 mask, sync_len; enum { SYNC_LEN_G1 = 80 * 1000, /* 80us */ - SYNC_LEN_G2 = 40 * 1000, /* 44us */ + SYNC_LEN_G2 = 40 * 1000, /* 40us */ SYNC_LEN_G3 = 20 * 1000, /* 20us */ }; int i; @@ -1896,6 +1896,13 @@ static int fsd_ufs_pre_pwr_change(struct exynos_ufs *ufs, return 0; } +static int fsd_ufs_suspend(struct exynos_ufs *ufs) +{ + exynos_ufs_gate_clks(ufs); + hci_writel(ufs, 0, HCI_GPIO_OUT); + return 0; +} + static inline u32 get_mclk_period_unipro_18(struct exynos_ufs *ufs) { return (16 * 1000 * 1000000UL / ufs->mclk_rate); @@ -2162,6 +2169,7 @@ static const struct exynos_ufs_drv_data fsd_ufs_drvs = { .pre_link = fsd_ufs_pre_link, .post_link = fsd_ufs_post_link, .pre_pwr_change = fsd_ufs_pre_pwr_change, + .suspend = fsd_ufs_suspend, }; static const struct exynos_ufs_drv_data gs101_ufs_drvs = { diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c index f902ce08c95a..758a393a9de1 100644 --- a/drivers/ufs/host/ufs-mediatek.c +++ b/drivers/ufs/host/ufs-mediatek.c @@ -29,6 +29,7 @@ #include "ufs-mediatek-sip.h" static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq); +static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up); #define CREATE_TRACE_POINTS #include "ufs-mediatek-trace.h" @@ -415,7 +416,7 @@ static void ufs_mtk_dbg_sel(struct ufs_hba *hba) } } -static void ufs_mtk_wait_idle_state(struct ufs_hba *hba, +static int ufs_mtk_wait_idle_state(struct ufs_hba *hba, unsigned long retry_ms) { u64 timeout, time_checked; @@ -451,8 +452,12 @@ static void ufs_mtk_wait_idle_state(struct ufs_hba *hba, break; } while (time_checked < timeout); - if (wait_idle && sm != VS_HCE_BASE) + if (wait_idle && sm != VS_HCE_BASE) { dev_info(hba->dev, "wait idle tmo: 0x%x\n", val); + return -ETIMEDOUT; + } + + return 0; } static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state, @@ -798,8 +803,14 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on, clk_pwr_off = true; } - if (clk_pwr_off) + if (clk_pwr_off) { ufs_mtk_pwr_ctrl(hba, false); + } else { + dev_warn(hba->dev, "Clock is not turned off, hba->ahit = 0x%x, AHIT = 0x%x\n", + hba->ahit, + ufshcd_readl(hba, + REG_AUTO_HIBERNATE_IDLE_TIMER)); + } ufs_mtk_mcq_disable_irq(hba); } else if (on && status == POST_CHANGE) { ufs_mtk_pwr_ctrl(hba, true); @@ -1018,7 +1029,7 @@ static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba) struct arm_smccc_res res; int err, ver; - if (hba->vreg_info.vcc) + if (info->vcc) return 0; if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) { @@ -1075,6 +1086,80 @@ static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba) } } +static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba) +{ + unsigned long flags; + u32 ah_ms = 10; + 
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index f902ce08c95a..758a393a9de1 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -29,6 +29,7 @@
 #include "ufs-mediatek-sip.h"
 
 static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
+static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up);
 
 #define CREATE_TRACE_POINTS
 #include "ufs-mediatek-trace.h"
@@ -415,7 +416,7 @@ static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
         }
 }
 
-static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
+static int ufs_mtk_wait_idle_state(struct ufs_hba *hba,
                             unsigned long retry_ms)
 {
         u64 timeout, time_checked;
@@ -451,8 +452,12 @@ static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
                 break;
         } while (time_checked < timeout);
 
-        if (wait_idle && sm != VS_HCE_BASE)
+        if (wait_idle && sm != VS_HCE_BASE) {
                 dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
+                return -ETIMEDOUT;
+        }
+
+        return 0;
 }
 
 static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
@@ -798,8 +803,14 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
                                 clk_pwr_off = true;
                 }
 
-                if (clk_pwr_off)
+                if (clk_pwr_off) {
                         ufs_mtk_pwr_ctrl(hba, false);
+                } else {
+                        dev_warn(hba->dev, "Clock is not turned off, hba->ahit = 0x%x, AHIT = 0x%x\n",
+                                 hba->ahit,
+                                 ufshcd_readl(hba,
+                                              REG_AUTO_HIBERNATE_IDLE_TIMER));
+                }
                 ufs_mtk_mcq_disable_irq(hba);
         } else if (on && status == POST_CHANGE) {
                 ufs_mtk_pwr_ctrl(hba, true);
@@ -1018,7 +1029,7 @@ static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
         struct arm_smccc_res res;
         int err, ver;
 
-        if (hba->vreg_info.vcc)
+        if (info->vcc)
                 return 0;
 
         if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
@@ -1075,6 +1086,80 @@ static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
         }
 }
 
+static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
+{
+        unsigned long flags;
+        u32 ah_ms = 10;
+        u32 ah_scale, ah_timer;
+        u32 scale_us[] = {1, 10, 100, 1000, 10000, 100000};
+
+        if (ufshcd_is_clkgating_allowed(hba)) {
+                if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit) {
+                        ah_scale = FIELD_GET(UFSHCI_AHIBERN8_SCALE_MASK,
+                                             hba->ahit);
+                        ah_timer = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
+                                             hba->ahit);
+                        if (ah_scale <= 5)
+                                ah_ms = ah_timer * scale_us[ah_scale] / 1000;
+                }
+
+                spin_lock_irqsave(hba->host->host_lock, flags);
+                hba->clk_gating.delay_ms = max(ah_ms, 10U);
+                spin_unlock_irqrestore(hba->host->host_lock, flags);
+        }
+}
+
+/* Convert microseconds to Auto-Hibernate Idle Timer register value */
+static u32 ufs_mtk_us_to_ahit(unsigned int timer)
+{
+        unsigned int scale;
+
+        for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale)
+                timer /= UFSHCI_AHIBERN8_SCALE_FACTOR;
+
+        return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
+               FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
+}
+
+static void ufs_mtk_fix_ahit(struct ufs_hba *hba)
+{
+        unsigned int us;
+
+        if (ufshcd_is_auto_hibern8_supported(hba)) {
+                switch (hba->dev_info.wmanufacturerid) {
+                case UFS_VENDOR_SAMSUNG:
+                        /* configure auto-hibern8 timer to 3.5 ms */
+                        us = 3500;
+                        break;
+
+                case UFS_VENDOR_MICRON:
+                        /* configure auto-hibern8 timer to 2 ms */
+                        us = 2000;
+                        break;
+
+                default:
+                        /* configure auto-hibern8 timer to 1 ms */
+                        us = 1000;
+                        break;
+                }
+
+                hba->ahit = ufs_mtk_us_to_ahit(us);
+        }
+
+        ufs_mtk_setup_clk_gating(hba);
+}
+
+static void ufs_mtk_fix_clock_scaling(struct ufs_hba *hba)
+{
+        /* UFS version is below 4.0, clock scaling is not necessary */
+        if ((hba->dev_info.wspecversion < 0x0400) &&
+            ufs_mtk_is_clk_scale_ready(hba)) {
+                hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
+
+                _ufs_mtk_clk_scale(hba, false);
+        }
+}
+
 static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
 {
         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
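Aside on ufs_mtk_us_to_ahit() above: assuming the standard UFSHCI auto-hibernate field layout (10-bit timer field, 3-bit scale field, one timer tick = 10^scale microseconds), the loop divides by the scale factor until the value fits in the timer field. A stand-alone worked example (userspace C, illustration only; 1023 and 10 mirror UFSHCI_AHIBERN8_TIMER_MASK and UFSHCI_AHIBERN8_SCALE_FACTOR):

    #include <stdio.h>

    int main(void)
    {
            unsigned int timer = 3500, scale = 0, us = 1;

            while (timer > 1023) {  /* shrink until it fits the timer field */
                    timer /= 10;
                    scale++;
            }
            for (unsigned int i = 0; i < scale; i++)
                    us *= 10;       /* tick size in microseconds */

            /* 3500 us -> timer=350, scale=1: 350 ticks x 10 us = 3.5 ms */
            printf("timer=%u scale=%u (tick=%u us)\n", timer, scale, us);
            return 0;
    }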
@@ -1240,6 +1325,10 @@ static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
             dev_req_params->gear_rx < UFS_HS_G4)
                 return false;
 
+        if (dev_req_params->pwr_tx == SLOW_MODE ||
+            dev_req_params->pwr_rx == SLOW_MODE)
+                return false;
+
         return true;
 }
 
@@ -1255,6 +1344,10 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
         host_params.hs_rx_gear = UFS_HS_G5;
         host_params.hs_tx_gear = UFS_HS_G5;
 
+        if (dev_max_params->pwr_rx == SLOW_MODE ||
+            dev_max_params->pwr_tx == SLOW_MODE)
+                host_params.desired_working_mode = UFS_PWM_MODE;
+
         ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
         if (ret) {
                 pr_info("%s: failed to determine capabilities\n",
@@ -1278,6 +1371,28 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
                                PA_NO_ADAPT);
 
+                if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
+                        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
+                                       DL_FC0ProtectionTimeOutVal_Default);
+                        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
+                                       DL_TC0ReplayTimeOutVal_Default);
+                        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
+                                       DL_AFC0ReqTimeOutVal_Default);
+                        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
+                                       DL_FC1ProtectionTimeOutVal_Default);
+                        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
+                                       DL_TC1ReplayTimeOutVal_Default);
+                        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
+                                       DL_AFC1ReqTimeOutVal_Default);
+
+                        ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
+                                       DL_FC0ProtectionTimeOutVal_Default);
+                        ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
+                                       DL_TC0ReplayTimeOutVal_Default);
+                        ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
+                                       DL_AFC0ReqTimeOutVal_Default);
+                }
+
                 ret = ufshcd_uic_change_pwr_mode(hba,
                                         FASTAUTO_MODE << 4 | FASTAUTO_MODE);
 
@@ -1287,10 +1402,59 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
                 }
         }
 
-        if (host->hw_ver.major >= 3) {
+        /* if already configured to the requested pwr_mode, skip adapt */
+        if (dev_req_params->gear_rx == hba->pwr_info.gear_rx &&
+            dev_req_params->gear_tx == hba->pwr_info.gear_tx &&
+            dev_req_params->lane_rx == hba->pwr_info.lane_rx &&
+            dev_req_params->lane_tx == hba->pwr_info.lane_tx &&
+            dev_req_params->pwr_rx == hba->pwr_info.pwr_rx &&
+            dev_req_params->pwr_tx == hba->pwr_info.pwr_tx &&
+            dev_req_params->hs_rate == hba->pwr_info.hs_rate) {
+                return ret;
+        }
+
+        if (dev_req_params->pwr_rx == FAST_MODE ||
+            dev_req_params->pwr_rx == FASTAUTO_MODE) {
+                if (host->hw_ver.major >= 3) {
+                        ret = ufshcd_dme_configure_adapt(hba,
+                                                   dev_req_params->gear_tx,
+                                                   PA_INITIAL_ADAPT);
+                } else {
+                        ret = ufshcd_dme_configure_adapt(hba,
+                                                   dev_req_params->gear_tx,
+                                                   PA_NO_ADAPT);
+                }
+        } else {
                 ret = ufshcd_dme_configure_adapt(hba,
-                                           dev_req_params->gear_tx,
-                                           PA_INITIAL_ADAPT);
+                                           dev_req_params->gear_tx,
+                                           PA_NO_ADAPT);
+        }
+
+        return ret;
+}
+
+static int ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
+{
+        int ret;
+
+        /* disable auto-hibern8 */
+        ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
+
+        /* wait host return to idle state when auto-hibern8 off */
+        ret = ufs_mtk_wait_idle_state(hba, 5);
+        if (ret)
+                goto out;
+
+        ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
+
+out:
+        if (ret) {
+                dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
+
+                ufshcd_force_error_recovery(hba);
+
+                /* trigger error handler and break suspend */
+                ret = -EBUSY;
         }
 
         return ret;
@@ -1302,13 +1466,20 @@ static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
                                      struct ufs_pa_layer_attr *dev_req_params)
 {
         int ret = 0;
+        static u32 reg;
 
         switch (stage) {
         case PRE_CHANGE:
+                if (ufshcd_is_auto_hibern8_supported(hba)) {
+                        reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
+                        ufs_mtk_auto_hibern8_disable(hba);
+                }
                 ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
                                              dev_req_params);
                 break;
         case POST_CHANGE:
+                if (ufshcd_is_auto_hibern8_supported(hba))
+                        ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);
                 break;
         default:
                 ret = -EINVAL;
@@ -1342,6 +1513,7 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
 {
         int ret;
         u32 tmp;
+        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
         ufs_mtk_get_controller_version(hba);
 
@@ -1367,34 +1539,33 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
 
         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
 
+        /* Enable the 1144 functions setting */
+        if (host->ip_ver == IP_VER_MT6989) {
+                ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGOMC), &tmp);
+                if (ret)
+                        return ret;
+
+                tmp |= 0x10;
+                ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), tmp);
+        }
+
         return ret;
 }
 
-static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
+static void ufs_mtk_post_link(struct ufs_hba *hba)
 {
-        u32 ah_ms;
+        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+        u32 tmp;
 
-        if (ufshcd_is_clkgating_allowed(hba)) {
-                if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
-                        ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
-                                          hba->ahit);
-                else
-                        ah_ms = 10;
-                ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
+        /* fix device PA_INIT no adapt */
+        if (host->ip_ver >= IP_VER_MT6899) {
+                ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGOMC), &tmp);
+                tmp |= 0x100;
+                ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), tmp);
         }
-}
 
-static void ufs_mtk_post_link(struct ufs_hba *hba)
-{
         /* enable unipro clock gating feature */
         ufs_mtk_cfg_unipro_cg(hba, true);
-
-        /* will be configured during probe hba */
-        if (ufshcd_is_auto_hibern8_supported(hba))
-                hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
-                        FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
-
-        ufs_mtk_setup_clk_gating(hba);
 }
 
 static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
@@ -1421,11 +1592,11 @@ static int ufs_mtk_device_reset(struct ufs_hba *hba)
 {
         struct arm_smccc_res res;
 
-        /* disable hba before device reset */
-        ufshcd_hba_stop(hba);
-
         ufs_mtk_device_reset_ctrl(0, res);
 
+        /* disable hba in middle of device reset */
+        ufshcd_hba_stop(hba);
+
         /*
          * The reset signal is active low. UFS devices shall detect
          * more than or equal to 1us of positive or negative RST_n
@@ -1462,7 +1633,11 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
                 return err;
 
         /* Check link state to make sure exit h8 success */
-        ufs_mtk_wait_idle_state(hba, 5);
+        err = ufs_mtk_wait_idle_state(hba, 5);
+        if (err) {
+                dev_warn(hba->dev, "wait idle fail, err=%d\n", err);
+                return err;
+        }
         err = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
         if (err) {
                 dev_warn(hba->dev, "exit h8 state fail, err=%d\n", err);
@@ -1507,6 +1682,9 @@ static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
 {
         struct ufs_vreg *vccqx = NULL;
 
+        if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
+                return;
+
         if (hba->vreg_info.vccq)
                 vccqx = hba->vreg_info.vccq;
         else
@@ -1561,21 +1739,6 @@ static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
         }
 }
 
-static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
-{
-        int ret;
-
-        /* disable auto-hibern8 */
-        ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
-
-        /* wait host return to idle state when auto-hibern8 off */
-        ufs_mtk_wait_idle_state(hba, 5);
-
-        ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
-        if (ret)
-                dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
-}
-
 static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
         enum ufs_notify_change_status status)
 {
@@ -1584,7 +1747,7 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
 
         if (status == PRE_CHANGE) {
                 if (ufshcd_is_auto_hibern8_supported(hba))
-                        ufs_mtk_auto_hibern8_disable(hba);
+                        return ufs_mtk_auto_hibern8_disable(hba);
                 return 0;
         }
@@ -1642,8 +1805,21 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
         }
 
         return 0;
+
 fail:
-        return ufshcd_link_recovery(hba);
+        /*
+         * Check if the platform (parent) device has resumed, and ensure that
+         * power, clock, and MTCMOS are all turned on.
+         */
+        err = ufshcd_link_recovery(hba);
+        if (err) {
+                dev_err(hba->dev, "Device PM: req=%d, status:%d, err:%d\n",
+                        hba->dev->power.request,
+                        hba->dev->power.runtime_status,
+                        hba->dev->power.runtime_error);
+        }
+
+        return 0; /* Cannot return a failure, otherwise, the I/O will hang. */
 }
 
 static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
@@ -1726,6 +1902,8 @@ static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
 
         ufs_mtk_vreg_fix_vcc(hba);
         ufs_mtk_vreg_fix_vccqx(hba);
+        ufs_mtk_fix_ahit(hba);
+        ufs_mtk_fix_clock_scaling(hba);
 }
 
 static void ufs_mtk_event_notify(struct ufs_hba *hba,
@@ -2012,6 +2190,7 @@ static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
                         return ret;
                 }
         }
+        host->is_mcq_intr_enabled = true;
 
         return 0;
 }
@@ -2095,10 +2274,12 @@ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
 static int ufs_mtk_probe(struct platform_device *pdev)
 {
         int err;
-        struct device *dev = &pdev->dev;
-        struct device_node *reset_node;
-        struct platform_device *reset_pdev;
+        struct device *dev = &pdev->dev, *phy_dev = NULL;
+        struct device_node *reset_node, *phy_node = NULL;
+        struct platform_device *reset_pdev, *phy_pdev = NULL;
         struct device_link *link;
+        struct ufs_hba *hba;
+        struct ufs_mtk_host *host;
 
         reset_node = of_find_compatible_node(NULL, NULL,
                                              "ti,syscon-reset");
@@ -2125,13 +2306,51 @@ static int ufs_mtk_probe(struct platform_device *pdev)
         }
 
 skip_reset:
+        /* find phy node */
+        phy_node = of_parse_phandle(dev->of_node, "phys", 0);
+
+        if (phy_node) {
+                phy_pdev = of_find_device_by_node(phy_node);
+                if (!phy_pdev)
+                        goto skip_phy;
+                phy_dev = &phy_pdev->dev;
+
+                pm_runtime_set_active(phy_dev);
+                pm_runtime_enable(phy_dev);
+                pm_runtime_get_sync(phy_dev);
+
+                put_device(phy_dev);
+                dev_info(dev, "phys node found\n");
+        } else {
+                dev_notice(dev, "phys node not found\n");
+        }
+
+skip_phy:
         /* perform generic probe */
         err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
-
-out:
-        if (err)
+        if (err) {
                 dev_err(dev, "probe failed %d\n", err);
+                goto out;
+        }
+
+        hba = platform_get_drvdata(pdev);
+        if (!hba)
+                goto out;
+
+        if (phy_node && phy_dev) {
+                host = ufshcd_get_variant(hba);
+                host->phy_dev = phy_dev;
+        }
+
+        /*
+         * Because the default power setting of VSx (the upper layer of
+         * VCCQ/VCCQ2) is HWLP, we need to prevent VCCQ/VCCQ2 from
+         * entering LPM.
+         */
+        ufs_mtk_dev_vreg_set_lpm(hba, false);
+out:
+        of_node_put(phy_node);
         of_node_put(reset_node);
         return err;
 }
@@ -2156,27 +2375,38 @@ static int ufs_mtk_system_suspend(struct device *dev)
 
         ret = ufshcd_system_suspend(dev);
         if (ret)
-                return ret;
+                goto out;
+
+        if (pm_runtime_suspended(hba->dev))
+                goto out;
 
         ufs_mtk_dev_vreg_set_lpm(hba, true);
 
         if (ufs_mtk_is_rtff_mtcmos(hba))
                 ufs_mtk_mtcmos_ctrl(false, res);
 
-        return 0;
+out:
+        return ret;
 }
 
 static int ufs_mtk_system_resume(struct device *dev)
 {
+        int ret = 0;
         struct ufs_hba *hba = dev_get_drvdata(dev);
         struct arm_smccc_res res;
 
-        ufs_mtk_dev_vreg_set_lpm(hba, false);
+        if (pm_runtime_suspended(hba->dev))
+                goto out;
 
         if (ufs_mtk_is_rtff_mtcmos(hba))
                 ufs_mtk_mtcmos_ctrl(true, res);
 
-        return ufshcd_system_resume(dev);
+        ufs_mtk_dev_vreg_set_lpm(hba, false);
+
+out:
+        ret = ufshcd_system_resume(dev);
+
+        return ret;
 }
 #endif
 
@@ -2184,6 +2414,7 @@ static int ufs_mtk_system_resume(struct device *dev)
 static int ufs_mtk_runtime_suspend(struct device *dev)
 {
         struct ufs_hba *hba = dev_get_drvdata(dev);
+        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
         struct arm_smccc_res res;
         int ret = 0;
 
@@ -2196,17 +2427,24 @@ static int ufs_mtk_runtime_suspend(struct device *dev)
         if (ufs_mtk_is_rtff_mtcmos(hba))
                 ufs_mtk_mtcmos_ctrl(false, res);
 
+        if (host->phy_dev)
+                pm_runtime_put_sync(host->phy_dev);
+
         return 0;
 }
 
 static int ufs_mtk_runtime_resume(struct device *dev)
 {
         struct ufs_hba *hba = dev_get_drvdata(dev);
+        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
         struct arm_smccc_res res;
 
         if (ufs_mtk_is_rtff_mtcmos(hba))
                 ufs_mtk_mtcmos_ctrl(true, res);
 
+        if (host->phy_dev)
+                pm_runtime_get_sync(host->phy_dev);
+
         ufs_mtk_dev_vreg_set_lpm(hba, false);
 
         return ufshcd_runtime_resume(dev);
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index e46dc5fa209d..dfbf78bd8664 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -193,6 +193,7 @@ struct ufs_mtk_host {
         bool is_mcq_intr_enabled;
         int mcq_nr_intr;
         struct ufs_mtk_mcq_intr_info mcq_intr_info[UFSHCD_MAX_Q_NR];
+        struct device *phy_dev;
 };
 
 /* MTK delay of autosuspend: 500 ms */
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 9574fdc2bb0f..3e83dc51d538 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -38,6 +38,9 @@
 #define DEEMPHASIS_3_5_dB       0x04
 #define NO_DEEMPHASIS           0x0
 
+#define UFS_ICE_SYNC_RST_SEL    BIT(3)
+#define UFS_ICE_SYNC_RST_SW     BIT(4)
+
 enum {
         TSTBUS_UAWM,
         TSTBUS_UARM,
@@ -494,12 +497,8 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
          * If the HS-G5 PHY gear is used, update host_params->hs_rate to Rate-A,
          * so that the subsequent power mode change shall stick to Rate-A.
          */
-        if (host->hw_ver.major == 0x5) {
-                if (host->phy_gear == UFS_HS_G5)
-                        host_params->hs_rate = PA_HS_MODE_A;
-                else
-                        host_params->hs_rate = PA_HS_MODE_B;
-        }
+        if (host->hw_ver.major == 0x5 && host->phy_gear == UFS_HS_G5)
+                host_params->hs_rate = PA_HS_MODE_A;
 
         mode = host_params->hs_rate == PA_HS_MODE_B ? PHY_MODE_UFS_HS_B :
                                                       PHY_MODE_UFS_HS_A;
@@ -751,11 +750,29 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 {
         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
         int err;
+        u32 reg_val;
 
         err = ufs_qcom_enable_lane_clks(host);
         if (err)
                 return err;
 
+        if ((!ufs_qcom_is_link_active(hba)) &&
+            host->hw_ver.major == 5 &&
+            host->hw_ver.minor == 0 &&
+            host->hw_ver.step == 0) {
+                ufshcd_writel(hba, UFS_ICE_SYNC_RST_SEL | UFS_ICE_SYNC_RST_SW, UFS_MEM_ICE_CFG);
+                reg_val = ufshcd_readl(hba, UFS_MEM_ICE_CFG);
+                reg_val &= ~(UFS_ICE_SYNC_RST_SEL | UFS_ICE_SYNC_RST_SW);
+                /*
+                 * HW documentation doesn't recommend any delay between the
+                 * reset set and clear. But we are enforcing an arbitrary delay
+                 * to give flops enough time to settle in.
+                 */
+                usleep_range(50, 100);
+                ufshcd_writel(hba, reg_val, UFS_MEM_ICE_CFG);
+                ufshcd_readl(hba, UFS_MEM_ICE_CFG);
+        }
+
         return ufs_qcom_ice_resume(host);
 }
 
@@ -1096,6 +1113,18 @@ static void ufs_qcom_set_phy_gear(struct ufs_qcom_host *host)
         }
 }
 
+static void ufs_qcom_parse_gear_limits(struct ufs_hba *hba)
+{
+        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+        struct ufs_host_params *host_params = &host->host_params;
+        u32 hs_gear_old = host_params->hs_tx_gear;
+
+        ufshcd_parse_gear_limits(hba, host_params);
+        if (host_params->hs_tx_gear != hs_gear_old) {
+                host->phy_gear = host_params->hs_tx_gear;
+        }
+}
+
 static void ufs_qcom_set_host_params(struct ufs_hba *hba)
 {
         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -1162,6 +1191,13 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
         case PRE_CHANGE:
                 if (on) {
                         ufs_qcom_icc_update_bw(host);
+                        if (ufs_qcom_is_link_hibern8(hba)) {
+                                err = ufs_qcom_enable_lane_clks(host);
+                                if (err) {
+                                        dev_err(hba->dev, "enable lane clks failed, ret=%d\n", err);
+                                        return err;
+                                }
+                        }
                 } else {
                         if (!ufs_qcom_is_link_active(hba)) {
                                 /* disable device ref_clk */
@@ -1187,6 +1223,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
                         if (ufshcd_is_hs_mode(&hba->pwr_info))
                                 ufs_qcom_dev_ref_clk_ctrl(host, true);
                 } else {
+                        if (ufs_qcom_is_link_hibern8(hba))
+                                ufs_qcom_disable_lane_clks(host);
+
                         ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MIN][0][0].mem_bw,
                                             ufs_qcom_bw_table[MODE_MIN][0][0].cfg_bw);
                 }
@@ -1337,6 +1376,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
         ufs_qcom_advertise_quirks(hba);
         ufs_qcom_set_host_params(hba);
         ufs_qcom_set_phy_gear(host);
+        ufs_qcom_parse_gear_limits(hba);
 
         err = ufs_qcom_ice_init(host);
         if (err)
@@ -1742,7 +1782,7 @@ static void ufs_qcom_dump_testbus(struct ufs_hba *hba)
 }
 
 static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
-                              const char *prefix, enum ufshcd_res id)
+                              const char *prefix, void __iomem *base)
 {
         u32 *regs __free(kfree) = NULL;
         size_t pos;
@@ -1755,7 +1795,7 @@ static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
                 return -ENOMEM;
 
         for (pos = 0; pos < len; pos += 4)
-                regs[pos / 4] = readl(hba->res[id].base + offset + pos);
+                regs[pos / 4] = readl(base + offset + pos);
 
         print_hex_dump(KERN_ERR, prefix,
                        len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
@@ -1766,30 +1806,34 @@ static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
 
 static void ufs_qcom_dump_mcq_hci_regs(struct ufs_hba *hba)
 {
+        struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[0];
+        void __iomem *mcq_vs_base = hba->mcq_base + UFS_MEM_VS_BASE;
+
         struct dump_info {
+                void __iomem *base;
                 size_t offset;
                 size_t len;
                 const char *prefix;
-                enum ufshcd_res id;
         };
 
         struct dump_info mcq_dumps[] = {
-                {0x0, 256 * 4, "MCQ HCI-0 ", RES_MCQ},
-                {0x400, 256 * 4, "MCQ HCI-1 ", RES_MCQ},
-                {0x0, 5 * 4, "MCQ VS-0 ", RES_MCQ_VS},
-                {0x0, 256 * 4, "MCQ SQD-0 ", RES_MCQ_SQD},
-                {0x400, 256 * 4, "MCQ SQD-1 ", RES_MCQ_SQD},
-                {0x800, 256 * 4, "MCQ SQD-2 ", RES_MCQ_SQD},
-                {0xc00, 256 * 4, "MCQ SQD-3 ", RES_MCQ_SQD},
-                {0x1000, 256 * 4, "MCQ SQD-4 ", RES_MCQ_SQD},
-                {0x1400, 256 * 4, "MCQ SQD-5 ", RES_MCQ_SQD},
-                {0x1800, 256 * 4, "MCQ SQD-6 ", RES_MCQ_SQD},
-                {0x1c00, 256 * 4, "MCQ SQD-7 ", RES_MCQ_SQD},
+                {hba->mcq_base, 0x0, 256 * 4, "MCQ HCI-0 "},
+                {hba->mcq_base, 0x400, 256 * 4, "MCQ HCI-1 "},
+                {mcq_vs_base, 0x0, 5 * 4, "MCQ VS-0 "},
+                {opr->base, 0x0, 256 * 4, "MCQ SQD-0 "},
+                {opr->base, 0x400, 256 * 4, "MCQ SQD-1 "},
+                {opr->base, 0x800, 256 * 4, "MCQ SQD-2 "},
+                {opr->base, 0xc00, 256 * 4, "MCQ SQD-3 "},
+                {opr->base, 0x1000, 256 * 4, "MCQ SQD-4 "},
+                {opr->base, 0x1400, 256 * 4, "MCQ SQD-5 "},
+                {opr->base, 0x1800, 256 * 4, "MCQ SQD-6 "},
+                {opr->base, 0x1c00, 256 * 4, "MCQ SQD-7 "},
+
         };
 
         for (int i = 0; i < ARRAY_SIZE(mcq_dumps); i++) {
                 ufs_qcom_dump_regs(hba, mcq_dumps[i].offset, mcq_dumps[i].len,
-                                   mcq_dumps[i].prefix, mcq_dumps[i].id);
+                                   mcq_dumps[i].prefix, mcq_dumps[i].base);
                 cond_resched();
         }
 }
@@ -1910,116 +1954,68 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
         hba->clk_scaling.suspend_on_no_request = true;
 }
 
-/* Resources */
-static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
-        {.name = "ufs_mem",},
-        {.name = "mcq",},
-        /* Submission Queue DAO */
-        {.name = "mcq_sqd",},
-        /* Submission Queue Interrupt Status */
-        {.name = "mcq_sqis",},
-        /* Completion Queue DAO */
-        {.name = "mcq_cqd",},
-        /* Completion Queue Interrupt Status */
-        {.name = "mcq_cqis",},
-        /* MCQ vendor specific */
-        {.name = "mcq_vs",},
-};
-
 static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
 {
         struct platform_device *pdev = to_platform_device(hba->dev);
-        struct ufshcd_res_info *res;
-        struct resource *res_mem, *res_mcq;
-        int i, ret;
-
-        memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info));
-
-        for (i = 0; i < RES_MAX; i++) {
-                res = &hba->res[i];
-                res->resource = platform_get_resource_byname(pdev,
-                                                             IORESOURCE_MEM,
-                                                             res->name);
-                if (!res->resource) {
-                        dev_info(hba->dev, "Resource %s not provided\n", res->name);
-                        if (i == RES_UFS)
-                                return -ENODEV;
-                        continue;
-                } else if (i == RES_UFS) {
-                        res_mem = res->resource;
-                        res->base = hba->mmio_base;
-                        continue;
-                }
+        struct resource *res;
 
-                res->base = devm_ioremap_resource(hba->dev, res->resource);
-                if (IS_ERR(res->base)) {
-                        dev_err(hba->dev, "Failed to map res %s, err=%d\n",
-                                res->name, (int)PTR_ERR(res->base));
-                        ret = PTR_ERR(res->base);
-                        res->base = NULL;
-                        return ret;
-                }
+        /* Map the MCQ configuration region */
+        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mcq");
+        if (!res) {
+                dev_err(hba->dev, "MCQ resource not found in device tree\n");
+                return -ENODEV;
         }
 
-        /* MCQ resource provided in DT */
-        res = &hba->res[RES_MCQ];
-        /* Bail if MCQ resource is provided */
-        if (res->base)
-                goto out;
-
-        /* Explicitly allocate MCQ resource from ufs_mem */
-        res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL);
-        if (!res_mcq)
-                return -ENOMEM;
-
-        res_mcq->start = res_mem->start +
-                         MCQ_SQATTR_OFFSET(hba->mcq_capabilities);
-        res_mcq->end = res_mcq->start + hba->nr_hw_queues * MCQ_QCFG_SIZE - 1;
-        res_mcq->flags = res_mem->flags;
-        res_mcq->name = "mcq";
-
-        ret = insert_resource(&iomem_resource, res_mcq);
-        if (ret) {
-                dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n",
-                        ret);
-                return ret;
-        }
-
-        res->base = devm_ioremap_resource(hba->dev, res_mcq);
-        if (IS_ERR(res->base)) {
-                dev_err(hba->dev, "MCQ registers mapping failed, err=%d\n",
-                        (int)PTR_ERR(res->base));
-                ret = PTR_ERR(res->base);
-                goto ioremap_err;
+        hba->mcq_base = devm_ioremap_resource(hba->dev, res);
+        if (IS_ERR(hba->mcq_base)) {
+                dev_err(hba->dev, "Failed to map MCQ region: %ld\n",
+                        PTR_ERR(hba->mcq_base));
+                return PTR_ERR(hba->mcq_base);
         }
 
-out:
-        hba->mcq_base = res->base;
         return 0;
-ioremap_err:
-        res->base = NULL;
-        remove_resource(res_mcq);
-        return ret;
 }
 
 static int ufs_qcom_op_runtime_config(struct ufs_hba *hba)
 {
-        struct ufshcd_res_info *mem_res, *sqdao_res;
         struct ufshcd_mcq_opr_info_t *opr;
         int i;
+        u32 doorbell_offsets[OPR_MAX];
 
-        mem_res = &hba->res[RES_UFS];
-        sqdao_res = &hba->res[RES_MCQ_SQD];
+        /*
+         * Configure doorbell address offsets in MCQ configuration registers.
+         * These values are offsets relative to mmio_base (UFS_HCI_BASE).
+         *
+         * Memory Layout:
+         * - mmio_base = UFS_HCI_BASE
+         * - mcq_base = MCQ_CONFIG_BASE = mmio_base + (UFS_QCOM_MCQCAP_QCFGPTR * 0x200)
+         * - Doorbell registers are at: mmio_base + (UFS_QCOM_MCQCAP_QCFGPTR * 0x200) +
+         *   UFS_QCOM_MCQ_SQD_OFFSET
+         * - Which is also: mcq_base + UFS_QCOM_MCQ_SQD_OFFSET
+         */
-        if (!mem_res->base || !sqdao_res->base)
-                return -EINVAL;
+        doorbell_offsets[OPR_SQD] = UFS_QCOM_SQD_ADDR_OFFSET;
+        doorbell_offsets[OPR_SQIS] = UFS_QCOM_SQIS_ADDR_OFFSET;
+        doorbell_offsets[OPR_CQD] = UFS_QCOM_CQD_ADDR_OFFSET;
+        doorbell_offsets[OPR_CQIS] = UFS_QCOM_CQIS_ADDR_OFFSET;
 
+        /*
+         * Configure MCQ operation registers.
+         *
+         * The doorbell registers are physically located within the MCQ region:
+         * - doorbell_physical_addr = mmio_base + doorbell_offset
+         * - doorbell_physical_addr = mcq_base + (doorbell_offset - MCQ_CONFIG_OFFSET)
+         */
         for (i = 0; i < OPR_MAX; i++) {
                 opr = &hba->mcq_opr[i];
-                opr->offset = sqdao_res->resource->start -
-                              mem_res->resource->start + 0x40 * i;
-                opr->stride = 0x100;
-                opr->base = sqdao_res->base + 0x40 * i;
+                opr->offset = doorbell_offsets[i]; /* Offset relative to mmio_base */
+                opr->stride = UFS_QCOM_MCQ_STRIDE; /* 256 bytes between queues */
+
+                /*
+                 * Calculate the actual doorbell base address within MCQ region:
+                 * base = mcq_base + (doorbell_offset - MCQ_CONFIG_OFFSET)
+                 */
+                opr->base = hba->mcq_base + (opr->offset - UFS_QCOM_MCQ_CONFIG_OFFSET);
         }
 
         return 0;
@@ -2034,12 +2030,8 @@ static int ufs_qcom_get_hba_mac(struct ufs_hba *hba)
 
 static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba,
                                         unsigned long *ocqs)
 {
-        struct ufshcd_res_info *mcq_vs_res = &hba->res[RES_MCQ_VS];
-
-        if (!mcq_vs_res->base)
-                return -EINVAL;
-
-        *ocqs = readl(mcq_vs_res->base + UFS_MEM_CQIS_VS);
+        /* Read from MCQ vendor-specific register in MCQ region */
+        *ocqs = readl(hba->mcq_base + UFS_MEM_CQIS_VS);
 
         return 0;
 }
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index e0e129af7c16..380d02333d38 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -33,6 +33,28 @@
 #define DL_VS_CLK_CFG_MASK      GENMASK(9, 0)
 #define DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN      BIT(9)
 
+/* Qualcomm MCQ Configuration */
+#define UFS_QCOM_MCQCAP_QCFGPTR         224     /* 0xE0 in hex */
+#define UFS_QCOM_MCQ_CONFIG_OFFSET      (UFS_QCOM_MCQCAP_QCFGPTR * 0x200)       /* 0x1C000 */
+
+/* Doorbell offsets within MCQ region (relative to MCQ_CONFIG_BASE) */
+#define UFS_QCOM_MCQ_SQD_OFFSET         0x5000
+#define UFS_QCOM_MCQ_CQD_OFFSET         0x5080
+#define UFS_QCOM_MCQ_SQIS_OFFSET        0x5040
+#define UFS_QCOM_MCQ_CQIS_OFFSET        0x50C0
+#define UFS_QCOM_MCQ_STRIDE             0x100
+
+/* Calculated doorbell address offsets (relative to mmio_base) */
+#define UFS_QCOM_SQD_ADDR_OFFSET        (UFS_QCOM_MCQ_CONFIG_OFFSET + UFS_QCOM_MCQ_SQD_OFFSET)
+#define UFS_QCOM_CQD_ADDR_OFFSET        (UFS_QCOM_MCQ_CONFIG_OFFSET + UFS_QCOM_MCQ_CQD_OFFSET)
+#define UFS_QCOM_SQIS_ADDR_OFFSET       (UFS_QCOM_MCQ_CONFIG_OFFSET + UFS_QCOM_MCQ_SQIS_OFFSET)
+#define UFS_QCOM_CQIS_ADDR_OFFSET       (UFS_QCOM_MCQ_CONFIG_OFFSET + UFS_QCOM_MCQ_CQIS_OFFSET)
+#define REG_UFS_MCQ_STRIDE              UFS_QCOM_MCQ_STRIDE
+
+/* MCQ Vendor specific address offsets (relative to MCQ_CONFIG_BASE) */
+#define UFS_MEM_VS_BASE                 0x4000
+#define UFS_MEM_CQIS_VS                 0x4008
+
 /* QCOM UFS host controller vendor specific registers */
 enum {
         REG_UFS_SYS1CLK_1US             = 0xC0,
@@ -60,7 +82,7 @@ enum {
         UFS_AH8_CFG                     = 0xFC,
 
         UFS_RD_REG_MCQ                  = 0xD00,
-
+        UFS_MEM_ICE_CFG                 = 0x2600,
         REG_UFS_MEM_ICE_CONFIG          = 0x260C,
         REG_UFS_MEM_ICE_NUM_CORE        = 0x2664,
 
@@ -95,10 +117,6 @@ enum {
         REG_UFS_SW_H8_EXIT_CNT          = 0x2710,
 };
 
-enum {
-        UFS_MEM_CQIS_VS         = 0x8,
-};
-
 #define UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(x)      (0x000 + x)
 #define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x)      (0x400 + x)
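Aside on the new ufs-qcom.h constants above: the doorbell locations are pure arithmetic on QCFGPTR, so the fixed offsets can be checked by hand. A stand-alone sketch of the math (userspace C, illustration only, using the values from the header):

    #include <stdio.h>

    int main(void)
    {
            unsigned long mcq_cfg = 224 * 0x200;  /* UFS_QCOM_MCQ_CONFIG_OFFSET = 0x1C000 */
            unsigned long sqd = mcq_cfg + 0x5000; /* UFS_QCOM_SQD_ADDR_OFFSET  = 0x21000 */

            for (int i = 0; i < 4; i++)           /* UFS_QCOM_MCQ_STRIDE between queues */
                    printf("SQ%d doorbell: mmio_base + 0x%lx\n", i, sqd + i * 0x100UL);
            return 0;
    }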
diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
index ffe5d1d2b215..c2dafb583cf5 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.c
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -430,6 +430,39 @@ int ufshcd_negotiate_pwr_params(const struct ufs_host_params *host_params,
 }
 EXPORT_SYMBOL_GPL(ufshcd_negotiate_pwr_params);
 
+/**
+ * ufshcd_parse_gear_limits - Parse DT-based gear and rate limits for UFS
+ * @hba: Pointer to UFS host bus adapter instance
+ * @host_params: Pointer to UFS host parameters structure to be updated
+ *
+ * This function reads optional device tree properties to apply
+ * platform-specific constraints.
+ *
+ * "limit-hs-gear": Specifies the max HS gear.
+ * "limit-gear-rate": Specifies the max High-Speed rate.
+ */
+void ufshcd_parse_gear_limits(struct ufs_hba *hba, struct ufs_host_params *host_params)
+{
+        struct device_node *np = hba->dev->of_node;
+        u32 hs_gear;
+        const char *hs_rate;
+
+        if (!of_property_read_u32(np, "limit-hs-gear", &hs_gear)) {
+                host_params->hs_tx_gear = hs_gear;
+                host_params->hs_rx_gear = hs_gear;
+        }
+
+        if (!of_property_read_string(np, "limit-gear-rate", &hs_rate)) {
+                if (!strcmp(hs_rate, "rate-a"))
+                        host_params->hs_rate = PA_HS_MODE_A;
+                else if (!strcmp(hs_rate, "rate-b"))
+                        host_params->hs_rate = PA_HS_MODE_B;
+                else
+                        dev_warn(hba->dev, "Invalid rate: %s\n", hs_rate);
+        }
+}
+EXPORT_SYMBOL_GPL(ufshcd_parse_gear_limits);
+
 void ufshcd_init_host_params(struct ufs_host_params *host_params)
 {
         *host_params = (struct ufs_host_params){
diff --git a/drivers/ufs/host/ufshcd-pltfrm.h b/drivers/ufs/host/ufshcd-pltfrm.h
index 3017f8e8f93c..0a18a8aed94d 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.h
+++ b/drivers/ufs/host/ufshcd-pltfrm.h
@@ -29,6 +29,7 @@ int ufshcd_negotiate_pwr_params(const struct ufs_host_params *host_params,
                                 const struct ufs_pa_layer_attr *dev_max,
                                 struct ufs_pa_layer_attr *agreed_pwr);
 void ufshcd_init_host_params(struct ufs_host_params *host_params);
+void ufshcd_parse_gear_limits(struct ufs_hba *hba, struct ufs_host_params *host_params);
 int ufshcd_pltfrm_init(struct platform_device *pdev,
                        const struct ufs_hba_variant_ops *vops);
 void ufshcd_pltfrm_remove(struct platform_device *pdev);
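Aside on ufshcd_parse_gear_limits(): a sketch of how a variant driver might consume the new helper, mirroring the ufs_qcom_parse_gear_limits() usage in this series. The ufs_foo_init name and the DT values are hypothetical and would need a matching binding:

    /* Hypothetical DT fragment:
     *   limit-hs-gear = <4>;            // cap negotiation at HS-G4
     *   limit-gear-rate = "rate-a";     // prefer HS Rate-A
     */
    static int ufs_foo_init(struct ufs_hba *hba)
    {
            struct ufs_host_params host_params;

            ufshcd_init_host_params(&host_params);       /* defaults first */
            ufshcd_parse_gear_limits(hba, &host_params); /* then apply DT caps */

            return 0;
    }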