Diffstat (limited to 'drivers/ufs')
-rw-r--r--  drivers/ufs/core/ufs-fault-injection.c |  19
-rw-r--r--  drivers/ufs/core/ufs-fault-injection.h |  13
-rw-r--r--  drivers/ufs/core/ufs-sysfs.c           | 151
-rw-r--r--  drivers/ufs/core/ufshcd.c              | 154
-rw-r--r--  drivers/ufs/host/ufs-exynos.c          |   7
-rw-r--r--  drivers/ufs/host/ufs-hisi.c            |  11
-rw-r--r--  drivers/ufs/host/ufs-mediatek.c        |  12
-rw-r--r--  drivers/ufs/host/ufs-qcom.c            | 472
-rw-r--r--  drivers/ufs/host/ufs-qcom.h            |  57
-rw-r--r--  drivers/ufs/host/ufshcd-pltfrm.c       |  69
-rw-r--r--  drivers/ufs/host/ufshcd-pltfrm.h       |  10
11 files changed, 487 insertions(+), 488 deletions(-)
diff --git a/drivers/ufs/core/ufs-fault-injection.c b/drivers/ufs/core/ufs-fault-injection.c
index 5b1184aac585..169540417079 100644
--- a/drivers/ufs/core/ufs-fault-injection.c
+++ b/drivers/ufs/core/ufs-fault-injection.c
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/fault-inject.h>
#include <linux/module.h>
+#include <ufs/ufshcd.h>
#include "ufs-fault-injection.h"
static int ufs_fault_get(char *buffer, const struct kernel_param *kp);
@@ -59,12 +60,22 @@ static int ufs_fault_set(const char *val, const struct kernel_param *kp)
return 0;
}
-bool ufs_trigger_eh(void)
+void ufs_fault_inject_hba_init(struct ufs_hba *hba)
{
- return should_fail(&ufs_trigger_eh_attr, 1);
+ hba->trigger_eh_attr = ufs_trigger_eh_attr;
+ hba->timeout_attr = ufs_timeout_attr;
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+ fault_create_debugfs_attr("trigger_eh_inject", hba->debugfs_root, &hba->trigger_eh_attr);
+ fault_create_debugfs_attr("timeout_inject", hba->debugfs_root, &hba->timeout_attr);
+#endif
}
-bool ufs_fail_completion(void)
+bool ufs_trigger_eh(struct ufs_hba *hba)
{
- return should_fail(&ufs_timeout_attr, 1);
+ return should_fail(&hba->trigger_eh_attr, 1);
+}
+
+bool ufs_fail_completion(struct ufs_hba *hba)
+{
+ return should_fail(&hba->timeout_attr, 1);
}
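For reference, a minimal sketch (not part of this patch) of the per-host state the hunk above assumes in struct ufs_hba; the exact placement in include/ufs/ufshcd.h is not shown in this diff:

#include <linux/fault-inject.h>

struct ufs_hba {
	/* ... */
#ifdef CONFIG_SCSI_UFS_FAULT_INJECTION
	struct fault_attr trigger_eh_attr;	/* per-host copy of the module-level template */
	struct fault_attr timeout_attr;
#endif
	struct dentry *debugfs_root;		/* parent for the two fault-injection dirs */
	/* ... */
};

With CONFIG_FAULT_INJECTION_DEBUG_FS enabled, each host then gets its own trigger_eh_inject/ and timeout_inject/ debugfs directories carrying the standard fault-injection knobs (probability, interval, times).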
diff --git a/drivers/ufs/core/ufs-fault-injection.h b/drivers/ufs/core/ufs-fault-injection.h
index 6d0cd8e10c87..996a35769781 100644
--- a/drivers/ufs/core/ufs-fault-injection.h
+++ b/drivers/ufs/core/ufs-fault-injection.h
@@ -7,15 +7,20 @@
#include <linux/types.h>
#ifdef CONFIG_SCSI_UFS_FAULT_INJECTION
-bool ufs_trigger_eh(void);
-bool ufs_fail_completion(void);
+void ufs_fault_inject_hba_init(struct ufs_hba *hba);
+bool ufs_trigger_eh(struct ufs_hba *hba);
+bool ufs_fail_completion(struct ufs_hba *hba);
#else
-static inline bool ufs_trigger_eh(void)
+static inline void ufs_fault_inject_hba_init(struct ufs_hba *hba)
+{
+}
+
+static inline bool ufs_trigger_eh(struct ufs_hba *hba)
{
return false;
}
-static inline bool ufs_fail_completion(void)
+static inline bool ufs_fail_completion(struct ufs_hba *hba)
{
return false;
}
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index c95906443d5f..e6d12289e017 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -7,9 +7,56 @@
#include <asm/unaligned.h>
#include <ufs/ufs.h>
+#include <ufs/unipro.h>
#include "ufs-sysfs.h"
#include "ufshcd-priv.h"
+static const char *ufs_pa_pwr_mode_to_string(enum ufs_pa_pwr_mode mode)
+{
+ switch (mode) {
+ case FAST_MODE: return "FAST_MODE";
+ case SLOW_MODE: return "SLOW_MODE";
+ case FASTAUTO_MODE: return "FASTAUTO_MODE";
+ case SLOWAUTO_MODE: return "SLOWAUTO_MODE";
+ default: return "UNKNOWN";
+ }
+}
+
+static const char *ufs_hs_gear_rate_to_string(enum ufs_hs_gear_rate rate)
+{
+ switch (rate) {
+ case PA_HS_MODE_A: return "HS_RATE_A";
+ case PA_HS_MODE_B: return "HS_RATE_B";
+ default: return "UNKNOWN";
+ }
+}
+
+static const char *ufs_pwm_gear_to_string(enum ufs_pwm_gear_tag gear)
+{
+ switch (gear) {
+ case UFS_PWM_G1: return "PWM_GEAR1";
+ case UFS_PWM_G2: return "PWM_GEAR2";
+ case UFS_PWM_G3: return "PWM_GEAR3";
+ case UFS_PWM_G4: return "PWM_GEAR4";
+ case UFS_PWM_G5: return "PWM_GEAR5";
+ case UFS_PWM_G6: return "PWM_GEAR6";
+ case UFS_PWM_G7: return "PWM_GEAR7";
+ default: return "UNKNOWN";
+ }
+}
+
+static const char *ufs_hs_gear_to_string(enum ufs_hs_gear_tag gear)
+{
+ switch (gear) {
+ case UFS_HS_G1: return "HS_GEAR1";
+ case UFS_HS_G2: return "HS_GEAR2";
+ case UFS_HS_G3: return "HS_GEAR3";
+ case UFS_HS_G4: return "HS_GEAR4";
+ case UFS_HS_G5: return "HS_GEAR5";
+ default: return "UNKNOWN";
+ }
+}
+
static const char *ufshcd_uic_link_state_to_string(
enum uic_link_state state)
{
@@ -255,6 +302,35 @@ out:
return res < 0 ? res : count;
}
+static ssize_t rtc_update_ms_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", hba->dev_info.rtc_update_period);
+}
+
+static ssize_t rtc_update_ms_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned int ms;
+ bool resume_period_update = false;
+
+ if (kstrtouint(buf, 0, &ms))
+ return -EINVAL;
+
+ if (!hba->dev_info.rtc_update_period && ms > 0)
+ resume_period_update = true;
+ /* Minimum and maximum update frequency should be synchronized with all UFS vendors */
+ hba->dev_info.rtc_update_period = ms;
+
+ if (resume_period_update)
+ schedule_delayed_work(&hba->ufs_rtc_update_work,
+ msecs_to_jiffies(hba->dev_info.rtc_update_period));
+ return count;
+}
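Assuming the attribute is registered in the controller's default sysfs group (see below), writing a period in milliseconds, e.g. echo 10000 > /sys/bus/platform/devices/<host>.ufs/rtc_update_ms (path illustrative), re-arms the update work if it was previously disabled, while writing 0 lets the currently scheduled work expire without rescheduling.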
+
static ssize_t enable_wb_buf_flush_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -339,6 +415,7 @@ static DEVICE_ATTR_RW(auto_hibern8);
static DEVICE_ATTR_RW(wb_on);
static DEVICE_ATTR_RW(enable_wb_buf_flush);
static DEVICE_ATTR_RW(wb_flush_threshold);
+static DEVICE_ATTR_RW(rtc_update_ms);
static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rpm_lvl.attr,
@@ -351,6 +428,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_wb_on.attr,
&dev_attr_enable_wb_buf_flush.attr,
&dev_attr_wb_flush_threshold.attr,
+ &dev_attr_rtc_update_ms.attr,
NULL
};
@@ -628,6 +706,78 @@ static const struct attribute_group ufs_sysfs_monitor_group = {
.attrs = ufs_sysfs_monitor_attrs,
};
+static ssize_t lane_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", hba->pwr_info.lane_rx);
+}
+
+static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", ufs_pa_pwr_mode_to_string(hba->pwr_info.pwr_rx));
+}
+
+static ssize_t rate_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", ufs_hs_gear_rate_to_string(hba->pwr_info.hs_rate));
+}
+
+static ssize_t gear_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", hba->pwr_info.hs_rate ?
+ ufs_hs_gear_to_string(hba->pwr_info.gear_rx) :
+ ufs_pwm_gear_to_string(hba->pwr_info.gear_rx));
+}
+
+static ssize_t dev_pm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string(hba->curr_dev_pwr_mode));
+}
+
+static ssize_t link_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string(hba->uic_link_state));
+}
+
+static DEVICE_ATTR_RO(lane);
+static DEVICE_ATTR_RO(mode);
+static DEVICE_ATTR_RO(rate);
+static DEVICE_ATTR_RO(gear);
+static DEVICE_ATTR_RO(dev_pm);
+static DEVICE_ATTR_RO(link_state);
+
+static struct attribute *ufs_power_info_attrs[] = {
+ &dev_attr_lane.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_rate.attr,
+ &dev_attr_gear.attr,
+ &dev_attr_dev_pm.attr,
+ &dev_attr_link_state.attr,
+ NULL
+};
+
+static const struct attribute_group ufs_sysfs_power_info_group = {
+ .name = "power_info",
+ .attrs = ufs_power_info_attrs,
+};
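Once registered, these surface as read-only files under the controller device, e.g. .../power_info/gear (paths illustrative). Given the string helpers above, a link running HS-G4 Rate-B on two lanes would read back as HS_GEAR4, HS_RATE_B and 2.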
+
static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
u8 desc_index,
@@ -1233,6 +1383,7 @@ static const struct attribute_group *ufs_sysfs_groups[] = {
&ufs_sysfs_default_group,
&ufs_sysfs_capabilities_group,
&ufs_sysfs_monitor_group,
+ &ufs_sysfs_power_info_group,
&ufs_sysfs_device_descriptor_group,
&ufs_sysfs_interconnect_descriptor_group,
&ufs_sysfs_geometry_descriptor_group,
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 16d76325039a..d1e33328ff3f 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -99,6 +99,9 @@
/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
+/* Default RTC update every 10 seconds */
+#define UFS_RTC_UPDATE_INTERVAL_MS (10 * MSEC_PER_SEC)
+
/* UFSHC 4.0 compliant HC support this mode. */
static bool use_mcq_mode = true;
@@ -235,6 +238,12 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
return UFS_PM_LVL_0;
}
+static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
+{
+ return (hba->clk_gating.active_reqs || hba->outstanding_reqs || hba->outstanding_tasks ||
+ hba->active_uic_cmd || hba->uic_async_done);
+}
+
static const struct ufs_dev_quirk ufs_fixups[] = {
/* UFS cards deviations table */
{ .wmanufacturerid = UFS_VENDOR_MICRON,
@@ -289,21 +298,23 @@ static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
-static inline void ufshcd_enable_irq(struct ufs_hba *hba)
+void ufshcd_enable_irq(struct ufs_hba *hba)
{
if (!hba->is_irq_enabled) {
enable_irq(hba->irq);
hba->is_irq_enabled = true;
}
}
+EXPORT_SYMBOL_GPL(ufshcd_enable_irq);
-static inline void ufshcd_disable_irq(struct ufs_hba *hba)
+void ufshcd_disable_irq(struct ufs_hba *hba)
{
if (hba->is_irq_enabled) {
disable_irq(hba->irq);
hba->is_irq_enabled = false;
}
}
+EXPORT_SYMBOL_GPL(ufshcd_disable_irq);
static void ufshcd_configure_wb(struct ufs_hba *hba)
{
@@ -677,6 +688,8 @@ static void ufshcd_device_reset(struct ufs_hba *hba)
hba->dev_info.wb_enabled = false;
hba->dev_info.wb_buf_flush_enabled = false;
}
+ if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE)
+ hba->dev_info.rtc_time_baseline = 0;
}
if (err != -EOPNOTSUPP)
ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
@@ -1917,10 +1930,7 @@ static void ufshcd_gate_work(struct work_struct *work)
goto rel_lock;
}
- if (hba->clk_gating.active_reqs
- || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
- || hba->outstanding_reqs || hba->outstanding_tasks
- || hba->active_uic_cmd || hba->uic_async_done)
+ if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
goto rel_lock;
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -2721,6 +2731,8 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
.command_set_type = UPIU_COMMAND_SET_TYPE_SCSI,
};
+ WARN_ON_ONCE(ucd_req_ptr->header.task_tag != lrbp->task_tag);
+
ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
@@ -2993,7 +3005,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
ufshcd_send_command(hba, tag, hwq);
out:
- if (ufs_trigger_eh()) {
+ if (ufs_trigger_eh(hba)) {
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -4402,40 +4414,32 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
}
EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
-void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
+static void ufshcd_configure_auto_hibern8(struct ufs_hba *hba)
{
- unsigned long flags;
- bool update = false;
-
if (!ufshcd_is_auto_hibern8_supported(hba))
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->ahit != ahit) {
- hba->ahit = ahit;
- update = true;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
+}
+
+void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
+{
+ const u32 cur_ahit = READ_ONCE(hba->ahit);
- if (update &&
- !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
+ if (!ufshcd_is_auto_hibern8_supported(hba) || cur_ahit == ahit)
+ return;
+
+ WRITE_ONCE(hba->ahit, ahit);
+ if (!pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
ufshcd_rpm_get_sync(hba);
ufshcd_hold(hba);
- ufshcd_auto_hibern8_enable(hba);
+ ufshcd_configure_auto_hibern8(hba);
ufshcd_release(hba);
ufshcd_rpm_put_sync(hba);
}
}
EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
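As a usage sketch, with the AHIT field names from include/ufs/ufshci.h (the same composition as the 150 ms default set in ufshcd_init() further down):

/* timer value in bits [9:0], scale in bits [12:10]; scale 3 selects 1 ms units */
u32 ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
	   FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

ufshcd_auto_hibern8_update(hba, ahit);	/* no-op when the value is unchanged */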
-void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
-{
- if (!ufshcd_is_auto_hibern8_supported(hba))
- return;
-
- ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
-}
-
/**
* ufshcd_init_pwr_info - setting the POR (power on reset)
* values in hba power info
@@ -5650,7 +5654,7 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
!(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
ufshcd_reset_intr_aggr(hba);
- if (ufs_fail_completion())
+ if (ufs_fail_completion(hba))
return IRQ_HANDLED;
/*
@@ -8199,6 +8203,79 @@ static void ufs_fixup_device_setup(struct ufs_hba *hba)
ufshcd_vops_fixup_dev_quirks(hba);
}
+static void ufshcd_update_rtc(struct ufs_hba *hba)
+{
+ struct timespec64 ts64;
+ int err;
+ u32 val;
+
+ ktime_get_real_ts64(&ts64);
+
+ if (ts64.tv_sec < hba->dev_info.rtc_time_baseline) {
+ dev_warn_once(hba->dev, "%s: Current time precedes previous setting!\n", __func__);
+ return;
+ }
+
+ /*
+ * The Absolute RTC mode has a 136-year limit, spanning from 2010 to 2146. If a time beyond
+ * 2146 is required, it is recommended to choose the relative RTC mode.
+ */
+ val = ts64.tv_sec - hba->dev_info.rtc_time_baseline;
+
+ ufshcd_rpm_get_sync(hba);
+ err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED,
+ 0, 0, &val);
+ ufshcd_rpm_put_sync(hba);
+
+ if (err)
+ dev_err(hba->dev, "%s: Failed to update rtc %d\n", __func__, err);
+ else if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE)
+ hba->dev_info.rtc_time_baseline = ts64.tv_sec;
+}
+
+static void ufshcd_rtc_work(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+
+ hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work);
+
+ /* Update RTC only when there are no requests in progress and UFSHCI is operational */
+ if (!ufshcd_is_ufs_dev_busy(hba) && hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL)
+ ufshcd_update_rtc(hba);
+
+ if (ufshcd_is_ufs_dev_active(hba) && hba->dev_info.rtc_update_period)
+ schedule_delayed_work(&hba->ufs_rtc_update_work,
+ msecs_to_jiffies(hba->dev_info.rtc_update_period));
+}
+
+static void ufs_init_rtc(struct ufs_hba *hba, u8 *desc_buf)
+{
+ u16 periodic_rtc_update = get_unaligned_be16(&desc_buf[DEVICE_DESC_PARAM_FRQ_RTC]);
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+
+ if (periodic_rtc_update & UFS_RTC_TIME_BASELINE) {
+ dev_info->rtc_type = UFS_RTC_ABSOLUTE;
+
+ /*
+ * Linux measures time as the number of seconds elapsed since 00:00:00 UTC
+ * on January 1, 1970, while the UFS absolute RTC counts seconds elapsed from
+ * January 1st 2010 00:00, so the ABS baseline must be adjusted accordingly.
+ */
+ dev_info->rtc_time_baseline = mktime64(2010, 1, 1, 0, 0, 0) -
+ mktime64(1970, 1, 1, 0, 0, 0);
+ } else {
+ dev_info->rtc_type = UFS_RTC_RELATIVE;
+ dev_info->rtc_time_baseline = 0;
+ }
+
+ /*
+ * Ignore the TIME_PERIOD field of wPeriodicRTCUpdate, because the spec does not clearly
+ * state how to derive the update period from each time unit. Leave the periodic RTC
+ * update work disabled; users can configure it via the sysfs node to suit their needs.
+ */
+ dev_info->rtc_update_period = 0;
+}
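For the absolute mode above, mktime64(2010, 1, 1, 0, 0, 0) - mktime64(1970, 1, 1, 0, 0, 0) works out to 1262304000 seconds (40 years including 10 leap days). Since QUERY_ATTR_IDN_SECONDS_PASSED is a 32-bit attribute, the counter covers 2^32 s, roughly 136 years, which is where the 2146 ceiling mentioned in ufshcd_update_rtc() comes from.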
+
static int ufs_get_device_desc(struct ufs_hba *hba)
{
int err;
@@ -8251,6 +8328,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
ufshcd_temp_notif_probe(hba, desc_buf);
+ ufs_init_rtc(hba, desc_buf);
+
if (hba->ext_iid_sup)
ufshcd_ext_iid_probe(hba, desc_buf);
@@ -8804,6 +8883,8 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
ufshcd_force_reset_auto_bkops(hba);
ufshcd_set_timestamp_attr(hba);
+ schedule_delayed_work(&hba->ufs_rtc_update_work,
+ msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
/* Gear up to HS gear if supported */
if (hba->max_pwr_info.is_valid) {
@@ -8878,8 +8959,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
if (hba->ee_usr_mask)
ufshcd_write_ee_control(hba);
- /* Enable Auto-Hibernate if configured */
- ufshcd_auto_hibern8_enable(hba);
+ ufshcd_configure_auto_hibern8(hba);
out:
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -9362,6 +9442,7 @@ static int ufshcd_hba_init(struct ufs_hba *hba)
goto out_disable_vreg;
ufs_debugfs_hba_init(hba);
+ ufs_fault_inject_hba_init(hba);
hba->is_powered = true;
goto out;
@@ -9760,6 +9841,8 @@ vops_suspend:
ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
if (ret)
goto set_link_active;
+
+ cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
goto out;
set_link_active:
@@ -9854,6 +9937,8 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ret)
goto set_old_link_state;
ufshcd_set_timestamp_attr(hba);
+ schedule_delayed_work(&hba->ufs_rtc_update_work,
+ msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
}
if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
@@ -9876,8 +9961,7 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
}
- /* Enable Auto-Hibernate if configured */
- ufshcd_auto_hibern8_enable(hba);
+ ufshcd_configure_auto_hibern8(hba);
goto out;
@@ -10550,8 +10634,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
- INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
- ufshcd_rpm_dev_flush_recheck_work);
+ INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work);
+ INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work);
/* Set the default auto-hibernate idle timer value to 150 ms */
if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index 71bd6dbc0547..734d40f99e31 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -765,7 +765,7 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
struct phy *generic_phy = ufs->phy;
- struct ufs_dev_params ufs_exynos_cap;
+ struct ufs_host_params host_params;
int ret;
if (!dev_req_params) {
@@ -774,10 +774,9 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
goto out;
}
- ufshcd_init_pwr_dev_param(&ufs_exynos_cap);
+ ufshcd_init_host_params(&host_params);
- ret = ufshcd_get_pwr_dev_param(&ufs_exynos_cap,
- dev_max_params, dev_req_params);
+ ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
if (ret) {
pr_err("%s: failed to determine capabilities\n", __func__);
goto out;
diff --git a/drivers/ufs/host/ufs-hisi.c b/drivers/ufs/host/ufs-hisi.c
index 0229ac0a8dbe..5ee73ff05251 100644
--- a/drivers/ufs/host/ufs-hisi.c
+++ b/drivers/ufs/host/ufs-hisi.c
@@ -293,9 +293,9 @@ static int ufs_hisi_link_startup_notify(struct ufs_hba *hba,
return err;
}
-static void ufs_hisi_set_dev_cap(struct ufs_dev_params *hisi_param)
+static void ufs_hisi_set_dev_cap(struct ufs_host_params *host_params)
{
- ufshcd_init_pwr_dev_param(hisi_param);
+ ufshcd_init_host_params(host_params);
}
static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
@@ -365,7 +365,7 @@ static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
- struct ufs_dev_params ufs_hisi_cap;
+ struct ufs_host_params host_params;
int ret = 0;
if (!dev_req_params) {
@@ -377,9 +377,8 @@ static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
- ufs_hisi_set_dev_cap(&ufs_hisi_cap);
- ret = ufshcd_get_pwr_dev_param(&ufs_hisi_cap,
- dev_max_params, dev_req_params);
+ ufs_hisi_set_dev_cap(&host_params);
+ ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
if (ret) {
dev_err(hba->dev,
"%s: failed to determine capabilities\n", __func__);
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index fc61790d289b..776bca4f70c8 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -996,16 +996,14 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- struct ufs_dev_params host_cap;
+ struct ufs_host_params host_params;
int ret;
- ufshcd_init_pwr_dev_param(&host_cap);
- host_cap.hs_rx_gear = UFS_HS_G5;
- host_cap.hs_tx_gear = UFS_HS_G5;
+ ufshcd_init_host_params(&host_params);
+ host_params.hs_rx_gear = UFS_HS_G5;
+ host_params.hs_tx_gear = UFS_HS_G5;
- ret = ufshcd_get_pwr_dev_param(&host_cap,
- dev_max_params,
- dev_req_params);
+ ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
if (ret) {
pr_info("%s: failed to determine capabilities\n",
__func__);
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 17e24270477d..480787048e75 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -4,26 +4,26 @@
*/
#include <linux/acpi.h>
-#include <linux/time.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/devfreq.h>
+#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/platform_device.h>
#include <linux/phy/phy.h>
-#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
#include <linux/reset-controller.h>
-#include <linux/devfreq.h>
+#include <linux/time.h>
#include <soc/qcom/ice.h>
#include <ufs/ufshcd.h>
-#include "ufshcd-pltfrm.h"
-#include <ufs/unipro.h>
-#include "ufs-qcom.h"
#include <ufs/ufshci.h>
#include <ufs/ufs_quirks.h>
+#include <ufs/unipro.h>
+#include "ufshcd-pltfrm.h"
+#include "ufs-qcom.h"
#define MCQ_QCFGPTR_MASK GENMASK(7, 0)
#define MCQ_QCFGPTR_UNIT 0x200
@@ -90,8 +90,6 @@ static const struct __ufs_qcom_bw_table {
[MODE_MAX][0][0] = { 7643136, 307200 },
};
-static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
-
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up);
@@ -158,7 +156,7 @@ static int ufs_qcom_ice_program_key(struct ufs_hba *hba,
cap = hba->crypto_cap_array[cfg->crypto_cap_idx];
if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS ||
cap.key_size != UFS_CRYPTO_KEY_SIZE_256)
- return -EINVAL;
+ return -EOPNOTSUPP;
if (config_enable)
return qcom_ice_program_key(host->ice,
@@ -194,52 +192,12 @@ static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host)
}
#endif
-static int ufs_qcom_host_clk_get(struct device *dev,
- const char *name, struct clk **clk_out, bool optional)
-{
- struct clk *clk;
- int err = 0;
-
- clk = devm_clk_get(dev, name);
- if (!IS_ERR(clk)) {
- *clk_out = clk;
- return 0;
- }
-
- err = PTR_ERR(clk);
-
- if (optional && err == -ENOENT) {
- *clk_out = NULL;
- return 0;
- }
-
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to get %s err %d\n", name, err);
-
- return err;
-}
-
-static int ufs_qcom_host_clk_enable(struct device *dev,
- const char *name, struct clk *clk)
-{
- int err = 0;
-
- err = clk_prepare_enable(clk);
- if (err)
- dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);
-
- return err;
-}
-
static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
if (!host->is_lane_clks_enabled)
return;
- clk_disable_unprepare(host->tx_l1_sync_clk);
- clk_disable_unprepare(host->tx_l0_sync_clk);
- clk_disable_unprepare(host->rx_l1_sync_clk);
- clk_disable_unprepare(host->rx_l0_sync_clk);
+ clk_bulk_disable_unprepare(host->num_clks, host->clks);
host->is_lane_clks_enabled = false;
}
@@ -247,73 +205,29 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
int err;
- struct device *dev = host->hba->dev;
- if (host->is_lane_clks_enabled)
- return 0;
-
- err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
- host->rx_l0_sync_clk);
+ err = clk_bulk_prepare_enable(host->num_clks, host->clks);
if (err)
return err;
- err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
- host->tx_l0_sync_clk);
- if (err)
- goto disable_rx_l0;
-
- err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
- host->rx_l1_sync_clk);
- if (err)
- goto disable_tx_l0;
-
- err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
- host->tx_l1_sync_clk);
- if (err)
- goto disable_rx_l1;
-
host->is_lane_clks_enabled = true;
return 0;
-
-disable_rx_l1:
- clk_disable_unprepare(host->rx_l1_sync_clk);
-disable_tx_l0:
- clk_disable_unprepare(host->tx_l0_sync_clk);
-disable_rx_l0:
- clk_disable_unprepare(host->rx_l0_sync_clk);
-
- return err;
}
static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
- int err = 0;
+ int err;
struct device *dev = host->hba->dev;
if (has_acpi_companion(dev))
return 0;
- err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
- &host->rx_l0_sync_clk, false);
- if (err)
+ err = devm_clk_bulk_get_all(dev, &host->clks);
+ if (err <= 0)
return err;
- err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
- &host->tx_l0_sync_clk, false);
- if (err)
- return err;
-
- /* In case of single lane per direction, don't read lane1 clocks */
- if (host->hba->lanes_per_direction > 1) {
- err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
- &host->rx_l1_sync_clk, false);
- if (err)
- return err;
-
- err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
- &host->tx_l1_sync_clk, true);
- }
+ host->num_clks = err;
return 0;
}
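The bulk API also consolidates the error handling: devm_clk_bulk_get_all() returns the number of clocks found (or a negative errno), clk_bulk_prepare_enable() unwinds any clocks it already enabled on failure, and clk_bulk_disable_unprepare() releases them in reverse order, replacing the hand-rolled disable_rx_l1/disable_tx_l0/disable_rx_l0 ladder removed above.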
@@ -321,7 +235,7 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
int err;
- u32 tx_fsm_val = 0;
+ u32 tx_fsm_val;
unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
do {
@@ -360,9 +274,7 @@ static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
- ufshcd_rmwl(host->hba, QUNIPRO_SEL,
- ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
- REG_UFS_CFG1);
+ ufshcd_rmwl(host->hba, QUNIPRO_SEL, QUNIPRO_SEL, REG_UFS_CFG1);
if (host->hw_ver.major >= 0x05)
ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
@@ -376,18 +288,15 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
*/
static int ufs_qcom_host_reset(struct ufs_hba *hba)
{
- int ret = 0;
+ int ret;
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- bool reenable_intr = false;
+ bool reenable_intr;
- if (!host->core_reset) {
- dev_warn(hba->dev, "%s: reset control not set\n", __func__);
+ if (!host->core_reset)
return 0;
- }
reenable_intr = hba->is_irq_enabled;
- disable_irq(hba->irq);
- hba->is_irq_enabled = false;
+ ufshcd_disable_irq(hba);
ret = reset_control_assert(host->core_reset);
if (ret) {
@@ -404,16 +313,16 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
usleep_range(200, 210);
ret = reset_control_deassert(host->core_reset);
- if (ret)
+ if (ret) {
dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
__func__, ret);
+ return ret;
+ }
usleep_range(1000, 1100);
- if (reenable_intr) {
- enable_irq(hba->irq);
- hba->is_irq_enabled = true;
- }
+ if (reenable_intr)
+ ufshcd_enable_irq(hba);
return 0;
}
@@ -422,18 +331,8 @@ static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- if (host->hw_ver.major == 0x1) {
- /*
- * HS-G3 operations may not reliably work on legacy QCOM
- * UFS host controller hardware even though capability
- * exchange during link startup phase may end up
- * negotiating maximum supported gear as G3.
- * Hence downgrade the maximum supported gear to HS-G2.
- */
- return UFS_HS_G2;
- } else if (host->hw_ver.major >= 0x4) {
+ if (host->hw_ver.major >= 0x4)
return UFS_QCOM_MAX_GEAR(ufshcd_readl(hba, REG_UFS_PARAM0));
- }
/* Default is HS-G3 */
return UFS_HS_G3;
@@ -442,14 +341,29 @@ static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba)
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct ufs_host_params *host_params = &host->host_params;
struct phy *phy = host->generic_phy;
+ enum phy_mode mode;
int ret;
+ /*
+ * HW ver 5 can only support up to HS-G5 Rate-A due to HW limitations.
+ * If the HS-G5 PHY gear is used, update host_params->hs_rate to Rate-A,
+ * so that the subsequent power mode change shall stick to Rate-A.
+ */
+ if (host->hw_ver.major == 0x5) {
+ if (host->phy_gear == UFS_HS_G5)
+ host_params->hs_rate = PA_HS_MODE_A;
+ else
+ host_params->hs_rate = PA_HS_MODE_B;
+ }
+
+ mode = host_params->hs_rate == PA_HS_MODE_B ? PHY_MODE_UFS_HS_B : PHY_MODE_UFS_HS_A;
+
/* Reset UFS Host Controller and PHY */
ret = ufs_qcom_host_reset(hba);
if (ret)
- dev_warn(hba->dev, "%s: host reset returned %d\n",
- __func__, ret);
+ return ret;
/* phy initialization - calibrate the phy */
ret = phy_init(phy);
@@ -459,7 +373,9 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
return ret;
}
- phy_set_mode_ext(phy, PHY_MODE_UFS_HS_B, host->phy_gear);
+ ret = phy_set_mode_ext(phy, mode, host->phy_gear);
+ if (ret)
+ goto out_disable_phy;
/* power on phy - start serdes and phy's power and clocks */
ret = phy_power_on(phy);
@@ -489,9 +405,8 @@ out_disable_phy:
*/
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
- ufshcd_writel(hba,
- ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
- REG_UFS_CFG2);
+ ufshcd_rmwl(hba, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2_CGC_EN_ALL,
+ REG_UFS_CFG2);
/* Ensure that HW clock gating is enabled before next operations */
mb();
@@ -501,11 +416,14 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- int err = 0;
+ int err;
switch (status) {
case PRE_CHANGE:
- ufs_qcom_power_up_sequence(hba);
+ err = ufs_qcom_power_up_sequence(hba);
+ if (err)
+ return err;
+
/*
* The PHY PLL output is the source of tx/rx lane symbol
* clocks, hence, enable the lane clocks only after PHY
@@ -544,41 +462,16 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_clk_info *clki;
- u32 core_clk_period_in_ns;
- u32 tx_clk_cycles_per_us = 0;
unsigned long core_clk_rate = 0;
- u32 core_clk_cycles_per_us = 0;
-
- static u32 pwm_fr_table[][2] = {
- {UFS_PWM_G1, 0x1},
- {UFS_PWM_G2, 0x1},
- {UFS_PWM_G3, 0x1},
- {UFS_PWM_G4, 0x1},
- };
-
- static u32 hs_fr_table_rA[][2] = {
- {UFS_HS_G1, 0x1F},
- {UFS_HS_G2, 0x3e},
- {UFS_HS_G3, 0x7D},
- };
-
- static u32 hs_fr_table_rB[][2] = {
- {UFS_HS_G1, 0x24},
- {UFS_HS_G2, 0x49},
- {UFS_HS_G3, 0x92},
- };
+ u32 core_clk_cycles_per_us;
/*
- * The Qunipro controller does not use following registers:
- * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
- * UFS_REG_PA_LINK_STARTUP_TIMER.
- * However UTP controller uses SYS1CLK_1US_REG register for Interrupt
+ * UTP controller uses SYS1CLK_1US_REG register for Interrupt
* Aggregation logic.
* It is mandatory to write SYS1CLK_1US_REG register on UFS host
* controller V4.0.0 onwards.
*/
- if (host->hw_ver.major < 4 && ufs_qcom_cap_qunipro(host) &&
- !ufshcd_is_intr_aggr_allowed(hba))
+ if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba))
return 0;
if (gear == 0) {
@@ -611,79 +504,6 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
mb();
}
- if (ufs_qcom_cap_qunipro(host))
- return 0;
-
- core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
- core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
- core_clk_period_in_ns &= MASK_CLK_NS_REG;
-
- switch (hs) {
- case FASTAUTO_MODE:
- case FAST_MODE:
- if (rate == PA_HS_MODE_A) {
- if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
- dev_err(hba->dev,
- "%s: index %d exceeds table size %zu\n",
- __func__, gear,
- ARRAY_SIZE(hs_fr_table_rA));
- return -EINVAL;
- }
- tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
- } else if (rate == PA_HS_MODE_B) {
- if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
- dev_err(hba->dev,
- "%s: index %d exceeds table size %zu\n",
- __func__, gear,
- ARRAY_SIZE(hs_fr_table_rB));
- return -EINVAL;
- }
- tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
- } else {
- dev_err(hba->dev, "%s: invalid rate = %d\n",
- __func__, rate);
- return -EINVAL;
- }
- break;
- case SLOWAUTO_MODE:
- case SLOW_MODE:
- if (gear > ARRAY_SIZE(pwm_fr_table)) {
- dev_err(hba->dev,
- "%s: index %d exceeds table size %zu\n",
- __func__, gear,
- ARRAY_SIZE(pwm_fr_table));
- return -EINVAL;
- }
- tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
- break;
- case UNCHANGED:
- default:
- dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
- return -EINVAL;
- }
-
- if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
- (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
- /* this register 2 fields shall be written at once */
- ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
- REG_UFS_TX_SYMBOL_CLK_NS_US);
- /*
- * make sure above write gets applied before we return from
- * this function.
- */
- mb();
- }
-
- if (update_link_startup_timer && host->hw_ver.major != 0x5) {
- ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
- REG_UFS_CFG0);
- /*
- * make sure that this configuration is applied before
- * we return
- */
- mb();
- }
-
return 0;
}
@@ -691,7 +511,6 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
int err = 0;
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
switch (status) {
case PRE_CHANGE:
@@ -702,11 +521,9 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
return -EINVAL;
}
- if (ufs_qcom_cap_qunipro(host)) {
- err = ufs_qcom_set_core_clk_ctrl(hba, true);
- if (err)
- dev_err(hba->dev, "cfg core clk ctrl failed\n");
- }
+ err = ufs_qcom_set_core_clk_ctrl(hba, true);
+ if (err)
+ dev_err(hba->dev, "cfg core clk ctrl failed\n");
/*
* Some UFS devices (and may be host) have issues if LCC is
* enabled. So we are setting PA_Local_TX_LCC_Enable to 0
@@ -898,7 +715,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct ufs_dev_params ufs_qcom_cap;
+ struct ufs_host_params *host_params = &host->host_params;
int ret = 0;
if (!dev_req_params) {
@@ -908,15 +725,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
- ufshcd_init_pwr_dev_param(&ufs_qcom_cap);
- ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
-
- /* This driver only supports symmetic gear setting i.e., hs_tx_gear == hs_rx_gear */
- ufs_qcom_cap.hs_tx_gear = ufs_qcom_cap.hs_rx_gear = ufs_qcom_get_hs_gear(hba);
-
- ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
- dev_max_params,
- dev_req_params);
+ ret = ufshcd_negotiate_pwr_params(host_params, dev_max_params, dev_req_params);
if (ret) {
dev_err(hba->dev, "%s: failed to determine capabilities\n",
__func__);
@@ -924,11 +733,12 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
}
/*
- * Update phy_gear only when the gears are scaled to a higher value. This is
- * because, the PHY gear settings are backwards compatible and we only need to
- * change the PHY gear settings while scaling to higher gears.
+ * During UFS driver probe, always update the PHY gear to match the negotiated
+ * gear, so that, if quirk UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is enabled,
+ * the second init can program the optimal PHY settings. This allows one to start
+ * the first init with either the minimum or the maximum supported gear.
*/
- if (dev_req_params->gear_tx > host->phy_gear)
+ if (hba->ufshcd_state == UFSHCD_STATE_RESET)
host->phy_gear = dev_req_params->gear_tx;
/* enable the device ref clock before changing to HS mode */
@@ -1005,12 +815,7 @@ static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- if (host->hw_ver.major == 0x1)
- return ufshci_version(1, 1);
- else
- return ufshci_version(2, 0);
+ return ufshci_version(2, 0);
}
/**
@@ -1026,46 +831,69 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- if (host->hw_ver.major == 0x01) {
- hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
- | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
- | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
+ if (host->hw_ver.major == 0x2)
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
- if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
- hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
+ if (host->hw_ver.major > 0x3)
+ hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
+}
- hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
- }
+static void ufs_qcom_set_phy_gear(struct ufs_qcom_host *host)
+{
+ struct ufs_host_params *host_params = &host->host_params;
+ u32 val, dev_major;
- if (host->hw_ver.major == 0x2) {
- hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
+ host->phy_gear = host_params->hs_tx_gear;
- if (!ufs_qcom_cap_qunipro(host))
- /* Legacy UniPro mode still need following quirks */
- hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
- | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
- | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
- }
+ if (host->hw_ver.major < 0x4) {
+ /*
+ * For controllers whose major HW version is < 4, power up the
+ * PHY using minimum supported gear (UFS_HS_G2). Switching to
+ * max gear will be performed during reinit if supported.
+ * For newer controllers, whose major HW version is >= 4, power
+ * up the PHY using max supported gear.
+ */
+ host->phy_gear = UFS_HS_G2;
+ } else if (host->hw_ver.major >= 0x5) {
+ val = ufshcd_readl(host->hba, REG_UFS_DEBUG_SPARE_CFG);
+ dev_major = FIELD_GET(UFS_DEV_VER_MAJOR_MASK, val);
- if (host->hw_ver.major > 0x3)
- hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
+ /*
+ * Since the UFS device version is populated, let's remove the
+ * REINIT quirk as the negotiated gear won't change during boot.
+ * So there is no need to do reinit.
+ */
+ if (dev_major != 0x0)
+ host->hba->quirks &= ~UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
+
+ /*
+ * For UFS 3.1 device and older, power up the PHY using HS-G4
+ * PHY gear to save power.
+ */
+ if (dev_major > 0x0 && dev_major < 0x4)
+ host->phy_gear = UFS_HS_G4;
+ }
}
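As a worked example of ufs_qcom_set_phy_gear(): on a major version >= 5 controller where the bootloader left 0x3 in bits [7:4] of REG_UFS_DEBUG_SPARE_CFG, dev_major = FIELD_GET(GENMASK(7, 4), val) = 3, so the REINIT quirk is cleared and the PHY powers up in UFS_HS_G4; if the register reads back 0, the quirk stays and the maximum-gear default is kept.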
-static void ufs_qcom_set_caps(struct ufs_hba *hba)
+static void ufs_qcom_set_host_params(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct ufs_host_params *host_params = &host->host_params;
+
+ ufshcd_init_host_params(host_params);
+ /* This driver only supports symmetric gear setting, i.e. hs_tx_gear == hs_rx_gear */
+ host_params->hs_tx_gear = host_params->hs_rx_gear = ufs_qcom_get_hs_gear(hba);
+}
+
+static void ufs_qcom_set_caps(struct ufs_hba *hba)
+{
hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING;
hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
hba->caps |= UFSHCD_CAP_WB_EN;
hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
-
- if (host->hw_ver.major >= 0x2) {
- host->caps = UFS_QCOM_CAP_QUNIPRO |
- UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
- }
}
/**
@@ -1188,16 +1016,12 @@ static int ufs_qcom_init(struct ufs_hba *hba)
{
int err;
struct device *dev = hba->dev;
- struct platform_device *pdev = to_platform_device(dev);
struct ufs_qcom_host *host;
- struct resource *res;
struct ufs_clk_info *clki;
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
- if (!host) {
- dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
+ if (!host)
return -ENOMEM;
- }
/* Make a two way bind between the qcom host and the hba */
host->hba = hba;
@@ -1235,34 +1059,16 @@ static int ufs_qcom_init(struct ufs_hba *hba)
host->device_reset = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(host->device_reset)) {
- err = PTR_ERR(host->device_reset);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to acquire reset gpio: %d\n", err);
+ err = dev_err_probe(dev, PTR_ERR(host->device_reset),
+ "Failed to acquire device reset gpio\n");
goto out_variant_clear;
}
ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
&host->hw_ver.minor, &host->hw_ver.step);
- /*
- * for newer controllers, device reference clock control bit has
- * moved inside UFS controller register address space itself.
- */
- if (host->hw_ver.major >= 0x02) {
- host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
- host->dev_ref_clk_en_mask = BIT(26);
- } else {
- /* "dev_ref_clk_ctrl_mem" is optional resource */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "dev_ref_clk_ctrl_mem");
- if (res) {
- host->dev_ref_clk_ctrl_mmio =
- devm_ioremap_resource(dev, res);
- if (IS_ERR(host->dev_ref_clk_ctrl_mmio))
- host->dev_ref_clk_ctrl_mmio = NULL;
- host->dev_ref_clk_en_mask = BIT(5);
- }
- }
+ host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
+ host->dev_ref_clk_en_mask = BIT(26);
list_for_each_entry(clki, &hba->clk_list_head, list) {
if (!strcmp(clki->name, "core_clk_unipro"))
@@ -1275,6 +1081,8 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_set_caps(hba);
ufs_qcom_advertise_quirks(hba);
+ ufs_qcom_set_host_params(hba);
+ ufs_qcom_set_phy_gear(host);
err = ufs_qcom_ice_init(host);
if (err)
@@ -1282,9 +1090,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
- if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
- ufs_qcom_hosts[hba->dev->id] = host;
-
ufs_qcom_get_default_testbus_cfg(host);
err = ufs_qcom_testbus_config(host);
if (err)
@@ -1292,12 +1097,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
dev_warn(dev, "%s: failed to configure the testbus %d\n",
__func__, err);
- /*
- * Power up the PHY using the minimum supported gear (UFS_HS_G2).
- * Switching to max gear will be performed during reinit if supported.
- */
- host->phy_gear = UFS_HS_G2;
-
return 0;
out_variant_clear:
@@ -1391,7 +1190,7 @@ static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct list_head *head = &hba->clk_list_head;
struct ufs_clk_info *clki;
- u32 cycles_in_1us;
+ u32 cycles_in_1us = 0;
u32 core_clk_ctrl_reg;
int err;
@@ -1444,9 +1243,6 @@ static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
struct ufs_pa_layer_attr *attr = &host->dev_req_params;
int ret;
- if (!ufs_qcom_cap_qunipro(host))
- return 0;
-
ret = ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
attr->hs_rate, false, true);
if (ret) {
@@ -1464,13 +1260,9 @@ static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
u32 core_clk_ctrl_reg;
- if (!ufs_qcom_cap_qunipro(host))
- return 0;
-
err = ufshcd_dme_get(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
&core_clk_ctrl_reg);
@@ -1489,11 +1281,6 @@ static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- if (!ufs_qcom_cap_qunipro(host))
- return 0;
-
/* set unipro core clock attributes and clear clock divider */
return ufs_qcom_set_core_clk_ctrl(hba, false);
}
@@ -1502,7 +1289,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
bool scale_up, enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- int err = 0;
+ int err;
/* check the host controller state before sending hibern8 cmd */
if (!ufshcd_is_hba_active(hba))
@@ -1775,7 +1562,7 @@ static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
struct platform_device *pdev = to_platform_device(hba->dev);
struct ufshcd_res_info *res;
struct resource *res_mem, *res_mcq;
- int i, ret = 0;
+ int i, ret;
memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info));
@@ -1787,7 +1574,7 @@ static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
if (!res->resource) {
dev_info(hba->dev, "Resource %s not provided\n", res->name);
if (i == RES_UFS)
- return -ENOMEM;
+ return -ENODEV;
continue;
} else if (i == RES_UFS) {
res_mem = res->resource;
@@ -1958,11 +1745,10 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
platform_msi_domain_free_irqs(hba->dev);
} else {
if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
- host->hw_ver.step == 0) {
- ufshcd_writel(hba,
- ufshcd_readl(hba, REG_UFS_CFG3) | 0x1F000,
- REG_UFS_CFG3);
- }
+ host->hw_ver.step == 0)
+ ufshcd_rmwl(hba, ESI_VEC_MASK,
+ FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
+ REG_UFS_CFG3);
ufshcd_mcq_enable_esi(hba);
}
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 9950a0089475..9dd9a391ebb7 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -10,22 +10,17 @@
#include <soc/qcom/ice.h>
#include <ufs/ufshcd.h>
-#define MAX_UFS_QCOM_HOSTS 1
-#define MAX_U32 (~(u32)0)
#define MPHY_TX_FSM_STATE 0x41
#define TX_FSM_HIBERN8 0x1
#define HBRN8_POLL_TOUT_MS 100
#define DEFAULT_CLK_RATE_HZ 1000000
-#define BUS_VECTOR_NAME_LEN 32
#define MAX_SUPP_MAC 64
+#define MAX_ESI_VEC 32
#define UFS_HW_VER_MAJOR_MASK GENMASK(31, 28)
#define UFS_HW_VER_MINOR_MASK GENMASK(27, 16)
#define UFS_HW_VER_STEP_MASK GENMASK(15, 0)
-
-/* vendor specific pre-defined parameters */
-#define SLOW 1
-#define FAST 2
+#define UFS_DEV_VER_MAJOR_MASK GENMASK(7, 4)
#define UFS_QCOM_LIMIT_HS_RATE PA_HS_MODE_B
@@ -56,6 +51,8 @@ enum {
UFS_AH8_CFG = 0xFC,
REG_UFS_CFG3 = 0x271C,
+
+ REG_UFS_DEBUG_SPARE_CFG = 0x284C,
};
/* QCOM UFS host controller vendor specific debug registers */
@@ -93,9 +90,6 @@ enum {
#define TEST_BUS_SEL GENMASK(22, 19)
#define UFS_REG_TEST_BUS_EN BIT(30)
-#define UFS_PHY_RESET_ENABLE 1
-#define UFS_PHY_RESET_DISABLE 0
-
/* bit definitions for REG_UFS_CFG2 register */
#define UAWM_HW_CGC_EN BIT(0)
#define UARM_HW_CGC_EN BIT(1)
@@ -106,6 +100,9 @@ enum {
#define TMRLUT_HW_CGC_EN BIT(6)
#define OCSC_HW_CGC_EN BIT(7)
+/* bit definitions for REG_UFS_CFG3 register */
+#define ESI_VEC_MASK GENMASK(22, 12)
+
/* bit definitions for REG_UFS_PARAM0 */
#define MAX_HS_GEAR_MASK GENMASK(6, 4)
#define UFS_QCOM_MAX_GEAR(x) FIELD_GET(MAX_HS_GEAR_MASK, (x))
@@ -118,13 +115,6 @@ enum {
DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\
TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
-/* bit offset */
-#define OFFSET_CLK_NS_REG 0xa
-
-/* bit masks */
-#define MASK_TX_SYMBOL_CLK_1US_REG GENMASK(9, 0)
-#define MASK_CLK_NS_REG GENMASK(23, 10)
-
/* QUniPro Vendor specific attributes */
#define PA_VS_CONFIG_REG1 0x9000
#define DME_VS_CORE_CLK_CTRL 0xD002
@@ -158,8 +148,7 @@ ufs_qcom_get_controller_revision(struct ufs_hba *hba,
static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
{
- ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, FIELD_PREP(UFS_PHY_SOFT_RESET, UFS_PHY_RESET_ENABLE),
- REG_UFS_CFG1);
+ ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
/*
* Make sure assertion of ufs phy reset is written to
@@ -170,8 +159,7 @@ static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
{
- ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, FIELD_PREP(UFS_PHY_SOFT_RESET, UFS_PHY_RESET_DISABLE),
- REG_UFS_CFG1);
+ ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, 0, REG_UFS_CFG1);
/*
* Make sure de-assertion of ufs phy reset is written to
@@ -195,28 +183,11 @@ struct ufs_qcom_testbus {
struct gpio_desc;
struct ufs_qcom_host {
- /*
- * Set this capability if host controller supports the QUniPro mode
- * and if driver wants the Host controller to operate in QUniPro mode.
- * Note: By default this capability will be kept enabled if host
- * controller supports the QUniPro mode.
- */
- #define UFS_QCOM_CAP_QUNIPRO 0x1
-
- /*
- * Set this capability if host controller can retain the secure
- * configuration even after UFS controller core power collapse.
- */
- #define UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE 0x2
- u32 caps;
-
struct phy *generic_phy;
struct ufs_hba *hba;
struct ufs_pa_layer_attr dev_req_params;
- struct clk *rx_l0_sync_clk;
- struct clk *tx_l0_sync_clk;
- struct clk *rx_l1_sync_clk;
- struct clk *tx_l1_sync_clk;
+ struct clk_bulk_data *clks;
+ u32 num_clks;
bool is_lane_clks_enabled;
struct icc_path *icc_ddr;
@@ -240,6 +211,7 @@ struct ufs_qcom_host {
struct gpio_desc *device_reset;
+ struct ufs_host_params host_params;
u32 phy_gear;
bool esi_enabled;
@@ -261,9 +233,4 @@ ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
int ufs_qcom_testbus_config(struct ufs_qcom_host *host);
-static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host)
-{
- return host->caps & UFS_QCOM_CAP_QUNIPRO;
-}
-
#endif /* UFS_QCOM_H_ */
diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
index db9d9365ff55..a3e69ecafd27 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.c
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -339,61 +339,60 @@ static int ufshcd_parse_operating_points(struct ufs_hba *hba)
}
/**
- * ufshcd_get_pwr_dev_param - get finally agreed attributes for
- * power mode change
- * @pltfrm_param: pointer to platform parameters
+ * ufshcd_negotiate_pwr_params - find power mode settings that are supported by
+ * both the controller and the device
+ * @host_params: pointer to host parameters
* @dev_max: pointer to device attributes
* @agreed_pwr: returned agreed attributes
*
* Return: 0 on success, non-zero value on failure.
*/
-int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *pltfrm_param,
- const struct ufs_pa_layer_attr *dev_max,
- struct ufs_pa_layer_attr *agreed_pwr)
+int ufshcd_negotiate_pwr_params(const struct ufs_host_params *host_params,
+ const struct ufs_pa_layer_attr *dev_max,
+ struct ufs_pa_layer_attr *agreed_pwr)
{
- int min_pltfrm_gear;
+ int min_host_gear;
int min_dev_gear;
bool is_dev_sup_hs = false;
- bool is_pltfrm_max_hs = false;
+ bool is_host_max_hs = false;
if (dev_max->pwr_rx == FAST_MODE)
is_dev_sup_hs = true;
- if (pltfrm_param->desired_working_mode == UFS_HS_MODE) {
- is_pltfrm_max_hs = true;
- min_pltfrm_gear = min_t(u32, pltfrm_param->hs_rx_gear,
- pltfrm_param->hs_tx_gear);
+ if (host_params->desired_working_mode == UFS_HS_MODE) {
+ is_host_max_hs = true;
+ min_host_gear = min_t(u32, host_params->hs_rx_gear,
+ host_params->hs_tx_gear);
} else {
- min_pltfrm_gear = min_t(u32, pltfrm_param->pwm_rx_gear,
- pltfrm_param->pwm_tx_gear);
+ min_host_gear = min_t(u32, host_params->pwm_rx_gear,
+ host_params->pwm_tx_gear);
}
/*
- * device doesn't support HS but
- * pltfrm_param->desired_working_mode is HS,
- * thus device and pltfrm_param don't agree
+ * device doesn't support HS but host_params->desired_working_mode is HS,
+ * thus device and host_params don't agree
*/
- if (!is_dev_sup_hs && is_pltfrm_max_hs) {
+ if (!is_dev_sup_hs && is_host_max_hs) {
pr_info("%s: device doesn't support HS\n",
__func__);
return -ENOTSUPP;
- } else if (is_dev_sup_hs && is_pltfrm_max_hs) {
+ } else if (is_dev_sup_hs && is_host_max_hs) {
/*
* since device supports HS, it supports FAST_MODE.
- * since pltfrm_param->desired_working_mode is also HS
+ * since host_params->desired_working_mode is also HS
* then final decision (FAST/FASTAUTO) is done according
* to pltfrm_params as it is the restricting factor
*/
- agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_hs;
+ agreed_pwr->pwr_rx = host_params->rx_pwr_hs;
agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
} else {
/*
- * here pltfrm_param->desired_working_mode is PWM.
+ * here host_params->desired_working_mode is PWM.
* it doesn't matter whether device supports HS or PWM,
- * in both cases pltfrm_param->desired_working_mode will
+ * in both cases host_params->desired_working_mode will
* determine the mode
*/
- agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_pwm;
+ agreed_pwr->pwr_rx = host_params->rx_pwr_pwm;
agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
}
@@ -403,9 +402,9 @@ int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *pltfrm_param,
* the same decision will be made for rx
*/
agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
- pltfrm_param->tx_lanes);
+ host_params->tx_lanes);
agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
- pltfrm_param->rx_lanes);
+ host_params->rx_lanes);
/* device maximum gear is the minimum between device rx and tx gears */
min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
@@ -418,26 +417,26 @@ int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *pltfrm_param,
* what is the gear, as it is the one that also decided previously what
* pwr the device will be configured to.
*/
- if ((is_dev_sup_hs && is_pltfrm_max_hs) ||
- (!is_dev_sup_hs && !is_pltfrm_max_hs)) {
+ if ((is_dev_sup_hs && is_host_max_hs) ||
+ (!is_dev_sup_hs && !is_host_max_hs)) {
agreed_pwr->gear_rx =
- min_t(u32, min_dev_gear, min_pltfrm_gear);
+ min_t(u32, min_dev_gear, min_host_gear);
} else if (!is_dev_sup_hs) {
agreed_pwr->gear_rx = min_dev_gear;
} else {
- agreed_pwr->gear_rx = min_pltfrm_gear;
+ agreed_pwr->gear_rx = min_host_gear;
}
agreed_pwr->gear_tx = agreed_pwr->gear_rx;
- agreed_pwr->hs_rate = pltfrm_param->hs_rate;
+ agreed_pwr->hs_rate = host_params->hs_rate;
return 0;
}
-EXPORT_SYMBOL_GPL(ufshcd_get_pwr_dev_param);
+EXPORT_SYMBOL_GPL(ufshcd_negotiate_pwr_params);
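For example, with the defaults from ufshcd_init_host_params() below (HS mode, HS-G3 on two lanes) and a device advertising FAST_MODE, HS-G4 and a single lane, the agreed result is FAST_MODE at min(G4, G3) = HS-G3 on min(1, 2) = 1 lane per direction, at the host's hs_rate.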
-void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param)
+void ufshcd_init_host_params(struct ufs_host_params *host_params)
{
- *dev_param = (struct ufs_dev_params){
+ *host_params = (struct ufs_host_params){
.tx_lanes = UFS_LANE_2,
.rx_lanes = UFS_LANE_2,
.hs_rx_gear = UFS_HS_G3,
@@ -452,7 +451,7 @@ void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param)
.desired_working_mode = UFS_HS_MODE,
};
}
-EXPORT_SYMBOL_GPL(ufshcd_init_pwr_dev_param);
+EXPORT_SYMBOL_GPL(ufshcd_init_host_params);
/**
* ufshcd_pltfrm_init - probe routine of the driver
diff --git a/drivers/ufs/host/ufshcd-pltfrm.h b/drivers/ufs/host/ufshcd-pltfrm.h
index a86a3ada4bef..df387be5216b 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.h
+++ b/drivers/ufs/host/ufshcd-pltfrm.h
@@ -10,7 +10,7 @@
#define UFS_PWM_MODE 1
#define UFS_HS_MODE 2
-struct ufs_dev_params {
+struct ufs_host_params {
u32 pwm_rx_gear; /* pwm rx gear to work in */
u32 pwm_tx_gear; /* pwm tx gear to work in */
u32 hs_rx_gear; /* hs rx gear to work in */
@@ -25,10 +25,10 @@ struct ufs_dev_params {
u32 desired_working_mode;
};
-int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *dev_param,
- const struct ufs_pa_layer_attr *dev_max,
- struct ufs_pa_layer_attr *agreed_pwr);
-void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param);
+int ufshcd_negotiate_pwr_params(const struct ufs_host_params *host_params,
+ const struct ufs_pa_layer_attr *dev_max,
+ struct ufs_pa_layer_attr *agreed_pwr);
+void ufshcd_init_host_params(struct ufs_host_params *host_params);
int ufshcd_pltfrm_init(struct platform_device *pdev,
const struct ufs_hba_variant_ops *vops);
int ufshcd_populate_vreg(struct device *dev, const char *name,