Diffstat (limited to 'drivers/ufs')
-rw-r--r--  drivers/ufs/core/ufs-sysfs.c     | 197
-rw-r--r--  drivers/ufs/core/ufshcd.c        | 186
-rw-r--r--  drivers/ufs/host/ufs-exynos.c    |   4
-rw-r--r--  drivers/ufs/host/ufs-mediatek.c  | 330
-rw-r--r--  drivers/ufs/host/ufs-mediatek.h  |  32
-rw-r--r--  drivers/ufs/host/ufs-qcom.c      |  98
-rw-r--r--  drivers/ufs/host/ufs-qcom.h      |   9
-rw-r--r--  drivers/ufs/host/ufshcd-pci.c    |  33
8 files changed, 716 insertions, 173 deletions
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index de8b6acd4058..4bd7d491e3c5 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -5,6 +5,7 @@
#include <linux/string.h>
#include <linux/bitfield.h>
#include <linux/unaligned.h>
+#include <linux/string_choices.h>
#include <ufs/ufs.h>
#include <ufs/unipro.h>
@@ -87,6 +88,23 @@ static const char *ufs_wb_resize_status_to_string(enum wb_resize_status status)
}
}
+static const char * const ufs_hid_states[] = {
+ [HID_IDLE] = "idle",
+ [ANALYSIS_IN_PROGRESS] = "analysis_in_progress",
+ [DEFRAG_REQUIRED] = "defrag_required",
+ [DEFRAG_IN_PROGRESS] = "defrag_in_progress",
+ [DEFRAG_COMPLETED] = "defrag_completed",
+ [DEFRAG_NOT_REQUIRED] = "defrag_not_required",
+};
+
+static const char *ufs_hid_state_to_string(enum ufs_hid_state state)
+{
+ if (state < NUM_UFS_HID_STATES)
+ return ufs_hid_states[state];
+
+ return "unknown";
+}
+
static const char *ufshcd_uic_link_state_to_string(
enum uic_link_state state)
{
@@ -1499,7 +1517,7 @@ static ssize_t _name##_show(struct device *dev, \
ret = -EINVAL; \
goto out; \
} \
- ret = sysfs_emit(buf, "%s\n", flag ? "true" : "false"); \
+ ret = sysfs_emit(buf, "%s\n", str_true_false(flag)); \
out: \
up(&hba->host_sem); \
return ret; \
@@ -1763,6 +1781,178 @@ static const struct attribute_group ufs_sysfs_attributes_group = {
.attrs = ufs_sysfs_attributes,
};
+static int hid_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+ enum attr_idn idn, u32 *attr_val)
+{
+ int ret;
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ up(&hba->host_sem);
+ return -EBUSY;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+ ret = ufshcd_query_attr(hba, opcode, idn, 0, 0, attr_val);
+ ufshcd_rpm_put_sync(hba);
+
+ up(&hba->host_sem);
+ return ret;
+}
+
+static ssize_t analysis_trigger_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 mode;
+ int ret;
+
+ if (sysfs_streq(buf, "enable"))
+ mode = HID_ANALYSIS_ENABLE;
+ else if (sysfs_streq(buf, "disable"))
+ mode = HID_ANALYSIS_AND_DEFRAG_DISABLE;
+ else
+ return -EINVAL;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, &mode);
+
+ return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR_WO(analysis_trigger);
+
+static ssize_t defrag_trigger_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 mode;
+ int ret;
+
+ if (sysfs_streq(buf, "enable"))
+ mode = HID_ANALYSIS_AND_DEFRAG_ENABLE;
+ else if (sysfs_streq(buf, "disable"))
+ mode = HID_ANALYSIS_AND_DEFRAG_DISABLE;
+ else
+ return -EINVAL;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, &mode);
+
+ return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR_WO(defrag_trigger);
+
+static ssize_t fragmented_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int ret;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_HID_AVAILABLE_SIZE, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u\n", value);
+}
+
+static DEVICE_ATTR_RO(fragmented_size);
+
+static ssize_t defrag_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int ret;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_HID_SIZE, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u\n", value);
+}
+
+static ssize_t defrag_size_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int ret;
+
+ if (kstrtou32(buf, 0, &value))
+ return -EINVAL;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_HID_SIZE, &value);
+
+ return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR_RW(defrag_size);
+
+static ssize_t progress_ratio_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int ret;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_HID_PROGRESS_RATIO, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u\n", value);
+}
+
+static DEVICE_ATTR_RO(progress_ratio);
+
+static ssize_t state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int ret;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_HID_STATE, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%s\n", ufs_hid_state_to_string(value));
+}
+
+static DEVICE_ATTR_RO(state);
+
+static struct attribute *ufs_sysfs_hid[] = {
+ &dev_attr_analysis_trigger.attr,
+ &dev_attr_defrag_trigger.attr,
+ &dev_attr_fragmented_size.attr,
+ &dev_attr_defrag_size.attr,
+ &dev_attr_progress_ratio.attr,
+ &dev_attr_state.attr,
+ NULL,
+};
+
+static umode_t ufs_sysfs_hid_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return hba->dev_info.hid_sup ? attr->mode : 0;
+}
+
+static const struct attribute_group ufs_sysfs_hid_group = {
+ .name = "hid",
+ .attrs = ufs_sysfs_hid,
+ .is_visible = ufs_sysfs_hid_is_visible,
+};
+
static const struct attribute_group *ufs_sysfs_groups[] = {
&ufs_sysfs_default_group,
&ufs_sysfs_capabilities_group,
@@ -1777,6 +1967,7 @@ static const struct attribute_group *ufs_sysfs_groups[] = {
&ufs_sysfs_string_descriptors_group,
&ufs_sysfs_flags_group,
&ufs_sysfs_attributes_group,
+ &ufs_sysfs_hid_group,
NULL,
};
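The new "hid" group exposes the host-initiated defragmentation controls as one sysfs node per attribute. Below is a minimal userspace sketch of the intended flow, kicking off a defrag and polling the state node; the /sys path is an assumption (it depends on the platform device name), while the node names come from the group above.

/* Hypothetical HID usage sketch. HID_DIR is an assumed path; the node
 * names ("defrag_trigger", "state") are the ones added by this patch. */
#include <stdio.h>

#define HID_DIR "/sys/devices/platform/ufshc/hid"	/* assumed */

int main(void)
{
	char state[40] = "";
	FILE *f = fopen(HID_DIR "/defrag_trigger", "w");

	if (!f)
		return 1;
	fputs("enable", f);	/* maps to HID_ANALYSIS_AND_DEFRAG_ENABLE */
	fclose(f);

	f = fopen(HID_DIR "/state", "r");
	if (f && fgets(state, sizeof(state), f))
		printf("hid state: %s", state);	/* e.g. "defrag_in_progress" */
	if (f)
		fclose(f);
	return 0;
}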
@@ -1808,7 +1999,7 @@ UFS_UNIT_DESC_PARAM(logical_block_size, _LOGICAL_BLK_SIZE, 1);
UFS_UNIT_DESC_PARAM(logical_block_count, _LOGICAL_BLK_COUNT, 8);
UFS_UNIT_DESC_PARAM(erase_block_size, _ERASE_BLK_SIZE, 4);
UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1);
-UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8);
+UFS_UNIT_DESC_PARAM(physical_memory_resource_count, _PHY_MEM_RSRC_CNT, 8);
UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2);
UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1);
UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4);
@@ -1825,7 +2016,7 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = {
&dev_attr_logical_block_count.attr,
&dev_attr_erase_block_size.attr,
&dev_attr_provisioning_type.attr,
- &dev_attr_physical_memory_resourse_count.attr,
+ &dev_attr_physical_memory_resource_count.attr,
&dev_attr_context_capabilities.attr,
&dev_attr_large_unit_granularity.attr,
&dev_attr_wb_buf_alloc_units.attr,
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 4410e7d93b7d..96ad57c3144b 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -364,6 +364,34 @@ void ufshcd_disable_irq(struct ufs_hba *hba)
}
EXPORT_SYMBOL_GPL(ufshcd_disable_irq);
+/**
+ * ufshcd_enable_intr - enable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
+{
+ u32 old_val = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ u32 new_val = old_val | intrs;
+
+ if (new_val != old_val)
+ ufshcd_writel(hba, new_val, REG_INTERRUPT_ENABLE);
+}
+
+/**
+ * ufshcd_disable_intr - disable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
+{
+ u32 old_val = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ u32 new_val = old_val & ~intrs;
+
+ if (new_val != old_val)
+ ufshcd_writel(hba, new_val, REG_INTERRUPT_ENABLE);
+}
+
static void ufshcd_configure_wb(struct ufs_hba *hba)
{
if (!ufshcd_is_wb_allowed(hba))
@@ -2566,7 +2594,7 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
* @hba: per adapter instance
* @uic_cmd: UIC command
*
- * Return: 0 only if success.
+ * Return: 0 if successful; < 0 upon failure.
*/
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
@@ -2596,6 +2624,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
*/
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
+ unsigned long flags;
int ret;
if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
@@ -2605,6 +2634,10 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
if (!ret)
ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
@@ -2682,32 +2715,6 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
}
/**
- * ufshcd_enable_intr - enable interrupts
- * @hba: per adapter instance
- * @intrs: interrupt bits
- */
-static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
-{
- u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
-
- set |= intrs;
- ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
-}
-
-/**
- * ufshcd_disable_intr - disable interrupts
- * @hba: per adapter instance
- * @intrs: interrupt bits
- */
-static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
-{
- u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
-
- set &= ~intrs;
- ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
-}
-
-/**
* ufshcd_prepare_req_desc_hdr - Fill UTP Transfer request descriptor header
* according to request
* @hba: per adapter instance
@@ -2826,8 +2833,6 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
/* Copy the Descriptor */
if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
memcpy(ucd_req_ptr + 1, query->descriptor, len);
-
- memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
@@ -2840,8 +2845,6 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
.transaction_code = UPIU_TRANSACTION_NOP_OUT,
.task_tag = lrbp->task_tag,
};
-
- memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
@@ -2867,6 +2870,8 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
else
ret = -EINVAL;
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+
return ret;
}
@@ -3074,6 +3079,9 @@ static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
hba->dev_cmd.type = cmd_type;
}
+/*
+ * Return: 0 upon success; < 0 upon failure.
+ */
static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
@@ -3186,9 +3194,13 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
break;
}
+ WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
+/*
+ * Return: 0 upon success; < 0 upon failure.
+ */
static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, int max_timeout)
{
@@ -3263,6 +3275,7 @@ retry:
}
}
+ WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
@@ -3280,6 +3293,9 @@ static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
ufshcd_release(hba);
}
+/*
+ * Return: 0 upon success; < 0 upon failure.
+ */
static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
const u32 tag, int timeout)
{
@@ -3367,6 +3383,7 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
dev_err(hba->dev,
"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
__func__, opcode, idn, ret, retries);
+ WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret);
return ret;
}
@@ -3378,7 +3395,7 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
* @index: flag index to access
* @flag_res: the flag value after the query request completes
*
- * Return: 0 for success, non-zero in case of failure.
+ * Return: 0 for success; < 0 upon failure.
*/
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, u8 index, bool *flag_res)
@@ -3434,6 +3451,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
out_unlock:
ufshcd_dev_man_unlock(hba);
+ WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
@@ -3446,7 +3464,7 @@ out_unlock:
* @selector: selector field
* @attr_val: the attribute value after the query request completes
*
- * Return: 0 for success, non-zero in case of failure.
+ * Return: 0 upon success; < 0 upon failure.
*/
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
@@ -3495,6 +3513,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
out_unlock:
ufshcd_dev_man_unlock(hba);
+ WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
@@ -3509,7 +3528,7 @@ out_unlock:
* @attr_val: the attribute value after the query request
* completes
*
- * Return: 0 for success, non-zero in case of failure.
+ * Return: 0 for success; < 0 upon failure.
*/
int ufshcd_query_attr_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
@@ -3532,9 +3551,13 @@ int ufshcd_query_attr_retry(struct ufs_hba *hba,
dev_err(hba->dev,
"%s: query attribute, idn %d, failed with error %d after %d retries\n",
__func__, idn, ret, QUERY_REQ_RETRIES);
+ WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret);
return ret;
}
+/*
+ * Return: 0 if successful; < 0 upon failure.
+ */
static int __ufshcd_query_descriptor(struct ufs_hba *hba,
enum query_opcode opcode, enum desc_idn idn, u8 index,
u8 selector, u8 *desc_buf, int *buf_len)
@@ -3592,6 +3615,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
out_unlock:
hba->dev_cmd.query.descriptor = NULL;
ufshcd_dev_man_unlock(hba);
+ WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
@@ -3608,7 +3632,7 @@ out_unlock:
* The buf_len parameter will contain, on return, the length parameter
* received on the response.
*
- * Return: 0 for success, non-zero in case of failure.
+ * Return: 0 for success; < 0 upon failure.
*/
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
enum query_opcode opcode,
@@ -3626,6 +3650,7 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
break;
}
+ WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
@@ -3638,7 +3663,7 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
* @param_read_buf: pointer to buffer where parameter would be read
* @param_size: sizeof(param_read_buf)
*
- * Return: 0 in case of success, non-zero otherwise.
+ * Return: 0 in case of success; < 0 upon failure.
*/
int ufshcd_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
@@ -3705,6 +3730,7 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
out:
if (is_kmalloc)
kfree(desc_buf);
+ WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret);
return ret;
}
@@ -3818,7 +3844,7 @@ out:
* @param_read_buf: pointer to buffer where parameter would be read
* @param_size: sizeof(param_read_buf)
*
- * Return: 0 in case of success, non-zero otherwise.
+ * Return: 0 in case of success; < 0 upon failure.
*/
static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
int lun,
@@ -4254,6 +4280,30 @@ out:
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
/**
+ * ufshcd_dme_rmw - read, modify and write a DME attribute
+ * @hba: per adapter instance
+ * @mask: indicates which bits of the attribute are updated
+ * @val: value to write into the bits selected by @mask
+ * @attr: dme attribute
+ *
+ * Return: 0 upon success; < 0 upon failure.
+ */
+int ufshcd_dme_rmw(struct ufs_hba *hba, u32 mask,
+ u32 val, u32 attr)
+{
+ u32 cfg = 0;
+ int err;
+
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB(attr), &cfg);
+ if (err)
+ return err;
+
+ cfg &= ~mask;
+ cfg |= (val & mask);
+
+ return ufshcd_dme_set(hba, UIC_ARG_MIB(attr), cfg);
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_rmw);
+
+/**
* ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
* state) and waits for it to take effect.
*
@@ -4275,7 +4325,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
unsigned long flags;
u8 status;
int ret;
- bool reenable_intr = false;
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
@@ -4286,15 +4335,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
goto out_unlock;
}
hba->uic_async_done = &uic_async_done;
- if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
- ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
- /*
- * Make sure UIC command completion interrupt is disabled before
- * issuing UIC command.
- */
- ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- reenable_intr = true;
- }
+ ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
spin_unlock_irqrestore(hba->host->host_lock, flags);
ret = __ufshcd_send_uic_cmd(hba, cmd);
if (ret) {
@@ -4338,9 +4379,7 @@ out:
spin_lock_irqsave(hba->host->host_lock, flags);
hba->active_uic_cmd = NULL;
hba->uic_async_done = NULL;
- if (reenable_intr)
- ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
- if (ret) {
+ if (ret && !hba->pm_op_in_progress) {
ufshcd_set_link_broken(hba);
ufshcd_schedule_eh_work(hba);
}
@@ -4348,6 +4387,14 @@ out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
mutex_unlock(&hba->uic_cmd_mutex);
+ /*
+ * If the hibern8 exit fails during runtime resume, the link gets
+ * stuck and cannot be recovered through the error handler. To fix
+ * this, use link recovery instead of the error handler.
+ */
+ if (ret && hba->pm_op_in_progress)
+ ret = ufshcd_link_recovery(hba);
+
return ret;
}
@@ -4362,28 +4409,17 @@ int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
int ret;
+ if (uic_cmd->argument1 != UIC_ARG_MIB(PA_PWRMODE) ||
+ uic_cmd->command != UIC_CMD_DME_SET)
+ return ufshcd_send_uic_cmd(hba, uic_cmd);
+
if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
return 0;
ufshcd_hold(hba);
-
- if (uic_cmd->argument1 == UIC_ARG_MIB(PA_PWRMODE) &&
- uic_cmd->command == UIC_CMD_DME_SET) {
- ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd);
- goto out;
- }
-
- mutex_lock(&hba->uic_cmd_mutex);
- ufshcd_add_delay_before_dme_cmd(hba);
-
- ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
- if (!ret)
- ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
-
- mutex_unlock(&hba->uic_cmd_mutex);
-
-out:
+ ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd);
ufshcd_release(hba);
+
return ret;
}
@@ -4796,7 +4832,7 @@ out:
* 3. Program UTRL and UTMRL base address
* 4. Configure run-stop-registers
*
- * Return: 0 on success, non-zero value on failure.
+ * Return: 0 if successful; < 0 upon failure.
*/
int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
@@ -6623,9 +6659,14 @@ static void ufshcd_err_handler(struct work_struct *work)
up(&hba->host_sem);
return;
}
- ufshcd_set_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
+
ufshcd_err_handling_prepare(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_set_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
/* Complete requests that have door-bell cleared by h/w */
ufshcd_complete_requests(hba, false);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -7802,7 +7843,8 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
hba->silence_err_logs = false;
/* scale up clocks to max frequency before full reinitialization */
- ufshcd_scale_clks(hba, ULONG_MAX, true);
+ if (ufshcd_is_clkscaling_supported(hba))
+ ufshcd_scale_clks(hba, ULONG_MAX, true);
err = ufshcd_hba_enable(hba);
@@ -8414,6 +8456,10 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
dev_info->rtt_cap = desc_buf[DEVICE_DESC_PARAM_RTT_CAP];
+ dev_info->hid_sup = get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP) &
+ UFS_DEV_HID_SUPPORT;
+
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
err = ufshcd_read_string_desc(hba, model_index,
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index 3e545af536e5..f0adcd9dd553 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -1110,8 +1110,8 @@ static int exynos_ufs_post_link(struct ufs_hba *hba)
hci_writel(ufs, val, HCI_TXPRDT_ENTRY_SIZE);
hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_RXPRDT_ENTRY_SIZE);
- hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
- hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
+ hci_writel(ufs, BIT(hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
+ hci_writel(ufs, BIT(hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN);
if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 182f58d0c9db..86ae73b89d4d 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -50,6 +50,7 @@ static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
static const struct of_device_id ufs_mtk_of_match[] = {
{ .compatible = "mediatek,mt8183-ufshci" },
+ { .compatible = "mediatek,mt8195-ufshci" },
{},
};
MODULE_DEVICE_TABLE(of, ufs_mtk_of_match);
@@ -96,49 +97,59 @@ static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
+ return host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
}
static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
+ return host->caps & UFS_MTK_CAP_VA09_PWR_CTRL;
}
static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
+ return host->caps & UFS_MTK_CAP_BROKEN_VCC;
}
static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
+ return host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO;
}
static bool ufs_mtk_is_tx_skew_fix(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return (host->caps & UFS_MTK_CAP_TX_SKEW_FIX);
+ return host->caps & UFS_MTK_CAP_TX_SKEW_FIX;
}
static bool ufs_mtk_is_rtff_mtcmos(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return (host->caps & UFS_MTK_CAP_RTFF_MTCMOS);
+ return host->caps & UFS_MTK_CAP_RTFF_MTCMOS;
}
static bool ufs_mtk_is_allow_vccqx_lpm(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return (host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM);
+ return host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM;
+}
+
+static bool ufs_mtk_is_clk_scale_ready(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct ufs_mtk_clk *mclk = &host->mclk;
+
+ return mclk->ufs_sel_clki &&
+ mclk->ufs_sel_max_clki &&
+ mclk->ufs_sel_min_clki;
}
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
@@ -267,6 +278,13 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
ufshcd_writel(hba,
ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
REG_UFS_XOUFS_CTRL);
+
+ /* DDR_EN setting */
+ if (host->ip_ver >= IP_VER_MT6989) {
+ ufshcd_rmwl(hba, UFS_MASK(0x7FFF, 8),
+ 0x453000, REG_UFS_MMIO_OPT_CTRL_0);
+ }
+
}
return 0;
@@ -344,7 +362,16 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
- ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);
+ /*
+ * If clock-on times out, assume the clock is off and notify the TFA
+ * to apply the clock-off settings (keep DIFN disabled, release the
+ * resource).
+ * If clock-off times out, assume the clock will turn off eventually
+ * and clear ref_clk_enabled directly (keep DIFN disabled, keep the
+ * resource).
+ */
+ if (on)
+ ufs_mtk_ref_clk_notify(false, POST_CHANGE, res);
+ else
+ host->ref_clk_enabled = false;
return -ETIMEDOUT;
@@ -663,6 +690,9 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
if (of_property_read_bool(np, "mediatek,ufs-rtff-mtcmos"))
host->caps |= UFS_MTK_CAP_RTFF_MTCMOS;
+ if (of_property_read_bool(np, "mediatek,ufs-broken-rtc"))
+ host->caps |= UFS_MTK_CAP_MCQ_BROKEN_RTC;
+
dev_info(hba->dev, "caps: 0x%x", host->caps);
}
@@ -779,6 +809,91 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
return ret;
}
+static u32 ufs_mtk_mcq_get_irq(struct ufs_hba *hba, unsigned int cpu)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct blk_mq_tag_set *tag_set = &hba->host->tag_set;
+ struct blk_mq_queue_map *map = &tag_set->map[HCTX_TYPE_DEFAULT];
+ unsigned int nr = map->nr_queues;
+ unsigned int q_index;
+
+ q_index = map->mq_map[cpu];
+ if (q_index >= nr) {
+ dev_err(hba->dev, "hwq index %d exceeds %d\n",
+ q_index, nr);
+ return MTK_MCQ_INVALID_IRQ;
+ }
+
+ return host->mcq_intr_info[q_index].irq;
+}
+
+static void ufs_mtk_mcq_set_irq_affinity(struct ufs_hba *hba, unsigned int cpu)
+{
+ unsigned int irq, _cpu;
+ int ret;
+
+ irq = ufs_mtk_mcq_get_irq(hba, cpu);
+ if (irq == MTK_MCQ_INVALID_IRQ) {
+ dev_err(hba->dev, "invalid irq. unable to bind irq to cpu%d", cpu);
+ return;
+ }
+
+ /* force migrate irq of cpu0 to cpu3 */
+ _cpu = (cpu == 0) ? 3 : cpu;
+ ret = irq_set_affinity(irq, cpumask_of(_cpu));
+ if (ret) {
+ dev_err(hba->dev, "set irq %d affinity to CPU %d failed\n",
+ irq, _cpu);
+ return;
+ }
+ dev_info(hba->dev, "set irq %d affinity to CPU: %d\n", irq, _cpu);
+}
+
+static bool ufs_mtk_is_legacy_chipset(struct ufs_hba *hba, u32 hw_ip_ver)
+{
+ bool is_legacy = false;
+
+ switch (hw_ip_ver) {
+ case IP_LEGACY_VER_MT6893:
+ case IP_LEGACY_VER_MT6781:
+ /* can add other legacy chipset ID here accordingly */
+ is_legacy = true;
+ break;
+ default:
+ break;
+ }
+ dev_info(hba->dev, "legacy IP version - 0x%x, is legacy : %d", hw_ip_ver, is_legacy);
+
+ return is_legacy;
+}
+
+/*
+ * The HW version format changed from 01MMmmmm to 1MMMmmmm starting with
+ * project MT6878. To allow correct version comparisons, SW rewrites the
+ * version numbers of the following older projects:
+ * IP_VER_MT6983 0x00360000 to 0x10360000
+ * IP_VER_MT6897 0x01440000 to 0x10440000
+ * IP_VER_MT6989 0x01450000 to 0x10450000
+ * IP_VER_MT6991 0x01460000 to 0x10460000
+ */
+static void ufs_mtk_get_hw_ip_version(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ u32 hw_ip_ver;
+
+ hw_ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
+
+ if (((hw_ip_ver & (0xFF << 24)) == (0x1 << 24)) ||
+ ((hw_ip_ver & (0xFF << 24)) == 0)) {
+ hw_ip_ver &= ~(0xFF << 24);
+ hw_ip_ver |= (0x1 << 28);
+ }
+
+ host->ip_ver = hw_ip_ver;
+
+ host->legacy_ip_ver = ufs_mtk_is_legacy_chipset(hba, hw_ip_ver);
+}
+
static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
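As a worked example of the normalization above, the snippet below is a standalone userspace mirror of the version rewrite (the helper mirrors ufs_mtk_get_hw_ip_version(); the main() harness is illustrative only).

#include <stdio.h>
#include <stdint.h>

/* Versions whose top byte is 0x00 or 0x01 (old 01MMmmmm format) get that
 * byte cleared and bit 28 set, yielding the new 1MMMmmmm format. */
static uint32_t normalize_ip_ver(uint32_t v)
{
	uint32_t top = (v >> 24) & 0xFF;

	if (top == 0x01 || top == 0x00) {
		v &= 0x00FFFFFFu;	/* drop the old top byte */
		v |= 0x10000000u;	/* set the new-format marker, bit 28 */
	}
	return v;
}

int main(void)
{
	/* 0x01450000 (MT6989, old format) prints as 0x10450000 */
	printf("0x%08x\n", normalize_ip_ver(0x01450000));
	return 0;
}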
@@ -818,8 +933,10 @@ static void ufs_mtk_init_clocks(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct list_head *head = &hba->clk_list_head;
- struct ufs_mtk_clk *mclk = &host->mclk;
struct ufs_clk_info *clki, *clki_tmp;
+ struct device *dev = hba->dev;
+ struct regulator *reg;
+ u32 volt;
/*
* Find private clocks and store them in struct ufs_mtk_clk.
@@ -837,15 +954,57 @@ static void ufs_mtk_init_clocks(struct ufs_hba *hba)
host->mclk.ufs_sel_min_clki = clki;
clk_disable_unprepare(clki->clk);
list_del(&clki->list);
+ } else if (!strcmp(clki->name, "ufs_fde")) {
+ host->mclk.ufs_fde_clki = clki;
+ } else if (!strcmp(clki->name, "ufs_fde_max_src")) {
+ host->mclk.ufs_fde_max_clki = clki;
+ clk_disable_unprepare(clki->clk);
+ list_del(&clki->list);
+ } else if (!strcmp(clki->name, "ufs_fde_min_src")) {
+ host->mclk.ufs_fde_min_clki = clki;
+ clk_disable_unprepare(clki->clk);
+ list_del(&clki->list);
}
}
- if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
- !mclk->ufs_sel_min_clki) {
+ list_for_each_entry(clki, head, list)
+ dev_info(hba->dev, "clk \"%s\" present", clki->name);
+
+ if (!ufs_mtk_is_clk_scale_ready(hba)) {
hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
dev_info(hba->dev,
"%s: Clk-scaling not ready. Feature disabled.",
__func__);
+ return;
+ }
+
+ /*
+ * Get the vcore regulator by default if the DT provides the relevant
+ * settings, regardless of whether clock scaling is supported
+ * (it may be disabled by the customer).
+ */
+ reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
+ if (IS_ERR(reg)) {
+ dev_info(dev, "failed to get dvfsrc-vcore: %ld",
+ PTR_ERR(reg));
+ return;
+ }
+
+ if (of_property_read_u32(dev->of_node, "clk-scale-up-vcore-min",
+ &volt)) {
+ dev_info(dev, "failed to get clk-scale-up-vcore-min");
+ return;
+ }
+
+ host->mclk.reg_vcore = reg;
+ host->mclk.vcore_volt = volt;
+
+ /* If default boot is max gear, request vcore */
+ if (reg && volt && host->clk_scale_up) {
+ if (regulator_set_voltage(reg, volt, INT_MAX)) {
+ dev_info(hba->dev,
+ "Failed to set vcore to %d\n", volt);
+ }
}
}
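The regulator_set_voltage(reg, volt, INT_MAX) call above is the usual Linux idiom for a voltage floor request: the [volt, INT_MAX] window asks the dvfsrc-vcore regulator for at least volt, and a later [0, INT_MAX] call releases the request. A minimal sketch of that pairing, assuming reg and volt were obtained as in ufs_mtk_init_clocks():

#include <linux/limits.h>
#include <linux/regulator/consumer.h>

/* Sketch of the vcore floor request/release pairing; 'reg' and 'volt'
 * are assumed to come from ufs_mtk_init_clocks() above. */
static int vcore_request_floor(struct regulator *reg, int volt)
{
	/* ask for at least 'volt'; the upper bound is unconstrained */
	return regulator_set_voltage(reg, volt, INT_MAX);
}

static int vcore_release_floor(struct regulator *reg)
{
	/* any voltage is acceptable again */
	return regulator_set_voltage(reg, 0, INT_MAX);
}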
@@ -1014,13 +1173,17 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Enable clk scaling*/
hba->caps |= UFSHCD_CAP_CLK_SCALING;
+ host->clk_scale_up = true; /* default is max freq */
/* Set runtime pm delay to replace default */
shost->rpm_autosuspend_delay = MTK_RPM_AUTOSUSPEND_DELAY_MS;
hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
+
hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
- hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
+ if (host->caps & UFS_MTK_CAP_MCQ_BROKEN_RTC)
+ hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
+
hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
@@ -1050,7 +1213,7 @@ static int ufs_mtk_init(struct ufs_hba *hba)
ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
- host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
+ ufs_mtk_get_hw_ip_version(hba);
goto out;
@@ -1505,6 +1668,13 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
u16 mid = dev_info->wmanufacturerid;
+ unsigned int cpu;
+
+ if (hba->mcq_enabled) {
+ /* Iterate all cpus to set affinity for mcq irqs */
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+ ufs_mtk_mcq_set_irq_affinity(hba, cpu);
+ }
if (mid == UFS_VENDOR_SAMSUNG) {
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
@@ -1598,24 +1768,30 @@ static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
hba->vps->ondemand_data.downdifferential = 20;
}
-/**
- * ufs_mtk_clk_scale - Internal clk scaling operation
- *
- * MTK platform supports clk scaling by switching parent of ufs_sel(mux).
- * The ufs_sel downstream to ufs_ck which feeds directly to UFS hardware.
- * Max and min clocks rate of ufs_sel defined in dts should match rate of
- * "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
- * This prevent changing rate of pll clock that is shared between modules.
- *
- * @hba: per adapter instance
- * @scale_up: True for scaling up and false for scaling down
- */
-static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
+static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct ufs_mtk_clk *mclk = &host->mclk;
struct ufs_clk_info *clki = mclk->ufs_sel_clki;
- int ret = 0;
+ struct ufs_clk_info *fde_clki = mclk->ufs_fde_clki;
+ struct regulator *reg;
+ int volt, ret = 0;
+ bool clk_bind_vcore = false;
+ bool clk_fde_scale = false;
+
+ if (!hba->clk_scaling.is_initialized)
+ return;
+
+ if (!clki || !fde_clki)
+ return;
+
+ reg = host->mclk.reg_vcore;
+ volt = host->mclk.vcore_volt;
+ if (reg && volt != 0)
+ clk_bind_vcore = true;
+
+ if (mclk->ufs_fde_max_clki && mclk->ufs_fde_min_clki)
+ clk_fde_scale = true;
ret = clk_prepare_enable(clki->clk);
if (ret) {
@@ -1624,21 +1800,109 @@ static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
return;
}
+ if (clk_fde_scale) {
+ ret = clk_prepare_enable(fde_clki->clk);
+ if (ret) {
+ dev_info(hba->dev,
+ "fde clk_prepare_enable() failed, ret: %d\n", ret);
+ clk_disable_unprepare(clki->clk);
+ return;
+ }
+ }
+
if (scale_up) {
+ if (clk_bind_vcore) {
+ ret = regulator_set_voltage(reg, volt, INT_MAX);
+ if (ret) {
+ dev_info(hba->dev,
+ "Failed to set vcore to %d\n", volt);
+ goto out;
+ }
+ }
+
ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
- clki->curr_freq = clki->max_freq;
+ if (ret) {
+ dev_info(hba->dev, "Failed to set clk mux, ret = %d\n",
+ ret);
+ }
+
+ if (clk_fde_scale) {
+ ret = clk_set_parent(fde_clki->clk,
+ mclk->ufs_fde_max_clki->clk);
+ if (ret) {
+ dev_info(hba->dev,
+ "Failed to set fde clk mux, ret = %d\n",
+ ret);
+ }
+ }
} else {
+ if (clk_fde_scale) {
+ ret = clk_set_parent(fde_clki->clk,
+ mclk->ufs_fde_min_clki->clk);
+ if (ret) {
+ dev_info(hba->dev,
+ "Failed to set fde clk mux, ret = %d\n",
+ ret);
+ goto out;
+ }
+ }
+
ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
- clki->curr_freq = clki->min_freq;
- }
+ if (ret) {
+ dev_info(hba->dev, "Failed to set clk mux, ret = %d\n",
+ ret);
+ goto out;
+ }
- if (ret) {
- dev_info(hba->dev,
- "Failed to set ufs_sel_clki, ret: %d\n", ret);
+ if (clk_bind_vcore) {
+ ret = regulator_set_voltage(reg, 0, INT_MAX);
+ if (ret) {
+ dev_info(hba->dev,
+ "failed to set vcore to MIN\n");
+ }
+ }
}
+out:
clk_disable_unprepare(clki->clk);
+ if (clk_fde_scale)
+ clk_disable_unprepare(fde_clki->clk);
+}
+
+/**
+ * ufs_mtk_clk_scale - Internal clk scaling operation
+ *
+ * MTK platforms support clk scaling by switching the parent of ufs_sel (a
+ * mux). ufs_sel feeds ufs_ck, which clocks the UFS hardware directly. The
+ * max and min rates of ufs_sel defined in the dts should match the rates
+ * of "ufs_sel_max_src" and "ufs_sel_min_src" respectively. This prevents
+ * changing the rate of a pll clock that is shared between modules.
+ *
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up and false for scaling down
+ */
+static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct ufs_mtk_clk *mclk = &host->mclk;
+ struct ufs_clk_info *clki = mclk->ufs_sel_clki;
+
+ if (host->clk_scale_up == scale_up)
+ goto out;
+
+ _ufs_mtk_clk_scale(hba, scale_up);
+
+ host->clk_scale_up = scale_up;
+
+ /* Must always be set before clk_set_rate() */
+ if (scale_up)
+ clki->curr_freq = clki->max_freq;
+ else
+ clki->curr_freq = clki->min_freq;
+out:
trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
}
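A condensed, illustrative view of the ordering _ufs_mtk_clk_scale() enforces: the vcore floor brackets the fast clock, so the rail is never low while the fast mux parent is selected. The names below stand in for the ufs_mtk_clk fields used above; error handling is elided.

/* Illustrative only; 'reg', 'volt', 'clki', 'max_parent' and
 * 'min_parent' stand in for the ufs_mtk_clk fields used above. */
if (scale_up) {
	regulator_set_voltage(reg, volt, INT_MAX);	/* raise the rail first */
	clk_set_parent(clki->clk, max_parent->clk);	/* then pick fast parent */
} else {
	clk_set_parent(clki->clk, min_parent->clk);	/* pick slow parent first */
	regulator_set_voltage(reg, 0, INT_MAX);		/* then release the rail */
}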
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index 05d76a6bd772..e46dc5fa209d 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -133,6 +133,8 @@ enum ufs_mtk_host_caps {
UFS_MTK_CAP_DISABLE_MCQ = 1 << 8,
/* Control MTCMOS with RTFF */
UFS_MTK_CAP_RTFF_MTCMOS = 1 << 9,
+
+ UFS_MTK_CAP_MCQ_BROKEN_RTC = 1 << 10,
};
struct ufs_mtk_crypt_cfg {
@@ -147,6 +149,11 @@ struct ufs_mtk_clk {
struct ufs_clk_info *ufs_sel_clki; /* Mux */
struct ufs_clk_info *ufs_sel_max_clki; /* Max src */
struct ufs_clk_info *ufs_sel_min_clki; /* Min src */
+ struct ufs_clk_info *ufs_fde_clki; /* Mux */
+ struct ufs_clk_info *ufs_fde_max_clki; /* Max src */
+ struct ufs_clk_info *ufs_fde_min_clki; /* Min src */
+ struct regulator *reg_vcore;
+ int vcore_volt;
};
struct ufs_mtk_hw_ver {
@@ -176,9 +183,11 @@ struct ufs_mtk_host {
bool mphy_powered_on;
bool unipro_lpm;
bool ref_clk_enabled;
+ bool clk_scale_up;
u16 ref_clk_ungating_wait_us;
u16 ref_clk_gating_wait_us;
u32 ip_ver;
+ bool legacy_ip_ver;
bool mcq_set_intr;
bool is_mcq_intr_enabled;
@@ -192,4 +201,27 @@ struct ufs_mtk_host {
/* MTK RTT support number */
#define MTK_MAX_NUM_RTT 2
+/* UFSHCI MTK ip version value */
+enum {
+ /* UFSHCI 3.1 */
+ IP_VER_MT6983 = 0x10360000,
+ IP_VER_MT6878 = 0x10420200,
+
+ /* UFSHCI 4.0 */
+ IP_VER_MT6897 = 0x10440000,
+ IP_VER_MT6989 = 0x10450000,
+ IP_VER_MT6899 = 0x10450100,
+ IP_VER_MT6991_A0 = 0x10460000,
+ IP_VER_MT6991_B0 = 0x10470000,
+ IP_VER_MT6993 = 0x10480000,
+
+ IP_VER_NONE = 0xFFFFFFFF
+};
+
+enum ip_ver_legacy {
+ IP_LEGACY_VER_MT6781 = 0x10380000,
+ IP_LEGACY_VER_MT6879 = 0x10360000,
+ IP_LEGACY_VER_MT6893 = 0x20160706
+};
+
#endif /* !_UFS_MEDIATEK_H */
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 18a978452001..76fc70503a62 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -532,6 +532,12 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
goto out_disable_phy;
}
+ ret = phy_calibrate(phy);
+ if (ret) {
+ dev_err(hba->dev, "Failed to calibrate PHY: %d\n", ret);
+ goto out_disable_phy;
+ }
+
ufs_qcom_select_unipro_mode(host);
return 0;
@@ -552,11 +558,32 @@ out_disable_phy:
*/
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
+ int err;
+
+ /* Enable UTP internal clock gating */
ufshcd_rmwl(hba, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2_CGC_EN_ALL,
REG_UFS_CFG2);
/* Ensure that HW clock gating is enabled before next operations */
ufshcd_readl(hba, REG_UFS_CFG2);
+
+ /* Enable Unipro internal clock gating */
+ err = ufshcd_dme_rmw(hba, DL_VS_CLK_CFG_MASK,
+ DL_VS_CLK_CFG_MASK, DL_VS_CLK_CFG);
+ if (err)
+ goto out;
+
+ err = ufshcd_dme_rmw(hba, PA_VS_CLK_CFG_REG_MASK,
+ PA_VS_CLK_CFG_REG_MASK, PA_VS_CLK_CFG_REG);
+ if (err)
+ goto out;
+
+ err = ufshcd_dme_rmw(hba, DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
+ DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
+ DME_VS_CORE_CLK_CTRL);
+out:
+ if (err)
+ dev_err(hba->dev, "hw clk gating enabled failed\n");
}
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
@@ -705,26 +732,17 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
if (status == PRE_CHANGE)
return 0;
- if (ufs_qcom_is_link_off(hba)) {
- /*
- * Disable the tx/rx lane symbol clocks before PHY is
- * powered down as the PLL source should be disabled
- * after downstream clocks are disabled.
- */
+ if (!ufs_qcom_is_link_active(hba))
ufs_qcom_disable_lane_clks(host);
- phy_power_off(phy);
- /* reset the connected UFS device during power down */
- ufs_qcom_device_reset_ctrl(hba, true);
- } else if (!ufs_qcom_is_link_active(hba)) {
- ufs_qcom_disable_lane_clks(host);
- }
+ /* reset the connected UFS device during power down */
+ if (ufs_qcom_is_link_off(hba) && host->device_reset)
+ ufs_qcom_device_reset_ctrl(hba, true);
return ufs_qcom_ice_suspend(host);
}
@@ -732,26 +750,11 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
int err;
- if (ufs_qcom_is_link_off(hba)) {
- err = phy_power_on(phy);
- if (err) {
- dev_err(hba->dev, "%s: failed PHY power on: %d\n",
- __func__, err);
- return err;
- }
-
- err = ufs_qcom_enable_lane_clks(host);
- if (err)
- return err;
-
- } else if (!ufs_qcom_is_link_active(hba)) {
- err = ufs_qcom_enable_lane_clks(host);
- if (err)
- return err;
- }
+ err = ufs_qcom_enable_lane_clks(host);
+ if (err)
+ return err;
return ufs_qcom_ice_resume(host);
}
@@ -1130,12 +1133,20 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
* @on: If true, enable clocks else disable them.
* @status: PRE_CHANGE or POST_CHANGE notify
*
+ * Certain clocks come from the PHY, so they need to be managed
+ * together with the controller clocks, which also provides better
+ * power savings. Hence keep the phy_power_off/on calls in
+ * ufs_qcom_setup_clocks, so that the PHY's regulators & clks can be
+ * turned on/off along with the UFS clocks.
+ *
* Return: 0 on success, non-zero on failure.
*/
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct phy *phy;
+ int err;
/*
* In case ufs_qcom_init() is not yet done, simply ignore.
@@ -1145,6 +1156,8 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
if (!host)
return 0;
+ phy = host->generic_phy;
+
switch (status) {
case PRE_CHANGE:
if (on) {
@@ -1154,10 +1167,22 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
}
+
+ err = phy_power_off(phy);
+ if (err) {
+ dev_err(hba->dev, "phy power off failed, ret=%d\n", err);
+ return err;
+ }
}
break;
case POST_CHANGE:
if (on) {
+ err = phy_power_on(phy);
+ if (err) {
+ dev_err(hba->dev, "phy power on failed, ret = %d\n", err);
+ return err;
+ }
+
/* enable the device ref clock for HS mode*/
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
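With the PHY power calls folded into ufs_qcom_setup_clocks(), the effective sequencing is: on the way down the device ref_clk is gated before the PHY (the PLL source) is powered off, and on the way up the PHY returns before the ref_clk is re-enabled for HS mode. A condensed sketch of that order (not a drop-in replacement; error paths elided):

/* Condensed sequencing sketch of ufs_qcom_setup_clocks() */
if (status == PRE_CHANGE && !on) {
	ufs_qcom_dev_ref_clk_ctrl(host, false);		/* gate device ref_clk */
	phy_power_off(phy);				/* then drop PLL source */
} else if (status == POST_CHANGE && on) {
	phy_power_on(phy);				/* restore PLL source */
	if (ufshcd_is_hs_mode(&hba->pwr_info))
		ufs_qcom_dev_ref_clk_ctrl(host, true);	/* ungate ref_clk */
}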
@@ -1873,7 +1898,6 @@ static int ufs_qcom_device_reset(struct ufs_hba *hba)
return 0;
}
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
struct devfreq_dev_profile *p,
struct devfreq_simple_ondemand_data *d)
@@ -1885,13 +1909,6 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
hba->clk_scaling.suspend_on_no_request = true;
}
-#else
-static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
- struct devfreq_dev_profile *p,
- struct devfreq_simple_ondemand_data *data)
-{
-}
-#endif
/* Resources */
static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
@@ -2109,8 +2126,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
retain_and_null_ptr(qi);
- if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
- host->hw_ver.step == 0) {
+ if (host->hw_ver.major >= 6) {
ufshcd_rmwl(hba, ESI_VEC_MASK, FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
REG_UFS_CFG3);
}
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 0a5cfc2dd4f7..e0e129af7c16 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -24,6 +24,15 @@
#define UFS_QCOM_LIMIT_HS_RATE PA_HS_MODE_B
+/* bit and mask definitions for PA_VS_CLK_CFG_REG attribute */
+#define PA_VS_CLK_CFG_REG 0x9004
+#define PA_VS_CLK_CFG_REG_MASK GENMASK(8, 0)
+
+/* bit and mask definitions for DL_VS_CLK_CFG attribute */
+#define DL_VS_CLK_CFG 0xA00B
+#define DL_VS_CLK_CFG_MASK GENMASK(9, 0)
+
+/* bit definition for the DME_VS_CORE_CLK_CTRL attribute */
+#define DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN BIT(9)
+
/* QCOM UFS host controller vendor specific registers */
enum {
REG_UFS_SYS1CLK_1US = 0xC0,
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index 996387906aa1..b39239f641f2 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -22,17 +22,12 @@
#define MAX_SUPP_MAC 64
-struct ufs_host {
- void (*late_init)(struct ufs_hba *hba);
-};
-
enum intel_ufs_dsm_func_id {
INTEL_DSM_FNS = 0,
INTEL_DSM_RESET = 1,
};
struct intel_host {
- struct ufs_host ufs_host;
u32 dsm_fns;
u32 active_ltr;
u32 idle_ltr;
@@ -408,8 +403,14 @@ static int ufs_intel_ehl_init(struct ufs_hba *hba)
return ufs_intel_common_init(hba);
}
-static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
+static int ufs_intel_lkf_init(struct ufs_hba *hba)
{
+ int err;
+
+ hba->nop_out_timeout = 200;
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+ hba->caps |= UFSHCD_CAP_CRYPTO;
+ err = ufs_intel_common_init(hba);
/* LKF always needs a full reset, so set PM accordingly */
if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
hba->spm_lvl = UFS_PM_LVL_6;
@@ -418,19 +419,6 @@ static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
hba->spm_lvl = UFS_PM_LVL_5;
hba->rpm_lvl = UFS_PM_LVL_5;
}
-}
-
-static int ufs_intel_lkf_init(struct ufs_hba *hba)
-{
- struct ufs_host *ufs_host;
- int err;
-
- hba->nop_out_timeout = 200;
- hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
- hba->caps |= UFSHCD_CAP_CRYPTO;
- err = ufs_intel_common_init(hba);
- ufs_host = ufshcd_get_variant(hba);
- ufs_host->late_init = ufs_intel_lkf_late_init;
return err;
}
@@ -444,6 +432,8 @@ static int ufs_intel_adl_init(struct ufs_hba *hba)
static int ufs_intel_mtl_init(struct ufs_hba *hba)
{
+ hba->rpm_lvl = UFS_PM_LVL_2;
+ hba->spm_lvl = UFS_PM_LVL_2;
hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN;
return ufs_intel_common_init(hba);
}
@@ -574,7 +564,6 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
static int
ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct ufs_host *ufs_host;
struct ufs_hba *hba;
void __iomem *mmio_base;
int err;
@@ -607,10 +596,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- ufs_host = ufshcd_get_variant(hba);
- if (ufs_host && ufs_host->late_init)
- ufs_host->late_init(hba);
-
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);