Diffstat (limited to 'drivers/scsi/ufs/ufshcd.c')
 drivers/scsi/ufs/ufshcd.c | 308 ++++++++++++++++++++++++++++++++++----------
 1 file changed, 254 insertions(+), 54 deletions(-)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index d0a1674915a1..3a811c5f70ba 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -233,8 +233,6 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
-static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *desired_pwr_mode);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
@@ -266,6 +264,18 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
}
}
+static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
+{
+ if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
+ scsi_unblock_requests(hba->host);
+}
+
+static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
+{
+ if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
+ scsi_block_requests(hba->host);
+}
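
A note on why these wrappers exist: scsi_block_requests()/scsi_unblock_requests() do not nest, and several independent paths in this driver (clock gating, clock scaling, the error handler) may need the queue blocked at the same time. The counter ensures only the outermost block/unblock pair actually touches the SCSI layer. Below is a minimal userspace model of the counting scheme; the names are illustrative, not kernel code:

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_int block_cnt;            /* models hba->scsi_block_reqs_cnt */

static void fake_block(void)   { puts("queue blocked"); }
static void fake_unblock(void) { puts("queue unblocked"); }

static void block_reqs(void)
{
	/* Only the 0 -> 1 transition actually blocks the queue. */
	if (atomic_fetch_add(&block_cnt, 1) == 0)
		fake_block();
}

static void unblock_reqs(void)
{
	/* Only the 1 -> 0 transition actually unblocks it. */
	if (atomic_fetch_sub(&block_cnt, 1) == 1)
		fake_unblock();
}

int main(void)
{
	block_reqs();    /* e.g. clock gating   -> "queue blocked"   */
	block_reqs();    /* e.g. error handler  -> silent            */
	unblock_reqs();  /* gating done         -> silent            */
	unblock_reqs();  /* handler done        -> "queue unblocked" */
	return 0;
}
```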
+
/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(char *val)
{
@@ -675,7 +685,24 @@ static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
*/
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
- ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+ if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
+ ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+ else
+ ufshcd_writel(hba, ~(1 << pos),
+ REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+}
+
+/**
+ * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
+ * @hba: per adapter instance
+ * @pos: position of the bit to be cleared
+ */
+static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
+{
+ if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
+ ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
+ else
+ ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}
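
Background the quirk branch assumes: per UFSHCI, the UTRLCLR/UTMRLCLR registers clear a slot when software writes 0 to that bit while keeping every other bit 1, hence the ~(1 << pos); hosts flagged with UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR instead expect only the target bit set. The two write values for pos = 5, as a standalone sketch:

```c
#include <stdio.h>

int main(void)
{
	unsigned int pos = 5;

	/* Spec-compliant clear: target bit 0, every other bit 1. */
	printf("spec : 0x%08x\n", ~(1U << pos));	/* 0xffffffdf */

	/* UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR hosts: only the target bit set. */
	printf("quirk: 0x%08x\n", 1U << pos);		/* 0x00000020 */
	return 0;
}
```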
/**
@@ -1091,12 +1118,12 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
* make sure that there are no outstanding requests when
* clock scaling is in progress
*/
- scsi_block_requests(hba->host);
+ ufshcd_scsi_block_requests(hba);
down_write(&hba->clk_scaling_lock);
if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
- scsi_unblock_requests(hba->host);
+ ufshcd_scsi_unblock_requests(hba);
}
return ret;
@@ -1105,7 +1132,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
{
up_write(&hba->clk_scaling_lock);
- scsi_unblock_requests(hba->host);
+ ufshcd_scsi_unblock_requests(hba);
}
/**
@@ -1200,16 +1227,13 @@ static int ufshcd_devfreq_target(struct device *dev,
struct ufs_hba *hba = dev_get_drvdata(dev);
ktime_t start;
bool scale_up, sched_clk_scaling_suspend_work = false;
+ struct list_head *clk_list = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
unsigned long irq_flags;
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
- if ((*freq > 0) && (*freq < UINT_MAX)) {
- dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
- return -EINVAL;
- }
-
spin_lock_irqsave(hba->host->host_lock, irq_flags);
if (ufshcd_eh_in_progress(hba)) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
@@ -1219,7 +1243,13 @@ static int ufshcd_devfreq_target(struct device *dev,
if (!hba->clk_scaling.active_reqs)
sched_clk_scaling_suspend_work = true;
- scale_up = (*freq == UINT_MAX) ? true : false;
+ if (list_empty(clk_list)) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ goto out;
+ }
+
+ clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
+ scale_up = (*freq == clki->max_freq) ? true : false;
if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
ret = 0;
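
With the hard-coded 0/UINT_MAX convention removed, devfreq now passes back one of the real OPP frequencies registered in ufshcd_devfreq_init() below, so a comparison against the first clock's max_freq is enough to tell scale-up from scale-down. A reduced model of that decision; the frequencies here are hypothetical:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical two-point OPP set for the first (reference) clock. */
#define CLK_MIN_FREQ  75000000UL	/* e.g. 75 MHz  */
#define CLK_MAX_FREQ 150000000UL	/* e.g. 150 MHz */

static bool scale_up_requested(unsigned long freq)
{
	/*
	 * devfreq only hands back a registered OPP, so matching max_freq
	 * is sufficient to distinguish the two operating points.
	 */
	return freq == CLK_MAX_FREQ;
}

int main(void)
{
	printf("%lu -> %s\n", CLK_MAX_FREQ,
	       scale_up_requested(CLK_MAX_FREQ) ? "scale up" : "scale down");
	printf("%lu -> %s\n", CLK_MIN_FREQ,
	       scale_up_requested(CLK_MIN_FREQ) ? "scale up" : "scale down");
	return 0;
}
```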
@@ -1287,6 +1317,55 @@ static struct devfreq_dev_profile ufs_devfreq_profile = {
.get_dev_status = ufshcd_devfreq_get_dev_status,
};
+static int ufshcd_devfreq_init(struct ufs_hba *hba)
+{
+ struct list_head *clk_list = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
+ struct devfreq *devfreq;
+ int ret;
+
+ /* Skip devfreq if we don't have any clocks in the list */
+ if (list_empty(clk_list))
+ return 0;
+
+ clki = list_first_entry(clk_list, struct ufs_clk_info, list);
+ dev_pm_opp_add(hba->dev, clki->min_freq, 0);
+ dev_pm_opp_add(hba->dev, clki->max_freq, 0);
+
+ devfreq = devfreq_add_device(hba->dev,
+ &ufs_devfreq_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ NULL);
+ if (IS_ERR(devfreq)) {
+ ret = PTR_ERR(devfreq);
+ dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
+
+ dev_pm_opp_remove(hba->dev, clki->min_freq);
+ dev_pm_opp_remove(hba->dev, clki->max_freq);
+ return ret;
+ }
+
+ hba->devfreq = devfreq;
+
+ return 0;
+}
+
+static void ufshcd_devfreq_remove(struct ufs_hba *hba)
+{
+ struct list_head *clk_list = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
+
+ if (!hba->devfreq)
+ return;
+
+ devfreq_remove_device(hba->devfreq);
+ hba->devfreq = NULL;
+
+ clki = list_first_entry(clk_list, struct ufs_clk_info, list);
+ dev_pm_opp_remove(hba->dev, clki->min_freq);
+ dev_pm_opp_remove(hba->dev, clki->max_freq);
+}
+
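
Only the first clock's min and max frequencies become OPPs here, because UFS clock scaling is a two-point decision, and on devfreq registration failure the OPPs are dropped again so a later retry starts from a clean slate. The unwind shape in isolation, with the failure simulated and all names made up:

```c
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for dev_pm_opp_add/remove and devfreq_add_device. */
static int  opp_add(unsigned long f)    { printf("opp add    %lu\n", f); return 0; }
static void opp_remove(unsigned long f) { printf("opp remove %lu\n", f); }
static int  devfreq_add(void)           { return -1; /* simulated failure */ }

static int devfreq_init_model(unsigned long min, unsigned long max)
{
	opp_add(min);
	opp_add(max);

	if (devfreq_add() < 0) {
		/* Unwind what was just registered before reporting failure. */
		opp_remove(min);
		opp_remove(max);
		return -1;
	}
	return 0;
}

int main(void)
{
	return devfreq_init_model(75000000UL, 150000000UL) ? EXIT_FAILURE : 0;
}
```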
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
unsigned long flags;
@@ -1425,7 +1504,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
hba->clk_gating.is_suspended = false;
}
unblock_reqs:
- scsi_unblock_requests(hba->host);
+ ufshcd_scsi_unblock_requests(hba);
}
/**
@@ -1481,11 +1560,12 @@ start:
* work and to enable clocks.
*/
case CLKS_OFF:
- scsi_block_requests(hba->host);
+ ufshcd_scsi_block_requests(hba);
hba->clk_gating.state = REQ_CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- schedule_work(&hba->clk_gating.ungate_work);
+ queue_work(hba->clk_gating.clk_gating_workq,
+ &hba->clk_gating.ungate_work);
/*
* fall through to check if we should wait for this
* work to be done or not.
@@ -1671,6 +1751,8 @@ out:
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
+ char wq_name[sizeof("ufs_clk_gating_00")];
+
if (!ufshcd_is_clkgating_allowed(hba))
return;
@@ -1678,6 +1760,11 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
+ snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
+ hba->host->host_no);
+ hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
+ WQ_MEM_RECLAIM);
+
hba->clk_gating.is_enabled = true;
hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
@@ -1705,6 +1792,7 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
cancel_work_sync(&hba->clk_gating.ungate_work);
cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+ destroy_workqueue(hba->clk_gating.clk_gating_workq);
}
/* Must be called with host lock acquired */
@@ -3383,6 +3471,52 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
"dme-link-startup: error code %d\n", ret);
return ret;
}
+/**
+ * ufshcd_dme_reset - UIC command for DME_RESET
+ * @hba: per adapter instance
+ *
+ * DME_RESET command is issued in order to reset UniPro stack.
+ * This function now deals with cold reset.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dme_reset(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_RESET;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_err(hba->dev,
+ "dme-reset: error code %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * ufshcd_dme_enable - UIC command for DME_ENABLE
+ * @hba: per adapter instance
+ *
+ * DME_ENABLE command is issued in order to enable UniPro stack.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dme_enable(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_ENABLE;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_err(hba->dev,
+ "dme-reset: error code %d\n", ret);
+
+ return ret;
+}
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
@@ -3906,7 +4040,7 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
* @hba: per-adapter instance
* @desired_pwr_mode: desired power configuration
*/
-static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+int ufshcd_config_pwr_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *desired_pwr_mode)
{
struct ufs_pa_layer_attr final_params = { 0 };
@@ -3924,6 +4058,7 @@ static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
/**
* ufshcd_complete_dev_init() - checks device readiness
@@ -4041,7 +4176,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
}
/**
- * ufshcd_hba_enable - initialize the controller
+ * ufshcd_hba_execute_hce - initialize the controller
* @hba: per adapter instance
*
* The controller resets itself and controller firmware initialization
@@ -4050,7 +4185,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
*
* Returns 0 on success, non-zero value on failure
*/
-static int ufshcd_hba_enable(struct ufs_hba *hba)
+static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
{
int retry;
@@ -4105,6 +4240,31 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
return 0;
}
+static int ufshcd_hba_enable(struct ufs_hba *hba)
+{
+ int ret;
+
+ if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
+ ufshcd_set_link_off(hba);
+ ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
+
+ /* enable UIC related interrupts */
+ ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
+ ret = ufshcd_dme_reset(hba);
+ if (!ret) {
+ ret = ufshcd_dme_enable(hba);
+ if (!ret)
+ ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
+ if (ret)
+ dev_err(hba->dev,
+ "Host controller enable failed with non-hce\n");
+ }
+ } else {
+ ret = ufshcd_hba_execute_hce(hba);
+ }
+
+ return ret;
+}
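
The new wrapper assumes that hosts flagged UFSHCI_QUIRK_BROKEN_HCE cannot complete the usual HCE handshake, so the UniPro stack is reset and re-enabled directly through DME commands instead. A toy model of the branch; the stubs are illustrative only:

```c
#include <stdbool.h>
#include <stdio.h>

static bool quirk_broken_hce = true;	/* pretend the quirk is set */

static int dme_reset(void)  { puts("DME_RESET  (cold-reset UniPro)"); return 0; }
static int dme_enable(void) { puts("DME_ENABLE (bring UniPro up)");   return 0; }
static int hce_enable(void) { puts("set HCE=1 and poll until ready"); return 0; }

static int hba_enable_model(void)
{
	if (quirk_broken_hce) {
		/* DME-based fallback: reset the stack, then enable it. */
		int ret = dme_reset();

		return ret ? ret : dme_enable();
	}
	return hce_enable();	/* normal host-controller-enable path */
}

int main(void)
{
	return hba_enable_model();
}
```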
static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
int tx_lanes, i, err = 0;
@@ -4678,7 +4838,8 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
* false interrupt if device completes another request after resetting
* aggregation and before reading the DB.
*/
- if (ufshcd_is_intr_aggr_allowed(hba))
+ if (ufshcd_is_intr_aggr_allowed(hba) &&
+ !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
ufshcd_reset_intr_aggr(hba);
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
@@ -4969,6 +5130,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eeh_work);
pm_runtime_get_sync(hba->dev);
+ ufshcd_scsi_block_requests(hba);
err = ufshcd_get_ee_status(hba, &status);
if (err) {
dev_err(hba->dev, "%s: failed to get exception status %d\n",
@@ -4982,6 +5144,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
ufshcd_bkops_exception_event_handler(hba);
out:
+ ufshcd_scsi_unblock_requests(hba);
pm_runtime_put_sync(hba->dev);
return;
}
@@ -5192,7 +5355,7 @@ skip_err_handling:
out:
spin_unlock_irqrestore(hba->host->host_lock, flags);
- scsi_unblock_requests(hba->host);
+ ufshcd_scsi_unblock_requests(hba);
ufshcd_release(hba);
pm_runtime_put_sync(hba->dev);
}
@@ -5294,7 +5457,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
/* handle fatal errors only when link is functional */
if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
/* block commands from scsi mid-layer */
- scsi_block_requests(hba->host);
+ ufshcd_scsi_block_requests(hba);
hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
@@ -5371,19 +5534,30 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
u32 intr_status, enabled_intr_status;
irqreturn_t retval = IRQ_NONE;
struct ufs_hba *hba = __hba;
+ int retries = hba->nutrs;
spin_lock(hba->host->host_lock);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- enabled_intr_status =
- intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- if (intr_status)
- ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ /*
+ * At most hba->nutrs requests can be in flight, and in the worst case
+ * they complete one by one after the interrupt status is read. Keep
+ * re-reading the interrupt status in a loop and handling completions
+ * until it is clear (bounded by the retry count) before returning.
+ */
+ do {
+ enabled_intr_status =
+ intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ if (intr_status)
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ if (enabled_intr_status) {
+ ufshcd_sl_intr(hba, enabled_intr_status);
+ retval = IRQ_HANDLED;
+ }
+
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ } while (intr_status && --retries);
- if (enabled_intr_status) {
- ufshcd_sl_intr(hba, enabled_intr_status);
- retval = IRQ_HANDLED;
- }
spin_unlock(hba->host->host_lock);
return retval;
}
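
The loop's reasoning, modeled standalone: read the status, acknowledge it, handle it, then read again, bounded by the number of requests that can be in flight, so a completion that lands between the read and the write-to-clear is not stranded until some later interrupt. A simulation with one late completion; all values are made up:

```c
#include <stdio.h>

#define NUTRS 4				/* retry budget: max requests in flight */

static unsigned int pending = 0x3;	/* simulated interrupt-status bits */
static int late_completion = 1;		/* one completion will race in    */

static unsigned int read_status(void) { return pending; }
static void ack(unsigned int bits)    { pending &= ~bits; }

static void handle(unsigned int bits)
{
	printf("handle 0x%x\n", bits);
	if (late_completion) {		/* a request finishes after the read */
		late_completion = 0;
		pending |= 0x4;
	}
}

int main(void)
{
	int retries = NUTRS;
	unsigned int status = read_status();

	/* Ack-then-handle, then re-read until the status stays clear. */
	do {
		if (status) {
			ack(status);
			handle(status);
		}
		status = read_status();
	} while (status && --retries);

	return 0;			/* prints: handle 0x3, handle 0x4 */
}
```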
@@ -5398,7 +5572,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
goto out;
spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
+ ufshcd_utmrl_clear(hba, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* poll for max. 1 sec to clear door bell register by h/w */
@@ -5958,14 +6132,18 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
{
int ret;
int buff_len = hba->desc_size.pwr_desc;
- u8 desc_buf[hba->desc_size.pwr_desc];
+ u8 *desc_buf;
+
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf)
+ return;
ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
if (ret) {
dev_err(hba->dev,
"%s: Failed reading power descriptor.len = %d ret = %d",
__func__, buff_len, ret);
- return;
+ goto out;
}
hba->init_prefetch_data.icc_level =
@@ -5983,6 +6161,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
"%s: Failed configuring bActiveICCLevel = %d ret = %d",
__func__, hba->init_prefetch_data.icc_level, ret);
+out:
+ kfree(desc_buf);
}
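
The move from an on-stack array to kmalloc() follows from the buffer length being read from the device (hba->desc_size.pwr_desc) while kernel stacks are small and fixed, which also lines up with the kernel-wide retirement of variable-length arrays. The general shape of the change, as a userspace sketch:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int read_desc(unsigned char *buf, size_t len)
{
	memset(buf, 0xab, len);		/* pretend the device filled it in */
	return 0;
}

static void init_icc_model(size_t buff_len)
{
	/* was: unsigned char desc_buf[buff_len]; a VLA of foreign size */
	unsigned char *desc_buf = malloc(buff_len);

	if (!desc_buf)
		return;

	if (read_desc(desc_buf, buff_len))
		goto out;

	printf("icc level byte: 0x%02x\n", desc_buf[0]);
out:
	free(desc_buf);
}

int main(void)
{
	init_icc_model(98);		/* hypothetical descriptor length */
	return 0;
}
```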
/**
@@ -6052,9 +6232,17 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
struct ufs_dev_desc *dev_desc)
{
int err;
+ size_t buff_len;
u8 model_index;
- u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
- u8 desc_buf[hba->desc_size.dev_desc];
+ u8 *desc_buf;
+
+ buff_len = max_t(size_t, hba->desc_size.dev_desc,
+ QUERY_DESC_MAX_SIZE + 1);
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
if (err) {
@@ -6072,7 +6260,10 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
- err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
+ /* Zero-pad entire buffer for string termination. */
+ memset(desc_buf, 0, buff_len);
+
+ err = ufshcd_read_string_desc(hba, model_index, desc_buf,
QUERY_DESC_MAX_SIZE, true/*ASCII*/);
if (err) {
dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
@@ -6080,15 +6271,16 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
goto out;
}
- str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
- strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
- min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
+ desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
+ strlcpy(dev_desc->model, (desc_buf + QUERY_DESC_HDR_SIZE),
+ min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
MAX_MODEL_LEN));
/* Null terminate the model string */
dev_desc->model[MAX_MODEL_LEN] = '\0';
out:
+ kfree(desc_buf);
return err;
}
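
The sizing above lets a single heap buffer serve both reads in this function: max_t() picks the larger of the device-descriptor size and QUERY_DESC_MAX_SIZE + 1, and the extra byte keeps the later desc_buf[QUERY_DESC_MAX_SIZE] = '\0' write in bounds. The same arithmetic in isolation, with a hypothetical device-descriptor size:

```c
#include <stdio.h>
#include <stdlib.h>

#define DESC_MAX 255			/* stand-in for QUERY_DESC_MAX_SIZE */

int main(void)
{
	size_t dev_desc_len = 64;	/* hypothetical device-descriptor size */

	/* Size for the larger user, plus room for a forced terminator. */
	size_t buff_len = dev_desc_len > DESC_MAX + 1
			  ? dev_desc_len : DESC_MAX + 1;
	char *buf = calloc(1, buff_len);	/* zeroed, like the memset */

	if (!buf)
		return 1;

	buf[DESC_MAX] = '\0';		/* in bounds thanks to the +1 */
	printf("buff_len = %zu\n", buff_len);
	free(buf);
	return 0;
}
```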
@@ -6439,16 +6631,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
sizeof(struct ufs_pa_layer_attr));
hba->clk_scaling.saved_pwr_info.is_valid = true;
if (!hba->devfreq) {
- hba->devfreq = devm_devfreq_add_device(hba->dev,
- &ufs_devfreq_profile,
- "simple_ondemand",
- NULL);
- if (IS_ERR(hba->devfreq)) {
- ret = PTR_ERR(hba->devfreq);
- dev_err(hba->dev, "Unable to register with devfreq %d\n",
- ret);
+ ret = ufshcd_devfreq_init(hba);
+ if (ret)
goto out;
- }
}
hba->clk_scaling.is_allowed = true;
}
@@ -6799,9 +6984,16 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
if (list_empty(head))
goto out;
- ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
- if (ret)
- return ret;
+ /*
+ * Vendor-specific setup_clocks ops may depend on the clocks managed
+ * by this standard driver, so call the vendor-specific setup_clocks
+ * before disabling the clocks managed here.
+ */
+ if (!on) {
+ ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
+ if (ret)
+ return ret;
+ }
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
@@ -6825,9 +7017,16 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
}
}
- ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
- if (ret)
- return ret;
+ /*
+ * Vendor-specific setup_clocks ops may depend on the clocks managed
+ * by this standard driver, so call the vendor-specific setup_clocks
+ * after enabling the clocks managed here.
+ */
+ if (on) {
+ ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
+ if (ret)
+ return ret;
+ }
out:
if (ret) {
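
Both halves of this change enforce the ordering rule in the comments: the vendor hook must run while the core-managed clocks are still (or already) running, i.e. PRE_CHANGE before gating them off and POST_CHANGE after ungating them. A toy sequence model, with illustrative names:

```c
#include <stdbool.h>
#include <stdio.h>

static void vendor_setup(bool on) { printf("  vendor hook (on=%d)\n", on); }
static void core_clks(bool on)    { printf("  core clocks %s\n", on ? "on" : "off"); }

static void setup_clocks_model(bool on)
{
	printf("setup_clocks(%s):\n", on ? "on" : "off");

	if (!on)
		vendor_setup(on);	/* vendor first, clocks still running */

	core_clks(on);

	if (on)
		vendor_setup(on);	/* vendor last, clocks now running */
}

int main(void)
{
	setup_clocks_model(true);
	setup_clocks_model(false);
	return 0;
}
```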
@@ -6992,6 +7191,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
if (hba->devfreq)
ufshcd_suspend_clkscaling(hba);
destroy_workqueue(hba->clk_scaling.workq);
+ ufshcd_devfreq_remove(hba);
}
ufshcd_setup_clocks(hba, false);
ufshcd_setup_hba_vreg(hba, false);
@@ -7904,7 +8104,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Hold auto suspend until async scan completes */
pm_runtime_get_sync(dev);
-
+ atomic_set(&hba->scsi_block_reqs_cnt, 0);
/*
* We are assuming that device wasn't put in sleep/power-down
* state exclusively during the boot stage before kernel.