Diffstat (limited to 'drivers/ufs/core/ufshcd.c')
-rw-r--r--   drivers/ufs/core/ufshcd.c   2804
1 file changed, 1644 insertions(+), 1160 deletions(-)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 46433ecf0c4d..040a0ceb170a 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -28,6 +28,7 @@
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
#include "ufshcd-priv.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
@@ -36,23 +37,20 @@
#include "ufs-fault-injection.h"
#include "ufs_bsg.h"
#include "ufshcd-crypto.h"
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#define CREATE_TRACE_POINTS
-#include <trace/events/ufs.h>
+#include "ufs_trace.h"
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK)
-#define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
- UFSHCD_ERROR_MASK |\
- MCQ_CQ_EVENT_STATUS)
-
-
/* UIC command timeout, unit: ms */
-#define UIC_CMD_TIMEOUT 500
-
+enum {
+ UIC_CMD_TIMEOUT_DEFAULT = 500,
+ UIC_CMD_TIMEOUT_MAX = 5000,
+};
/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES 10
/* Timeout after 50 msecs if NOP OUT hangs without response */
@@ -61,7 +59,11 @@
/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
-#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
+enum {
+ QUERY_REQ_TIMEOUT_MIN = 1,
+ QUERY_REQ_TIMEOUT_DEFAULT = 1500,
+ QUERY_REQ_TIMEOUT_MAX = 30000
+};
/* Advanced RPMB request timeout */
#define ADVANCED_RPMB_REQ_TIMEOUT 3000 /* 3 seconds */
@@ -102,6 +104,9 @@
/* Default RTC update every 10 seconds */
#define UFS_RTC_UPDATE_INTERVAL_MS (10 * MSEC_PER_SEC)
+/* bMaxNumOfRTT is equal to two after device manufacturing */
+#define DEFAULT_MAX_NUM_RTT 2
+
/* UFSHC 4.0 compliant HC support this mode. */
static bool use_mcq_mode = true;
@@ -113,6 +118,40 @@ static bool is_mcq_supported(struct ufs_hba *hba)
module_param(use_mcq_mode, bool, 0644);
MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
+static unsigned int uic_cmd_timeout = UIC_CMD_TIMEOUT_DEFAULT;
+
+static int uic_cmd_timeout_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, UIC_CMD_TIMEOUT_DEFAULT,
+ UIC_CMD_TIMEOUT_MAX);
+}
+
+static const struct kernel_param_ops uic_cmd_timeout_ops = {
+ .set = uic_cmd_timeout_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(uic_cmd_timeout, &uic_cmd_timeout_ops, &uic_cmd_timeout, 0644);
+MODULE_PARM_DESC(uic_cmd_timeout,
+ "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 5 seconds inclusively");
+
+static unsigned int dev_cmd_timeout = QUERY_REQ_TIMEOUT_DEFAULT;
+
+static int dev_cmd_timeout_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, QUERY_REQ_TIMEOUT_MIN,
+ QUERY_REQ_TIMEOUT_MAX);
+}
+
+static const struct kernel_param_ops dev_cmd_timeout_ops = {
+ .set = dev_cmd_timeout_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(dev_cmd_timeout, &dev_cmd_timeout_ops, &dev_cmd_timeout, 0644);
+MODULE_PARM_DESC(dev_cmd_timeout,
+ "UFS Device command timeout in milliseconds. Defaults to 1.5s. Supported values range from 1ms to 30 seconds inclusively");
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
@@ -161,8 +200,6 @@ EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
enum {
UFSHCD_MAX_CHANNEL = 0,
UFSHCD_MAX_ID = 1,
- UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
- UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
};
static const char *const ufshcd_state_name[] = {
@@ -238,10 +275,15 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
return UFS_PM_LVL_0;
}
+static bool ufshcd_has_pending_tasks(struct ufs_hba *hba)
+{
+ return hba->outstanding_tasks || hba->active_uic_cmd ||
+ hba->uic_async_done;
+}
+
static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
{
- return (hba->clk_gating.active_reqs || hba->outstanding_reqs || hba->outstanding_tasks ||
- hba->active_uic_cmd || hba->uic_async_done);
+ return scsi_host_busy(hba->host) || ufshcd_has_pending_tasks(hba);
}
static const struct ufs_dev_quirk ufs_fixups[] = {
@@ -253,6 +295,7 @@ static const struct ufs_dev_quirk ufs_fixups[] = {
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
+ UFS_DEVICE_QUIRK_PA_HIBER8TIME |
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
.model = UFS_ANY_MODEL,
@@ -269,6 +312,9 @@ static const struct ufs_dev_quirk ufs_fixups[] = {
{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
.model = "THGLF2G9D8KBADG",
.quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
+ { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
+ .model = "THGJFJT1E45BATP",
+ .quirk = UFS_DEVICE_QUIRK_NO_TIMESTAMP_SUPPORT },
{}
};
@@ -278,6 +324,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
+static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
@@ -316,6 +363,34 @@ void ufshcd_disable_irq(struct ufs_hba *hba)
}
EXPORT_SYMBOL_GPL(ufshcd_disable_irq);
+/**
+ * ufshcd_enable_intr - enable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
+{
+ u32 old_val = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ u32 new_val = old_val | intrs;
+
+ if (new_val != old_val)
+ ufshcd_writel(hba, new_val, REG_INTERRUPT_ENABLE);
+}
+
+/**
+ * ufshcd_disable_intr - disable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
+{
+ u32 old_val = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ u32 new_val = old_val & ~intrs;
+
+ if (new_val != old_val)
+ ufshcd_writel(hba, new_val, REG_INTERRUPT_ENABLE);
+}
+
static void ufshcd_configure_wb(struct ufs_hba *hba)
{
if (!ufshcd_is_wb_allowed(hba))
@@ -329,22 +404,11 @@ static void ufshcd_configure_wb(struct ufs_hba *hba)
ufshcd_wb_toggle_buf_flush(hba, true);
}
-static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
-{
- if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
- scsi_unblock_requests(hba->host);
-}
-
-static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
-{
- if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
- scsi_block_requests(hba->host);
-}
-
-static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
+static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrb,
enum ufs_trace_str_t str_t)
{
- struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+ struct utp_upiu_req *rq = lrb->ucd_req_ptr;
struct utp_upiu_header *header;
if (!trace_ufshcd_upiu_enabled())
@@ -353,9 +417,9 @@ static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
if (str_t == UFS_CMD_SEND)
header = &rq->header;
else
- header = &hba->lrb[tag].ucd_rsp_ptr->header;
+ header = &lrb->ucd_rsp_ptr->header;
- trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
+ trace_ufshcd_upiu(hba, str_t, header, &rq->sc.cdb,
UFS_TSF_CDB);
}
@@ -366,7 +430,7 @@ static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
if (!trace_ufshcd_upiu_enabled())
return;
- trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
+ trace_ufshcd_upiu(hba, str_t, &rq_rsp->header,
&rq_rsp->qr, UFS_TSF_OSF);
}
@@ -379,12 +443,12 @@ static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
return;
if (str_t == UFS_TM_SEND)
- trace_ufshcd_upiu(dev_name(hba->dev), str_t,
+ trace_ufshcd_upiu(hba, str_t,
&descp->upiu_req.req_header,
&descp->upiu_req.input_param1,
UFS_TSF_TM_INPUT);
else
- trace_ufshcd_upiu(dev_name(hba->dev), str_t,
+ trace_ufshcd_upiu(hba, str_t,
&descp->upiu_rsp.rsp_header,
&descp->upiu_rsp.output_param1,
UFS_TSF_TM_OUTPUT);
@@ -404,30 +468,27 @@ static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
else
cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
- trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
+ trace_ufshcd_uic_command(hba, str_t, cmd,
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
}
-static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
+static void ufshcd_add_command_trace(struct ufs_hba *hba, struct scsi_cmnd *cmd,
enum ufs_trace_str_t str_t)
{
u64 lba = 0;
u8 opcode = 0, group_id = 0;
u32 doorbell = 0;
u32 intr;
- int hwq_id = -1;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
- struct scsi_cmnd *cmd = lrbp->cmd;
+ u32 hwq_id = 0;
struct request *rq = scsi_cmd_to_rq(cmd);
+ unsigned int tag = rq->tag;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
int transfer_len = -1;
- if (!cmd)
- return;
-
/* trace UPIU also */
- ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
+ ufshcd_add_cmd_upiu_trace(hba, lrbp, str_t);
if (!trace_ufshcd_command_enabled())
return;
@@ -441,7 +502,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
lba = scsi_get_lba(cmd);
if (opcode == WRITE_10)
- group_id = lrbp->cmd->cmnd[6];
+ group_id = cmd->cmnd[6];
} else if (opcode == UNMAP) {
/*
* The number of Bytes to be unmapped beginning with the lba.
@@ -452,14 +513,14 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- if (is_mcq_enabled(hba)) {
+ if (hba->mcq_enabled) {
struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
hwq_id = hwq->id;
} else {
doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}
- trace_ufshcd_command(cmd->device, str_t, tag, doorbell, hwq_id,
+ trace_ufshcd_command(cmd->device, hba, str_t, tag, doorbell, hwq_id,
transfer_len, intr, lba, opcode, group_id);
}
@@ -534,18 +595,19 @@ static void ufshcd_print_evt_hist(struct ufs_hba *hba)
ufshcd_vops_dbg_register_dump(hba);
}
-static
-void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt)
+static void ufshcd_print_tr(struct ufs_hba *hba, struct scsi_cmnd *cmd,
+ bool pr_prdt)
{
- const struct ufshcd_lrb *lrbp;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ const int tag = scsi_cmd_to_rq(cmd)->tag;
int prdt_length;
- lrbp = &hba->lrb[tag];
-
- dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
- tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
- dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
- tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
+ if (hba->monitor.enabled) {
+ dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n", tag,
+ div_u64(lrbp->issue_time_stamp_local_clock, 1000));
+ dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n", tag,
+ div_u64(lrbp->compl_time_stamp_local_clock, 1000));
+ }
dev_err(hba->dev,
"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
tag, (u64)lrbp->utrd_dma_addr);
@@ -582,7 +644,8 @@ static bool ufshcd_print_tr_iter(struct request *req, void *priv)
struct Scsi_Host *shost = sdev->host;
struct ufs_hba *hba = shost_priv(shost);
- ufshcd_print_tr(hba, req->tag, *(bool *)priv);
+ if (!blk_mq_is_reserved_rq(req))
+ ufshcd_print_tr(hba, blk_mq_rq_to_pdu(req), *(bool *)priv);
return true;
}
@@ -614,8 +677,8 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
- dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
- hba->outstanding_reqs, hba->outstanding_tasks);
+ dev_err(hba->dev, "%d outstanding reqs, tasks=0x%lx\n",
+ scsi_host_busy(hba->host), hba->outstanding_tasks);
dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
hba->saved_err, hba->saved_uic_err);
dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
@@ -629,9 +692,6 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
hba->ufs_stats.hibern8_exit_cnt);
- dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
- div_u64(hba->ufs_stats.last_intr_ts, 1000),
- hba->ufs_stats.last_intr_status);
dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
hba->eh_flags, hba->req_abort_count);
dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
@@ -719,25 +779,15 @@ EXPORT_SYMBOL_GPL(ufshcd_delay_us);
* Return: -ETIMEDOUT on error, zero on success.
*/
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
- u32 val, unsigned long interval_us,
- unsigned long timeout_ms)
+ u32 val, unsigned long interval_us,
+ unsigned long timeout_ms)
{
- int err = 0;
- unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ u32 v;
- /* ignore bits that we don't intend to wait on */
- val = val & mask;
+ val &= mask; /* ignore bits that we don't intend to wait on */
- while ((ufshcd_readl(hba, reg) & mask) != val) {
- usleep_range(interval_us, interval_us + 50);
- if (time_after(jiffies, timeout)) {
- if ((ufshcd_readl(hba, reg) & mask) != val)
- err = -ETIMEDOUT;
- break;
- }
- }
-
- return err;
+ return read_poll_timeout(ufshcd_readl, v, (v & mask) == val,
+ interval_us, timeout_ms * 1000, false, hba, reg);
}
/**
@@ -805,7 +855,7 @@ static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
struct cq_entry *cqe)
{
if (cqe)
- return le32_to_cpu(cqe->status) & MASK_OCS;
+ return cqe->overall_status & MASK_OCS;
return lrbp->utr_descriptor_ptr->header.ocs & MASK_OCS;
}
@@ -994,6 +1044,7 @@ EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
*/
void ufshcd_pm_qos_init(struct ufs_hba *hba)
{
+ guard(mutex)(&hba->pm_qos_mutex);
if (hba->pm_qos_enabled)
return;
@@ -1010,6 +1061,8 @@ void ufshcd_pm_qos_init(struct ufs_hba *hba)
*/
void ufshcd_pm_qos_exit(struct ufs_hba *hba)
{
+ guard(mutex)(&hba->pm_qos_mutex);
+
if (!hba->pm_qos_enabled)
return;
@@ -1022,13 +1075,16 @@ void ufshcd_pm_qos_exit(struct ufs_hba *hba)
* @hba: per adapter instance
* @on: If True, vote for perf PM QoS mode otherwise power save mode
*/
-static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
+void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
{
+ guard(mutex)(&hba->pm_qos_mutex);
+
if (!hba->pm_qos_enabled)
return;
cpu_latency_qos_update_request(&hba->pm_qos_req, on ? 0 : PM_QOS_DEFAULT_VALUE);
}
+EXPORT_SYMBOL_GPL(ufshcd_pm_qos_update);
/**
* ufshcd_set_clk_freq - set UFS controller clock frequencies
@@ -1059,7 +1115,7 @@ static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
clki->max_freq, ret);
break;
}
- trace_ufshcd_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_clk_scaling(hba,
"scaled up", clki->name,
clki->curr_freq,
clki->max_freq);
@@ -1077,7 +1133,7 @@ static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
clki->min_freq, ret);
break;
}
- trace_ufshcd_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_clk_scaling(hba,
"scaled down", clki->name,
clki->curr_freq,
clki->min_freq);
@@ -1118,7 +1174,7 @@ int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table,
return ret;
}
- trace_ufshcd_clk_scaling(dev_name(dev),
+ trace_ufshcd_clk_scaling(hba,
(scaling_down ? "scaled down" : "scaled up"),
clki->name, hba->clk_scaling.target_freq, freq);
}
@@ -1158,7 +1214,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
int ret = 0;
ktime_t start = ktime_get();
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, freq, PRE_CHANGE);
if (ret)
goto out;
@@ -1169,7 +1225,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
if (ret)
goto out;
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, freq, POST_CHANGE);
if (ret) {
if (hba->use_pm_opp)
ufshcd_opp_set_rate(hba,
@@ -1182,7 +1238,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
ufshcd_pm_qos_update(hba, scale_up);
out:
- trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_profile_clk_scaling(hba,
(scale_up ? "up" : "down"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
return ret;
@@ -1234,12 +1290,14 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
*/
static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
{
- const struct scsi_device *sdev;
+ struct scsi_device *sdev;
+ unsigned long flags;
u32 pending = 0;
- lockdep_assert_held(hba->host->host_lock);
+ spin_lock_irqsave(hba->host->host_lock, flags);
__shost_for_each_device(sdev, hba->host)
- pending += sbitmap_weight(&sdev->budget_map);
+ pending += scsi_device_busy(sdev);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
return pending;
}
@@ -1250,10 +1308,9 @@ static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
*
* Return: 0 upon success; -EBUSY upon timeout.
*/
-static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
+static int ufshcd_wait_for_pending_cmds(struct ufs_hba *hba,
u64 wait_timeout_us)
{
- unsigned long flags;
int ret = 0;
u32 tm_doorbell;
u32 tr_pending;
@@ -1261,7 +1318,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
ktime_t start;
ufshcd_hold(hba);
- spin_lock_irqsave(hba->host->host_lock, flags);
/*
* Wait for all the outstanding tasks/transfer requests.
* Verify by checking the doorbell registers are clear.
@@ -1282,7 +1338,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
break;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
io_schedule_timeout(msecs_to_jiffies(20));
if (ktime_to_us(ktime_sub(ktime_get(), start)) >
wait_timeout_us) {
@@ -1294,7 +1349,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
*/
do_last_check = true;
}
- spin_lock_irqsave(hba->host->host_lock, flags);
} while (tm_doorbell || tr_pending);
if (timeout) {
@@ -1304,7 +1358,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
ret = -EBUSY;
}
out:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_release(hba);
return ret;
}
@@ -1312,16 +1365,26 @@ out:
/**
* ufshcd_scale_gear - scale up/down UFS gear
* @hba: per adapter instance
+ * @target_gear: target gear to scale to
* @scale_up: True for scaling up gear and false for scaling down
*
* Return: 0 for success; -EBUSY if scaling can't happen at this time;
* non-zero for any other errors.
*/
-static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
+static int ufshcd_scale_gear(struct ufs_hba *hba, u32 target_gear, bool scale_up)
{
int ret = 0;
struct ufs_pa_layer_attr new_pwr_info;
+ if (target_gear) {
+ new_pwr_info = hba->pwr_info;
+ new_pwr_info.gear_tx = target_gear;
+ new_pwr_info.gear_rx = target_gear;
+
+ goto config_pwr_mode;
+ }
+
+ /* Legacy gear scaling, in case vops_freq_to_gear_speed() is not implemented */
if (scale_up) {
memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info,
sizeof(struct ufs_pa_layer_attr));
@@ -1342,6 +1405,7 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
}
}
+config_pwr_mode:
/* check if the power mode needs to be changed or not? */
ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
if (ret)
@@ -1366,16 +1430,18 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
* make sure that there are no outstanding requests when
* clock scaling is in progress
*/
+ mutex_lock(&hba->host->scan_mutex);
blk_mq_quiesce_tagset(&hba->host->tag_set);
mutex_lock(&hba->wb_mutex);
down_write(&hba->clk_scaling_lock);
if (!hba->clk_scaling.is_allowed ||
- ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
+ ufshcd_wait_for_pending_cmds(hba, timeout_us)) {
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
mutex_unlock(&hba->wb_mutex);
blk_mq_unquiesce_tagset(&hba->host->tag_set);
+ mutex_unlock(&hba->host->scan_mutex);
goto out;
}
@@ -1386,17 +1452,18 @@ out:
return ret;
}
-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err)
{
up_write(&hba->clk_scaling_lock);
- /* Enable Write Booster if we have scaled up else disable it */
+ /* Enable Write Booster if current gear requires it else disable it */
if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
- ufshcd_wb_toggle(hba, scale_up);
+ ufshcd_wb_toggle(hba, hba->pwr_info.gear_rx >= hba->clk_scaling.wb_gear);
mutex_unlock(&hba->wb_mutex);
blk_mq_unquiesce_tagset(&hba->host->tag_set);
+ mutex_unlock(&hba->host->scan_mutex);
ufshcd_release(hba);
}
@@ -1412,15 +1479,19 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc
static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
bool scale_up)
{
+ u32 old_gear = hba->pwr_info.gear_rx;
+ u32 new_gear = 0;
int ret = 0;
+ new_gear = ufshcd_vops_freq_to_gear_speed(hba, freq);
+
ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
if (ret)
return ret;
/* scale down the gear before scaling down clocks */
if (!scale_up) {
- ret = ufshcd_scale_gear(hba, false);
+ ret = ufshcd_scale_gear(hba, new_gear, false);
if (ret)
goto out_unprepare;
}
@@ -1428,13 +1499,13 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
ret = ufshcd_scale_clks(hba, freq, scale_up);
if (ret) {
if (!scale_up)
- ufshcd_scale_gear(hba, true);
+ ufshcd_scale_gear(hba, old_gear, true);
goto out_unprepare;
}
/* scale up the gear after scaling up clocks */
if (scale_up) {
- ret = ufshcd_scale_gear(hba, true);
+ ret = ufshcd_scale_gear(hba, new_gear, true);
if (ret) {
ufshcd_scale_clks(hba, hba->devfreq->previous_freq,
false);
@@ -1443,7 +1514,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
}
out_unprepare:
- ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
+ ufshcd_clock_scaling_unprepare(hba, ret);
return ret;
}
@@ -1451,16 +1522,16 @@ static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_scaling.suspend_work);
- unsigned long irq_flags;
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (hba->clk_scaling.active_reqs ||
+ hba->clk_scaling.is_suspended)
+ return;
+
+ hba->clk_scaling.is_suspended = true;
+ hba->clk_scaling.window_start_t = 0;
}
- hba->clk_scaling.is_suspended = true;
- hba->clk_scaling.window_start_t = 0;
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
devfreq_suspend_device(hba->devfreq);
}
@@ -1469,15 +1540,13 @@ static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_scaling.resume_work);
- unsigned long irq_flags;
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (!hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (!hba->clk_scaling.is_suspended)
+ return;
+ hba->clk_scaling.is_suspended = false;
}
- hba->clk_scaling.is_suspended = false;
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
devfreq_resume_device(hba->devfreq);
}
@@ -1491,7 +1560,6 @@ static int ufshcd_devfreq_target(struct device *dev,
bool scale_up = false, sched_clk_scaling_suspend_work = false;
struct list_head *clk_list = &hba->clk_list_head;
struct ufs_clk_info *clki;
- unsigned long irq_flags;
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
@@ -1512,55 +1580,51 @@ static int ufshcd_devfreq_target(struct device *dev,
*freq = (unsigned long) clk_round_rate(clki->clk, *freq);
}
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (ufshcd_eh_in_progress(hba)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return 0;
- }
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (ufshcd_eh_in_progress(hba))
+ return 0;
- /* Skip scaling clock when clock scaling is suspended */
- if (hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- dev_warn(hba->dev, "clock scaling is suspended, skip");
- return 0;
- }
+ /* Skip scaling clock when clock scaling is suspended */
+ if (hba->clk_scaling.is_suspended) {
+ dev_warn(hba->dev, "clock scaling is suspended, skip");
+ return 0;
+ }
- if (!hba->clk_scaling.active_reqs)
- sched_clk_scaling_suspend_work = true;
+ if (!hba->clk_scaling.active_reqs)
+ sched_clk_scaling_suspend_work = true;
- if (list_empty(clk_list)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- goto out;
- }
+ if (list_empty(clk_list))
+ goto out;
- /* Decide based on the target or rounded-off frequency and update */
- if (hba->use_pm_opp)
- scale_up = *freq > hba->clk_scaling.target_freq;
- else
- scale_up = *freq == clki->max_freq;
+ /* Decide based on the target or rounded-off frequency and update */
+ if (hba->use_pm_opp)
+ scale_up = *freq > hba->clk_scaling.target_freq;
+ else
+ scale_up = *freq == clki->max_freq;
- if (!hba->use_pm_opp && !scale_up)
- *freq = clki->min_freq;
+ if (!hba->use_pm_opp && !scale_up)
+ *freq = clki->min_freq;
- /* Update the frequency */
- if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- ret = 0;
- goto out; /* no state change required */
+ /* Update the frequency */
+ if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
+ ret = 0;
+ goto out; /* no state change required */
+ }
}
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
start = ktime_get();
ret = ufshcd_devfreq_scale(hba, *freq, scale_up);
if (!ret)
hba->clk_scaling.target_freq = *freq;
- trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_profile_clk_scaling(hba,
(scale_up ? "up" : "down"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
out:
- if (sched_clk_scaling_suspend_work && !scale_up)
+ if (sched_clk_scaling_suspend_work &&
+ (!scale_up || hba->clk_scaling.suspend_on_no_request))
queue_work(hba->clk_scaling.workq,
&hba->clk_scaling.suspend_work);
@@ -1572,7 +1636,6 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
- unsigned long flags;
ktime_t curr_t;
if (!ufshcd_is_clkscaling_supported(hba))
@@ -1580,7 +1643,8 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
memset(stat, 0, sizeof(*stat));
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
curr_t = ktime_get();
if (!scaling->window_start_t)
goto start_window;
@@ -1616,7 +1680,7 @@ start_window:
scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+
return 0;
}
@@ -1680,19 +1744,19 @@ static void ufshcd_devfreq_remove(struct ufs_hba *hba)
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
- unsigned long flags;
bool suspend = false;
cancel_work_sync(&hba->clk_scaling.suspend_work);
cancel_work_sync(&hba->clk_scaling.resume_work);
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (!hba->clk_scaling.is_suspended) {
- suspend = true;
- hba->clk_scaling.is_suspended = true;
- hba->clk_scaling.window_start_t = 0;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (!hba->clk_scaling.is_suspended) {
+ suspend = true;
+ hba->clk_scaling.is_suspended = true;
+ hba->clk_scaling.window_start_t = 0;
+ }
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
if (suspend)
devfreq_suspend_device(hba->devfreq);
@@ -1700,15 +1764,15 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
- unsigned long flags;
bool resume = false;
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_scaling.is_suspended) {
- resume = true;
- hba->clk_scaling.is_suspended = false;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (hba->clk_scaling.is_suspended) {
+ resume = true;
+ hba->clk_scaling.is_suspended = false;
+ }
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
if (resume)
devfreq_resume_device(hba->devfreq);
@@ -1726,6 +1790,8 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_clk_info *clki;
+ unsigned long freq;
u32 value;
int err = 0;
@@ -1749,14 +1815,25 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
if (value) {
ufshcd_resume_clkscaling(hba);
- } else {
- ufshcd_suspend_clkscaling(hba);
- err = ufshcd_devfreq_scale(hba, ULONG_MAX, true);
- if (err)
- dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
- __func__, err);
+ goto out_rel;
}
+ clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
+ freq = clki->max_freq;
+
+ ufshcd_suspend_clkscaling(hba);
+
+ if (!ufshcd_is_devfreq_scaling_required(hba, freq, true))
+ goto out_rel;
+
+ err = ufshcd_devfreq_scale(hba, freq, true);
+ if (err)
+ dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
+ __func__, err);
+ else
+ hba->clk_scaling.target_freq = freq;
+
+out_rel:
ufshcd_release(hba);
ufshcd_rpm_put_sync(hba);
out:
@@ -1783,22 +1860,25 @@ static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
{
- char wq_name[sizeof("ufs_clkscaling_00")];
-
if (!ufshcd_is_clkscaling_supported(hba))
return;
if (!hba->clk_scaling.min_gear)
hba->clk_scaling.min_gear = UFS_HS_G1;
+ if (!hba->clk_scaling.wb_gear)
+ /* Use intermediate gear speed HS_G3 as the default wb_gear */
+ hba->clk_scaling.wb_gear = UFS_HS_G3;
+
INIT_WORK(&hba->clk_scaling.suspend_work,
ufshcd_clk_scaling_suspend_work);
INIT_WORK(&hba->clk_scaling.resume_work,
ufshcd_clk_scaling_resume_work);
- snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
- hba->host->host_no);
- hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+ spin_lock_init(&hba->clk_scaling.lock);
+
+ hba->clk_scaling.workq = alloc_ordered_workqueue(
+ "ufs_clkscaling_%d", WQ_MEM_RECLAIM, hba->host->host_no);
hba->clk_scaling.is_initialized = true;
}
@@ -1817,19 +1897,16 @@ static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
- unsigned long flags;
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.ungate_work);
cancel_delayed_work_sync(&hba->clk_gating.gate_work);
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_gating.state == CLKS_ON) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return;
+ scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
+ if (hba->clk_gating.state == CLKS_ON)
+ return;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_hba_vreg_set_hpm(hba);
ufshcd_setup_clocks(hba, true);
@@ -1864,7 +1941,7 @@ void ufshcd_hold(struct ufs_hba *hba)
if (!ufshcd_is_clkgating_allowed(hba) ||
!hba->clk_gating.is_initialized)
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ spin_lock_irqsave(&hba->clk_gating.lock, flags);
hba->clk_gating.active_reqs++;
start:
@@ -1880,18 +1957,18 @@ start:
*/
if (ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_link_hibern8(hba)) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
flush_result = flush_work(&hba->clk_gating.ungate_work);
if (hba->clk_gating.is_suspended && !flush_result)
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ spin_lock_irqsave(&hba->clk_gating.lock, flags);
goto start;
}
break;
case REQ_CLKS_OFF:
if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
break;
}
@@ -1903,7 +1980,7 @@ start:
fallthrough;
case CLKS_OFF:
hba->clk_gating.state = REQ_CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
queue_work(hba->clk_gating.clk_gating_workq,
&hba->clk_gating.ungate_work);
@@ -1913,17 +1990,17 @@ start:
*/
fallthrough;
case REQ_CLKS_ON:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
flush_work(&hba->clk_gating.ungate_work);
/* Make sure state is CLKS_ON before returning */
- spin_lock_irqsave(hba->host->host_lock, flags);
+ spin_lock_irqsave(&hba->clk_gating.lock, flags);
goto start;
default:
dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
__func__, hba->clk_gating.state);
break;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
@@ -1931,28 +2008,32 @@ static void ufshcd_gate_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.gate_work.work);
- unsigned long flags;
int ret;
- spin_lock_irqsave(hba->host->host_lock, flags);
- /*
- * In case you are here to cancel this work the gating state
- * would be marked as REQ_CLKS_ON. In this case save time by
- * skipping the gating work and exit after changing the clock
- * state to CLKS_ON.
- */
- if (hba->clk_gating.is_suspended ||
- (hba->clk_gating.state != REQ_CLKS_OFF)) {
- hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- goto rel_lock;
- }
+ scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
+ /*
+ * In case you are here to cancel this work the gating state
+ * would be marked as REQ_CLKS_ON. In this case save time by
+ * skipping the gating work and exit after changing the clock
+ * state to CLKS_ON.
+ */
+ if (hba->clk_gating.is_suspended ||
+ hba->clk_gating.state != REQ_CLKS_OFF) {
+ hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(hba,
+ hba->clk_gating.state);
+ return;
+ }
- if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
- goto rel_lock;
+ if (hba->clk_gating.active_reqs)
+ return;
+ }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ scoped_guard(spinlock_irqsave, hba->host->host_lock) {
+ if (ufshcd_is_ufs_dev_busy(hba) ||
+ hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+ return;
+ }
/* put the link into hibern8 mode before turning off clocks */
if (ufshcd_can_hibern8_during_gating(hba)) {
@@ -1961,9 +2042,9 @@ static void ufshcd_gate_work(struct work_struct *work)
hba->clk_gating.state = CLKS_ON;
dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
__func__, ret);
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
- goto out;
+ return;
}
ufshcd_set_link_hibern8(hba);
}
@@ -1983,35 +2064,36 @@ static void ufshcd_gate_work(struct work_struct *work)
* prevent from doing cancel work multiple times when there are
* new requests arriving before the current cancel work is done.
*/
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
if (hba->clk_gating.state == REQ_CLKS_OFF) {
hba->clk_gating.state = CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
}
-rel_lock:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
- return;
}
-/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
+ lockdep_assert_held(&hba->clk_gating.lock);
+
if (!ufshcd_is_clkgating_allowed(hba))
return;
hba->clk_gating.active_reqs--;
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
- hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
- hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
- hba->active_uic_cmd || hba->uic_async_done ||
+ !hba->clk_gating.is_initialized ||
hba->clk_gating.state == CLKS_OFF)
return;
+ scoped_guard(spinlock_irqsave, hba->host->host_lock) {
+ if (ufshcd_has_pending_tasks(hba) ||
+ hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+ return;
+ }
+
hba->clk_gating.state = REQ_CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+ trace_ufshcd_clk_gating(hba, hba->clk_gating.state);
queue_delayed_work(hba->clk_gating.clk_gating_workq,
&hba->clk_gating.gate_work,
msecs_to_jiffies(hba->clk_gating.delay_ms));
@@ -2019,11 +2101,8 @@ static void __ufshcd_release(struct ufs_hba *hba)
void ufshcd_release(struct ufs_hba *hba)
{
- unsigned long flags;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
__ufshcd_release(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);
@@ -2038,11 +2117,9 @@ static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned long flags;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
hba->clk_gating.delay_ms = value;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
@@ -2070,7 +2147,6 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned long flags;
u32 value;
if (kstrtou32(buf, 0, &value))
@@ -2078,9 +2154,10 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
value = !!value;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
+
if (value == hba->clk_gating.is_enabled)
- goto out;
+ return count;
if (value)
__ufshcd_release(hba);
@@ -2088,8 +2165,7 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
hba->clk_gating.active_reqs++;
hba->clk_gating.is_enabled = value;
-out:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+
return count;
}
@@ -2122,8 +2198,6 @@ static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
- char wq_name[sizeof("ufs_clk_gating_00")];
-
if (!ufshcd_is_clkgating_allowed(hba))
return;
@@ -2133,10 +2207,9 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
- snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
- hba->host->host_no);
- hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
- WQ_MEM_RECLAIM | WQ_HIGHPRI);
+ hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(
+ "ufs_clk_gating_%d", WQ_MEM_RECLAIM | WQ_HIGHPRI,
+ hba->host->host_no);
ufshcd_init_clk_gating_sysfs(hba);
@@ -2162,20 +2235,20 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
bool queue_resume_work = false;
- ktime_t curr_t = ktime_get();
- unsigned long flags;
+ ktime_t curr_t;
if (!ufshcd_is_clkscaling_supported(hba))
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ curr_t = ktime_get();
+
+ guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
if (!hba->clk_scaling.active_reqs++)
queue_resume_work = true;
- if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
return;
- }
if (queue_resume_work)
queue_work(hba->clk_scaling.workq,
@@ -2191,18 +2264,17 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
hba->clk_scaling.busy_start_t = curr_t;
hba->clk_scaling.is_busy_started = true;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
- unsigned long flags;
if (!ufshcd_is_clkscaling_supported(hba))
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
hba->clk_scaling.active_reqs--;
if (!scaling->active_reqs && scaling->is_busy_started) {
scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
@@ -2210,7 +2282,6 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static inline int ufshcd_monitor_opcode2dir(u8 opcode)
@@ -2223,20 +2294,21 @@ static inline int ufshcd_monitor_opcode2dir(u8 opcode)
return -EINVAL;
}
+/* Must only be called for SCSI commands. */
static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp)
+ struct scsi_cmnd *cmd)
{
const struct ufs_hba_monitor *m = &hba->monitor;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
- return (m->enabled && lrbp && lrbp->cmd &&
- (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
- ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
+ return m->enabled &&
+ (!m->chunk_size || m->chunk_size == cmd->sdb.length) &&
+ ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp);
}
-static void ufshcd_start_monitor(struct ufs_hba *hba,
- const struct ufshcd_lrb *lrbp)
+static void ufshcd_start_monitor(struct ufs_hba *hba, struct scsi_cmnd *cmd)
{
- int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
+ int dir = ufshcd_monitor_opcode2dir(cmd->cmnd[0]);
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -2245,14 +2317,15 @@ static void ufshcd_start_monitor(struct ufs_hba *hba,
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
-static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
+static void ufshcd_update_monitor(struct ufs_hba *hba, struct scsi_cmnd *cmd)
{
- int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
+ struct request *req = scsi_cmd_to_rq(cmd);
+ const struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ int dir = ufshcd_monitor_opcode2dir(cmd->cmnd[0]);
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
- const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
struct ufs_hba_monitor *m = &hba->monitor;
ktime_t now, inc, lat;
@@ -2277,30 +2350,40 @@ static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
+/* Returns %true for SCSI commands and %false for device management commands. */
+static bool ufshcd_is_scsi_cmd(struct scsi_cmnd *cmd)
+{
+ return !blk_mq_is_reserved_rq(scsi_cmd_to_rq(cmd));
+}
+
/**
* ufshcd_send_command - Send SCSI or device management commands
* @hba: per adapter instance
- * @task_tag: Task tag of the command
+ * @cmd: SCSI command or device management command pointer
* @hwq: pointer to hardware queue instance
*/
-static inline
-void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
- struct ufs_hw_queue *hwq)
+static inline void ufshcd_send_command(struct ufs_hba *hba,
+ struct scsi_cmnd *cmd,
+ struct ufs_hw_queue *hwq)
{
- struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ const int tag = scsi_cmd_to_rq(cmd)->tag;
unsigned long flags;
- lrbp->issue_time_stamp = ktime_get();
- lrbp->issue_time_stamp_local_clock = local_clock();
- lrbp->compl_time_stamp = ktime_set(0, 0);
- lrbp->compl_time_stamp_local_clock = 0;
- ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
- if (lrbp->cmd)
+ if (hba->monitor.enabled) {
+ lrbp->issue_time_stamp = ktime_get();
+ lrbp->issue_time_stamp_local_clock = local_clock();
+ lrbp->compl_time_stamp = ktime_set(0, 0);
+ lrbp->compl_time_stamp_local_clock = 0;
+ }
+ if (ufshcd_is_scsi_cmd(cmd)) {
+ ufshcd_add_command_trace(hba, cmd, UFS_CMD_SEND);
ufshcd_clk_scaling_start_busy(hba);
- if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
- ufshcd_start_monitor(hba, lrbp);
+ if (unlikely(ufshcd_should_inform_monitor(hba, cmd)))
+ ufshcd_start_monitor(hba, cmd);
+ }
- if (is_mcq_enabled(hba)) {
+ if (hba->mcq_enabled) {
int utrd_size = sizeof(struct utp_transfer_req_desc);
struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr;
struct utp_transfer_req_desc *dest;
@@ -2313,22 +2396,22 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
} else {
spin_lock_irqsave(&hba->outstanding_lock, flags);
if (hba->vops && hba->vops->setup_xfer_req)
- hba->vops->setup_xfer_req(hba, lrbp->task_tag,
- !!lrbp->cmd);
- __set_bit(lrbp->task_tag, &hba->outstanding_reqs);
- ufshcd_writel(hba, 1 << lrbp->task_tag,
- REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ hba->vops->setup_xfer_req(hba, tag,
+ ufshcd_is_scsi_cmd(cmd));
+ __set_bit(tag, &hba->outstanding_reqs);
+ ufshcd_writel(hba, 1 << tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
}
}
/**
* ufshcd_copy_sense_data - Copy sense data in case of check condition
- * @lrbp: pointer to local reference block
+ * @cmd: SCSI command
*/
-static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
+static inline void ufshcd_copy_sense_data(struct scsi_cmnd *cmd)
{
- u8 *const sense_buffer = lrbp->cmd->sense_buffer;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ u8 *const sense_buffer = cmd->sense_buffer;
u16 resp_len;
int len;
@@ -2396,14 +2479,13 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
int err;
hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
- if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
- hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;
/* nutrs and nutmrs are 0 based values */
- hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
+ hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1;
hba->nutmrs =
((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
- hba->reserved_slot = hba->nutrs - 1;
+
+ hba->nortt = FIELD_GET(MASK_NUMBER_OUTSTANDING_RTT, hba->capabilities) + 1;
/* Read crypto capabilities */
err = ufshcd_hba_init_crypto_capabilities(hba);
@@ -2412,13 +2494,22 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
return err;
}
+ /*
+ * The UFSHCI 3.0 specification does not define MCQ_SUPPORT and
+ * LSDB_SUPPORT, but [31:29] as reserved bits with reset value 0s, which
+ * means we can simply read values regardless of version.
+ */
hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
- if (!hba->mcq_sup)
- return 0;
+ /*
+ * 0h: legacy single doorbell support is available
+ * 1h: indicate that legacy single doorbell support has been removed
+ */
+ if (!(hba->quirks & UFSHCD_QUIRK_BROKEN_LSDBS_CAP))
+ hba->lsdb_sup = !FIELD_GET(MASK_LSDB_SUPPORT, hba->capabilities);
+ else
+ hba->lsdb_sup = true;
hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
- hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
- hba->mcq_capabilities);
return 0;
}
@@ -2434,7 +2525,7 @@ static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
u32 val;
int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
- 500, UIC_CMD_TIMEOUT * 1000, false, hba,
+ 500, uic_cmd_timeout * 1000, false, hba,
REG_CONTROLLER_STATUS);
return ret == 0;
}
@@ -2494,7 +2585,7 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
lockdep_assert_held(&hba->uic_cmd_mutex);
if (wait_for_completion_timeout(&uic_cmd->done,
- msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+ msecs_to_jiffies(uic_cmd_timeout))) {
ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
} else {
ret = -ETIMEDOUT;
@@ -2520,13 +2611,11 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
* __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
* @hba: per adapter instance
* @uic_cmd: UIC command
- * @completion: initialize the completion only if this is set to true
*
- * Return: 0 only if success.
+ * Return: 0 if successful; < 0 upon failure.
*/
static int
-__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
- bool completion)
+__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
lockdep_assert_held(&hba->uic_cmd_mutex);
@@ -2536,10 +2625,9 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
return -EIO;
}
- if (completion)
- init_completion(&uic_cmd->done);
+ init_completion(&uic_cmd->done);
- uic_cmd->cmd_active = 1;
+ uic_cmd->cmd_active = true;
ufshcd_dispatch_uic_cmd(hba, uic_cmd);
return 0;
@@ -2554,6 +2642,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
*/
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
+ unsigned long flags;
int ret;
if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
@@ -2563,7 +2652,11 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
- ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
if (!ret)
ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
@@ -2622,13 +2715,13 @@ static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int
/**
* ufshcd_map_sg - Map scatter-gather list to prdt
* @hba: per adapter instance
- * @lrbp: pointer to local reference block
+ * @cmd: SCSI command
*
* Return: 0 in case of success, non-zero value in case of failure.
*/
-static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static int ufshcd_map_sg(struct ufs_hba *hba, struct scsi_cmnd *cmd)
{
- struct scsi_cmnd *cmd = lrbp->cmd;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
int sg_segments = scsi_dma_map(cmd);
if (sg_segments < 0)
@@ -2636,33 +2729,7 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
- return 0;
-}
-
-/**
- * ufshcd_enable_intr - enable interrupts
- * @hba: per adapter instance
- * @intrs: interrupt bits
- */
-static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
-{
- u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
-
- set |= intrs;
- ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
-}
-
-/**
- * ufshcd_disable_intr - disable interrupts
- * @hba: per adapter instance
- * @intrs: interrupt bits
- */
-static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
-{
- u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
-
- set &= ~intrs;
- ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
+ return ufshcd_crypto_fill_prdt(hba, cmd);
}
/**
@@ -2721,13 +2788,14 @@ ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
/**
* ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
* for scsi commands
- * @lrbp: local reference block pointer
+ * @cmd: SCSI command
* @upiu_flags: flags
*/
-static
-void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
+static void ufshcd_prepare_utp_scsi_cmd_upiu(struct scsi_cmnd *cmd,
+ u8 upiu_flags)
{
- struct scsi_cmnd *cmd = lrbp->cmd;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ const int tag = scsi_cmd_to_rq(cmd)->tag;
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
unsigned short cdb_len;
@@ -2735,16 +2803,15 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
.transaction_code = UPIU_TRANSACTION_COMMAND,
.flags = upiu_flags,
.lun = lrbp->lun,
- .task_tag = lrbp->task_tag,
+ .task_tag = tag,
.command_set_type = UPIU_COMMAND_SET_TYPE_SCSI,
};
- WARN_ON_ONCE(ucd_req_ptr->header.task_tag != lrbp->task_tag);
+ WARN_ON_ONCE(ucd_req_ptr->header.task_tag != tag);
ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
- memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
@@ -2753,13 +2820,15 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
/**
* ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request
* @hba: UFS hba
- * @lrbp: local reference block pointer
+ * @cmd: SCSI command pointer
* @upiu_flags: flags
*/
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp, u8 upiu_flags)
+ struct scsi_cmnd *cmd, u8 upiu_flags)
{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+ const int tag = scsi_cmd_to_rq(cmd)->tag;
struct ufs_query *query = &hba->dev_cmd.query;
u16 len = be16_to_cpu(query->request.upiu_req.length);
@@ -2768,7 +2837,7 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
.transaction_code = UPIU_TRANSACTION_QUERY_REQ,
.flags = upiu_flags,
.lun = lrbp->lun,
- .task_tag = lrbp->task_tag,
+ .task_tag = tag,
.query_function = query->request.query_func,
/* Data segment length only need for WRITE_DESC */
.data_segment_length =
@@ -2785,47 +2854,48 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
/* Copy the Descriptor */
if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
memcpy(ucd_req_ptr + 1, query->descriptor, len);
-
- memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
-static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
+static inline void ufshcd_prepare_utp_nop_upiu(struct scsi_cmnd *cmd)
{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+ const int tag = scsi_cmd_to_rq(cmd)->tag;
memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
ucd_req_ptr->header = (struct utp_upiu_header){
.transaction_code = UPIU_TRANSACTION_NOP_OUT,
- .task_tag = lrbp->task_tag,
+ .task_tag = tag,
};
-
- memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
* ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU)
* for Device Management Purposes
* @hba: per adapter instance
- * @lrbp: pointer to local reference block
+ * @cmd: SCSI command pointer
*
* Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp)
+ struct scsi_cmnd *cmd)
{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
u8 upiu_flags;
int ret = 0;
ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0);
if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
- ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
+ ufshcd_prepare_utp_query_req_upiu(hba, cmd, upiu_flags);
else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
- ufshcd_prepare_utp_nop_upiu(lrbp);
+ ufshcd_prepare_utp_nop_upiu(cmd);
else
ret = -EINVAL;
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+
return ret;
}
@@ -2833,18 +2903,69 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
* ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
* for SCSI Purposes
* @hba: per adapter instance
- * @lrbp: pointer to local reference block
+ * @cmd: SCSI command
*/
-static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct scsi_cmnd *cmd)
{
- struct request *rq = scsi_cmd_to_rq(lrbp->cmd);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ struct request *rq = scsi_cmd_to_rq(cmd);
unsigned int ioprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
u8 upiu_flags;
- ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
+ ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags,
+ cmd->sc_data_direction, 0);
if (ioprio_class == IOPRIO_CLASS_RT)
upiu_flags |= UPIU_CMD_FLAGS_CP;
- ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
+ ufshcd_prepare_utp_scsi_cmd_upiu(cmd, upiu_flags);
+}
+
+static void ufshcd_init_lrb(struct ufs_hba *hba, struct scsi_cmnd *cmd)
+{
+ const int i = scsi_cmd_to_rq(cmd)->tag;
+ struct utp_transfer_cmd_desc *cmd_descp =
+ (void *)hba->ucdl_base_addr + i * ufshcd_get_ucd_size(hba);
+ struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
+ dma_addr_t cmd_desc_element_addr =
+ hba->ucdl_dma_addr + i * ufshcd_get_ucd_size(hba);
+ u16 response_offset = le16_to_cpu(utrdlp[i].response_upiu_offset);
+ u16 prdt_offset = le16_to_cpu(utrdlp[i].prd_table_offset);
+ struct ufshcd_lrb *lrb = scsi_cmd_priv(cmd);
+
+ lrb->utr_descriptor_ptr = utrdlp + i;
+ lrb->utrd_dma_addr =
+ hba->utrdl_dma_addr + i * sizeof(struct utp_transfer_req_desc);
+ lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
+ lrb->ucd_req_dma_addr = cmd_desc_element_addr;
+ lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
+ lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
+ lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
+ lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
+}
+
+static void __ufshcd_setup_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd,
+ u8 lun, int tag)
+{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+
+ ufshcd_init_lrb(hba, cmd);
+
+ memset(lrbp->ucd_req_ptr, 0, sizeof(*lrbp->ucd_req_ptr));
+
+ lrbp->lun = lun;
+ ufshcd_prepare_lrbp_crypto(ufshcd_is_scsi_cmd(cmd) ?
+ scsi_cmd_to_rq(cmd) : NULL, lrbp);
+}
+
+static void ufshcd_setup_scsi_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd,
+ u8 lun, int tag)
+{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+
+ __ufshcd_setup_cmd(hba, cmd, lun, tag);
+ lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
+ lrbp->req_abort_skip = false;
+
+ ufshcd_comp_scsi_upiu(hba, cmd);
}
/**
@@ -2895,26 +3016,13 @@ static void ufshcd_map_queues(struct Scsi_Host *shost)
}
}
-static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
+/*
+ * The only purpose of this function is to make the SCSI core skip the memset()
+ * call for the private command data.
+ */
+static int ufshcd_init_cmd_priv(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
- struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
- i * ufshcd_get_ucd_size(hba);
- struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
- dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
- i * ufshcd_get_ucd_size(hba);
- u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
- response_upiu);
- u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
-
- lrb->utr_descriptor_ptr = utrdlp + i;
- lrb->utrd_dma_addr = hba->utrdl_dma_addr +
- i * sizeof(struct utp_transfer_req_desc);
- lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
- lrb->ucd_req_dma_addr = cmd_desc_element_addr;
- lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
- lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
- lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
- lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
+ return 0;
}
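
The stub above relies on the SCSI midlayer only zeroing the per-command private data when the host template does not provide .init_cmd_priv. A minimal sketch of how such a hook is typically wired up in a host template (abridged and illustrative, not the full UFS template):

static const struct scsi_host_template example_sht = {
	/* ... */
	.cmd_size	= sizeof(struct ufshcd_lrb),
	.init_cmd_priv	= ufshcd_init_cmd_priv,
	/* ... */
};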
/**
@@ -2928,7 +3036,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
struct ufs_hba *hba = shost_priv(host);
int tag = scsi_cmd_to_rq(cmd)->tag;
- struct ufshcd_lrb *lrbp;
int err = 0;
struct ufs_hw_queue *hwq = NULL;
@@ -2979,28 +3086,19 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
ufshcd_hold(hba);
- lrbp = &hba->lrb[tag];
- lrbp->cmd = cmd;
- lrbp->task_tag = tag;
- lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
- lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
+ ufshcd_setup_scsi_cmd(hba, cmd,
+ ufshcd_scsi_to_upiu_lun(cmd->device->lun), tag);
- ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
-
- lrbp->req_abort_skip = false;
-
- ufshcd_comp_scsi_upiu(hba, lrbp);
-
- err = ufshcd_map_sg(hba, lrbp);
+ err = ufshcd_map_sg(hba, cmd);
if (err) {
ufshcd_release(hba);
goto out;
}
- if (is_mcq_enabled(hba))
+ if (hba->mcq_enabled)
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
- ufshcd_send_command(hba, tag, hwq);
+ ufshcd_send_command(hba, cmd, hwq);
out:
if (ufs_trigger_eh(hba)) {
@@ -3014,23 +3112,39 @@ out:
return err;
}
-static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
- enum dev_cmd_type cmd_type, u8 lun, int tag)
+static int ufshcd_queue_reserved_command(struct Scsi_Host *host,
+ struct scsi_cmnd *cmd)
{
- lrbp->cmd = NULL;
- lrbp->task_tag = tag;
- lrbp->lun = lun;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ struct request *rq = scsi_cmd_to_rq(cmd);
+ struct ufs_hba *hba = shost_priv(host);
+ struct ufs_hw_queue *hwq =
+ hba->mcq_enabled ? ufshcd_mcq_req_to_hwq(hba, rq) : NULL;
+
+ ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
+ ufshcd_send_command(hba, cmd, hwq);
+ return 0;
+}
+
+static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd,
+ enum dev_cmd_type cmd_type, u8 lun, int tag)
+{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+
+ __ufshcd_setup_cmd(hba, cmd, lun, tag);
lrbp->intr_cmd = true; /* No interrupt aggregation */
- ufshcd_prepare_lrbp_crypto(NULL, lrbp);
hba->dev_cmd.type = cmd_type;
}
-static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
+/*
+ * Return: 0 upon success; < 0 upon failure.
+ */
+static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd,
+ enum dev_cmd_type cmd_type, int tag)
{
- ufshcd_setup_dev_cmd(hba, lrbp, cmd_type, 0, tag);
+ ufshcd_setup_dev_cmd(hba, cmd, cmd_type, 0, tag);
- return ufshcd_compose_devman_upiu(hba, lrbp);
+ return ufshcd_compose_devman_upiu(hba, cmd);
}
/*
@@ -3053,10 +3167,9 @@ bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
{
u32 mask;
- unsigned long flags;
int err;
- if (is_mcq_enabled(hba)) {
+ if (hba->mcq_enabled) {
/*
* MCQ mode. Clean up the MCQ resources similar to
* what the ufshcd_utrl_clear() does for SDB mode.
@@ -3073,9 +3186,7 @@ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
mask = 1U << task_tag;
/* clear outstanding transaction before retry */
- spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_utrl_clear(hba, mask);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
/*
* wait for h/w to clear corresponding bit in door-bell.
@@ -3112,8 +3223,13 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
case UPIU_TRANSACTION_QUERY_RSP: {
u8 response = lrbp->ucd_rsp_ptr->header.response;
- if (response == 0)
+ if (response == 0) {
err = ufshcd_copy_query_response(hba, lrbp);
+ } else {
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: unexpected response in Query RSP: %x\n",
+ __func__, response);
+ }
break;
}
case UPIU_TRANSACTION_REJECT_UPIU:
@@ -3135,94 +3251,7 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
break;
}
- return err;
-}
-
-static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp, int max_timeout)
-{
- unsigned long time_left = msecs_to_jiffies(max_timeout);
- unsigned long flags;
- bool pending;
- int err;
-
-retry:
- time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
- time_left);
-
- if (likely(time_left)) {
- /*
- * The completion handler called complete() and the caller of
- * this function still owns the @lrbp tag so the code below does
- * not trigger any race conditions.
- */
- hba->dev_cmd.complete = NULL;
- err = ufshcd_get_tr_ocs(lrbp, NULL);
- if (!err)
- err = ufshcd_dev_cmd_completion(hba, lrbp);
- } else {
- err = -ETIMEDOUT;
- dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
- __func__, lrbp->task_tag);
-
- /* MCQ mode */
- if (is_mcq_enabled(hba)) {
- /* successfully cleared the command, retry if needed */
- if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
- err = -EAGAIN;
- hba->dev_cmd.complete = NULL;
- return err;
- }
-
- /* SDB mode */
- if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
- /* successfully cleared the command, retry if needed */
- err = -EAGAIN;
- /*
- * Since clearing the command succeeded we also need to
- * clear the task tag bit from the outstanding_reqs
- * variable.
- */
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- pending = test_bit(lrbp->task_tag,
- &hba->outstanding_reqs);
- if (pending) {
- hba->dev_cmd.complete = NULL;
- __clear_bit(lrbp->task_tag,
- &hba->outstanding_reqs);
- }
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
-
- if (!pending) {
- /*
- * The completion handler ran while we tried to
- * clear the command.
- */
- time_left = 1;
- goto retry;
- }
- } else {
- dev_err(hba->dev, "%s: failed to clear tag %d\n",
- __func__, lrbp->task_tag);
-
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- pending = test_bit(lrbp->task_tag,
- &hba->outstanding_reqs);
- if (pending)
- hba->dev_cmd.complete = NULL;
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
-
- if (!pending) {
- /*
- * The completion handler ran while we tried to
- * clear the command.
- */
- time_left = 1;
- goto retry;
- }
- }
- }
-
+ WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
@@ -3240,23 +3269,40 @@ static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
ufshcd_release(hba);
}
-static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
- const u32 tag, int timeout)
+static struct scsi_cmnd *ufshcd_get_dev_mgmt_cmd(struct ufs_hba *hba)
{
- DECLARE_COMPLETION_ONSTACK(wait);
- int err;
-
- hba->dev_cmd.complete = &wait;
+ /*
+ * The caller must hold this lock to guarantee that the NOWAIT
+ * allocation will succeed.
+ */
+ lockdep_assert_held(&hba->dev_cmd.lock);
- ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
+ return scsi_get_internal_cmd(
+ hba->host->pseudo_sdev, DMA_TO_DEVICE,
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+}
- ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
- err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
+static void ufshcd_put_dev_mgmt_cmd(struct scsi_cmnd *cmd)
+{
+ scsi_put_internal_cmd(cmd);
+}
- ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
- (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
+/*
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
+ */
+static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd,
+ const u32 tag, int timeout)
+{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ struct request *rq = scsi_cmd_to_rq(cmd);
+ blk_status_t sts;
- return err;
+ rq->timeout = timeout;
+ sts = blk_execute_rq(rq, true);
+ if (sts != BLK_STS_OK)
+ return blk_status_to_errno(sts);
+ return lrbp->utr_descriptor_ptr->header.ocs;
}
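
The tri-state return convention above propagates to the query helpers further down. A caller-side sketch, assuming nothing beyond the documented convention (0, positive OCS value, negative errno):

	int ret = ufshcd_issue_dev_cmd(hba, cmd, tag, timeout);

	if (ret < 0) {
		/* Host-side failure, e.g. blk_execute_rq() returned an error. */
		dev_err(hba->dev, "dev cmd failed: %d\n", ret);
	} else if (ret > 0) {
		/* The transfer completed with a non-success OCS value. */
		dev_err(hba->dev, "dev cmd completed with OCS %#x\n", ret);
	} else {
		/* OCS_SUCCESS: the response UPIU in lrbp->ucd_rsp_ptr is valid. */
	}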
/**
@@ -3265,7 +3311,8 @@ static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
* @cmd_type: specifies the type (NOP, Query...)
* @timeout: timeout in milliseconds
*
- * Return: 0 upon success; < 0 upon failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*
* NOTE: Since there is only one available tag for device management commands,
* it is expected you hold the hba->dev_cmd.lock mutex.
@@ -3273,18 +3320,31 @@ static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
{
- const u32 tag = hba->reserved_slot;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct scsi_cmnd *cmd = ufshcd_get_dev_mgmt_cmd(hba);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ u32 tag;
int err;
- /* Protects use of hba->reserved_slot. */
+ /* Protects use of hba->dev_cmd. */
lockdep_assert_held(&hba->dev_cmd.lock);
- err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
+ if (WARN_ON_ONCE(!cmd))
+ return -ENOMEM;
+
+ tag = scsi_cmd_to_rq(cmd)->tag;
+
+ err = ufshcd_compose_dev_cmd(hba, cmd, cmd_type, tag);
if (unlikely(err))
- return err;
+ goto out;
- return ufshcd_issue_dev_cmd(hba, lrbp, tag, timeout);
+ err = ufshcd_issue_dev_cmd(hba, cmd, tag, timeout);
+ if (err == 0)
+ err = ufshcd_dev_cmd_completion(hba, lrbp);
+
+out:
+ ufshcd_put_dev_mgmt_cmd(cmd);
+
+ return err;
}
/**
@@ -3311,6 +3371,10 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
(*request)->upiu_req.selector = selector;
}
+/*
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
+ */
static int ufshcd_query_flag_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
{
@@ -3342,7 +3406,8 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
* @index: flag index to access
* @flag_res: the flag value after the query request completes
*
- * Return: 0 for success, non-zero in case of failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, u8 index, bool *flag_res)
@@ -3350,7 +3415,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
int err, selector = 0;
- int timeout = QUERY_REQ_TIMEOUT;
+ int timeout = dev_cmd_timeout;
BUG_ON(!hba);
@@ -3410,8 +3475,9 @@ out_unlock:
* @selector: selector field
* @attr_val: the attribute value after the query request completes
*
- * Return: 0 for success, non-zero in case of failure.
-*/
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
+ */
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
@@ -3447,7 +3513,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
goto out_unlock;
}
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
if (err) {
dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
@@ -3473,8 +3539,9 @@ out_unlock:
* @attr_val: the attribute value after the query request
* completes
*
- * Return: 0 for success, non-zero in case of failure.
-*/
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
+ */
int ufshcd_query_attr_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
u32 *attr_val)
@@ -3499,6 +3566,10 @@ int ufshcd_query_attr_retry(struct ufs_hba *hba,
return ret;
}
+/*
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
+ */
static int __ufshcd_query_descriptor(struct ufs_hba *hba,
enum query_opcode opcode, enum desc_idn idn, u8 index,
u8 selector, u8 *desc_buf, int *buf_len)
@@ -3543,7 +3614,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
goto out_unlock;
}
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
if (err) {
dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
@@ -3572,7 +3643,8 @@ out_unlock:
* The buf_len parameter will contain, on return, the length parameter
* received on the response.
*
- * Return: 0 for success, non-zero in case of failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
enum query_opcode opcode,
@@ -3602,7 +3674,8 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
* @param_read_buf: pointer to buffer where parameter would be read
* @param_size: sizeof(param_read_buf)
*
- * Return: 0 in case of success, non-zero otherwise.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
int ufshcd_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
@@ -3697,16 +3770,14 @@ static inline char ufshcd_remove_non_printable(u8 ch)
* @desc_index: descriptor index
* @buf: pointer to buffer where descriptor would be read,
* the caller should free the memory.
- * @ascii: if true convert from unicode to ascii characters
- * null terminated string.
+ * @fmt: if %SD_ASCII_STD, convert from UTF-16 to ASCII
*
* Return:
* * string size on success.
* * -ENOMEM: on allocation failure
* * -EINVAL: on a wrong parameter
*/
-int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
- u8 **buf, bool ascii)
+int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, u8 **buf, enum ufs_descr_fmt fmt)
{
struct uc_string_id *uc_str;
u8 *str;
@@ -3735,7 +3806,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
goto out;
}
- if (ascii) {
+ if (fmt == SD_ASCII_STD) {
ssize_t ascii_len;
int i;
/* remove header and divide by 2 to move from UTF16 to UTF8 */
@@ -3761,7 +3832,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
str[ret++] = '\0';
} else {
- str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
+ str = kmemdup(uc_str->uc, uc_str->len, GFP_KERNEL);
if (!str) {
ret = -ENOMEM;
goto out;
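
A hedged caller sketch for the reworked ufshcd_read_string_desc() interface; desc_index is assumed to have been read from the device descriptor beforehand:

	u8 *product_name = NULL;
	int len;

	len = ufshcd_read_string_desc(hba, desc_index, &product_name,
				      SD_ASCII_STD);
	if (len < 0) {
		dev_err(hba->dev, "reading string descriptor failed: %d\n", len);
	} else {
		dev_info(hba->dev, "product name: %s\n", product_name);
		kfree(product_name);	/* the caller owns the returned buffer */
	}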
@@ -3782,7 +3853,7 @@ out:
* @param_read_buf: pointer to buffer where parameter would be read
* @param_size: sizeof(param_read_buf)
*
- * Return: 0 in case of success, non-zero otherwise.
+ * Return: 0 in case of success; < 0 upon failure.
*/
static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
int lun,
@@ -3901,14 +3972,6 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
}
skip_utmrdl:
- /* Allocate memory for local reference block */
- hba->lrb = devm_kcalloc(hba->dev,
- hba->nutrs, sizeof(struct ufshcd_lrb),
- GFP_KERNEL);
- if (!hba->lrb) {
- dev_err(hba->dev, "LRB Memory allocation failed\n");
- goto out;
- }
return 0;
out:
return -ENOMEM;
@@ -3970,8 +4033,6 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
utrdlp[i].response_upiu_length =
cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
}
-
- ufshcd_init_lrb(hba, &hba->lrb[i], i);
}
}
@@ -3988,11 +4049,11 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
*/
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
- struct uic_command uic_cmd = {0};
+ struct uic_command uic_cmd = {
+ .command = UIC_CMD_DME_LINK_STARTUP,
+ };
int ret;
- uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
-
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_dbg(hba->dev,
@@ -4008,13 +4069,13 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
*
* Return: 0 on success, non-zero value on failure.
*/
-static int ufshcd_dme_reset(struct ufs_hba *hba)
+int ufshcd_dme_reset(struct ufs_hba *hba)
{
- struct uic_command uic_cmd = {0};
+ struct uic_command uic_cmd = {
+ .command = UIC_CMD_DME_RESET,
+ };
int ret;
- uic_cmd.command = UIC_CMD_DME_RESET;
-
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_err(hba->dev,
@@ -4022,6 +4083,7 @@ static int ufshcd_dme_reset(struct ufs_hba *hba)
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_dme_reset);
int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
int agreed_gear,
@@ -4047,13 +4109,13 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
*
* Return: 0 on success, non-zero value on failure.
*/
-static int ufshcd_dme_enable(struct ufs_hba *hba)
+int ufshcd_dme_enable(struct ufs_hba *hba)
{
- struct uic_command uic_cmd = {0};
+ struct uic_command uic_cmd = {
+ .command = UIC_CMD_DME_ENABLE,
+ };
int ret;
- uic_cmd.command = UIC_CMD_DME_ENABLE;
-
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_err(hba->dev,
@@ -4061,6 +4123,7 @@ static int ufshcd_dme_enable(struct ufs_hba *hba)
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_dme_enable);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
@@ -4086,11 +4149,16 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
min_sleep_time_us =
MIN_DELAY_BEFORE_DME_CMDS_US - delta;
else
- return; /* no more delay required */
+ min_sleep_time_us = 0; /* no more delay required */
+ }
+
+ if (min_sleep_time_us > 0) {
+ /* allow sleep for extra 50us if needed */
+ usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
}
- /* allow sleep for extra 50us if needed */
- usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
+ /* update the last_dme_cmd_tstamp */
+ hba->last_dme_cmd_tstamp = ktime_get();
}
/**
@@ -4106,7 +4174,12 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
u8 attr_set, u32 mib_val, u8 peer)
{
- struct uic_command uic_cmd = {0};
+ struct uic_command uic_cmd = {
+ .command = peer ? UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET,
+ .argument1 = attr_sel,
+ .argument2 = UIC_ARG_ATTR_TYPE(attr_set),
+ .argument3 = mib_val,
+ };
static const char *const action[] = {
"dme-set",
"dme-peer-set"
@@ -4115,12 +4188,6 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
int ret;
int retries = UFS_UIC_COMMAND_RETRIES;
- uic_cmd.command = peer ?
- UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
- uic_cmd.argument1 = attr_sel;
- uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
- uic_cmd.argument3 = mib_val;
-
do {
/* for peer attributes we retry upon failure */
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
@@ -4150,7 +4217,10 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
u32 *mib_val, u8 peer)
{
- struct uic_command uic_cmd = {0};
+ struct uic_command uic_cmd = {
+ .command = peer ? UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET,
+ .argument1 = attr_sel,
+ };
static const char *const action[] = {
"dme-get",
"dme-peer-get"
@@ -4184,10 +4254,6 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
}
}
- uic_cmd.command = peer ?
- UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
- uic_cmd.argument1 = attr_sel;
-
do {
/* for peer attributes we retry upon failure */
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
@@ -4201,8 +4267,8 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
get, UIC_GET_ATTR_ID(attr_sel),
UFS_UIC_COMMAND_RETRIES - retries);
- if (mib_val && !ret)
- *mib_val = uic_cmd.argument3;
+ if (mib_val)
+ *mib_val = ret == 0 ? uic_cmd.argument3 : 0;
if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
&& pwr_mode_change)
@@ -4213,6 +4279,30 @@ out:
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
/**
+ * ufshcd_dme_rmw - read, modify and write a DME attribute
+ * @hba: per adapter instance
+ * @mask: selects which bits of the attribute to update
+ * @val: value to write into the bits selected by @mask
+ * @attr: DME attribute
+ */
+int ufshcd_dme_rmw(struct ufs_hba *hba, u32 mask,
+ u32 val, u32 attr)
+{
+ u32 cfg = 0;
+ int err;
+
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB(attr), &cfg);
+ if (err)
+ return err;
+
+ cfg &= ~mask;
+ cfg |= (val & mask);
+
+ return ufshcd_dme_set(hba, UIC_ARG_MIB(attr), cfg);
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_rmw);
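
A usage sketch of the new read-modify-write helper; VENDOR_DME_ATTR is a hypothetical vendor-specific MIB attribute, not one defined by this patch:

	/* Set bit 0 of the (hypothetical) vendor attribute. */
	err = ufshcd_dme_rmw(hba, BIT(0), BIT(0), VENDOR_DME_ATTR);
	if (err)
		return err;

	/* Clear the same bit again; only the bits in the mask are touched. */
	err = ufshcd_dme_rmw(hba, BIT(0), 0, VENDOR_DME_ATTR);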
+
+/**
 * ufshcd_uic_pwr_ctrl - execute a UIC command that affects the link power
 * state and wait for it to take effect.
*
@@ -4234,7 +4324,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
unsigned long flags;
u8 status;
int ret;
- bool reenable_intr = false;
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
@@ -4245,17 +4334,9 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
goto out_unlock;
}
hba->uic_async_done = &uic_async_done;
- if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
- ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
- /*
- * Make sure UIC command completion interrupt is disabled before
- * issuing UIC command.
- */
- ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- reenable_intr = true;
- }
+ ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
spin_unlock_irqrestore(hba->host->host_lock, flags);
- ret = __ufshcd_send_uic_cmd(hba, cmd, false);
+ ret = __ufshcd_send_uic_cmd(hba, cmd);
if (ret) {
dev_err(hba->dev,
"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
@@ -4264,7 +4345,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
}
if (!wait_for_completion_timeout(hba->uic_async_done,
- msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+ msecs_to_jiffies(uic_cmd_timeout))) {
dev_err(hba->dev,
"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
cmd->command, cmd->argument3);
@@ -4297,9 +4378,7 @@ out:
spin_lock_irqsave(hba->host->host_lock, flags);
hba->active_uic_cmd = NULL;
hba->uic_async_done = NULL;
- if (reenable_intr)
- ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
- if (ret) {
+ if (ret && !hba->pm_op_in_progress) {
ufshcd_set_link_broken(hba);
ufshcd_schedule_eh_work(hba);
}
@@ -4307,6 +4386,39 @@ out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
mutex_unlock(&hba->uic_cmd_mutex);
+ /*
+	 * If the hibern8 exit fails during runtime resume, the link gets
+	 * stuck and cannot be recovered by the error handler. To fix this,
+	 * use link recovery instead of the error handler.
+ */
+ if (ret && hba->pm_op_in_progress)
+ ret = ufshcd_link_recovery(hba);
+
+ return ret;
+}
+
+/**
+ * ufshcd_send_bsg_uic_cmd - Send a UIC command requested via the BSG layer and retrieve the result
+ * @hba: per adapter instance
+ * @uic_cmd: UIC command
+ *
+ * Return: 0 upon success; a non-zero value upon failure.
+ */
+int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+{
+ int ret;
+
+ if (uic_cmd->argument1 != UIC_ARG_MIB(PA_PWRMODE) ||
+ uic_cmd->command != UIC_CMD_DME_SET)
+ return ufshcd_send_uic_cmd(hba, uic_cmd);
+
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
+ return 0;
+
+ ufshcd_hold(hba);
+ ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd);
+ ufshcd_release(hba);
+
return ret;
}
@@ -4320,7 +4432,11 @@ out_unlock:
*/
int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
- struct uic_command uic_cmd = {0};
+ struct uic_command uic_cmd = {
+ .command = UIC_CMD_DME_SET,
+ .argument1 = UIC_ARG_MIB(PA_PWRMODE),
+ .argument3 = mode,
+ };
int ret;
if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
@@ -4333,9 +4449,6 @@ int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
}
}
- uic_cmd.command = UIC_CMD_DME_SET;
- uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
- uic_cmd.argument3 = mode;
ufshcd_hold(hba);
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
ufshcd_release(hba);
@@ -4376,15 +4489,16 @@ EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
- int ret;
- struct uic_command uic_cmd = {0};
+ struct uic_command uic_cmd = {
+ .command = UIC_CMD_DME_HIBER_ENTER,
+ };
ktime_t start = ktime_get();
+ int ret;
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
- uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
- trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
+ trace_ufshcd_profile_hibern8(hba, "enter",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
if (ret)
@@ -4400,15 +4514,16 @@ EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
- struct uic_command uic_cmd = {0};
+ struct uic_command uic_cmd = {
+ .command = UIC_CMD_DME_HIBER_EXIT,
+ };
int ret;
ktime_t start = ktime_get();
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
- uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
- trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
+ trace_ufshcd_profile_hibern8(hba, "exit",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
if (ret) {
@@ -4503,6 +4618,14 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
return -EINVAL;
}
+ if (pwr_info->lane_rx != pwr_info->lane_tx) {
+ dev_err(hba->dev, "%s: asymmetric connected lanes. rx=%d, tx=%d\n",
+ __func__,
+ pwr_info->lane_rx,
+ pwr_info->lane_tx);
+ return -EINVAL;
+ }
+
/*
* First, get the maximum gears of HS speed.
* If a zero value, it means there is no HSGEAR capability.
@@ -4615,9 +4738,6 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
dev_err(hba->dev,
"%s: power mode change failed %d\n", __func__, ret);
} else {
- ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
- pwr_mode);
-
memcpy(&hba->pwr_info, pwr_mode,
sizeof(struct ufs_pa_layer_attr));
}
@@ -4646,6 +4766,10 @@ int ufshcd_config_pwr_mode(struct ufs_hba *hba,
ret = ufshcd_change_power_mode(hba, &final_params);
+ if (!ret)
+ ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
+ &final_params);
+
return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
@@ -4656,7 +4780,8 @@ EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
*
* Set fDeviceInit flag and poll until device toggles it.
*
- * Return: 0 upon success; < 0 upon failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
@@ -4707,7 +4832,7 @@ out:
* 3. Program UTRL and UTMRL base address
* 4. Configure run-stop-registers
*
- * Return: 0 on success, non-zero value on failure.
+ * Return: 0 if successful; < 0 upon failure.
*/
int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
@@ -4755,20 +4880,14 @@ EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
*/
void ufshcd_hba_stop(struct ufs_hba *hba)
{
- unsigned long flags;
int err;
- /*
- * Obtain the host lock to prevent that the controller is disabled
- * while the UFS interrupt handler is active on another CPU.
- */
- spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_disable_irq(hba);
ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
CONTROLLER_ENABLE, CONTROLLER_DISABLE,
10, 1);
+ ufshcd_enable_irq(hba);
if (err)
dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
@@ -4786,51 +4905,44 @@ EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
*/
static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
{
- int retry_outer = 3;
- int retry_inner;
+ int retry;
-start:
- if (ufshcd_is_hba_active(hba))
- /* change controller state to "reset state" */
- ufshcd_hba_stop(hba);
+ for (retry = 3; retry > 0; retry--) {
+ if (ufshcd_is_hba_active(hba))
+ /* change controller state to "reset state" */
+ ufshcd_hba_stop(hba);
- /* UniPro link is disabled at this point */
- ufshcd_set_link_off(hba);
+ /* UniPro link is disabled at this point */
+ ufshcd_set_link_off(hba);
- ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
+ ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
- /* start controller initialization sequence */
- ufshcd_hba_start(hba);
+ /* start controller initialization sequence */
+ ufshcd_hba_start(hba);
- /*
- * To initialize a UFS host controller HCE bit must be set to 1.
- * During initialization the HCE bit value changes from 1->0->1.
- * When the host controller completes initialization sequence
- * it sets the value of HCE bit to 1. The same HCE bit is read back
- * to check if the controller has completed initialization sequence.
- * So without this delay the value HCE = 1, set in the previous
- * instruction might be read back.
- * This delay can be changed based on the controller.
- */
- ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
+ /*
+ * To initialize a UFS host controller HCE bit must be set to 1.
+		 * To initialize a UFS host controller, the HCE bit must be set to 1.
+		 * During initialization the HCE bit value changes from 1->0->1.
+		 * When the host controller completes the initialization sequence
+ * to check if the controller has completed initialization sequence.
+ * So without this delay the value HCE = 1, set in the previous
+ * instruction might be read back.
+ * This delay can be changed based on the controller.
+ */
+ ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
- /* wait for the host controller to complete initialization */
- retry_inner = 50;
- while (!ufshcd_is_hba_active(hba)) {
- if (retry_inner) {
- retry_inner--;
- } else {
- dev_err(hba->dev,
- "Controller enable failed\n");
- if (retry_outer) {
- retry_outer--;
- goto start;
- }
- return -EIO;
- }
- usleep_range(1000, 1100);
+ /* wait for the host controller to complete initialization */
+ if (!ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, CONTROLLER_ENABLE,
+ CONTROLLER_ENABLE, 1000, 50))
+ break;
+
+ dev_err(hba->dev, "Enabling the controller failed\n");
}
+ if (!retry)
+ return -EIO;
+
/* enable UIC related interrupts */
ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
@@ -4872,7 +4984,7 @@ EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
- int tx_lanes = 0, i, err = 0;
+ int tx_lanes, i, err = 0;
if (!peer)
ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
@@ -4939,7 +5051,8 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
* If UFS device isn't active then we will have to issue link startup
* 2 times to make sure the device state move to active.
*/
- if (!ufshcd_is_ufs_dev_active(hba))
+ if (!(hba->quirks & UFSHCD_QUIRK_PERFORM_LINK_STARTUP_ONCE) &&
+ !ufshcd_is_ufs_dev_active(hba))
link_startup_again = true;
link_startup:
@@ -5004,12 +5117,8 @@ link_startup:
ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
ret = ufshcd_make_hba_operational(hba);
out:
- if (ret) {
+ if (ret)
dev_err(hba->dev, "link startup failed %d\n", ret);
- ufshcd_print_host_state(hba);
- ufshcd_print_pwr_info(hba);
- ufshcd_print_evt_hist(hba);
- }
return ret;
}
@@ -5023,7 +5132,8 @@ out:
* not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
* and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
*
- * Return: 0 upon success; < 0 upon failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
@@ -5128,10 +5238,15 @@ static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
hba->dev_info.is_lu_power_on_wp = true;
- /* In case of RPMB LU, check if advanced RPMB mode is enabled */
- if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
- desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
- hba->dev_info.b_advanced_rpmb_en = true;
+ /* In case of RPMB LU, check if advanced RPMB mode is enabled, and get region size */
+ if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN) {
+ if (desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
+ hba->dev_info.b_advanced_rpmb_en = true;
+ hba->dev_info.rpmb_region_size[0] = desc_buf[RPMB_UNIT_DESC_PARAM_REGION0_SIZE];
+ hba->dev_info.rpmb_region_size[1] = desc_buf[RPMB_UNIT_DESC_PARAM_REGION1_SIZE];
+ hba->dev_info.rpmb_region_size[2] = desc_buf[RPMB_UNIT_DESC_PARAM_REGION2_SIZE];
+ hba->dev_info.rpmb_region_size[3] = desc_buf[RPMB_UNIT_DESC_PARAM_REGION3_SIZE];
+ }
kfree(desc_buf);
@@ -5145,12 +5260,12 @@ set_qdepth:
}
/**
- * ufshcd_slave_alloc - handle initial SCSI device configurations
+ * ufshcd_sdev_init - handle initial SCSI device configurations
* @sdev: pointer to SCSI device
*
* Return: success.
*/
-static int ufshcd_slave_alloc(struct scsi_device *sdev)
+static int ufshcd_sdev_init(struct scsi_device *sdev)
{
struct ufs_hba *hba;
@@ -5193,17 +5308,19 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
}
/**
- * ufshcd_slave_configure - adjust SCSI device configurations
+ * ufshcd_sdev_configure - adjust SCSI device configurations
* @sdev: pointer to SCSI device
+ * @lim: queue limits
*
* Return: 0 (success).
*/
-static int ufshcd_slave_configure(struct scsi_device *sdev)
+static int ufshcd_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ufs_hba *hba = shost_priv(sdev->host);
struct request_queue *q = sdev->request_queue;
- blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
+ lim->dma_pad_mask = PRDT_DATA_BYTE_COUNT_PAD - 1;
/*
* Block runtime-pm until all consumers are added.
@@ -5220,16 +5337,19 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
*/
sdev->silence_suspend = 1;
+ if (hba->vops && hba->vops->config_scsi_dev)
+ hba->vops->config_scsi_dev(sdev);
+
ufshcd_crypto_register(hba, q);
return 0;
}
/**
- * ufshcd_slave_destroy - remove SCSI device configurations
+ * ufshcd_sdev_destroy - remove SCSI device configurations
* @sdev: pointer to SCSI device
*/
-static void ufshcd_slave_destroy(struct scsi_device *sdev)
+static void ufshcd_sdev_destroy(struct scsi_device *sdev)
{
struct ufs_hba *hba;
unsigned long flags;
@@ -5266,19 +5386,18 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
/**
* ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
- * @lrbp: pointer to local reference block of completed command
+ * @cmd: SCSI command
* @scsi_status: SCSI command status
*
* Return: value base on SCSI command status.
*/
-static inline int
-ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
+static inline int ufshcd_scsi_cmd_status(struct scsi_cmnd *cmd, int scsi_status)
{
int result = 0;
switch (scsi_status) {
case SAM_STAT_CHECK_CONDITION:
- ufshcd_copy_sense_data(lrbp);
+ ufshcd_copy_sense_data(cmd);
fallthrough;
case SAM_STAT_GOOD:
result |= DID_OK << 16 | scsi_status;
@@ -5286,7 +5405,7 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
case SAM_STAT_TASK_SET_FULL:
case SAM_STAT_BUSY:
case SAM_STAT_TASK_ABORTED:
- ufshcd_copy_sense_data(lrbp);
+ ufshcd_copy_sense_data(cmd);
result |= scsi_status;
break;
default:
@@ -5300,15 +5419,17 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
/**
* ufshcd_transfer_rsp_status - Get overall status of the response
* @hba: per adapter instance
- * @lrbp: pointer to local reference block of completed command
+ * @cmd: SCSI command
* @cqe: pointer to the completion queue entry
*
* Return: result of the command to notify SCSI midlayer.
*/
-static inline int
-ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
- struct cq_entry *cqe)
+static inline int ufshcd_transfer_rsp_status(struct ufs_hba *hba,
+ struct scsi_cmnd *cmd,
+ struct cq_entry *cqe)
{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ const int tag = scsi_cmd_to_rq(cmd)->tag;
int result = 0;
int scsi_status;
enum utp_ocs ocs;
@@ -5322,7 +5443,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
* not set either flag.
*/
if (resid && !(upiu_flags & UPIU_RSP_FLAG_OVERFLOW))
- scsi_set_resid(lrbp->cmd, resid);
+ scsi_set_resid(cmd, resid);
/* overall command status of utrd */
ocs = ufshcd_get_tr_ocs(lrbp, cqe);
@@ -5343,7 +5464,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
* to notify the SCSI midlayer of the command status
*/
scsi_status = lrbp->ucd_rsp_ptr->header.status;
- result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
+ result = ufshcd_scsi_cmd_status(cmd, scsi_status);
/*
* Currently we are only supporting BKOPs exception
@@ -5378,10 +5499,10 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
}
break;
case OCS_ABORTED:
- result |= DID_ABORT << 16;
- break;
case OCS_INVALID_COMMAND_STATUS:
result |= DID_REQUEUE << 16;
+ dev_warn(hba->dev, "OCS %s from controller for tag %d\n",
+ ocs == OCS_ABORTED ? "aborted" : "invalid", tag);
break;
case OCS_INVALID_CMD_TABLE_ATTR:
case OCS_INVALID_PRDT_ATTR:
@@ -5394,17 +5515,19 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
case OCS_GENERAL_CRYPTO_ERROR:
default:
result |= DID_ERROR << 16;
- dev_err(hba->dev,
- "OCS error from controller = %x for tag %d\n",
- ocs, lrbp->task_tag);
+ dev_err(hba->dev, "OCS error from controller = %x for tag %d\n",
+ ocs, tag);
ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
break;
} /* end of switch */
if ((host_byte(result) != DID_OK) &&
- (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
- ufshcd_print_tr(hba, lrbp->task_tag, true);
+ (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs) {
+ if (cqe)
+ ufshcd_hex_dump("UPIU CQE: ", cqe, sizeof(struct cq_entry));
+ ufshcd_print_tr(hba, cmd, true);
+ }
return result;
}
@@ -5438,42 +5561,42 @@ static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
irqreturn_t retval = IRQ_NONE;
+ struct uic_command *cmd;
+
+ guard(spinlock_irqsave)(hba->host->host_lock);
+ cmd = hba->active_uic_cmd;
+ if (!cmd)
+ return retval;
- spin_lock(hba->host->host_lock);
if (ufshcd_is_auto_hibern8_error(hba, intr_status))
hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
- if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
- hba->active_uic_cmd->argument2 |=
- ufshcd_get_uic_cmd_result(hba);
- hba->active_uic_cmd->argument3 =
- ufshcd_get_dme_attr_val(hba);
+ if (intr_status & UIC_COMMAND_COMPL) {
+ cmd->argument2 |= ufshcd_get_uic_cmd_result(hba);
+ cmd->argument3 = ufshcd_get_dme_attr_val(hba);
if (!hba->uic_async_done)
- hba->active_uic_cmd->cmd_active = 0;
- complete(&hba->active_uic_cmd->done);
+ cmd->cmd_active = false;
+ complete(&cmd->done);
retval = IRQ_HANDLED;
}
- if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
- hba->active_uic_cmd->cmd_active = 0;
+ if (intr_status & UFSHCD_UIC_PWR_MASK && hba->uic_async_done) {
+ cmd->cmd_active = false;
complete(hba->uic_async_done);
retval = IRQ_HANDLED;
}
if (retval == IRQ_HANDLED)
- ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
- UFS_CMD_COMP);
- spin_unlock(hba->host->host_lock);
+ ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP);
+
return retval;
}
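
The conversion above uses the scope-based locking guards from <linux/cleanup.h>. A minimal sketch of the two forms that appear in this patch, assuming an arbitrary spinlock:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static void whole_function_scope(spinlock_t *lock, int *counter)
{
	/* Acquired here, released automatically on every return path. */
	guard(spinlock_irqsave)(lock);
	(*counter)++;
}

static void block_scope(spinlock_t *lock, int *counter)
{
	scoped_guard(spinlock_irqsave, lock) {
		/* The lock is held only inside this block. */
		(*counter)++;
	}
	/* The lock has already been released here. */
}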
/* Release the resources allocated for processing a SCSI command. */
-void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp)
+void ufshcd_release_scsi_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd)
{
- struct scsi_cmnd *cmd = lrbp->cmd;
-
scsi_dma_unmap(cmd);
+ ufshcd_crypto_clear_prdt(hba, cmd);
ufshcd_release(hba);
ufshcd_clk_scaling_update_busy(hba);
}
@@ -5487,28 +5610,39 @@ void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
struct cq_entry *cqe)
{
- struct ufshcd_lrb *lrbp;
- struct scsi_cmnd *cmd;
+ struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, task_tag);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
enum utp_ocs ocs;
- lrbp = &hba->lrb[task_tag];
- lrbp->compl_time_stamp = ktime_get();
- cmd = lrbp->cmd;
- if (cmd) {
- if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
- ufshcd_update_monitor(hba, lrbp);
- ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
- cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
- ufshcd_release_scsi_cmd(hba, lrbp);
- /* Do not touch lrbp after scsi done */
- scsi_done(cmd);
- } else if (hba->dev_cmd.complete) {
+ if (WARN_ONCE(!cmd, "cqe->command_desc_base_addr = %#llx\n",
+ le64_to_cpu(cqe->command_desc_base_addr)))
+ return;
+
+ if (hba->monitor.enabled) {
+ lrbp->compl_time_stamp = ktime_get();
+ lrbp->compl_time_stamp_local_clock = local_clock();
+ }
+ if (ufshcd_is_scsi_cmd(cmd)) {
+ if (unlikely(ufshcd_should_inform_monitor(hba, cmd)))
+ ufshcd_update_monitor(hba, cmd);
+ ufshcd_add_command_trace(hba, cmd, UFS_CMD_COMP);
+ cmd->result = ufshcd_transfer_rsp_status(hba, cmd, cqe);
+ ufshcd_release_scsi_cmd(hba, cmd);
+ } else {
if (cqe) {
- ocs = le32_to_cpu(cqe->status) & MASK_OCS;
+ ocs = cqe->overall_status & MASK_OCS;
lrbp->utr_descriptor_ptr->header.ocs = ocs;
+ } else {
+ ocs = lrbp->utr_descriptor_ptr->header.ocs;
}
- complete(hba->dev_cmd.complete);
+ ufshcd_add_query_upiu_trace(
+ hba,
+ ocs == OCS_SUCCESS ? UFS_QUERY_COMP : UFS_QUERY_ERR,
+ (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
+ cmd->result = 0;
}
+ /* Do not touch lrbp after scsi_done() has been called. */
+ scsi_done(cmd);
}
/**
@@ -5536,7 +5670,7 @@ static void ufshcd_clear_polled(struct ufs_hba *hba,
int tag;
for_each_set_bit(tag, completed_reqs, hba->nutrs) {
- struct scsi_cmnd *cmd = hba->lrb[tag].cmd;
+ struct scsi_cmnd *cmd = scsi_host_find_tag(hba->host, tag);
if (!cmd)
continue;
@@ -5556,7 +5690,7 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
u32 tr_doorbell;
struct ufs_hw_queue *hwq;
- if (is_mcq_enabled(hba)) {
+ if (hba->mcq_enabled) {
hwq = &hba->uhq[queue_num];
return ufshcd_mcq_poll_cqe_lock(hba, hwq);
@@ -5581,6 +5715,47 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
return completed_reqs != 0;
}
+static bool ufshcd_mcq_force_compl_one(struct request *rq, void *priv)
+{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ struct scsi_device *sdev = rq->q->queuedata;
+ struct Scsi_Host *shost = sdev->host;
+ struct ufs_hba *hba = shost_priv(shost);
+ struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
+
+ if (blk_mq_is_reserved_rq(rq) || !hwq)
+ return true;
+
+ ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
+
+ /*
+	 * Explicitly complete the commands whose CQEs are not present in
+	 * the CQ.
+ */
+ scoped_guard(spinlock_irqsave, &hwq->cq_lock) {
+ if (!test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
+ set_host_byte(cmd, DID_REQUEUE);
+ ufshcd_release_scsi_cmd(hba, cmd);
+ scsi_done(cmd);
+ }
+ }
+
+ return true;
+}
+
+static bool ufshcd_mcq_compl_one(struct request *rq, void *priv)
+{
+ struct scsi_device *sdev = rq->q->queuedata;
+ struct Scsi_Host *shost = sdev->host;
+ struct ufs_hba *hba = shost_priv(shost);
+ struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
+
+ if (!blk_mq_is_reserved_rq(rq) && hwq)
+ ufshcd_mcq_poll_cqe_lock(hba, hwq);
+
+ return true;
+}
+
/**
* ufshcd_mcq_compl_pending_transfer - MCQ mode function. It is
* invoked from the error handler context or ufshcd_host_reset_and_restore()
@@ -5595,38 +5770,10 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
bool force_compl)
{
- struct ufs_hw_queue *hwq;
- struct ufshcd_lrb *lrbp;
- struct scsi_cmnd *cmd;
- unsigned long flags;
- int tag;
-
- for (tag = 0; tag < hba->nutrs; tag++) {
- lrbp = &hba->lrb[tag];
- cmd = lrbp->cmd;
- if (!ufshcd_cmd_inflight(cmd) ||
- test_bit(SCMD_STATE_COMPLETE, &cmd->state))
- continue;
-
- hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
-
- if (force_compl) {
- ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
- /*
- * For those cmds of which the cqes are not present
- * in the cq, complete them explicitly.
- */
- spin_lock_irqsave(&hwq->cq_lock, flags);
- if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
- set_host_byte(cmd, DID_REQUEUE);
- ufshcd_release_scsi_cmd(hba, lrbp);
- scsi_done(cmd);
- }
- spin_unlock_irqrestore(&hwq->cq_lock, flags);
- } else {
- ufshcd_mcq_poll_cqe_lock(hba, hwq);
- }
- }
+ blk_mq_tagset_busy_iter(&hba->host->tag_set,
+ force_compl ? ufshcd_mcq_force_compl_one :
+ ufshcd_mcq_compl_one,
+ NULL);
}
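
The helpers above follow the blk_mq_tagset_busy_iter() callback contract: the callback runs once per in-flight request and returns true to keep iterating. A minimal sketch under that assumption (the counting callback is hypothetical):

static bool ufshcd_count_inflight_one(struct request *rq, void *priv)
{
	unsigned int *count = priv;

	if (!blk_mq_is_reserved_rq(rq))
		(*count)++;

	return true;	/* continue with the next busy tag */
}

static unsigned int ufshcd_count_inflight(struct ufs_hba *hba)
{
	unsigned int count = 0;

	blk_mq_tagset_busy_iter(&hba->host->tag_set,
				ufshcd_count_inflight_one, &count);
	return count;
}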
/**
@@ -5741,7 +5888,8 @@ static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
* as the device is allowed to manage its own way of handling background
* operations.
*
- * Return: zero on success, non-zero on failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
@@ -5759,7 +5907,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
}
hba->auto_bkops_enabled = true;
- trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
+ trace_ufshcd_auto_bkops_state(hba, "Enabled");
/* No need of URGENT_BKOPS exception from the device */
err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
@@ -5780,7 +5928,8 @@ out:
* host is idle so that BKOPS are managed effectively without any negative
* impacts.
*
- * Return: zero on success, non-zero on failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
@@ -5810,7 +5959,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
}
hba->auto_bkops_enabled = false;
- trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
+ trace_ufshcd_auto_bkops_state(hba, "Disabled");
hba->is_urgent_bkops_lvl_checked = false;
out:
return err;
@@ -5849,12 +5998,11 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
/**
* ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
* @hba: per-adapter instance
- * @status: bkops_status value
*
* Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
* flag in the device to permit background operations if the device
- * bkops_status is greater than or equal to "status" argument passed to
- * this function, disable otherwise.
+ * bkops_status is greater than or equal to hba->urgent_bkops_lvl;
+ * disable it otherwise.
*
* Return: 0 for success, non-zero in case of failure.
*
@@ -5862,11 +6010,11 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
* to know whether auto bkops is enabled or disabled after this function
* returns control to it.
*/
-static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
- enum bkops_status status)
+static int ufshcd_bkops_ctrl(struct ufs_hba *hba)
{
- int err;
+ enum bkops_status status = hba->urgent_bkops_lvl;
u32 curr_status = 0;
+ int err;
err = ufshcd_get_bkops_status(hba, &curr_status);
if (err) {
@@ -5888,23 +6036,6 @@ out:
return err;
}
-/**
- * ufshcd_urgent_bkops - handle urgent bkops exception event
- * @hba: per-adapter instance
- *
- * Enable fBackgroundOpsEn flag in the device to permit background
- * operations.
- *
- * If BKOPs is enabled, this function returns 0, 1 if the bkops in not enabled
- * and negative error value for any other failure.
- *
- * Return: 0 upon success; < 0 upon failure.
- */
-static int ufshcd_urgent_bkops(struct ufs_hba *hba)
-{
- return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
-}
-
static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
@@ -5948,22 +6079,44 @@ out:
__func__, err);
}
-static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
+/*
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
+ */
+int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id)
{
- u32 value;
+ struct utp_upiu_query_v4_0 *upiu_resp;
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err;
- if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
- return;
+ if (hba->dev_info.wspecversion < 0x410)
+ return -EOPNOTSUPP;
- dev_info(hba->dev, "exception Tcase %d\n", value - 80);
+ ufshcd_hold(hba);
+ mutex_lock(&hba->dev_cmd.lock);
- ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
+ ufshcd_init_query(hba, &request, &response,
+ UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_DEV_LVL_EXCEPTION_ID, 0, 0);
- /*
- * A placeholder for the platform vendors to add whatever additional
- * steps required
- */
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
+
+ if (err) {
+ dev_err(hba->dev, "%s: failed to read device level exception %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ upiu_resp = (struct utp_upiu_query_v4_0 *)response;
+ *exception_id = get_unaligned_be64(&upiu_resp->osf3);
+out:
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+
+ return err;
}
static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
@@ -6036,7 +6189,22 @@ int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
return ret;
}
-static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
+int ufshcd_wb_set_resize_en(struct ufs_hba *hba, enum wb_resize_en en_mode)
+{
+ int ret;
+ u8 index;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_WB_BUF_RESIZE_EN, index, 0, &en_mode);
+ if (ret)
+ dev_err(hba->dev, "%s: Enable WB buf resize operation failed %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static bool ufshcd_wb_curr_buff_threshold_check(struct ufs_hba *hba,
u32 avail_buf)
{
u32 cur_buf;
@@ -6118,15 +6286,13 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
}
/*
- * The ufs device needs the vcc to be ON to flush.
* With user-space reduction enabled, it's enough to enable flush
* by checking only the available buffer. The threshold
* defined here is > 90% full.
* With user-space preserved enabled, the current-buffer
* should be checked too because the wb buffer size can reduce
* when disk tends to be full. This info is provided by current
- * buffer (dCurrentWriteBoosterBufferSize). There's no point in
- * keeping vcc on when current buffer is empty.
+ * buffer (dCurrentWriteBoosterBufferSize).
*/
index = ufshcd_wb_get_query_index(hba);
ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
@@ -6141,7 +6307,7 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
if (!hba->dev_info.b_presrv_uspc_en)
return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
- return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
+ return ufshcd_wb_curr_buff_threshold_check(hba, avail_buf);
}
static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
@@ -6173,31 +6339,38 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
u32 status = 0;
hba = container_of(work, struct ufs_hba, eeh_work);
- ufshcd_scsi_block_requests(hba);
err = ufshcd_get_ee_status(hba, &status);
if (err) {
dev_err(hba->dev, "%s: failed to get exception status %d\n",
__func__, err);
- goto out;
+ return;
}
- trace_ufshcd_exception_event(dev_name(hba->dev), status);
+ trace_ufshcd_exception_event(hba, status);
if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
ufshcd_bkops_exception_event_handler(hba);
if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
- ufshcd_temp_exception_event_handler(hba, status);
+ ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
+
+ if (status & hba->ee_drv_mask & MASK_EE_HEALTH_CRITICAL) {
+ hba->critical_health_count++;
+ sysfs_notify(&hba->dev->kobj, NULL, "critical_health");
+ }
+
+ if (status & hba->ee_drv_mask & MASK_EE_DEV_LVL_EXCEPTION) {
+ atomic_inc(&hba->dev_lvl_exception_count);
+ sysfs_notify(&hba->dev->kobj, NULL, "device_lvl_exception_count");
+ }
ufs_debugfs_exception_event(hba, status);
-out:
- ufshcd_scsi_unblock_requests(hba);
}
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
{
- if (is_mcq_enabled(hba))
+ if (hba->mcq_enabled)
ufshcd_mcq_compl_pending_transfer(hba, force_compl);
else
ufshcd_transfer_req_compl(hba);
@@ -6298,13 +6471,14 @@ void ufshcd_schedule_eh_work(struct ufs_hba *hba)
}
}
-static void ufshcd_force_error_recovery(struct ufs_hba *hba)
+void ufshcd_force_error_recovery(struct ufs_hba *hba)
{
spin_lock_irq(hba->host->host_lock);
hba->force_reset = true;
ufshcd_schedule_eh_work(hba);
spin_unlock_irq(hba->host->host_lock);
}
+EXPORT_SYMBOL_GPL(ufshcd_force_error_recovery);
static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
@@ -6357,15 +6531,14 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
ufshcd_suspend_clkscaling(hba);
ufshcd_clk_scaling_allow(hba, false);
}
- ufshcd_scsi_block_requests(hba);
/* Wait for ongoing ufshcd_queuecommand() calls to finish. */
- blk_mq_wait_quiesce_done(&hba->host->tag_set);
+ blk_mq_quiesce_tagset(&hba->host->tag_set);
cancel_work_sync(&hba->eeh_work);
}
static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
{
- ufshcd_scsi_unblock_requests(hba);
+ blk_mq_unquiesce_tagset(&hba->host->tag_set);
ufshcd_release(hba);
if (ufshcd_is_clkscaling_supported(hba))
ufshcd_clk_scaling_suspend(hba, false);
@@ -6444,26 +6617,15 @@ static bool ufshcd_abort_one(struct request *rq, void *priv)
struct scsi_device *sdev = cmd->device;
struct Scsi_Host *shost = sdev->host;
struct ufs_hba *hba = shost_priv(shost);
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
- struct ufs_hw_queue *hwq;
- unsigned long flags;
+
+ if (blk_mq_is_reserved_rq(rq))
+ return true;
*ret = ufshcd_try_to_abort_task(hba, tag);
dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
- hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
+ ufshcd_is_scsi_cmd(cmd) ? cmd->cmnd[0] : -1,
*ret ? "failed" : "succeeded");
- /* Release cmd in MCQ mode if abort succeeds */
- if (is_mcq_enabled(hba) && (*ret == 0)) {
- hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
- if (!hwq)
- return 0;
- spin_lock_irqsave(&hwq->cq_lock, flags);
- if (ufshcd_cmd_inflight(lrbp->cmd))
- ufshcd_release_scsi_cmd(hba, lrbp);
- spin_unlock_irqrestore(&hwq->cq_lock, flags);
- }
-
return *ret == 0;
}
@@ -6511,12 +6673,26 @@ static void ufshcd_err_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eh_work);
dev_info(hba->dev,
- "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
+ "%s started; HBA state %s; powered %d; shutting down %d; saved_err = 0x%x; saved_uic_err = 0x%x; force_reset = %d%s\n",
__func__, ufshcd_state_name[hba->ufshcd_state],
hba->is_powered, hba->shutting_down, hba->saved_err,
hba->saved_uic_err, hba->force_reset,
ufshcd_is_link_broken(hba) ? "; link is broken" : "");
+ /*
+ * Use ufshcd_rpm_get_noresume() here to safely perform link recovery
+ * even if an error occurs during runtime suspend or runtime resume.
+ * This avoids potential deadlocks that could happen if we tried to
+ * resume the device while a PM operation is already in progress.
+ */
+ ufshcd_rpm_get_noresume(hba);
+ if (hba->pm_op_in_progress) {
+ ufshcd_link_recovery(hba);
+ ufshcd_rpm_put(hba);
+ return;
+ }
+ ufshcd_rpm_put(hba);
+
down(&hba->host_sem);
spin_lock_irqsave(hba->host->host_lock, flags);
if (ufshcd_err_handling_should_stop(hba)) {
@@ -6526,9 +6702,14 @@ static void ufshcd_err_handler(struct work_struct *work)
up(&hba->host_sem);
return;
}
- ufshcd_set_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
+
ufshcd_err_handling_prepare(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_set_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
/* Complete requests that have door-bell cleared by h/w */
ufshcd_complete_requests(hba, false);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -6545,7 +6726,8 @@ again:
if (ufshcd_err_handling_should_stop(hba))
goto skip_err_handling;
- if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+ if ((hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) &&
+ !hba->force_reset) {
bool ret;
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -6781,7 +6963,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
bool queue_eh_work = false;
irqreturn_t retval = IRQ_NONE;
- spin_lock(hba->host->host_lock);
+ guard(spinlock_irqsave)(hba->host->host_lock);
hba->errors |= UFSHCD_ERROR_MASK & intr_status;
if (hba->errors & INT_FATAL_ERRORS) {
@@ -6840,7 +7022,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
*/
hba->errors = 0;
hba->uic_error = 0;
- spin_unlock(hba->host->host_lock);
+
return retval;
}
@@ -6939,7 +7121,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
}
/**
- * ufshcd_intr - Main interrupt service routine
+ * ufshcd_threaded_intr - Threaded interrupt service routine
* @irq: irq number
* @__hba: pointer to adapter instance
*
@@ -6947,16 +7129,14 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
-static irqreturn_t ufshcd_intr(int irq, void *__hba)
+static irqreturn_t ufshcd_threaded_intr(int irq, void *__hba)
{
- u32 intr_status, enabled_intr_status = 0;
+ u32 last_intr_status, intr_status, enabled_intr_status = 0;
irqreturn_t retval = IRQ_NONE;
struct ufs_hba *hba = __hba;
int retries = hba->nutrs;
- intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- hba->ufs_stats.last_intr_status = intr_status;
- hba->ufs_stats.last_intr_ts = local_clock();
+ last_intr_status = intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
/*
* There could be max of hba->nutrs reqs in flight and in worst case
@@ -6980,7 +7160,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
__func__,
intr_status,
- hba->ufs_stats.last_intr_status,
+ last_intr_status,
enabled_intr_status);
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
}
@@ -6988,18 +7168,43 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
return retval;
}
+/**
+ * ufshcd_intr - Main interrupt service routine
+ * @irq: irq number
+ * @__hba: pointer to adapter instance
+ *
+ * Return:
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_WAKE_THREAD - If handling is moved to the threaded handler
+ * IRQ_NONE - If invalid interrupt
+ */
+static irqreturn_t ufshcd_intr(int irq, void *__hba)
+{
+ struct ufs_hba *hba = __hba;
+ u32 intr_status, enabled_intr_status;
+
+ /* Move interrupt handling to a thread unless both MCQ and ESI are enabled */
+ if (!hba->mcq_enabled || !hba->mcq_esi_enabled)
+ return IRQ_WAKE_THREAD;
+
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ enabled_intr_status = intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+
+ /* Directly handle interrupts since the MCQ ESI handlers do the hard work */
+ return ufshcd_sl_intr(hba, enabled_intr_status);
+}
+
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
int err = 0;
u32 mask = 1 << tag;
- unsigned long flags;
if (!test_bit(tag, &hba->outstanding_tasks))
goto out;
- spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_utmrl_clear(hba, tag);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
/* poll for max. 1 sec to clear door bell register by h/w */
err = ufshcd_wait_for_register(hba,
@@ -7042,12 +7247,13 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
- /* send command to the controller */
__set_bit(task_tag, &hba->outstanding_tasks);
- ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
spin_unlock_irqrestore(host->host_lock, flags);
+ /* send command to the controller */
+ ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
+
ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
/* wait until the task management command is completed */
@@ -7152,15 +7358,21 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type,
enum query_opcode desc_op)
{
- const u32 tag = hba->reserved_slot;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct scsi_cmnd *cmd = ufshcd_get_dev_mgmt_cmd(hba);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ u32 tag;
int err = 0;
u8 upiu_flags;
- /* Protects use of hba->reserved_slot. */
+ /* Protects use of hba->dev_cmd. */
lockdep_assert_held(&hba->dev_cmd.lock);
- ufshcd_setup_dev_cmd(hba, lrbp, cmd_type, 0, tag);
+ if (WARN_ON_ONCE(!cmd))
+ return -ENOMEM;
+
+ tag = scsi_cmd_to_rq(cmd)->tag;
+
+ ufshcd_setup_dev_cmd(hba, cmd, cmd_type, 0, tag);
ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0);
@@ -7180,12 +7392,9 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
- /*
- * ignore the returning value here - ufshcd_check_query_response is
- * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
- * read the response directly ignoring all errors.
- */
- ufshcd_issue_dev_cmd(hba, lrbp, tag, QUERY_REQ_TIMEOUT);
+ err = ufshcd_issue_dev_cmd(hba, cmd, tag, dev_cmd_timeout);
+ if (err)
+ goto put_dev_mgmt_cmd;
/* just copy the upiu response as it is */
memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
@@ -7205,8 +7414,9 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
err = -EINVAL;
}
}
- ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
- (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
+
+put_dev_mgmt_cmd:
+ ufshcd_put_dev_mgmt_cmd(cmd);
return err;
}
@@ -7293,15 +7503,17 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
* @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
* @dir: DMA direction
*
- * Return: zero on success, non-zero on failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
enum dma_data_direction dir)
{
- const u32 tag = hba->reserved_slot;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct scsi_cmnd *cmd;
+ struct ufshcd_lrb *lrbp;
+ u32 tag;
int err = 0;
int result;
u8 upiu_flags;
@@ -7309,10 +7521,20 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
u16 ehs_len;
int ehs = (hba->capabilities & MASK_EHSLUTRD_SUPPORTED) ? 2 : 0;
- /* Protects use of hba->reserved_slot. */
ufshcd_dev_man_lock(hba);
- ufshcd_setup_dev_cmd(hba, lrbp, DEV_CMD_TYPE_RPMB, UFS_UPIU_RPMB_WLUN, tag);
+ cmd = ufshcd_get_dev_mgmt_cmd(hba);
+
+ if (WARN_ON_ONCE(!cmd)) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ lrbp = scsi_cmd_priv(cmd);
+ tag = scsi_cmd_to_rq(cmd)->tag;
+
+ ufshcd_setup_dev_cmd(hba, cmd, DEV_CMD_TYPE_RPMB, UFS_UPIU_RPMB_WLUN,
+ tag);
ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, ehs);
@@ -7329,8 +7551,11 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
- err = ufshcd_issue_dev_cmd(hba, lrbp, tag, ADVANCED_RPMB_REQ_TIMEOUT);
+ err = ufshcd_issue_dev_cmd(hba, cmd, tag, ADVANCED_RPMB_REQ_TIMEOUT);
+ if (err)
+ goto put_dev_mgmt_cmd;
+ err = ufshcd_dev_cmd_completion(hba, lrbp);
if (!err) {
/* Just copy the upiu response as it is */
memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
@@ -7354,11 +7579,45 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
}
}
+put_dev_mgmt_cmd:
+ ufshcd_put_dev_mgmt_cmd(cmd);
+
+unlock:
ufshcd_dev_man_unlock(hba);
return err ? : result;
}
+static bool ufshcd_clear_lu_cmds(struct request *req, void *priv)
+{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+ struct scsi_device *sdev = cmd->device;
+ struct Scsi_Host *shost = sdev->host;
+ struct ufs_hba *hba = shost_priv(shost);
+ const u64 lun = *(u64 *)priv;
+ const u32 tag = req->tag;
+
+ if (blk_mq_is_reserved_rq(req) || sdev->lun != lun)
+ return true;
+
+ if (ufshcd_clear_cmd(hba, tag) < 0) {
+ dev_err(hba->dev, "%s: failed to clear request %d\n", __func__,
+ tag);
+ return true;
+ }
+
+ if (hba->mcq_enabled) {
+ struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, req);
+
+ if (hwq)
+ ufshcd_mcq_poll_cqe_lock(hba, hwq);
+ return true;
+ }
+
+ ufshcd_compl_one_cqe(hba, tag, NULL);
+ return true;
+}
+
/**
* ufshcd_eh_device_reset_handler() - Reset a single logical unit.
* @cmd: SCSI command pointer
@@ -7367,12 +7626,8 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
*/
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
- unsigned long flags, pending_reqs = 0, not_cleared = 0;
struct Scsi_Host *host;
struct ufs_hba *hba;
- struct ufs_hw_queue *hwq;
- struct ufshcd_lrb *lrbp;
- u32 pos, not_cleared_mask = 0;
int err;
u8 resp = 0xF, lun;
@@ -7381,50 +7636,16 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
- if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
- if (!err)
- err = resp;
- goto out;
- }
-
- if (is_mcq_enabled(hba)) {
- for (pos = 0; pos < hba->nutrs; pos++) {
- lrbp = &hba->lrb[pos];
- if (ufshcd_cmd_inflight(lrbp->cmd) &&
- lrbp->lun == lun) {
- ufshcd_clear_cmd(hba, pos);
- hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
- ufshcd_mcq_poll_cqe_lock(hba, hwq);
- }
- }
- err = 0;
- goto out;
- }
-
- /* clear the commands that were pending for corresponding LUN */
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
- if (hba->lrb[pos].lun == lun)
- __set_bit(pos, &pending_reqs);
- hba->outstanding_reqs &= ~pending_reqs;
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
-
- for_each_set_bit(pos, &pending_reqs, hba->nutrs) {
- if (ufshcd_clear_cmd(hba, pos) < 0) {
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- not_cleared = 1U << pos &
- ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- hba->outstanding_reqs |= not_cleared;
- not_cleared_mask |= not_cleared;
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
-
- dev_err(hba->dev, "%s: failed to clear request %d\n",
- __func__, pos);
- }
+ if (err) {
+ } else if (resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ err = resp;
+ } else {
+ /* clear the commands that were pending for corresponding LUN */
+ blk_mq_tagset_busy_iter(&hba->host->tag_set,
+ ufshcd_clear_lu_cmds,
+ &cmd->device->lun);
}
- __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask);
-out:
hba->req_abort_count = 0;
ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
if (!err) {
@@ -7438,11 +7659,12 @@ out:
static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
{
- struct ufshcd_lrb *lrbp;
int tag;
for_each_set_bit(tag, &bitmap, hba->nutrs) {
- lrbp = &hba->lrb[tag];
+ struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, tag);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+
lrbp->req_abort_skip = true;
}
}
@@ -7450,7 +7672,7 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
/**
* ufshcd_try_to_abort_task - abort a specific task
* @hba: Pointer to adapter instance
- * @tag: Task tag/index to be aborted
+ * @tag: Tag of the task to be aborted
*
* Abort the pending command in device by sending UFS_ABORT_TASK task management
* command, and in host controller by clearing the door-bell register. There can
@@ -7462,15 +7684,15 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
*/
int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
{
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
- int err = 0;
+ struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, tag);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ int err;
int poll_cnt;
u8 resp = 0xF;
- u32 reg;
for (poll_cnt = 100; poll_cnt; poll_cnt--) {
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
- UFS_QUERY_TASK, &resp);
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, tag, UFS_QUERY_TASK,
+ &resp);
if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
/* cmd pending in the device */
dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
@@ -7481,56 +7703,36 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
* cmd not pending in the device, check if it is
* in transition.
*/
- dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
+ dev_info(
+ hba->dev,
+ "%s: cmd with tag %d not pending in the device.\n",
__func__, tag);
- if (is_mcq_enabled(hba)) {
- /* MCQ mode */
- if (ufshcd_cmd_inflight(lrbp->cmd)) {
- /* sleep for max. 200us same delay as in SDB mode */
- usleep_range(100, 200);
- continue;
- }
- /* command completed already */
- dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n",
- __func__, tag);
- goto out;
+ if (!ufshcd_cmd_inflight(cmd)) {
+ dev_info(hba->dev,
+ "%s: cmd with tag=%d completed.\n",
+ __func__, tag);
+ return 0;
}
-
- /* Single Doorbell Mode */
- reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- if (reg & (1 << tag)) {
- /* sleep for max. 200us to stabilize */
- usleep_range(100, 200);
- continue;
- }
- /* command completed already */
- dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
- __func__, tag);
- goto out;
+ usleep_range(100, 200);
} else {
dev_err(hba->dev,
"%s: no response from device. tag = %d, err %d\n",
__func__, tag, err);
- if (!err)
- err = resp; /* service response error */
- goto out;
+ return err ? : resp;
}
}
- if (!poll_cnt) {
- err = -EBUSY;
- goto out;
- }
+ if (!poll_cnt)
+ return -EBUSY;
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
- UFS_ABORT_TASK, &resp);
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, tag, UFS_ABORT_TASK, &resp);
if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
if (!err) {
err = resp; /* service response error */
dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
__func__, tag, err);
}
- goto out;
+ return err;
}
err = ufshcd_clear_cmd(hba, tag);
@@ -7538,7 +7740,6 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
__func__, tag, err);
-out:
return err;
}
@@ -7552,8 +7753,9 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct ufs_hba *hba = shost_priv(host);
- int tag = scsi_cmd_to_rq(cmd)->tag;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct request *rq = scsi_cmd_to_rq(cmd);
+ int tag = rq->tag;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
unsigned long flags;
int err = FAILED;
bool outstanding;
@@ -7561,7 +7763,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
ufshcd_hold(hba);
- if (!is_mcq_enabled(hba)) {
+ if (!hba->mcq_enabled) {
reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
if (!test_bit(tag, &hba->outstanding_reqs)) {
/* If command is already aborted/completed, return FAILED. */
@@ -7582,19 +7784,20 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
* to reduce repeated printouts. For other aborted requests only print
* basic details.
*/
- scsi_print_command(cmd);
+ if (ufshcd_is_scsi_cmd(cmd))
+ scsi_print_command(cmd);
if (!hba->req_abort_count) {
ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
- ufshcd_print_tr(hba, tag, true);
+ ufshcd_print_tr(hba, cmd, true);
} else {
- ufshcd_print_tr(hba, tag, false);
+ ufshcd_print_tr(hba, cmd, false);
}
hba->req_abort_count++;
- if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) {
+ if (!hba->mcq_enabled && !(reg & (1 << tag))) {
/* only execute this code in single doorbell mode */
dev_err(hba->dev,
"%s: cmd was completed, but without a notifying intr, tag = %d",
@@ -7621,7 +7824,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
goto release;
}
- if (is_mcq_enabled(hba)) {
+ if (hba->mcq_enabled) {
/* MCQ mode. Branch off to handle abort for mcq mode */
err = ufshcd_mcq_abort(cmd);
goto release;
@@ -7634,7 +7837,10 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
goto release;
}
- err = ufshcd_try_to_abort_task(hba, tag);
+ if (blk_mq_is_reserved_rq(rq))
+ err = ufshcd_clear_cmd(hba, tag);
+ else
+ err = ufshcd_try_to_abort_task(hba, tag);
if (err) {
dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
@@ -7651,7 +7857,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (outstanding)
- ufshcd_release_scsi_cmd(hba, lrbp);
+ ufshcd_release_scsi_cmd(hba, cmd);
err = SUCCESS;
@@ -7662,6 +7868,29 @@ release:
}
/**
+ * ufshcd_process_probe_result - Process the ufshcd_probe_hba() result.
+ * @hba: UFS host controller instance.
+ * @probe_start: time when the ufshcd_probe_hba() call started.
+ * @ret: ufshcd_probe_hba() return value.
+ */
+static void ufshcd_process_probe_result(struct ufs_hba *hba,
+ ktime_t probe_start, int ret)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (ret)
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ trace_ufshcd_init(hba, ret,
+ ktime_to_us(ktime_sub(ktime_get(), probe_start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+}
+
+/**
* ufshcd_host_reset_and_restore - reset and restore host controller
* @hba: per-adapter instance
*
@@ -7685,13 +7914,20 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
hba->silence_err_logs = false;
/* scale up clocks to max frequency before full reinitialization */
- ufshcd_scale_clks(hba, ULONG_MAX, true);
+ if (ufshcd_is_clkscaling_supported(hba))
+ ufshcd_scale_clks(hba, ULONG_MAX, true);
err = ufshcd_hba_enable(hba);
/* Establish the link again and restore the device */
- if (!err)
- err = ufshcd_probe_hba(hba, false);
+ if (!err) {
+ ktime_t probe_start = ktime_get();
+
+ err = ufshcd_device_init(hba, /*init_dev_params=*/false);
+ if (!err)
+ err = ufshcd_probe_hba(hba, false);
+ ufshcd_process_probe_result(hba, probe_start, err);
+ }
if (err)
dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
@@ -7987,8 +8223,11 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
if (IS_ERR(sdev_rpmb)) {
ret = PTR_ERR(sdev_rpmb);
+ hba->ufs_rpmb_wlun = NULL;
+ dev_err(hba->dev, "%s: RPMB WLUN not found\n", __func__);
goto remove_ufs_device_wlun;
}
+ hba->ufs_rpmb_wlun = sdev_rpmb;
ufshcd_blk_pm_runtime_init(sdev_rpmb);
scsi_device_put(sdev_rpmb);
@@ -8041,6 +8280,9 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
*/
dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
+ dev_info->ext_wb_sup = get_unaligned_be16(desc_buf +
+ DEVICE_DESC_PARAM_EXT_WB_SUP);
+
dev_info->b_presrv_uspc_en =
desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
@@ -8098,29 +8340,52 @@ static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
}
}
-static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
+static void ufshcd_device_lvl_exception_probe(struct ufs_hba *hba, u8 *desc_buf)
{
- struct ufs_dev_info *dev_info = &hba->dev_info;
u32 ext_ufs_feature;
- u32 ext_iid_en = 0;
- int err;
- /* Only UFS-4.0 and above may support EXT_IID */
- if (dev_info->wspecversion < 0x400)
- goto out;
+ if (hba->dev_info.wspecversion < 0x410)
+ return;
ext_ufs_feature = get_unaligned_be32(desc_buf +
- DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
- if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
- goto out;
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+ if (!(ext_ufs_feature & UFS_DEV_LVL_EXCEPTION_SUP))
+ return;
- err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
- if (err)
- dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);
+ atomic_set(&hba->dev_lvl_exception_count, 0);
+ ufshcd_enable_ee(hba, MASK_EE_DEV_LVL_EXCEPTION);
+}
-out:
- dev_info->b_ext_iid_en = ext_iid_en;
+static void ufshcd_set_rtt(struct ufs_hba *hba)
+{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+ u32 rtt = 0;
+ u32 dev_rtt = 0;
+ int host_rtt_cap = hba->vops && hba->vops->max_num_rtt ?
+ hba->vops->max_num_rtt : hba->nortt;
+
+ /* RTT override makes sense only for UFS-4.0 and above */
+ if (dev_info->wspecversion < 0x400)
+ return;
+
+ if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &dev_rtt)) {
+ dev_err(hba->dev, "failed reading bMaxNumOfRTT\n");
+ return;
+ }
+
+ /* do not override if it was already written */
+ if (dev_rtt != DEFAULT_MAX_NUM_RTT)
+ return;
+
+ rtt = min_t(int, dev_info->rtt_cap, host_rtt_cap);
+
+ if (rtt == dev_rtt)
+ return;
+
+ if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt))
+ dev_err(hba->dev, "failed writing bMaxNumOfRTT\n");
}
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
@@ -8171,10 +8436,13 @@ static void ufshcd_update_rtc(struct ufs_hba *hba)
*/
val = ts64.tv_sec - hba->dev_info.rtc_time_baseline;
- ufshcd_rpm_get_sync(hba);
+ /* Skip updating the RTC if the RPM state is not RPM_ACTIVE */
+ if (ufshcd_rpm_get_if_active(hba) <= 0)
+ return;
+
err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED,
0, 0, &val);
- ufshcd_rpm_put_sync(hba);
+ ufshcd_rpm_put(hba);
if (err)
dev_err(hba->dev, "%s: Failed to update rtc %d\n", __func__, err);
@@ -8189,7 +8457,9 @@ static void ufshcd_rtc_work(struct work_struct *work)
hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work);
/* Update RTC only when there are no requests in progress and UFSHCI is operational */
- if (!ufshcd_is_ufs_dev_busy(hba) && hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL)
+ if (!ufshcd_is_ufs_dev_busy(hba) &&
+ hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL &&
+ !hba->clk_gating.active_reqs)
ufshcd_update_rtc(hba);
if (ufshcd_is_ufs_dev_active(hba) && hba->dev_info.rtc_update_period)
@@ -8225,12 +8495,74 @@ static void ufs_init_rtc(struct ufs_hba *hba, u8 *desc_buf)
dev_info->rtc_update_period = 0;
}
+/**
+ * ufshcd_create_device_id - Generate unique device identifier string
+ * @hba: per-adapter instance
+ * @desc_buf: device descriptor buffer
+ *
+ * Creates a unique device ID string combining manufacturer ID, spec version,
+ * model name, serial number (as hex), device version, and manufacture date.
+ *
+ * Return: Allocated device ID string on success, NULL on failure.
+ */
+static char *ufshcd_create_device_id(struct ufs_hba *hba, u8 *desc_buf)
+{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+ u16 manufacture_date;
+ u16 device_version;
+ u8 *serial_number;
+ char *serial_hex;
+ char *device_id;
+ u8 serial_index;
+ int serial_len;
+ int ret;
+
+ serial_index = desc_buf[DEVICE_DESC_PARAM_SN];
+
+ ret = ufshcd_read_string_desc(hba, serial_index, &serial_number, SD_RAW);
+ if (ret < 0) {
+ dev_err(hba->dev, "Failed reading Serial Number. err = %d\n", ret);
+ return NULL;
+ }
+
+ device_version = get_unaligned_be16(&desc_buf[DEVICE_DESC_PARAM_DEV_VER]);
+ manufacture_date = get_unaligned_be16(&desc_buf[DEVICE_DESC_PARAM_MANF_DATE]);
+
+ serial_len = ret;
+ /* Allocate buffer for hex string: 2 chars per byte + null terminator */
+ serial_hex = kzalloc(serial_len * 2 + 1, GFP_KERNEL);
+ if (!serial_hex) {
+ kfree(serial_number);
+ return NULL;
+ }
+
+ bin2hex(serial_hex, serial_number, serial_len);
+
+ /*
+ * Device ID format is ABI with secure world - do not change without firmware
+ * coordination.
+ */
+ device_id = kasprintf(GFP_KERNEL, "%04X-%04X-%s-%s-%04X-%04X",
+ dev_info->wmanufacturerid, dev_info->wspecversion,
+ dev_info->model, serial_hex, device_version,
+ manufacture_date);
+
+ kfree(serial_hex);
+ kfree(serial_number);
+
+ if (!device_id)
+ dev_warn(hba->dev, "Failed to allocate unique device ID\n");
+
+ return device_id;
+}
+
static int ufs_get_device_desc(struct ufs_hba *hba)
{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+ struct Scsi_Host *shost = hba->host;
int err;
u8 model_index;
u8 *desc_buf;
- struct ufs_dev_info *dev_info = &hba->dev_info;
desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
if (!desc_buf) {
@@ -8258,6 +8590,24 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
+ /*
+ * According to the UFS standard, the UFS device queue depth
+ * (bQueueDepth) must be in the range 1..255 if the shared queueing
+ * architecture is supported. bQueueDepth is zero if the shared queueing
+ * architecture is not supported.
+ */
+ if (dev_info->bqueuedepth)
+ shost->cmd_per_lun = min(hba->nutrs, dev_info->bqueuedepth) -
+ UFSHCD_NUM_RESERVED;
+ else
+ shost->cmd_per_lun = shost->can_queue;
+
+ dev_info->rtt_cap = desc_buf[DEVICE_DESC_PARAM_RTT_CAP];
+
+ dev_info->hid_sup = get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP) &
+ UFS_DEV_HID_SUPPORT;
+
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
err = ufshcd_read_string_desc(hba, model_index,
@@ -8268,6 +8618,9 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
goto out;
}
+ /* Generate unique device ID */
+ dev_info->device_id = ufshcd_create_device_id(hba, desc_buf);
+
hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
@@ -8277,10 +8630,14 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
ufshcd_temp_notif_probe(hba, desc_buf);
+ if (dev_info->wspecversion >= 0x410) {
+ hba->critical_health_count = 0;
+ ufshcd_enable_ee(hba, MASK_EE_HEALTH_CRITICAL);
+ }
+
ufs_init_rtc(hba, desc_buf);
- if (hba->ext_iid_sup)
- ufshcd_ext_iid_probe(hba, desc_buf);
+ ufshcd_device_lvl_exception_probe(hba, desc_buf);
/*
* ufshcd_read_string_desc returns size of the string
@@ -8299,6 +8656,8 @@ static void ufs_put_device_desc(struct ufs_hba *hba)
kfree(dev_info->model);
dev_info->model = NULL;
+ kfree(dev_info->device_id);
+ dev_info->device_id = NULL;
}
/**
@@ -8371,6 +8730,31 @@ out:
return ret;
}
+/**
+ * ufshcd_quirk_override_pa_h8time - Ensures proper adjustment of PA_HIBERN8TIME.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require specific adjustments to the PA_HIBERN8TIME parameter
+ * to ensure proper hibernation timing. This function retrieves the current
+ * PA_HIBERN8TIME value and increments it by one step (100 us).
+ */
+static void ufshcd_quirk_override_pa_h8time(struct ufs_hba *hba)
+{
+ u32 pa_h8time;
+ int ret;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), &pa_h8time);
+ if (ret) {
+ dev_err(hba->dev, "Failed to get PA_HIBERN8TIME: %d\n", ret);
+ return;
+ }
+
+ /* Increment by 1 to increase hibernation time by 100 µs */
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), pa_h8time + 1);
+ if (ret)
+ dev_err(hba->dev, "Failed updating PA_HIBERN8TIME: %d\n", ret);
+}
+
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
ufshcd_vops_apply_dev_quirks(hba);
@@ -8381,6 +8765,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME)
+ ufshcd_quirk_override_pa_h8time(hba);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
@@ -8414,6 +8801,8 @@ static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
hba->dev_info.max_lu_supported = 8;
+ hba->dev_info.rpmb_io_size = desc_buf[GEOMETRY_DESC_PARAM_RPMB_RW_SIZE];
+
out:
kfree(desc_buf);
return err;
@@ -8510,6 +8899,8 @@ static int ufshcd_device_params_init(struct ufs_hba *hba)
goto out;
}
+ ufshcd_set_rtt(hba);
+
ufshcd_get_ref_clk_gating_wait(hba);
if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
@@ -8533,7 +8924,8 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
struct ufs_dev_info *dev_info = &hba->dev_info;
struct utp_upiu_query_v4_0 *upiu_data;
- if (dev_info->wspecversion < 0x400)
+ if (dev_info->wspecversion < 0x400 ||
+ hba->dev_quirks & UFS_DEVICE_QUIRK_NO_TIMESTAMP_SUPPORT)
return;
ufshcd_dev_man_lock(hba);
@@ -8548,7 +8940,7 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
if (err)
dev_err(hba->dev, "%s: failed to set timestamp %d\n",
@@ -8587,8 +8979,17 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
ufshcd_init_clk_scaling_sysfs(hba);
}
+ /*
+ * The RTC update code accesses the hba->ufs_device_wlun->sdev_gendev
+ * pointer and hence must only be started after the WLUN pointer has
+ * been initialized by ufshcd_scsi_add_wlus().
+ */
+ schedule_delayed_work(&hba->ufs_rtc_update_work,
+ msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
+
ufs_bsg_probe(hba);
scsi_scan_host(hba->host);
+ ufs_rpmb_probe(hba);
out:
return ret;
@@ -8606,8 +9007,6 @@ static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
hba->utrdl_dma_addr);
-
- devm_kfree(hba->dev, hba->lrb);
}
static int ufshcd_alloc_mcq(struct ufs_hba *hba)
@@ -8615,7 +9014,7 @@ static int ufshcd_alloc_mcq(struct ufs_hba *hba)
int ret;
int old_nutrs = hba->nutrs;
- ret = ufshcd_mcq_decide_queue_depth(hba);
+ ret = ufshcd_get_hba_mac(hba);
if (ret < 0)
return ret;
@@ -8640,6 +9039,8 @@ static int ufshcd_alloc_mcq(struct ufs_hba *hba)
if (ret)
goto err;
+ hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
+
return 0;
err:
hba->nutrs = old_nutrs;
@@ -8649,34 +9050,57 @@ err:
static void ufshcd_config_mcq(struct ufs_hba *hba)
{
int ret;
- u32 intrs;
ret = ufshcd_mcq_vops_config_esi(hba);
+ hba->mcq_esi_enabled = !ret;
dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
- intrs = UFSHCD_ENABLE_MCQ_INTRS;
- if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
- intrs &= ~MCQ_CQ_EVENT_STATUS;
- ufshcd_enable_intr(hba, intrs);
ufshcd_mcq_make_queues_operational(hba);
ufshcd_mcq_config_mac(hba, hba->nutrs);
- hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
- hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
-
- ufshcd_mcq_enable(hba);
- hba->mcq_enabled = true;
-
dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
hba->nutrs);
}
+static int ufshcd_post_device_init(struct ufs_hba *hba)
+{
+ int ret;
+
+ ufshcd_tune_unipro_params(hba);
+
+ /* UFS device is also active now */
+ ufshcd_set_ufs_dev_active(hba);
+ ufshcd_force_reset_auto_bkops(hba);
+
+ ufshcd_set_timestamp_attr(hba);
+
+ if (!hba->max_pwr_info.is_valid)
+ return 0;
+
+ /*
+ * Set the right value to bRefClkFreq before attempting to
+ * switch to HS gears.
+ */
+ if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
+ ufshcd_set_dev_ref_clk(hba);
+ /* Gear up to HS gear. */
+ ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
{
int ret;
- struct Scsi_Host *host = hba->host;
+
+ WARN_ON_ONCE(!hba->scsi_host_added);
hba->ufshcd_state = UFSHCD_STATE_RESET;
@@ -8694,8 +9118,10 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
ufshcd_set_link_active(hba);
/* Reconfigure MCQ upon reset */
- if (is_mcq_enabled(hba) && !init_dev_params)
+ if (hba->mcq_enabled && !init_dev_params) {
ufshcd_config_mcq(hba);
+ ufshcd_mcq_enable(hba);
+ }
/* Verify device initialization by sending NOP OUT UPIU */
ret = ufshcd_verify_dev_init(hba);
@@ -8715,55 +9141,14 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
ret = ufshcd_device_params_init(hba);
if (ret)
return ret;
- if (is_mcq_supported(hba) && !hba->scsi_host_added) {
- ret = ufshcd_alloc_mcq(hba);
- if (!ret) {
- ufshcd_config_mcq(hba);
- } else {
- /* Continue with SDB mode */
- use_mcq_mode = false;
- dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
- ret);
- }
- ret = scsi_add_host(host, hba->dev);
- if (ret) {
- dev_err(hba->dev, "scsi_add_host failed\n");
- return ret;
- }
- hba->scsi_host_added = true;
- } else if (is_mcq_supported(hba)) {
- /* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */
+ if (is_mcq_supported(hba) &&
+ hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
ufshcd_config_mcq(hba);
+ ufshcd_mcq_enable(hba);
}
}
- ufshcd_tune_unipro_params(hba);
-
- /* UFS device is also active now */
- ufshcd_set_ufs_dev_active(hba);
- ufshcd_force_reset_auto_bkops(hba);
-
- ufshcd_set_timestamp_attr(hba);
- schedule_delayed_work(&hba->ufs_rtc_update_work,
- msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
-
- /* Gear up to HS gear if supported */
- if (hba->max_pwr_info.is_valid) {
- /*
- * Set the right value to bRefClkFreq before attempting to
- * switch to HS gears.
- */
- if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
- ufshcd_set_dev_ref_clk(hba);
- ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
- if (ret) {
- dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
- __func__, ret);
- return ret;
- }
- }
-
- return 0;
+ return ufshcd_post_device_init(hba);
}
/**
@@ -8777,33 +9162,26 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
*/
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
{
- ktime_t start = ktime_get();
- unsigned long flags;
int ret;
- ret = ufshcd_device_init(hba, init_dev_params);
- if (ret)
- goto out;
-
if (!hba->pm_op_in_progress &&
(hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
/* Reset the device and controller before doing reinit */
ufshcd_device_reset(hba);
ufs_put_device_desc(hba);
ufshcd_hba_stop(hba);
- ufshcd_vops_reinit_notify(hba);
ret = ufshcd_hba_enable(hba);
if (ret) {
dev_err(hba->dev, "Host controller enable failed\n");
ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
- goto out;
+ return ret;
}
/* Reinit the device */
ret = ufshcd_device_init(hba, init_dev_params);
if (ret)
- goto out;
+ return ret;
}
ufshcd_print_pwr_info(hba);
@@ -8823,18 +9201,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
ufshcd_write_ee_control(hba);
ufshcd_configure_auto_hibern8(hba);
-out:
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (ret)
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
- else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- trace_ufshcd_init(dev_name(hba->dev), ret,
- ktime_to_us(ktime_sub(ktime_get(), start)),
- hba->curr_dev_pwr_mode, hba->uic_link_state);
- return ret;
+ return 0;
}
/**
@@ -8845,11 +9212,14 @@ out:
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
struct ufs_hba *hba = (struct ufs_hba *)data;
+ ktime_t probe_start;
int ret;
down(&hba->host_sem);
/* Initialize hba, detect and initialize UFS device */
+ probe_start = ktime_get();
ret = ufshcd_probe_hba(hba, true);
+ ufshcd_process_probe_result(hba, probe_start, ret);
up(&hba->host_sem);
if (ret)
goto out;
@@ -8883,7 +9253,7 @@ static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
__func__, hba->outstanding_tasks);
- return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
+ return scsi_host_busy(hba->host) ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
}
static const struct attribute_group *ufshcd_driver_groups[] = {
@@ -8907,11 +9277,15 @@ static const struct scsi_host_template ufshcd_driver_template = {
.name = UFSHCD,
.proc_name = UFSHCD,
.map_queues = ufshcd_map_queues,
+ .cmd_size = sizeof(struct ufshcd_lrb),
+ .init_cmd_priv = ufshcd_init_cmd_priv,
.queuecommand = ufshcd_queuecommand,
+ .queue_reserved_command = ufshcd_queue_reserved_command,
+ .nr_reserved_cmds = UFSHCD_NUM_RESERVED,
.mq_poll = ufshcd_poll,
- .slave_alloc = ufshcd_slave_alloc,
- .slave_configure = ufshcd_slave_configure,
- .slave_destroy = ufshcd_slave_destroy,
+ .sdev_init = ufshcd_sdev_init,
+ .sdev_configure = ufshcd_sdev_configure,
+ .sdev_destroy = ufshcd_sdev_destroy,
.change_queue_depth = ufshcd_change_queue_depth,
.eh_abort_handler = ufshcd_abort,
.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
@@ -8919,8 +9293,6 @@ static const struct scsi_host_template ufshcd_driver_template = {
.eh_timed_out = ufshcd_eh_timed_out,
.this_id = -1,
.sg_tablesize = SG_ALL,
- .cmd_per_lun = UFSHCD_CMD_PER_LUN,
- .can_queue = UFSHCD_CAN_QUEUE,
.max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
.max_sectors = SZ_1M / SECTOR_SIZE,
.max_host_blocked = 1,
@@ -9099,7 +9471,6 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
int ret = 0;
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
- unsigned long flags;
ktime_t start = ktime_get();
bool clk_state_changed = false;
@@ -9149,16 +9520,15 @@ out:
if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
clk_disable_unprepare(clki->clk);
}
- } else if (!ret && on) {
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ } else if (!ret && on && hba->clk_gating.is_initialized) {
+ scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
+ hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
if (clk_state_changed)
- trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_profile_clk_gating(hba,
(on ? "on" : "off"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
return ret;
@@ -9522,11 +9892,11 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
}
/*
- * Some UFS devices require delay after VCC power rail is turned-off.
+ * All UFS devices require a delay after the VCC power rail is turned off.
*/
- if (vcc_off && hba->vreg_info.vcc &&
- hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
- usleep_range(5000, 5100);
+ if (vcc_off && hba->vreg_info.vcc && !hba->vreg_info.vcc->always_on)
+ usleep_range(hba->vcc_off_delay_us,
+ hba->vcc_off_delay_us + 100);
}
#ifdef CONFIG_PM
@@ -9625,7 +9995,7 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
* allow background operations if bkops status shows
* that performance might be impacted.
*/
- ret = ufshcd_urgent_bkops(hba);
+ ret = ufshcd_bkops_ctrl(hba);
if (ret) {
/*
* If return err in suspend flow, IO will hang.
@@ -9814,7 +10184,7 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
* If BKOPs operations are urgently needed at this moment then
* keep auto-bkops enabled or else disable it.
*/
- ufshcd_urgent_bkops(hba);
+ ufshcd_bkops_ctrl(hba);
if (hba->ee_usr_mask)
ufshcd_write_ee_control(hba);
@@ -9858,7 +10228,7 @@ static int ufshcd_wl_runtime_suspend(struct device *dev)
if (ret)
dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
- trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
+ trace_ufshcd_wl_runtime_suspend(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -9878,7 +10248,7 @@ static int ufshcd_wl_runtime_resume(struct device *dev)
if (ret)
dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
- trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
+ trace_ufshcd_wl_runtime_resume(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -9910,7 +10280,7 @@ static int ufshcd_wl_suspend(struct device *dev)
out:
if (!ret)
hba->is_sys_suspended = true;
- trace_ufshcd_wl_suspend(dev_name(dev), ret,
+ trace_ufshcd_wl_suspend(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -9933,7 +10303,7 @@ static int ufshcd_wl_resume(struct device *dev)
if (ret)
dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
out:
- trace_ufshcd_wl_resume(dev_name(dev), ret,
+ trace_ufshcd_wl_resume(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
if (!ret)
@@ -9971,7 +10341,7 @@ static int ufshcd_suspend(struct ufs_hba *hba)
}
if (ufshcd_is_clkgating_allowed(hba)) {
hba->clk_gating.state = CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
}
@@ -10044,7 +10414,7 @@ int ufshcd_system_suspend(struct device *dev)
ret = ufshcd_suspend(hba);
out:
- trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
+ trace_ufshcd_system_suspend(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
@@ -10072,7 +10442,7 @@ int ufshcd_system_resume(struct device *dev)
ret = ufshcd_resume(hba);
out:
- trace_ufshcd_system_resume(dev_name(hba->dev), ret,
+ trace_ufshcd_system_resume(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -10098,7 +10468,7 @@ int ufshcd_runtime_suspend(struct device *dev)
ret = ufshcd_suspend(hba);
- trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
+ trace_ufshcd_runtime_suspend(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
@@ -10125,7 +10495,7 @@ int ufshcd_runtime_resume(struct device *dev)
ret = ufshcd_resume(hba);
- trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
+ trace_ufshcd_runtime_resume(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
@@ -10148,7 +10518,9 @@ static void ufshcd_wl_shutdown(struct device *dev)
shost_for_each_device(sdev, hba->host) {
if (sdev == hba->ufs_device_wlun)
continue;
- scsi_device_quiesce(sdev);
+ mutex_lock(&sdev->state_mutex);
+ scsi_device_set_state(sdev, SDEV_OFFLINE);
+ mutex_unlock(&sdev->state_mutex);
}
__ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
@@ -10173,11 +10545,14 @@ void ufshcd_remove(struct ufs_hba *hba)
ufshcd_rpm_get_sync(hba);
ufs_hwmon_remove(hba);
ufs_bsg_remove(hba);
+ ufs_rpmb_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
+ cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
blk_mq_destroy_queue(hba->tmf_queue);
blk_put_queue(hba->tmf_queue);
blk_mq_free_tag_set(&hba->tmf_tag_set);
- scsi_remove_host(hba->host);
+ if (hba->scsi_host_added)
+ scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba);
@@ -10220,9 +10595,6 @@ int ufshcd_system_restore(struct device *dev)
*/
ufshcd_readl(hba, REG_UTP_TASK_REQ_LIST_BASE_H);
- /* Resuming from hibernate, assume that link was OFF */
- ufshcd_set_link_off(hba);
-
return 0;
}
@@ -10236,16 +10608,6 @@ EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
#endif /* CONFIG_PM_SLEEP */
/**
- * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
- * @hba: pointer to Host Bus Adapter (HBA)
- */
-void ufshcd_dealloc_host(struct ufs_hba *hba)
-{
- scsi_host_put(hba->host);
-}
-EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
-
-/**
* ufshcd_set_dma_mask - Set dma mask based on the controller
* addressing capability
* @hba: per adapter instance
@@ -10254,6 +10616,8 @@ EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
*/
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
+ if (hba->vops && hba->vops->set_dma_mask)
+ return hba->vops->set_dma_mask(hba);
if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
return 0;
@@ -10262,11 +10626,25 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
}
/**
+ * ufshcd_devres_release - devres cleanup handler, invoked during release of
+ * hba->dev
+ * @host: pointer to SCSI host
+ */
+static void ufshcd_devres_release(void *host)
+{
+ scsi_host_put(host);
+}
+
+/**
* ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
* @dev: pointer to device handle
* @hba_handle: driver private handle
*
* Return: 0 on success, non-zero value on failure.
+ *
+ * NOTE: There is no corresponding ufshcd_dealloc_host() because this function
+ * keeps track of its allocations using devres and deallocates everything on
+ * device removal automatically.
*/
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
@@ -10288,6 +10666,12 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = -ENOMEM;
goto out_error;
}
+
+ err = devm_add_action_or_reset(dev, ufshcd_devres_release,
+ host);
+ if (err)
+ return err;
+
host->nr_maps = HCTX_TYPE_POLL + 1;
hba = shost_priv(host);
hba->host = host;
@@ -10317,20 +10701,89 @@ static const struct blk_mq_ops ufshcd_tmf_ops = {
.queue_rq = ufshcd_queue_tmf,
};
+static int ufshcd_add_scsi_host(struct ufs_hba *hba)
+{
+ int err;
+
+ WARN_ON_ONCE(!hba->host->can_queue);
+ WARN_ON_ONCE(!hba->host->cmd_per_lun);
+
+ if (is_mcq_supported(hba)) {
+ ufshcd_mcq_enable(hba);
+ err = ufshcd_alloc_mcq(hba);
+ if (!err) {
+ ufshcd_config_mcq(hba);
+ } else {
+ /* Continue with SDB mode */
+ ufshcd_mcq_disable(hba);
+ use_mcq_mode = false;
+ dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
+ err);
+ }
+ }
+ if (!is_mcq_supported(hba) && !hba->lsdb_sup) {
+ dev_err(hba->dev,
+ "%s: failed to initialize (legacy doorbell mode not supported)\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ err = scsi_add_host(hba->host, hba->dev);
+ if (err) {
+ dev_err(hba->dev, "scsi_add_host failed\n");
+ return err;
+ }
+ hba->scsi_host_added = true;
+
+ hba->tmf_tag_set = (struct blk_mq_tag_set) {
+ .nr_hw_queues = 1,
+ .queue_depth = hba->nutmrs,
+ .ops = &ufshcd_tmf_ops,
+ };
+ err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
+ if (err < 0)
+ goto remove_scsi_host;
+ hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL);
+ if (IS_ERR(hba->tmf_queue)) {
+ err = PTR_ERR(hba->tmf_queue);
+ goto free_tmf_tag_set;
+ }
+ hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
+ sizeof(*hba->tmf_rqs), GFP_KERNEL);
+ if (!hba->tmf_rqs) {
+ err = -ENOMEM;
+ goto free_tmf_queue;
+ }
+
+ return 0;
+
+free_tmf_queue:
+ blk_mq_destroy_queue(hba->tmf_queue);
+ blk_put_queue(hba->tmf_queue);
+
+free_tmf_tag_set:
+ blk_mq_free_tag_set(&hba->tmf_tag_set);
+
+remove_scsi_host:
+ if (hba->scsi_host_added)
+ scsi_remove_host(hba->host);
+
+ return err;
+}
+
/**
* ufshcd_init - Driver initialization routine
* @hba: per-adapter instance
* @mmio_base: base register address
* @irq: Interrupt line of device
*
- * Return: 0 on success, non-zero value on failure.
+ * Return: 0 on success; < 0 on failure.
*/
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
int err;
struct Scsi_Host *host = hba->host;
struct device *dev = hba->dev;
- char eh_wq_name[sizeof("ufs_eh_wq_00")];
/*
* dev_set_drvdata() must be called before any callbacks are registered
@@ -10350,6 +10803,37 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->irq = irq;
hba->vps = &ufs_hba_vps;
+ /*
+ * Initialize clk_gating.lock early since it is being used in
+ * ufshcd_setup_clocks()
+ */
+ spin_lock_init(&hba->clk_gating.lock);
+
+ /* Initialize mutex for PM QoS request synchronization */
+ mutex_init(&hba->pm_qos_mutex);
+
+ /*
+ * Set the default power management level for runtime and system PM.
+ * Host controller drivers can override them in their
+ * 'ufs_hba_variant_ops::init' callback.
+ *
+ * Default power saving mode is to keep UFS link in Hibern8 state
+ * and UFS device in sleep state.
+ */
+ hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+ hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+
+ /*
+ * Most UFS devices require a 1 ms delay after VCC is powered off before
+ * it can be powered on again. Set the default to 2 ms. Platform
+ * drivers can override this setting as needed.
+ */
+ hba->vcc_off_delay_us = 2000;
+
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
@@ -10382,7 +10866,11 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
ufshcd_host_memory_configure(hba);
host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
- host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
+ /*
+ * Set the queue depth for WLUNs. ufs_get_device_desc() will increase
+ * host->cmd_per_lun to a larger value.
+ */
+ host->cmd_per_lun = 1;
host->max_id = UFSHCD_MAX_ID;
host->max_lun = UFS_MAX_LUNS;
host->max_channel = UFSHCD_MAX_CHANNEL;
@@ -10397,9 +10885,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->max_pwr_info.is_valid = false;
/* Initialize work queues */
- snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
- hba->host->host_no);
- hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
+ hba->eh_wq = alloc_ordered_workqueue("ufs_eh_wq_%d", WQ_MEM_RECLAIM,
+ hba->host->host_no);
if (!hba->eh_wq) {
dev_err(hba->dev, "%s: failed to create eh workqueue\n",
__func__);
@@ -10421,6 +10908,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
mutex_init(&hba->ee_ctrl_mutex);
mutex_init(&hba->wb_mutex);
+
init_rwsem(&hba->clk_scaling_lock);
ufshcd_init_clk_gating(hba);
@@ -10442,7 +10930,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
/* IRQ registration */
- err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+ err = devm_request_threaded_irq(dev, irq, ufshcd_intr, ufshcd_threaded_intr,
+ IRQF_ONESHOT | IRQF_SHARED, UFSHCD, hba);
if (err) {
dev_err(hba->dev, "request irq failed\n");
goto out_disable;
@@ -10450,35 +10939,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->is_irq_enabled = true;
}
- if (!is_mcq_supported(hba)) {
- err = scsi_add_host(host, hba->dev);
- if (err) {
- dev_err(hba->dev, "scsi_add_host failed\n");
- goto out_disable;
- }
- }
-
- hba->tmf_tag_set = (struct blk_mq_tag_set) {
- .nr_hw_queues = 1,
- .queue_depth = hba->nutmrs,
- .ops = &ufshcd_tmf_ops,
- .flags = BLK_MQ_F_NO_SCHED,
- };
- err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
- if (err < 0)
- goto out_remove_scsi_host;
- hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL);
- if (IS_ERR(hba->tmf_queue)) {
- err = PTR_ERR(hba->tmf_queue);
- goto free_tmf_tag_set;
- }
- hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
- sizeof(*hba->tmf_rqs), GFP_KERNEL);
- if (!hba->tmf_rqs) {
- err = -ENOMEM;
- goto free_tmf_queue;
- }
-
/* Reset the attached device */
ufshcd_device_reset(hba);
@@ -10490,21 +10950,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
dev_err(hba->dev, "Host controller enable failed\n");
ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
- goto free_tmf_queue;
+ goto out_disable;
}
- /*
- * Set the default power management level for runtime and system PM.
- * Default power saving mode is to keep UFS link in Hibern8 state
- * and UFS device in sleep state.
- */
- hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
- UFS_SLEEP_PWR_MODE,
- UIC_LINK_HIBERN8_STATE);
- hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
- UFS_SLEEP_PWR_MODE,
- UIC_LINK_HIBERN8_STATE);
-
INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work);
INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work);
@@ -10514,9 +10962,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
}
+ err = ufshcd_add_scsi_host(hba);
+ if (err)
+ goto out_disable;
+
/* Hold auto suspend until async scan completes */
pm_runtime_get_sync(dev);
- atomic_set(&hba->scsi_block_reqs_cnt, 0);
+
/*
* We are assuming that device wasn't put in sleep/power-down
* state exclusively during the boot stage before kernel.
@@ -10525,25 +10977,57 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
*/
ufshcd_set_ufs_dev_active(hba);
- async_schedule(ufshcd_async_scan, hba);
+ /* Initialize hba, detect and initialize UFS device */
+ ktime_t probe_start = ktime_get();
+
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+
+ err = ufshcd_link_startup(hba);
+ if (err)
+ goto out_disable;
+
+ if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
+ goto initialized;
+
+ /* Debug counters initialization */
+ ufshcd_clear_dbg_ufs_stats(hba);
+
+ /* UniPro link is active now */
+ ufshcd_set_link_active(hba);
+
+ /* Verify device initialization by sending NOP OUT UPIU */
+ err = ufshcd_verify_dev_init(hba);
+ if (err)
+ goto out_disable;
+
+ /* Initiate UFS initialization and wait until completion */
+ err = ufshcd_complete_dev_init(hba);
+ if (err)
+ goto out_disable;
+
+ err = ufshcd_device_params_init(hba);
+ if (err)
+ goto out_disable;
+
+ err = ufshcd_post_device_init(hba);
+
+initialized:
+ ufshcd_process_probe_result(hba, probe_start, err);
+ if (err)
+ goto out_disable;
+
ufs_sysfs_add_nodes(hba->dev);
+ async_schedule(ufshcd_async_scan, hba);
device_enable_async_suspend(dev);
ufshcd_pm_qos_init(hba);
return 0;
-free_tmf_queue:
- blk_mq_destroy_queue(hba->tmf_queue);
- blk_put_queue(hba->tmf_queue);
-free_tmf_tag_set:
- blk_mq_free_tag_set(&hba->tmf_tag_set);
-out_remove_scsi_host:
- scsi_remove_host(hba->host);
out_disable:
hba->is_irq_enabled = false;
ufshcd_hba_exit(hba);
out_error:
- return err;
+ return err > 0 ? -EIO : err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);