Diffstat (limited to 'drivers/ufs/core/ufshcd.c')
-rw-r--r--  drivers/ufs/core/ufshcd.c  471
1 file changed, 287 insertions(+), 184 deletions(-)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index e7e79f515e14..983fae84d9e8 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -98,7 +98,7 @@
/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
-/* UFSHC 4.0 compliant HC support this mode, refer param_set_mcq_mode() */
+/* UFSHC 4.0 compliant HC support this mode. */
static bool use_mcq_mode = true;
static bool is_mcq_supported(struct ufs_hba *hba)
@@ -106,23 +106,7 @@ static bool is_mcq_supported(struct ufs_hba *hba)
return hba->mcq_sup && use_mcq_mode;
}
-static int param_set_mcq_mode(const char *val, const struct kernel_param *kp)
-{
- int ret;
-
- ret = param_set_bool(val, kp);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static const struct kernel_param_ops mcq_mode_ops = {
- .set = param_set_mcq_mode,
- .get = param_get_bool,
-};
-
-module_param_cb(use_mcq_mode, &mcq_mode_ops, &use_mcq_mode, 0644);
+module_param(use_mcq_mode, bool, 0644);
MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
@@ -173,7 +157,6 @@ EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
enum {
UFSHCD_MAX_CHANNEL = 0,
UFSHCD_MAX_ID = 1,
- UFSHCD_NUM_RESERVED = 1,
UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
};
@@ -301,7 +284,6 @@ static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
struct ufs_vreg *vreg);
-static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
@@ -1205,7 +1187,7 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
bool timeout = false, do_last_check = false;
ktime_t start;
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
/*
* Wait for all the outstanding tasks/transfer requests.
@@ -1326,7 +1308,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
}
/* let's not get into low power until clock scaling is completed */
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
out:
return ret;
@@ -1656,7 +1638,7 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
goto out;
ufshcd_rpm_get_sync(hba);
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
hba->clk_scaling.is_enabled = value;
@@ -1739,7 +1721,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->clk_gating.state == CLKS_ON) {
spin_unlock_irqrestore(hba->host->host_lock, flags);
- goto unblock_reqs;
+ return;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1762,25 +1744,21 @@ static void ufshcd_ungate_work(struct work_struct *work)
}
hba->clk_gating.is_suspended = false;
}
-unblock_reqs:
- ufshcd_scsi_unblock_requests(hba);
}
/**
* ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
* Also, exit from hibern8 mode and set the link as active.
* @hba: per adapter instance
- * @async: This indicates whether caller should ungate clocks asynchronously.
*/
-int ufshcd_hold(struct ufs_hba *hba, bool async)
+void ufshcd_hold(struct ufs_hba *hba)
{
- int rc = 0;
bool flush_result;
unsigned long flags;
if (!ufshcd_is_clkgating_allowed(hba) ||
!hba->clk_gating.is_initialized)
- goto out;
+ return;
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_gating.active_reqs++;
@@ -1797,15 +1775,10 @@ start:
*/
if (ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_link_hibern8(hba)) {
- if (async) {
- rc = -EAGAIN;
- hba->clk_gating.active_reqs--;
- break;
- }
spin_unlock_irqrestore(hba->host->host_lock, flags);
flush_result = flush_work(&hba->clk_gating.ungate_work);
if (hba->clk_gating.is_suspended && !flush_result)
- goto out;
+ return;
spin_lock_irqsave(hba->host->host_lock, flags);
goto start;
}
@@ -1827,21 +1800,14 @@ start:
hba->clk_gating.state = REQ_CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- if (queue_work(hba->clk_gating.clk_gating_workq,
- &hba->clk_gating.ungate_work))
- ufshcd_scsi_block_requests(hba);
+ queue_work(hba->clk_gating.clk_gating_workq,
+ &hba->clk_gating.ungate_work);
/*
* fall through to check if we should wait for this
* work to be done or not.
*/
fallthrough;
case REQ_CLKS_ON:
- if (async) {
- rc = -EAGAIN;
- hba->clk_gating.active_reqs--;
- break;
- }
-
spin_unlock_irqrestore(hba->host->host_lock, flags);
flush_work(&hba->clk_gating.ungate_work);
/* Make sure state is CLKS_ON before returning */
@@ -1853,8 +1819,6 @@ start:
break;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
- return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
@@ -2086,7 +2050,7 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
ufshcd_remove_clk_gating_sysfs(hba);
/* Ungate the clock if necessary. */
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
hba->clk_gating.is_initialized = false;
ufshcd_release(hba);
@@ -2336,18 +2300,20 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
/* Read crypto capabilities */
err = ufshcd_hba_init_crypto_capabilities(hba);
- if (err)
+ if (err) {
dev_err(hba->dev, "crypto setup failed\n");
+ return err;
+ }
hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
if (!hba->mcq_sup)
- return err;
+ return 0;
hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
hba->mcq_capabilities);
- return err;
+ return 0;
}
/**
@@ -2482,7 +2448,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
return 0;
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
@@ -2533,7 +2499,7 @@ static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int
* 11b to indicate Dword granularity. A value of '3'
* indicates 4 bytes, '7' indicates 8 bytes, etc."
*/
- WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
+ WARN_ONCE(len > SZ_256K, "len = %#x\n", len);
prd->size = cpu_to_le32(len - 1);
prd->addr = cpu_to_le64(sg->dma_address);
prd->reserved = 0;
@@ -2885,12 +2851,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
- /*
- * Allows the UFS error handler to wait for prior ufshcd_queuecommand()
- * calls.
- */
- rcu_read_lock();
-
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
break;
@@ -2936,16 +2896,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
hba->req_abort_count = 0;
- err = ufshcd_hold(hba, true);
- if (err) {
- err = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
- WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
- (hba->clk_gating.state != CLKS_ON));
+ ufshcd_hold(hba);
lrbp = &hba->lrb[tag];
- WARN_ON(lrbp->cmd);
lrbp->cmd = cmd;
lrbp->task_tag = tag;
lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
@@ -2961,7 +2914,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
err = ufshcd_map_sg(hba, lrbp);
if (err) {
- lrbp->cmd = NULL;
ufshcd_release(hba);
goto out;
}
@@ -2972,8 +2924,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
ufshcd_send_command(hba, tag, hwq);
out:
- rcu_read_unlock();
-
if (ufs_trigger_eh()) {
unsigned long flags;
@@ -2999,13 +2949,50 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
}
/*
- * Clear all the requests from the controller for which a bit has been set in
- * @mask and wait until the controller confirms that these requests have been
- * cleared.
+ * Check with the block layer if the command is inflight
+ * @cmd: command to check.
+ *
+ * Returns true if command is inflight; false if not.
+ */
+bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
+{
+ struct request *rq;
+
+ if (!cmd)
+ return false;
+
+ rq = scsi_cmd_to_rq(cmd);
+ if (!blk_mq_request_started(rq))
+ return false;
+
+ return true;
+}
+
+/*
+ * Clear the pending command in the controller and wait until
+ * the controller confirms that the command has been cleared.
+ * @hba: per adapter instance
+ * @task_tag: The tag number of the command to be cleared.
*/
-static int ufshcd_clear_cmds(struct ufs_hba *hba, u32 mask)
+static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
{
+ u32 mask = 1U << task_tag;
unsigned long flags;
+ int err;
+
+ if (is_mcq_enabled(hba)) {
+ /*
+ * MCQ mode. Clean up the MCQ resources similar to
+ * what the ufshcd_utrl_clear() does for SDB mode.
+ */
+ err = ufshcd_mcq_sq_cleanup(hba, task_tag);
+ if (err) {
+ dev_err(hba->dev, "%s: failed tag=%d. err=%d\n",
+ __func__, task_tag, err);
+ return err;
+ }
+ return 0;
+ }
/* clear outstanding transaction before retry */
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -3099,14 +3086,23 @@ retry:
* not trigger any race conditions.
*/
hba->dev_cmd.complete = NULL;
- err = ufshcd_get_tr_ocs(lrbp, hba->dev_cmd.cqe);
+ err = ufshcd_get_tr_ocs(lrbp, NULL);
if (!err)
err = ufshcd_dev_cmd_completion(hba, lrbp);
} else {
err = -ETIMEDOUT;
dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
__func__, lrbp->task_tag);
- if (ufshcd_clear_cmds(hba, 1U << lrbp->task_tag) == 0) {
+
+ /* MCQ mode */
+ if (is_mcq_enabled(hba)) {
+ err = ufshcd_clear_cmd(hba, lrbp->task_tag);
+ hba->dev_cmd.complete = NULL;
+ return err;
+ }
+
+ /* SDB mode */
+ if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
/* successfully cleared the command, retry if needed */
err = -EAGAIN;
/*
@@ -3180,13 +3176,12 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
down_read(&hba->clk_scaling_lock);
lrbp = &hba->lrb[tag];
- WARN_ON(lrbp->cmd);
+ lrbp->cmd = NULL;
err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
if (unlikely(err))
goto out;
hba->dev_cmd.complete = &wait;
- hba->dev_cmd.cqe = NULL;
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
@@ -3267,7 +3262,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
BUG_ON(!hba);
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
@@ -3341,7 +3336,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
return -EINVAL;
}
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
@@ -3437,7 +3432,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
return -EINVAL;
}
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
@@ -3779,7 +3774,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
/*
* Allocate memory for UTP Transfer descriptors
- * UFSHCI requires 1024 byte alignment of UTRD
+ * UFSHCI requires 1KB alignment of UTRD
*/
utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
@@ -3787,7 +3782,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
&hba->utrdl_dma_addr,
GFP_KERNEL);
if (!hba->utrdl_base_addr ||
- WARN_ON(hba->utrdl_dma_addr & (1024 - 1))) {
+ WARN_ON(hba->utrdl_dma_addr & (SZ_1K - 1))) {
dev_err(hba->dev,
"Transfer Descriptor Memory allocation failed\n");
goto out;
@@ -3803,7 +3798,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
goto skip_utmrdl;
/*
* Allocate memory for UTP Task Management descriptors
- * UFSHCI requires 1024 byte alignment of UTMRD
+ * UFSHCI requires 1KB alignment of UTMRD
*/
utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
@@ -3811,7 +3806,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
&hba->utmrdl_dma_addr,
GFP_KERNEL);
if (!hba->utmrdl_base_addr ||
- WARN_ON(hba->utmrdl_dma_addr & (1024 - 1))) {
+ WARN_ON(hba->utmrdl_dma_addr & (SZ_1K - 1))) {
dev_err(hba->dev,
"Task Management Descriptor Memory allocation failed\n");
goto out;
@@ -3868,10 +3863,8 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
/* Configure UTRD with command descriptor base address */
cmd_desc_element_addr =
(cmd_desc_dma_addr + (cmd_desc_size * i));
- utrdlp[i].command_desc_base_addr_lo =
- cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
- utrdlp[i].command_desc_base_addr_hi =
- cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
+ utrdlp[i].command_desc_base_addr =
+ cpu_to_le64(cmd_desc_element_addr);
/* Response upiu and prdt offset should be in double words */
if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
@@ -4255,7 +4248,7 @@ int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
uic_cmd.command = UIC_CMD_DME_SET;
uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
uic_cmd.argument3 = mode;
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
ufshcd_release(hba);
@@ -4362,7 +4355,7 @@ void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
if (update &&
!pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
ufshcd_rpm_get_sync(hba);
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
ufshcd_auto_hibern8_enable(hba);
ufshcd_release(hba);
ufshcd_rpm_put_sync(hba);
@@ -4955,7 +4948,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
int err = 0;
int retries;
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
@@ -5148,7 +5141,7 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
- blk_queue_update_dma_alignment(q, 4096 - 1);
+ blk_queue_update_dma_alignment(q, SZ_4K - 1);
/*
* Block runtime-pm until all consumers are added.
* Refer ufshcd_setup_links().
@@ -5416,13 +5409,12 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
}
/* Release the resources allocated for processing a SCSI command. */
-static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp)
+void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp)
{
struct scsi_cmnd *cmd = lrbp->cmd;
scsi_dma_unmap(cmd);
- lrbp->cmd = NULL; /* Mark the command as completed. */
ufshcd_release(hba);
ufshcd_clk_scaling_update_busy(hba);
}
@@ -5438,6 +5430,7 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
{
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
+ enum utp_ocs ocs;
lrbp = &hba->lrb[task_tag];
lrbp->compl_time_stamp = ktime_get();
@@ -5453,8 +5446,11 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
if (hba->dev_cmd.complete) {
- hba->dev_cmd.cqe = cqe;
- ufshcd_add_command_trace(hba, task_tag, UFS_DEV_COMP);
+ if (cqe) {
+ ocs = le32_to_cpu(cqe->status) & MASK_OCS;
+ lrbp->utr_descriptor_ptr->header.dword_2 =
+ cpu_to_le32(ocs);
+ }
complete(hba->dev_cmd.complete);
ufshcd_clk_scaling_update_busy(hba);
}
@@ -5507,7 +5503,7 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
struct ufs_hw_queue *hwq;
if (is_mcq_enabled(hba)) {
- hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+ hwq = &hba->uhq[queue_num];
return ufshcd_mcq_poll_cqe_lock(hba, hwq);
}
@@ -5532,6 +5528,57 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
}
/**
+ * ufshcd_mcq_compl_pending_transfer - MCQ mode function. It is
+ * invoked from the error handler context or ufshcd_host_reset_and_restore()
+ * to complete the pending transfers and free the resources associated with
+ * the scsi command.
+ *
+ * @hba: per adapter instance
+ * @force_compl: This flag is set to true when invoked
+ * from ufshcd_host_reset_and_restore() in which case it requires special
+ * handling because the host controller has been reset by ufshcd_hba_stop().
+ */
+static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
+ bool force_compl)
+{
+ struct ufs_hw_queue *hwq;
+ struct ufshcd_lrb *lrbp;
+ struct scsi_cmnd *cmd;
+ unsigned long flags;
+ u32 hwq_num, utag;
+ int tag;
+
+ for (tag = 0; tag < hba->nutrs; tag++) {
+ lrbp = &hba->lrb[tag];
+ cmd = lrbp->cmd;
+ if (!ufshcd_cmd_inflight(cmd) ||
+ test_bit(SCMD_STATE_COMPLETE, &cmd->state))
+ continue;
+
+ utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
+ hwq_num = blk_mq_unique_tag_to_hwq(utag);
+ hwq = &hba->uhq[hwq_num];
+
+ if (force_compl) {
+ ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
+ /*
+ * For those cmds of which the cqes are not present
+ * in the cq, complete them explicitly.
+ */
+ if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
+ spin_lock_irqsave(&hwq->cq_lock, flags);
+ set_host_byte(cmd, DID_REQUEUE);
+ ufshcd_release_scsi_cmd(hba, lrbp);
+ scsi_done(cmd);
+ spin_unlock_irqrestore(&hwq->cq_lock, flags);
+ }
+ } else {
+ ufshcd_mcq_poll_cqe_lock(hba, hwq);
+ }
+ }
+}
+
+/**
* ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
*
@@ -6095,9 +6142,13 @@ out:
}
/* Complete requests that have door-bell cleared */
-static void ufshcd_complete_requests(struct ufs_hba *hba)
+static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
{
- ufshcd_transfer_req_compl(hba);
+ if (is_mcq_enabled(hba))
+ ufshcd_mcq_compl_pending_transfer(hba, force_compl);
+ else
+ ufshcd_transfer_req_compl(hba);
+
ufshcd_tmc_handler(hba);
}
@@ -6241,22 +6292,22 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
ufshcd_setup_vreg(hba, true);
ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
if (!ufshcd_is_clkgating_allowed(hba))
ufshcd_setup_clocks(hba, true);
ufshcd_release(hba);
pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
ufshcd_vops_resume(hba, pm_op);
} else {
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
if (ufshcd_is_clkscaling_supported(hba) &&
hba->clk_scaling.is_enabled)
ufshcd_suspend_clkscaling(hba);
ufshcd_clk_scaling_allow(hba, false);
}
ufshcd_scsi_block_requests(hba);
- /* Drain ufshcd_queuecommand() */
- synchronize_rcu();
+ /* Wait for ongoing ufshcd_queuecommand() calls to finish. */
+ blk_mq_wait_quiesce_done(&hba->host->tag_set);
cancel_work_sync(&hba->eeh_work);
}
@@ -6338,18 +6389,36 @@ static bool ufshcd_abort_all(struct ufs_hba *hba)
bool needs_reset = false;
int tag, ret;
- /* Clear pending transfer requests */
- for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
- ret = ufshcd_try_to_abort_task(hba, tag);
- dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
- hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
- ret ? "failed" : "succeeded");
- if (ret) {
- needs_reset = true;
- goto out;
+ if (is_mcq_enabled(hba)) {
+ struct ufshcd_lrb *lrbp;
+ int tag;
+
+ for (tag = 0; tag < hba->nutrs; tag++) {
+ lrbp = &hba->lrb[tag];
+ if (!ufshcd_cmd_inflight(lrbp->cmd))
+ continue;
+ ret = ufshcd_try_to_abort_task(hba, tag);
+ dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
+ hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
+ ret ? "failed" : "succeeded");
+ if (ret) {
+ needs_reset = true;
+ goto out;
+ }
+ }
+ } else {
+ /* Clear pending transfer requests */
+ for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
+ ret = ufshcd_try_to_abort_task(hba, tag);
+ dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
+ hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
+ ret ? "failed" : "succeeded");
+ if (ret) {
+ needs_reset = true;
+ goto out;
+ }
}
}
-
/* Clear pending task management requests */
for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
if (ufshcd_clear_tm_cmd(hba, tag)) {
@@ -6360,7 +6429,7 @@ static bool ufshcd_abort_all(struct ufs_hba *hba)
out:
/* Complete the requests that are cleared by s/w */
- ufshcd_complete_requests(hba);
+ ufshcd_complete_requests(hba, false);
return needs_reset;
}
@@ -6400,7 +6469,7 @@ static void ufshcd_err_handler(struct work_struct *work)
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_err_handling_prepare(hba);
/* Complete requests that have door-bell cleared by h/w */
- ufshcd_complete_requests(hba);
+ ufshcd_complete_requests(hba, false);
spin_lock_irqsave(hba->host->host_lock, flags);
again:
needs_restore = false;
@@ -6771,7 +6840,7 @@ static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
ufshcd_mcq_write_cqis(hba, events, i);
if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
- ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+ ufshcd_mcq_poll_cqe_lock(hba, hwq);
}
return IRQ_HANDLED;
@@ -6901,7 +6970,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
return PTR_ERR(req);
req->end_io_data = &wait;
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
spin_lock_irqsave(host->host_lock, flags);
@@ -7037,7 +7106,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
down_read(&hba->clk_scaling_lock);
lrbp = &hba->lrb[tag];
- WARN_ON(lrbp->cmd);
lrbp->cmd = NULL;
lrbp->task_tag = tag;
lrbp->lun = 0;
@@ -7138,7 +7206,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
cmd_type = DEV_CMD_TYPE_NOP;
fallthrough;
case UPIU_TRANSACTION_QUERY_REQ:
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
desc_buff, buff_len,
@@ -7204,12 +7272,11 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
u16 ehs_len;
/* Protects use of hba->reserved_slot. */
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
down_read(&hba->clk_scaling_lock);
lrbp = &hba->lrb[tag];
- WARN_ON(lrbp->cmd);
lrbp->cmd = NULL;
lrbp->task_tag = tag;
lrbp->lun = UFS_UPIU_RPMB_WLUN;
@@ -7281,7 +7348,9 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
unsigned long flags, pending_reqs = 0, not_cleared = 0;
struct Scsi_Host *host;
struct ufs_hba *hba;
- u32 pos;
+ struct ufs_hw_queue *hwq;
+ struct ufshcd_lrb *lrbp;
+ u32 pos, not_cleared_mask = 0;
int err;
u8 resp = 0xF, lun;
@@ -7296,6 +7365,20 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
goto out;
}
+ if (is_mcq_enabled(hba)) {
+ for (pos = 0; pos < hba->nutrs; pos++) {
+ lrbp = &hba->lrb[pos];
+ if (ufshcd_cmd_inflight(lrbp->cmd) &&
+ lrbp->lun == lun) {
+ ufshcd_clear_cmd(hba, pos);
+ hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
+ ufshcd_mcq_poll_cqe_lock(hba, hwq);
+ }
+ }
+ err = 0;
+ goto out;
+ }
+
/* clear the commands that were pending for corresponding LUN */
spin_lock_irqsave(&hba->outstanding_lock, flags);
for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
@@ -7304,17 +7387,20 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
hba->outstanding_reqs &= ~pending_reqs;
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
- if (ufshcd_clear_cmds(hba, pending_reqs) < 0) {
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- not_cleared = pending_reqs &
- ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- hba->outstanding_reqs |= not_cleared;
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+ for_each_set_bit(pos, &pending_reqs, hba->nutrs) {
+ if (ufshcd_clear_cmd(hba, pos) < 0) {
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ not_cleared = 1U << pos &
+ ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ hba->outstanding_reqs |= not_cleared;
+ not_cleared_mask |= not_cleared;
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
- dev_err(hba->dev, "%s: failed to clear requests %#lx\n",
- __func__, not_cleared);
+ dev_err(hba->dev, "%s: failed to clear request %d\n",
+ __func__, pos);
+ }
}
- __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared);
+ __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask);
out:
hba->req_abort_count = 0;
@@ -7352,7 +7438,7 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
*
* Returns zero on success, non-zero on failure
*/
-static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
+int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
{
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
int err = 0;
@@ -7375,6 +7461,20 @@ static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
*/
dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
__func__, tag);
+ if (is_mcq_enabled(hba)) {
+ /* MCQ mode */
+ if (ufshcd_cmd_inflight(lrbp->cmd)) {
+ /* sleep for max. 200us same delay as in SDB mode */
+ usleep_range(100, 200);
+ continue;
+ }
+ /* command completed already */
+ dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n",
+ __func__, tag);
+ goto out;
+ }
+
+ /* Single Doorbell Mode */
reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
if (reg & (1 << tag)) {
/* sleep for max. 200us to stabilize */
@@ -7411,7 +7511,7 @@ static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
goto out;
}
- err = ufshcd_clear_cmds(hba, 1U << tag);
+ err = ufshcd_clear_cmd(hba, tag);
if (err)
dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
__func__, tag, err);
@@ -7439,14 +7539,17 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
- ufshcd_hold(hba, false);
- reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- /* If command is already aborted/completed, return FAILED. */
- if (!(test_bit(tag, &hba->outstanding_reqs))) {
- dev_err(hba->dev,
- "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
- __func__, tag, hba->outstanding_reqs, reg);
- goto release;
+ ufshcd_hold(hba);
+
+ if (!is_mcq_enabled(hba)) {
+ reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ if (!test_bit(tag, &hba->outstanding_reqs)) {
+ /* If command is already aborted/completed, return FAILED. */
+ dev_err(hba->dev,
+ "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
+ __func__, tag, hba->outstanding_reqs, reg);
+ goto release;
+ }
}
/* Print Transfer Request of aborted task */
@@ -7471,7 +7574,8 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
}
hba->req_abort_count++;
- if (!(reg & (1 << tag))) {
+ if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) {
+ /* only execute this code in single doorbell mode */
dev_err(hba->dev,
"%s: cmd was completed, but without a notifying intr, tag = %d",
__func__, tag);
@@ -7497,6 +7601,12 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
goto release;
}
+ if (is_mcq_enabled(hba)) {
+ /* MCQ mode. Branch off to handle abort for mcq mode */
+ err = ufshcd_mcq_abort(cmd);
+ goto release;
+ }
+
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip) {
dev_err(hba->dev, "%s: skipping abort\n", __func__);
@@ -7552,7 +7662,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
ufshpb_toggle_state(hba, HPB_PRESENT, HPB_RESET);
ufshcd_hba_stop(hba);
hba->silence_err_logs = true;
- ufshcd_complete_requests(hba);
+ ufshcd_complete_requests(hba, true);
hba->silence_err_logs = false;
/* scale up clocks to max frequency before full reinitialization */
@@ -8502,11 +8612,15 @@ err:
static void ufshcd_config_mcq(struct ufs_hba *hba)
{
int ret;
+ u32 intrs;
ret = ufshcd_mcq_vops_config_esi(hba);
dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
- ufshcd_enable_intr(hba, UFSHCD_ENABLE_MCQ_INTRS);
+ intrs = UFSHCD_ENABLE_MCQ_INTRS;
+ if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
+ intrs &= ~MCQ_CQ_EVENT_STATUS;
+ ufshcd_enable_intr(hba, intrs);
ufshcd_mcq_make_queues_operational(hba);
ufshcd_mcq_config_mac(hba, hba->nutrs);
@@ -8774,7 +8888,7 @@ static const struct scsi_host_template ufshcd_driver_template = {
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
.can_queue = UFSHCD_CAN_QUEUE,
.max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
- .max_sectors = (1 << 20) / SECTOR_SIZE, /* 1 MiB */
+ .max_sectors = SZ_1M / SECTOR_SIZE,
.max_host_blocked = 1,
.track_queue_depth = 1,
.skip_settle_delay = 1,
@@ -9184,7 +9298,8 @@ static int ufshcd_execute_start_stop(struct scsi_device *sdev,
};
return scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, /*buffer=*/NULL,
- /*bufflen=*/0, /*timeout=*/HZ, /*retries=*/0, &args);
+ /*bufflen=*/0, /*timeout=*/10 * HZ, /*retries=*/0,
+ &args);
}
/**
@@ -9430,7 +9545,7 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
* If we can't transition into any of the low power modes
* just gate the clocks.
*/
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
hba->clk_gating.is_suspended = true;
if (ufshcd_is_clkscaling_supported(hba))
@@ -9775,28 +9890,6 @@ out:
}
#endif
-static void ufshcd_wl_shutdown(struct device *dev)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufs_hba *hba;
-
- hba = shost_priv(sdev->host);
-
- down(&hba->host_sem);
- hba->shutting_down = true;
- up(&hba->host_sem);
-
- /* Turn on everything while shutting down */
- ufshcd_rpm_get_sync(hba);
- scsi_device_quiesce(sdev);
- shost_for_each_device(sdev, hba->host) {
- if (sdev == hba->ufs_device_wlun)
- continue;
- scsi_device_quiesce(sdev);
- }
- __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
-}
-
/**
* ufshcd_suspend - helper function for suspend operations
* @hba: per adapter instance
@@ -9981,25 +10074,34 @@ int ufshcd_runtime_resume(struct device *dev)
EXPORT_SYMBOL(ufshcd_runtime_resume);
#endif /* CONFIG_PM */
-/**
- * ufshcd_shutdown - shutdown routine
- * @hba: per adapter instance
- *
- * This function would turn off both UFS device and UFS hba
- * regulators. It would also disable clocks.
- *
- * Returns 0 always to allow force shutdown even in case of errors.
- */
-int ufshcd_shutdown(struct ufs_hba *hba)
+static void ufshcd_wl_shutdown(struct device *dev)
{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufs_hba *hba = shost_priv(sdev->host);
+
+ down(&hba->host_sem);
+ hba->shutting_down = true;
+ up(&hba->host_sem);
+
+ /* Turn on everything while shutting down */
+ ufshcd_rpm_get_sync(hba);
+ scsi_device_quiesce(sdev);
+ shost_for_each_device(sdev, hba->host) {
+ if (sdev == hba->ufs_device_wlun)
+ continue;
+ scsi_device_quiesce(sdev);
+ }
+ __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
+
+ /*
+ * Next, turn off the UFS controller and the UFS regulators. Disable
+ * clocks.
+ */
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
ufshcd_suspend(hba);
hba->is_powered = false;
- /* allow force shutdown even in case of errors */
- return 0;
}
-EXPORT_SYMBOL(ufshcd_shutdown);
/**
* ufshcd_remove - de-allocate SCSI host and host memory space
@@ -10226,6 +10328,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
host->max_channel = UFSHCD_MAX_CHANNEL;
host->unique_id = host->host_no;
host->max_cmd_len = UFS_CDB_SIZE;
+ host->queuecommand_may_block = !!(hba->caps & UFSHCD_CAP_CLK_GATING);
hba->max_pwr_info.is_valid = false;