Diffstat (limited to 'drivers/ufs/core/ufs-mcq.c')
-rw-r--r--  drivers/ufs/core/ufs-mcq.c | 261
1 file changed, 145 insertions(+), 116 deletions(-)
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 386674ead7f0..9ab91b4c05b0 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -7,7 +7,7 @@
* Can Guo <quic_cang@quicinc.com>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -18,6 +18,7 @@
#include <linux/iopoll.h>
#define MAX_QUEUE_SUP GENMASK(7, 0)
+#define QCFGPTR GENMASK(23, 16)
#define UFS_MCQ_MIN_RW_QUEUES 2
#define UFS_MCQ_MIN_READ_QUEUES 0
#define UFS_MCQ_MIN_POLL_QUEUES 0
@@ -25,10 +26,13 @@
#define QUEUE_ID_OFFSET 16
#define MCQ_CFG_MAC_MASK GENMASK(16, 8)
-#define MCQ_QCFG_SIZE 0x40
#define MCQ_ENTRY_SIZE_IN_DWORD 8
#define CQE_UCD_BA GENMASK_ULL(63, 7)
+#define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
+ UFSHCD_ERROR_MASK |\
+ MCQ_CQ_EVENT_STATUS)
+
/* Max mcq register polling time in microseconds */
#define MCQ_POLL_US 500000
@@ -94,9 +98,10 @@ void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);
val &= ~MCQ_CFG_MAC_MASK;
- val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds);
+ val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds - 1);
ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);
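
The MAC (Max Active Commands) field in REG_UFS_MCQ_CFG is 0-based, which is
why the driver now programs max_active_cmds - 1. A minimal userspace sketch of
the encoding, reusing the MCQ_CFG_MAC_MASK value defined in this file (the
FIELD_PREP stand-in is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define MCQ_CFG_MAC_MASK (0x1ffu << 8)	/* GENMASK(16, 8) */

	/* Illustrative stand-in for FIELD_PREP(MCQ_CFG_MAC_MASK, v) */
	static uint32_t mac_field(uint32_t v)
	{
		return (v << 8) & MCQ_CFG_MAC_MASK;
	}

	int main(void)
	{
		uint32_t max_active_cmds = 64, reg = 0;

		/* 0-based field: programming N - 1 allows N commands */
		reg = (reg & ~MCQ_CFG_MAC_MASK) | mac_field(max_active_cmds - 1);
		printf("MCQ_CFG = %#x\n", reg);	/* 0x3f00 */
		return 0;
	}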
/**
* ufshcd_mcq_req_to_hwq - find the hardware queue on which the
@@ -104,48 +109,59 @@ void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
* @hba: per adapter instance
* @req: pointer to the request to be issued
*
- * Returns the hardware queue instance on which the request would
- * be queued.
+ * Return: the hardware queue instance on which the request will be or has
+ * been queued. %NULL if the request has already been freed.
*/
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
struct request *req)
{
- u32 utag = blk_mq_unique_tag(req);
- u32 hwq = blk_mq_unique_tag_to_hwq(utag);
+ struct blk_mq_hw_ctx *hctx = READ_ONCE(req->mq_hctx);
- return &hba->uhq[hwq];
+ return hctx ? &hba->uhq[hctx->queue_num] : NULL;
}
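
The lookup now reads req->mq_hctx, which the block layer clears when the
request is freed, so callers must handle a NULL return instead of trusting a
stale tag. A self-contained sketch of the pattern (the struct layouts are
illustrative stand-ins, not the kernel's definitions):

	#include <stdio.h>

	struct blk_mq_hw_ctx { unsigned int queue_num; };
	struct request { struct blk_mq_hw_ctx *mq_hctx; };
	struct ufs_hw_queue { int id; };
	struct ufs_hba { struct ufs_hw_queue uhq[8]; };

	/* Mirrors the patched lookup: NULL mq_hctx means the request is gone
	 * (the kernel wraps the load in READ_ONCE() against concurrent free) */
	static struct ufs_hw_queue *req_to_hwq(struct ufs_hba *hba,
					       struct request *req)
	{
		struct blk_mq_hw_ctx *hctx = req->mq_hctx;

		return hctx ? &hba->uhq[hctx->queue_num] : NULL;
	}

	int main(void)
	{
		struct ufs_hba hba = { .uhq = { [2] = { .id = 2 } } };
		struct blk_mq_hw_ctx hctx = { .queue_num = 2 };
		struct request live = { .mq_hctx = &hctx }, freed = { 0 };

		printf("live -> hwq %d\n", req_to_hwq(&hba, &live)->id);
		printf("freed -> %p\n", (void *)req_to_hwq(&hba, &freed));
		return 0;
	}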
/**
- * ufshcd_mcq_decide_queue_depth - decide the queue depth
+ * ufshcd_mcq_queue_cfg_addr - get the start address of the MCQ Queue Config
+ * Registers.
* @hba: per adapter instance
*
- * Returns queue-depth on success, non-zero on error
+ * Return: Start address of MCQ Queue Config Registers in HCI
+ */
+unsigned int ufshcd_mcq_queue_cfg_addr(struct ufs_hba *hba)
+{
+ return FIELD_GET(QCFGPTR, hba->mcq_capabilities) * 0x200;
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_queue_cfg_addr);
+
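
QCFGPTR acts as a 0x200-byte granularity pointer: the MCQ queue configuration
registers start at QCFGPTR * 0x200 bytes from the HCI base. A quick check of
the arithmetic (the mask mirrors the QCFGPTR definition above; the sample
capabilities value is hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	#define QCFGPTR_SHIFT	16
	#define QCFGPTR_MASK	(0xffu << QCFGPTR_SHIFT)	/* GENMASK(23, 16) */

	static unsigned int mcq_queue_cfg_addr(uint32_t mcq_capabilities)
	{
		return ((mcq_capabilities & QCFGPTR_MASK) >> QCFGPTR_SHIFT) * 0x200;
	}

	int main(void)
	{
		printf("cfg base = %#x\n", mcq_queue_cfg_addr(0x00380000)); /* 0x7000 */
		return 0;
	}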
+/**
+ * ufshcd_get_hba_mac - Maximum number of commands supported by the host
+ * controller.
+ * @hba: per adapter instance
*
- * MAC - Max. Active Command of the Host Controller (HC)
- * HC wouldn't send more than this commands to the device.
- * It is mandatory to implement get_hba_mac() to enable MCQ mode.
- * Calculates and adjusts the queue depth based on the depth
- * supported by the HC and ufs device.
+ * Return: queue depth on success; negative upon error.
+ *
+ * MAC = Maximum number of Active Commands supported by the Host Controller.
*/
-int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
+int ufshcd_get_hba_mac(struct ufs_hba *hba)
{
int mac;
- /* Mandatory to implement get_hba_mac() */
- mac = ufshcd_mcq_vops_get_hba_mac(hba);
- if (mac < 0) {
- dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
- return mac;
+ if (!hba->vops || !hba->vops->get_hba_mac) {
+ /*
+ * Extract the maximum number of active transfer tasks from the
+ * host controller capabilities register. This field is
+ * 0-based.
+ */
+ hba->capabilities =
+ ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
+ mac = hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_MCQ;
+ mac++;
+ } else {
+ mac = hba->vops->get_hba_mac(hba);
}
-
- WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
- /*
- * max. value of bqueuedepth = 256, mac is host dependent.
- * It is mandatory for UFS device to define bQueueDepth if
- * shared queuing architecture is enabled.
- */
- return min_t(int, mac, hba->dev_info.bqueuedepth);
+ if (mac < 0)
+ dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
+ return mac;
}
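
When a driver supplies no get_hba_mac() vop, the fallback derives MAC from
the controller capabilities register, whose transfer-request-slots field is
0-based. A small sketch of that computation (the mask here is an assumption
for illustration; the driver uses MASK_TRANSFER_REQUESTS_SLOTS_MCQ):

	#include <stdint.h>
	#include <stdio.h>

	#define SLOTS_MASK 0xffu	/* illustrative stand-in for the real mask */

	static int hba_mac_from_caps(uint32_t capabilities)
	{
		/* 0-based field: a raw value of N means N + 1 active commands */
		return (int)(capabilities & SLOTS_MASK) + 1;
	}

	int main(void)
	{
		printf("mac = %d\n", hba_mac_from_caps(0x1f));	/* 32 */
		return 0;
	}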
static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
@@ -165,6 +181,15 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
return -EOPNOTSUPP;
}
+ /*
+ * Device should support at least one I/O queue to handle device
+ * commands via hba->dev_cmd_queue.
+ */
+ if (hba_maxq == poll_queues) {
+ dev_err(hba->dev, "At least one non-poll queue required\n");
+ return -EOPNOTSUPP;
+ }
+
rem = hba_maxq;
if (rw_queues) {
@@ -209,7 +234,7 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
&hwq->sqe_dma_addr,
GFP_KERNEL);
- if (!hwq->sqe_dma_addr) {
+ if (!hwq->sqe_base_addr) {
dev_err(hba->dev, "SQE allocation failed\n");
return -ENOMEM;
}
@@ -218,7 +243,7 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
&hwq->cqe_dma_addr,
GFP_KERNEL);
- if (!hwq->cqe_dma_addr) {
+ if (!hwq->cqe_base_addr) {
dev_err(hba->dev, "CQE allocation failed\n");
return -ENOMEM;
}
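
Both hunks above fix the same class of bug: dmam_alloc_coherent() returns the
CPU virtual address (NULL on failure), while the dma_addr_t argument is only
an output parameter that is meaningful on success. Testing the returned
pointer is therefore the correct failure check, roughly:

	hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
						 &hwq->sqe_dma_addr, GFP_KERNEL);
	if (!hwq->sqe_base_addr)	/* not: !hwq->sqe_dma_addr */
		return -ENOMEM;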
@@ -227,12 +252,6 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
return 0;
}
-
-/* Operation and runtime registers configuration */
-#define MCQ_CFG_n(r, i) ((r) + MCQ_QCFG_SIZE * (i))
-#define MCQ_OPR_OFFSET_n(p, i) \
- (hba->mcq_opr[(p)].offset + hba->mcq_opr[(p)].stride * (i))
-
static void __iomem *mcq_opr_base(struct ufs_hba *hba,
enum ufshcd_mcq_opr n, int i)
{
@@ -245,6 +264,7 @@ u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i)
{
return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_read_cqis);
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
{
@@ -256,9 +276,7 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_write_cqis);
* Current MCQ specification doesn't provide a Task Tag or its equivalent in
* the Completion Queue Entry. Find the Task Tag using an indirect method.
*/
-static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
- struct ufs_hw_queue *hwq,
- struct cq_entry *cqe)
+static int ufshcd_mcq_get_tag(struct ufs_hba *hba, struct cq_entry *cqe)
{
u64 addr;
@@ -276,9 +294,10 @@ static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
struct ufs_hw_queue *hwq)
{
struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
- int tag = ufshcd_mcq_get_tag(hba, hwq, cqe);
if (cqe->command_desc_base_addr) {
+ int tag = ufshcd_mcq_get_tag(hba, cqe);
+
ufshcd_compl_one_cqe(hba, tag, cqe);
/* After processing the cqe, mark it as an empty (invalid) entry */
cqe->command_desc_base_addr = 0;
@@ -328,9 +347,16 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_lock);
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
{
struct ufs_hw_queue *hwq;
+ u32 intrs;
u16 qsize;
int i;
+ /* Enable required interrupts */
+ intrs = UFSHCD_ENABLE_MCQ_INTRS;
+ if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
+ intrs &= ~MCQ_CQ_EVENT_STATUS;
+ ufshcd_enable_intr(hba, intrs);
+
for (i = 0; i < hba->nr_hw_queues; i++) {
hwq = &hba->uhq[i];
hwq->id = i;
@@ -338,29 +364,29 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
/* Submission Queue Lower Base Address */
ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr),
- MCQ_CFG_n(REG_SQLBA, i));
+ ufshcd_mcq_cfg_offset(REG_SQLBA, i));
/* Submission Queue Upper Base Address */
ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
- MCQ_CFG_n(REG_SQUBA, i));
+ ufshcd_mcq_cfg_offset(REG_SQUBA, i));
/* Submission Queue Doorbell Address Offset */
- ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i),
- MCQ_CFG_n(REG_SQDAO, i));
+ ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQD, i),
+ ufshcd_mcq_cfg_offset(REG_SQDAO, i));
/* Submission Queue Interrupt Status Address Offset */
- ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i),
- MCQ_CFG_n(REG_SQISAO, i));
+ ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQIS, i),
+ ufshcd_mcq_cfg_offset(REG_SQISAO, i));
/* Completion Queue Lower Base Address */
ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr),
- MCQ_CFG_n(REG_CQLBA, i));
+ ufshcd_mcq_cfg_offset(REG_CQLBA, i));
/* Completion Queue Upper Base Address */
ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr),
- MCQ_CFG_n(REG_CQUBA, i));
+ ufshcd_mcq_cfg_offset(REG_CQUBA, i));
/* Completion Queue Doorbell Address Offset */
- ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQD, i),
- MCQ_CFG_n(REG_CQDAO, i));
+ ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQD, i),
+ ufshcd_mcq_cfg_offset(REG_CQDAO, i));
/* Completion Queue Interrupt Status Address Offset */
- ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQIS, i),
- MCQ_CFG_n(REG_CQISAO, i));
+ ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQIS, i),
+ ufshcd_mcq_cfg_offset(REG_CQISAO, i));
/* Save the base addresses for quicker access */
hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP;
@@ -377,7 +403,7 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
/* Completion Queue Enable|Size to Completion Queue Attribute */
ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize,
- MCQ_CFG_n(REG_CQATTR, i));
+ ufshcd_mcq_cfg_offset(REG_CQATTR, i));
/*
* Submission Queue Enable|Size|Completion Queue ID to
@@ -385,9 +411,23 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
*/
ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize |
(i << QUEUE_ID_OFFSET),
- MCQ_CFG_n(REG_SQATTR, i));
+ ufshcd_mcq_cfg_offset(REG_SQATTR, i));
}
}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational);
+
+void ufshcd_mcq_enable(struct ufs_hba *hba)
+{
+ ufshcd_rmwl(hba, MCQ_MODE_SELECT, MCQ_MODE_SELECT, REG_UFS_MEM_CFG);
+ hba->mcq_enabled = true;
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_enable);
+
+void ufshcd_mcq_disable(struct ufs_hba *hba)
+{
+ ufshcd_rmwl(hba, MCQ_MODE_SELECT, 0, REG_UFS_MEM_CFG);
+ hba->mcq_enabled = false;
+}
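
ufshcd_mcq_enable() and ufshcd_mcq_disable() flip the controller between
legacy single-doorbell mode and MCQ mode via the MCQ_MODE_SELECT bit.
ufshcd_rmwl() is the UFS core's read-modify-write helper; it behaves like
this sketch:

	static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val,
				       u32 reg)
	{
		u32 tmp;

		tmp = ufshcd_readl(hba, reg);
		tmp &= ~mask;		/* clear the field */
		tmp |= (val & mask);	/* install the new value */
		ufshcd_writel(hba, tmp, reg);
	}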
void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
{
@@ -433,15 +473,12 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
for (i = 0; i < hba->nr_hw_queues; i++) {
hwq = &hba->uhq[i];
- hwq->max_entries = hba->nutrs;
+ hwq->max_entries = hba->nutrs + 1;
spin_lock_init(&hwq->sq_lock);
spin_lock_init(&hwq->cq_lock);
mutex_init(&hwq->sq_mutex);
}
- /* The very first HW queue serves device commands */
- hba->dev_cmd_queue = &hba->uhq[0];
-
host->host_tagset = 1;
return 0;
}
@@ -487,15 +524,16 @@ static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
/**
* ufshcd_mcq_sq_cleanup - Clean up submission queue resources
* associated with the pending command.
- * @hba - per adapter instance.
- * @task_tag - The command's task tag.
+ * @hba: per adapter instance.
+ * @task_tag: The command's task tag.
*
- * Returns 0 for success; error code otherwise.
+ * Return: 0 for success; error code otherwise.
*/
int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
{
- struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
- struct scsi_cmnd *cmd = lrbp->cmd;
+ struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, task_tag);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct ufs_hw_queue *hwq;
void __iomem *reg, *opr_sqd_base;
u32 nexus, id, val;
@@ -504,45 +542,47 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
return -ETIMEDOUT;
- if (task_tag != hba->nutrs - UFSHCD_NUM_RESERVED) {
- if (!cmd)
- return -EINVAL;
- hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
- } else {
- hwq = hba->dev_cmd_queue;
- }
+ if (!cmd)
+ return -EINVAL;
+
+ hwq = ufshcd_mcq_req_to_hwq(hba, rq);
+ if (!hwq)
+ return 0;
id = hwq->id;
- mutex_lock(&hwq->sq_mutex);
+ guard(mutex)(&hwq->sq_mutex);
/* stop the SQ fetching before working on it */
err = ufshcd_mcq_sq_stop(hba, hwq);
if (err)
- goto unlock;
+ return err;
/* SQCTI = EXT_IID, IID, LUN, Task Tag */
nexus = lrbp->lun << 8 | task_tag;
opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id);
writel(nexus, opr_sqd_base + REG_SQCTI);
- /* SQRTCy.ICU = 1 */
- writel(SQ_ICU, opr_sqd_base + REG_SQRTC);
+ /* Initiate Cleanup */
+ writel(readl(opr_sqd_base + REG_SQRTC) | SQ_ICU,
+ opr_sqd_base + REG_SQRTC);
- /* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */
+ /* Wait until SQRTSy.CUS = 1. Report SQRTSy.RTC. */
reg = opr_sqd_base + REG_SQRTS;
err = read_poll_timeout(readl, val, val & SQ_CUS, 20,
MCQ_POLL_US, false, reg);
if (err)
- dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%ld\n",
- __func__, id, task_tag,
- FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)));
+ dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%d\n",
+ __func__, id, task_tag, err);
+ else
+ dev_info(hba->dev,
+ "%s, hwq %d: cleanup return code (RTC) %ld\n",
+ __func__, id,
+ FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)));
if (ufshcd_mcq_sq_start(hba, hwq))
err = -ETIMEDOUT;
-unlock:
- mutex_unlock(&hwq->sq_mutex);
return err;
}
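
The guard(mutex)(...) above comes from <linux/cleanup.h>: it locks the mutex
and registers a scope-exit cleanup that unlocks it automatically, which is
what allows the error paths to return directly instead of jumping to an
unlock label. A sketch of the resulting control flow (do_work_failed() is a
hypothetical placeholder):

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static bool do_work_failed(void)	/* hypothetical */
	{
		return false;
	}

	static int example(struct mutex *m)
	{
		guard(mutex)(m);	/* mutex_lock(m); auto-unlock at scope exit */

		if (do_work_failed())
			return -EIO;	/* mutex released automatically */

		return 0;		/* ...and released here as well */
	}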
@@ -551,16 +591,11 @@ unlock:
* Write the sqe's Command Type to 0xF. The host controller will not
* fetch any sqe with Command Type = 0xF.
*
- * @utrd - UTP Transfer Request Descriptor to be nullified.
+ * @utrd: UTP Transfer Request Descriptor to be nullified.
*/
static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
{
- u32 dword_0;
-
- dword_0 = le32_to_cpu(utrd->header.dword_0);
- dword_0 &= ~UPIU_COMMAND_TYPE_MASK;
- dword_0 |= FIELD_PREP(UPIU_COMMAND_TYPE_MASK, 0xF);
- utrd->header.dword_0 = cpu_to_le32(dword_0);
+ utrd->header.command_type = 0xf;
}
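
The helper collapses to one assignment because struct request_desc_header now
exposes command_type as a dedicated field instead of a bit range inside a
packed little-endian dword. The general technique, in an illustrative (not
layout-accurate) form:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative only -- not the kernel's actual header layout */
	struct desc_header {
		uint8_t flags;
		uint8_t lun;
		uint8_t command_type : 4;	/* 0xF = skipped by the HC */
		uint8_t reserved     : 4;
	};

	int main(void)
	{
		struct desc_header h = { 0 };

		h.command_type = 0xf;	/* one store replaces read/mask/or/write */
		printf("command_type = %#x\n", (unsigned int)h.command_type);
		return 0;
	}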
/**
@@ -568,17 +603,18 @@ static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
* If the command is in the submission queue and not issued to the device yet,
* nullify the sqe so the host controller will skip fetching the sqe.
*
- * @hba - per adapter instance.
- * @hwq - Hardware Queue to be searched.
- * @task_tag - The command's task tag.
+ * @hba: per adapter instance.
+ * @hwq: Hardware Queue to be searched.
+ * @task_tag: The command's task tag.
*
- * Returns true if the SQE containing the command is present in the SQ
+ * Return: true if the SQE containing the command is present in the SQ
* (not fetched by the controller); returns false if the SQE is not in the SQ.
*/
static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
struct ufs_hw_queue *hwq, int task_tag)
{
- struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+ struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, task_tag);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
struct utp_transfer_req_desc *utrd;
__le64 cmd_desc_base_addr;
bool ret = false;
@@ -599,8 +635,7 @@ static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA;
while (sq_head_slot != hwq->sq_tail_slot) {
- utrd = hwq->sqe_base_addr +
- sq_head_slot * sizeof(struct utp_transfer_req_desc);
+ utrd = hwq->sqe_base_addr + sq_head_slot;
match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA;
if (addr == match) {
ufshcd_mcq_nullify_sqe(utrd);
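
The simplified indexing relies on sqe_base_addr being typed as struct
utp_transfer_req_desc * rather than void * (the member's type change lives in
the header, outside this diff), so pointer arithmetic scales by the element
size on its own. A tiny demonstration (the struct size here is illustrative):

	#include <stdio.h>

	struct utp_transfer_req_desc { unsigned char bytes[32]; };

	int main(void)
	{
		struct utp_transfer_req_desc sq[4], *base = sq;

		/* base + slot == (char *)base + slot * sizeof(*base) */
		printf("%d\n", (int)((char *)(base + 3) - (char *)base)); /* 96 */
		return 0;
	}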
@@ -621,34 +656,32 @@ out:
/**
* ufshcd_mcq_abort - Abort the command in MCQ.
- * @cmd - The command to be aborted.
+ * @cmd: The command to be aborted.
*
- * Returns SUCCESS or FAILED error codes
+ * Return: SUCCESS or FAILED error codes
*/
int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct ufs_hba *hba = shost_priv(host);
int tag = scsi_cmd_to_rq(cmd)->tag;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
struct ufs_hw_queue *hwq;
- int err = FAILED;
-
- if (!ufshcd_cmd_inflight(lrbp->cmd)) {
- dev_err(hba->dev,
- "%s: skip abort. cmd at tag %d already completed.\n",
- __func__, tag);
- goto out;
- }
+ int err;
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip) {
dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
__func__, tag);
- goto out;
+ return FAILED;
}
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+ if (!hwq) {
+ dev_err(hba->dev, "%s: skip abort. cmd at tag %d already completed.\n",
+ __func__, tag);
+ return FAILED;
+ }
if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
/*
@@ -657,7 +690,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
*/
dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
__func__, hwq->id, tag);
- goto out;
+ return FAILED;
}
/*
@@ -665,16 +698,12 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
* in the completion queue either. Query the device to see if
* the command is being processed in the device.
*/
- if (ufshcd_try_to_abort_task(hba, tag)) {
+ err = ufshcd_try_to_abort_task(hba, tag);
+ if (err) {
dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
lrbp->req_abort_skip = true;
- goto out;
+ return FAILED;
}
- err = SUCCESS;
- if (ufshcd_cmd_inflight(lrbp->cmd))
- ufshcd_release_scsi_cmd(hba, lrbp);
-
-out:
- return err;
+ return SUCCESS;
}