Diffstat (limited to 'drivers/ufs/core')
-rw-r--r--  drivers/ufs/core/ufs-fault-injection.c     1
-rw-r--r--  drivers/ufs/core/ufs-mcq.c               153
-rw-r--r--  drivers/ufs/core/ufs-sysfs.c             224
-rw-r--r--  drivers/ufs/core/ufs_bsg.c                13
-rw-r--r--  drivers/ufs/core/ufs_trace.h             405
-rw-r--r--  drivers/ufs/core/ufshcd-crypto.c          56
-rw-r--r--  drivers/ufs/core/ufshcd-crypto.h          36
-rw-r--r--  drivers/ufs/core/ufshcd-priv.h            27
-rw-r--r--  drivers/ufs/core/ufshcd.c               1770
9 files changed, 1655 insertions, 1030 deletions
diff --git a/drivers/ufs/core/ufs-fault-injection.c b/drivers/ufs/core/ufs-fault-injection.c
index 169540417079..55db38e75cc4 100644
--- a/drivers/ufs/core/ufs-fault-injection.c
+++ b/drivers/ufs/core/ufs-fault-injection.c
@@ -3,6 +3,7 @@
#include <linux/kconfig.h>
#include <linux/types.h>
#include <linux/fault-inject.h>
+#include <linux/debugfs.h>
#include <linux/module.h>
#include <ufs/ufshcd.h>
#include "ufs-fault-injection.h"
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 0787456c2b89..240ce135bbfb 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -7,7 +7,7 @@
* Can Guo <quic_cang@quicinc.com>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -18,6 +18,7 @@
#include <linux/iopoll.h>
#define MAX_QUEUE_SUP GENMASK(7, 0)
+#define QCFGPTR GENMASK(23, 16)
#define UFS_MCQ_MIN_RW_QUEUES 2
#define UFS_MCQ_MIN_READ_QUEUES 0
#define UFS_MCQ_MIN_POLL_QUEUES 0
@@ -25,7 +26,6 @@
#define QUEUE_ID_OFFSET 16
#define MCQ_CFG_MAC_MASK GENMASK(16, 8)
-#define MCQ_QCFG_SIZE 0x40
#define MCQ_ENTRY_SIZE_IN_DWORD 8
#define CQE_UCD_BA GENMASK_ULL(63, 7)
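
MCQ_QCFG_SIZE and the MCQ_CFG_n() macro are dropped in this patch because per-queue config offsets now go through ufshcd_mcq_queue_cfg_addr() (decoded from the new QCFGPTR field, see below) and a ufshcd_mcq_cfg_offset() helper defined outside this diff. A hypothetical sketch of the arithmetic involved, assuming each queue's register block keeps the former 0x40-byte stride:

/* Hypothetical: per-queue config register offset, 0x40 bytes per queue. */
static inline unsigned int mcq_cfg_offset_sketch(unsigned int reg, int i)
{
	return reg + 0x40 * i;		/* formerly MCQ_CFG_n(r, i) */
}
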
@@ -94,7 +94,7 @@ void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);
val &= ~MCQ_CFG_MAC_MASK;
- val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds);
+ val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds - 1);
ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);
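
The change above accounts for the MAC field being 0-based: programming N-1 allows N concurrently active commands. A minimal sketch of the encoding, reusing the GENMASK(16, 8) field layout defined in this file:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define MCQ_CFG_MAC_MASK	GENMASK(16, 8)

/* Encode a 1-based command count into the 0-based MAC register field. */
static u32 mcq_cfg_encode_mac(u32 reg, u32 max_active_cmds)
{
	reg &= ~MCQ_CFG_MAC_MASK;
	return reg | FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds - 1);
}
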
@@ -105,19 +105,31 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);
* @hba: per adapter instance
* @req: pointer to the request to be issued
*
- * Return: the hardware queue instance on which the request would
- * be queued.
+ * Return: the hardware queue instance on which the request will be or has
+ * been queued. %NULL if the request has already been freed.
*/
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
struct request *req)
{
- u32 utag = blk_mq_unique_tag(req);
- u32 hwq = blk_mq_unique_tag_to_hwq(utag);
+ struct blk_mq_hw_ctx *hctx = READ_ONCE(req->mq_hctx);
- return &hba->uhq[hwq];
+ return hctx ? &hba->uhq[hctx->queue_num] : NULL;
}
/**
+ * ufshcd_mcq_queue_cfg_addr - get the start address of the MCQ Queue Config
+ * Registers.
+ * @hba: per adapter instance
+ *
+ * Return: Start address of MCQ Queue Config Registers in HCI
+ */
+unsigned int ufshcd_mcq_queue_cfg_addr(struct ufs_hba *hba)
+{
+ return FIELD_GET(QCFGPTR, hba->mcq_capabilities) * 0x200;
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_queue_cfg_addr);
+
+/**
* ufshcd_mcq_decide_queue_depth - decide the queue depth
* @hba: per adapter instance
*
@@ -125,7 +137,6 @@ struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
*
* MAC - Max. Active Command of the Host Controller (HC)
* HC won't send more than this many commands to the device.
- * It is mandatory to implement get_hba_mac() to enable MCQ mode.
* Calculates and adjusts the queue depth based on the depth
* supported by the HC and ufs device.
*/
@@ -133,12 +144,21 @@ int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
{
int mac;
- /* Mandatory to implement get_hba_mac() */
- mac = ufshcd_mcq_vops_get_hba_mac(hba);
- if (mac < 0) {
- dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
- return mac;
+ if (!hba->vops || !hba->vops->get_hba_mac) {
+ /*
+ * Extract the maximum number of active transfer tasks value
+ * from the host controller capabilities register. This value is
+ * 0-based.
+ */
+ hba->capabilities =
+ ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
+ mac = hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_MCQ;
+ mac++;
+ } else {
+ mac = hba->vops->get_hba_mac(hba);
}
+ if (mac < 0)
+ goto err;
WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
/*
@@ -147,6 +167,10 @@ int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
* shared queuing architecture is enabled.
*/
return min_t(int, mac, hba->dev_info.bqueuedepth);
+
+err:
+ dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
+ return mac;
}
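
With the get_hba_mac() vop now optional, the fallback derives MAC straight from the controller capabilities register; the register field is 0-based, hence the increment. A condensed sketch of the resulting decision, using MASK_TRANSFER_REQUESTS_SLOTS_MCQ as in the hunk above:

static int decide_queue_depth_sketch(u32 capabilities, int bqueuedepth)
{
	int mac = (capabilities & MASK_TRANSFER_REQUESTS_SLOTS_MCQ) + 1;

	/* The device depth caps the HC depth under shared queuing. */
	return min(mac, bqueuedepth);
}
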
static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
@@ -166,6 +190,15 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
return -EOPNOTSUPP;
}
+ /*
+ * Device should support at least one I/O queue to handle device
+ * commands via hba->dev_cmd_queue.
+ */
+ if (hba_maxq == poll_queues) {
+ dev_err(hba->dev, "At least one non-poll queue required\n");
+ return -EOPNOTSUPP;
+ }
+
rem = hba_maxq;
if (rw_queues) {
@@ -228,12 +261,6 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
return 0;
}
-
-/* Operation and runtime registers configuration */
-#define MCQ_CFG_n(r, i) ((r) + MCQ_QCFG_SIZE * (i))
-#define MCQ_OPR_OFFSET_n(p, i) \
- (hba->mcq_opr[(p)].offset + hba->mcq_opr[(p)].stride * (i))
-
static void __iomem *mcq_opr_base(struct ufs_hba *hba,
enum ufshcd_mcq_opr n, int i)
{
@@ -258,9 +285,7 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_write_cqis);
* Current MCQ specification doesn't provide a Task Tag or its equivalent in
* the Completion Queue Entry. Find the Task Tag using an indirect method.
*/
-static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
- struct ufs_hw_queue *hwq,
- struct cq_entry *cqe)
+static int ufshcd_mcq_get_tag(struct ufs_hba *hba, struct cq_entry *cqe)
{
u64 addr;
@@ -278,7 +303,7 @@ static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
struct ufs_hw_queue *hwq)
{
struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
- int tag = ufshcd_mcq_get_tag(hba, hwq, cqe);
+ int tag = ufshcd_mcq_get_tag(hba, cqe);
if (cqe->command_desc_base_addr) {
ufshcd_compl_one_cqe(hba, tag, cqe);
@@ -340,29 +365,29 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
/* Submission Queue Lower Base Address */
ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr),
- MCQ_CFG_n(REG_SQLBA, i));
+ ufshcd_mcq_cfg_offset(REG_SQLBA, i));
/* Submission Queue Upper Base Address */
ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
- MCQ_CFG_n(REG_SQUBA, i));
+ ufshcd_mcq_cfg_offset(REG_SQUBA, i));
/* Submission Queue Doorbell Address Offset */
- ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i),
- MCQ_CFG_n(REG_SQDAO, i));
+ ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQD, i),
+ ufshcd_mcq_cfg_offset(REG_SQDAO, i));
/* Submission Queue Interrupt Status Address Offset */
- ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i),
- MCQ_CFG_n(REG_SQISAO, i));
+ ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQIS, i),
+ ufshcd_mcq_cfg_offset(REG_SQISAO, i));
/* Completion Queue Lower Base Address */
ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr),
- MCQ_CFG_n(REG_CQLBA, i));
+ ufshcd_mcq_cfg_offset(REG_CQLBA, i));
/* Completion Queue Upper Base Address */
ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr),
- MCQ_CFG_n(REG_CQUBA, i));
+ ufshcd_mcq_cfg_offset(REG_CQUBA, i));
/* Completion Queue Doorbell Address Offset */
- ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQD, i),
- MCQ_CFG_n(REG_CQDAO, i));
+ ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQD, i),
+ ufshcd_mcq_cfg_offset(REG_CQDAO, i));
/* Completion Queue Interrupt Status Address Offset */
- ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQIS, i),
- MCQ_CFG_n(REG_CQISAO, i));
+ ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQIS, i),
+ ufshcd_mcq_cfg_offset(REG_CQISAO, i));
/* Save the base addresses for quicker access */
hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP;
@@ -379,7 +404,7 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
/* Completion Queue Enable|Size to Completion Queue Attribute */
ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize,
- MCQ_CFG_n(REG_CQATTR, i));
+ ufshcd_mcq_cfg_offset(REG_CQATTR, i));
/*
* Submission Queue Enable|Size|Completion Queue ID to
@@ -387,11 +412,24 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
*/
ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize |
(i << QUEUE_ID_OFFSET),
- MCQ_CFG_n(REG_SQATTR, i));
+ ufshcd_mcq_cfg_offset(REG_SQATTR, i));
}
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational);
+void ufshcd_mcq_enable(struct ufs_hba *hba)
+{
+ ufshcd_rmwl(hba, MCQ_MODE_SELECT, MCQ_MODE_SELECT, REG_UFS_MEM_CFG);
+ hba->mcq_enabled = true;
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_enable);
+
+void ufshcd_mcq_disable(struct ufs_hba *hba)
+{
+ ufshcd_rmwl(hba, MCQ_MODE_SELECT, 0, REG_UFS_MEM_CFG);
+ hba->mcq_enabled = false;
+}
+
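
The new ufshcd_mcq_enable()/ufshcd_mcq_disable() pair relies on ufshcd_rmwl() performing a masked read-modify-write of REG_UFS_MEM_CFG. A sketch of that semantics, assuming the usual readl()/writel() MMIO accessors:

static inline void rmwl_sketch(void __iomem *reg, u32 mask, u32 val)
{
	u32 tmp = readl(reg);

	tmp &= ~mask;			/* clear the field */
	tmp |= (val & mask);		/* set only bits inside @mask */
	writel(tmp, reg);
}
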
void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
{
ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2,
@@ -511,6 +549,8 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
if (!cmd)
return -EINVAL;
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+ if (!hwq)
+ return 0;
} else {
hwq = hba->dev_cmd_queue;
}
@@ -529,17 +569,22 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id);
writel(nexus, opr_sqd_base + REG_SQCTI);
- /* SQRTCy.ICU = 1 */
- writel(SQ_ICU, opr_sqd_base + REG_SQRTC);
+ /* Initiate Cleanup */
+ writel(readl(opr_sqd_base + REG_SQRTC) | SQ_ICU,
+ opr_sqd_base + REG_SQRTC);
- /* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */
+ /* Wait until SQRTSy.CUS = 1. Report SQRTSy.RTC. */
reg = opr_sqd_base + REG_SQRTS;
err = read_poll_timeout(readl, val, val & SQ_CUS, 20,
MCQ_POLL_US, false, reg);
if (err)
- dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%ld\n",
- __func__, id, task_tag,
- FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)));
+ dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%d\n",
+ __func__, id, task_tag, err);
+ else
+ dev_info(hba->dev,
+ "%s, hwq %d: cleanup return code (RTC) %ld\n",
+ __func__, id,
+ FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)));
if (ufshcd_mcq_sq_start(hba, hwq))
err = -ETIMEDOUT;
@@ -597,8 +642,7 @@ static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA;
while (sq_head_slot != hwq->sq_tail_slot) {
- utrd = hwq->sqe_base_addr +
- sq_head_slot * sizeof(struct utp_transfer_req_desc);
+ utrd = hwq->sqe_base_addr + sq_head_slot;
match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA;
if (addr == match) {
ufshcd_mcq_nullify_sqe(utrd);
@@ -631,20 +675,20 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct ufs_hw_queue *hwq;
unsigned long flags;
- int err = FAILED;
+ int err;
if (!ufshcd_cmd_inflight(lrbp->cmd)) {
dev_err(hba->dev,
"%s: skip abort. cmd at tag %d already completed.\n",
__func__, tag);
- goto out;
+ return FAILED;
}
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip) {
dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
__func__, tag);
- goto out;
+ return FAILED;
}
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
@@ -656,7 +700,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
*/
dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
__func__, hwq->id, tag);
- goto out;
+ return FAILED;
}
/*
@@ -664,18 +708,17 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
* in the completion queue either. Query the device to see if
* the command is being processed in the device.
*/
- if (ufshcd_try_to_abort_task(hba, tag)) {
+ err = ufshcd_try_to_abort_task(hba, tag);
+ if (err) {
dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
lrbp->req_abort_skip = true;
- goto out;
+ return FAILED;
}
- err = SUCCESS;
spin_lock_irqsave(&hwq->cq_lock, flags);
if (ufshcd_cmd_inflight(lrbp->cmd))
ufshcd_release_scsi_cmd(hba, lrbp);
spin_unlock_irqrestore(&hwq->cq_lock, flags);
-out:
- return err;
+ return SUCCESS;
}
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index e6d12289e017..3438269a5440 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -4,7 +4,7 @@
#include <linux/err.h>
#include <linux/string.h>
#include <linux/bitfield.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <ufs/ufs.h>
#include <ufs/unipro.h>
@@ -198,6 +198,24 @@ static u32 ufshcd_us_to_ahit(unsigned int timer)
FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
}
+static int ufshcd_read_hci_reg(struct ufs_hba *hba, u32 *val, unsigned int reg)
+{
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ up(&hba->host_sem);
+ return -EBUSY;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+ ufshcd_hold(hba);
+ *val = ufshcd_readl(hba, reg);
+ ufshcd_release(hba);
+ ufshcd_rpm_put_sync(hba);
+
+ up(&hba->host_sem);
+ return 0;
+}
+
static ssize_t auto_hibern8_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -208,23 +226,11 @@ static ssize_t auto_hibern8_show(struct device *dev,
if (!ufshcd_is_auto_hibern8_supported(hba))
return -EOPNOTSUPP;
- down(&hba->host_sem);
- if (!ufshcd_is_user_access_allowed(hba)) {
- ret = -EBUSY;
- goto out;
- }
-
- pm_runtime_get_sync(hba->dev);
- ufshcd_hold(hba);
- ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
- ufshcd_release(hba);
- pm_runtime_put_sync(hba->dev);
-
- ret = sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit));
+ ret = ufshcd_read_hci_reg(hba, &ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ if (ret)
+ return ret;
-out:
- up(&hba->host_sem);
- return ret;
+ return sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit));
}
static ssize_t auto_hibern8_store(struct device *dev,
@@ -405,6 +411,53 @@ static ssize_t wb_flush_threshold_store(struct device *dev,
return count;
}
+/**
+ * pm_qos_enable_show - sysfs handler to show pm qos enable value
+ * @dev: device associated with the UFS controller
+ * @attr: sysfs attribute handle
+ * @buf: buffer for sysfs file
+ *
+ * Print 1 if PM QoS feature is enabled, 0 if disabled.
+ *
+ * Return: number of characters written to @buf.
+ */
+static ssize_t pm_qos_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", hba->pm_qos_enabled);
+}
+
+/**
+ * pm_qos_enable_store - sysfs handler to store value
+ * @dev: device associated with the UFS controller
+ * @attr: sysfs attribute handle
+ * @buf: buffer for sysfs file
+ * @count: number of characters in @buf
+ *
+ * Write 0 to disable PM QoS, 1 to enable it.
+ * Default state: 1
+ *
+ * Return: @count on success, a negative error code upon failure.
+ */
+static ssize_t pm_qos_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ bool value;
+
+ if (kstrtobool(buf, &value))
+ return -EINVAL;
+
+ if (value)
+ ufshcd_pm_qos_init(hba);
+ else
+ ufshcd_pm_qos_exit(hba);
+
+ return count;
+}
+
static DEVICE_ATTR_RW(rpm_lvl);
static DEVICE_ATTR_RO(rpm_target_dev_state);
static DEVICE_ATTR_RO(rpm_target_link_state);
@@ -416,6 +469,7 @@ static DEVICE_ATTR_RW(wb_on);
static DEVICE_ATTR_RW(enable_wb_buf_flush);
static DEVICE_ATTR_RW(wb_flush_threshold);
static DEVICE_ATTR_RW(rtc_update_ms);
+static DEVICE_ATTR_RW(pm_qos_enable);
static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rpm_lvl.attr,
@@ -429,6 +483,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_enable_wb_buf_flush.attr,
&dev_attr_wb_flush_threshold.attr,
&dev_attr_rtc_update_ms.attr,
+ &dev_attr_pm_qos_enable.attr,
NULL
};
@@ -470,6 +525,58 @@ static const struct attribute_group ufs_sysfs_capabilities_group = {
.attrs = ufs_sysfs_capabilities_attrs,
};
+static ssize_t version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "0x%x\n", hba->ufs_version);
+}
+
+static ssize_t product_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ u32 val;
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ ret = ufshcd_read_hci_reg(hba, &val, REG_CONTROLLER_PID);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "0x%x\n", val);
+}
+
+static ssize_t man_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ u32 val;
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ ret = ufshcd_read_hci_reg(hba, &val, REG_CONTROLLER_MID);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "0x%x\n", val);
+}
+
+static DEVICE_ATTR_RO(version);
+static DEVICE_ATTR_RO(product_id);
+static DEVICE_ATTR_RO(man_id);
+
+static struct attribute *ufs_sysfs_ufshci_cap_attrs[] = {
+ &dev_attr_version.attr,
+ &dev_attr_product_id.attr,
+ &dev_attr_man_id.attr,
+ NULL
+};
+
+static const struct attribute_group ufs_sysfs_ufshci_group = {
+ .name = "ufshci_capabilities",
+ .attrs = ufs_sysfs_ufshci_cap_attrs,
+};
+
static ssize_t monitor_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -563,6 +670,9 @@ static ssize_t read_req_latency_avg_show(struct device *dev,
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_hba_monitor *m = &hba->monitor;
+ if (!m->nr_req[READ])
+ return sysfs_emit(buf, "0\n");
+
return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[READ]),
m->nr_req[READ]));
}
@@ -630,6 +740,9 @@ static ssize_t write_req_latency_avg_show(struct device *dev,
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_hba_monitor *m = &hba->monitor;
+ if (!m->nr_req[WRITE])
+ return sysfs_emit(buf, "0\n");
+
return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[WRITE]),
m->nr_req[WRITE]));
}
@@ -1291,6 +1404,81 @@ static const struct attribute_group ufs_sysfs_flags_group = {
.attrs = ufs_sysfs_device_flags,
};
+static ssize_t max_number_of_rtt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 rtt;
+ int ret;
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ up(&hba->host_sem);
+ return -EBUSY;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt);
+ ufshcd_rpm_put_sync(hba);
+
+ if (ret)
+ goto out;
+
+ ret = sysfs_emit(buf, "0x%08X\n", rtt);
+
+out:
+ up(&hba->host_sem);
+ return ret;
+}
+
+static ssize_t max_number_of_rtt_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+ struct scsi_device *sdev;
+ unsigned int memflags;
+ unsigned int rtt;
+ int ret;
+
+ if (kstrtouint(buf, 0, &rtt))
+ return -EINVAL;
+
+ if (rtt > dev_info->rtt_cap) {
+ dev_err(dev, "rtt can be at most bDeviceRTTCap\n");
+ return -EINVAL;
+ }
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+
+ memflags = memalloc_noio_save();
+ shost_for_each_device(sdev, hba->host)
+ blk_mq_freeze_queue_nomemsave(sdev->request_queue);
+
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt);
+
+ shost_for_each_device(sdev, hba->host)
+ blk_mq_unfreeze_queue_nomemrestore(sdev->request_queue);
+ memalloc_noio_restore(memflags);
+
+ ufshcd_rpm_put_sync(hba);
+
+out:
+ up(&hba->host_sem);
+ return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR_RW(max_number_of_rtt);
+
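
max_number_of_rtt_store() freezes every SCSI device queue before changing bMaxNumOfRTT so that no requests are in flight during the attribute write, and memalloc_noio_save() keeps reclaim from issuing I/O to the frozen queues in the meantime. The pattern, condensed into a hedged helper sketch built from the calls in the hunk above:

/* Hedged sketch: run @update while all request queues are frozen. */
static int update_while_frozen(struct ufs_hba *hba,
			       int (*update)(struct ufs_hba *))
{
	struct scsi_device *sdev;
	unsigned int memflags = memalloc_noio_save();
	int ret;

	shost_for_each_device(sdev, hba->host)
		blk_mq_freeze_queue_nomemsave(sdev->request_queue);

	ret = update(hba);	/* no I/O can race with the change here */

	shost_for_each_device(sdev, hba->host)
		blk_mq_unfreeze_queue_nomemrestore(sdev->request_queue);
	memalloc_noio_restore(memflags);

	return ret;
}
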
static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
{
return idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS &&
@@ -1338,7 +1526,6 @@ UFS_ATTRIBUTE(max_data_in_size, _MAX_DATA_IN);
UFS_ATTRIBUTE(max_data_out_size, _MAX_DATA_OUT);
UFS_ATTRIBUTE(reference_clock_frequency, _REF_CLK_FREQ);
UFS_ATTRIBUTE(configuration_descriptor_lock, _CONF_DESC_LOCK);
-UFS_ATTRIBUTE(max_number_of_rtt, _MAX_NUM_OF_RTT);
UFS_ATTRIBUTE(exception_event_control, _EE_CONTROL);
UFS_ATTRIBUTE(exception_event_status, _EE_STATUS);
UFS_ATTRIBUTE(ffu_status, _FFU_STATUS);
@@ -1382,6 +1569,7 @@ static const struct attribute_group ufs_sysfs_attributes_group = {
static const struct attribute_group *ufs_sysfs_groups[] = {
&ufs_sysfs_default_group,
&ufs_sysfs_capabilities_group,
+ &ufs_sysfs_ufshci_group,
&ufs_sysfs_monitor_group,
&ufs_sysfs_power_info_group,
&ufs_sysfs_device_descriptor_group,
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index 374e5aae4e7e..252186124669 100644
--- a/drivers/ufs/core/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -170,7 +170,7 @@ static int ufs_bsg_request(struct bsg_job *job)
break;
case UPIU_TRANSACTION_UIC_CMD:
memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
- ret = ufshcd_send_uic_cmd(hba, &uc);
+ ret = ufshcd_send_bsg_uic_cmd(hba, &uc);
if (ret)
dev_err(hba->dev, "send uic cmd: error code %d\n", ret);
@@ -194,10 +194,12 @@ out:
ufshcd_rpm_put_sync(hba);
kfree(buff);
bsg_reply->result = ret;
- job->reply_len = !rpmb ? sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply);
/* complete the job here only if no error */
- if (ret == 0)
+ if (ret == 0) {
+ job->reply_len = rpmb ? sizeof(struct ufs_rpmb_reply) :
+ sizeof(struct ufs_bsg_reply);
bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
+ }
return ret;
}
@@ -216,6 +218,7 @@ void ufs_bsg_remove(struct ufs_hba *hba)
return;
bsg_remove_queue(hba->bsg_queue);
+ hba->bsg_queue = NULL;
device_del(bsg_dev);
put_device(bsg_dev);
@@ -253,9 +256,11 @@ int ufs_bsg_probe(struct ufs_hba *hba)
if (ret)
goto out;
- q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, NULL, 0);
+ q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), NULL, ufs_bsg_request,
+ NULL, 0);
if (IS_ERR(q)) {
ret = PTR_ERR(q);
+ device_del(bsg_dev);
goto out;
}
diff --git a/drivers/ufs/core/ufs_trace.h b/drivers/ufs/core/ufs_trace.h
new file mode 100644
index 000000000000..84deca2b841d
--- /dev/null
+++ b/drivers/ufs/core/ufs_trace.h
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ufs
+
+#if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_UFS_H
+
+#include <ufs/ufs.h>
+#include <linux/tracepoint.h>
+
+#define str_opcode(opcode) \
+ __print_symbolic(opcode, \
+ { WRITE_16, "WRITE_16" }, \
+ { WRITE_10, "WRITE_10" }, \
+ { READ_16, "READ_16" }, \
+ { READ_10, "READ_10" }, \
+ { SYNCHRONIZE_CACHE, "SYNC" }, \
+ { UNMAP, "UNMAP" })
+
+#define UFS_LINK_STATES \
+ EM(UIC_LINK_OFF_STATE, "UIC_LINK_OFF_STATE") \
+ EM(UIC_LINK_ACTIVE_STATE, "UIC_LINK_ACTIVE_STATE") \
+ EMe(UIC_LINK_HIBERN8_STATE, "UIC_LINK_HIBERN8_STATE")
+
+#define UFS_PWR_MODES \
+ EM(UFS_ACTIVE_PWR_MODE, "UFS_ACTIVE_PWR_MODE") \
+ EM(UFS_SLEEP_PWR_MODE, "UFS_SLEEP_PWR_MODE") \
+ EM(UFS_POWERDOWN_PWR_MODE, "UFS_POWERDOWN_PWR_MODE") \
+ EMe(UFS_DEEPSLEEP_PWR_MODE, "UFS_DEEPSLEEP_PWR_MODE")
+
+#define UFSCHD_CLK_GATING_STATES \
+ EM(CLKS_OFF, "CLKS_OFF") \
+ EM(CLKS_ON, "CLKS_ON") \
+ EM(REQ_CLKS_OFF, "REQ_CLKS_OFF") \
+ EMe(REQ_CLKS_ON, "REQ_CLKS_ON")
+
+#define UFS_CMD_TRACE_STRINGS \
+ EM(UFS_CMD_SEND, "send_req") \
+ EM(UFS_CMD_COMP, "complete_rsp") \
+ EM(UFS_DEV_COMP, "dev_complete") \
+ EM(UFS_QUERY_SEND, "query_send") \
+ EM(UFS_QUERY_COMP, "query_complete") \
+ EM(UFS_QUERY_ERR, "query_complete_err") \
+ EM(UFS_TM_SEND, "tm_send") \
+ EM(UFS_TM_COMP, "tm_complete") \
+ EMe(UFS_TM_ERR, "tm_complete_err")
+
+#define UFS_CMD_TRACE_TSF_TYPES \
+ EM(UFS_TSF_CDB, "CDB") \
+ EM(UFS_TSF_OSF, "OSF") \
+ EM(UFS_TSF_TM_INPUT, "TM_INPUT") \
+ EMe(UFS_TSF_TM_OUTPUT, "TM_OUTPUT")
+
+/* Enums require being exported to userspace, for user tool parsing */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+UFS_LINK_STATES;
+UFS_PWR_MODES;
+UFSCHD_CLK_GATING_STATES;
+UFS_CMD_TRACE_STRINGS
+UFS_CMD_TRACE_TSF_TYPES
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
+
+#define show_ufs_cmd_trace_str(str_t) \
+ __print_symbolic(str_t, UFS_CMD_TRACE_STRINGS)
+#define show_ufs_cmd_trace_tsf(tsf) \
+ __print_symbolic(tsf, UFS_CMD_TRACE_TSF_TYPES)
+
+TRACE_EVENT(ufshcd_clk_gating,
+
+ TP_PROTO(const char *dev_name, int state),
+
+ TP_ARGS(dev_name, state),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name)
+ __field(int, state)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name);
+ __entry->state = state;
+ ),
+
+ TP_printk("%s: gating state changed to %s",
+ __get_str(dev_name),
+ __print_symbolic(__entry->state, UFSCHD_CLK_GATING_STATES))
+);
+
+TRACE_EVENT(ufshcd_clk_scaling,
+
+ TP_PROTO(const char *dev_name, const char *state, const char *clk,
+ u32 prev_state, u32 curr_state),
+
+ TP_ARGS(dev_name, state, clk, prev_state, curr_state),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name)
+ __string(state, state)
+ __string(clk, clk)
+ __field(u32, prev_state)
+ __field(u32, curr_state)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name);
+ __assign_str(state);
+ __assign_str(clk);
+ __entry->prev_state = prev_state;
+ __entry->curr_state = curr_state;
+ ),
+
+ TP_printk("%s: %s %s from %u to %u Hz",
+ __get_str(dev_name), __get_str(state), __get_str(clk),
+ __entry->prev_state, __entry->curr_state)
+);
+
+TRACE_EVENT(ufshcd_auto_bkops_state,
+
+ TP_PROTO(const char *dev_name, const char *state),
+
+ TP_ARGS(dev_name, state),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name)
+ __string(state, state)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name);
+ __assign_str(state);
+ ),
+
+ TP_printk("%s: auto bkops - %s",
+ __get_str(dev_name), __get_str(state))
+);
+
+DECLARE_EVENT_CLASS(ufshcd_profiling_template,
+ TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+ int err),
+
+ TP_ARGS(dev_name, profile_info, time_us, err),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name)
+ __string(profile_info, profile_info)
+ __field(s64, time_us)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name);
+ __assign_str(profile_info);
+ __entry->time_us = time_us;
+ __entry->err = err;
+ ),
+
+ TP_printk("%s: %s: took %lld usecs, err %d",
+ __get_str(dev_name), __get_str(profile_info),
+ __entry->time_us, __entry->err)
+);
+
+DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_hibern8,
+ TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+ int err),
+ TP_ARGS(dev_name, profile_info, time_us, err));
+
+DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_gating,
+ TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+ int err),
+ TP_ARGS(dev_name, profile_info, time_us, err));
+
+DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_scaling,
+ TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+ int err),
+ TP_ARGS(dev_name, profile_info, time_us, err));
+
+DECLARE_EVENT_CLASS(ufshcd_template,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state),
+
+ TP_STRUCT__entry(
+ __field(s64, usecs)
+ __field(int, err)
+ __string(dev_name, dev_name)
+ __field(int, dev_state)
+ __field(int, link_state)
+ ),
+
+ TP_fast_assign(
+ __entry->usecs = usecs;
+ __entry->err = err;
+ __assign_str(dev_name);
+ __entry->dev_state = dev_state;
+ __entry->link_state = link_state;
+ ),
+
+ TP_printk(
+ "%s: took %lld usecs, dev_state: %s, link_state: %s, err %d",
+ __get_str(dev_name),
+ __entry->usecs,
+ __print_symbolic(__entry->dev_state, UFS_PWR_MODES),
+ __print_symbolic(__entry->link_state, UFS_LINK_STATES),
+ __entry->err
+ )
+);
+
+DEFINE_EVENT(ufshcd_template, ufshcd_system_suspend,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_system_resume,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_runtime_suspend,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_runtime_resume,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_init,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_wl_suspend,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_wl_resume,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_suspend,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+TRACE_EVENT(ufshcd_command,
+ TP_PROTO(struct scsi_device *sdev, enum ufs_trace_str_t str_t,
+ unsigned int tag, u32 doorbell, u32 hwq_id, int transfer_len,
+ u32 intr, u64 lba, u8 opcode, u8 group_id),
+
+ TP_ARGS(sdev, str_t, tag, doorbell, hwq_id, transfer_len, intr, lba,
+ opcode, group_id),
+
+ TP_STRUCT__entry(
+ __field(struct scsi_device *, sdev)
+ __field(enum ufs_trace_str_t, str_t)
+ __field(unsigned int, tag)
+ __field(u32, doorbell)
+ __field(u32, hwq_id)
+ __field(u32, intr)
+ __field(u64, lba)
+ __field(int, transfer_len)
+ __field(u8, opcode)
+ __field(u8, group_id)
+ ),
+
+ TP_fast_assign(
+ __entry->sdev = sdev;
+ __entry->str_t = str_t;
+ __entry->tag = tag;
+ __entry->doorbell = doorbell;
+ __entry->hwq_id = hwq_id;
+ __entry->intr = intr;
+ __entry->lba = lba;
+ __entry->transfer_len = transfer_len;
+ __entry->opcode = opcode;
+ __entry->group_id = group_id;
+ ),
+
+ TP_printk(
+ "%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x (%s), group_id: 0x%x, hwq_id: %d",
+ show_ufs_cmd_trace_str(__entry->str_t),
+ dev_name(&__entry->sdev->sdev_dev), __entry->tag,
+ __entry->doorbell, __entry->transfer_len, __entry->intr,
+ __entry->lba, (u32)__entry->opcode, str_opcode(__entry->opcode),
+ (u32)__entry->group_id, __entry->hwq_id
+ )
+);
+
+TRACE_EVENT(ufshcd_uic_command,
+ TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, u32 cmd,
+ u32 arg1, u32 arg2, u32 arg3),
+
+ TP_ARGS(dev_name, str_t, cmd, arg1, arg2, arg3),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name)
+ __field(enum ufs_trace_str_t, str_t)
+ __field(u32, cmd)
+ __field(u32, arg1)
+ __field(u32, arg2)
+ __field(u32, arg3)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name);
+ __entry->str_t = str_t;
+ __entry->cmd = cmd;
+ __entry->arg1 = arg1;
+ __entry->arg2 = arg2;
+ __entry->arg3 = arg3;
+ ),
+
+ TP_printk(
+ "%s: %s: cmd: 0x%x, arg1: 0x%x, arg2: 0x%x, arg3: 0x%x",
+ show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
+ __entry->cmd, __entry->arg1, __entry->arg2, __entry->arg3
+ )
+);
+
+TRACE_EVENT(ufshcd_upiu,
+ TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, void *hdr,
+ void *tsf, enum ufs_trace_tsf_t tsf_t),
+
+ TP_ARGS(dev_name, str_t, hdr, tsf, tsf_t),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name)
+ __field(enum ufs_trace_str_t, str_t)
+ __array(unsigned char, hdr, 12)
+ __array(unsigned char, tsf, 16)
+ __field(enum ufs_trace_tsf_t, tsf_t)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name);
+ __entry->str_t = str_t;
+ memcpy(__entry->hdr, hdr, sizeof(__entry->hdr));
+ memcpy(__entry->tsf, tsf, sizeof(__entry->tsf));
+ __entry->tsf_t = tsf_t;
+ ),
+
+ TP_printk(
+ "%s: %s: HDR:%s, %s:%s",
+ show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
+ __print_hex(__entry->hdr, sizeof(__entry->hdr)),
+ show_ufs_cmd_trace_tsf(__entry->tsf_t),
+ __print_hex(__entry->tsf, sizeof(__entry->tsf))
+ )
+);
+
+TRACE_EVENT(ufshcd_exception_event,
+
+ TP_PROTO(const char *dev_name, u16 status),
+
+ TP_ARGS(dev_name, status),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name)
+ __field(u16, status)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name);
+ __entry->status = status;
+ ),
+
+ TP_printk("%s: status 0x%x",
+ __get_str(dev_name), __entry->status
+ )
+);
+
+#endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/ufs/core
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE ufs_trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
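
The EM()/EMe() dance in this header is a standard two-pass X-macro: the first expansion emits TRACE_DEFINE_ENUM() so user tools can resolve the enum values, and the second emits {value, "name"} pairs for __print_symbolic(). A standalone userspace illustration of the same trick:

#include <stdio.h>

#define COLORS \
	EM(RED,   "red")   \
	EM(GREEN, "green") \
	EMe(BLUE, "blue")

/* Pass 1: generate the enum values. */
#define EM(a, b) a,
#define EMe(a, b) a
enum color { COLORS };
#undef EM
#undef EMe

/* Pass 2: generate { value, string } pairs. */
#define EM(a, b) { a, b },
#define EMe(a, b) { a, b }
static const struct { int v; const char *s; } names[] = { COLORS };
#undef EM
#undef EMe

int main(void)
{
	for (unsigned i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%d -> %s\n", names[i].v, names[i].s);
	return 0;
}
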
diff --git a/drivers/ufs/core/ufshcd-crypto.c b/drivers/ufs/core/ufshcd-crypto.c
index f2c4422cab86..694ff7578fc1 100644
--- a/drivers/ufs/core/ufshcd-crypto.c
+++ b/drivers/ufs/core/ufshcd-crypto.c
@@ -17,20 +17,14 @@ static const struct ufs_crypto_alg_entry {
},
};
-static int ufshcd_program_key(struct ufs_hba *hba,
- const union ufs_crypto_cfg_entry *cfg, int slot)
+static void ufshcd_program_key(struct ufs_hba *hba,
+ const union ufs_crypto_cfg_entry *cfg, int slot)
{
int i;
u32 slot_offset = hba->crypto_cfg_register + slot * sizeof(*cfg);
- int err = 0;
ufshcd_hold(hba);
- if (hba->vops && hba->vops->program_key) {
- err = hba->vops->program_key(hba, cfg, slot);
- goto out;
- }
-
/* Ensure that CFGE is cleared before programming the key */
ufshcd_writel(hba, 0, slot_offset + 16 * sizeof(cfg->reg_val[0]));
for (i = 0; i < 16; i++) {
@@ -43,17 +37,14 @@ static int ufshcd_program_key(struct ufs_hba *hba,
/* Dword 16 must be written last */
ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[16]),
slot_offset + 16 * sizeof(cfg->reg_val[0]));
-out:
ufshcd_release(hba);
- return err;
}
static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
- struct ufs_hba *hba =
- container_of(profile, struct ufs_hba, crypto_profile);
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
const union ufs_crypto_cap_entry *ccap_array = hba->crypto_cap_array;
const struct ufs_crypto_alg_entry *alg =
&ufs_crypto_algs[key->crypto_cfg.crypto_mode];
@@ -61,7 +52,6 @@ static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile,
int i;
int cap_idx = -1;
union ufs_crypto_cfg_entry cfg = {};
- int err;
BUILD_BUG_ON(UFS_CRYPTO_KEY_SIZE_INVALID != 0);
for (i = 0; i < hba->crypto_capabilities.num_crypto_cap; i++) {
@@ -89,33 +79,31 @@ static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile,
memcpy(cfg.crypto_key, key->raw, key->size);
}
- err = ufshcd_program_key(hba, &cfg, slot);
+ ufshcd_program_key(hba, &cfg, slot);
memzero_explicit(&cfg, sizeof(cfg));
- return err;
+ return 0;
}
-static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot)
+static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot)
{
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
/*
* Clear the crypto cfg on the device. Clearing CFGE
* might not be sufficient, so just clear the entire cfg.
*/
union ufs_crypto_cfg_entry cfg = {};
- return ufshcd_program_key(hba, &cfg, slot);
-}
-
-static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile,
- const struct blk_crypto_key *key,
- unsigned int slot)
-{
- struct ufs_hba *hba =
- container_of(profile, struct ufs_hba, crypto_profile);
-
- return ufshcd_clear_keyslot(hba, slot);
+ ufshcd_program_key(hba, &cfg, slot);
+ return 0;
}
+/*
+ * Reprogram the keyslots if needed, and return true if CRYPTO_GENERAL_ENABLE
+ * should be used in the host controller initialization sequence.
+ */
bool ufshcd_crypto_enable(struct ufs_hba *hba)
{
if (!(hba->caps & UFSHCD_CAP_CRYPTO))
@@ -123,6 +111,10 @@ bool ufshcd_crypto_enable(struct ufs_hba *hba)
/* Reset might clear all keys, so reprogram all the keys. */
blk_crypto_reprogram_all_keys(&hba->crypto_profile);
+
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE)
+ return false;
+
return true;
}
@@ -159,6 +151,9 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
int err = 0;
enum blk_crypto_mode_num blk_mode_num;
+ if (hba->quirks & UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE)
+ return 0;
+
/*
* Don't use crypto if either the hardware doesn't advertise the
* standard crypto capability bit *or* if the vendor specific driver
@@ -228,9 +223,10 @@ void ufshcd_init_crypto(struct ufs_hba *hba)
if (!(hba->caps & UFSHCD_CAP_CRYPTO))
return;
- /* Clear all keyslots - the number of keyslots is (CFGC + 1) */
- for (slot = 0; slot < hba->crypto_capabilities.config_count + 1; slot++)
- ufshcd_clear_keyslot(hba, slot);
+ /* Clear all keyslots. */
+ for (slot = 0; slot < hba->crypto_profile.num_slots; slot++)
+ hba->crypto_profile.ll_ops.keyslot_evict(&hba->crypto_profile,
+ NULL, slot);
}
void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q)
diff --git a/drivers/ufs/core/ufshcd-crypto.h b/drivers/ufs/core/ufshcd-crypto.h
index be8596f20ba2..89bb97c14c15 100644
--- a/drivers/ufs/core/ufshcd-crypto.h
+++ b/drivers/ufs/core/ufshcd-crypto.h
@@ -37,6 +37,33 @@ ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp,
h->dunu = cpu_to_le32(upper_32_bits(lrbp->data_unit_num));
}
+static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp)
+{
+ struct scsi_cmnd *cmd = lrbp->cmd;
+ const struct bio_crypt_ctx *crypt_ctx = scsi_cmd_to_rq(cmd)->crypt_ctx;
+
+ if (crypt_ctx && hba->vops && hba->vops->fill_crypto_prdt)
+ return hba->vops->fill_crypto_prdt(hba, crypt_ctx,
+ lrbp->ucd_prdt_ptr,
+ scsi_sg_count(cmd));
+ return 0;
+}
+
+static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp)
+{
+ if (!(hba->quirks & UFSHCD_QUIRK_KEYS_IN_PRDT))
+ return;
+
+ if (!(scsi_cmd_to_rq(lrbp->cmd)->crypt_ctx))
+ return;
+
+ /* Zeroize the PRDT because it can contain cryptographic keys. */
+ memzero_explicit(lrbp->ucd_prdt_ptr,
+ ufshcd_sg_entry_size(hba) * scsi_sg_count(lrbp->cmd));
+}
+
bool ufshcd_crypto_enable(struct ufs_hba *hba);
int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba);
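
ufshcd_crypto_clear_prdt() above uses memzero_explicit() rather than memset() because a compiler may elide a memset() whose target is never read again; the _explicit variant adds a barrier so key material really is wiped. A rough userspace equivalent of that barrier, for illustration only:

#include <string.h>

static void wipe_sketch(void *buf, size_t len)
{
	memset(buf, 0, len);
	/* Forbid the optimizer from treating the memset as a dead store. */
	__asm__ __volatile__("" : : "r"(buf) : "memory");
}
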
@@ -54,6 +81,15 @@ static inline void
ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp,
struct request_desc_header *h) { }
+static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp)
+{
+ return 0;
+}
+
+static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp) { }
+
static inline bool ufshcd_crypto_enable(struct ufs_hba *hba)
{
return false;
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index f42d99ce5bf1..786f20ef2238 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -64,16 +64,11 @@ void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
struct cq_entry *cqe);
int ufshcd_mcq_init(struct ufs_hba *hba);
+void ufshcd_mcq_disable(struct ufs_hba *hba);
int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
-void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
-void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
-u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
-void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
struct request *req);
-unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
- struct ufs_hw_queue *hwq);
void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
struct ufs_hw_queue *hwq);
bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd);
@@ -89,6 +84,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
u8 **buf, bool ascii);
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
+int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
struct utp_upiu_req *req_upiu,
@@ -241,12 +237,6 @@ static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
hba->vops->config_scaling_param(hba, p, data);
}
-static inline void ufshcd_vops_reinit_notify(struct ufs_hba *hba)
-{
- if (hba->vops && hba->vops->reinit_notify)
- hba->vops->reinit_notify(hba);
-}
-
static inline int ufshcd_vops_mcq_config_resource(struct ufs_hba *hba)
{
if (hba->vops && hba->vops->mcq_config_resource)
@@ -255,14 +245,6 @@ static inline int ufshcd_vops_mcq_config_resource(struct ufs_hba *hba)
return -EOPNOTSUPP;
}
-static inline int ufshcd_mcq_vops_get_hba_mac(struct ufs_hba *hba)
-{
- if (hba->vops && hba->vops->get_hba_mac)
- return hba->vops->get_hba_mac(hba);
-
- return -EOPNOTSUPP;
-}
-
static inline int ufshcd_mcq_vops_op_runtime_config(struct ufs_hba *hba)
{
if (hba->vops && hba->vops->op_runtime_config)
@@ -329,6 +311,11 @@ static inline int ufshcd_rpm_get_sync(struct ufs_hba *hba)
return pm_runtime_get_sync(&hba->ufs_device_wlun->sdev_gendev);
}
+static inline int ufshcd_rpm_get_if_active(struct ufs_hba *hba)
+{
+ return pm_runtime_get_if_active(&hba->ufs_device_wlun->sdev_gendev);
+}
+
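
The new ufshcd_rpm_get_if_active() wrapper only takes a usage count when the WLUN is already runtime-active (pm_runtime_get_if_active() returns 1 in that case, 0 if inactive, negative on error). A hedged usage sketch:

static int touch_if_active_sketch(struct ufs_hba *hba)
{
	int ret = ufshcd_rpm_get_if_active(hba);

	if (ret <= 0)
		return ret;	/* inactive or error: no reference was taken */

	/* ... safe to access the device here ... */

	return ufshcd_rpm_put_sync(hba);
}
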
static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba)
{
return pm_runtime_put_sync(&hba->ufs_device_wlun->sdev_gendev);
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 3b89c9d4aa40..464f13da259a 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -36,10 +36,10 @@
#include "ufs-fault-injection.h"
#include "ufs_bsg.h"
#include "ufshcd-crypto.h"
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#define CREATE_TRACE_POINTS
-#include <trace/events/ufs.h>
+#include "ufs_trace.h"
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
@@ -51,8 +51,10 @@
/* UIC command timeout, unit: ms */
-#define UIC_CMD_TIMEOUT 500
-
+enum {
+ UIC_CMD_TIMEOUT_DEFAULT = 500,
+ UIC_CMD_TIMEOUT_MAX = 2000,
+};
/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES 10
/* Timeout after 50 msecs if NOP OUT hangs without response */
@@ -102,6 +104,9 @@
/* Default RTC update every 10 seconds */
#define UFS_RTC_UPDATE_INTERVAL_MS (10 * MSEC_PER_SEC)
+/* bMaxNumOfRTT is equal to two after device manufacturing */
+#define DEFAULT_MAX_NUM_RTT 2
+
/* UFSHC 4.0 compliant HC support this mode. */
static bool use_mcq_mode = true;
@@ -113,6 +118,23 @@ static bool is_mcq_supported(struct ufs_hba *hba)
module_param(use_mcq_mode, bool, 0644);
MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
+static unsigned int uic_cmd_timeout = UIC_CMD_TIMEOUT_DEFAULT;
+
+static int uic_cmd_timeout_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, UIC_CMD_TIMEOUT_DEFAULT,
+ UIC_CMD_TIMEOUT_MAX);
+}
+
+static const struct kernel_param_ops uic_cmd_timeout_ops = {
+ .set = uic_cmd_timeout_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(uic_cmd_timeout, &uic_cmd_timeout_ops, &uic_cmd_timeout, 0644);
+MODULE_PARM_DESC(uic_cmd_timeout,
+ "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 2 seconds inclusively");
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
@@ -161,8 +183,6 @@ EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
enum {
UFSHCD_MAX_CHANNEL = 0,
UFSHCD_MAX_ID = 1,
- UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
- UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
};
static const char *const ufshcd_state_name[] = {
@@ -238,10 +258,15 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
return UFS_PM_LVL_0;
}
+static bool ufshcd_has_pending_tasks(struct ufs_hba *hba)
+{
+ return hba->outstanding_tasks || hba->active_uic_cmd ||
+ hba->uic_async_done;
+}
+
static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
{
- return (hba->clk_gating.active_reqs || hba->outstanding_reqs || hba->outstanding_tasks ||
- hba->active_uic_cmd || hba->uic_async_done);
+ return scsi_host_busy(hba->host) || ufshcd_has_pending_tasks(hba);
}
static const struct ufs_dev_quirk ufs_fixups[] = {
@@ -278,6 +303,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
+static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
@@ -329,18 +355,6 @@ static void ufshcd_configure_wb(struct ufs_hba *hba)
ufshcd_wb_toggle_buf_flush(hba, true);
}
-static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
-{
- if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
- scsi_unblock_requests(hba->host);
-}
-
-static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
-{
- if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
- scsi_block_requests(hba->host);
-}
-
static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
enum ufs_trace_str_t str_t)
{
@@ -452,7 +466,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- if (is_mcq_enabled(hba)) {
+ if (hba->mcq_enabled) {
struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
hwq_id = hwq->id;
@@ -614,8 +628,8 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
- dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
- hba->outstanding_reqs, hba->outstanding_tasks);
+ dev_err(hba->dev, "%d outstanding reqs, tasks=0x%lx\n",
+ scsi_host_busy(hba->host), hba->outstanding_tasks);
dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
hba->saved_err, hba->saved_uic_err);
dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
@@ -719,25 +733,15 @@ EXPORT_SYMBOL_GPL(ufshcd_delay_us);
* Return: -ETIMEDOUT on error, zero on success.
*/
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
- u32 val, unsigned long interval_us,
- unsigned long timeout_ms)
+ u32 val, unsigned long interval_us,
+ unsigned long timeout_ms)
{
- int err = 0;
- unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
-
- /* ignore bits that we don't intend to wait on */
- val = val & mask;
+ u32 v;
- while ((ufshcd_readl(hba, reg) & mask) != val) {
- usleep_range(interval_us, interval_us + 50);
- if (time_after(jiffies, timeout)) {
- if ((ufshcd_readl(hba, reg) & mask) != val)
- err = -ETIMEDOUT;
- break;
- }
- }
+ val &= mask; /* ignore bits that we don't intend to wait on */
- return err;
+ return read_poll_timeout(ufshcd_readl, v, (v & mask) == val,
+ interval_us, timeout_ms * 1000, false, hba, reg);
}
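
The rewrite leans on the read_poll_timeout() contract from <linux/iopoll.h>: evaluate op(args...) into val until cond holds or timeout_us elapses, returning 0 on success and -ETIMEDOUT otherwise. For instance, waiting for a single bit with the same accessor:

static int wait_bit_set_sketch(struct ufs_hba *hba, u32 reg, u32 bit)
{
	u32 v;

	/* Poll every 500 us, give up after one second. */
	return read_poll_timeout(ufshcd_readl, v, v & bit,
				 500, USEC_PER_SEC, false, hba, reg);
}
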
/**
@@ -748,8 +752,6 @@ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
*/
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
- if (hba->ufs_version == ufshci_version(1, 0))
- return INTERRUPT_MASK_ALL_VER_10;
if (hba->ufs_version <= ufshci_version(2, 0))
return INTERRUPT_MASK_ALL_VER_11;
@@ -990,28 +992,46 @@ bool ufshcd_is_hba_active(struct ufs_hba *hba)
}
EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
-u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
+/**
+ * ufshcd_pm_qos_init - initialize PM QoS request
+ * @hba: per adapter instance
+ */
+void ufshcd_pm_qos_init(struct ufs_hba *hba)
{
- /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
- if (hba->ufs_version <= ufshci_version(1, 1))
- return UFS_UNIPRO_VER_1_41;
- else
- return UFS_UNIPRO_VER_1_6;
+
+ if (hba->pm_qos_enabled)
+ return;
+
+ cpu_latency_qos_add_request(&hba->pm_qos_req, PM_QOS_DEFAULT_VALUE);
+
+ if (cpu_latency_qos_request_active(&hba->pm_qos_req))
+ hba->pm_qos_enabled = true;
}
-EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
-static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
+/**
+ * ufshcd_pm_qos_exit - remove request from PM QoS
+ * @hba: per adapter instance
+ */
+void ufshcd_pm_qos_exit(struct ufs_hba *hba)
{
- /*
- * If both host and device support UniPro ver1.6 or later, PA layer
- * parameters tuning happens during link startup itself.
- *
- * We can manually tune PA layer parameters if either host or device
- * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
- * logic simple, we will only do manual tuning if local unipro version
- * doesn't support ver1.6 or later.
- */
- return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
+ if (!hba->pm_qos_enabled)
+ return;
+
+ cpu_latency_qos_remove_request(&hba->pm_qos_req);
+ hba->pm_qos_enabled = false;
+}
+
+/**
+ * ufshcd_pm_qos_update - update PM QoS request
+ * @hba: per adapter instance
+ * @on: If True, vote for perf PM QoS mode otherwise power save mode
+ */
+static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
+{
+ if (!hba->pm_qos_enabled)
+ return;
+
+ cpu_latency_qos_update_request(&hba->pm_qos_req, on ? 0 : PM_QOS_DEFAULT_VALUE);
}
/**
@@ -1160,8 +1180,11 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
hba->devfreq->previous_freq);
else
ufshcd_set_clk_freq(hba, !scale_up);
+ goto out;
}
+ ufshcd_pm_qos_update(hba, scale_up);
+
out:
trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
(scale_up ? "up" : "down"),
@@ -1216,11 +1239,13 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
{
const struct scsi_device *sdev;
+ unsigned long flags;
u32 pending = 0;
- lockdep_assert_held(hba->host->host_lock);
+ spin_lock_irqsave(hba->host->host_lock, flags);
__shost_for_each_device(sdev, hba->host)
pending += sbitmap_weight(&sdev->budget_map);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
return pending;
}
@@ -1234,7 +1259,6 @@ static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
u64 wait_timeout_us)
{
- unsigned long flags;
int ret = 0;
u32 tm_doorbell;
u32 tr_pending;
@@ -1242,7 +1266,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
ktime_t start;
ufshcd_hold(hba);
- spin_lock_irqsave(hba->host->host_lock, flags);
/*
* Wait for all the outstanding tasks/transfer requests.
* Verify by checking the doorbell registers are clear.
@@ -1263,7 +1286,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
break;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
io_schedule_timeout(msecs_to_jiffies(20));
if (ktime_to_us(ktime_sub(ktime_get(), start)) >
wait_timeout_us) {
@@ -1275,7 +1297,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
*/
do_last_check = true;
}
- spin_lock_irqsave(hba->host->host_lock, flags);
} while (tm_doorbell || tr_pending);
if (timeout) {
@@ -1285,7 +1306,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
ret = -EBUSY;
}
out:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_release(hba);
return ret;
}
@@ -1347,7 +1367,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
* make sure that there are no outstanding requests when
* clock scaling is in progress
*/
- ufshcd_scsi_block_requests(hba);
+ blk_mq_quiesce_tagset(&hba->host->tag_set);
mutex_lock(&hba->wb_mutex);
down_write(&hba->clk_scaling_lock);
@@ -1356,7 +1376,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
mutex_unlock(&hba->wb_mutex);
- ufshcd_scsi_unblock_requests(hba);
+ blk_mq_unquiesce_tagset(&hba->host->tag_set);
goto out;
}
@@ -1377,7 +1397,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc
mutex_unlock(&hba->wb_mutex);
- ufshcd_scsi_unblock_requests(hba);
+ blk_mq_unquiesce_tagset(&hba->host->tag_set);
ufshcd_release(hba);
}
@@ -1432,16 +1452,16 @@ static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_scaling.suspend_work);
- unsigned long irq_flags;
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (hba->clk_scaling.active_reqs ||
+ hba->clk_scaling.is_suspended)
+ return;
+
+ hba->clk_scaling.is_suspended = true;
+ hba->clk_scaling.window_start_t = 0;
}
- hba->clk_scaling.is_suspended = true;
- hba->clk_scaling.window_start_t = 0;
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
devfreq_suspend_device(hba->devfreq);
}
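
scoped_guard() comes from <linux/cleanup.h>: the lock is held for exactly the braced block and released on every exit path, which is what makes the early returns in the conversions above safe. A minimal sketch of the shape:

static bool try_suspend_sketch(struct ufs_hba *hba)
{
	scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock) {
		if (hba->clk_scaling.is_suspended)
			return false;	/* lock dropped automatically */
		hba->clk_scaling.is_suspended = true;
	}

	/* Lock already released here. */
	return true;
}
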
@@ -1450,15 +1470,13 @@ static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_scaling.resume_work);
- unsigned long irq_flags;
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (!hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (!hba->clk_scaling.is_suspended)
+ return;
+ hba->clk_scaling.is_suspended = false;
}
- hba->clk_scaling.is_suspended = false;
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
devfreq_resume_device(hba->devfreq);
}
@@ -1472,7 +1490,6 @@ static int ufshcd_devfreq_target(struct device *dev,
bool scale_up = false, sched_clk_scaling_suspend_work = false;
struct list_head *clk_list = &hba->clk_list_head;
struct ufs_clk_info *clki;
- unsigned long irq_flags;
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
@@ -1493,43 +1510,38 @@ static int ufshcd_devfreq_target(struct device *dev,
*freq = (unsigned long) clk_round_rate(clki->clk, *freq);
}
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (ufshcd_eh_in_progress(hba)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return 0;
- }
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (ufshcd_eh_in_progress(hba))
+ return 0;
- /* Skip scaling clock when clock scaling is suspended */
- if (hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- dev_warn(hba->dev, "clock scaling is suspended, skip");
- return 0;
- }
+ /* Skip scaling clock when clock scaling is suspended */
+ if (hba->clk_scaling.is_suspended) {
+ dev_warn(hba->dev, "clock scaling is suspended, skip");
+ return 0;
+ }
- if (!hba->clk_scaling.active_reqs)
- sched_clk_scaling_suspend_work = true;
+ if (!hba->clk_scaling.active_reqs)
+ sched_clk_scaling_suspend_work = true;
- if (list_empty(clk_list)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- goto out;
- }
+ if (list_empty(clk_list))
+ goto out;
- /* Decide based on the target or rounded-off frequency and update */
- if (hba->use_pm_opp)
- scale_up = *freq > hba->clk_scaling.target_freq;
- else
- scale_up = *freq == clki->max_freq;
+ /* Decide based on the target or rounded-off frequency and update */
+ if (hba->use_pm_opp)
+ scale_up = *freq > hba->clk_scaling.target_freq;
+ else
+ scale_up = *freq == clki->max_freq;
- if (!hba->use_pm_opp && !scale_up)
- *freq = clki->min_freq;
+ if (!hba->use_pm_opp && !scale_up)
+ *freq = clki->min_freq;
- /* Update the frequency */
- if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- ret = 0;
- goto out; /* no state change required */
+ /* Update the frequency */
+ if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
+ ret = 0;
+ goto out; /* no state change required */
+ }
}
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
start = ktime_get();
ret = ufshcd_devfreq_scale(hba, *freq, scale_up);
@@ -1541,7 +1553,8 @@ static int ufshcd_devfreq_target(struct device *dev,
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
out:
- if (sched_clk_scaling_suspend_work && !scale_up)
+ if (sched_clk_scaling_suspend_work &&
+ (!scale_up || hba->clk_scaling.suspend_on_no_request))
queue_work(hba->clk_scaling.workq,
&hba->clk_scaling.suspend_work);
@@ -1553,7 +1566,6 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
- unsigned long flags;
ktime_t curr_t;
if (!ufshcd_is_clkscaling_supported(hba))
@@ -1561,7 +1573,8 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
memset(stat, 0, sizeof(*stat));
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
curr_t = ktime_get();
if (!scaling->window_start_t)
goto start_window;
@@ -1597,7 +1610,7 @@ start_window:
scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+
return 0;
}
@@ -1661,19 +1674,19 @@ static void ufshcd_devfreq_remove(struct ufs_hba *hba)
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
- unsigned long flags;
bool suspend = false;
cancel_work_sync(&hba->clk_scaling.suspend_work);
cancel_work_sync(&hba->clk_scaling.resume_work);
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (!hba->clk_scaling.is_suspended) {
- suspend = true;
- hba->clk_scaling.is_suspended = true;
- hba->clk_scaling.window_start_t = 0;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock) {
+ if (!hba->clk_scaling.is_suspended) {
+ suspend = true;
+ hba->clk_scaling.is_suspended = true;
+ hba->clk_scaling.window_start_t = 0;
+ }
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
if (suspend)
devfreq_suspend_device(hba->devfreq);
@@ -1681,15 +1694,15 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
- unsigned long flags;
bool resume = false;
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_scaling.is_suspended) {
- resume = true;
- hba->clk_scaling.is_suspended = false;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock) {
+ if (hba->clk_scaling.is_suspended) {
+ resume = true;
+ hba->clk_scaling.is_suspended = false;
+ }
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
if (resume)
devfreq_resume_device(hba->devfreq);
@@ -1764,8 +1777,6 @@ static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
{
- char wq_name[sizeof("ufs_clkscaling_00")];
-
if (!ufshcd_is_clkscaling_supported(hba))
return;
@@ -1777,9 +1788,10 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
INIT_WORK(&hba->clk_scaling.resume_work,
ufshcd_clk_scaling_resume_work);
- snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
- hba->host->host_no);
- hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+ spin_lock_init(&hba->clk_scaling.lock);
+
+ hba->clk_scaling.workq = alloc_ordered_workqueue(
+ "ufs_clkscaling_%d", WQ_MEM_RECLAIM, hba->host->host_no);
hba->clk_scaling.is_initialized = true;
}
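alloc_ordered_workqueue() accepts a printf-style format string, which is what makes the on-stack wq_name buffer and the snprintf() call above removable, and WQ_MEM_RECLAIM keeps the rescuer thread that the old create_singlethread_workqueue() also provided for the memory-reclaim path. A sketch of the naming pattern (the host number 3 is arbitrary):

#include <linux/workqueue.h>

static int example_init_wq(void)
{
        struct workqueue_struct *wq;

        wq = alloc_ordered_workqueue("ufs_clkscaling_%d", WQ_MEM_RECLAIM, 3);
        if (!wq)
                return -ENOMEM;
        destroy_workqueue(wq);  /* teardown shown for completeness */
        return 0;
}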
@@ -1798,19 +1810,16 @@ static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
- unsigned long flags;
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.ungate_work);
cancel_delayed_work_sync(&hba->clk_gating.gate_work);
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_gating.state == CLKS_ON) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return;
+ scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
+ if (hba->clk_gating.state == CLKS_ON)
+ return;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_hba_vreg_set_hpm(hba);
ufshcd_setup_clocks(hba, true);
@@ -1845,7 +1854,7 @@ void ufshcd_hold(struct ufs_hba *hba)
if (!ufshcd_is_clkgating_allowed(hba) ||
!hba->clk_gating.is_initialized)
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ spin_lock_irqsave(&hba->clk_gating.lock, flags);
hba->clk_gating.active_reqs++;
start:
@@ -1861,11 +1870,11 @@ start:
*/
if (ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_link_hibern8(hba)) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
flush_result = flush_work(&hba->clk_gating.ungate_work);
if (hba->clk_gating.is_suspended && !flush_result)
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ spin_lock_irqsave(&hba->clk_gating.lock, flags);
goto start;
}
break;
@@ -1894,17 +1903,17 @@ start:
*/
fallthrough;
case REQ_CLKS_ON:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
flush_work(&hba->clk_gating.ungate_work);
/* Make sure state is CLKS_ON before returning */
- spin_lock_irqsave(hba->host->host_lock, flags);
+ spin_lock_irqsave(&hba->clk_gating.lock, flags);
goto start;
default:
dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
__func__, hba->clk_gating.state);
break;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
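ufshcd_hold() keeps explicit spin_lock_irqsave()/spin_unlock_irqrestore() calls instead of a guard because it has to drop the lock, flush the ungate work, and re-acquire the lock before re-checking the gating state, a cycle the scope-based guards cannot express. A generic sketch of that drop-flush-relock shape (all names hypothetical):

#include <linux/spinlock.h>
#include <linux/workqueue.h>

static DEFINE_SPINLOCK(state_lock);     /* hypothetical lock */
static bool state_ready;                /* hypothetical flag */

static void wait_until_ready(struct work_struct *pending)
{
        unsigned long flags;

        spin_lock_irqsave(&state_lock, flags);
        while (!state_ready) {
                /* a guard cannot release and re-take the lock mid-scope */
                spin_unlock_irqrestore(&state_lock, flags);
                flush_work(pending);
                spin_lock_irqsave(&state_lock, flags);
        }
        spin_unlock_irqrestore(&state_lock, flags);
}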
@@ -1912,28 +1921,32 @@ static void ufshcd_gate_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.gate_work.work);
- unsigned long flags;
int ret;
- spin_lock_irqsave(hba->host->host_lock, flags);
- /*
- * In case you are here to cancel this work the gating state
- * would be marked as REQ_CLKS_ON. In this case save time by
- * skipping the gating work and exit after changing the clock
- * state to CLKS_ON.
- */
- if (hba->clk_gating.is_suspended ||
- (hba->clk_gating.state != REQ_CLKS_OFF)) {
- hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- goto rel_lock;
- }
+ scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
+ /*
+ * If this work is being canceled, the gating state will have been
+ * marked REQ_CLKS_ON. In that case, save time by skipping the
+ * gating work and exiting after changing the clock state to
+ * CLKS_ON.
+ */
+ if (hba->clk_gating.is_suspended ||
+ hba->clk_gating.state != REQ_CLKS_OFF) {
+ hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ return;
+ }
- if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
- goto rel_lock;
+ if (hba->clk_gating.active_reqs)
+ return;
+ }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ scoped_guard(spinlock_irqsave, hba->host->host_lock) {
+ if (ufshcd_is_ufs_dev_busy(hba) ||
+ hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+ return;
+ }
/* put the link into hibern8 mode before turning off clocks */
if (ufshcd_can_hibern8_during_gating(hba)) {
@@ -1944,7 +1957,7 @@ static void ufshcd_gate_work(struct work_struct *work)
__func__, ret);
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- goto out;
+ return;
}
ufshcd_set_link_hibern8(hba);
}
@@ -1964,33 +1977,34 @@ static void ufshcd_gate_work(struct work_struct *work)
* prevent from doing cancel work multiple times when there are
* new requests arriving before the current cancel work is done.
*/
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
if (hba->clk_gating.state == REQ_CLKS_OFF) {
hba->clk_gating.state = CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
}
-rel_lock:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
- return;
}
-/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
+ lockdep_assert_held(&hba->clk_gating.lock);
+
if (!ufshcd_is_clkgating_allowed(hba))
return;
hba->clk_gating.active_reqs--;
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
- hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
- hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
- hba->active_uic_cmd || hba->uic_async_done ||
+ !hba->clk_gating.is_initialized ||
hba->clk_gating.state == CLKS_OFF)
return;
+ scoped_guard(spinlock_irqsave, hba->host->host_lock) {
+ if (ufshcd_has_pending_tasks(hba) ||
+ hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+ return;
+ }
+
hba->clk_gating.state = REQ_CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
queue_delayed_work(hba->clk_gating.clk_gating_workq,
@@ -2000,11 +2014,8 @@ static void __ufshcd_release(struct ufs_hba *hba)
void ufshcd_release(struct ufs_hba *hba)
{
- unsigned long flags;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
__ufshcd_release(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);
@@ -2019,11 +2030,9 @@ static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned long flags;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
hba->clk_gating.delay_ms = value;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
@@ -2051,7 +2060,6 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned long flags;
u32 value;
if (kstrtou32(buf, 0, &value))
@@ -2059,9 +2067,10 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
value = !!value;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
+
if (value == hba->clk_gating.is_enabled)
- goto out;
+ return count;
if (value)
__ufshcd_release(hba);
@@ -2069,8 +2078,7 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
hba->clk_gating.active_reqs++;
hba->clk_gating.is_enabled = value;
-out:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+
return count;
}
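Unlike scoped_guard(), a plain guard() declaration holds the lock until the enclosing function scope ends, which is what lets the store callback above drop its out: label and return directly from any branch. A sketch of the same shape (example_lock and example_enabled are hypothetical):

static ssize_t example_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t count)
{
        bool value;

        if (kstrtobool(buf, &value))
                return -EINVAL;

        guard(spinlock_irqsave)(&example_lock);

        if (value == example_enabled)
                return count;   /* lock released on this return, too */
        example_enabled = value;
        return count;
}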
@@ -2103,8 +2111,6 @@ static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
- char wq_name[sizeof("ufs_clk_gating_00")];
-
if (!ufshcd_is_clkgating_allowed(hba))
return;
@@ -2114,10 +2120,9 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
- snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
- hba->host->host_no);
- hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
- WQ_MEM_RECLAIM | WQ_HIGHPRI);
+ hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(
+ "ufs_clk_gating_%d", WQ_MEM_RECLAIM | WQ_HIGHPRI,
+ hba->host->host_no);
ufshcd_init_clk_gating_sysfs(hba);
@@ -2144,19 +2149,17 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
bool queue_resume_work = false;
ktime_t curr_t = ktime_get();
- unsigned long flags;
if (!ufshcd_is_clkscaling_supported(hba))
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
if (!hba->clk_scaling.active_reqs++)
queue_resume_work = true;
- if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
return;
- }
if (queue_resume_work)
queue_work(hba->clk_scaling.workq,
@@ -2172,18 +2175,17 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
hba->clk_scaling.busy_start_t = curr_t;
hba->clk_scaling.is_busy_started = true;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
- unsigned long flags;
if (!ufshcd_is_clkscaling_supported(hba))
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
hba->clk_scaling.active_reqs--;
if (!scaling->active_reqs && scaling->is_busy_started) {
scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
@@ -2191,7 +2193,6 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static inline int ufshcd_monitor_opcode2dir(u8 opcode)
@@ -2281,7 +2282,7 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_start_monitor(hba, lrbp);
- if (is_mcq_enabled(hba)) {
+ if (hba->mcq_enabled) {
int utrd_size = sizeof(struct utp_transfer_req_desc);
struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr;
struct utp_transfer_req_desc *dest;
@@ -2377,15 +2378,15 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
int err;
hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
- if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
- hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;
/* nutrs and nutmrs are 0 based values */
- hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
+ hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1;
hba->nutmrs =
((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
hba->reserved_slot = hba->nutrs - 1;
+ hba->nortt = FIELD_GET(MASK_NUMBER_OUTSTANDING_RTT, hba->capabilities) + 1;
+
/* Read crypto capabilities */
err = ufshcd_hba_init_crypto_capabilities(hba);
if (err) {
@@ -2393,13 +2394,22 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
return err;
}
+ /*
+ * The UFSHCI 3.0 specification does not define MCQ_SUPPORT and
+ * LSDB_SUPPORT, but it defines bits [31:29] as reserved with a reset
+ * value of 0, so these fields can be read regardless of the UFSHCI
+ * version.
+ */
hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
- if (!hba->mcq_sup)
- return 0;
+ /*
+ * 0h: legacy single doorbell support is available
+ * 1h: indicates that legacy single doorbell support has been removed
+ */
+ if (!(hba->quirks & UFSHCD_QUIRK_BROKEN_LSDBS_CAP))
+ hba->lsdb_sup = !FIELD_GET(MASK_LSDB_SUPPORT, hba->capabilities);
+ else
+ hba->lsdb_sup = true;
hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
- hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
- hba->mcq_capabilities);
return 0;
}
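The new nortt, MCQ and LSDB parsing follows the <linux/bitfield.h> idiom: FIELD_GET() uses a GENMASK()-defined mask both to select a field and to shift it down to bit 0. A sketch with made-up masks (the EX_* definitions mirror the pattern, not the real register layout):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EX_NORTT        GENMASK(23, 16) /* hypothetical field */
#define EX_MCQ_SUP      BIT(30)         /* hypothetical bit */

static unsigned int example_parse_nortt(u32 caps)
{
        /* extract bits 23:16, shift down, +1 because the field is 0-based */
        return FIELD_GET(EX_NORTT, caps) + 1;
}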
@@ -2415,7 +2425,7 @@ static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
u32 val;
int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
- 500, UIC_CMD_TIMEOUT * 1000, false, hba,
+ 500, uic_cmd_timeout * 1000, false, hba,
REG_CONTROLLER_STATUS);
return ret == 0;
}
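read_poll_timeout() from <linux/iopoll.h> encapsulates the polling loop: it repeatedly evaluates op(args...) into val, sleeping about sleep_us between reads, and returns 0 as soon as cond holds or -ETIMEDOUT once timeout_us elapses, so the change above merely swaps the compile-time UIC_CMD_TIMEOUT constant for the runtime uic_cmd_timeout variable. A generic sketch against a hypothetical MMIO status register:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

static int example_wait_ready(void __iomem *base)
{
        u32 status;

        /* poll offset 0x10 every 500 us, for at most 100 ms */
        return read_poll_timeout(readl, status, status & BIT(0),
                                 500, 100 * 1000, false, base + 0x10);
}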
@@ -2475,7 +2485,7 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
lockdep_assert_held(&hba->uic_cmd_mutex);
if (wait_for_completion_timeout(&uic_cmd->done,
- msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+ msecs_to_jiffies(uic_cmd_timeout))) {
ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
} else {
ret = -ETIMEDOUT;
@@ -2501,13 +2511,11 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
* __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
* @hba: per adapter instance
* @uic_cmd: UIC command
- * @completion: initialize the completion only if this is set to true
*
* Return: 0 only if success.
*/
static int
-__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
- bool completion)
+__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
lockdep_assert_held(&hba->uic_cmd_mutex);
@@ -2517,8 +2525,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
return -EIO;
}
- if (completion)
- init_completion(&uic_cmd->done);
+ init_completion(&uic_cmd->done);
uic_cmd->cmd_active = 1;
ufshcd_dispatch_uic_cmd(hba, uic_cmd);
@@ -2544,7 +2551,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
- ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
+ ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
if (!ret)
ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
@@ -2617,7 +2624,7 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
- return 0;
+ return ufshcd_crypto_fill_prdt(hba, lrbp);
}
/**
@@ -2629,14 +2636,7 @@ static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- if (hba->ufs_version == ufshci_version(1, 0)) {
- u32 rw;
- rw = set & INTERRUPT_MASK_RW_VER_10;
- set = rw | ((set ^ intrs) & intrs);
- } else {
- set |= intrs;
- }
-
+ set |= intrs;
ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
@@ -2649,34 +2649,30 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- if (hba->ufs_version == ufshci_version(1, 0)) {
- u32 rw;
- rw = (set & INTERRUPT_MASK_RW_VER_10) &
- ~(intrs & INTERRUPT_MASK_RW_VER_10);
- set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
-
- } else {
- set &= ~intrs;
- }
-
+ set &= ~intrs;
ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
/**
* ufshcd_prepare_req_desc_hdr - Fill the UTP transfer request descriptor
* header according to the request
+ * @hba: per adapter instance
* @lrbp: pointer to local reference block
* @upiu_flags: flags required in the header
* @cmd_dir: request's data direction
* @ehs_length: Total EHS Length (in units of 32 bytes) of all Extra Header Segments
*/
-static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
- enum dma_data_direction cmd_dir, int ehs_length)
+static void
+ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+ u8 *upiu_flags, enum dma_data_direction cmd_dir,
+ int ehs_length)
{
struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
struct request_desc_header *h = &req_desc->header;
enum utp_data_direction data_direction;
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+
*h = (typeof(*h)){ };
if (cmd_dir == DMA_FROM_DEVICE) {
@@ -2736,7 +2732,6 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
- memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
@@ -2809,12 +2804,8 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
u8 upiu_flags;
int ret = 0;
- if (hba->ufs_version <= ufshci_version(1, 1))
- lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
- else
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+ ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0);
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
@@ -2837,18 +2828,32 @@ static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
unsigned int ioprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
u8 upiu_flags;
- if (hba->ufs_version <= ufshci_version(1, 1))
- lrbp->command_type = UTP_CMD_TYPE_SCSI;
- else
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
-
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
- lrbp->cmd->sc_data_direction, 0);
+ ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
if (ioprio_class == IOPRIO_CLASS_RT)
upiu_flags |= UPIU_CMD_FLAGS_CP;
ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
}
+static void __ufshcd_setup_cmd(struct ufshcd_lrb *lrbp, struct scsi_cmnd *cmd, u8 lun, int tag)
+{
+ memset(lrbp->ucd_req_ptr, 0, sizeof(*lrbp->ucd_req_ptr));
+
+ lrbp->cmd = cmd;
+ lrbp->task_tag = tag;
+ lrbp->lun = lun;
+ ufshcd_prepare_lrbp_crypto(cmd ? scsi_cmd_to_rq(cmd) : NULL, lrbp);
+}
+
+static void ufshcd_setup_scsi_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+ struct scsi_cmnd *cmd, u8 lun, int tag)
+{
+ __ufshcd_setup_cmd(lrbp, cmd, lun, tag);
+ lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
+ lrbp->req_abort_skip = false;
+
+ ufshcd_comp_scsi_upiu(hba, lrbp);
+}
+
/**
* ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
* @upiu_wlun_id: UPIU W-LUN id
@@ -2904,9 +2909,8 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
i * ufshcd_get_ucd_size(hba);
- u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
- response_upiu);
- u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
+ u16 response_offset = le16_to_cpu(utrdlp[i].response_upiu_offset);
+ u16 prdt_offset = le16_to_cpu(utrdlp[i].prd_table_offset);
lrb->utr_descriptor_ptr = utrdlp + i;
lrb->utrd_dma_addr = hba->utrdl_dma_addr +
@@ -2982,16 +2986,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
ufshcd_hold(hba);
lrbp = &hba->lrb[tag];
- lrbp->cmd = cmd;
- lrbp->task_tag = tag;
- lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
- lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
-
- ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
-
- lrbp->req_abort_skip = false;
- ufshcd_comp_scsi_upiu(hba, lrbp);
+ ufshcd_setup_scsi_cmd(hba, lrbp, cmd, ufshcd_scsi_to_upiu_lun(cmd->device->lun), tag);
err = ufshcd_map_sg(hba, lrbp);
if (err) {
@@ -2999,7 +2995,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto out;
}
- if (is_mcq_enabled(hba))
+ if (hba->mcq_enabled)
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
ufshcd_send_command(hba, tag, hwq);
@@ -3016,15 +3012,18 @@ out:
return err;
}
-static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
+static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+ enum dev_cmd_type cmd_type, u8 lun, int tag)
{
- lrbp->cmd = NULL;
- lrbp->task_tag = tag;
- lrbp->lun = 0; /* device management cmd is not specific to any LUN */
+ __ufshcd_setup_cmd(lrbp, NULL, lun, tag);
lrbp->intr_cmd = true; /* No interrupt aggregation */
- ufshcd_prepare_lrbp_crypto(NULL, lrbp);
hba->dev_cmd.type = cmd_type;
+}
+
+static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
+{
+ ufshcd_setup_dev_cmd(hba, lrbp, cmd_type, 0, tag);
return ufshcd_compose_devman_upiu(hba, lrbp);
}
@@ -3037,16 +3036,7 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
*/
bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
{
- struct request *rq;
-
- if (!cmd)
- return false;
-
- rq = scsi_cmd_to_rq(cmd);
- if (!blk_mq_request_started(rq))
- return false;
-
- return true;
+ return cmd && blk_mq_rq_state(scsi_cmd_to_rq(cmd)) == MQ_RQ_IN_FLIGHT;
}
/*
@@ -3058,10 +3048,9 @@ bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
{
u32 mask;
- unsigned long flags;
int err;
- if (is_mcq_enabled(hba)) {
+ if (hba->mcq_enabled) {
/*
* MCQ mode. Clean up the MCQ resources similar to
* what the ufshcd_utrl_clear() does for SDB mode.
@@ -3078,9 +3067,7 @@ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
mask = 1U << task_tag;
/* clear outstanding transaction before retry */
- spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_utrl_clear(hba, mask);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
/*
* wait for h/w to clear corresponding bit in door-bell.
@@ -3117,8 +3104,13 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
case UPIU_TRANSACTION_QUERY_RSP: {
u8 response = lrbp->ucd_rsp_ptr->header.response;
- if (response == 0)
+ if (response == 0) {
err = ufshcd_copy_query_response(hba, lrbp);
+ } else {
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: unexpected response in Query RSP: %x\n",
+ __func__, response);
+ }
break;
}
case UPIU_TRANSACTION_REJECT_UPIU:
@@ -3171,8 +3163,10 @@ retry:
__func__, lrbp->task_tag);
/* MCQ mode */
- if (is_mcq_enabled(hba)) {
- err = ufshcd_clear_cmd(hba, lrbp->task_tag);
+ if (hba->mcq_enabled) {
+ /* command cleared successfully, let the caller retry */
+ if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
+ err = -EAGAIN;
hba->dev_cmd.complete = NULL;
return err;
}
@@ -3229,6 +3223,39 @@ retry:
return err;
}
+static void ufshcd_dev_man_lock(struct ufs_hba *hba)
+{
+ ufshcd_hold(hba);
+ mutex_lock(&hba->dev_cmd.lock);
+ down_read(&hba->clk_scaling_lock);
+}
+
+static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
+{
+ up_read(&hba->clk_scaling_lock);
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+}
+
+static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+ const u32 tag, int timeout)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ int err;
+
+ hba->dev_cmd.complete = &wait;
+
+ ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
+
+ ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
+ err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
+
+ ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
+ (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
+
+ return err;
+}
+
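ufshcd_dev_man_lock() and ufshcd_dev_man_unlock() bundle the three steps that every device-management path used to repeat - ufshcd_hold(), the dev_cmd mutex and the clk_scaling_lock read side - so callers collapse into a lock/issue/unlock sandwich, as the conversions below show. The intended usage, mirroring the ufshcd_verify_dev_init() hunk further down:

ufshcd_dev_man_lock(hba);
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, hba->nop_out_timeout);
ufshcd_dev_man_unlock(hba);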
/**
* ufshcd_exec_dev_cmd - API for sending device management requests
* @hba: UFS hba
@@ -3243,34 +3270,18 @@ retry:
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
{
- DECLARE_COMPLETION_ONSTACK(wait);
const u32 tag = hba->reserved_slot;
- struct ufshcd_lrb *lrbp;
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
int err;
/* Protects use of hba->reserved_slot. */
lockdep_assert_held(&hba->dev_cmd.lock);
- down_read(&hba->clk_scaling_lock);
-
- lrbp = &hba->lrb[tag];
- lrbp->cmd = NULL;
err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
if (unlikely(err))
- goto out;
-
- hba->dev_cmd.complete = &wait;
-
- ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
-
- ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
- err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
- ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
- (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
+ return err;
-out:
- up_read(&hba->clk_scaling_lock);
- return err;
+ return ufshcd_issue_dev_cmd(hba, lrbp, tag, timeout);
}
/**
@@ -3340,8 +3351,8 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
BUG_ON(!hba);
- ufshcd_hold(hba);
- mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_dev_man_lock(hba);
+
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
@@ -3383,8 +3394,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
out_unlock:
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
return err;
}
@@ -3414,9 +3424,8 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
return -EINVAL;
}
- ufshcd_hold(hba);
+ ufshcd_dev_man_lock(hba);
- mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
@@ -3446,8 +3455,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
*attr_val = be32_to_cpu(response->upiu_res.value);
out_unlock:
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
return err;
}
@@ -3510,9 +3518,8 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
return -EINVAL;
}
- ufshcd_hold(hba);
+ ufshcd_dev_man_lock(hba);
- mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
hba->dev_cmd.query.descriptor = desc_buf;
@@ -3545,8 +3552,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
out_unlock:
hba->dev_cmd.query.descriptor = NULL;
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
return err;
}
@@ -3979,11 +3985,11 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
*/
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
- struct uic_command uic_cmd = {0};
+ struct uic_command uic_cmd = {
+ .command = UIC_CMD_DME_LINK_STARTUP,
+ };
int ret;
- uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
-
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_dbg(hba->dev,
@@ -4001,11 +4007,11 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
*/
static int ufshcd_dme_reset(struct ufs_hba *hba)
{
- struct uic_command uic_cmd = {0};
+ struct uic_command uic_cmd = {
+ .command = UIC_CMD_DME_RESET,
+ };
int ret;
- uic_cmd.command = UIC_CMD_DME_RESET;
-
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_err(hba->dev,
@@ -4040,11 +4046,11 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
*/
static int ufshcd_dme_enable(struct ufs_hba *hba)
{
- struct uic_command uic_cmd = {0};
+ struct uic_command uic_cmd = {
+ .command = UIC_CMD_DME_ENABLE,
+ };
int ret;
- uic_cmd.command = UIC_CMD_DME_ENABLE;
-
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_err(hba->dev,
@@ -4077,11 +4083,16 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
min_sleep_time_us =
MIN_DELAY_BEFORE_DME_CMDS_US - delta;
else
- return; /* no more delay required */
+ min_sleep_time_us = 0; /* no more delay required */
}
- /* allow sleep for extra 50us if needed */
- usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
+ if (min_sleep_time_us > 0) {
+ /* allow sleep for extra 50us if needed */
+ usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
+ }
+
+ /* update the last_dme_cmd_tstamp */
+ hba->last_dme_cmd_tstamp = ktime_get();
}
/**
@@ -4097,7 +4108,12 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
u8 attr_set, u32 mib_val, u8 peer)
{
- struct uic_command uic_cmd = {0};
+ struct uic_command uic_cmd = {
+ .command = peer ? UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET,
+ .argument1 = attr_sel,
+ .argument2 = UIC_ARG_ATTR_TYPE(attr_set),
+ .argument3 = mib_val,
+ };
static const char *const action[] = {
"dme-set",
"dme-peer-set"
@@ -4106,12 +4122,6 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
int ret;
int retries = UFS_UIC_COMMAND_RETRIES;
- uic_cmd.command = peer ?
- UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
- uic_cmd.argument1 = attr_sel;
- uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
- uic_cmd.argument3 = mib_val;
-
do {
/* for peer attributes we retry upon failure */
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
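These hunks convert struct uic_command setup to designated initializers: every member not named in the initializer is implicitly zeroed, so the old "= {0}" initialization plus separate field assignments collapse into a single declaration. A sketch:

struct uic_command uic_cmd = {
        .command = UIC_CMD_DME_ENABLE,
};      /* argument1, argument2 and argument3 are implicitly 0 */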
@@ -4141,7 +4151,10 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
u32 *mib_val, u8 peer)
{
- struct uic_command uic_cmd = {0};
+ struct uic_command uic_cmd = {
+ .command = peer ? UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET,
+ .argument1 = attr_sel,
+ };
static const char *const action[] = {
"dme-get",
"dme-peer-get"
@@ -4175,10 +4188,6 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
}
}
- uic_cmd.command = peer ?
- UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
- uic_cmd.argument1 = attr_sel;
-
do {
/* for peer attributes we retry upon failure */
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
@@ -4242,11 +4251,11 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
* Make sure UIC command completion interrupt is disabled before
* issuing UIC command.
*/
- wmb();
+ ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
reenable_intr = true;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
- ret = __ufshcd_send_uic_cmd(hba, cmd, false);
+ ret = __ufshcd_send_uic_cmd(hba, cmd);
if (ret) {
dev_err(hba->dev,
"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
@@ -4255,7 +4264,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
}
if (!wait_for_completion_timeout(hba->uic_async_done,
- msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+ msecs_to_jiffies(uic_cmd_timeout))) {
dev_err(hba->dev,
"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
cmd->command, cmd->argument3);
@@ -4302,6 +4311,42 @@ out_unlock:
}
/**
+ * ufshcd_send_bsg_uic_cmd - Send UIC commands requested via BSG layer and retrieve the result
+ * @hba: per adapter instance
+ * @uic_cmd: UIC command
+ *
+ * Return: 0 only if success.
+ */
+int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+{
+ int ret;
+
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
+ return 0;
+
+ ufshcd_hold(hba);
+
+ if (uic_cmd->argument1 == UIC_ARG_MIB(PA_PWRMODE) &&
+ uic_cmd->command == UIC_CMD_DME_SET) {
+ ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd);
+ goto out;
+ }
+
+ mutex_lock(&hba->uic_cmd_mutex);
+ ufshcd_add_delay_before_dme_cmd(hba);
+
+ ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
+ if (!ret)
+ ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
+
+ mutex_unlock(&hba->uic_cmd_mutex);
+
+out:
+ ufshcd_release(hba);
+ return ret;
+}
+
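ufshcd_send_bsg_uic_cmd() gives the BSG layer its own entry point: a DME_SET of PA_PWRMODE is routed through ufshcd_uic_pwr_ctrl(), which needs the asynchronous completion machinery, while every other command takes the ordinary uic_cmd_mutex path. A sketch of a caller (the attribute chosen is purely illustrative):

struct uic_command cmd = {
        .command = UIC_CMD_DME_GET,
        .argument1 = UIC_ARG_MIB(PA_PWRMODE),
};
int ret = ufshcd_send_bsg_uic_cmd(hba, &cmd);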
+/**
* ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
* using DME_SET primitives.
* @hba: per adapter instance
@@ -4311,7 +4356,11 @@ out_unlock:
*/
int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
- struct uic_command uic_cmd = {0};
+ struct uic_command uic_cmd = {
+ .command = UIC_CMD_DME_SET,
+ .argument1 = UIC_ARG_MIB(PA_PWRMODE),
+ .argument3 = mode,
+ };
int ret;
if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
@@ -4324,9 +4373,6 @@ int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
}
}
- uic_cmd.command = UIC_CMD_DME_SET;
- uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
- uic_cmd.argument3 = mode;
ufshcd_hold(hba);
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
ufshcd_release(hba);
@@ -4367,13 +4413,14 @@ EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
- int ret;
- struct uic_command uic_cmd = {0};
+ struct uic_command uic_cmd = {
+ .command = UIC_CMD_DME_HIBER_ENTER,
+ };
ktime_t start = ktime_get();
+ int ret;
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
- uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
@@ -4391,13 +4438,14 @@ EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
- struct uic_command uic_cmd = {0};
+ struct uic_command uic_cmd = {
+ .command = UIC_CMD_DME_HIBER_EXIT,
+ };
int ret;
ktime_t start = ktime_get();
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
- uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
@@ -4494,6 +4542,14 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
return -EINVAL;
}
+ if (pwr_info->lane_rx != pwr_info->lane_tx) {
+ dev_err(hba->dev, "%s: asymmetric connected lanes. rx=%d, tx=%d\n",
+ __func__,
+ pwr_info->lane_rx,
+ pwr_info->lane_tx);
+ return -EINVAL;
+ }
+
/*
* First, get the maximum gears of HS speed.
* If a zero value, it means there is no HSGEAR capability.
@@ -4606,9 +4662,6 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
dev_err(hba->dev,
"%s: power mode change failed %d\n", __func__, ret);
} else {
- ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
- pwr_mode);
-
memcpy(&hba->pwr_info, pwr_mode,
sizeof(struct ufs_pa_layer_attr));
}
@@ -4637,6 +4690,10 @@ int ufshcd_config_pwr_mode(struct ufs_hba *hba,
ret = ufshcd_change_power_mode(hba, &final_params);
+ if (!ret)
+ ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
+ &final_params);
+
return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
@@ -4725,12 +4782,6 @@ int ufshcd_make_hba_operational(struct ufs_hba *hba)
REG_UTP_TASK_REQ_LIST_BASE_H);
/*
- * Make sure base address and interrupt setup are updated before
- * enabling the run/stop registers below.
- */
- wmb();
-
- /*
* UCRDY, UTMRLDY and UTRLRDY bits must be 1
*/
reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
@@ -4752,20 +4803,14 @@ EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
*/
void ufshcd_hba_stop(struct ufs_hba *hba)
{
- unsigned long flags;
int err;
- /*
- * Obtain the host lock to prevent that the controller is disabled
- * while the UFS interrupt handler is active on another CPU.
- */
- spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_disable_irq(hba);
ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
CONTROLLER_ENABLE, CONTROLLER_DISABLE,
10, 1);
+ ufshcd_enable_irq(hba);
if (err)
dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
@@ -4783,51 +4828,44 @@ EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
*/
static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
{
- int retry_outer = 3;
- int retry_inner;
+ int retry;
-start:
- if (ufshcd_is_hba_active(hba))
- /* change controller state to "reset state" */
- ufshcd_hba_stop(hba);
+ for (retry = 3; retry > 0; retry--) {
+ if (ufshcd_is_hba_active(hba))
+ /* change controller state to "reset state" */
+ ufshcd_hba_stop(hba);
+
+ /* UniPro link is disabled at this point */
+ ufshcd_set_link_off(hba);
- /* UniPro link is disabled at this point */
- ufshcd_set_link_off(hba);
+ ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
- ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
+ /* start controller initialization sequence */
+ ufshcd_hba_start(hba);
- /* start controller initialization sequence */
- ufshcd_hba_start(hba);
+ /*
+ * To initialize a UFS host controller, the HCE bit must be set to 1.
+ * During initialization the HCE bit value changes from 1->0->1.
+ * When the host controller completes the initialization sequence it
+ * sets HCE back to 1, and the same bit is read back to check whether
+ * initialization has completed. Without this delay, the HCE = 1
+ * value written by the previous instruction might be read back.
+ * This delay can be tuned per controller.
+ */
+ ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
- /*
- * To initialize a UFS host controller HCE bit must be set to 1.
- * During initialization the HCE bit value changes from 1->0->1.
- * When the host controller completes initialization sequence
- * it sets the value of HCE bit to 1. The same HCE bit is read back
- * to check if the controller has completed initialization sequence.
- * So without this delay the value HCE = 1, set in the previous
- * instruction might be read back.
- * This delay can be changed based on the controller.
- */
- ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
+ /* wait for the host controller to complete initialization */
+ if (!ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, CONTROLLER_ENABLE,
+ CONTROLLER_ENABLE, 1000, 50))
+ break;
- /* wait for the host controller to complete initialization */
- retry_inner = 50;
- while (!ufshcd_is_hba_active(hba)) {
- if (retry_inner) {
- retry_inner--;
- } else {
- dev_err(hba->dev,
- "Controller enable failed\n");
- if (retry_outer) {
- retry_outer--;
- goto start;
- }
- return -EIO;
- }
- usleep_range(1000, 1100);
+ dev_err(hba->dev, "Enabling the controller failed\n");
}
+ if (!retry)
+ return -EIO;
+
/* enable UIC related interrupts */
ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
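The rewrite above flattens the old goto-based retry - an outer three-attempt counter plus an inner 50-iteration poll - into one bounded for-loop whose wait is delegated to ufshcd_wait_for_register(). The control-flow shape, generically (try_enable() is a hypothetical stand-in for the stop/start/wait sequence):

static int try_enable(void);    /* hypothetical stand-in */

static int example_enable(void)
{
        int retry;

        for (retry = 3; retry > 0; retry--) {
                if (try_enable() == 0)
                        break;
                /* log the failure and go around again */
        }
        if (!retry)
                return -EIO;
        return 0;
}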
@@ -5027,8 +5065,8 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
int err = 0;
int retries;
- ufshcd_hold(hba);
- mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_dev_man_lock(hba);
+
for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
hba->nop_out_timeout);
@@ -5038,8 +5076,8 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
}
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+
+ ufshcd_dev_man_unlock(hba);
if (err)
dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
@@ -5142,12 +5180,12 @@ set_qdepth:
}
/**
- * ufshcd_slave_alloc - handle initial SCSI device configurations
+ * ufshcd_sdev_init - handle initial SCSI device configurations
* @sdev: pointer to SCSI device
*
* Return: success.
*/
-static int ufshcd_slave_alloc(struct scsi_device *sdev)
+static int ufshcd_sdev_init(struct scsi_device *sdev)
{
struct ufs_hba *hba;
@@ -5190,17 +5228,19 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
}
/**
- * ufshcd_slave_configure - adjust SCSI device configurations
+ * ufshcd_sdev_configure - adjust SCSI device configurations
* @sdev: pointer to SCSI device
+ * @lim: queue limits
*
* Return: 0 (success).
*/
-static int ufshcd_slave_configure(struct scsi_device *sdev)
+static int ufshcd_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ufs_hba *hba = shost_priv(sdev->host);
struct request_queue *q = sdev->request_queue;
- blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
+ lim->dma_pad_mask = PRDT_DATA_BYTE_COUNT_PAD - 1;
/*
* Block runtime-pm until all consumers are added.
@@ -5226,10 +5266,10 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
}
/**
- * ufshcd_slave_destroy - remove SCSI device configurations
+ * ufshcd_sdev_destroy - remove SCSI device configurations
* @sdev: pointer to SCSI device
*/
-static void ufshcd_slave_destroy(struct scsi_device *sdev)
+static void ufshcd_sdev_destroy(struct scsi_device *sdev)
{
struct ufs_hba *hba;
unsigned long flags;
@@ -5378,10 +5418,12 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
}
break;
case OCS_ABORTED:
- result |= DID_ABORT << 16;
- break;
case OCS_INVALID_COMMAND_STATUS:
result |= DID_REQUEUE << 16;
+ dev_warn(hba->dev,
+ "OCS %s from controller for tag %d\n",
+ (ocs == OCS_ABORTED ? "aborted" : "invalid"),
+ lrbp->task_tag);
break;
case OCS_INVALID_CMD_TABLE_ATTR:
case OCS_INVALID_PRDT_ATTR:
@@ -5438,32 +5480,37 @@ static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
irqreturn_t retval = IRQ_NONE;
+ struct uic_command *cmd;
spin_lock(hba->host->host_lock);
+ cmd = hba->active_uic_cmd;
+ if (WARN_ON_ONCE(!cmd))
+ goto unlock;
+
if (ufshcd_is_auto_hibern8_error(hba, intr_status))
hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
- if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
- hba->active_uic_cmd->argument2 |=
- ufshcd_get_uic_cmd_result(hba);
- hba->active_uic_cmd->argument3 =
- ufshcd_get_dme_attr_val(hba);
+ if (intr_status & UIC_COMMAND_COMPL) {
+ cmd->argument2 |= ufshcd_get_uic_cmd_result(hba);
+ cmd->argument3 = ufshcd_get_dme_attr_val(hba);
if (!hba->uic_async_done)
- hba->active_uic_cmd->cmd_active = 0;
- complete(&hba->active_uic_cmd->done);
+ cmd->cmd_active = 0;
+ complete(&cmd->done);
retval = IRQ_HANDLED;
}
- if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
- hba->active_uic_cmd->cmd_active = 0;
+ if (intr_status & UFSHCD_UIC_PWR_MASK && hba->uic_async_done) {
+ cmd->cmd_active = 0;
complete(hba->uic_async_done);
retval = IRQ_HANDLED;
}
if (retval == IRQ_HANDLED)
- ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
- UFS_CMD_COMP);
+ ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP);
+
+unlock:
spin_unlock(hba->host->host_lock);
+
return retval;
}
@@ -5474,6 +5521,7 @@ void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
struct scsi_cmnd *cmd = lrbp->cmd;
scsi_dma_unmap(cmd);
+ ufshcd_crypto_clear_prdt(hba, lrbp);
ufshcd_release(hba);
ufshcd_clk_scaling_update_busy(hba);
}
@@ -5493,6 +5541,7 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
lrbp = &hba->lrb[task_tag];
lrbp->compl_time_stamp = ktime_get();
+ lrbp->compl_time_stamp_local_clock = local_clock();
cmd = lrbp->cmd;
if (cmd) {
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
@@ -5502,15 +5551,12 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
ufshcd_release_scsi_cmd(hba, lrbp);
/* Do not touch lrbp after scsi done */
scsi_done(cmd);
- } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
- lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
- if (hba->dev_cmd.complete) {
- if (cqe) {
- ocs = le32_to_cpu(cqe->status) & MASK_OCS;
- lrbp->utr_descriptor_ptr->header.ocs = ocs;
- }
- complete(hba->dev_cmd.complete);
+ } else if (hba->dev_cmd.complete) {
+ if (cqe) {
+ ocs = le32_to_cpu(cqe->status) & MASK_OCS;
+ lrbp->utr_descriptor_ptr->header.ocs = ocs;
}
+ complete(hba->dev_cmd.complete);
}
}
@@ -5559,7 +5605,7 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
u32 tr_doorbell;
struct ufs_hw_queue *hwq;
- if (is_mcq_enabled(hba)) {
+ if (hba->mcq_enabled) {
hwq = &hba->uhq[queue_num];
return ufshcd_mcq_poll_cqe_lock(hba, hwq);
@@ -5602,7 +5648,6 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
unsigned long flags;
- u32 hwq_num, utag;
int tag;
for (tag = 0; tag < hba->nutrs; tag++) {
@@ -5612,9 +5657,7 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
test_bit(SCMD_STATE_COMPLETE, &cmd->state))
continue;
- utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
- hwq_num = blk_mq_unique_tag_to_hwq(utag);
- hwq = &hba->uhq[hwq_num];
+ hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
if (force_compl) {
ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
@@ -5855,12 +5898,11 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
/**
* ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
* @hba: per-adapter instance
- * @status: bkops_status value
*
* Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
* flag in the device to permit background operations if the device
- * bkops_status is greater than or equal to "status" argument passed to
- * this function, disable otherwise.
+ * bkops_status is greater than or equal to hba->urgent_bkops_lvl;
+ * disable it otherwise.
*
* Return: 0 for success, non-zero in case of failure.
*
@@ -5868,11 +5910,11 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
* to know whether auto bkops is enabled or disabled after this function
* returns control to it.
*/
-static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
- enum bkops_status status)
+static int ufshcd_bkops_ctrl(struct ufs_hba *hba)
{
- int err;
+ enum bkops_status status = hba->urgent_bkops_lvl;
u32 curr_status = 0;
+ int err;
err = ufshcd_get_bkops_status(hba, &curr_status);
if (err) {
@@ -5894,23 +5936,6 @@ out:
return err;
}
-/**
- * ufshcd_urgent_bkops - handle urgent bkops exception event
- * @hba: per-adapter instance
- *
- * Enable fBackgroundOpsEn flag in the device to permit background
- * operations.
- *
- * If BKOPs is enabled, this function returns 0, 1 if the bkops in not enabled
- * and negative error value for any other failure.
- *
- * Return: 0 upon success; < 0 upon failure.
- */
-static int ufshcd_urgent_bkops(struct ufs_hba *hba)
-{
- return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
-}
-
static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
@@ -5954,24 +5979,6 @@ out:
__func__, err);
}
-static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
-{
- u32 value;
-
- if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
- return;
-
- dev_info(hba->dev, "exception Tcase %d\n", value - 80);
-
- ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
-
- /*
- * A placeholder for the platform vendors to add whatever additional
- * steps required
- */
-}
-
static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
{
u8 index;
@@ -6179,12 +6186,11 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
u32 status = 0;
hba = container_of(work, struct ufs_hba, eeh_work);
- ufshcd_scsi_block_requests(hba);
err = ufshcd_get_ee_status(hba, &status);
if (err) {
dev_err(hba->dev, "%s: failed to get exception status %d\n",
__func__, err);
- goto out;
+ return;
}
trace_ufshcd_exception_event(dev_name(hba->dev), status);
@@ -6193,17 +6199,15 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
ufshcd_bkops_exception_event_handler(hba);
if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
- ufshcd_temp_exception_event_handler(hba, status);
+ ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
ufs_debugfs_exception_event(hba, status);
-out:
- ufshcd_scsi_unblock_requests(hba);
}
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
{
- if (is_mcq_enabled(hba))
+ if (hba->mcq_enabled)
ufshcd_mcq_compl_pending_transfer(hba, force_compl);
else
ufshcd_transfer_req_compl(hba);
@@ -6363,15 +6367,14 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
ufshcd_suspend_clkscaling(hba);
ufshcd_clk_scaling_allow(hba, false);
}
- ufshcd_scsi_block_requests(hba);
/* Wait for ongoing ufshcd_queuecommand() calls to finish. */
- blk_mq_wait_quiesce_done(&hba->host->tag_set);
+ blk_mq_quiesce_tagset(&hba->host->tag_set);
cancel_work_sync(&hba->eeh_work);
}
static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
{
- ufshcd_scsi_unblock_requests(hba);
+ blk_mq_unquiesce_tagset(&hba->host->tag_set);
ufshcd_release(hba);
if (ufshcd_is_clkscaling_supported(hba))
ufshcd_clk_scaling_suspend(hba, false);
@@ -6450,24 +6453,12 @@ static bool ufshcd_abort_one(struct request *rq, void *priv)
struct scsi_device *sdev = cmd->device;
struct Scsi_Host *shost = sdev->host;
struct ufs_hba *hba = shost_priv(shost);
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
- struct ufs_hw_queue *hwq;
- unsigned long flags;
*ret = ufshcd_try_to_abort_task(hba, tag);
dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
*ret ? "failed" : "succeeded");
- /* Release cmd in MCQ mode if abort succeeds */
- if (is_mcq_enabled(hba) && (*ret == 0)) {
- hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
- spin_lock_irqsave(&hwq->cq_lock, flags);
- if (ufshcd_cmd_inflight(lrbp->cmd))
- ufshcd_release_scsi_cmd(hba, lrbp);
- spin_unlock_irqrestore(&hwq->cq_lock, flags);
- }
-
return *ret == 0;
}
@@ -6549,7 +6540,8 @@ again:
if (ufshcd_err_handling_should_stop(hba))
goto skip_err_handling;
- if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+ if ((hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) &&
+ !hba->force_reset) {
bool ret;
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -6996,14 +6988,11 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
int err = 0;
u32 mask = 1 << tag;
- unsigned long flags;
if (!test_bit(tag, &hba->outstanding_tasks))
goto out;
- spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_utmrl_clear(hba, tag);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
/* poll for max. 1 sec to clear door bell register by h/w */
err = ufshcd_wait_for_register(hba,
@@ -7046,15 +7035,13 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
- /* send command to the controller */
__set_bit(task_tag, &hba->outstanding_tasks);
- ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
- /* Make sure that doorbell is committed immediately */
- wmb();
-
spin_unlock_irqrestore(host->host_lock, flags);
+ /* send command to the controller */
+ ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
+
ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
/* wait until the task management command is completed */
@@ -7159,35 +7146,21 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type,
enum query_opcode desc_op)
{
- DECLARE_COMPLETION_ONSTACK(wait);
const u32 tag = hba->reserved_slot;
- struct ufshcd_lrb *lrbp;
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
int err = 0;
u8 upiu_flags;
/* Protects use of hba->reserved_slot. */
lockdep_assert_held(&hba->dev_cmd.lock);
- down_read(&hba->clk_scaling_lock);
+ ufshcd_setup_dev_cmd(hba, lrbp, cmd_type, 0, tag);
- lrbp = &hba->lrb[tag];
- lrbp->cmd = NULL;
- lrbp->task_tag = tag;
- lrbp->lun = 0;
- lrbp->intr_cmd = true;
- ufshcd_prepare_lrbp_crypto(NULL, lrbp);
- hba->dev_cmd.type = cmd_type;
-
- if (hba->ufs_version <= ufshci_version(1, 1))
- lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
- else
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+ ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0);
/* update the task tag in the request upiu */
req_upiu->header.task_tag = tag;
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
-
/* just copy the upiu request as it is */
memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
@@ -7201,17 +7174,12 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
- hba->dev_cmd.complete = &wait;
-
- ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
-
- ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
/*
* Ignore the return value here - ufshcd_check_query_response() is
* bound to fail since dev_cmd.query and dev_cmd.type were left empty.
* Read the response directly, ignoring all errors.
*/
- ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
+ ufshcd_issue_dev_cmd(hba, lrbp, tag, QUERY_REQ_TIMEOUT);
/* just copy the upiu response as it is */
memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
@@ -7234,7 +7202,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
- up_read(&hba->clk_scaling_lock);
return err;
}
@@ -7273,13 +7240,11 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
cmd_type = DEV_CMD_TYPE_NOP;
fallthrough;
case UPIU_TRANSACTION_QUERY_REQ:
- ufshcd_hold(hba);
- mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_dev_man_lock(hba);
err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
desc_buff, buff_len,
cmd_type, desc_op);
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
break;
case UPIU_TRANSACTION_TASK_REQ:
@@ -7329,41 +7294,21 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
enum dma_data_direction dir)
{
- DECLARE_COMPLETION_ONSTACK(wait);
const u32 tag = hba->reserved_slot;
- struct ufshcd_lrb *lrbp;
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
int err = 0;
int result;
u8 upiu_flags;
u8 *ehs_data;
u16 ehs_len;
+ int ehs = (hba->capabilities & MASK_EHSLUTRD_SUPPORTED) ? 2 : 0;
/* Protects use of hba->reserved_slot. */
- ufshcd_hold(hba);
- mutex_lock(&hba->dev_cmd.lock);
- down_read(&hba->clk_scaling_lock);
+ ufshcd_dev_man_lock(hba);
- lrbp = &hba->lrb[tag];
- lrbp->cmd = NULL;
- lrbp->task_tag = tag;
- lrbp->lun = UFS_UPIU_RPMB_WLUN;
+ ufshcd_setup_dev_cmd(hba, lrbp, DEV_CMD_TYPE_RPMB, UFS_UPIU_RPMB_WLUN, tag);
- lrbp->intr_cmd = true;
- ufshcd_prepare_lrbp_crypto(NULL, lrbp);
- hba->dev_cmd.type = DEV_CMD_TYPE_RPMB;
-
- /* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
-
- /*
- * According to UFSHCI 4.0 specification page 24, if EHSLUTRDS is 0, host controller takes
- * EHS length from CMD UPIU, and SW driver use EHS Length field in CMD UPIU. if it is 1,
- * HW controller takes EHS length from UTRD.
- */
- if (hba->capabilities & MASK_EHSLUTRD_SUPPORTED)
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
- else
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 0);
+ ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, ehs);
/* update the task tag */
req_upiu->header.task_tag = tag;
@@ -7378,11 +7323,7 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
- hba->dev_cmd.complete = &wait;
-
- ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
-
- err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);
+ err = ufshcd_issue_dev_cmd(hba, lrbp, tag, ADVANCED_RPMB_REQ_TIMEOUT);
if (!err) {
/* Just copy the upiu response as it is */
@@ -7407,9 +7348,8 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
}
}
- up_read(&hba->clk_scaling_lock);
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
+
return err ? : result;
}
@@ -7441,7 +7381,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
goto out;
}
- if (is_mcq_enabled(hba)) {
+ if (hba->mcq_enabled) {
for (pos = 0; pos < hba->nutrs; pos++) {
lrbp = &hba->lrb[pos];
if (ufshcd_cmd_inflight(lrbp->cmd) &&
@@ -7517,10 +7457,9 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
{
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
- int err = 0;
+ int err;
int poll_cnt;
u8 resp = 0xF;
- u32 reg;
for (poll_cnt = 100; poll_cnt; poll_cnt--) {
err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
@@ -7535,46 +7474,27 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
* cmd not pending in the device, check if it is
* in transition.
*/
- dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
+ dev_info(hba->dev,
+ "%s: cmd with tag %d not pending in the device.\n",
__func__, tag);
- if (is_mcq_enabled(hba)) {
- /* MCQ mode */
- if (ufshcd_cmd_inflight(lrbp->cmd)) {
- /* sleep for max. 200us same delay as in SDB mode */
- usleep_range(100, 200);
- continue;
- }
- /* command completed already */
- dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n",
- __func__, tag);
- goto out;
- }
-
- /* Single Doorbell Mode */
- reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- if (reg & (1 << tag)) {
- /* sleep for max. 200us to stabilize */
- usleep_range(100, 200);
- continue;
+ if (!ufshcd_cmd_inflight(lrbp->cmd)) {
+ dev_info(hba->dev,
+ "%s: cmd with tag=%d completed.\n",
+ __func__, tag);
+ return 0;
}
- /* command completed already */
- dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
- __func__, tag);
- goto out;
+ usleep_range(100, 200);
} else {
dev_err(hba->dev,
"%s: no response from device. tag = %d, err %d\n",
__func__, tag, err);
- if (!err)
- err = resp; /* service response error */
- goto out;
+ return err ? : resp;
}
}
- if (!poll_cnt) {
- err = -EBUSY;
- goto out;
- }
+ if (!poll_cnt)
+ return -EBUSY;
err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
UFS_ABORT_TASK, &resp);
@@ -7584,7 +7504,7 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
__func__, tag, err);
}
- goto out;
+ return err;
}
err = ufshcd_clear_cmd(hba, tag);
@@ -7592,7 +7512,6 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
__func__, tag, err);
-out:
return err;
}
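The loop above is a bounded poll: up to 100 UFS_QUERY_TASK rounds with a 100-200 us sleep between them before escalating to UFS_ABORT_TASK. The skeleton of that pattern, condensed (the real loop re-issues UFS_QUERY_TASK on every round):

for (poll_cnt = 100; poll_cnt; poll_cnt--) {
	if (!ufshcd_cmd_inflight(lrbp->cmd))
		return 0;		/* completed while polling */
	usleep_range(100, 200);		/* same bounds as above */
}
return -EBUSY;				/* still pending, give up */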
@@ -7615,7 +7534,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
ufshcd_hold(hba);
- if (!is_mcq_enabled(hba)) {
+ if (!hba->mcq_enabled) {
reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
if (!test_bit(tag, &hba->outstanding_reqs)) {
/* If command is already aborted/completed, return FAILED. */
@@ -7648,7 +7567,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
}
hba->req_abort_count++;
- if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) {
+ if (!hba->mcq_enabled && !(reg & (1 << tag))) {
/* only execute this code in single doorbell mode */
dev_err(hba->dev,
"%s: cmd was completed, but without a notifying intr, tag = %d",
@@ -7675,7 +7594,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
goto release;
}
- if (is_mcq_enabled(hba)) {
+ if (hba->mcq_enabled) {
/* MCQ mode. Branch off to handle abort for mcq mode */
err = ufshcd_mcq_abort(cmd);
goto release;
@@ -7716,6 +7635,29 @@ release:
}
/**
+ * ufshcd_process_probe_result - Process the ufshcd_probe_hba() result.
+ * @hba: UFS host controller instance.
+ * @probe_start: time when the ufshcd_probe_hba() call started.
+ * @ret: ufshcd_probe_hba() return value.
+ */
+static void ufshcd_process_probe_result(struct ufs_hba *hba,
+ ktime_t probe_start, int ret)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (ret)
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ trace_ufshcd_init(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), probe_start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+}
+
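ufshcd_process_probe_result() gives every probe path a single place for the state transition and the init trace point. The elapsed-time argument follows the standard ktime idiom; a minimal sketch:

ktime_t start = ktime_get();

/* ... the operation being measured ... */

s64 elapsed_us = ktime_to_us(ktime_sub(ktime_get(), start));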
+/**
* ufshcd_host_reset_and_restore - reset and restore host controller
* @hba: per-adapter instance
*
@@ -7744,8 +7686,14 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
err = ufshcd_hba_enable(hba);
/* Establish the link again and restore the device */
- if (!err)
- err = ufshcd_probe_hba(hba, false);
+ if (!err) {
+ ktime_t probe_start = ktime_get();
+
+ err = ufshcd_device_init(hba, /*init_dev_params=*/false);
+ if (!err)
+ err = ufshcd_probe_hba(hba, false);
+ ufshcd_process_probe_result(hba, probe_start, err);
+ }
if (err)
dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
@@ -7987,11 +7935,13 @@ out:
static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
{
+ struct Scsi_Host *shost = sdev->host;
+
scsi_autopm_get_device(sdev);
blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
if (sdev->rpm_autosuspend)
pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
- RPM_AUTOSUSPEND_DELAY_MS);
+ shost->rpm_autosuspend_delay);
scsi_autopm_put_device(sdev);
}
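For context, blk_pm_runtime_init() sits on top of the usual runtime-PM autosuspend setup. A sketch of that idiom for a generic struct device (not the exact block-layer internals):

pm_runtime_set_autosuspend_delay(dev, delay_ms);	/* e.g. shost->rpm_autosuspend_delay */
pm_runtime_use_autosuspend(dev);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);	/* suspend fires only after delay_ms idle */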
@@ -8150,29 +8100,36 @@ static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
}
}
-static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
+static void ufshcd_set_rtt(struct ufs_hba *hba)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
- u32 ext_ufs_feature;
- u32 ext_iid_en = 0;
- int err;
+ u32 rtt = 0;
+ u32 dev_rtt = 0;
+ int host_rtt_cap = hba->vops && hba->vops->max_num_rtt ?
+ hba->vops->max_num_rtt : hba->nortt;
- /* Only UFS-4.0 and above may support EXT_IID */
+ /* RTT override makes sense only for UFS-4.0 and above */
if (dev_info->wspecversion < 0x400)
- goto out;
+ return;
- ext_ufs_feature = get_unaligned_be32(desc_buf +
- DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
- if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
- goto out;
+ if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &dev_rtt)) {
+ dev_err(hba->dev, "failed reading bMaxNumOfRTT\n");
+ return;
+ }
- err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
- if (err)
- dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);
+ /* do not override if it was already written */
+ if (dev_rtt != DEFAULT_MAX_NUM_RTT)
+ return;
-out:
- dev_info->b_ext_iid_en = ext_iid_en;
+ rtt = min_t(int, dev_info->rtt_cap, host_rtt_cap);
+
+ if (rtt == dev_rtt)
+ return;
+
+ if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt))
+ dev_err(hba->dev, "failed writing bMaxNumOfRTT\n");
}
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
@@ -8223,10 +8180,13 @@ static void ufshcd_update_rtc(struct ufs_hba *hba)
*/
val = ts64.tv_sec - hba->dev_info.rtc_time_baseline;
- ufshcd_rpm_get_sync(hba);
+ /* Skip the RTC update if the RPM state is not RPM_ACTIVE */
+ if (ufshcd_rpm_get_if_active(hba) <= 0)
+ return;
+
err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED,
0, 0, &val);
- ufshcd_rpm_put_sync(hba);
+ ufshcd_rpm_put(hba);
if (err)
dev_err(hba->dev, "%s: Failed to update rtc %d\n", __func__, err);
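ufshcd_rpm_get_if_active() is built on pm_runtime_get_if_active(), so the RTC write now happens only when the device WLUN is already resumed, instead of forcing a resume the way ufshcd_rpm_get_sync() did. The underlying idiom, sketched for a generic device (one-argument form, as in recent kernels):

if (pm_runtime_get_if_active(dev) <= 0)
	return;		/* 0: suspended, -EINVAL: runtime PM disabled */

/* ... touch the hardware without forcing a resume ... */

pm_runtime_put(dev);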
@@ -8241,7 +8201,9 @@ static void ufshcd_rtc_work(struct work_struct *work)
hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work);
/* Update RTC only when there are no requests in progress and UFSHCI is operational */
- if (!ufshcd_is_ufs_dev_busy(hba) && hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL)
+ if (!ufshcd_is_ufs_dev_busy(hba) &&
+ hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL &&
+ !hba->clk_gating.active_reqs)
ufshcd_update_rtc(hba);
if (ufshcd_is_ufs_dev_active(hba) && hba->dev_info.rtc_update_period)
@@ -8310,6 +8272,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
+ dev_info->rtt_cap = desc_buf[DEVICE_DESC_PARAM_RTT_CAP];
+
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
err = ufshcd_read_string_desc(hba, model_index,
@@ -8331,9 +8295,6 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
ufs_init_rtc(hba, desc_buf);
- if (hba->ext_iid_sup)
- ufshcd_ext_iid_probe(hba, desc_buf);
-
/*
* ufshcd_read_string_desc returns size of the string
* reset the error value
@@ -8354,83 +8315,6 @@ static void ufs_put_device_desc(struct ufs_hba *hba)
}
/**
- * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
- * @hba: per-adapter instance
- *
- * PA_TActivate parameter can be tuned manually if UniPro version is less than
- * 1.61. PA_TActivate needs to be greater than or equal to peerM-PHY's
- * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
- * the hibern8 exit latency.
- *
- * Return: zero on success, non-zero error value on failure.
- */
-static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
-{
- int ret = 0;
- u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
-
- ret = ufshcd_dme_peer_get(hba,
- UIC_ARG_MIB_SEL(
- RX_MIN_ACTIVATETIME_CAPABILITY,
- UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
- &peer_rx_min_activatetime);
- if (ret)
- goto out;
-
- /* make sure proper unit conversion is applied */
- tuned_pa_tactivate =
- ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
- / PA_TACTIVATE_TIME_UNIT_US);
- ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
- tuned_pa_tactivate);
-
-out:
- return ret;
-}
-
-/**
- * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
- * @hba: per-adapter instance
- *
- * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
- * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's
- * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
- * This optimal value can help reduce the hibern8 exit latency.
- *
- * Return: zero on success, non-zero error value on failure.
- */
-static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
-{
- int ret = 0;
- u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
- u32 max_hibern8_time, tuned_pa_hibern8time;
-
- ret = ufshcd_dme_get(hba,
- UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
- UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
- &local_tx_hibern8_time_cap);
- if (ret)
- goto out;
-
- ret = ufshcd_dme_peer_get(hba,
- UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
- UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
- &peer_rx_hibern8_time_cap);
- if (ret)
- goto out;
-
- max_hibern8_time = max(local_tx_hibern8_time_cap,
- peer_rx_hibern8_time_cap);
- /* make sure proper unit conversion is applied */
- tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
- / PA_HIBERN8_TIME_UNIT_US);
- ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
- tuned_pa_hibern8time);
-out:
- return ret;
-}
-
-/**
* ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
* less than device PA_TACTIVATE time.
* @hba: per-adapter instance
@@ -8502,11 +8386,6 @@ out:
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
- if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
- ufshcd_tune_pa_tactivate(hba);
- ufshcd_tune_pa_hibern8time(hba);
- }
-
ufshcd_vops_apply_dev_quirks(hba);
if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
@@ -8644,6 +8523,8 @@ static int ufshcd_device_params_init(struct ufs_hba *hba)
goto out;
}
+ ufshcd_set_rtt(hba);
+
ufshcd_get_ref_clk_gating_wait(hba);
if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
@@ -8670,9 +8551,7 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
if (dev_info->wspecversion < 0x400)
return;
- ufshcd_hold(hba);
-
- mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_dev_man_lock(hba);
ufshcd_init_query(hba, &request, &response,
UPIU_QUERY_OPCODE_WRITE_ATTR,
@@ -8690,8 +8569,7 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
dev_err(hba->dev, "%s: failed to set timestamp %d\n",
__func__, err);
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
}
/**
@@ -8724,6 +8602,14 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
ufshcd_init_clk_scaling_sysfs(hba);
}
+ /*
+ * The RTC update code accesses the hba->ufs_device_wlun->sdev_gendev
+ * pointer and hence must only be started after the WLUN pointer has
+ * been initialized by ufshcd_scsi_add_wlus().
+ */
+ schedule_delayed_work(&hba->ufs_rtc_update_work,
+ msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
+
ufs_bsg_probe(hba);
scsi_scan_host(hba->host);
@@ -8777,6 +8663,9 @@ static int ufshcd_alloc_mcq(struct ufs_hba *hba)
if (ret)
goto err;
+ hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
+ hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
+
return 0;
err:
hba->nutrs = old_nutrs;
@@ -8798,24 +8687,49 @@ static void ufshcd_config_mcq(struct ufs_hba *hba)
ufshcd_mcq_make_queues_operational(hba);
ufshcd_mcq_config_mac(hba, hba->nutrs);
- hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
- hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
-
- /* Select MCQ mode */
- ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
- REG_UFS_MEM_CFG);
- hba->mcq_enabled = true;
-
dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
hba->nutrs);
}
+static int ufshcd_post_device_init(struct ufs_hba *hba)
+{
+ int ret;
+
+ ufshcd_tune_unipro_params(hba);
+
+ /* UFS device is also active now */
+ ufshcd_set_ufs_dev_active(hba);
+ ufshcd_force_reset_auto_bkops(hba);
+
+ ufshcd_set_timestamp_attr(hba);
+
+ if (!hba->max_pwr_info.is_valid)
+ return 0;
+
+ /*
+ * Set the right value to bRefClkFreq before attempting to
+ * switch to HS gears.
+ */
+ if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
+ ufshcd_set_dev_ref_clk(hba);
+ /* Gear up to HS gear. */
+ ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
{
int ret;
- struct Scsi_Host *host = hba->host;
+
+ WARN_ON_ONCE(!hba->scsi_host_added);
hba->ufshcd_state = UFSHCD_STATE_RESET;
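The inline "Select MCQ mode" register write deleted above now sits behind ufshcd_mcq_enable()/ufshcd_mcq_disable(), so the reset paths can re-enable MCQ explicitly. A sketch of what the enable helper is expected to do, based only on the removed lines:

static void mcq_enable_sketch(struct ufs_hba *hba)
{
	/* Bit 0 of REG_UFS_MEM_CFG selects MCQ mode. */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
		      REG_UFS_MEM_CFG);
	hba->mcq_enabled = true;
}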
@@ -8833,8 +8747,10 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
ufshcd_set_link_active(hba);
/* Reconfigure MCQ upon reset */
- if (is_mcq_enabled(hba) && !init_dev_params)
+ if (hba->mcq_enabled && !init_dev_params) {
ufshcd_config_mcq(hba);
+ ufshcd_mcq_enable(hba);
+ }
/* Verify device initialization by sending NOP OUT UPIU */
ret = ufshcd_verify_dev_init(hba);
@@ -8854,55 +8770,14 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
ret = ufshcd_device_params_init(hba);
if (ret)
return ret;
- if (is_mcq_supported(hba) && !hba->scsi_host_added) {
- ret = ufshcd_alloc_mcq(hba);
- if (!ret) {
- ufshcd_config_mcq(hba);
- } else {
- /* Continue with SDB mode */
- use_mcq_mode = false;
- dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
- ret);
- }
- ret = scsi_add_host(host, hba->dev);
- if (ret) {
- dev_err(hba->dev, "scsi_add_host failed\n");
- return ret;
- }
- hba->scsi_host_added = true;
- } else if (is_mcq_supported(hba)) {
- /* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */
+ if (is_mcq_supported(hba) &&
+ hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
ufshcd_config_mcq(hba);
+ ufshcd_mcq_enable(hba);
}
}
- ufshcd_tune_unipro_params(hba);
-
- /* UFS device is also active now */
- ufshcd_set_ufs_dev_active(hba);
- ufshcd_force_reset_auto_bkops(hba);
-
- ufshcd_set_timestamp_attr(hba);
- schedule_delayed_work(&hba->ufs_rtc_update_work,
- msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
-
- /* Gear up to HS gear if supported */
- if (hba->max_pwr_info.is_valid) {
- /*
- * Set the right value to bRefClkFreq before attempting to
- * switch to HS gears.
- */
- if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
- ufshcd_set_dev_ref_clk(hba);
- ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
- if (ret) {
- dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
- __func__, ret);
- return ret;
- }
- }
-
- return 0;
+ return ufshcd_post_device_init(hba);
}
/**
@@ -8916,32 +8791,26 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
*/
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
{
- ktime_t start = ktime_get();
- unsigned long flags;
int ret;
- ret = ufshcd_device_init(hba, init_dev_params);
- if (ret)
- goto out;
-
if (!hba->pm_op_in_progress &&
(hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
/* Reset the device and controller before doing reinit */
ufshcd_device_reset(hba);
+ ufs_put_device_desc(hba);
ufshcd_hba_stop(hba);
- ufshcd_vops_reinit_notify(hba);
ret = ufshcd_hba_enable(hba);
if (ret) {
dev_err(hba->dev, "Host controller enable failed\n");
ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
- goto out;
+ return ret;
}
/* Reinit the device */
ret = ufshcd_device_init(hba, init_dev_params);
if (ret)
- goto out;
+ return ret;
}
ufshcd_print_pwr_info(hba);
@@ -8961,18 +8830,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
ufshcd_write_ee_control(hba);
ufshcd_configure_auto_hibern8(hba);
-out:
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (ret)
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
- else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- trace_ufshcd_init(dev_name(hba->dev), ret,
- ktime_to_us(ktime_sub(ktime_get(), start)),
- hba->curr_dev_pwr_mode, hba->uic_link_state);
- return ret;
+ return 0;
}
/**
@@ -8983,11 +8841,14 @@ out:
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
struct ufs_hba *hba = (struct ufs_hba *)data;
+ ktime_t probe_start;
int ret;
down(&hba->host_sem);
/* Initialize hba, detect and initialize UFS device */
+ probe_start = ktime_get();
ret = ufshcd_probe_hba(hba, true);
+ ufshcd_process_probe_result(hba, probe_start, ret);
up(&hba->host_sem);
if (ret)
goto out;
@@ -9021,7 +8882,7 @@ static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
__func__, hba->outstanding_tasks);
- return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
+ return scsi_host_busy(hba->host) ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
}
static const struct attribute_group *ufshcd_driver_groups[] = {
@@ -9047,9 +8908,9 @@ static const struct scsi_host_template ufshcd_driver_template = {
.map_queues = ufshcd_map_queues,
.queuecommand = ufshcd_queuecommand,
.mq_poll = ufshcd_poll,
- .slave_alloc = ufshcd_slave_alloc,
- .slave_configure = ufshcd_slave_configure,
- .slave_destroy = ufshcd_slave_destroy,
+ .sdev_init = ufshcd_sdev_init,
+ .sdev_configure = ufshcd_sdev_configure,
+ .sdev_destroy = ufshcd_sdev_destroy,
.change_queue_depth = ufshcd_change_queue_depth,
.eh_abort_handler = ufshcd_abort,
.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
@@ -9057,15 +8918,12 @@ static const struct scsi_host_template ufshcd_driver_template = {
.eh_timed_out = ufshcd_eh_timed_out,
.this_id = -1,
.sg_tablesize = SG_ALL,
- .cmd_per_lun = UFSHCD_CMD_PER_LUN,
- .can_queue = UFSHCD_CAN_QUEUE,
.max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
.max_sectors = SZ_1M / SECTOR_SIZE,
.max_host_blocked = 1,
.track_queue_depth = 1,
.skip_settle_delay = 1,
.sdev_groups = ufshcd_driver_groups,
- .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
};
static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
@@ -9238,7 +9096,6 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
int ret = 0;
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
- unsigned long flags;
ktime_t start = ktime_get();
bool clk_state_changed = false;
@@ -9280,18 +9137,19 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
if (ret)
return ret;
+ if (!ufshcd_is_clkscaling_supported(hba))
+ ufshcd_pm_qos_update(hba, on);
out:
if (ret) {
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
clk_disable_unprepare(clki->clk);
}
- } else if (!ret && on) {
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->clk_gating.state = CLKS_ON;
+ } else if (!ret && on && hba->clk_gating.is_initialized) {
+ scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
+ hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
if (clk_state_changed)
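scoped_guard() comes from <linux/cleanup.h>: the lock is dropped automatically when the guarded statement or block ends, which is why the explicit flags variable and the unlock call disappear. The function-scope variant, sketched:

static void set_clks_on(struct ufs_hba *hba)
{
	guard(spinlock_irqsave)(&hba->clk_gating.lock);
	hba->clk_gating.state = CLKS_ON;
}	/* lock released and IRQ flags restored here */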
@@ -9457,6 +9315,7 @@ out:
static void ufshcd_hba_exit(struct ufs_hba *hba)
{
if (hba->is_powered) {
+ ufshcd_pm_qos_exit(hba);
ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
if (hba->eh_wq)
@@ -9476,7 +9335,17 @@ static int ufshcd_execute_start_stop(struct scsi_device *sdev,
struct scsi_sense_hdr *sshdr)
{
const unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 };
+ struct scsi_failure failure_defs[] = {
+ {
+ .allowed = 2,
+ .result = SCMD_FAILURE_RESULT_ANY,
+ },
+ {} /* sentinel: the failures array must end with a zeroed entry */
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args args = {
+ .failures = &failures,
.sshdr = sshdr,
.req_flags = BLK_MQ_REQ_PM,
.scmd_flags = SCMD_FAIL_IF_RECOVERING,
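scsi_execute_cmd() walks failure_definitions until it reaches the zeroed sentinel, retrying any matching result up to .allowed times; this is what absorbs the open-coded retry loop removed in a later hunk. A sketch of a more selective rule, in the style of other scsi_failures users such as sd.c:

struct scsi_failure failure_defs[] = {
	{
		.sense = UNIT_ATTENTION,
		.asc = 0x29,	/* power on / reset occurred */
		.ascq = 0,
		.allowed = 3,
		.result = SAM_STAT_CHECK_CONDITION,
	},
	{}	/* sentinel */
};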
@@ -9502,7 +9371,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
struct scsi_sense_hdr sshdr;
struct scsi_device *sdp;
unsigned long flags;
- int ret, retries;
+ int ret;
spin_lock_irqsave(hba->host->host_lock, flags);
sdp = hba->ufs_device_wlun;
@@ -9528,15 +9397,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
* callbacks hence set the RQF_PM flag so that it doesn't resume the
* already suspended childs.
*/
- for (retries = 3; retries > 0; --retries) {
- ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr);
- /*
- * scsi_execute() only returns a negative value if the request
- * queue is dying.
- */
- if (ret <= 0)
- break;
- }
+ ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr);
if (ret) {
sdev_printk(KERN_WARNING, sdp,
"START_STOP failed for power mode: %d, result %x\n",
@@ -9745,7 +9606,10 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
/* UFS device & link must be active before we enter in this function */
if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
- ret = -EINVAL;
+ /* Wait for the error handler to finish, or trigger error recovery */
+ if (!ufshcd_eh_in_progress(hba))
+ ufshcd_force_error_recovery(hba);
+ ret = -EBUSY;
goto enable_scaling;
}
@@ -9756,7 +9620,7 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
* allow background operations if bkops status shows
* that performance might be impacted.
*/
- ret = ufshcd_urgent_bkops(hba);
+ ret = ufshcd_bkops_ctrl(hba);
if (ret) {
/*
* If return err in suspend flow, IO will hang.
@@ -9945,7 +9809,7 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
* If BKOPs operations are urgently needed at this moment then
* keep auto-bkops enabled or else disable it.
*/
- ufshcd_urgent_bkops(hba);
+ ufshcd_bkops_ctrl(hba);
if (hba->ee_usr_mask)
ufshcd_write_ee_control(hba);
@@ -10109,6 +9973,7 @@ static int ufshcd_suspend(struct ufs_hba *hba)
ufshcd_vreg_set_lpm(hba);
/* Put the host controller in low power mode if possible */
ufshcd_hba_vreg_set_lpm(hba);
+ ufshcd_pm_qos_update(hba, false);
return ret;
}
@@ -10278,7 +10143,9 @@ static void ufshcd_wl_shutdown(struct device *dev)
shost_for_each_device(sdev, hba->host) {
if (sdev == hba->ufs_device_wlun)
continue;
- scsi_device_quiesce(sdev);
+ mutex_lock(&sdev->state_mutex);
+ scsi_device_set_state(sdev, SDEV_OFFLINE);
+ mutex_unlock(&sdev->state_mutex);
}
__ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
@@ -10304,10 +10171,12 @@ void ufshcd_remove(struct ufs_hba *hba)
ufs_hwmon_remove(hba);
ufs_bsg_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
+ cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
blk_mq_destroy_queue(hba->tmf_queue);
blk_put_queue(hba->tmf_queue);
blk_mq_free_tag_set(&hba->tmf_tag_set);
- scsi_remove_host(hba->host);
+ if (hba->scsi_host_added)
+ scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba);
@@ -10348,10 +10217,7 @@ int ufshcd_system_restore(struct device *dev)
* are updated with the latest queue addresses. Only after
* updating these addresses, we can queue the new commands.
*/
- mb();
-
- /* Resuming from hibernate, assume that link was OFF */
- ufshcd_set_link_off(hba);
+ ufshcd_readl(hba, REG_UTP_TASK_REQ_LIST_BASE_H);
return 0;
@@ -10366,16 +10232,6 @@ EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
#endif /* CONFIG_PM_SLEEP */
/**
- * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
- * @hba: pointer to Host Bus Adapter (HBA)
- */
-void ufshcd_dealloc_host(struct ufs_hba *hba)
-{
- scsi_host_put(hba->host);
-}
-EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
-
-/**
* ufshcd_set_dma_mask - Set dma mask based on the controller
* addressing capability
* @hba: per adapter instance
@@ -10384,6 +10240,8 @@ EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
*/
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
+ if (hba->vops && hba->vops->set_dma_mask)
+ return hba->vops->set_dma_mask(hba);
if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
return 0;
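The new ->set_dma_mask() variant op lets a controller override the capability-register based default, e.g. when the SoC's DMA path is narrower than what the host controller advertises. A hypothetical variant implementation:

static int example_ufs_set_dma_mask(struct ufs_hba *hba)
{
	/* Hypothetical controller limited to 32-bit DMA. */
	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}

static const struct ufs_hba_variant_ops example_vops = {
	.set_dma_mask = example_ufs_set_dma_mask,
};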
@@ -10392,11 +10250,25 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
}
/**
+ * ufshcd_devres_release - devres cleanup handler, invoked during release of
+ * hba->dev
+ * @host: pointer to SCSI host
+ */
+static void ufshcd_devres_release(void *host)
+{
+ scsi_host_put(host);
+}
+
+/**
* ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
* @dev: pointer to device handle
* @hba_handle: driver private handle
*
* Return: 0 on success, non-zero value on failure.
+ *
+ * NOTE: There is no corresponding ufshcd_dealloc_host() because this function
+ * keeps track of its allocations using devres and deallocates everything on
+ * device removal automatically.
*/
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
@@ -10418,6 +10290,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = -ENOMEM;
goto out_error;
}
+
+ err = devm_add_action_or_reset(dev, ufshcd_devres_release,
+ host);
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to add ufshcd dealloc action\n");
+
host->nr_maps = HCTX_TYPE_POLL + 1;
hba = shost_priv(host);
hba->host = host;
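devm_add_action_or_reset() registers a callback that devres runs when hba->dev is released, and calls it immediately if registration itself fails, so no separate error-path cleanup is needed. The generic pattern, sketched with hypothetical helpers:

static void my_release(void *data)
{
	my_free(data);				/* hypothetical cleanup */
}

static int my_probe(struct device *dev)
{
	struct my_ctx *res = my_alloc();	/* hypothetical allocation */

	if (!res)
		return -ENOMEM;
	/* On failure this frees res right away; otherwise at release. */
	return devm_add_action_or_reset(dev, my_release, res);
}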
@@ -10447,6 +10326,73 @@ static const struct blk_mq_ops ufshcd_tmf_ops = {
.queue_rq = ufshcd_queue_tmf,
};
+static int ufshcd_add_scsi_host(struct ufs_hba *hba)
+{
+ int err;
+
+ if (is_mcq_supported(hba)) {
+ ufshcd_mcq_enable(hba);
+ err = ufshcd_alloc_mcq(hba);
+ if (!err) {
+ ufshcd_config_mcq(hba);
+ } else {
+ /* Continue with SDB mode */
+ ufshcd_mcq_disable(hba);
+ use_mcq_mode = false;
+ dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
+ err);
+ }
+ }
+ if (!is_mcq_supported(hba) && !hba->lsdb_sup) {
+ dev_err(hba->dev,
+ "%s: failed to initialize (legacy doorbell mode not supported)\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ err = scsi_add_host(hba->host, hba->dev);
+ if (err) {
+ dev_err(hba->dev, "scsi_add_host failed\n");
+ return err;
+ }
+ hba->scsi_host_added = true;
+
+ hba->tmf_tag_set = (struct blk_mq_tag_set) {
+ .nr_hw_queues = 1,
+ .queue_depth = hba->nutmrs,
+ .ops = &ufshcd_tmf_ops,
+ };
+ err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
+ if (err < 0)
+ goto remove_scsi_host;
+ hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL);
+ if (IS_ERR(hba->tmf_queue)) {
+ err = PTR_ERR(hba->tmf_queue);
+ goto free_tmf_tag_set;
+ }
+ hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
+ sizeof(*hba->tmf_rqs), GFP_KERNEL);
+ if (!hba->tmf_rqs) {
+ err = -ENOMEM;
+ goto free_tmf_queue;
+ }
+
+ return 0;
+
+free_tmf_queue:
+ blk_mq_destroy_queue(hba->tmf_queue);
+ blk_put_queue(hba->tmf_queue);
+
+free_tmf_tag_set:
+ blk_mq_free_tag_set(&hba->tmf_tag_set);
+
+remove_scsi_host:
+ if (hba->scsi_host_added)
+ scsi_remove_host(hba->host);
+
+ return err;
+}
+
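The unwind labels above follow the blk-mq ownership rules: a tag set from blk_mq_alloc_tag_set() is released with blk_mq_free_tag_set(), and a queue from blk_mq_alloc_queue() needs blk_mq_destroy_queue() followed by blk_put_queue(). Condensed sketch:

err = blk_mq_alloc_tag_set(&set);
if (err)
	return err;
q = blk_mq_alloc_queue(&set, NULL, NULL);
if (IS_ERR(q)) {
	blk_mq_free_tag_set(&set);
	return PTR_ERR(q);
}

/* teardown, in reverse order: */
blk_mq_destroy_queue(q);
blk_put_queue(q);
blk_mq_free_tag_set(&set);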
/**
* ufshcd_init - Driver initialization routine
* @hba: per-adapter instance
@@ -10460,7 +10406,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
int err;
struct Scsi_Host *host = hba->host;
struct device *dev = hba->dev;
- char eh_wq_name[sizeof("ufs_eh_wq_00")];
/*
* dev_set_drvdata() must be called before any callbacks are registered
@@ -10480,6 +10425,27 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->irq = irq;
hba->vps = &ufs_hba_vps;
+ /*
+ * Initialize clk_gating.lock early since it is being used in
+ * ufshcd_setup_clocks()
+ */
+ spin_lock_init(&hba->clk_gating.lock);
+
+ /*
+ * Set the default power management level for runtime and system PM.
+ * Host controller drivers can override them in their
+ * 'ufs_hba_variant_ops::init' callback.
+ *
+ * Default power saving mode is to keep UFS link in Hibern8 state
+ * and UFS device in sleep state.
+ */
+ hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+ hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
@@ -10520,12 +10486,15 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
host->max_cmd_len = UFS_CDB_SIZE;
host->queuecommand_may_block = !!(hba->caps & UFSHCD_CAP_CLK_GATING);
+ /* Use the default RPM autosuspend delay if the host driver did not set one */
+ if (host->rpm_autosuspend_delay == 0)
+ host->rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS;
+
hba->max_pwr_info.is_valid = false;
/* Initialize work queues */
- snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
- hba->host->host_no);
- hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
+ hba->eh_wq = alloc_ordered_workqueue("ufs_eh_wq_%d", WQ_MEM_RECLAIM,
+ hba->host->host_no);
if (!hba->eh_wq) {
dev_err(hba->dev, "%s: failed to create eh workqueue\n",
__func__);
@@ -10565,7 +10534,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
* Make sure that UFS interrupts are disabled and any pending interrupt
* status is cleared before registering UFS interrupt handler.
*/
- mb();
+ ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
/* IRQ registration */
err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
@@ -10576,35 +10545,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->is_irq_enabled = true;
}
- if (!is_mcq_supported(hba)) {
- err = scsi_add_host(host, hba->dev);
- if (err) {
- dev_err(hba->dev, "scsi_add_host failed\n");
- goto out_disable;
- }
- }
-
- hba->tmf_tag_set = (struct blk_mq_tag_set) {
- .nr_hw_queues = 1,
- .queue_depth = hba->nutmrs,
- .ops = &ufshcd_tmf_ops,
- .flags = BLK_MQ_F_NO_SCHED,
- };
- err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
- if (err < 0)
- goto out_remove_scsi_host;
- hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
- if (IS_ERR(hba->tmf_queue)) {
- err = PTR_ERR(hba->tmf_queue);
- goto free_tmf_tag_set;
- }
- hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
- sizeof(*hba->tmf_rqs), GFP_KERNEL);
- if (!hba->tmf_rqs) {
- err = -ENOMEM;
- goto free_tmf_queue;
- }
-
/* Reset the attached device */
ufshcd_device_reset(hba);
@@ -10616,21 +10556,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
dev_err(hba->dev, "Host controller enable failed\n");
ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
- goto free_tmf_queue;
+ goto out_disable;
}
- /*
- * Set the default power management level for runtime and system PM.
- * Default power saving mode is to keep UFS link in Hibern8 state
- * and UFS device in sleep state.
- */
- hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
- UFS_SLEEP_PWR_MODE,
- UIC_LINK_HIBERN8_STATE);
- hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
- UFS_SLEEP_PWR_MODE,
- UIC_LINK_HIBERN8_STATE);
-
INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work);
INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work);
@@ -10642,7 +10570,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Hold auto suspend until async scan completes */
pm_runtime_get_sync(dev);
- atomic_set(&hba->scsi_block_reqs_cnt, 0);
+
/*
* We are assuming that device wasn't put in sleep/power-down
* state exclusively during the boot stage before kernel.
@@ -10651,19 +10579,56 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
*/
ufshcd_set_ufs_dev_active(hba);
+ /* Initialize hba, detect and initialize UFS device */
+ ktime_t probe_start = ktime_get();
+
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+
+ err = ufshcd_link_startup(hba);
+ if (err)
+ goto out_disable;
+
+ if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
+ goto initialized;
+
+ /* Debug counters initialization */
+ ufshcd_clear_dbg_ufs_stats(hba);
+
+ /* UniPro link is active now */
+ ufshcd_set_link_active(hba);
+
+ /* Verify device initialization by sending NOP OUT UPIU */
+ err = ufshcd_verify_dev_init(hba);
+ if (err)
+ goto out_disable;
+
+ /* Initiate UFS initialization and wait until it completes */
+ err = ufshcd_complete_dev_init(hba);
+ if (err)
+ goto out_disable;
+
+ err = ufshcd_device_params_init(hba);
+ if (err)
+ goto out_disable;
+
+ err = ufshcd_post_device_init(hba);
+
+initialized:
+ ufshcd_process_probe_result(hba, probe_start, err);
+ if (err)
+ goto out_disable;
+
+ err = ufshcd_add_scsi_host(hba);
+ if (err)
+ goto out_disable;
+
async_schedule(ufshcd_async_scan, hba);
ufs_sysfs_add_nodes(hba->dev);
device_enable_async_suspend(dev);
+ ufshcd_pm_qos_init(hba);
return 0;
-free_tmf_queue:
- blk_mq_destroy_queue(hba->tmf_queue);
- blk_put_queue(hba->tmf_queue);
-free_tmf_tag_set:
- blk_mq_free_tag_set(&hba->tmf_tag_set);
-out_remove_scsi_host:
- scsi_remove_host(hba->host);
out_disable:
hba->is_irq_enabled = false;
ufshcd_hba_exit(hba);
@@ -10844,7 +10809,6 @@ static void ufshcd_check_header_layout(void)
static struct scsi_driver ufs_dev_wlun_template = {
.gendrv = {
.name = "ufs_device_wlun",
- .owner = THIS_MODULE,
.probe = ufshcd_wl_probe,
.remove = ufshcd_wl_remove,
.pm = &ufshcd_wl_pm_ops,