author	Linus Torvalds <torvalds@linux-foundation.org>	2023-02-22 13:41:41 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-02-22 13:41:41 -0800
commit	8762069330316392331e693befd8a5b632833618 (patch)
tree	26716f1064b9b0b376d4bf77604cdcdc600efdb2 /drivers/ufs
parent	6861eaf79155f0a5544ff989754159f806795c31 (diff)
parent	833f7d4819a88f027033e0033ea44f7ae3e45a9b (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "Updates to the usual drivers (ufs, lpfc, qla2xxx, libsas). The major
  core change is a rework to remove the two helpers around
  scsi_execute_cmd and use it as the only submission interface along
  with other minor fixes and updates"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (142 commits)
  scsi: ufs: core: Fix an error handling path in ufshcd_read_desc_param()
  scsi: ufs: core: Fix device management cmd timeout flow
  scsi: aic94xx: Add missing check for dma_map_single()
  scsi: smartpqi: Replace one-element array with flexible-array member
  scsi: mpt3sas: Fix a memory leak
  scsi: qla2xxx: Remove the unused variable wwn
  scsi: ufs: core: Fix kernel-doc syntax
  scsi: ufs: core: Add hibernation callbacks
  scsi: snic: Fix memory leak with using debugfs_lookup()
  scsi: ufs: core: Limit DMA alignment check
  scsi: Documentation: Correct spelling
  scsi: Documentation: Correct spelling
  scsi: target: Documentation: Correct spelling
  scsi: aacraid: Allocate cmd_priv with scsicmd
  scsi: ufs: qcom: dt-bindings: Add SM8550 compatible string
  scsi: ufs: ufs-qcom: Clear qunipro_g4_sel for HW version major 5
  scsi: ufs: qcom: fix platform_msi_domain_free_irqs() reference
  scsi: ufs: core: Enable DMA clustering
  scsi: ufs: exynos: Fix the maximum segment size
  scsi: ufs: exynos: Fix DMA alignment for PAGE_SIZE != 4096
  ...
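The core rework called out above makes scsi_execute_cmd() the single blocking
submission helper, removing the two wrappers that previously sat around it.
As a rough, hedged sketch only (not taken from this diff, with the scsi_device
pointer and error handling assumed to come from the caller), issuing a
TEST UNIT READY through that interface could look like:

	/*
	 * Illustrative sketch, not part of this series: a caller of the
	 * unified scsi_execute_cmd() interface.  Assumes normal SCSI ULD
	 * context (<scsi/scsi_device.h>, <scsi/scsi_proto.h>).
	 */
	static int example_test_unit_ready(struct scsi_device *sdev)
	{
		unsigned char cmd[6] = { TEST_UNIT_READY };
		struct scsi_sense_hdr sshdr;
		const struct scsi_exec_args args = {
			.sshdr = &sshdr,	/* decoded sense, if any */
		};

		/* no data transfer, 10 second timeout, 3 retries */
		return scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
					10 * HZ, 3, &args);
	}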
Diffstat (limited to 'drivers/ufs')
-rw-r--r--   drivers/ufs/core/Makefile          2
-rw-r--r--   drivers/ufs/core/ufs-mcq.c       431
-rw-r--r--   drivers/ufs/core/ufs_bsg.c       144
-rw-r--r--   drivers/ufs/core/ufshcd-priv.h   109
-rw-r--r--   drivers/ufs/core/ufshcd.c        863
-rw-r--r--   drivers/ufs/core/ufshpb.c          4
-rw-r--r--   drivers/ufs/host/Kconfig          19
-rw-r--r--   drivers/ufs/host/Makefile          1
-rw-r--r--   drivers/ufs/host/ufs-exynos.c     10
-rw-r--r--   drivers/ufs/host/ufs-qcom.c      548
-rw-r--r--   drivers/ufs/host/ufs-qcom.h       97
-rw-r--r--   drivers/ufs/host/ufs-sprd.c      458
-rw-r--r--   drivers/ufs/host/ufs-sprd.h       85
13 files changed, 2282 insertions, 489 deletions
diff --git a/drivers/ufs/core/Makefile b/drivers/ufs/core/Makefile
index 62f38c5bf857..4d02e0f2de10 100644
--- a/drivers/ufs/core/Makefile
+++ b/drivers/ufs/core/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
-ufshcd-core-y += ufshcd.o ufs-sysfs.o
+ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o
ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
new file mode 100644
index 000000000000..31df052fbc41
--- /dev/null
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center. All rights reserved.
+ *
+ * Authors:
+ * Asutosh Das <quic_asutoshd@quicinc.com>
+ * Can Guo <quic_cang@quicinc.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "ufshcd-priv.h"
+
+#define MAX_QUEUE_SUP GENMASK(7, 0)
+#define UFS_MCQ_MIN_RW_QUEUES 2
+#define UFS_MCQ_MIN_READ_QUEUES 0
+#define UFS_MCQ_NUM_DEV_CMD_QUEUES 1
+#define UFS_MCQ_MIN_POLL_QUEUES 0
+#define QUEUE_EN_OFFSET 31
+#define QUEUE_ID_OFFSET 16
+
+#define MAX_DEV_CMD_ENTRIES 2
+#define MCQ_CFG_MAC_MASK GENMASK(16, 8)
+#define MCQ_QCFG_SIZE 0x40
+#define MCQ_ENTRY_SIZE_IN_DWORD 8
+#define CQE_UCD_BA GENMASK_ULL(63, 7)
+
+static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
+ num_possible_cpus());
+}
+
+static const struct kernel_param_ops rw_queue_count_ops = {
+ .set = rw_queue_count_set,
+ .get = param_get_uint,
+};
+
+static unsigned int rw_queues;
+module_param_cb(rw_queues, &rw_queue_count_ops, &rw_queues, 0644);
+MODULE_PARM_DESC(rw_queues,
+ "Number of interrupt driven I/O queues used for rw. Default value is nr_cpus");
+
+static int read_queue_count_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_READ_QUEUES,
+ num_possible_cpus());
+}
+
+static const struct kernel_param_ops read_queue_count_ops = {
+ .set = read_queue_count_set,
+ .get = param_get_uint,
+};
+
+static unsigned int read_queues;
+module_param_cb(read_queues, &read_queue_count_ops, &read_queues, 0644);
+MODULE_PARM_DESC(read_queues,
+ "Number of interrupt driven read queues used for read. Default value is 0");
+
+static int poll_queue_count_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_POLL_QUEUES,
+ num_possible_cpus());
+}
+
+static const struct kernel_param_ops poll_queue_count_ops = {
+ .set = poll_queue_count_set,
+ .get = param_get_uint,
+};
+
+static unsigned int poll_queues = 1;
+module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644);
+MODULE_PARM_DESC(poll_queues,
+ "Number of poll queues used for r/w. Default value is 1");
+
+/**
+ * ufshcd_mcq_config_mac - Set the #Max Active Cmds.
+ * @hba: per adapter instance
+ * @max_active_cmds: maximum # of active commands to the device at any time.
+ *
+ * The controller won't send more than the max_active_cmds to the device at
+ * any time.
+ */
+void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
+{
+ u32 val;
+
+ val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);
+ val &= ~MCQ_CFG_MAC_MASK;
+ val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds);
+ ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
+}
+
+/**
+ * ufshcd_mcq_req_to_hwq - find the hardware queue on which the
+ * request would be issued.
+ * @hba: per adapter instance
+ * @req: pointer to the request to be issued
+ *
+ * Returns the hardware queue instance on which the request would
+ * be queued.
+ */
+struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
+ struct request *req)
+{
+ u32 utag = blk_mq_unique_tag(req);
+ u32 hwq = blk_mq_unique_tag_to_hwq(utag);
+
+ /* uhq[0] is used to serve device commands */
+ return &hba->uhq[hwq + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+}
+
+/**
+ * ufshcd_mcq_decide_queue_depth - decide the queue depth
+ * @hba: per adapter instance
+ *
+ * Returns queue-depth on success, non-zero on error
+ *
+ * MAC - Max. Active Commands of the Host Controller (HC)
+ * The HC won't send more than this number of commands to the device.
+ * It is mandatory to implement get_hba_mac() to enable MCQ mode.
+ * Calculates and adjusts the queue depth based on the depth
+ * supported by the HC and ufs device.
+ */
+int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
+{
+ int mac;
+
+ /* Mandatory to implement get_hba_mac() */
+ mac = ufshcd_mcq_vops_get_hba_mac(hba);
+ if (mac < 0) {
+ dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
+ return mac;
+ }
+
+ WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
+ /*
+ * max. value of bqueuedepth = 256, mac is host dependent.
+ * It is mandatory for UFS device to define bQueueDepth if
+ * shared queuing architecture is enabled.
+ */
+ return min_t(int, mac, hba->dev_info.bqueuedepth);
+}
+
+static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
+{
+ int i;
+ u32 hba_maxq, rem, tot_queues;
+ struct Scsi_Host *host = hba->host;
+
+ hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities);
+
+ tot_queues = UFS_MCQ_NUM_DEV_CMD_QUEUES + read_queues + poll_queues +
+ rw_queues;
+
+ if (hba_maxq < tot_queues) {
+ dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
+ tot_queues, hba_maxq);
+ return -EOPNOTSUPP;
+ }
+
+ rem = hba_maxq - UFS_MCQ_NUM_DEV_CMD_QUEUES;
+
+ if (rw_queues) {
+ hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;
+ rem -= hba->nr_queues[HCTX_TYPE_DEFAULT];
+ } else {
+ rw_queues = num_possible_cpus();
+ }
+
+ if (poll_queues) {
+ hba->nr_queues[HCTX_TYPE_POLL] = poll_queues;
+ rem -= hba->nr_queues[HCTX_TYPE_POLL];
+ }
+
+ if (read_queues) {
+ hba->nr_queues[HCTX_TYPE_READ] = read_queues;
+ rem -= hba->nr_queues[HCTX_TYPE_READ];
+ }
+
+ if (!hba->nr_queues[HCTX_TYPE_DEFAULT])
+ hba->nr_queues[HCTX_TYPE_DEFAULT] = min3(rem, rw_queues,
+ num_possible_cpus());
+
+ for (i = 0; i < HCTX_MAX_TYPES; i++)
+ host->nr_hw_queues += hba->nr_queues[i];
+
+ hba->nr_hw_queues = host->nr_hw_queues + UFS_MCQ_NUM_DEV_CMD_QUEUES;
+ return 0;
+}
+
+int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
+{
+ struct ufs_hw_queue *hwq;
+ size_t utrdl_size, cqe_size;
+ int i;
+
+ for (i = 0; i < hba->nr_hw_queues; i++) {
+ hwq = &hba->uhq[i];
+
+ utrdl_size = sizeof(struct utp_transfer_req_desc) *
+ hwq->max_entries;
+ hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
+ &hwq->sqe_dma_addr,
+ GFP_KERNEL);
+ if (!hwq->sqe_dma_addr) {
+ dev_err(hba->dev, "SQE allocation failed\n");
+ return -ENOMEM;
+ }
+
+ cqe_size = sizeof(struct cq_entry) * hwq->max_entries;
+ hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
+ &hwq->cqe_dma_addr,
+ GFP_KERNEL);
+ if (!hwq->cqe_dma_addr) {
+ dev_err(hba->dev, "CQE allocation failed\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+
+/* Operation and runtime registers configuration */
+#define MCQ_CFG_n(r, i) ((r) + MCQ_QCFG_SIZE * (i))
+#define MCQ_OPR_OFFSET_n(p, i) \
+ (hba->mcq_opr[(p)].offset + hba->mcq_opr[(p)].stride * (i))
+
+static void __iomem *mcq_opr_base(struct ufs_hba *hba,
+ enum ufshcd_mcq_opr n, int i)
+{
+ struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[n];
+
+ return opr->base + opr->stride * i;
+}
+
+u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i)
+{
+ return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
+}
+
+void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
+{
+ writel(val, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_write_cqis);
+
+/*
+ * Current MCQ specification doesn't provide a Task Tag or its equivalent in
+ * the Completion Queue Entry. Find the Task Tag using an indirect method.
+ */
+static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq,
+ struct cq_entry *cqe)
+{
+ u64 addr;
+
+ /* sizeof(struct utp_transfer_cmd_desc) must be a multiple of 128 */
+ BUILD_BUG_ON(sizeof(struct utp_transfer_cmd_desc) & GENMASK(6, 0));
+
+ /* Bits 63:7 UCD base address, 6:5 are reserved, 4:0 is SQ ID */
+ addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) -
+ hba->ucdl_dma_addr;
+
+ return div_u64(addr, sizeof(struct utp_transfer_cmd_desc));
+}
+
+static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq)
+{
+ struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
+ int tag = ufshcd_mcq_get_tag(hba, hwq, cqe);
+
+ ufshcd_compl_one_cqe(hba, tag, cqe);
+}
+
+unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq)
+{
+ unsigned long completed_reqs = 0;
+
+ ufshcd_mcq_update_cq_tail_slot(hwq);
+ while (!ufshcd_mcq_is_cq_empty(hwq)) {
+ ufshcd_mcq_process_cqe(hba, hwq);
+ ufshcd_mcq_inc_cq_head_slot(hwq);
+ completed_reqs++;
+ }
+
+ if (completed_reqs)
+ ufshcd_mcq_update_cq_head(hwq);
+
+ return completed_reqs;
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_nolock);
+
+unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq)
+{
+ unsigned long completed_reqs;
+
+ spin_lock(&hwq->cq_lock);
+ completed_reqs = ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+ spin_unlock(&hwq->cq_lock);
+
+ return completed_reqs;
+}
+
+void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
+{
+ struct ufs_hw_queue *hwq;
+ u16 qsize;
+ int i;
+
+ for (i = 0; i < hba->nr_hw_queues; i++) {
+ hwq = &hba->uhq[i];
+ hwq->id = i;
+ qsize = hwq->max_entries * MCQ_ENTRY_SIZE_IN_DWORD - 1;
+
+ /* Submission Queue Lower Base Address */
+ ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr),
+ MCQ_CFG_n(REG_SQLBA, i));
+ /* Submission Queue Upper Base Address */
+ ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
+ MCQ_CFG_n(REG_SQUBA, i));
+ /* Submission Queue Doorbell Address Offset */
+ ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i),
+ MCQ_CFG_n(REG_SQDAO, i));
+ /* Submission Queue Interrupt Status Address Offset */
+ ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i),
+ MCQ_CFG_n(REG_SQISAO, i));
+
+ /* Completion Queue Lower Base Address */
+ ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr),
+ MCQ_CFG_n(REG_CQLBA, i));
+ /* Completion Queue Upper Base Address */
+ ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr),
+ MCQ_CFG_n(REG_CQUBA, i));
+ /* Completion Queue Doorbell Address Offset */
+ ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQD, i),
+ MCQ_CFG_n(REG_CQDAO, i));
+ /* Completion Queue Interrupt Status Address Offset */
+ ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQIS, i),
+ MCQ_CFG_n(REG_CQISAO, i));
+
+ /* Save the base addresses for quicker access */
+ hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP;
+ hwq->mcq_sq_tail = mcq_opr_base(hba, OPR_SQD, i) + REG_SQTP;
+ hwq->mcq_cq_head = mcq_opr_base(hba, OPR_CQD, i) + REG_CQHP;
+ hwq->mcq_cq_tail = mcq_opr_base(hba, OPR_CQD, i) + REG_CQTP;
+
+ /* Reinitializing is needed upon HC reset */
+ hwq->sq_tail_slot = hwq->cq_tail_slot = hwq->cq_head_slot = 0;
+
+ /* Enable Tail Entry Push Status interrupt only for non-poll queues */
+ if (i < hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL])
+ writel(1, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIE);
+
+ /* Completion Queue Enable|Size to Completion Queue Attribute */
+ ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize,
+ MCQ_CFG_n(REG_CQATTR, i));
+
+ /*
+ * Submission Queue Enable|Size|Completion Queue ID to
+ * Submission Queue Attribute
+ */
+ ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize |
+ (i << QUEUE_ID_OFFSET),
+ MCQ_CFG_n(REG_SQATTR, i));
+ }
+}
+
+void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2,
+ REG_UFS_MEM_CFG);
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);
+
+void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
+{
+ ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);
+ ufshcd_writel(hba, msg->address_hi, REG_UFS_ESIUBA);
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_config_esi);
+
+int ufshcd_mcq_init(struct ufs_hba *hba)
+{
+ struct Scsi_Host *host = hba->host;
+ struct ufs_hw_queue *hwq;
+ int ret, i;
+
+ ret = ufshcd_mcq_config_nr_queues(hba);
+ if (ret)
+ return ret;
+
+ ret = ufshcd_vops_mcq_config_resource(hba);
+ if (ret)
+ return ret;
+
+ ret = ufshcd_mcq_vops_op_runtime_config(hba);
+ if (ret) {
+ dev_err(hba->dev, "Operation runtime config failed, ret=%d\n",
+ ret);
+ return ret;
+ }
+ hba->uhq = devm_kzalloc(hba->dev,
+ hba->nr_hw_queues * sizeof(struct ufs_hw_queue),
+ GFP_KERNEL);
+ if (!hba->uhq) {
+ dev_err(hba->dev, "ufs hw queue memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < hba->nr_hw_queues; i++) {
+ hwq = &hba->uhq[i];
+ hwq->max_entries = hba->nutrs;
+ spin_lock_init(&hwq->sq_lock);
+ spin_lock_init(&hwq->cq_lock);
+ }
+
+ /* The very first HW queue serves device commands */
+ hba->dev_cmd_queue = &hba->uhq[0];
+ /* Give dev_cmd_queue the minimal number of entries */
+ hba->dev_cmd_queue->max_entries = MAX_DEV_CMD_ENTRIES;
+
+ host->host_tagset = 1;
+ return 0;
+}
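The kernel-doc for ufshcd_mcq_decide_queue_depth() above notes that the
get_hba_mac() variant op is mandatory for MCQ (ufshcd_mcq_init() also needs
mcq_config_resource() and op_runtime_config() from the host driver).  As a
purely illustrative sketch, not part of this series and with the example_*
names and the value 32 being hypothetical, the MAC query hook could look like
this; with the default module parameters on an 8-CPU system and a controller
exposing enough queues, ufshcd_mcq_config_nr_queues() then ends up with
8 rw queues, 1 poll queue and 1 device-command queue, i.e. 10 hardware queues:

	/*
	 * Hypothetical example: a host driver hook so that
	 * ufshcd_mcq_decide_queue_depth() can query the controller's
	 * Max Active Commands (MAC).  32 is an assumed hardware limit.
	 */
	static int example_get_hba_mac(struct ufs_hba *hba)
	{
		return 32;
	}

	static const struct ufs_hba_variant_ops example_hba_vops = {
		.name		= "example",
		.get_hba_mac	= example_get_hba_mac,
	};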
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index b99e3f3dc4ef..0d38e7fa34cc 100644
--- a/drivers/ufs/core/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -6,6 +6,7 @@
*/
#include <linux/bsg-lib.h>
+#include <linux/dma-mapping.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include "ufs_bsg.h"
@@ -16,31 +17,11 @@ static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len,
struct utp_upiu_query *qr)
{
int desc_size = be16_to_cpu(qr->length);
- int desc_id = qr->idn;
if (desc_size <= 0)
return -EINVAL;
- ufshcd_map_desc_id_to_length(hba, desc_id, desc_len);
- if (!*desc_len)
- return -EINVAL;
-
- *desc_len = min_t(int, *desc_len, desc_size);
-
- return 0;
-}
-
-static int ufs_bsg_verify_query_size(struct ufs_hba *hba,
- unsigned int request_len,
- unsigned int reply_len)
-{
- int min_req_len = sizeof(struct ufs_bsg_request);
- int min_rsp_len = sizeof(struct ufs_bsg_reply);
-
- if (min_req_len > request_len || min_rsp_len > reply_len) {
- dev_err(hba->dev, "not enough space assigned\n");
- return -EINVAL;
- }
+ *desc_len = min_t(int, QUERY_DESC_MAX_SIZE, desc_size);
return 0;
}
@@ -83,23 +64,84 @@ out:
return 0;
}
+static int ufs_bsg_exec_advanced_rpmb_req(struct ufs_hba *hba, struct bsg_job *job)
+{
+ struct ufs_rpmb_request *rpmb_request = job->request;
+ struct ufs_rpmb_reply *rpmb_reply = job->reply;
+ struct bsg_buffer *payload = NULL;
+ enum dma_data_direction dir;
+ struct scatterlist *sg_list = NULL;
+ int rpmb_req_type;
+ int sg_cnt = 0;
+ int ret;
+ int data_len;
+
+ if (hba->ufs_version < ufshci_version(4, 0) || !hba->dev_info.b_advanced_rpmb_en ||
+ !(hba->capabilities & MASK_EHSLUTRD_SUPPORTED))
+ return -EINVAL;
+
+ if (rpmb_request->ehs_req.length != 2 || rpmb_request->ehs_req.ehs_type != 1)
+ return -EINVAL;
+
+ rpmb_req_type = be16_to_cpu(rpmb_request->ehs_req.meta.req_resp_type);
+
+ switch (rpmb_req_type) {
+ case UFS_RPMB_WRITE_KEY:
+ case UFS_RPMB_READ_CNT:
+ case UFS_RPMB_PURGE_ENABLE:
+ dir = DMA_NONE;
+ break;
+ case UFS_RPMB_WRITE:
+ case UFS_RPMB_SEC_CONF_WRITE:
+ dir = DMA_TO_DEVICE;
+ break;
+ case UFS_RPMB_READ:
+ case UFS_RPMB_SEC_CONF_READ:
+ case UFS_RPMB_PURGE_STATUS_READ:
+ dir = DMA_FROM_DEVICE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (dir != DMA_NONE) {
+ payload = &job->request_payload;
+ if (!payload || !payload->payload_len || !payload->sg_cnt)
+ return -EINVAL;
+
+ sg_cnt = dma_map_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir);
+ if (unlikely(!sg_cnt))
+ return -ENOMEM;
+ sg_list = payload->sg_list;
+ data_len = payload->payload_len;
+ }
+
+ ret = ufshcd_advanced_rpmb_req_handler(hba, &rpmb_request->bsg_request.upiu_req,
+ &rpmb_reply->bsg_reply.upiu_rsp, &rpmb_request->ehs_req,
+ &rpmb_reply->ehs_rsp, sg_cnt, sg_list, dir);
+
+ if (dir != DMA_NONE) {
+ dma_unmap_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir);
+
+ if (!ret)
+ rpmb_reply->bsg_reply.reply_payload_rcv_len = data_len;
+ }
+
+ return ret;
+}
+
static int ufs_bsg_request(struct bsg_job *job)
{
struct ufs_bsg_request *bsg_request = job->request;
struct ufs_bsg_reply *bsg_reply = job->reply;
struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent));
- unsigned int req_len = job->request_len;
- unsigned int reply_len = job->reply_len;
struct uic_command uc = {};
int msgcode;
- uint8_t *desc_buff = NULL;
+ uint8_t *buff = NULL;
int desc_len = 0;
enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP;
int ret;
-
- ret = ufs_bsg_verify_query_size(hba, req_len, reply_len);
- if (ret)
- goto out;
+ bool rpmb = false;
bsg_reply->reply_payload_rcv_len = 0;
@@ -109,34 +151,39 @@ static int ufs_bsg_request(struct bsg_job *job)
switch (msgcode) {
case UPIU_TRANSACTION_QUERY_REQ:
desc_op = bsg_request->upiu_req.qr.opcode;
- ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff,
- &desc_len, desc_op);
- if (ret) {
- ufshcd_rpm_put_sync(hba);
+ ret = ufs_bsg_alloc_desc_buffer(hba, job, &buff, &desc_len, desc_op);
+ if (ret)
goto out;
- }
-
fallthrough;
case UPIU_TRANSACTION_NOP_OUT:
case UPIU_TRANSACTION_TASK_REQ:
ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req,
&bsg_reply->upiu_rsp, msgcode,
- desc_buff, &desc_len, desc_op);
+ buff, &desc_len, desc_op);
if (ret)
- dev_err(hba->dev,
- "exe raw upiu: error code %d\n", ret);
-
+ dev_err(hba->dev, "exe raw upiu: error code %d\n", ret);
+ else if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len) {
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ buff, desc_len);
+ }
break;
case UPIU_TRANSACTION_UIC_CMD:
memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
ret = ufshcd_send_uic_cmd(hba, &uc);
if (ret)
- dev_err(hba->dev,
- "send uic cmd: error code %d\n", ret);
+ dev_err(hba->dev, "send uic cmd: error code %d\n", ret);
memcpy(&bsg_reply->upiu_rsp.uc, &uc, UIC_CMD_SIZE);
break;
+ case UPIU_TRANSACTION_ARPMB_CMD:
+ rpmb = true;
+ ret = ufs_bsg_exec_advanced_rpmb_req(hba, job);
+ if (ret)
+ dev_err(hba->dev, "ARPMB OP failed: error code %d\n", ret);
+ break;
default:
ret = -ENOTSUPP;
dev_err(hba->dev, "unsupported msgcode 0x%x\n", msgcode);
@@ -144,22 +191,11 @@ static int ufs_bsg_request(struct bsg_job *job)
break;
}
- ufshcd_rpm_put_sync(hba);
-
- if (!desc_buff)
- goto out;
-
- if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len)
- bsg_reply->reply_payload_rcv_len =
- sg_copy_from_buffer(job->request_payload.sg_list,
- job->request_payload.sg_cnt,
- desc_buff, desc_len);
-
- kfree(desc_buff);
-
out:
+ ufshcd_rpm_put_sync(hba);
+ kfree(buff);
bsg_reply->result = ret;
- job->reply_len = sizeof(struct ufs_bsg_reply);
+ job->reply_len = !rpmb ? sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply);
/* complete the job here only if no error */
if (ret == 0)
bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index a9e8e1f5afe7..529f8507a5e4 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -61,7 +61,24 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, u8 index, bool *flag_res);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
-
+void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
+ struct cq_entry *cqe);
+int ufshcd_mcq_init(struct ufs_hba *hba);
+int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
+int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
+void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
+void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
+void ufshcd_mcq_select_mcq_mode(struct ufs_hba *hba);
+u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
+void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
+unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq);
+struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
+ struct request *req);
+unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq);
+
+#define UFSHCD_MCQ_IO_QUEUE_OFFSET 1
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
@@ -70,9 +87,6 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);
-void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
- int *desc_length);
-
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
@@ -226,6 +240,53 @@ static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
hba->vops->config_scaling_param(hba, p, data);
}
+static inline void ufshcd_vops_reinit_notify(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->reinit_notify)
+ hba->vops->reinit_notify(hba);
+}
+
+static inline int ufshcd_vops_mcq_config_resource(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->mcq_config_resource)
+ return hba->vops->mcq_config_resource(hba);
+
+ return -EOPNOTSUPP;
+}
+
+static inline int ufshcd_mcq_vops_get_hba_mac(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->get_hba_mac)
+ return hba->vops->get_hba_mac(hba);
+
+ return -EOPNOTSUPP;
+}
+
+static inline int ufshcd_mcq_vops_op_runtime_config(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->op_runtime_config)
+ return hba->vops->op_runtime_config(hba);
+
+ return -EOPNOTSUPP;
+}
+
+static inline int ufshcd_vops_get_outstanding_cqs(struct ufs_hba *hba,
+ unsigned long *ocqs)
+{
+ if (hba->vops && hba->vops->get_outstanding_cqs)
+ return hba->vops->get_outstanding_cqs(hba, ocqs);
+
+ return -EOPNOTSUPP;
+}
+
+static inline int ufshcd_mcq_vops_config_esi(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->config_esi)
+ return hba->vops->config_esi(hba);
+
+ return -EOPNOTSUPP;
+}
+
extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/**
@@ -302,4 +363,44 @@ static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info, u8
return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
}
+static inline void ufshcd_inc_sq_tail(struct ufs_hw_queue *q)
+{
+ u32 mask = q->max_entries - 1;
+ u32 val;
+
+ q->sq_tail_slot = (q->sq_tail_slot + 1) & mask;
+ val = q->sq_tail_slot * sizeof(struct utp_transfer_req_desc);
+ writel(val, q->mcq_sq_tail);
+}
+
+static inline void ufshcd_mcq_update_cq_tail_slot(struct ufs_hw_queue *q)
+{
+ u32 val = readl(q->mcq_cq_tail);
+
+ q->cq_tail_slot = val / sizeof(struct cq_entry);
+}
+
+static inline bool ufshcd_mcq_is_cq_empty(struct ufs_hw_queue *q)
+{
+ return q->cq_head_slot == q->cq_tail_slot;
+}
+
+static inline void ufshcd_mcq_inc_cq_head_slot(struct ufs_hw_queue *q)
+{
+ q->cq_head_slot++;
+ if (q->cq_head_slot == q->max_entries)
+ q->cq_head_slot = 0;
+}
+
+static inline void ufshcd_mcq_update_cq_head(struct ufs_hw_queue *q)
+{
+ writel(q->cq_head_slot * sizeof(struct cq_entry), q->mcq_cq_head);
+}
+
+static inline struct cq_entry *ufshcd_mcq_cur_cqe(struct ufs_hw_queue *q)
+{
+ struct cq_entry *cqe = q->cqe_base_addr;
+
+ return cqe + q->cq_head_slot;
+}
#endif /* _UFSHCD_PRIV_H_ */
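The inline helpers added above drive the MCQ rings with simple arithmetic: the
SQ tail slot wraps via a power-of-two mask (the CQ head uses an explicit
compare-and-reset), and the doorbell registers are written with byte offsets
rather than slot indices.  A small illustrative helper, not part of this
series, showing the same math for the SQ tail and assuming the 32-byte
struct utp_transfer_req_desc:

	/*
	 * Illustrative only: mirrors the ring math in ufshcd_inc_sq_tail().
	 * Assumes max_entries is a power of two.
	 */
	static inline u32 example_next_sq_doorbell(u32 tail_slot, u32 max_entries)
	{
		u32 next = (tail_slot + 1) & (max_entries - 1);

		/* e.g. slot 31 of 32 wraps to 0; slot 5 -> 6 * 32 = 192 */
		return next * sizeof(struct utp_transfer_req_desc);
	}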
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 3a1c4d31e010..276a82b2e5ee 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -43,6 +43,12 @@
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK)
+
+#define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
+ UFSHCD_ERROR_MASK |\
+ MCQ_CQ_EVENT_STATUS)
+
+
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT 500
@@ -56,6 +62,9 @@
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
+/* Advanced RPMB request timeout */
+#define ADVANCED_RPMB_REQ_TIMEOUT 3000 /* 3 seconds */
+
/* Task management command timeout */
#define TM_CMD_TIMEOUT 100 /* msecs */
@@ -89,6 +98,33 @@
/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
+/* A UFSHC 4.0 compliant HC supports this mode; refer to param_set_mcq_mode() */
+static bool use_mcq_mode = true;
+
+static bool is_mcq_supported(struct ufs_hba *hba)
+{
+ return hba->mcq_sup && use_mcq_mode;
+}
+
+static int param_set_mcq_mode(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_bool(val, kp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct kernel_param_ops mcq_mode_ops = {
+ .set = param_set_mcq_mode,
+ .get = param_get_bool,
+};
+
+module_param_cb(use_mcq_mode, &mcq_mode_ops, &use_mcq_mode, 0644);
+MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
@@ -528,7 +564,7 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
prdt_length = le16_to_cpu(
lrbp->utr_descriptor_ptr->prd_table_length);
if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
- prdt_length /= sizeof(struct ufshcd_sg_entry);
+ prdt_length /= ufshcd_sg_entry_size(hba);
dev_err(hba->dev,
"UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
@@ -537,7 +573,7 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
if (pr_prdt)
ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
- sizeof(struct ufshcd_sg_entry) * prdt_length);
+ ufshcd_sg_entry_size(hba) * prdt_length);
}
}
@@ -740,12 +776,17 @@ static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
/**
* ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
* @lrbp: pointer to local command reference block
+ * @cqe: pointer to the completion queue entry
*
* This function is used to get the OCS field from UTRD
* Returns the OCS field in the UTRD
*/
-static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
+static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
+ struct cq_entry *cqe)
{
+ if (cqe)
+ return le32_to_cpu(cqe->status) & MASK_OCS;
+
return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}
@@ -1121,6 +1162,12 @@ static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
return pending;
}
+/*
+ * Wait until all pending SCSI commands and TMFs have finished or the timeout
+ * has expired.
+ *
+ * Return: 0 upon success; -EBUSY upon timeout.
+ */
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
u64 wait_timeout_us)
{
@@ -1154,7 +1201,7 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
- schedule();
+ io_schedule_timeout(msecs_to_jiffies(20));
if (ktime_to_us(ktime_sub(ktime_get(), start)) >
wait_timeout_us) {
timeout = true;
@@ -1225,9 +1272,14 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
return ret;
}
-static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
+/*
+ * Wait until all pending SCSI commands and TMFs have finished or the timeout
+ * has expired.
+ *
+ * Return: 0 upon success; -EBUSY upon timeout.
+ */
+static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
{
- #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
int ret = 0;
/*
* make sure that there are no outstanding requests when
@@ -1238,7 +1290,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
down_write(&hba->clk_scaling_lock);
if (!hba->clk_scaling.is_allowed ||
- ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+ ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
mutex_unlock(&hba->wb_mutex);
@@ -1280,7 +1332,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
- ret = ufshcd_clock_scaling_prepare(hba);
+ ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
if (ret)
return ret;
@@ -2136,9 +2188,11 @@ static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *
* ufshcd_send_command - Send SCSI or device management commands
* @hba: per adapter instance
* @task_tag: Task tag of the command
+ * @hwq: pointer to hardware queue instance
*/
static inline
-void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
+ struct ufs_hw_queue *hwq)
{
struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
unsigned long flags;
@@ -2152,12 +2206,24 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_start_monitor(hba, lrbp);
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- if (hba->vops && hba->vops->setup_xfer_req)
- hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd);
- __set_bit(task_tag, &hba->outstanding_reqs);
- ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+ if (is_mcq_enabled(hba)) {
+ int utrd_size = sizeof(struct utp_transfer_req_desc);
+
+ spin_lock(&hwq->sq_lock);
+ memcpy(hwq->sqe_base_addr + (hwq->sq_tail_slot * utrd_size),
+ lrbp->utr_descriptor_ptr, utrd_size);
+ ufshcd_inc_sq_tail(hwq);
+ spin_unlock(&hwq->sq_lock);
+ } else {
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ if (hba->vops && hba->vops->setup_xfer_req)
+ hba->vops->setup_xfer_req(hba, lrbp->task_tag,
+ !!lrbp->cmd);
+ __set_bit(lrbp->task_tag, &hba->outstanding_reqs);
+ ufshcd_writel(hba, 1 << lrbp->task_tag,
+ REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+ }
}
/**
@@ -2245,6 +2311,14 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
if (err)
dev_err(hba->dev, "crypto setup failed\n");
+ hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
+ if (!hba->mcq_sup)
+ return err;
+
+ hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
+ hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
+ hba->mcq_capabilities);
+
return err;
}
@@ -2397,38 +2471,30 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
}
/**
- * ufshcd_map_sg - Map scatter-gather list to prdt
- * @hba: per adapter instance
- * @lrbp: pointer to local reference block
- *
- * Returns 0 in case of success, non-zero value in case of failure
+ * ufshcd_sgl_to_prdt - SG list to PRDT (Physical Region Description Table, 4DW format)
+ * @hba: per-adapter instance
+ * @lrbp: pointer to local reference block
+ * @sg_entries: The number of sg lists actually used
+ * @sg_list: Pointer to SG list
*/
-static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
+ struct scatterlist *sg_list)
{
- struct ufshcd_sg_entry *prd_table;
+ struct ufshcd_sg_entry *prd;
struct scatterlist *sg;
- struct scsi_cmnd *cmd;
- int sg_segments;
int i;
- cmd = lrbp->cmd;
- sg_segments = scsi_dma_map(cmd);
- if (sg_segments < 0)
- return sg_segments;
-
- if (sg_segments) {
+ if (sg_entries) {
if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16((sg_segments *
- sizeof(struct ufshcd_sg_entry)));
+ cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
else
- lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16(sg_segments);
+ lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);
- prd_table = lrbp->ucd_prdt_ptr;
+ prd = lrbp->ucd_prdt_ptr;
- scsi_for_each_sg(cmd, sg, sg_segments, i) {
+ for_each_sg(sg_list, sg, sg_entries, i) {
const unsigned int len = sg_dma_len(sg);
/*
@@ -2440,13 +2506,32 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
* indicates 4 bytes, '7' indicates 8 bytes, etc."
*/
WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
- prd_table[i].size = cpu_to_le32(len - 1);
- prd_table[i].addr = cpu_to_le64(sg->dma_address);
- prd_table[i].reserved = 0;
+ prd->size = cpu_to_le32(len - 1);
+ prd->addr = cpu_to_le64(sg->dma_address);
+ prd->reserved = 0;
+ prd = (void *)prd + ufshcd_sg_entry_size(hba);
}
} else {
lrbp->utr_descriptor_ptr->prd_table_length = 0;
}
+}
+
+/**
+ * ufshcd_map_sg - Map scatter-gather list to prdt
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ *
+ * Returns 0 in case of success, non-zero value in case of failure
+ */
+static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct scsi_cmnd *cmd = lrbp->cmd;
+ int sg_segments = scsi_dma_map(cmd);
+
+ if (sg_segments < 0)
+ return sg_segments;
+
+ ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
return 0;
}
@@ -2494,14 +2579,15 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
}
/**
- * ufshcd_prepare_req_desc_hdr() - Fills the requests header
+ * ufshcd_prepare_req_desc_hdr - Fill UTP Transfer request descriptor header according to request
* descriptor according to request
* @lrbp: pointer to local reference block
* @upiu_flags: flags required in the header
* @cmd_dir: requests data direction
+ * @ehs_length: Total EHS Length (in 32-byte units) of all Extra Header Segments
*/
-static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
- u8 *upiu_flags, enum dma_data_direction cmd_dir)
+static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
+ enum dma_data_direction cmd_dir, int ehs_length)
{
struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
u32 data_direction;
@@ -2520,8 +2606,8 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
*upiu_flags = UPIU_CMD_FLAGS_NONE;
}
- dword_0 = data_direction | (lrbp->command_type
- << UPIU_COMMAND_TYPE_OFFSET);
+ dword_0 = data_direction | (lrbp->command_type << UPIU_COMMAND_TYPE_OFFSET) |
+ ehs_length << 8;
if (lrbp->intr_cmd)
dword_0 |= UTP_REQ_DESC_INT_CMD;
@@ -2576,8 +2662,7 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
}
/**
- * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
- * for query requsts
+ * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request
* @hba: UFS hba
* @lrbp: local reference block pointer
* @upiu_flags: flags
@@ -2648,7 +2733,7 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
else
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
@@ -2676,8 +2761,7 @@ static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
if (likely(lrbp->cmd)) {
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
- lrbp->cmd->sc_data_direction);
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
} else {
ret = -EINVAL;
@@ -2709,33 +2793,38 @@ static inline bool is_device_wlun(struct scsi_device *sdev)
*/
static void ufshcd_map_queues(struct Scsi_Host *shost)
{
- int i;
+ struct ufs_hba *hba = shost_priv(shost);
+ int i, queue_offset = 0;
+
+ if (!is_mcq_supported(hba)) {
+ hba->nr_queues[HCTX_TYPE_DEFAULT] = 1;
+ hba->nr_queues[HCTX_TYPE_READ] = 0;
+ hba->nr_queues[HCTX_TYPE_POLL] = 1;
+ hba->nr_hw_queues = 1;
+ }
for (i = 0; i < shost->nr_maps; i++) {
struct blk_mq_queue_map *map = &shost->tag_set.map[i];
- switch (i) {
- case HCTX_TYPE_DEFAULT:
- case HCTX_TYPE_POLL:
- map->nr_queues = 1;
- break;
- case HCTX_TYPE_READ:
- map->nr_queues = 0;
+ map->nr_queues = hba->nr_queues[i];
+ if (!map->nr_queues)
continue;
- default:
- WARN_ON_ONCE(true);
- }
- map->queue_offset = 0;
+ map->queue_offset = queue_offset;
+ if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba))
+ map->queue_offset = 0;
+
blk_mq_map_queues(map);
+ queue_offset += map->nr_queues;
}
}
static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
{
- struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
+ struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
+ i * sizeof_utp_transfer_cmd_desc(hba);
struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
- i * sizeof(struct utp_transfer_cmd_desc);
+ i * sizeof_utp_transfer_cmd_desc(hba);
u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
response_upiu);
u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
@@ -2743,11 +2832,11 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
lrb->utr_descriptor_ptr = utrdlp + i;
lrb->utrd_dma_addr = hba->utrdl_dma_addr +
i * sizeof(struct utp_transfer_req_desc);
- lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
+ lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
lrb->ucd_req_dma_addr = cmd_desc_element_addr;
- lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+ lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
- lrb->ucd_prdt_ptr = cmd_descp[i].prd_table;
+ lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
}
@@ -2764,6 +2853,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
int tag = scsi_cmd_to_rq(cmd)->tag;
struct ufshcd_lrb *lrbp;
int err = 0;
+ struct ufs_hw_queue *hwq = NULL;
WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
@@ -2848,7 +2938,10 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto out;
}
- ufshcd_send_command(hba, tag);
+ if (is_mcq_enabled(hba))
+ hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+
+ ufshcd_send_command(hba, tag, hwq);
out:
rcu_read_unlock();
@@ -2943,6 +3036,12 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
__func__);
break;
+ case UPIU_TRANSACTION_RESPONSE:
+ if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) {
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp);
+ }
+ break;
default:
err = -EINVAL;
dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
@@ -2972,7 +3071,7 @@ retry:
* not trigger any race conditions.
*/
hba->dev_cmd.complete = NULL;
- err = ufshcd_get_tr_ocs(lrbp);
+ err = ufshcd_get_tr_ocs(lrbp, hba->dev_cmd.cqe);
if (!err)
err = ufshcd_dev_cmd_completion(hba, lrbp);
} else {
@@ -3008,6 +3107,22 @@ retry:
} else {
dev_err(hba->dev, "%s: failed to clear tag %d\n",
__func__, lrbp->task_tag);
+
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ pending = test_bit(lrbp->task_tag,
+ &hba->outstanding_reqs);
+ if (pending)
+ hba->dev_cmd.complete = NULL;
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
+ if (!pending) {
+ /*
+ * The completion handler ran while we tried to
+ * clear the command.
+ */
+ time_left = 1;
+ goto retry;
+ }
}
}
@@ -3043,10 +3158,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
goto out;
hba->dev_cmd.complete = &wait;
+ hba->dev_cmd.cqe = NULL;
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
- ufshcd_send_command(hba, tag);
+ ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
@@ -3367,37 +3483,6 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
}
/**
- * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
- * @hba: Pointer to adapter instance
- * @desc_id: descriptor idn value
- * @desc_len: mapped desc length (out)
- */
-void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
- int *desc_len)
-{
- if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 ||
- desc_id == QUERY_DESC_IDN_RFU_1)
- *desc_len = 0;
- else
- *desc_len = hba->desc_size[desc_id];
-}
-EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
-
-static void ufshcd_update_desc_length(struct ufs_hba *hba,
- enum desc_idn desc_id, int desc_index,
- unsigned char desc_len)
-{
- if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE &&
- desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT)
- /* For UFS 3.1, the normal unit descriptor is 10 bytes larger
- * than the RPMB unit, however, both descriptors share the same
- * desc_idn, to cover both unit descriptors with one length, we
- * choose the normal unit descriptor length by desc_index.
- */
- hba->desc_size[desc_id] = desc_len;
-}
-
-/**
* ufshcd_read_desc_param - read the specified descriptor parameter
* @hba: Pointer to adapter instance
* @desc_id: descriptor idn value
@@ -3417,26 +3502,13 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
{
int ret;
u8 *desc_buf;
- int buff_len;
+ int buff_len = QUERY_DESC_MAX_SIZE;
bool is_kmalloc = true;
/* Safety check */
if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
return -EINVAL;
- /* Get the length of descriptor */
- ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
- if (!buff_len) {
- dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
- return -EINVAL;
- }
-
- if (param_offset >= buff_len) {
- dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
- __func__, param_offset, desc_id, buff_len);
- return -EINVAL;
- }
-
/* Check whether we need temp memory */
if (param_offset != 0 || param_size < buff_len) {
desc_buf = kzalloc(buff_len, GFP_KERNEL);
@@ -3449,15 +3521,24 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
/* Request for full descriptor */
ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
- desc_id, desc_index, 0,
- desc_buf, &buff_len);
-
+ desc_id, desc_index, 0,
+ desc_buf, &buff_len);
if (ret) {
dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
__func__, desc_id, desc_index, param_offset, ret);
goto out;
}
+ /* Update descriptor length */
+ buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
+
+ if (param_offset >= buff_len) {
+ dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
+ __func__, param_offset, desc_id, buff_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
/* Sanity check */
if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
@@ -3466,10 +3547,6 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
goto out;
}
- /* Update descriptor length */
- buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
- ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
-
if (is_kmalloc) {
/* Make sure we don't copy more data than available */
if (param_offset >= buff_len)
@@ -3656,7 +3733,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
size_t utmrdl_size, utrdl_size, ucdl_size;
/* Allocate memory for UTP command descriptors */
- ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
+ ucdl_size = sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs;
hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
ucdl_size,
&hba->ucdl_dma_addr,
@@ -3664,12 +3741,9 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
/*
* UFSHCI requires UTP command descriptor to be 128 byte aligned.
- * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
- * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
- * be aligned to 128 bytes as well
*/
if (!hba->ucdl_base_addr ||
- WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
+ WARN_ON(hba->ucdl_dma_addr & (128 - 1))) {
dev_err(hba->dev,
"Command Descriptor Memory allocation failed\n");
goto out;
@@ -3685,13 +3759,21 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
&hba->utrdl_dma_addr,
GFP_KERNEL);
if (!hba->utrdl_base_addr ||
- WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
+ WARN_ON(hba->utrdl_dma_addr & (1024 - 1))) {
dev_err(hba->dev,
"Transfer Descriptor Memory allocation failed\n");
goto out;
}
/*
+ * Skip utmrdl allocation; it may have been
+ * allocated during first pass and not released during
+ * MCQ memory allocation.
+ * See ufshcd_release_sdb_queue() and ufshcd_config_mcq()
+ */
+ if (hba->utmrdl_base_addr)
+ goto skip_utmrdl;
+ /*
* Allocate memory for UTP Task Management descriptors
* UFSHCI requires 1024 byte alignment of UTMRD
*/
@@ -3701,12 +3783,13 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
&hba->utmrdl_dma_addr,
GFP_KERNEL);
if (!hba->utmrdl_base_addr ||
- WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
+ WARN_ON(hba->utmrdl_dma_addr & (1024 - 1))) {
dev_err(hba->dev,
"Task Management Descriptor Memory allocation failed\n");
goto out;
}
+skip_utmrdl:
/* Allocate memory for local reference block */
hba->lrb = devm_kcalloc(hba->dev,
hba->nutrs, sizeof(struct ufshcd_lrb),
@@ -3750,7 +3833,7 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
prdt_offset =
offsetof(struct utp_transfer_cmd_desc, prd_table);
- cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
+ cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba);
cmd_desc_dma_addr = hba->ucdl_dma_addr;
for (i = 0; i < hba->nutrs; i++) {
@@ -4907,7 +4990,7 @@ static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
*/
static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
{
- int len = hba->desc_size[QUERY_DESC_IDN_UNIT];
+ int len = QUERY_DESC_MAX_SIZE;
u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
u8 lun_qdepth = hba->nutrs;
u8 *desc_buf;
@@ -4942,6 +5025,12 @@ static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
hba->dev_info.is_lu_power_on_wp = true;
+ /* In case of RPMB LU, check if advanced RPMB mode is enabled */
+ if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
+ desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
+ hba->dev_info.b_advanced_rpmb_en = true;
+
+
kfree(desc_buf);
set_qdepth:
/*
@@ -5030,8 +5119,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
ufshcd_hpb_configure(hba, sdev);
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
- if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
- blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
+ if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
+ blk_queue_update_dma_alignment(q, 4096 - 1);
/*
* Block runtime-pm until all consumers are added.
* Refer ufshcd_setup_links().
@@ -5130,18 +5219,20 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
* ufshcd_transfer_rsp_status - Get overall status of the response
* @hba: per adapter instance
* @lrbp: pointer to local reference block of completed command
+ * @cqe: pointer to the completion queue entry
*
* Returns result of the command to notify SCSI midlayer
*/
static inline int
-ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+ struct cq_entry *cqe)
{
int result = 0;
int scsi_status;
enum utp_ocs ocs;
/* overall command status of utrd */
- ocs = ufshcd_get_tr_ocs(lrbp);
+ ocs = ufshcd_get_tr_ocs(lrbp, cqe);
if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
@@ -5306,42 +5397,53 @@ static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
}
/**
- * __ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * ufshcd_compl_one_cqe - handle a completion queue entry
* @hba: per adapter instance
- * @completed_reqs: bitmask that indicates which requests to complete
+ * @task_tag: the task tag of the request to be completed
+ * @cqe: pointer to the completion queue entry
*/
-static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
- unsigned long completed_reqs)
+void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
+ struct cq_entry *cqe)
{
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
- int index;
-
- for_each_set_bit(index, &completed_reqs, hba->nutrs) {
- lrbp = &hba->lrb[index];
- lrbp->compl_time_stamp = ktime_get();
- lrbp->compl_time_stamp_local_clock = local_clock();
- cmd = lrbp->cmd;
- if (cmd) {
- if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
- ufshcd_update_monitor(hba, lrbp);
- ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
- cmd->result = ufshcd_transfer_rsp_status(hba, lrbp);
- ufshcd_release_scsi_cmd(hba, lrbp);
- /* Do not touch lrbp after scsi done */
- scsi_done(cmd);
- } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
- lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
- if (hba->dev_cmd.complete) {
- ufshcd_add_command_trace(hba, index,
- UFS_DEV_COMP);
- complete(hba->dev_cmd.complete);
- ufshcd_clk_scaling_update_busy(hba);
- }
+
+ lrbp = &hba->lrb[task_tag];
+ lrbp->compl_time_stamp = ktime_get();
+ cmd = lrbp->cmd;
+ if (cmd) {
+ if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
+ ufshcd_update_monitor(hba, lrbp);
+ ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
+ cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
+ ufshcd_release_scsi_cmd(hba, lrbp);
+ /* Do not touch lrbp after scsi done */
+ scsi_done(cmd);
+ } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
+ lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
+ if (hba->dev_cmd.complete) {
+ hba->dev_cmd.cqe = cqe;
+ ufshcd_add_command_trace(hba, task_tag, UFS_DEV_COMP);
+ complete(hba->dev_cmd.complete);
+ ufshcd_clk_scaling_update_busy(hba);
}
}
}
+/**
+ * __ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ * @completed_reqs: bitmask that indicates which requests to complete
+ */
+static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
+ unsigned long completed_reqs)
+{
+ int tag;
+
+ for_each_set_bit(tag, &completed_reqs, hba->nutrs)
+ ufshcd_compl_one_cqe(hba, tag, NULL);
+}
+
/* Any value that is not an existing queue number is fine for this constant. */
enum {
UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1
@@ -5371,6 +5473,13 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
struct ufs_hba *hba = shost_priv(shost);
unsigned long completed_reqs, flags;
u32 tr_doorbell;
+ struct ufs_hw_queue *hwq;
+
+ if (is_mcq_enabled(hba)) {
+ hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+
+ return ufshcd_mcq_poll_cqe_lock(hba, hwq);
+ }
spin_lock_irqsave(&hba->outstanding_lock, flags);
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
@@ -6604,6 +6713,40 @@ static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
}
/**
+ * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
+ * @hba: per adapter instance
+ *
+ * Returns IRQ_HANDLED if interrupt is handled
+ */
+static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
+{
+ struct ufs_hw_queue *hwq;
+ unsigned long outstanding_cqs;
+ unsigned int nr_queues;
+ int i, ret;
+ u32 events;
+
+ ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
+ if (ret)
+ outstanding_cqs = (1U << hba->nr_hw_queues) - 1;
+
+ /* Exclude the poll queues */
+ nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
+ for_each_set_bit(i, &outstanding_cqs, nr_queues) {
+ hwq = &hba->uhq[i];
+
+ events = ufshcd_mcq_read_cqis(hba, i);
+ if (events)
+ ufshcd_mcq_write_cqis(hba, events, i);
+
+ if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
+ ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
* ufshcd_sl_intr - Interrupt service routine
* @hba: per adapter instance
* @intr_status: contains interrupts generated by the controller
@@ -6628,6 +6771,9 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
if (intr_status & UTP_TRANSFER_REQ_COMPL)
retval |= ufshcd_transfer_req_compl(hba);
+ if (intr_status & MCQ_CQ_EVENT_STATUS)
+ retval |= ufshcd_handle_mcq_cq_events(hba);
+
return retval;
}
@@ -6876,7 +7022,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
/* update the task tag in the request upiu */
req_upiu->header.dword_0 |= cpu_to_be32(tag);
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
/* just copy the upiu request as it is */
memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
@@ -6895,7 +7041,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
- ufshcd_send_command(hba, tag);
+ ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
/*
* ignore the returning value here - ufshcd_check_query_response is
* bound to fail since dev_cmd.query and dev_cmd.type were left empty.
@@ -7000,6 +7146,100 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
}
/**
+ * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request
+ * @hba: per adapter instance
+ * @req_upiu: upiu request
+ * @rsp_upiu: upiu reply
+ * @req_ehs: EHS field which contains Advanced RPMB Request Message
+ * @rsp_ehs: EHS field which returns Advanced RPMB Response Message
+ * @sg_cnt: The number of sg lists actually used
+ * @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
+ * @dir: DMA direction
+ *
+ * Returns zero on success, non-zero on failure
+ */
+int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
+ struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
+ struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
+ enum dma_data_direction dir)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ const u32 tag = hba->reserved_slot;
+ struct ufshcd_lrb *lrbp;
+ int err = 0;
+ int result;
+ u8 upiu_flags;
+ u8 *ehs_data;
+ u16 ehs_len;
+
+ /* Protects use of hba->reserved_slot. */
+ ufshcd_hold(hba, false);
+ mutex_lock(&hba->dev_cmd.lock);
+ down_read(&hba->clk_scaling_lock);
+
+ lrbp = &hba->lrb[tag];
+ WARN_ON(lrbp->cmd);
+ lrbp->cmd = NULL;
+ lrbp->task_tag = tag;
+ lrbp->lun = UFS_UPIU_RPMB_WLUN;
+
+ lrbp->intr_cmd = true;
+ ufshcd_prepare_lrbp_crypto(NULL, lrbp);
+ hba->dev_cmd.type = DEV_CMD_TYPE_RPMB;
+
+ /* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
+
+ /* update the task tag and LUN in the request upiu */
+ req_upiu->header.dword_0 |= cpu_to_be32(upiu_flags << 16 | UFS_UPIU_RPMB_WLUN << 8 | tag);
+
+	/* Copy the UPIU (which contains the CDB) request as it is */
+ memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
+ /* Copy EHS, starting with byte32, immediately after the CDB package */
+ memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs));
+
+ if (dir != DMA_NONE && sg_list)
+ ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list);
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+
+ hba->dev_cmd.complete = &wait;
+
+ ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
+
+ err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);
+
+ if (!err) {
+ /* Just copy the upiu response as it is */
+ memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
+ /* Get the response UPIU result */
+ result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
+
+ ehs_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) >> 24;
+ /*
+	 * Since bLength in the EHS indicates the total size of the EHS Header and EHS Data
+	 * in 32-byte units, the bLength value of an Advanced RPMB Request/Response
+	 * Message is 02h.
+ */
+ if (ehs_len == 2 && rsp_ehs) {
+ /*
+ * ucd_rsp_ptr points to a buffer with a length of 512 bytes
+			 * (ALIGNED_UPIU_SIZE = 512), and the EHS data starts at byte 32
+ */
+ ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE;
+ memcpy(rsp_ehs, ehs_data, ehs_len * 32);
+ }
+ }
+
+ up_read(&hba->clk_scaling_lock);
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+ return err ? : result;
+}
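+
+/*
+ * Illustrative note (an editorial sketch, not part of this change): with
+ * ehs_len == 2, the memcpy() above moves 2 * 32 = 64 bytes of EHS header and
+ * data into rsp_ehs, taken from byte 32 (EHS_OFFSET_IN_RESPONSE) of the
+ * 512-byte (ALIGNED_UPIU_SIZE) response buffer, so the copy stays well
+ * within the buffer.
+ */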
+
+/**
* ufshcd_eh_device_reset_handler() - Reset a single logical unit.
* @cmd: SCSI command pointer
*
@@ -7441,12 +7681,11 @@ static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
* In case regulators are not initialized we'll return 0
* @hba: per-adapter instance
* @desc_buf: power descriptor buffer to extract ICC levels from.
- * @len: length of desc_buff
*
* Returns calculated ICC level
*/
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
- const u8 *desc_buf, int len)
+ const u8 *desc_buf)
{
u32 icc_level = 0;
@@ -7488,25 +7727,23 @@ out:
static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
{
int ret;
- int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER];
u8 *desc_buf;
u32 icc_level;
- desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
if (!desc_buf)
return;
ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
- desc_buf, buff_len);
+ desc_buf, QUERY_DESC_MAX_SIZE);
if (ret) {
dev_err(hba->dev,
- "%s: Failed reading power descriptor.len = %d ret = %d",
- __func__, buff_len, ret);
+ "%s: Failed reading power descriptor ret = %d",
+ __func__, ret);
goto out;
}
- icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
- buff_len);
+ icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf);
dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
@@ -7616,10 +7853,6 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
(hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
goto wb_disabled;
- if (hba->desc_size[QUERY_DESC_IDN_DEVICE] <
- DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
- goto wb_disabled;
-
ext_ufs_feature = get_unaligned_be32(desc_buf +
DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
@@ -7690,6 +7923,31 @@ static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
}
}
+static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
+{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+ u32 ext_ufs_feature;
+ u32 ext_iid_en = 0;
+ int err;
+
+ /* Only UFS-4.0 and above may support EXT_IID */
+ if (dev_info->wspecversion < 0x400)
+ goto out;
+
+ ext_ufs_feature = get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+ if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
+ goto out;
+
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
+ if (err)
+ dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);
+
+out:
+ dev_info->b_ext_iid_en = ext_iid_en;
+}
+
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
const struct ufs_dev_quirk *fixups)
{
@@ -7727,14 +7985,14 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
u8 *desc_buf;
struct ufs_dev_info *dev_info = &hba->dev_info;
- desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
+ desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
if (!desc_buf) {
err = -ENOMEM;
goto out;
}
err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
- hba->desc_size[QUERY_DESC_IDN_DEVICE]);
+ QUERY_DESC_MAX_SIZE);
if (err) {
dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
__func__, err);
@@ -7751,6 +8009,7 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
/* getting Specification Version in big endian format */
dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
+ dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
@@ -7788,6 +8047,9 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
ufshcd_temp_notif_probe(hba, desc_buf);
+ if (hba->ext_iid_sup)
+ ufshcd_ext_iid_probe(hba, desc_buf);
+
/*
* ufshcd_read_string_desc returns size of the string
* reset the error value
@@ -7981,18 +8243,16 @@ static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
{
int err;
- size_t buff_len;
u8 *desc_buf;
- buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY];
- desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
if (!desc_buf) {
err = -ENOMEM;
goto out;
}
err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
- desc_buf, buff_len);
+ desc_buf, QUERY_DESC_MAX_SIZE);
if (err) {
dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
__func__, err);
@@ -8004,7 +8264,7 @@ static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
hba->dev_info.max_lu_supported = 8;
- if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >=
+ if (desc_buf[QUERY_DESC_LENGTH_OFFSET] >=
GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
ufshpb_get_geo_info(hba, desc_buf);
@@ -8089,11 +8349,7 @@ out:
static int ufshcd_device_params_init(struct ufs_hba *hba)
{
bool flag;
- int ret, i;
-
- /* Init device descriptor sizes */
- for (i = 0; i < QUERY_DESC_IDN_MAX; i++)
- hba->desc_size[i] = QUERY_DESC_MAX_SIZE;
+ int ret;
/* Init UFS geometry descriptor related parameters */
ret = ufshcd_device_geo_params_init(hba);
@@ -8161,27 +8417,96 @@ out:
return ret;
}
-/**
- * ufshcd_probe_hba - probe hba to detect device and initialize it
- * @hba: per-adapter instance
- * @init_dev_params: whether or not to call ufshcd_device_params_init().
- *
- * Execute link-startup and verify device initialization
- */
-static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
+/* SDB - Single Doorbell */
+static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
+{
+ size_t ucdl_size, utrdl_size;
+
+ ucdl_size = sizeof(struct utp_transfer_cmd_desc) * nutrs;
+ dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
+ hba->ucdl_dma_addr);
+
+ utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
+ dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
+ hba->utrdl_dma_addr);
+
+ devm_kfree(hba->dev, hba->lrb);
+}
+
+static int ufshcd_alloc_mcq(struct ufs_hba *hba)
{
int ret;
- unsigned long flags;
- ktime_t start = ktime_get();
+ int old_nutrs = hba->nutrs;
+
+ ret = ufshcd_mcq_decide_queue_depth(hba);
+ if (ret < 0)
+ return ret;
+
+ hba->nutrs = ret;
+ ret = ufshcd_mcq_init(hba);
+ if (ret)
+ goto err;
+
+ /*
+ * Previously allocated memory for nutrs may not be enough in MCQ mode.
+	 * The number of supported tags in MCQ mode may be larger than in SDB mode.
+ */
+ if (hba->nutrs != old_nutrs) {
+ ufshcd_release_sdb_queue(hba, old_nutrs);
+ ret = ufshcd_memory_alloc(hba);
+ if (ret)
+ goto err;
+ ufshcd_host_memory_configure(hba);
+ }
+
+ ret = ufshcd_mcq_memory_alloc(hba);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ hba->nutrs = old_nutrs;
+ return ret;
+}
+
+static void ufshcd_config_mcq(struct ufs_hba *hba)
+{
+ int ret;
+
+ ret = ufshcd_mcq_vops_config_esi(hba);
+ dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
+
+ ufshcd_enable_intr(hba, UFSHCD_ENABLE_MCQ_INTRS);
+ ufshcd_mcq_make_queues_operational(hba);
+ ufshcd_mcq_config_mac(hba, hba->nutrs);
+
+ hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
+ hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
+
+ /* Select MCQ mode */
+ ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
+ REG_UFS_MEM_CFG);
+ hba->mcq_enabled = true;
+
+ dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
+ hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
+ hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
+ hba->nutrs);
+}
+
+static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
+{
+ int ret;
+ struct Scsi_Host *host = hba->host;
hba->ufshcd_state = UFSHCD_STATE_RESET;
ret = ufshcd_link_startup(hba);
if (ret)
- goto out;
+ return ret;
if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
- goto out;
+ return ret;
/* Debug counters initialization */
ufshcd_clear_dbg_ufs_stats(hba);
@@ -8189,15 +8514,19 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
/* UniPro link is active now */
ufshcd_set_link_active(hba);
+ /* Reconfigure MCQ upon reset */
+ if (is_mcq_enabled(hba) && !init_dev_params)
+ ufshcd_config_mcq(hba);
+
/* Verify device initialization by sending NOP OUT UPIU */
ret = ufshcd_verify_dev_init(hba);
if (ret)
- goto out;
+ return ret;
/* Initiate UFS initialization, and waiting until completion */
ret = ufshcd_complete_dev_init(hba);
if (ret)
- goto out;
+ return ret;
/*
* Initialize UFS device parameters used by driver, these
@@ -8206,7 +8535,25 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
if (init_dev_params) {
ret = ufshcd_device_params_init(hba);
if (ret)
- goto out;
+ return ret;
+ if (is_mcq_supported(hba) && !hba->scsi_host_added) {
+ ret = ufshcd_alloc_mcq(hba);
+ if (ret) {
+ /* Continue with SDB mode */
+ use_mcq_mode = false;
+ dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
+ ret);
+ }
+ ret = scsi_add_host(host, hba->dev);
+ if (ret) {
+ dev_err(hba->dev, "scsi_add_host failed\n");
+ return ret;
+ }
+ hba->scsi_host_added = true;
+ }
+ /* MCQ may be disabled if ufshcd_alloc_mcq() fails */
+ if (is_mcq_supported(hba) && use_mcq_mode)
+ ufshcd_config_mcq(hba);
}
ufshcd_tune_unipro_params(hba);
@@ -8227,11 +8574,51 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
if (ret) {
dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
__func__, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ufshcd_probe_hba - probe hba to detect device and initialize it
+ * @hba: per-adapter instance
+ * @init_dev_params: whether or not to call ufshcd_device_params_init().
+ *
+ * Execute link-startup and verify device initialization
+ */
+static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
+{
+ ktime_t start = ktime_get();
+ unsigned long flags;
+ int ret;
+
+ ret = ufshcd_device_init(hba, init_dev_params);
+ if (ret)
+ goto out;
+
+ if (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
+ /* Reset the device and controller before doing reinit */
+ ufshcd_device_reset(hba);
+ ufshcd_hba_stop(hba);
+ ufshcd_vops_reinit_notify(hba);
+ ret = ufshcd_hba_enable(hba);
+ if (ret) {
+ dev_err(hba->dev, "Host controller enable failed\n");
+ ufshcd_print_evt_hist(hba);
+ ufshcd_print_host_state(hba);
goto out;
}
- ufshcd_print_pwr_info(hba);
+
+ /* Reinit the device */
+ ret = ufshcd_device_init(hba, init_dev_params);
+ if (ret)
+ goto out;
}
+ ufshcd_print_pwr_info(hba);
+
/*
* bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
* and for removable UFS card as well, hence always set the parameter.
@@ -8359,7 +8746,6 @@ static struct scsi_host_template ufshcd_driver_template = {
.max_host_blocked = 1,
.track_queue_depth = 1,
.sdev_groups = ufshcd_driver_groups,
- .dma_boundary = PAGE_SIZE - 1,
.rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
};
@@ -9453,6 +9839,7 @@ static int ufshcd_resume(struct ufs_hba *hba)
/* enable the host irq as host controller would be active soon */
ufshcd_enable_irq(hba);
+
goto out;
disable_vreg:
@@ -9616,6 +10003,56 @@ void ufshcd_remove(struct ufs_hba *hba)
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
+#ifdef CONFIG_PM_SLEEP
+int ufshcd_system_freeze(struct device *dev)
+{
+	return ufshcd_system_suspend(dev);
+}
+EXPORT_SYMBOL_GPL(ufshcd_system_freeze);
+
+int ufshcd_system_restore(struct device *dev)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ufshcd_system_resume(dev);
+ if (ret)
+ return ret;
+
+ /* Configure UTRL and UTMRL base address registers */
+ ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_H);
+ ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_H);
+ /*
+ * Make sure that UTRL and UTMRL base address registers
+ * are updated with the latest queue addresses. Only after
+ * updating these addresses, we can queue the new commands.
+ */
+ mb();
+
+ /* Resuming from hibernate, assume that link was OFF */
+ ufshcd_set_link_off(hba);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ufshcd_system_restore);
+
+int ufshcd_system_thaw(struct device *dev)
+{
+ return ufshcd_system_resume(dev);
+}
+EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
+#endif /* CONFIG_PM_SLEEP */
+
/**
* ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
* @hba: pointer to Host Bus Adapter (HBA)
@@ -9674,6 +10111,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
hba->dev = dev;
hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
hba->nop_out_timeout = NOP_OUT_TIMEOUT;
+ ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry));
INIT_LIST_HEAD(&hba->clk_list_head);
spin_lock_init(&hba->outstanding_lock);
@@ -9823,10 +10261,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->is_irq_enabled = true;
}
- err = scsi_add_host(host, hba->dev);
- if (err) {
- dev_err(hba->dev, "scsi_add_host failed\n");
- goto out_disable;
+ if (!is_mcq_supported(hba)) {
+ err = scsi_add_host(host, hba->dev);
+ if (err) {
+ dev_err(hba->dev, "scsi_add_host failed\n");
+ goto out_disable;
+ }
}
hba->tmf_tag_set = (struct blk_mq_tag_set) {
@@ -10054,11 +10494,6 @@ static int __init ufshcd_core_init(void)
{
int ret;
- /* Verify that there are no gaps in struct utp_transfer_cmd_desc. */
- static_assert(sizeof(struct utp_transfer_cmd_desc) ==
- 2 * ALIGNED_UPIU_SIZE +
- SG_ALL * sizeof(struct ufshcd_sg_entry));
-
ufs_debugfs_init();
ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c
index 994f4ac9df5a..a46a7666c891 100644
--- a/drivers/ufs/core/ufshpb.c
+++ b/drivers/ufs/core/ufshpb.c
@@ -2382,12 +2382,10 @@ static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
{
u16 max_active_rgns;
u8 lu_enable;
- int size;
+ int size = QUERY_DESC_MAX_SIZE;
int ret;
char desc_buf[QUERY_DESC_MAX_SIZE];
- ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
-
ufshcd_rpm_get_sync(hba);
ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
QUERY_DESC_IDN_UNIT, lun, 0,
diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig
index 4cc2dbd79ed0..663881437921 100644
--- a/drivers/ufs/host/Kconfig
+++ b/drivers/ufs/host/Kconfig
@@ -57,8 +57,9 @@ config SCSI_UFS_DWC_TC_PLATFORM
config SCSI_UFS_QCOM
tristate "QCOM specific hooks to UFS controller platform driver"
depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
+ depends on GENERIC_MSI_IRQ
+ depends on RESET_CONTROLLER
select QCOM_SCM if SCSI_UFS_CRYPTO
- select RESET_CONTROLLER
help
This selects the QCOM specific additions to UFSHCD platform driver.
UFS host on QCOM needs some vendor specific configuration before
@@ -124,3 +125,19 @@ config SCSI_UFS_EXYNOS
Select this if you have UFS host controller on Samsung Exynos SoC.
If unsure, say N.
+
+config SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
+ bool
+ default y if SCSI_UFS_EXYNOS && SCSI_UFS_CRYPTO
+
+config SCSI_UFS_SPRD
+ tristate "Unisoc specific hooks to UFS controller platform driver"
+ depends on SCSI_UFSHCD_PLATFORM && (ARCH_SPRD || COMPILE_TEST)
+ help
+ This selects the Unisoc specific additions to UFSHCD platform driver.
+ UFS host on Unisoc needs some vendor specific configuration before
+ accessing the hardware which includes PHY configuration and vendor
+ specific registers.
+
+ Select this if you have UFS controller on Unisoc chipset.
+ If unsure, say N.
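+
+	  As an illustration (an editorial sketch, not part of this change), a
+	  build for an ARCH_SPRD platform would typically enable the driver with
+	  a .config fragment along these lines, given the dependencies above:
+
+	    CONFIG_SCSI_UFSHCD=m
+	    CONFIG_SCSI_UFSHCD_PLATFORM=m
+	    CONFIG_SCSI_UFS_SPRD=m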
diff --git a/drivers/ufs/host/Makefile b/drivers/ufs/host/Makefile
index 7717ca93e7d5..d7c5bf7fa512 100644
--- a/drivers/ufs/host/Makefile
+++ b/drivers/ufs/host/Makefile
@@ -12,4 +12,5 @@ obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o
obj-$(CONFIG_SCSI_UFS_RENESAS) += ufs-renesas.o
+obj-$(CONFIG_SCSI_UFS_SPRD) += ufs-sprd.o
obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index c3628a8645a5..7c985fc38db1 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -1300,6 +1300,14 @@ static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
+ /*
+ * The maximum segment size must be set after scsi_host_alloc()
+ * has been called and before LUN scanning starts
+ * (ufshcd_async_scan()). Note: this callback may also be called
+		 * from functions other than ufshcd_init().
+ */
+ hba->host->max_segment_size = 4096;
+
if (ufs->drv_data->pre_hce_enable) {
ret = ufs->drv_data->pre_hce_enable(ufs);
if (ret)
@@ -1673,7 +1681,7 @@ static const struct exynos_ufs_drv_data exynos_ufs_drvs = {
UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
- UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE,
+ UFSHCD_QUIRK_4KB_DMA_ALIGNMENT,
.opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 8ad1415e10b6..34fc453f3eb1 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -22,8 +22,11 @@
#include <ufs/ufshci.h>
#include <ufs/ufs_quirks.h>
-#define UFS_QCOM_DEFAULT_DBG_PRINT_EN \
- (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
+#define MCQ_QCFGPTR_MASK GENMASK(7, 0)
+#define MCQ_QCFGPTR_UNIT 0x200
+#define MCQ_SQATTR_OFFSET(c) \
+ ((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT)
+#define MCQ_QCFG_SIZE 0x40
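+
+/*
+ * Illustrative note (an editorial sketch, not part of this change): for a
+ * capabilities value of 0x00070000, MCQ_SQATTR_OFFSET() works out to
+ * ((0x00070000 >> 16) & 0xff) * 0x200 = 7 * 0x200 = 0xe00.
+ */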
enum {
TSTBUS_UAWM,
@@ -52,12 +55,6 @@ static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
return container_of(rcd, struct ufs_qcom_host, rcdev);
}
-static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
- const char *prefix, void *priv)
-{
- ufshcd_dump_regs(hba, offset, len * 4, prefix);
-}
-
static int ufs_qcom_host_clk_get(struct device *dev,
const char *name, struct clk **clk_out, bool optional)
{
@@ -110,7 +107,7 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
- int err = 0;
+ int err;
struct device *dev = host->hba->dev;
if (host->is_lane_clks_enabled)
@@ -119,7 +116,7 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
host->rx_l0_sync_clk);
if (err)
- goto out;
+ return err;
err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
host->tx_l0_sync_clk);
@@ -137,7 +134,8 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
goto disable_rx_l1;
host->is_lane_clks_enabled = true;
- goto out;
+
+ return 0;
disable_rx_l1:
clk_disable_unprepare(host->rx_l1_sync_clk);
@@ -145,7 +143,7 @@ disable_tx_l0:
clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
clk_disable_unprepare(host->rx_l0_sync_clk);
-out:
+
return err;
}
@@ -160,25 +158,25 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
&host->rx_l0_sync_clk, false);
if (err)
- goto out;
+ return err;
err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
&host->tx_l0_sync_clk, false);
if (err)
- goto out;
+ return err;
/* In case of single lane per direction, don't read lane1 clocks */
if (host->hba->lanes_per_direction > 1) {
err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
&host->rx_l1_sync_clk, false);
if (err)
- goto out;
+ return err;
err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
&host->tx_l1_sync_clk, true);
}
-out:
- return err;
+
+ return 0;
}
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
@@ -226,6 +224,10 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
ufshcd_rmwl(host->hba, QUNIPRO_SEL,
ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
REG_UFS_CFG1);
+
+ if (host->hw_ver.major == 0x05)
+ ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
+
/* make sure above configuration is applied before we return */
mb();
}
@@ -241,7 +243,7 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
if (!host->core_reset) {
dev_warn(hba->dev, "%s: reset control not set\n", __func__);
- goto out;
+ return 0;
}
reenable_intr = hba->is_irq_enabled;
@@ -252,7 +254,7 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
if (ret) {
dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
__func__, ret);
- goto out;
+ return ret;
}
/*
@@ -274,16 +276,35 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
hba->is_irq_enabled = true;
}
-out:
- return ret;
+ return 0;
+}
+
+static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ if (host->hw_ver.major == 0x1) {
+ /*
+ * HS-G3 operations may not reliably work on legacy QCOM
+ * UFS host controller hardware even though capability
+ * exchange during link startup phase may end up
+ * negotiating maximum supported gear as G3.
+ * Hence downgrade the maximum supported gear to HS-G2.
+ */
+ return UFS_HS_G2;
+ } else if (host->hw_ver.major >= 0x4) {
+ return UFS_QCOM_MAX_GEAR(ufshcd_readl(hba, REG_UFS_PARAM0));
+ }
+
+ /* Default is HS-G3 */
+ return UFS_HS_G3;
}
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct phy *phy = host->generic_phy;
- int ret = 0;
- bool is_rate_B = UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B;
+ int ret;
/* Reset UFS Host Controller and PHY */
ret = ufs_qcom_host_reset(hba);
@@ -291,17 +312,16 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
dev_warn(hba->dev, "%s: host reset returned %d\n",
__func__, ret);
- if (is_rate_B)
- phy_set_mode(phy, PHY_MODE_UFS_HS_B);
-
/* phy initialization - calibrate the phy */
ret = phy_init(phy);
if (ret) {
dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
__func__, ret);
- goto out;
+ return ret;
}
+ phy_set_mode_ext(phy, PHY_MODE_UFS_HS_B, host->hs_gear);
+
/* power on phy - start serdes and phy's power and clocks */
ret = phy_power_on(phy);
if (ret) {
@@ -316,7 +336,7 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
out_disable_phy:
phy_exit(phy);
-out:
+
return ret;
}
@@ -374,7 +394,6 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
u32 hs, u32 rate, bool update_link_startup_timer)
{
- int ret = 0;
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_clk_info *clki;
u32 core_clk_period_in_ns;
@@ -409,11 +428,11 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
* Aggregation logic.
*/
if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
- goto out;
+ return 0;
if (gear == 0) {
dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
- goto out_error;
+ return -EINVAL;
}
list_for_each_entry(clki, &hba->clk_list_head, list) {
@@ -436,7 +455,7 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
}
if (ufs_qcom_cap_qunipro(host))
- goto out;
+ return 0;
core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
@@ -451,7 +470,7 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
"%s: index %d exceeds table size %zu\n",
__func__, gear,
ARRAY_SIZE(hs_fr_table_rA));
- goto out_error;
+ return -EINVAL;
}
tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
} else if (rate == PA_HS_MODE_B) {
@@ -460,13 +479,13 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
"%s: index %d exceeds table size %zu\n",
__func__, gear,
ARRAY_SIZE(hs_fr_table_rB));
- goto out_error;
+ return -EINVAL;
}
tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
} else {
dev_err(hba->dev, "%s: invalid rate = %d\n",
__func__, rate);
- goto out_error;
+ return -EINVAL;
}
break;
case SLOWAUTO_MODE:
@@ -476,14 +495,14 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
"%s: index %d exceeds table size %zu\n",
__func__, gear,
ARRAY_SIZE(pwm_fr_table));
- goto out_error;
+ return -EINVAL;
}
tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
break;
case UNCHANGED:
default:
dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
- goto out_error;
+ return -EINVAL;
}
if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
@@ -498,21 +517,17 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
mb();
}
- if (update_link_startup_timer) {
+ if (update_link_startup_timer && host->hw_ver.major != 0x5) {
ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
- REG_UFS_PA_LINK_STARTUP_TIMER);
+ REG_UFS_CFG0);
/*
* make sure that this configuration is applied before
* we return
*/
mb();
}
- goto out;
-out_error:
- ret = -EINVAL;
-out:
- return ret;
+ return 0;
}
static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
@@ -527,8 +542,7 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
0, true)) {
dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
__func__);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
if (ufs_qcom_cap_qunipro(host))
@@ -554,7 +568,6 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
break;
}
-out:
return err;
}
@@ -691,8 +704,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
if (!dev_req_params) {
pr_err("%s: incoming dev_req_params is NULL\n", __func__);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
switch (status) {
@@ -700,29 +712,21 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
ufshcd_init_pwr_dev_param(&ufs_qcom_cap);
ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
- if (host->hw_ver.major == 0x1) {
- /*
- * HS-G3 operations may not reliably work on legacy QCOM
- * UFS host controller hardware even though capability
- * exchange during link startup phase may end up
- * negotiating maximum supported gear as G3.
- * Hence downgrade the maximum supported gear to HS-G2.
- */
- if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
- ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
- if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
- ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
- }
+		/* This driver only supports symmetric gear setting, i.e., hs_tx_gear == hs_rx_gear */
+ ufs_qcom_cap.hs_tx_gear = ufs_qcom_cap.hs_rx_gear = ufs_qcom_get_hs_gear(hba);
ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
dev_max_params,
dev_req_params);
if (ret) {
- pr_err("%s: failed to determine capabilities\n",
+ dev_err(hba->dev, "%s: failed to determine capabilities\n",
__func__);
- goto out;
+ return ret;
}
+ /* Use the agreed gear */
+ host->hs_gear = dev_req_params->gear_tx;
+
/* enable the device ref clock before changing to HS mode */
if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
ufshcd_is_hs_mode(dev_req_params))
@@ -761,7 +765,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
ret = -EINVAL;
break;
}
-out:
+
return ret;
}
@@ -773,14 +777,11 @@ static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
&pa_vs_config_reg1);
if (err)
- goto out;
+ return err;
/* Allow extension of MSB bits of PA_SaveConfigTime attribute */
- err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+ return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
(pa_vs_config_reg1 | (1 << 12)));
-
-out:
- return err;
}
static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
@@ -839,6 +840,9 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
}
+
+ if (host->hw_ver.major > 0x3)
+ hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
}
static void ufs_qcom_set_caps(struct ufs_hba *hba)
@@ -906,8 +910,6 @@ ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
- /* Currently this code only knows about a single reset. */
- WARN_ON(id);
ufs_qcom_assert_reset(host->hba);
/* provide 1ms delay to let the reset pulse propagate. */
usleep_range(1000, 1100);
@@ -919,8 +921,6 @@ ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
- /* Currently this code only knows about a single reset. */
- WARN_ON(id);
ufs_qcom_deassert_reset(host->hba);
/*
@@ -957,9 +957,8 @@ static int ufs_qcom_init(struct ufs_hba *hba)
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host) {
- err = -ENOMEM;
dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
- goto out;
+ return -ENOMEM;
}
/* Make a two way bind between the qcom host and the hba */
@@ -980,10 +979,8 @@ static int ufs_qcom_init(struct ufs_hba *hba)
host->rcdev.owner = dev->driver->owner;
host->rcdev.nr_resets = 1;
err = devm_reset_controller_register(dev, &host->rcdev);
- if (err) {
+ if (err)
dev_warn(dev, "Failed to register reset controller\n");
- err = 0;
- }
if (!has_acpi_companion(dev)) {
host->generic_phy = devm_phy_get(dev, "ufsphy");
@@ -1046,20 +1043,24 @@ static int ufs_qcom_init(struct ufs_hba *hba)
if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
ufs_qcom_hosts[hba->dev->id] = host;
- host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
ufs_qcom_get_default_testbus_cfg(host);
err = ufs_qcom_testbus_config(host);
- if (err) {
+ if (err)
+ /* Failure is non-fatal */
dev_warn(dev, "%s: failed to configure the testbus %d\n",
__func__, err);
- err = 0;
- }
- goto out;
+ /*
+ * Power up the PHY using the minimum supported gear (UFS_HS_G2).
+ * Switching to max gear will be performed during reinit if supported.
+ */
+ host->hs_gear = UFS_HS_G2;
+
+ return 0;
out_variant_clear:
ufshcd_set_variant(hba, NULL);
-out:
+
return err;
}
@@ -1085,7 +1086,7 @@ static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
&core_clk_ctrl_reg);
if (err)
- goto out;
+ return err;
core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
core_clk_ctrl_reg |= clk_cycles;
@@ -1093,11 +1094,9 @@ static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
/* Clear CORE_CLK_DIV_EN */
core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
- err = ufshcd_dme_set(hba,
+ return ufshcd_dme_set(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
core_clk_ctrl_reg);
-out:
- return err;
}
static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
@@ -1180,7 +1179,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
if (err || !dev_req_params) {
ufshcd_uic_hibern8_exit(hba);
- goto out;
+ return err;
}
ufs_qcom_cfg_timers(hba,
@@ -1191,81 +1190,14 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
ufshcd_uic_hibern8_exit(hba);
}
-out:
- return err;
-}
-
-static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
- void *priv, void (*print_fn)(struct ufs_hba *hba,
- int offset, int num_regs, const char *str, void *priv))
-{
- u32 reg;
- struct ufs_qcom_host *host;
-
- if (unlikely(!hba)) {
- pr_err("%s: hba is NULL\n", __func__);
- return;
- }
- if (unlikely(!print_fn)) {
- dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
- return;
- }
-
- host = ufshcd_get_variant(hba);
- if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
- return;
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
- print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
-
- reg = ufshcd_readl(hba, REG_UFS_CFG1);
- reg |= UTP_DBG_RAMS_EN;
- ufshcd_writel(hba, reg, REG_UFS_CFG1);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
- print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
- print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
- print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
-
- /* clear bit 17 - UTP_DBG_RAMS_EN */
- ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
- print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
- print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
- print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
- print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
- print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
- print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
- print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
+ return 0;
}
static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
{
- if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
- ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
- UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
- ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
- } else {
- ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
- ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
- }
+ ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
+ UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
+ ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
}
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
@@ -1374,10 +1306,53 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
+ u32 reg;
+ struct ufs_qcom_host *host;
+
+ host = ufshcd_get_variant(hba);
+
ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
"HCI Vendor Specific Registers ");
- ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
+ ufshcd_dump_regs(hba, reg, 44 * 4, "UFS_UFS_DBG_RD_REG_OCSC ");
+
+ reg = ufshcd_readl(hba, REG_UFS_CFG1);
+ reg |= UTP_DBG_RAMS_EN;
+ ufshcd_writel(hba, reg, REG_UFS_CFG1);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
+ ufshcd_dump_regs(hba, reg, 32 * 4, "UFS_UFS_DBG_RD_EDTL_RAM ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
+ ufshcd_dump_regs(hba, reg, 128 * 4, "UFS_UFS_DBG_RD_DESC_RAM ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
+ ufshcd_dump_regs(hba, reg, 64 * 4, "UFS_UFS_DBG_RD_PRDT_RAM ");
+
+ /* clear bit 17 - UTP_DBG_RAMS_EN */
+ ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
+ ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UAWM ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
+ ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UARM ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
+ ufshcd_dump_regs(hba, reg, 48 * 4, "UFS_DBG_RD_REG_TXUC ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
+ ufshcd_dump_regs(hba, reg, 27 * 4, "UFS_DBG_RD_REG_RXUC ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
+ ufshcd_dump_regs(hba, reg, 19 * 4, "UFS_DBG_RD_REG_DFC ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
+ ufshcd_dump_regs(hba, reg, 34 * 4, "UFS_DBG_RD_REG_TRLUT ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
+ ufshcd_dump_regs(hba, reg, 9 * 4, "UFS_DBG_RD_REG_TMRLUT ");
}
/**
@@ -1424,6 +1399,236 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
}
#endif
+static void ufs_qcom_reinit_notify(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ phy_power_off(host->generic_phy);
+}
+
+/* Resources */
+static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
+ {.name = "ufs_mem",},
+ {.name = "mcq",},
+ /* Submission Queue DAO */
+ {.name = "mcq_sqd",},
+ /* Submission Queue Interrupt Status */
+ {.name = "mcq_sqis",},
+ /* Completion Queue DAO */
+ {.name = "mcq_cqd",},
+ /* Completion Queue Interrupt Status */
+ {.name = "mcq_cqis",},
+ /* MCQ vendor specific */
+ {.name = "mcq_vs",},
+};
+
+static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
+{
+ struct platform_device *pdev = to_platform_device(hba->dev);
+ struct ufshcd_res_info *res;
+ struct resource *res_mem, *res_mcq;
+ int i, ret = 0;
+
+ memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info));
+
+ for (i = 0; i < RES_MAX; i++) {
+ res = &hba->res[i];
+ res->resource = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ res->name);
+ if (!res->resource) {
+ dev_info(hba->dev, "Resource %s not provided\n", res->name);
+ if (i == RES_UFS)
+ return -ENOMEM;
+ continue;
+ } else if (i == RES_UFS) {
+ res_mem = res->resource;
+ res->base = hba->mmio_base;
+ continue;
+ }
+
+ res->base = devm_ioremap_resource(hba->dev, res->resource);
+ if (IS_ERR(res->base)) {
+ dev_err(hba->dev, "Failed to map res %s, err=%d\n",
+ res->name, (int)PTR_ERR(res->base));
+			ret = PTR_ERR(res->base);
+			res->base = NULL;
+ return ret;
+ }
+ }
+
+ /* MCQ resource provided in DT */
+ res = &hba->res[RES_MCQ];
+ /* Bail if MCQ resource is provided */
+ if (res->base)
+ goto out;
+
+ /* Explicitly allocate MCQ resource from ufs_mem */
+ res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL);
+ if (!res_mcq)
+		return -ENOMEM;
+
+ res_mcq->start = res_mem->start +
+ MCQ_SQATTR_OFFSET(hba->mcq_capabilities);
+ res_mcq->end = res_mcq->start + hba->nr_hw_queues * MCQ_QCFG_SIZE - 1;
+ res_mcq->flags = res_mem->flags;
+ res_mcq->name = "mcq";
+
+ ret = insert_resource(&iomem_resource, res_mcq);
+ if (ret) {
+ dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n",
+ ret);
+ goto insert_res_err;
+ }
+
+ res->base = devm_ioremap_resource(hba->dev, res_mcq);
+ if (IS_ERR(res->base)) {
+ dev_err(hba->dev, "MCQ registers mapping failed, err=%d\n",
+ (int)PTR_ERR(res->base));
+ ret = PTR_ERR(res->base);
+ goto ioremap_err;
+ }
+
+out:
+ hba->mcq_base = res->base;
+ return 0;
+ioremap_err:
+ res->base = NULL;
+ remove_resource(res_mcq);
+insert_res_err:
+ devm_kfree(hba->dev, res_mcq);
+ return ret;
+}
+
+static int ufs_qcom_op_runtime_config(struct ufs_hba *hba)
+{
+ struct ufshcd_res_info *mem_res, *sqdao_res;
+ struct ufshcd_mcq_opr_info_t *opr;
+ int i;
+
+ mem_res = &hba->res[RES_UFS];
+ sqdao_res = &hba->res[RES_MCQ_SQD];
+
+ if (!mem_res->base || !sqdao_res->base)
+ return -EINVAL;
+
+ for (i = 0; i < OPR_MAX; i++) {
+ opr = &hba->mcq_opr[i];
+ opr->offset = sqdao_res->resource->start -
+ mem_res->resource->start + 0x40 * i;
+ opr->stride = 0x100;
+ opr->base = sqdao_res->base + 0x40 * i;
+ }
+
+ return 0;
+}
+
+static int ufs_qcom_get_hba_mac(struct ufs_hba *hba)
+{
+ /* Qualcomm HC supports up to 64 */
+ return MAX_SUPP_MAC;
+}
+
+static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba,
+ unsigned long *ocqs)
+{
+ struct ufshcd_res_info *mcq_vs_res = &hba->res[RES_MCQ_VS];
+
+ if (!mcq_vs_res->base)
+ return -EINVAL;
+
+ *ocqs = readl(mcq_vs_res->base + UFS_MEM_CQIS_VS);
+
+ return 0;
+}
+
+static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ ufshcd_mcq_config_esi(hba, msg);
+}
+
+static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *__hba)
+{
+ struct ufs_hba *hba = __hba;
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ u32 id = irq - host->esi_base;
+ struct ufs_hw_queue *hwq = &hba->uhq[id];
+
+ ufshcd_mcq_write_cqis(hba, 0x1, id);
+ ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+
+ return IRQ_HANDLED;
+}
+
+static int ufs_qcom_config_esi(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct msi_desc *desc;
+ struct msi_desc *failed_desc = NULL;
+ int nr_irqs, ret;
+
+ if (host->esi_enabled)
+ return 0;
+ else if (host->esi_base < 0)
+ return -EINVAL;
+
+ /*
+ * 1. We only handle CQs as of now.
+ * 2. Poll queues do not need ESI.
+ */
+ nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
+ ret = platform_msi_domain_alloc_irqs(hba->dev, nr_irqs,
+ ufs_qcom_write_msi_msg);
+ if (ret)
+ goto out;
+
+ msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
+ if (!desc->msi_index)
+ host->esi_base = desc->irq;
+
+ ret = devm_request_irq(hba->dev, desc->irq,
+ ufs_qcom_mcq_esi_handler,
+ IRQF_SHARED, "qcom-mcq-esi", hba);
+ if (ret) {
+ dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
+ __func__, desc->irq, ret);
+ failed_desc = desc;
+ break;
+ }
+ }
+
+ if (ret) {
+ /* Rewind */
+ msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
+ if (desc == failed_desc)
+ break;
+ devm_free_irq(hba->dev, desc->irq, hba);
+ }
+ platform_msi_domain_free_irqs(hba->dev);
+ } else {
+ if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
+ host->hw_ver.step == 0) {
+ ufshcd_writel(hba,
+ ufshcd_readl(hba, REG_UFS_CFG3) | 0x1F000,
+ REG_UFS_CFG3);
+ }
+ ufshcd_mcq_enable_esi(hba);
+ }
+
+out:
+ if (ret) {
+ host->esi_base = -1;
+ dev_warn(hba->dev, "Failed to request Platform MSI %d\n", ret);
+ } else {
+ host->esi_enabled = true;
+ }
+
+ return ret;
+}
+
/*
* struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
*
@@ -1447,6 +1652,12 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.device_reset = ufs_qcom_device_reset,
.config_scaling_param = ufs_qcom_config_scaling_param,
.program_key = ufs_qcom_ice_program_key,
+ .reinit_notify = ufs_qcom_reinit_notify,
+ .mcq_config_resource = ufs_qcom_mcq_config_resource,
+ .get_hba_mac = ufs_qcom_get_hba_mac,
+ .op_runtime_config = ufs_qcom_op_runtime_config,
+ .get_outstanding_cqs = ufs_qcom_get_outstanding_cqs,
+ .config_esi = ufs_qcom_config_esi,
};
/**
@@ -1463,9 +1674,9 @@ static int ufs_qcom_probe(struct platform_device *pdev)
/* Perform generic probe */
err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
if (err)
- dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
+ return dev_err_probe(dev, err, "ufshcd_pltfrm_init() failed\n");
- return err;
+ return 0;
}
/**
@@ -1480,6 +1691,7 @@ static int ufs_qcom_remove(struct platform_device *pdev)
pm_runtime_get_sync(&(pdev)->dev);
ufshcd_remove(hba);
+ platform_msi_domain_free_irqs(hba->dev);
return 0;
}
@@ -1498,10 +1710,16 @@ MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
#endif
static const struct dev_pm_ops ufs_qcom_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
+#ifdef CONFIG_PM_SLEEP
+ .suspend = ufshcd_system_suspend,
+ .resume = ufshcd_system_resume,
+ .freeze = ufshcd_system_freeze,
+ .restore = ufshcd_system_restore,
+ .thaw = ufshcd_system_thaw,
+#endif
};
static struct platform_driver ufs_qcom_pltform = {
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 44466a395bb5..39e774254fb2 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -16,13 +16,11 @@
#define HBRN8_POLL_TOUT_MS 100
#define DEFAULT_CLK_RATE_HZ 1000000
#define BUS_VECTOR_NAME_LEN 32
+#define MAX_SUPP_MAC 64
-#define UFS_HW_VER_MAJOR_SHFT (28)
-#define UFS_HW_VER_MAJOR_MASK (0x000F << UFS_HW_VER_MAJOR_SHFT)
-#define UFS_HW_VER_MINOR_SHFT (16)
-#define UFS_HW_VER_MINOR_MASK (0x0FFF << UFS_HW_VER_MINOR_SHFT)
-#define UFS_HW_VER_STEP_SHFT (0)
-#define UFS_HW_VER_STEP_MASK (0xFFFF << UFS_HW_VER_STEP_SHFT)
+#define UFS_HW_VER_MAJOR_MASK GENMASK(31, 28)
+#define UFS_HW_VER_MINOR_MASK GENMASK(27, 16)
+#define UFS_HW_VER_STEP_MASK GENMASK(15, 0)
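+
+/*
+ * Illustrative note (an editorial sketch, not part of this change): a
+ * REG_UFS_HW_VERSION value of 0x30020000 decodes via FIELD_GET() to
+ * major 3, minor 2, step 0.
+ */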
/* vendor specific pre-defined parameters */
#define SLOW 1
@@ -36,8 +34,10 @@ enum {
REG_UFS_TX_SYMBOL_CLK_NS_US = 0xC4,
REG_UFS_LOCAL_PORT_ID_REG = 0xC8,
REG_UFS_PA_ERR_CODE = 0xCC,
- REG_UFS_RETRY_TIMER_REG = 0xD0,
- REG_UFS_PA_LINK_STARTUP_TIMER = 0xD8,
+ /* On older UFS revisions, this register is called "RETRY_TIMER_REG" */
+ REG_UFS_PARAM0 = 0xD0,
+ /* On older UFS revisions, this register is called "REG_UFS_PA_LINK_STARTUP_TIMER" */
+ REG_UFS_CFG0 = 0xD8,
REG_UFS_CFG1 = 0xDC,
REG_UFS_CFG2 = 0xE0,
REG_UFS_HW_VERSION = 0xE4,
@@ -53,6 +53,8 @@ enum {
* added in HW Version 3.0.0
*/
UFS_AH8_CFG = 0xFC,
+
+ REG_UFS_CFG3 = 0x271C,
};
/* QCOM UFS host controller vendor specific debug registers */
@@ -72,28 +74,43 @@ enum {
UFS_UFS_DBG_RD_EDTL_RAM = 0x1900,
};
+enum {
+ UFS_MEM_CQIS_VS = 0x8,
+};
+
#define UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(x) (0x000 + x)
#define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x) (0x400 + x)
+/* bit definitions for REG_UFS_CFG0 register */
+#define QUNIPRO_G4_SEL BIT(5)
+
/* bit definitions for REG_UFS_CFG1 register */
-#define QUNIPRO_SEL 0x1
-#define UTP_DBG_RAMS_EN 0x20000
+#define QUNIPRO_SEL BIT(0)
+#define UFS_PHY_SOFT_RESET BIT(1)
+#define UTP_DBG_RAMS_EN BIT(17)
#define TEST_BUS_EN BIT(18)
#define TEST_BUS_SEL GENMASK(22, 19)
#define UFS_REG_TEST_BUS_EN BIT(30)
+#define UFS_PHY_RESET_ENABLE 1
+#define UFS_PHY_RESET_DISABLE 0
+
/* bit definitions for REG_UFS_CFG2 register */
-#define UAWM_HW_CGC_EN (1 << 0)
-#define UARM_HW_CGC_EN (1 << 1)
-#define TXUC_HW_CGC_EN (1 << 2)
-#define RXUC_HW_CGC_EN (1 << 3)
-#define DFC_HW_CGC_EN (1 << 4)
-#define TRLUT_HW_CGC_EN (1 << 5)
-#define TMRLUT_HW_CGC_EN (1 << 6)
-#define OCSC_HW_CGC_EN (1 << 7)
+#define UAWM_HW_CGC_EN BIT(0)
+#define UARM_HW_CGC_EN BIT(1)
+#define TXUC_HW_CGC_EN BIT(2)
+#define RXUC_HW_CGC_EN BIT(3)
+#define DFC_HW_CGC_EN BIT(4)
+#define TRLUT_HW_CGC_EN BIT(5)
+#define TMRLUT_HW_CGC_EN BIT(6)
+#define OCSC_HW_CGC_EN BIT(7)
+
+/* bit definitions for REG_UFS_PARAM0 */
+#define MAX_HS_GEAR_MASK GENMASK(6, 4)
+#define UFS_QCOM_MAX_GEAR(x) FIELD_GET(MAX_HS_GEAR_MASK, (x))
/* bit definition for UFS_UFS_TEST_BUS_CTRL_n */
-#define TEST_BUS_SUB_SEL_MASK 0x1F /* All XXX_SEL fields are 5 bits wide */
+#define TEST_BUS_SUB_SEL_MASK GENMASK(4, 0) /* All XXX_SEL fields are 5 bits wide */
#define REG_UFS_CFG2_CGC_EN_ALL (UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\
TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\
@@ -101,26 +118,11 @@ enum {
TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
/* bit offset */
-enum {
- OFFSET_UFS_PHY_SOFT_RESET = 1,
- OFFSET_CLK_NS_REG = 10,
-};
+#define OFFSET_CLK_NS_REG 0xa
/* bit masks */
-enum {
- MASK_UFS_PHY_SOFT_RESET = 0x2,
- MASK_TX_SYMBOL_CLK_1US_REG = 0x3FF,
- MASK_CLK_NS_REG = 0xFFFC00,
-};
-
-/* QCOM UFS debug print bit mask */
-#define UFS_QCOM_DBG_PRINT_REGS_EN BIT(0)
-#define UFS_QCOM_DBG_PRINT_ICE_REGS_EN BIT(1)
-#define UFS_QCOM_DBG_PRINT_TEST_BUS_EN BIT(2)
-
-#define UFS_QCOM_DBG_PRINT_ALL \
- (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_ICE_REGS_EN | \
- UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
+#define MASK_TX_SYMBOL_CLK_1US_REG GENMASK(9, 0)
+#define MASK_CLK_NS_REG GENMASK(23, 10)
/* QUniPro Vendor specific attributes */
#define PA_VS_CONFIG_REG1 0x9000
@@ -135,15 +137,15 @@ ufs_qcom_get_controller_revision(struct ufs_hba *hba,
{
u32 ver = ufshcd_readl(hba, REG_UFS_HW_VERSION);
- *major = (ver & UFS_HW_VER_MAJOR_MASK) >> UFS_HW_VER_MAJOR_SHFT;
- *minor = (ver & UFS_HW_VER_MINOR_MASK) >> UFS_HW_VER_MINOR_SHFT;
- *step = (ver & UFS_HW_VER_STEP_MASK) >> UFS_HW_VER_STEP_SHFT;
+ *major = FIELD_GET(UFS_HW_VER_MAJOR_MASK, ver);
+ *minor = FIELD_GET(UFS_HW_VER_MINOR_MASK, ver);
+ *step = FIELD_GET(UFS_HW_VER_STEP_MASK, ver);
};
static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
{
- ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
- 1 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
+ ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, FIELD_PREP(UFS_PHY_SOFT_RESET, UFS_PHY_RESET_ENABLE),
+ REG_UFS_CFG1);
/*
* Make sure assertion of ufs phy reset is written to
@@ -154,8 +156,8 @@ static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
{
- ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
- 0 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
+ ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, FIELD_PREP(UFS_PHY_SOFT_RESET, UFS_PHY_RESET_DISABLE),
+ REG_UFS_CFG1);
/*
* Make sure de-assertion of ufs phy reset is written to
@@ -212,8 +214,6 @@ struct ufs_qcom_host {
u32 dev_ref_clk_en_mask;
- /* Bitmask for enabling debug prints */
- u32 dbg_print_en;
struct ufs_qcom_testbus testbus;
/* Reset control of HCI */
@@ -221,6 +221,11 @@ struct ufs_qcom_host {
struct reset_controller_dev rcdev;
struct gpio_desc *device_reset;
+
+ u32 hs_gear;
+
+ int esi_base;
+ bool esi_enabled;
};
static inline u32
diff --git a/drivers/ufs/host/ufs-sprd.c b/drivers/ufs/host/ufs-sprd.c
new file mode 100644
index 000000000000..051f3f40d92c
--- /dev/null
+++ b/drivers/ufs/host/ufs-sprd.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UNISOC UFS Host Controller driver
+ *
+ * Copyright (C) 2022 Unisoc, Inc.
+ * Author: Zhe Wang <zhe.wang1@unisoc.com>
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/regulator/consumer.h>
+
+#include <ufs/ufshcd.h>
+#include "ufshcd-pltfrm.h"
+#include "ufs-sprd.h"
+
+static const struct of_device_id ufs_sprd_of_match[];
+
+static struct ufs_sprd_priv *ufs_sprd_get_priv_data(struct ufs_hba *hba)
+{
+ struct ufs_sprd_host *host = ufshcd_get_variant(hba);
+
+ WARN_ON(!host->priv);
+ return host->priv;
+}
+
+static void ufs_sprd_regmap_update(struct ufs_sprd_priv *priv, unsigned int index,
+ unsigned int reg, unsigned int bits, unsigned int val)
+{
+ regmap_update_bits(priv->sysci[index].regmap, reg, bits, val);
+}
+
+static void ufs_sprd_regmap_read(struct ufs_sprd_priv *priv, unsigned int index,
+ unsigned int reg, unsigned int *val)
+{
+ regmap_read(priv->sysci[index].regmap, reg, val);
+}
+
+static void ufs_sprd_get_unipro_ver(struct ufs_hba *hba)
+{
+ struct ufs_sprd_host *host = ufshcd_get_variant(hba);
+
+ if (ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &host->unipro_ver))
+ host->unipro_ver = 0;
+}
+
+static void ufs_sprd_ctrl_uic_compl(struct ufs_hba *hba, bool enable)
+{
+ u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+
+ if (enable == true)
+ set |= UIC_COMMAND_COMPL;
+ else
+ set &= ~UIC_COMMAND_COMPL;
+ ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
+}
+
+static int ufs_sprd_get_reset_ctrl(struct device *dev, struct ufs_sprd_rst *rci)
+{
+ rci->rc = devm_reset_control_get(dev, rci->name);
+ if (IS_ERR(rci->rc)) {
+ dev_err(dev, "failed to get reset ctrl:%s\n", rci->name);
+ return PTR_ERR(rci->rc);
+ }
+
+ return 0;
+}
+
+static int ufs_sprd_get_syscon_reg(struct device *dev, struct ufs_sprd_syscon *sysci)
+{
+ sysci->regmap = syscon_regmap_lookup_by_phandle(dev->of_node, sysci->name);
+ if (IS_ERR(sysci->regmap)) {
+ dev_err(dev, "failed to get ufs syscon:%s\n", sysci->name);
+ return PTR_ERR(sysci->regmap);
+ }
+
+ return 0;
+}
+
+static int ufs_sprd_get_vreg(struct device *dev, struct ufs_sprd_vreg *vregi)
+{
+ vregi->vreg = devm_regulator_get(dev, vregi->name);
+ if (IS_ERR(vregi->vreg)) {
+ dev_err(dev, "failed to get vreg:%s\n", vregi->name);
+ return PTR_ERR(vregi->vreg);
+ }
+
+ return 0;
+}
+
+static int ufs_sprd_parse_dt(struct device *dev, struct ufs_hba *hba, struct ufs_sprd_host *host)
+{
+ u32 i;
+ struct ufs_sprd_priv *priv = host->priv;
+ int ret = 0;
+
+ /* Parse UFS reset ctrl info */
+ for (i = 0; i < SPRD_UFS_RST_MAX; i++) {
+ if (!priv->rci[i].name)
+ continue;
+ ret = ufs_sprd_get_reset_ctrl(dev, &priv->rci[i]);
+ if (ret)
+ goto out;
+ }
+
+ /* Parse UFS syscon reg info */
+ for (i = 0; i < SPRD_UFS_SYSCON_MAX; i++) {
+ if (!priv->sysci[i].name)
+ continue;
+ ret = ufs_sprd_get_syscon_reg(dev, &priv->sysci[i]);
+ if (ret)
+ goto out;
+ }
+
+ /* Parse UFS vreg info */
+ for (i = 0; i < SPRD_UFS_VREG_MAX; i++) {
+ if (!priv->vregi[i].name)
+ continue;
+ ret = ufs_sprd_get_vreg(dev, &priv->vregi[i]);
+ if (ret)
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int ufs_sprd_common_init(struct ufs_hba *hba)
+{
+ struct device *dev = hba->dev;
+ struct ufs_sprd_host *host;
+ struct platform_device __maybe_unused *pdev = to_platform_device(dev);
+ const struct of_device_id *of_id;
+ int ret = 0;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ of_id = of_match_node(ufs_sprd_of_match, pdev->dev.of_node);
+ if (of_id->data != NULL)
+ host->priv = container_of(of_id->data, struct ufs_sprd_priv,
+ ufs_hba_sprd_vops);
+
+ host->hba = hba;
+ ufshcd_set_variant(hba, host);
+
+ hba->caps |= UFSHCD_CAP_CLK_GATING |
+ UFSHCD_CAP_CRYPTO |
+ UFSHCD_CAP_WB_EN;
+ hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS;
+
+ ret = ufs_sprd_parse_dt(dev, hba, host);
+
+ return ret;
+}
+
+static int sprd_ufs_pwr_change_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status,
+ struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ struct ufs_sprd_host *host = ufshcd_get_variant(hba);
+
+ if (status == PRE_CHANGE) {
+ memcpy(dev_req_params, dev_max_params,
+ sizeof(struct ufs_pa_layer_attr));
+ if (host->unipro_ver >= UFS_UNIPRO_VER_1_8)
+ ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx,
+ PA_INITIAL_ADAPT);
+ }
+
+ return 0;
+}
+
+static int ufs_sprd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
+ enum ufs_notify_change_status status)
+{
+ unsigned long flags;
+
+ if (status == PRE_CHANGE) {
+ if (ufshcd_is_auto_hibern8_supported(hba)) {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+ }
+
+ return 0;
+}
+
+static void ufs_sprd_n6_host_reset(struct ufs_hba *hba)
+{
+ struct ufs_sprd_priv *priv = ufs_sprd_get_priv_data(hba);
+
+ dev_info(hba->dev, "ufs host reset!\n");
+
+ reset_control_assert(priv->rci[SPRD_UFSHCI_SOFT_RST].rc);
+ usleep_range(1000, 1100);
+ reset_control_deassert(priv->rci[SPRD_UFSHCI_SOFT_RST].rc);
+}
+
+static int ufs_sprd_n6_device_reset(struct ufs_hba *hba)
+{
+ struct ufs_sprd_priv *priv = ufs_sprd_get_priv_data(hba);
+
+ dev_info(hba->dev, "ufs device reset!\n");
+
+ reset_control_assert(priv->rci[SPRD_UFS_DEV_RST].rc);
+ usleep_range(1000, 1100);
+ reset_control_deassert(priv->rci[SPRD_UFS_DEV_RST].rc);
+
+ return 0;
+}
+
+static void ufs_sprd_n6_key_acc_enable(struct ufs_hba *hba)
+{
+ u32 val;
+ u32 retry = 10;
+ struct arm_smccc_res res;
+
+check_hce:
+ /* Key access only can be enabled under HCE enable */
+ val = ufshcd_readl(hba, REG_CONTROLLER_ENABLE);
+ if (!(val & CONTROLLER_ENABLE)) {
+ ufs_sprd_n6_host_reset(hba);
+ val |= CONTROLLER_ENABLE;
+ ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
+ usleep_range(1000, 1100);
+ if (retry) {
+ retry--;
+ goto check_hce;
+ }
+ goto disable_crypto;
+ }
+
+ arm_smccc_smc(SPRD_SIP_SVC_STORAGE_UFS_CRYPTO_ENABLE,
+ 0, 0, 0, 0, 0, 0, 0, &res);
+ if (!res.a0)
+ return;
+
+disable_crypto:
+ dev_err(hba->dev, "key reg access enable fail, disable crypto\n");
+ hba->caps &= ~UFSHCD_CAP_CRYPTO;
+}
+
+static int ufs_sprd_n6_init(struct ufs_hba *hba)
+{
+ struct ufs_sprd_priv *priv;
+ int ret = 0;
+
+ ret = ufs_sprd_common_init(hba);
+ if (ret != 0)
+ return ret;
+
+ priv = ufs_sprd_get_priv_data(hba);
+
+ ret = regulator_enable(priv->vregi[SPRD_UFS_VDD_MPHY].vreg);
+ if (ret)
+ return -ENODEV;
+
+ if (hba->caps & UFSHCD_CAP_CRYPTO)
+ ufs_sprd_n6_key_acc_enable(hba);
+
+ return 0;
+}
+
+static int ufs_sprd_n6_phy_init(struct ufs_hba *hba)
+{
+ int ret = 0;
+ uint32_t val = 0;
+ uint32_t retry = 10;
+ uint32_t offset;
+ struct ufs_sprd_priv *priv = ufs_sprd_get_priv_data(hba);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBREFCLKCTRL2), 0x90);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBCRCTRL), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RXSQCONTROL,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RXSQCONTROL,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(1)), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBRATESEL), 0x01);
+
+ do {
+ /* phy_sram_init_done */
+ ufs_sprd_regmap_read(priv, SPRD_UFS_ANLG, 0xc, &val);
+ if ((val & 0x1) == 0x1) {
+ for (offset = 0x40; offset < 0x42; offset++) {
+ /* Lane afe calibration */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBCREGADDRLSB), 0x1c);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBCREGADDRMSB), offset);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBCREGWRLSB), 0x04);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBCREGWRMSB), 0x00);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBCREGRDWRSEL), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
+ }
+
+ goto update_phy;
+ }
+ udelay(1000);
+ retry--;
+ } while (retry > 0);
+
+ ret = -ETIMEDOUT;
+ goto out;
+
+update_phy:
+ /* phy_sram_ext_ld_done */
+ ufs_sprd_regmap_update(priv, SPRD_UFS_ANLG, 0xc, 0x2, 0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYDISABLE), 0x0);
+out:
+ return ret;
+}
+
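+/*
+ * HCE enable notification: before enabling, set the phy_sram_ext_ld_done and
+ * phy_sram_bypass bits, reset the host and re-enable crypto key access;
+ * after enabling, run the PHY init sequence and latch the UniPro version.
+ */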
+static int sprd_ufs_n6_hce_enable_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ int err = 0;
+ struct ufs_sprd_priv *priv = ufs_sprd_get_priv_data(hba);
+
+ if (status == PRE_CHANGE) {
+ /* phy_sram_ext_ld_done */
+ ufs_sprd_regmap_update(priv, SPRD_UFS_ANLG, 0xc, 0x2, 0x2);
+ /* phy_sram_bypass */
+ ufs_sprd_regmap_update(priv, SPRD_UFS_ANLG, 0xc, 0x4, 0x4);
+
+ ufs_sprd_n6_host_reset(hba);
+
+ if (hba->caps & UFSHCD_CAP_CRYPTO)
+ ufs_sprd_n6_key_acc_enable(hba);
+ }
+
+ if (status == POST_CHANGE) {
+ err = ufs_sprd_n6_phy_init(hba);
+ if (err) {
+ dev_err(hba->dev, "Phy setup failed (%d)\n", err);
+ goto out;
+ }
+
+ ufs_sprd_get_unipro_ver(hba);
+ }
+out:
+ return err;
+}
+
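+/*
+ * HIBERN8 notification: mask the UIC completion interrupt across HIBERN8
+ * entry and restore it after exit; gate the device reference clock and the
+ * USB31 PLL reference while the link is in HIBERN8.
+ */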
+static void sprd_ufs_n6_h8_notify(struct ufs_hba *hba,
+ enum uic_cmd_dme cmd,
+ enum ufs_notify_change_status status)
+{
+ struct ufs_sprd_priv *priv = ufs_sprd_get_priv_data(hba);
+
+ if (status == PRE_CHANGE) {
+ if (cmd == UIC_CMD_DME_HIBER_ENTER)
+ /*
+ * Disable UIC COMPL INTR to prevent access to UFSHCI after
+ * checking HCS.UPMCRS
+ */
+ ufs_sprd_ctrl_uic_compl(hba, false);
+
+ if (cmd == UIC_CMD_DME_HIBER_EXIT) {
+ ufs_sprd_regmap_update(priv, SPRD_UFS_AON_APB, APB_UFSDEV_REG,
+ APB_UFSDEV_REFCLK_EN, APB_UFSDEV_REFCLK_EN);
+ ufs_sprd_regmap_update(priv, SPRD_UFS_AON_APB, APB_USB31PLL_CTRL,
+ APB_USB31PLLV_REF2MPHY, APB_USB31PLLV_REF2MPHY);
+ }
+ }
+
+ if (status == POST_CHANGE) {
+ if (cmd == UIC_CMD_DME_HIBER_EXIT)
+ ufs_sprd_ctrl_uic_compl(hba, true);
+
+ if (cmd == UIC_CMD_DME_HIBER_ENTER) {
+ ufs_sprd_regmap_update(priv, SPRD_UFS_AON_APB, APB_UFSDEV_REG,
+ APB_UFSDEV_REFCLK_EN, 0);
+ ufs_sprd_regmap_update(priv, SPRD_UFS_AON_APB, APB_USB31PLL_CTRL,
+ APB_USB31PLLV_REF2MPHY, 0);
+ }
+ }
+}
+
+static struct ufs_sprd_priv n6_ufs = {
+ .rci[SPRD_UFSHCI_SOFT_RST] = { .name = "controller", },
+ .rci[SPRD_UFS_DEV_RST] = { .name = "device", },
+
+ .sysci[SPRD_UFS_ANLG] = { .name = "sprd,ufs-anlg-syscon", },
+ .sysci[SPRD_UFS_AON_APB] = { .name = "sprd,aon-apb-syscon", },
+
+ .vregi[SPRD_UFS_VDD_MPHY] = { .name = "vdd-mphy", },
+
+ .ufs_hba_sprd_vops = {
+ .name = "sprd,ums9620-ufs",
+ .init = ufs_sprd_n6_init,
+ .hce_enable_notify = sprd_ufs_n6_hce_enable_notify,
+ .pwr_change_notify = sprd_ufs_pwr_change_notify,
+ .hibern8_notify = sprd_ufs_n6_h8_notify,
+ .device_reset = ufs_sprd_n6_device_reset,
+ .suspend = ufs_sprd_suspend,
+ },
+};
+
+static const struct of_device_id __maybe_unused ufs_sprd_of_match[] = {
+ { .compatible = "sprd,ums9620-ufs", .data = &n6_ufs.ufs_hba_sprd_vops},
+ {},
+};
+MODULE_DEVICE_TABLE(of, ufs_sprd_of_match);
+
+static int ufs_sprd_probe(struct platform_device *pdev)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(ufs_sprd_of_match, dev->of_node);
+ err = ufshcd_pltfrm_init(pdev, of_id->data);
+ if (err)
+ dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
+
+ return err;
+}
+
+static int ufs_sprd_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&pdev->dev);
+ ufshcd_remove(hba);
+ return 0;
+}
+
+static const struct dev_pm_ops ufs_sprd_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
+};
+
+static struct platform_driver ufs_sprd_platform = {
+ .probe = ufs_sprd_probe,
+ .remove = ufs_sprd_remove,
+ .shutdown = ufshcd_pltfrm_shutdown,
+ .driver = {
+ .name = "ufshcd-sprd",
+ .pm = &ufs_sprd_pm_ops,
+ .of_match_table = of_match_ptr(ufs_sprd_of_match),
+ },
+};
+module_platform_driver(ufs_sprd_platform);
+
+MODULE_AUTHOR("Zhe Wang <zhe.wang1@unisoc.com>");
+MODULE_DESCRIPTION("Unisoc UFS Host Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/ufs/host/ufs-sprd.h b/drivers/ufs/host/ufs-sprd.h
new file mode 100644
index 000000000000..26ad5c3af4c1
--- /dev/null
+++ b/drivers/ufs/host/ufs-sprd.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * UNISOC UFS Host Controller driver
+ *
+ * Copyright (C) 2022 Unisoc, Inc.
+ * Author: Zhe Wang <zhe.wang1@unisoc.com>
+ */
+
+#ifndef _UFS_SPRD_H_
+#define _UFS_SPRD_H_
+
+/* Vendor specific attributes */
+#define RXSQCONTROL 0x8009
+#define CBRATESEL 0x8114
+#define CBCREGADDRLSB 0x8116
+#define CBCREGADDRMSB 0x8117
+#define CBCREGWRLSB 0x8118
+#define CBCREGWRMSB 0x8119
+#define CBCREGRDWRSEL 0x811C
+#define CBCRCTRL 0x811F
+#define CBREFCLKCTRL2 0x8132
+#define VS_MPHYDISABLE 0xD0C1
+
+#define APB_UFSDEV_REG 0xCE8
+#define APB_UFSDEV_REFCLK_EN 0x2
+#define APB_USB31PLL_CTRL 0xCFC
+#define APB_USB31PLLV_REF2MPHY 0x1
+
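+/* SiP SMC call used to enable access to the UFS inline crypto key registers */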
+#define SPRD_SIP_SVC_STORAGE_UFS_CRYPTO_ENABLE \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_SIP, \
+ 0x0301)
+
+enum SPRD_UFS_RST_INDEX {
+ SPRD_UFSHCI_SOFT_RST,
+ SPRD_UFS_DEV_RST,
+
+ SPRD_UFS_RST_MAX
+};
+
+enum SPRD_UFS_SYSCON_INDEX {
+ SPRD_UFS_ANLG,
+ SPRD_UFS_AON_APB,
+
+ SPRD_UFS_SYSCON_MAX
+};
+
+enum SPRD_UFS_VREG_INDEX {
+ SPRD_UFS_VDD_MPHY,
+
+ SPRD_UFS_VREG_MAX
+};
+
+struct ufs_sprd_rst {
+ const char *name;
+ struct reset_control *rc;
+};
+
+struct ufs_sprd_syscon {
+ const char *name;
+ struct regmap *regmap;
+};
+
+struct ufs_sprd_vreg {
+ const char *name;
+ struct regulator *vreg;
+};
+
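+/* Static, per-SoC resources (resets, syscons, regulators) and variant ops */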
+struct ufs_sprd_priv {
+ struct ufs_sprd_rst rci[SPRD_UFS_RST_MAX];
+ struct ufs_sprd_syscon sysci[SPRD_UFS_SYSCON_MAX];
+ struct ufs_sprd_vreg vregi[SPRD_UFS_VREG_MAX];
+ const struct ufs_hba_variant_ops ufs_hba_sprd_vops;
+};
+
+struct ufs_sprd_host {
+ struct ufs_hba *hba;
+ struct ufs_sprd_priv *priv;
+ void __iomem *ufs_dbg_mmio;
+
+ enum ufs_unipro_ver unipro_ver;
+};
+
+#endif /* _UFS_SPRD_H_ */