path: root/drivers/dma/amd
Diffstat (limited to 'drivers/dma/amd')
-rw-r--r--  drivers/dma/amd/Kconfig                   |   42
-rw-r--r--  drivers/dma/amd/Makefile                  |    5
-rw-r--r--  drivers/dma/amd/ae4dma/Makefile           |   10
-rw-r--r--  drivers/dma/amd/ae4dma/ae4dma-dev.c       |  157
-rw-r--r--  drivers/dma/amd/ae4dma/ae4dma-pci.c       |  156
-rw-r--r--  drivers/dma/amd/ae4dma/ae4dma.h           |  102
-rw-r--r--  drivers/dma/amd/ptdma/Makefile            |   10
-rw-r--r--  drivers/dma/amd/ptdma/ptdma-debugfs.c     |  143
-rw-r--r--  drivers/dma/amd/ptdma/ptdma-dev.c         |  309
-rw-r--r--  drivers/dma/amd/ptdma/ptdma-dmaengine.c   |  659
-rw-r--r--  drivers/dma/amd/ptdma/ptdma-pci.c         |  243
-rw-r--r--  drivers/dma/amd/ptdma/ptdma.h             |  338
-rw-r--r--  drivers/dma/amd/qdma/Makefile             |    5
-rw-r--r--  drivers/dma/amd/qdma/qdma-comm-regs.c     |   64
-rw-r--r--  drivers/dma/amd/qdma/qdma.c               | 1143
-rw-r--r--  drivers/dma/amd/qdma/qdma.h               |  266
16 files changed, 3652 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/amd/Kconfig b/drivers/dma/amd/Kconfig
new file mode 100644
index 000000000000..00d874872a8f
--- /dev/null
+++ b/drivers/dma/amd/Kconfig
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+config AMD_AE4DMA
+ tristate "AMD AE4DMA Engine"
+ depends on (X86_64 || COMPILE_TEST) && PCI
+ depends on AMD_PTDMA
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the AMD AE4DMA controller. This controller
+ provides DMA capabilities to perform high-bandwidth memory-to-memory
+ and I/O copy operations. It performs DMA transfers through
+ queue-based descriptor management. This DMA controller is intended
+ to be used with AMD Non-Transparent Bridge devices and not for
+ general purpose peripheral DMA.
+
+config AMD_PTDMA
+ tristate "AMD PassThru DMA Engine"
+ depends on X86_64 && PCI
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the AMD PTDMA controller. This controller
+ provides DMA capabilities to perform high-bandwidth memory-to-memory
+ and I/O copy operations. It performs DMA transfers through
+ queue-based descriptor management. This DMA controller is intended
+ to be used with AMD Non-Transparent Bridge devices and not for
+ general purpose peripheral DMA.
+
+config AMD_QDMA
+ tristate "AMD Queue-based DMA"
+ depends on HAS_IOMEM
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select REGMAP_MMIO
+ help
+ Enable support for the AMD Queue-based DMA subsystem. The primary
+ mechanism to transfer data using the QDMA is for the QDMA engine to
+ operate on instructions (descriptors) provided by the host operating
+ system. Using the descriptors, the QDMA can move data in either the
+ Host to Card (H2C) direction or the Card to Host (C2H) direction.
diff --git a/drivers/dma/amd/Makefile b/drivers/dma/amd/Makefile
new file mode 100644
index 000000000000..11278c06374d
--- /dev/null
+++ b/drivers/dma/amd/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_AMD_AE4DMA) += ae4dma/
+obj-$(CONFIG_AMD_PTDMA) += ptdma/
+obj-$(CONFIG_AMD_QDMA) += qdma/
diff --git a/drivers/dma/amd/ae4dma/Makefile b/drivers/dma/amd/ae4dma/Makefile
new file mode 100644
index 000000000000..e918f85a80ec
--- /dev/null
+++ b/drivers/dma/amd/ae4dma/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# AMD AE4DMA driver
+#
+
+obj-$(CONFIG_AMD_AE4DMA) += ae4dma.o
+
+ae4dma-objs := ae4dma-dev.o
+
+ae4dma-$(CONFIG_PCI) += ae4dma-pci.o
diff --git a/drivers/dma/amd/ae4dma/ae4dma-dev.c b/drivers/dma/amd/ae4dma/ae4dma-dev.c
new file mode 100644
index 000000000000..8de3bef41b58
--- /dev/null
+++ b/drivers/dma/amd/ae4dma/ae4dma-dev.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD AE4DMA driver
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#include "ae4dma.h"
+
+static unsigned int max_hw_q = 1;
+module_param(max_hw_q, uint, 0444);
+MODULE_PARM_DESC(max_hw_q, "max hw queues supported by engine (any non-zero value, default: 1)");
+
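+/*
+ * Deferred completion worker: sleep until the interrupt handler advances
+ * intr_cnt, then walk the descriptor ring from the driver's read index
+ * (dridx) up to the hardware read index, checking each descriptor's
+ * status and invoking the corresponding command callback.
+ */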
+static void ae4_pending_work(struct work_struct *work)
+{
+ struct ae4_cmd_queue *ae4cmd_q = container_of(work, struct ae4_cmd_queue, p_work.work);
+ struct pt_cmd_queue *cmd_q = &ae4cmd_q->cmd_q;
+ struct pt_cmd *cmd;
+ u32 cridx;
+
+ for (;;) {
+ wait_event_interruptible(ae4cmd_q->q_w,
+ ((atomic64_read(&ae4cmd_q->done_cnt)) <
+ atomic64_read(&ae4cmd_q->intr_cnt)));
+
+ atomic64_inc(&ae4cmd_q->done_cnt);
+
+ mutex_lock(&ae4cmd_q->cmd_lock);
+ cridx = readl(cmd_q->reg_control + AE4_RD_IDX_OFF);
+ while ((ae4cmd_q->dridx != cridx) && !list_empty(&ae4cmd_q->cmd)) {
+ cmd = list_first_entry(&ae4cmd_q->cmd, struct pt_cmd, entry);
+ list_del(&cmd->entry);
+
+ ae4_check_status_error(ae4cmd_q, ae4cmd_q->dridx);
+ cmd->pt_cmd_callback(cmd->data, cmd->ret);
+
+ ae4cmd_q->q_cmd_count--;
+ ae4cmd_q->dridx = (ae4cmd_q->dridx + 1) % CMD_Q_LEN;
+
+ complete_all(&ae4cmd_q->cmp);
+ }
+ mutex_unlock(&ae4cmd_q->cmd_lock);
+ }
+}
+
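+/*
+ * Per-queue interrupt handler: count the interrupt, acknowledge the
+ * queue's interrupt status bits and wake the completion worker.
+ */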
+static irqreturn_t ae4_core_irq_handler(int irq, void *data)
+{
+ struct ae4_cmd_queue *ae4cmd_q = data;
+ struct pt_cmd_queue *cmd_q;
+ struct pt_device *pt;
+ u32 status;
+
+ cmd_q = &ae4cmd_q->cmd_q;
+ pt = cmd_q->pt;
+
+ pt->total_interrupts++;
+ atomic64_inc(&ae4cmd_q->intr_cnt);
+
+ status = readl(cmd_q->reg_control + AE4_INTR_STS_OFF);
+ if (status & BIT(0)) {
+ status &= GENMASK(31, 1);
+ writel(status, cmd_q->reg_control + AE4_INTR_STS_OFF);
+ }
+
+ wake_up(&ae4cmd_q->q_w);
+
+ return IRQ_HANDLED;
+}
+
+void ae4_destroy_work(struct ae4_device *ae4)
+{
+ struct ae4_cmd_queue *ae4cmd_q;
+ int i;
+
+ for (i = 0; i < ae4->cmd_q_count; i++) {
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+
+ if (!ae4cmd_q->pws)
+ break;
+
+ cancel_delayed_work_sync(&ae4cmd_q->p_work);
+ destroy_workqueue(ae4cmd_q->pws);
+ }
+}
+
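+/*
+ * Initialize the AE4 engine: program the number of hardware queues,
+ * then give each queue an IRQ handler, a coherent descriptor ring and
+ * an ordered completion workqueue before registering with the DMA
+ * engine core.
+ */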
+int ae4_core_init(struct ae4_device *ae4)
+{
+ struct pt_device *pt = &ae4->pt;
+ struct ae4_cmd_queue *ae4cmd_q;
+ struct device *dev = pt->dev;
+ struct pt_cmd_queue *cmd_q;
+ int i, ret = 0;
+
+ writel(max_hw_q, pt->io_regs);
+
+ for (i = 0; i < max_hw_q; i++) {
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+ ae4cmd_q->id = ae4->cmd_q_count;
+ ae4->cmd_q_count++;
+
+ cmd_q = &ae4cmd_q->cmd_q;
+ cmd_q->pt = pt;
+
+ cmd_q->reg_control = pt->io_regs + ((i + 1) * AE4_Q_SZ);
+
+ ret = devm_request_irq(dev, ae4->ae4_irq[i], ae4_core_irq_handler, 0,
+ dev_name(pt->dev), ae4cmd_q);
+ if (ret)
+ return ret;
+
+ cmd_q->qsize = Q_SIZE(sizeof(struct ae4dma_desc));
+
+ cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize, &cmd_q->qbase_dma,
+ GFP_KERNEL);
+ if (!cmd_q->qbase)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ae4->cmd_q_count; i++) {
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+
+ cmd_q = &ae4cmd_q->cmd_q;
+
+ cmd_q->reg_control = pt->io_regs + ((i + 1) * AE4_Q_SZ);
+
+ /* Update the device registers with queue information. */
+ writel(CMD_Q_LEN, cmd_q->reg_control + AE4_MAX_IDX_OFF);
+
+ cmd_q->qdma_tail = cmd_q->qbase_dma;
+ writel(lower_32_bits(cmd_q->qdma_tail), cmd_q->reg_control + AE4_Q_BASE_L_OFF);
+ writel(upper_32_bits(cmd_q->qdma_tail), cmd_q->reg_control + AE4_Q_BASE_H_OFF);
+
+ INIT_LIST_HEAD(&ae4cmd_q->cmd);
+ init_waitqueue_head(&ae4cmd_q->q_w);
+
+ ae4cmd_q->pws = alloc_ordered_workqueue("ae4dma_%d", WQ_MEM_RECLAIM, ae4cmd_q->id);
+ if (!ae4cmd_q->pws) {
+ ae4_destroy_work(ae4);
+ return -ENOMEM;
+ }
+ INIT_DELAYED_WORK(&ae4cmd_q->p_work, ae4_pending_work);
+ queue_delayed_work(ae4cmd_q->pws, &ae4cmd_q->p_work, usecs_to_jiffies(100));
+
+ init_completion(&ae4cmd_q->cmp);
+ }
+
+ ret = pt_dmaengine_register(pt);
+ if (ret)
+ ae4_destroy_work(ae4);
+ else
+ ptdma_debugfs_setup(pt);
+
+ return ret;
+}
diff --git a/drivers/dma/amd/ae4dma/ae4dma-pci.c b/drivers/dma/amd/ae4dma/ae4dma-pci.c
new file mode 100644
index 000000000000..2c63907db228
--- /dev/null
+++ b/drivers/dma/amd/ae4dma/ae4dma-pci.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD AE4DMA driver
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#include "ae4dma.h"
+
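+/*
+ * Request one MSI-X vector per hardware queue; if that fails, fall back
+ * to a single MSI vector shared by all queues.
+ */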
+static int ae4_get_irqs(struct ae4_device *ae4)
+{
+ struct ae4_msix *ae4_msix = ae4->ae4_msix;
+ struct pt_device *pt = &ae4->pt;
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev;
+ int i, v, ret;
+
+ pdev = to_pci_dev(dev);
+
+ for (v = 0; v < ARRAY_SIZE(ae4_msix->msix_entry); v++)
+ ae4_msix->msix_entry[v].entry = v;
+
+ ret = pci_alloc_irq_vectors(pdev, v, v, PCI_IRQ_MSIX);
+ if (ret != v) {
+ if (ret > 0)
+ pci_free_irq_vectors(pdev);
+
+ dev_err(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_err(dev, "could not enable MSI (%d)\n", ret);
+ return ret;
+ }
+
+ ret = pci_irq_vector(pdev, 0);
+ if (ret < 0) {
+ pci_free_irq_vectors(pdev);
+ return ret;
+ }
+
+ for (i = 0; i < MAX_AE4_HW_QUEUES; i++)
+ ae4->ae4_irq[i] = ret;
+
+ } else {
+ ae4_msix->msix_count = ret;
+ for (i = 0; i < ae4_msix->msix_count; i++)
+ ae4->ae4_irq[i] = pci_irq_vector(pdev, i);
+ }
+
+ return ret;
+}
+
+static void ae4_free_irqs(struct ae4_device *ae4)
+{
+ struct ae4_msix *ae4_msix = ae4->ae4_msix;
+ struct pt_device *pt = &ae4->pt;
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(dev);
+
+ if (ae4_msix && (ae4_msix->msix_count || ae4->ae4_irq[MAX_AE4_HW_QUEUES - 1]))
+ pci_free_irq_vectors(pdev);
+}
+
+static void ae4_deinit(struct ae4_device *ae4)
+{
+ ae4_free_irqs(ae4);
+}
+
+static int ae4_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct ae4_device *ae4;
+ struct pt_device *pt;
+ int bar_mask;
+ int ret = 0;
+
+ ae4 = devm_kzalloc(dev, sizeof(*ae4), GFP_KERNEL);
+ if (!ae4)
+ return -ENOMEM;
+
+ ae4->ae4_msix = devm_kzalloc(dev, sizeof(struct ae4_msix), GFP_KERNEL);
+ if (!ae4->ae4_msix)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ goto ae4_error;
+
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ ret = pcim_iomap_regions(pdev, bar_mask, "ae4dma");
+ if (ret)
+ goto ae4_error;
+
+ pt = &ae4->pt;
+ pt->dev = dev;
+ pt->ver = AE4_DMA_VERSION;
+
+ pt->io_regs = pcim_iomap_table(pdev)[0];
+ if (!pt->io_regs) {
+ ret = -ENOMEM;
+ goto ae4_error;
+ }
+
+ ret = ae4_get_irqs(ae4);
+ if (ret < 0)
+ goto ae4_error;
+
+ pci_set_master(pdev);
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+
+ dev_set_drvdata(dev, ae4);
+
+ ret = ae4_core_init(ae4);
+ if (ret)
+ goto ae4_error;
+
+ return 0;
+
+ae4_error:
+ ae4_deinit(ae4);
+
+ return ret;
+}
+
+static void ae4_pci_remove(struct pci_dev *pdev)
+{
+ struct ae4_device *ae4 = dev_get_drvdata(&pdev->dev);
+
+ ae4_destroy_work(ae4);
+ ae4_deinit(ae4);
+}
+
+static const struct pci_device_id ae4_pci_table[] = {
+ { PCI_VDEVICE(AMD, 0x149B), },
+ /* Last entry must be zero */
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ae4_pci_table);
+
+static struct pci_driver ae4_pci_driver = {
+ .name = "ae4dma",
+ .id_table = ae4_pci_table,
+ .probe = ae4_pci_probe,
+ .remove = ae4_pci_remove,
+};
+
+module_pci_driver(ae4_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AMD AE4DMA driver");
diff --git a/drivers/dma/amd/ae4dma/ae4dma.h b/drivers/dma/amd/ae4dma/ae4dma.h
new file mode 100644
index 000000000000..57f6048726bb
--- /dev/null
+++ b/drivers/dma/amd/ae4dma/ae4dma.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD AE4DMA driver
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+#ifndef __AE4DMA_H__
+#define __AE4DMA_H__
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include "../ptdma/ptdma.h"
+#include "../../virt-dma.h"
+
+#define MAX_AE4_HW_QUEUES 16
+
+#define AE4_DESC_COMPLETED 0x03
+
+#define AE4_MAX_IDX_OFF 0x08
+#define AE4_RD_IDX_OFF 0x0c
+#define AE4_WR_IDX_OFF 0x10
+#define AE4_INTR_STS_OFF 0x14
+#define AE4_Q_BASE_L_OFF 0x18
+#define AE4_Q_BASE_H_OFF 0x1c
+#define AE4_Q_SZ 0x20
+
+#define AE4_DMA_VERSION 4
+#define CMD_AE4_DESC_DW0_VAL 2
+
+#define AE4_TIME_OUT 5000
+
+struct ae4_msix {
+ int msix_count;
+ struct msix_entry msix_entry[MAX_AE4_HW_QUEUES];
+};
+
+struct ae4_cmd_queue {
+ struct ae4_device *ae4;
+ struct pt_cmd_queue cmd_q;
+ struct list_head cmd;
+ /* protect command operations */
+ struct mutex cmd_lock;
+ struct delayed_work p_work;
+ struct workqueue_struct *pws;
+ struct completion cmp;
+ wait_queue_head_t q_w;
+ atomic64_t intr_cnt;
+ atomic64_t done_cnt;
+ u64 q_cmd_count;
+ u32 dridx;
+ u32 tail_wi;
+ u32 id;
+};
+
+union dwou {
+ u32 dw0;
+ struct dword0 {
+ u8 byte0;
+ u8 byte1;
+ u16 timestamp;
+ } dws;
+};
+
+struct dword1 {
+ u8 status;
+ u8 err_code;
+ u16 desc_id;
+};
+
+struct ae4dma_desc {
+ union dwou dwouv;
+ struct dword1 dw1;
+ u32 length;
+ u32 rsvd;
+ u32 src_hi;
+ u32 src_lo;
+ u32 dst_hi;
+ u32 dst_lo;
+};
+
+struct ae4_device {
+ struct pt_device pt;
+ struct ae4_msix *ae4_msix;
+ struct ae4_cmd_queue ae4cmd_q[MAX_AE4_HW_QUEUES];
+ unsigned int ae4_irq[MAX_AE4_HW_QUEUES];
+ unsigned int cmd_q_count;
+};
+
+int ae4_core_init(struct ae4_device *ae4);
+void ae4_destroy_work(struct ae4_device *ae4);
+void ae4_check_status_error(struct ae4_cmd_queue *ae4cmd_q, int idx);
+#endif
diff --git a/drivers/dma/amd/ptdma/Makefile b/drivers/dma/amd/ptdma/Makefile
new file mode 100644
index 000000000000..ce5410268a9a
--- /dev/null
+++ b/drivers/dma/amd/ptdma/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AMD Passthru DMA driver
+#
+
+obj-$(CONFIG_AMD_PTDMA) += ptdma.o
+
+ptdma-objs := ptdma-dev.o ptdma-dmaengine.o ptdma-debugfs.o
+
+ptdma-$(CONFIG_PCI) += ptdma-pci.o
diff --git a/drivers/dma/amd/ptdma/ptdma-debugfs.c b/drivers/dma/amd/ptdma/ptdma-debugfs.c
new file mode 100644
index 000000000000..c7c90bbf6fd8
--- /dev/null
+++ b/drivers/dma/amd/ptdma/ptdma-debugfs.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Passthrough DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "ptdma.h"
+#include "../ae4dma/ae4dma.h"
+
+/* DebugFS helpers */
+#define RI_VERSION_NUM 0x0000003F
+
+#define RI_NUM_VQM 0x00078000
+#define RI_NVQM_SHIFT 15
+
+static int pt_debugfs_info_show(struct seq_file *s, void *p)
+{
+ struct pt_device *pt = s->private;
+ struct ae4_device *ae4;
+ unsigned int regval;
+
+ seq_printf(s, "Device name: %s\n", dev_name(pt->dev));
+
+ if (pt->ver == AE4_DMA_VERSION) {
+ ae4 = container_of(pt, struct ae4_device, pt);
+ seq_printf(s, " # Queues: %d\n", ae4->cmd_q_count);
+ seq_printf(s, " # Cmds per queue: %d\n", CMD_Q_LEN);
+ } else {
+ seq_printf(s, " # Queues: %d\n", 1);
+ seq_printf(s, " # Cmds: %d\n", pt->cmd_count);
+ }
+
+ regval = ioread32(pt->io_regs + CMD_PT_VERSION);
+
+ seq_printf(s, " Version: %d\n", regval & RI_VERSION_NUM);
+ seq_puts(s, " Engines:");
+ seq_puts(s, "\n");
+ seq_printf(s, " Queues: %d\n", (regval & RI_NUM_VQM) >> RI_NVQM_SHIFT);
+
+ return 0;
+}
+
+/*
+ * Report the current statistics for the PTDMA device
+ */
+static int pt_debugfs_stats_show(struct seq_file *s, void *p)
+{
+ struct pt_device *pt = s->private;
+
+ seq_printf(s, "Total Interrupts Handled: %ld\n", pt->total_interrupts);
+
+ return 0;
+}
+
+static int pt_debugfs_queue_show(struct seq_file *s, void *p)
+{
+ struct pt_cmd_queue *cmd_q = s->private;
+ struct pt_device *pt;
+ unsigned int regval;
+
+ if (!cmd_q)
+ return 0;
+
+ seq_printf(s, " Pass-Thru: %ld\n", cmd_q->total_pt_ops);
+
+ pt = cmd_q->pt;
+ if (pt->ver == AE4_DMA_VERSION) {
+ regval = readl(cmd_q->reg_control + 0x4);
+ seq_printf(s, " Enabled Interrupts:: status 0x%x\n", regval);
+ } else {
+ regval = ioread32(cmd_q->reg_control + 0x000C);
+
+ seq_puts(s, " Enabled Interrupts:");
+ if (regval & INT_EMPTY_QUEUE)
+ seq_puts(s, " EMPTY");
+ if (regval & INT_QUEUE_STOPPED)
+ seq_puts(s, " STOPPED");
+ if (regval & INT_ERROR)
+ seq_puts(s, " ERROR");
+ if (regval & INT_COMPLETION)
+ seq_puts(s, " COMPLETION");
+ seq_puts(s, "\n");
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(pt_debugfs_info);
+DEFINE_SHOW_ATTRIBUTE(pt_debugfs_queue);
+DEFINE_SHOW_ATTRIBUTE(pt_debugfs_stats);
+
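+/*
+ * Create the debugfs hierarchy: "info" and "stats" files for the device
+ * plus a per-queue "stats" file (one directory per queue on AE4DMA, a
+ * single "q" directory otherwise).
+ */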
+void ptdma_debugfs_setup(struct pt_device *pt)
+{
+ struct dentry *debugfs_q_instance;
+ struct ae4_cmd_queue *ae4cmd_q;
+ struct pt_cmd_queue *cmd_q;
+ struct ae4_device *ae4;
+ char name[30];
+ int i;
+
+ if (!debugfs_initialized())
+ return;
+
+ debugfs_create_file("info", 0400, pt->dma_dev.dbg_dev_root, pt,
+ &pt_debugfs_info_fops);
+
+ debugfs_create_file("stats", 0400, pt->dma_dev.dbg_dev_root, pt,
+ &pt_debugfs_stats_fops);
+
+ if (pt->ver == AE4_DMA_VERSION) {
+ ae4 = container_of(pt, struct ae4_device, pt);
+ for (i = 0; i < ae4->cmd_q_count; i++) {
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+ cmd_q = &ae4cmd_q->cmd_q;
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, 29, "q%d", ae4cmd_q->id);
+
+ debugfs_q_instance =
+ debugfs_create_dir(name, pt->dma_dev.dbg_dev_root);
+
+ debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
+ &pt_debugfs_queue_fops);
+ }
+ } else {
+ debugfs_q_instance =
+ debugfs_create_dir("q", pt->dma_dev.dbg_dev_root);
+ cmd_q = &pt->cmd_q;
+ debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
+ &pt_debugfs_queue_fops);
+ }
+}
+EXPORT_SYMBOL_GPL(ptdma_debugfs_setup);
diff --git a/drivers/dma/amd/ptdma/ptdma-dev.c b/drivers/dma/amd/ptdma/ptdma-dev.c
new file mode 100644
index 000000000000..a2bf13ff18b6
--- /dev/null
+++ b/drivers/dma/amd/ptdma/ptdma-dev.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Passthru DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "ptdma.h"
+
+/* Human-readable error strings */
+static char *pt_error_codes[] = {
+ "",
+ "ERR 01: ILLEGAL_ENGINE",
+ "ERR 03: ILLEGAL_FUNCTION_TYPE",
+ "ERR 04: ILLEGAL_FUNCTION_MODE",
+ "ERR 06: ILLEGAL_FUNCTION_SIZE",
+ "ERR 08: ILLEGAL_FUNCTION_RSVD",
+ "ERR 09: ILLEGAL_BUFFER_LENGTH",
+ "ERR 10: VLSB_FAULT",
+ "ERR 11: ILLEGAL_MEM_ADDR",
+ "ERR 12: ILLEGAL_MEM_SEL",
+ "ERR 13: ILLEGAL_CONTEXT_ID",
+ "ERR 15: 0xF Reserved",
+ "ERR 18: CMD_TIMEOUT",
+ "ERR 19: IDMA0_AXI_SLVERR",
+ "ERR 20: IDMA0_AXI_DECERR",
+ "ERR 21: 0x15 Reserved",
+ "ERR 22: IDMA1_AXI_SLAVE_FAULT",
+ "ERR 23: IDMA1_AIXI_DECERR",
+ "ERR 24: 0x18 Reserved",
+ "ERR 27: 0x1B Reserved",
+ "ERR 38: ODMA0_AXI_SLVERR",
+ "ERR 39: ODMA0_AXI_DECERR",
+ "ERR 40: 0x28 Reserved",
+ "ERR 41: ODMA1_AXI_SLVERR",
+ "ERR 42: ODMA1_AXI_DECERR",
+ "ERR 43: LSB_PARITY_ERR",
+};
+
+static void pt_log_error(struct pt_device *d, int e)
+{
+ dev_err(d->dev, "PTDMA error: %s (0x%x)\n", pt_error_codes[e], e);
+}
+
+void pt_start_queue(struct pt_cmd_queue *cmd_q)
+{
+ /* Turn on the run bit */
+ iowrite32(cmd_q->qcontrol | CMD_Q_RUN, cmd_q->reg_control);
+}
+
+void pt_stop_queue(struct pt_cmd_queue *cmd_q)
+{
+ /* Turn off the run bit */
+ iowrite32(cmd_q->qcontrol & ~CMD_Q_RUN, cmd_q->reg_control);
+}
+
+static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd_q)
+{
+ bool soc = FIELD_GET(DWORD0_SOC, desc->dw0);
+ u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
+ u32 tail;
+ unsigned long flags;
+
+ if (soc) {
+ desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0);
+ desc->dw0 &= ~DWORD0_SOC;
+ }
+ spin_lock_irqsave(&cmd_q->q_lock, flags);
+
+ /* Copy 32-byte command descriptor to hw queue. */
+ memcpy(q_desc, desc, 32);
+ cmd_q->qidx = (cmd_q->qidx + 1) % CMD_Q_LEN;
+
+ /* The data used by this command must be flushed to memory */
+ wmb();
+
+ /* Write the new tail address back to the queue register */
+ tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
+ iowrite32(tail, cmd_q->reg_control + 0x0004);
+
+ /* Turn the queue back on using our cached control register */
+ pt_start_queue(cmd_q);
+ spin_unlock_irqrestore(&cmd_q->q_lock, flags);
+
+ return 0;
+}
+
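+/*
+ * Build a pass-through (memcpy) descriptor from the engine parameters,
+ * program the queue interrupt enable according to the request, and
+ * submit the descriptor to the command queue.
+ */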
+int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
+ struct pt_passthru_engine *pt_engine)
+{
+ struct ptdma_desc desc;
+ struct pt_device *pt = container_of(cmd_q, struct pt_device, cmd_q);
+
+ cmd_q->cmd_error = 0;
+ cmd_q->total_pt_ops++;
+ memset(&desc, 0, sizeof(desc));
+ desc.dw0 = CMD_DESC_DW0_VAL;
+ desc.length = pt_engine->src_len;
+ desc.src_lo = lower_32_bits(pt_engine->src_dma);
+ desc.dw3.src_hi = upper_32_bits(pt_engine->src_dma);
+ desc.dst_lo = lower_32_bits(pt_engine->dst_dma);
+ desc.dw5.dst_hi = upper_32_bits(pt_engine->dst_dma);
+
+ if (cmd_q->int_en)
+ pt_core_enable_queue_interrupts(pt);
+ else
+ pt_core_disable_queue_interrupts(pt);
+
+ return pt_core_execute_cmd(&desc, cmd_q);
+}
+
+static void pt_do_cmd_complete(unsigned long data)
+{
+ struct pt_tasklet_data *tdata = (struct pt_tasklet_data *)data;
+ struct pt_cmd *cmd = tdata->cmd;
+ struct pt_cmd_queue *cmd_q = &cmd->pt->cmd_q;
+ u32 tail;
+
+ if (cmd_q->cmd_error) {
+ /*
+ * Log the error and flush the queue by
+ * moving the head pointer
+ */
+ tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
+ pt_log_error(cmd_q->pt, cmd_q->cmd_error);
+ iowrite32(tail, cmd_q->reg_control + 0x0008);
+ }
+
+ cmd->pt_cmd_callback(cmd->data, cmd->ret);
+}
+
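+/*
+ * Read and acknowledge the queue interrupt status, latch the first
+ * error reported by the hardware and complete the in-flight command.
+ */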
+void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q)
+{
+ u32 status;
+
+ status = ioread32(cmd_q->reg_control + 0x0010);
+ if (status) {
+ cmd_q->int_status = status;
+ cmd_q->q_status = ioread32(cmd_q->reg_control + 0x0100);
+ cmd_q->q_int_status = ioread32(cmd_q->reg_control + 0x0104);
+
+ /* On error, only save the first error value */
+ if ((status & INT_ERROR) && !cmd_q->cmd_error)
+ cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+ /* Acknowledge the completion */
+ iowrite32(status, cmd_q->reg_control + 0x0010);
+ pt_do_cmd_complete((ulong)&pt->tdata);
+ }
+}
+
+static irqreturn_t pt_core_irq_handler(int irq, void *data)
+{
+ struct pt_device *pt = data;
+ struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+
+ pt_core_disable_queue_interrupts(pt);
+ pt->total_interrupts++;
+ pt_check_status_trans(pt, cmd_q);
+ pt_core_enable_queue_interrupts(pt);
+ return IRQ_HANDLED;
+}
+
+int pt_core_init(struct pt_device *pt)
+{
+ char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
+ struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+ u32 dma_addr_lo, dma_addr_hi;
+ struct device *dev = pt->dev;
+ struct dma_pool *dma_pool;
+ int ret;
+
+ /* Allocate a dma pool for the queue */
+ snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q", dev_name(pt->dev));
+
+ dma_pool = dma_pool_create(dma_pool_name, dev,
+ PT_DMAPOOL_MAX_SIZE,
+ PT_DMAPOOL_ALIGN, 0);
+ if (!dma_pool)
+ return -ENOMEM;
+
+ /* ptdma core initialisation */
+ iowrite32(CMD_CONFIG_VHB_EN, pt->io_regs + CMD_CONFIG_OFFSET);
+ iowrite32(CMD_QUEUE_PRIO, pt->io_regs + CMD_QUEUE_PRIO_OFFSET);
+ iowrite32(CMD_TIMEOUT_DISABLE, pt->io_regs + CMD_TIMEOUT_OFFSET);
+ iowrite32(CMD_CLK_GATE_CONFIG, pt->io_regs + CMD_CLK_GATE_CTL_OFFSET);
+ iowrite32(CMD_CONFIG_REQID, pt->io_regs + CMD_REQID_CONFIG_OFFSET);
+
+ cmd_q->pt = pt;
+ cmd_q->dma_pool = dma_pool;
+ spin_lock_init(&cmd_q->q_lock);
+
+ /* Page alignment satisfies our needs for N <= 128 */
+ cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+ cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize,
+ &cmd_q->qbase_dma,
+ GFP_KERNEL);
+ if (!cmd_q->qbase) {
+ dev_err(dev, "unable to allocate command queue\n");
+ ret = -ENOMEM;
+ goto e_destroy_pool;
+ }
+
+ cmd_q->qidx = 0;
+
+ /* Preset some register values */
+ cmd_q->reg_control = pt->io_regs + CMD_Q_STATUS_INCR;
+
+ /* Turn off the queues and disable interrupts until ready */
+ pt_core_disable_queue_interrupts(pt);
+
+ cmd_q->qcontrol = 0; /* Start with nothing */
+ iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
+
+ ioread32(cmd_q->reg_control + 0x0104);
+ ioread32(cmd_q->reg_control + 0x0100);
+
+ /* Clear the interrupt status */
+ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+
+ /* Request an irq */
+ ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt);
+ if (ret) {
+ dev_err(dev, "unable to allocate an IRQ\n");
+ goto e_free_dma;
+ }
+
+ /* Update the device registers with queue information. */
+ cmd_q->qcontrol &= ~CMD_Q_SIZE;
+ cmd_q->qcontrol |= FIELD_PREP(CMD_Q_SIZE, QUEUE_SIZE_VAL);
+
+ cmd_q->qdma_tail = cmd_q->qbase_dma;
+ dma_addr_lo = lower_32_bits(cmd_q->qdma_tail);
+ iowrite32((u32)dma_addr_lo, cmd_q->reg_control + 0x0004);
+ iowrite32((u32)dma_addr_lo, cmd_q->reg_control + 0x0008);
+
+ dma_addr_hi = upper_32_bits(cmd_q->qdma_tail);
+ cmd_q->qcontrol |= (dma_addr_hi << 16);
+ iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
+
+ pt_core_enable_queue_interrupts(pt);
+
+ /* Register the DMA engine support */
+ ret = pt_dmaengine_register(pt);
+ if (ret)
+ goto e_free_irq;
+
+ /* Set up debugfs entries */
+ ptdma_debugfs_setup(pt);
+
+ return 0;
+
+e_free_irq:
+ free_irq(pt->pt_irq, pt);
+
+e_free_dma:
+ dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma);
+
+e_destroy_pool:
+ dma_pool_destroy(pt->cmd_q.dma_pool);
+
+ return ret;
+}
+
+void pt_core_destroy(struct pt_device *pt)
+{
+ struct device *dev = pt->dev;
+ struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+ struct pt_cmd *cmd;
+
+ /* Unregister the DMA engine */
+ pt_dmaengine_unregister(pt);
+
+ /* Disable and clear interrupts */
+ pt_core_disable_queue_interrupts(pt);
+
+ /* Turn off the run bit */
+ pt_stop_queue(cmd_q);
+
+ /* Clear the interrupt status */
+ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+ ioread32(cmd_q->reg_control + 0x0104);
+ ioread32(cmd_q->reg_control + 0x0100);
+
+ free_irq(pt->pt_irq, pt);
+
+ dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase,
+ cmd_q->qbase_dma);
+
+ /* Flush the cmd queue */
+ while (!list_empty(&pt->cmd)) {
+ /* Invoke the callback directly with an error code */
+ cmd = list_first_entry(&pt->cmd, struct pt_cmd, entry);
+ list_del(&cmd->entry);
+ cmd->pt_cmd_callback(cmd->data, -ENODEV);
+ }
+}
diff --git a/drivers/dma/amd/ptdma/ptdma-dmaengine.c b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
new file mode 100644
index 000000000000..628c49ce5de9
--- /dev/null
+++ b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
@@ -0,0 +1,659 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Passthrough DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include <linux/bitfield.h>
+#include "ptdma.h"
+#include "../ae4dma/ae4dma.h"
+#include "../../dmaengine.h"
+
+static char *ae4_error_codes[] = {
+ "",
+ "ERR 01: INVALID HEADER DW0",
+ "ERR 02: INVALID STATUS",
+ "ERR 03: INVALID LENGTH - 4 BYTE ALIGNMENT",
+ "ERR 04: INVALID SRC ADDR - 4 BYTE ALIGNMENT",
+ "ERR 05: INVALID DST ADDR - 4 BYTE ALIGNMENT",
+ "ERR 06: INVALID ALIGNMENT",
+ "ERR 07: INVALID DESCRIPTOR",
+};
+
+static void ae4_log_error(struct pt_device *d, int e)
+{
+ /* ERR 01 - 07 represent Invalid AE4 errors */
+ if (e <= 7)
+ dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", ae4_error_codes[e], e);
+ /* ERR 08 - 15 represent Invalid Descriptor errors */
+ else if (e > 7 && e <= 15)
+ dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "INVALID DESCRIPTOR", e);
+ /* ERR 16 - 31 represent Firmware errors */
+ else if (e > 15 && e <= 31)
+ dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "FIRMWARE ERROR", e);
+ /* ERR 32 - 63 represent Fatal errors */
+ else if (e > 31 && e <= 63)
+ dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "FATAL ERROR", e);
+ /* ERR 64 - 255 represent PTE errors */
+ else if (e > 63 && e <= 255)
+ dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "PTE ERROR", e);
+ else
+ dev_info(d->dev, "Unknown AE4DMA error");
+}
+
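+/*
+ * Inspect the descriptor at @idx in the ring: if it carries a non-zero
+ * status other than AE4_DESC_COMPLETED, record and log its error code.
+ */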
+void ae4_check_status_error(struct ae4_cmd_queue *ae4cmd_q, int idx)
+{
+ struct pt_cmd_queue *cmd_q = &ae4cmd_q->cmd_q;
+ struct ae4dma_desc desc;
+ u8 status;
+
+ memcpy(&desc, &cmd_q->qbase[idx], sizeof(struct ae4dma_desc));
+ status = desc.dw1.status;
+ if (status && status != AE4_DESC_COMPLETED) {
+ cmd_q->cmd_error = desc.dw1.err_code;
+ if (cmd_q->cmd_error)
+ ae4_log_error(cmd_q->pt, cmd_q->cmd_error);
+ }
+}
+EXPORT_SYMBOL_GPL(ae4_check_status_error);
+
+static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan)
+{
+ return container_of(dma_chan, struct pt_dma_chan, vc.chan);
+}
+
+static inline struct pt_dma_desc *to_pt_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct pt_dma_desc, vd);
+}
+
+static void pt_free_chan_resources(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+
+ vchan_free_chan_resources(&chan->vc);
+}
+
+static void pt_synchronize(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+
+ vchan_synchronize(&chan->vc);
+}
+
+static void pt_do_cleanup(struct virt_dma_desc *vd)
+{
+ struct pt_dma_desc *desc = to_pt_desc(vd);
+ struct pt_device *pt = desc->pt;
+
+ kmem_cache_free(pt->dma_desc_cache, desc);
+}
+
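+/*
+ * AE4DMA devices expose one command queue per channel; the original
+ * PTDMA engine has a single command queue.
+ */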
+static struct pt_cmd_queue *pt_get_cmd_queue(struct pt_device *pt, struct pt_dma_chan *chan)
+{
+ struct ae4_cmd_queue *ae4cmd_q;
+ struct pt_cmd_queue *cmd_q;
+ struct ae4_device *ae4;
+
+ if (pt->ver == AE4_DMA_VERSION) {
+ ae4 = container_of(pt, struct ae4_device, pt);
+ ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+ cmd_q = &ae4cmd_q->cmd_q;
+ } else {
+ cmd_q = &pt->cmd_q;
+ }
+
+ return cmd_q;
+}
+
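+/*
+ * Copy the descriptor into the ring at the software write index, advance
+ * the index, publish it to the hardware write-index register and wake
+ * the completion worker.
+ */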
+static int ae4_core_execute_cmd(struct ae4dma_desc *desc, struct ae4_cmd_queue *ae4cmd_q)
+{
+ bool soc = FIELD_GET(DWORD0_SOC, desc->dwouv.dw0);
+ struct pt_cmd_queue *cmd_q = &ae4cmd_q->cmd_q;
+
+ if (soc) {
+ desc->dwouv.dw0 |= FIELD_PREP(DWORD0_IOC, desc->dwouv.dw0);
+ desc->dwouv.dw0 &= ~DWORD0_SOC;
+ }
+
+ mutex_lock(&ae4cmd_q->cmd_lock);
+ memcpy(&cmd_q->qbase[ae4cmd_q->tail_wi], desc, sizeof(struct ae4dma_desc));
+ ae4cmd_q->q_cmd_count++;
+ ae4cmd_q->tail_wi = (ae4cmd_q->tail_wi + 1) % CMD_Q_LEN;
+ writel(ae4cmd_q->tail_wi, cmd_q->reg_control + AE4_WR_IDX_OFF);
+ mutex_unlock(&ae4cmd_q->cmd_lock);
+
+ wake_up(&ae4cmd_q->q_w);
+
+ return 0;
+}
+
+static int pt_core_perform_passthru_ae4(struct pt_cmd_queue *cmd_q,
+ struct pt_passthru_engine *pt_engine)
+{
+ struct ae4_cmd_queue *ae4cmd_q = container_of(cmd_q, struct ae4_cmd_queue, cmd_q);
+ struct ae4dma_desc desc;
+
+ cmd_q->cmd_error = 0;
+ cmd_q->total_pt_ops++;
+ memset(&desc, 0, sizeof(desc));
+ desc.dwouv.dws.byte0 = CMD_AE4_DESC_DW0_VAL;
+
+ desc.dw1.status = 0;
+ desc.dw1.err_code = 0;
+ desc.dw1.desc_id = 0;
+
+ desc.length = pt_engine->src_len;
+
+ desc.src_lo = upper_32_bits(pt_engine->src_dma);
+ desc.src_hi = lower_32_bits(pt_engine->src_dma);
+ desc.dst_lo = upper_32_bits(pt_engine->dst_dma);
+ desc.dst_hi = lower_32_bits(pt_engine->dst_dma);
+
+ return ae4_core_execute_cmd(&desc, ae4cmd_q);
+}
+
+static int pt_dma_start_desc(struct pt_dma_desc *desc, struct pt_dma_chan *chan)
+{
+ struct pt_passthru_engine *pt_engine;
+ struct pt_device *pt;
+ struct pt_cmd *pt_cmd;
+ struct pt_cmd_queue *cmd_q;
+
+ desc->issued_to_hw = 1;
+
+ pt_cmd = &desc->pt_cmd;
+ pt = pt_cmd->pt;
+
+ cmd_q = pt_get_cmd_queue(pt, chan);
+
+ pt_engine = &pt_cmd->passthru;
+
+ pt->tdata.cmd = pt_cmd;
+
+ /* Execute the command */
+ if (pt->ver == AE4_DMA_VERSION)
+ pt_cmd->ret = pt_core_perform_passthru_ae4(cmd_q, pt_engine);
+ else
+ pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);
+
+ return 0;
+}
+
+static struct pt_dma_desc *pt_next_dma_desc(struct pt_dma_chan *chan)
+{
+ /* Get the next DMA descriptor on the active list */
+ struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);
+
+ return vd ? to_pt_desc(vd) : NULL;
+}
+
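+/*
+ * Complete the given descriptor (PTDMA only; AE4DMA completion runs in
+ * pt_cmd_callback_work()) and return the next descriptor to submit, or
+ * NULL once the active list is empty.
+ */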
+static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
+ struct pt_dma_desc *desc)
+{
+ struct dma_async_tx_descriptor *tx_desc;
+ struct virt_dma_desc *vd;
+ struct pt_device *pt;
+ unsigned long flags;
+
+ pt = chan->pt;
+ /* Loop over descriptors until one is found with commands */
+ do {
+ if (desc) {
+ if (!desc->issued_to_hw) {
+ /* No errors, keep going */
+ if (desc->status != DMA_ERROR)
+ return desc;
+ }
+
+ tx_desc = &desc->vd.tx;
+ vd = &desc->vd;
+ } else {
+ tx_desc = NULL;
+ }
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ if (pt->ver != AE4_DMA_VERSION && desc) {
+ if (desc->status != DMA_COMPLETE) {
+ if (desc->status != DMA_ERROR)
+ desc->status = DMA_COMPLETE;
+
+ dma_cookie_complete(tx_desc);
+ dma_descriptor_unmap(tx_desc);
+ list_del(&desc->vd.node);
+ } else {
+ /* Don't handle it twice */
+ tx_desc = NULL;
+ }
+ }
+
+ desc = pt_next_dma_desc(chan);
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ if (pt->ver != AE4_DMA_VERSION && tx_desc) {
+ dmaengine_desc_get_callback_invoke(tx_desc, NULL);
+ dma_run_dependencies(tx_desc);
+ vchan_vdesc_fini(vd);
+ }
+ } while (desc);
+
+ return NULL;
+}
+
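+/*
+ * The ring is full when the number of in-flight slots reaches
+ * MAX_CMD_QLEN - 1, i.e. the write index is one slot behind the read
+ * index (modulo the ring length).
+ */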
+static inline bool ae4_core_queue_full(struct pt_cmd_queue *cmd_q)
+{
+ u32 front_wi = readl(cmd_q->reg_control + AE4_WR_IDX_OFF);
+ u32 rear_ri = readl(cmd_q->reg_control + AE4_RD_IDX_OFF);
+
+ if (((MAX_CMD_QLEN + front_wi - rear_ri) % MAX_CMD_QLEN) >= (MAX_CMD_QLEN - 1))
+ return true;
+
+ return false;
+}
+
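+/*
+ * Command completion callback and submission loop: on AE4DMA, wait for
+ * ring space before issuing the next descriptor; keep handing
+ * descriptors to the hardware until the active list is drained.
+ */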
+static void pt_cmd_callback(void *data, int err)
+{
+ struct pt_dma_desc *desc = data;
+ struct ae4_cmd_queue *ae4cmd_q;
+ struct dma_chan *dma_chan;
+ struct pt_dma_chan *chan;
+ struct ae4_device *ae4;
+ struct pt_device *pt;
+ int ret;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ dma_chan = desc->vd.tx.chan;
+ chan = to_pt_chan(dma_chan);
+ pt = chan->pt;
+
+ if (err)
+ desc->status = DMA_ERROR;
+
+ while (true) {
+ if (pt->ver == AE4_DMA_VERSION) {
+ ae4 = container_of(pt, struct ae4_device, pt);
+ ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+
+ if (ae4cmd_q->q_cmd_count >= (CMD_Q_LEN - 1) ||
+ ae4_core_queue_full(&ae4cmd_q->cmd_q)) {
+ wake_up(&ae4cmd_q->q_w);
+
+ if (wait_for_completion_timeout(&ae4cmd_q->cmp,
+ msecs_to_jiffies(AE4_TIME_OUT))
+ == 0) {
+ dev_err(pt->dev, "TIMEOUT %d:\n", ae4cmd_q->id);
+ break;
+ }
+
+ reinit_completion(&ae4cmd_q->cmp);
+ continue;
+ }
+ }
+
+ /* Check for DMA descriptor completion */
+ desc = pt_handle_active_desc(chan, desc);
+
+ /* Don't submit cmd if no descriptor or DMA is paused */
+ if (!desc)
+ break;
+
+ ret = pt_dma_start_desc(desc, chan);
+ if (!ret)
+ break;
+
+ desc->status = DMA_ERROR;
+ }
+}
+
+static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
+ unsigned long flags)
+{
+ struct pt_dma_desc *desc;
+
+ desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ vchan_tx_prep(&chan->vc, &desc->vd, flags);
+
+ desc->pt = chan->pt;
+ desc->pt->cmd_q.int_en = !!(flags & DMA_PREP_INTERRUPT);
+ desc->issued_to_hw = 0;
+ desc->status = DMA_IN_PROGRESS;
+
+ return desc;
+}
+
+static void pt_cmd_callback_work(void *data, int err)
+{
+ struct dma_async_tx_descriptor *tx_desc;
+ struct pt_dma_desc *desc = data;
+ struct dma_chan *dma_chan;
+ struct virt_dma_desc *vd;
+ struct pt_dma_chan *chan;
+ unsigned long flags;
+
+ if (!desc)
+ return;
+
+ dma_chan = desc->vd.tx.chan;
+ chan = to_pt_chan(dma_chan);
+
+ if (err == -EINPROGRESS)
+ return;
+
+ tx_desc = &desc->vd.tx;
+ vd = &desc->vd;
+
+ if (err)
+ desc->status = DMA_ERROR;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ if (desc->status != DMA_COMPLETE) {
+ if (desc->status != DMA_ERROR)
+ desc->status = DMA_COMPLETE;
+
+ dma_cookie_complete(tx_desc);
+ dma_descriptor_unmap(tx_desc);
+ } else {
+ tx_desc = NULL;
+ }
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ if (tx_desc) {
+ dmaengine_desc_get_callback_invoke(tx_desc, NULL);
+ dma_run_dependencies(tx_desc);
+ list_del(&desc->vd.node);
+ vchan_vdesc_fini(vd);
+ }
+}
+
+static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
+ dma_addr_t dst,
+ dma_addr_t src,
+ unsigned int len,
+ unsigned long flags)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_passthru_engine *pt_engine;
+ struct pt_device *pt = chan->pt;
+ struct ae4_cmd_queue *ae4cmd_q;
+ struct pt_dma_desc *desc;
+ struct ae4_device *ae4;
+ struct pt_cmd *pt_cmd;
+
+ desc = pt_alloc_dma_desc(chan, flags);
+ if (!desc)
+ return NULL;
+
+ pt_cmd = &desc->pt_cmd;
+ pt_cmd->pt = pt;
+ pt_engine = &pt_cmd->passthru;
+ pt_cmd->engine = PT_ENGINE_PASSTHRU;
+ pt_engine->src_dma = src;
+ pt_engine->dst_dma = dst;
+ pt_engine->src_len = len;
+ pt_cmd->pt_cmd_callback = pt_cmd_callback;
+ pt_cmd->data = desc;
+
+ desc->len = len;
+
+ if (pt->ver == AE4_DMA_VERSION) {
+ pt_cmd->pt_cmd_callback = pt_cmd_callback_work;
+ ae4 = container_of(pt, struct ae4_device, pt);
+ ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+ mutex_lock(&ae4cmd_q->cmd_lock);
+ list_add_tail(&pt_cmd->entry, &ae4cmd_q->cmd);
+ mutex_unlock(&ae4cmd_q->cmd_lock);
+ }
+
+ return desc;
+}
+
+static struct dma_async_tx_descriptor *
+pt_prep_dma_memcpy(struct dma_chan *dma_chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct pt_dma_desc *desc;
+
+ desc = pt_create_desc(dma_chan, dst, src, len, flags);
+ if (!desc)
+ return NULL;
+
+ return &desc->vd.tx;
+}
+
+static struct dma_async_tx_descriptor *
+pt_prep_dma_interrupt(struct dma_chan *dma_chan, unsigned long flags)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_dma_desc *desc;
+
+ desc = pt_alloc_dma_desc(chan, flags);
+ if (!desc)
+ return NULL;
+
+ return &desc->vd.tx;
+}
+
+static void pt_issue_pending(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_dma_desc *desc;
+ struct pt_device *pt;
+ unsigned long flags;
+ bool engine_is_idle = true;
+
+ pt = chan->pt;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ desc = pt_next_dma_desc(chan);
+ if (desc && pt->ver != AE4_DMA_VERSION)
+ engine_is_idle = false;
+
+ vchan_issue_pending(&chan->vc);
+
+ desc = pt_next_dma_desc(chan);
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ /* If there was nothing active, start processing */
+ if (engine_is_idle && desc)
+ pt_cmd_callback(desc, 0);
+}
+
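+/*
+ * Scan every slot of the AE4DMA ring for descriptors that completed
+ * with an error status.
+ */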
+static void pt_check_status_trans_ae4(struct pt_device *pt, struct pt_cmd_queue *cmd_q)
+{
+ struct ae4_cmd_queue *ae4cmd_q = container_of(cmd_q, struct ae4_cmd_queue, cmd_q);
+ int i;
+
+ for (i = 0; i < CMD_Q_LEN; i++)
+ ae4_check_status_error(ae4cmd_q, i);
+}
+
+static enum dma_status
+pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct pt_dma_chan *chan = to_pt_chan(c);
+ struct pt_device *pt = chan->pt;
+ struct pt_cmd_queue *cmd_q;
+
+ cmd_q = pt_get_cmd_queue(pt, chan);
+
+ if (pt->ver == AE4_DMA_VERSION)
+ pt_check_status_trans_ae4(pt, cmd_q);
+ else
+ pt_check_status_trans(pt, cmd_q);
+
+ return dma_cookie_status(c, cookie, txstate);
+}
+
+static int pt_pause(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_device *pt = chan->pt;
+ struct pt_cmd_queue *cmd_q;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ cmd_q = pt_get_cmd_queue(pt, chan);
+ pt_stop_queue(cmd_q);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ return 0;
+}
+
+static int pt_resume(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_dma_desc *desc = NULL;
+ struct pt_device *pt = chan->pt;
+ struct pt_cmd_queue *cmd_q;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ cmd_q = pt_get_cmd_queue(pt, chan);
+ pt_start_queue(cmd_q);
+ desc = pt_next_dma_desc(chan);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ /* If there was something active, re-start */
+ if (desc)
+ pt_cmd_callback(desc, 0);
+
+ return 0;
+}
+
+static int pt_terminate_all(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_device *pt = chan->pt;
+ struct pt_cmd_queue *cmd_q;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ cmd_q = pt_get_cmd_queue(pt, chan);
+ if (pt->ver == AE4_DMA_VERSION)
+ pt_stop_queue(cmd_q);
+ else
+ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ vchan_get_all_descriptors(&chan->vc, &head);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&chan->vc, &head);
+ vchan_free_chan_resources(&chan->vc);
+
+ return 0;
+}
+
+int pt_dmaengine_register(struct pt_device *pt)
+{
+ struct dma_device *dma_dev = &pt->dma_dev;
+ struct ae4_cmd_queue *ae4cmd_q = NULL;
+ struct ae4_device *ae4 = NULL;
+ struct pt_dma_chan *chan;
+ char *desc_cache_name;
+ int ret, i;
+
+ if (pt->ver == AE4_DMA_VERSION)
+ ae4 = container_of(pt, struct ae4_device, pt);
+
+ if (ae4)
+ pt->pt_dma_chan = devm_kcalloc(pt->dev, ae4->cmd_q_count,
+ sizeof(*pt->pt_dma_chan), GFP_KERNEL);
+ else
+ pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
+ GFP_KERNEL);
+
+ if (!pt->pt_dma_chan)
+ return -ENOMEM;
+
+ desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
+ "%s-dmaengine-desc-cache",
+ dev_name(pt->dev));
+ if (!desc_cache_name)
+ return -ENOMEM;
+
+ pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
+ sizeof(struct pt_dma_desc), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!pt->dma_desc_cache)
+ return -ENOMEM;
+
+ dma_dev->dev = pt->dev;
+ dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
+ dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
+ dma_dev->directions = DMA_MEM_TO_MEM;
+ dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
+
+ /*
+ * PTDMA is intended to be used with the AMD NTB devices, hence
+ * marking it as DMA_PRIVATE.
+ */
+ dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ /* Set base and prep routines */
+ dma_dev->device_free_chan_resources = pt_free_chan_resources;
+ dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
+ dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
+ dma_dev->device_issue_pending = pt_issue_pending;
+ dma_dev->device_tx_status = pt_tx_status;
+ dma_dev->device_pause = pt_pause;
+ dma_dev->device_resume = pt_resume;
+ dma_dev->device_terminate_all = pt_terminate_all;
+ dma_dev->device_synchronize = pt_synchronize;
+
+ if (ae4) {
+ for (i = 0; i < ae4->cmd_q_count; i++) {
+ chan = pt->pt_dma_chan + i;
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+ chan->id = ae4cmd_q->id;
+ chan->pt = pt;
+ chan->vc.desc_free = pt_do_cleanup;
+ vchan_init(&chan->vc, dma_dev);
+ }
+ } else {
+ chan = pt->pt_dma_chan;
+ chan->pt = pt;
+ chan->vc.desc_free = pt_do_cleanup;
+ vchan_init(&chan->vc, dma_dev);
+ }
+
+ ret = dma_async_device_register(dma_dev);
+ if (ret)
+ goto err_reg;
+
+ return 0;
+
+err_reg:
+ kmem_cache_destroy(pt->dma_desc_cache);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pt_dmaengine_register);
+
+void pt_dmaengine_unregister(struct pt_device *pt)
+{
+ struct dma_device *dma_dev = &pt->dma_dev;
+
+ dma_async_device_unregister(dma_dev);
+
+ kmem_cache_destroy(pt->dma_desc_cache);
+}
diff --git a/drivers/dma/amd/ptdma/ptdma-pci.c b/drivers/dma/amd/ptdma/ptdma-pci.c
new file mode 100644
index 000000000000..22739ff0c3c5
--- /dev/null
+++ b/drivers/dma/amd/ptdma/ptdma-pci.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Passthru DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/pci_ids.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+
+#include "ptdma.h"
+
+struct pt_msix {
+ int msix_count;
+ struct msix_entry msix_entry;
+};
+
+/*
+ * pt_alloc_struct - allocate and initialize the pt_device struct
+ *
+ * @dev: device struct of the PTDMA
+ */
+static struct pt_device *pt_alloc_struct(struct device *dev)
+{
+ struct pt_device *pt;
+
+ pt = devm_kzalloc(dev, sizeof(*pt), GFP_KERNEL);
+
+ if (!pt)
+ return NULL;
+ pt->dev = dev;
+
+ INIT_LIST_HEAD(&pt->cmd);
+
+ return pt;
+}
+
+static int pt_get_msix_irqs(struct pt_device *pt)
+{
+ struct pt_msix *pt_msix = pt->pt_msix;
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pt_msix->msix_entry.entry = 0;
+
+ ret = pci_enable_msix_range(pdev, &pt_msix->msix_entry, 1, 1);
+ if (ret < 0)
+ return ret;
+
+ pt_msix->msix_count = ret;
+
+ pt->pt_irq = pt_msix->msix_entry.vector;
+
+ return 0;
+}
+
+static int pt_get_msi_irq(struct pt_device *pt)
+{
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = pci_enable_msi(pdev);
+ if (ret)
+ return ret;
+
+ pt->pt_irq = pdev->irq;
+
+ return 0;
+}
+
+static int pt_get_irqs(struct pt_device *pt)
+{
+ struct device *dev = pt->dev;
+ int ret;
+
+ ret = pt_get_msix_irqs(pt);
+ if (!ret)
+ return 0;
+
+ /* Couldn't get MSI-X vectors, try MSI */
+ dev_err(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
+ ret = pt_get_msi_irq(pt);
+ if (!ret)
+ return 0;
+
+ /* Couldn't get MSI interrupt */
+ dev_err(dev, "could not enable MSI (%d)\n", ret);
+
+ return ret;
+}
+
+static void pt_free_irqs(struct pt_device *pt)
+{
+ struct pt_msix *pt_msix = pt->pt_msix;
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (pt_msix->msix_count)
+ pci_disable_msix(pdev);
+ else if (pt->pt_irq)
+ pci_disable_msi(pdev);
+
+ pt->pt_irq = 0;
+}
+
+static int pt_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct pt_device *pt;
+ struct pt_msix *pt_msix;
+ struct device *dev = &pdev->dev;
+ void __iomem * const *iomap_table;
+ int bar_mask;
+ int ret = -ENOMEM;
+
+ pt = pt_alloc_struct(dev);
+ if (!pt)
+ goto e_err;
+
+ pt_msix = devm_kzalloc(dev, sizeof(*pt_msix), GFP_KERNEL);
+ if (!pt_msix)
+ goto e_err;
+
+ pt->pt_msix = pt_msix;
+ pt->dev_vdata = (struct pt_dev_vdata *)id->driver_data;
+ if (!pt->dev_vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "missing driver data\n");
+ goto e_err;
+ }
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "pcim_enable_device failed (%d)\n", ret);
+ goto e_err;
+ }
+
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ ret = pcim_iomap_regions(pdev, bar_mask, "ptdma");
+ if (ret) {
+ dev_err(dev, "pcim_iomap_regions failed (%d)\n", ret);
+ goto e_err;
+ }
+
+ iomap_table = pcim_iomap_table(pdev);
+ if (!iomap_table) {
+ dev_err(dev, "pcim_iomap_table failed\n");
+ ret = -ENOMEM;
+ goto e_err;
+ }
+
+ pt->io_regs = iomap_table[pt->dev_vdata->bar];
+ if (!pt->io_regs) {
+ dev_err(dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto e_err;
+ }
+
+ ret = pt_get_irqs(pt);
+ if (ret)
+ goto e_err;
+
+ pci_set_master(pdev);
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
+ ret);
+ goto e_err;
+ }
+ }
+
+ dev_set_drvdata(dev, pt);
+
+ if (pt->dev_vdata)
+ ret = pt_core_init(pt);
+
+ if (ret)
+ goto e_err;
+
+ return 0;
+
+e_err:
+ dev_err(dev, "initialization failed ret = %d\n", ret);
+
+ return ret;
+}
+
+static void pt_pci_remove(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pt_device *pt = dev_get_drvdata(dev);
+
+ if (!pt)
+ return;
+
+ if (pt->dev_vdata)
+ pt_core_destroy(pt);
+
+ pt_free_irqs(pt);
+}
+
+static const struct pt_dev_vdata dev_vdata[] = {
+ {
+ .bar = 2,
+ },
+};
+
+static const struct pci_device_id pt_pci_table[] = {
+ { PCI_VDEVICE(AMD, 0x1498), (kernel_ulong_t)&dev_vdata[0] },
+ /* Last entry must be zero */
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, pt_pci_table);
+
+static struct pci_driver pt_pci_driver = {
+ .name = "ptdma",
+ .id_table = pt_pci_table,
+ .probe = pt_pci_probe,
+ .remove = pt_pci_remove,
+};
+
+module_pci_driver(pt_pci_driver);
+
+MODULE_AUTHOR("Sanjay R Mehta <sanju.mehta@amd.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AMD PassThru DMA driver");
diff --git a/drivers/dma/amd/ptdma/ptdma.h b/drivers/dma/amd/ptdma/ptdma.h
new file mode 100644
index 000000000000..ef3f55632107
--- /dev/null
+++ b/drivers/dma/amd/ptdma/ptdma.h
@@ -0,0 +1,338 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Passthru DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#ifndef __PT_DEV_H__
+#define __PT_DEV_H__
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/dmapool.h>
+
+#include "../../virt-dma.h"
+
+#define MAX_PT_NAME_LEN 16
+#define MAX_DMAPOOL_NAME_LEN 32
+
+#define MAX_HW_QUEUES 1
+#define MAX_CMD_QLEN 100
+
+#define PT_ENGINE_PASSTHRU 5
+
+/* Register Mappings */
+#define IRQ_MASK_REG 0x040
+#define IRQ_STATUS_REG 0x200
+
+#define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f)
+
+#define CMD_QUEUE_PRIO_OFFSET 0x00
+#define CMD_REQID_CONFIG_OFFSET 0x04
+#define CMD_TIMEOUT_OFFSET 0x08
+#define CMD_PT_VERSION 0x10
+
+#define CMD_Q_CONTROL_BASE 0x0000
+#define CMD_Q_TAIL_LO_BASE 0x0004
+#define CMD_Q_HEAD_LO_BASE 0x0008
+#define CMD_Q_INT_ENABLE_BASE 0x000C
+#define CMD_Q_INTERRUPT_STATUS_BASE 0x0010
+
+#define CMD_Q_STATUS_BASE 0x0100
+#define CMD_Q_INT_STATUS_BASE 0x0104
+#define CMD_Q_DMA_STATUS_BASE 0x0108
+#define CMD_Q_DMA_READ_STATUS_BASE 0x010C
+#define CMD_Q_DMA_WRITE_STATUS_BASE 0x0110
+#define CMD_Q_ABORT_BASE 0x0114
+#define CMD_Q_AX_CACHE_BASE 0x0118
+
+#define CMD_CONFIG_OFFSET 0x1120
+#define CMD_CLK_GATE_CTL_OFFSET 0x6004
+
+#define CMD_DESC_DW0_VAL 0x500012
+
+/* Address offset for virtual queue registers */
+#define CMD_Q_STATUS_INCR 0x1000
+
+/* Bit masks */
+#define CMD_CONFIG_REQID 0
+#define CMD_TIMEOUT_DISABLE 0
+#define CMD_CLK_DYN_GATING_DIS 0
+#define CMD_CLK_SW_GATE_MODE 0
+#define CMD_CLK_GATE_CTL 0
+#define CMD_QUEUE_PRIO GENMASK(2, 1)
+#define CMD_CONFIG_VHB_EN BIT(0)
+#define CMD_CLK_DYN_GATING_EN BIT(0)
+#define CMD_CLK_HW_GATE_MODE BIT(0)
+#define CMD_CLK_GATE_ON_DELAY BIT(12)
+#define CMD_CLK_GATE_OFF_DELAY BIT(12)
+
+#define CMD_CLK_GATE_CONFIG (CMD_CLK_GATE_CTL | \
+ CMD_CLK_HW_GATE_MODE | \
+ CMD_CLK_GATE_ON_DELAY | \
+ CMD_CLK_DYN_GATING_EN | \
+ CMD_CLK_GATE_OFF_DELAY)
+
+#define CMD_Q_LEN 32
+#define CMD_Q_RUN BIT(0)
+#define CMD_Q_HALT BIT(1)
+#define CMD_Q_MEM_LOCATION BIT(2)
+#define CMD_Q_SIZE_MASK GENMASK(4, 0)
+#define CMD_Q_SIZE GENMASK(7, 3)
+#define CMD_Q_SHIFT GENMASK(1, 0)
+#define QUEUE_SIZE_VAL ((ffs(CMD_Q_LEN) - 2) & \
+ CMD_Q_SIZE_MASK)
+#define Q_PTR_MASK (2 << (QUEUE_SIZE_VAL + 5) - 1)
+#define Q_DESC_SIZE sizeof(struct ptdma_desc)
+#define Q_SIZE(n) (CMD_Q_LEN * (n))
+
+#define INT_COMPLETION BIT(0)
+#define INT_ERROR BIT(1)
+#define INT_QUEUE_STOPPED BIT(2)
+#define INT_EMPTY_QUEUE BIT(3)
+#define SUPPORTED_INTERRUPTS (INT_COMPLETION | INT_ERROR)
+
+/****** Local Storage Block ******/
+#define LSB_START 0
+#define LSB_END 127
+#define LSB_COUNT (LSB_END - LSB_START + 1)
+
+#define PT_DMAPOOL_MAX_SIZE 64
+#define PT_DMAPOOL_ALIGN BIT(5)
+
+#define PT_PASSTHRU_BLOCKSIZE 512
+
+struct pt_device;
+
+struct pt_tasklet_data {
+ struct completion completion;
+ struct pt_cmd *cmd;
+};
+
+/*
+ * struct pt_passthru_engine - pass-through operation
+ * without performing DMA mapping
+ * @mask: mask to be applied to data
+ * @mask_len: length in bytes of mask
+ * @src_dma: data to be used for this operation
+ * @dst_dma: data produced by this operation
+ * @src_len: length in bytes of data used for this operation
+ *
+ * Variables required to be set when calling pt_enqueue_cmd():
+ * - src_dma, dst_dma, src_len
+ * - mask, mask_len if a bit mask is used
+ */
+struct pt_passthru_engine {
+ dma_addr_t mask;
+ u32 mask_len; /* In bytes */
+
+ dma_addr_t src_dma, dst_dma;
+ u64 src_len; /* In bytes */
+};
+
+/*
+ * struct pt_cmd - PTDMA operation request
+ * @entry: list element
+ * @work: work element used for callbacks
+ * @pt: PT device to be run on
+ * @ret: operation return code
+ * @engine: PTDMA operation to perform (passthru)
+ * @engine_error: PT engine return code
+ * @passthru: engine specific structures, refer to specific engine struct below
+ * @pt_cmd_callback: operation completion callback function
+ * @data: parameter value to be supplied to the callback function
+ *
+ * Variables required to be set when calling pt_enqueue_cmd():
+ * - engine, pt_cmd_callback
+ * - See the operation structures below for what is required for each
+ * operation.
+ */
+struct pt_cmd {
+ struct list_head entry;
+ struct work_struct work;
+ struct pt_device *pt;
+ int ret;
+ u32 engine;
+ u32 engine_error;
+ struct pt_passthru_engine passthru;
+ /* Completion callback support */
+ void (*pt_cmd_callback)(void *data, int err);
+ void *data;
+};
+
+struct pt_dma_desc {
+ struct virt_dma_desc vd;
+ struct pt_device *pt;
+ enum dma_status status;
+ size_t len;
+ bool issued_to_hw;
+ struct pt_cmd pt_cmd;
+};
+
+struct pt_dma_chan {
+ struct virt_dma_chan vc;
+ struct pt_device *pt;
+ u32 id;
+};
+
+struct pt_cmd_queue {
+ struct pt_device *pt;
+
+ /* Queue dma pool */
+ struct dma_pool *dma_pool;
+
+ /* Queue base address (not necessarily aligned) */
+ struct ptdma_desc *qbase;
+
+ /* Aligned queue start address (per requirement) */
+ spinlock_t q_lock ____cacheline_aligned;
+ unsigned int qidx;
+
+ unsigned int qsize;
+ dma_addr_t qbase_dma;
+ dma_addr_t qdma_tail;
+
+ unsigned int active;
+ unsigned int suspended;
+
+ /* Interrupt flag */
+ bool int_en;
+
+ /* Register addresses for queue */
+ void __iomem *reg_control;
+ u32 qcontrol; /* Cached control register */
+
+ /* Status values from job */
+ u32 int_status;
+ u32 q_status;
+ u32 q_int_status;
+ u32 cmd_error;
+ /* Queue Statistics */
+ unsigned long total_pt_ops;
+} ____cacheline_aligned;
+
+struct pt_device {
+ struct list_head entry;
+
+ unsigned int ord;
+ char name[MAX_PT_NAME_LEN];
+
+ struct device *dev;
+
+ /* Bus specific device information */
+ struct pt_msix *pt_msix;
+
+ struct pt_dev_vdata *dev_vdata;
+
+ unsigned int pt_irq;
+
+ /* I/O area used for device communication */
+ void __iomem *io_regs;
+
+ spinlock_t cmd_lock ____cacheline_aligned;
+ unsigned int cmd_count;
+ struct list_head cmd;
+
+ /*
+ * The command queue. This represents the queue available on the
+ * PTDMA for processing cmds.
+ */
+ struct pt_cmd_queue cmd_q;
+
+ /* Support for the DMA Engine capabilities */
+ struct dma_device dma_dev;
+ struct pt_dma_chan *pt_dma_chan;
+ struct kmem_cache *dma_desc_cache;
+
+ wait_queue_head_t lsb_queue;
+
+ /* Device Statistics */
+ unsigned long total_interrupts;
+
+ struct pt_tasklet_data tdata;
+ int ver;
+};
+
+/*
+ * descriptor for PTDMA commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory type
+ * word 6: reserved 32 bits
+ * word 7: reserved 32 bits
+ */
+
+#define DWORD0_SOC BIT(0)
+#define DWORD0_IOC BIT(1)
+
+struct dword3 {
+ unsigned int src_hi:16;
+ unsigned int src_mem:2;
+ unsigned int lsb_cxt_id:8;
+ unsigned int rsvd1:5;
+ unsigned int fixed:1;
+};
+
+struct dword5 {
+ unsigned int dst_hi:16;
+ unsigned int dst_mem:2;
+ unsigned int rsvd1:13;
+ unsigned int fixed:1;
+};
+
+struct ptdma_desc {
+ u32 dw0;
+ u32 length;
+ u32 src_lo;
+ struct dword3 dw3;
+ u32 dst_lo;
+ struct dword5 dw5;
+ __le32 rsvd1;
+ __le32 rsvd2;
+};
+
+/* Structure to hold PT device data */
+struct pt_dev_vdata {
+ const unsigned int bar;
+};
+
+int pt_dmaengine_register(struct pt_device *pt);
+void pt_dmaengine_unregister(struct pt_device *pt);
+
+void ptdma_debugfs_setup(struct pt_device *pt);
+int pt_core_init(struct pt_device *pt);
+void pt_core_destroy(struct pt_device *pt);
+
+int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
+ struct pt_passthru_engine *pt_engine);
+
+void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q);
+void pt_start_queue(struct pt_cmd_queue *cmd_q);
+void pt_stop_queue(struct pt_cmd_queue *cmd_q);
+
+static inline void pt_core_disable_queue_interrupts(struct pt_device *pt)
+{
+ iowrite32(0, pt->cmd_q.reg_control + 0x000C);
+}
+
+static inline void pt_core_enable_queue_interrupts(struct pt_device *pt)
+{
+ iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C);
+}
+#endif
diff --git a/drivers/dma/amd/qdma/Makefile b/drivers/dma/amd/qdma/Makefile
new file mode 100644
index 000000000000..011268fef377
--- /dev/null
+++ b/drivers/dma/amd/qdma/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_AMD_QDMA) += amd-qdma.o
+
+amd-qdma-$(CONFIG_AMD_QDMA) := qdma.o qdma-comm-regs.o
diff --git a/drivers/dma/amd/qdma/qdma-comm-regs.c b/drivers/dma/amd/qdma/qdma-comm-regs.c
new file mode 100644
index 000000000000..9162f9d367cc
--- /dev/null
+++ b/drivers/dma/amd/qdma/qdma-comm-regs.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef __QDMA_REGS_DEF_H
+#define __QDMA_REGS_DEF_H
+
+#include "qdma.h"
+
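+/*
+ * Default register layout.  Offsets are byte offsets into the QDMA
+ * register space mapped through regmap in qdma.c; a count greater than
+ * one marks a register group accessed with regmap_bulk_read()/
+ * regmap_bulk_write().  The field table below gives msb/lsb bit
+ * positions, either within a single 32-bit register or within the
+ * 8 x 32-bit indirect context image decoded by qdma_get_field() and
+ * qdma_set_field() in qdma.c.
+ */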
+const struct qdma_reg qdma_regos_default[QDMA_REGO_MAX] = {
+ [QDMA_REGO_CTXT_DATA] = QDMA_REGO(0x804, 8),
+ [QDMA_REGO_CTXT_CMD] = QDMA_REGO(0x844, 1),
+ [QDMA_REGO_CTXT_MASK] = QDMA_REGO(0x824, 8),
+ [QDMA_REGO_MM_H2C_CTRL] = QDMA_REGO(0x1004, 1),
+ [QDMA_REGO_MM_C2H_CTRL] = QDMA_REGO(0x1204, 1),
+ [QDMA_REGO_QUEUE_COUNT] = QDMA_REGO(0x120, 1),
+ [QDMA_REGO_RING_SIZE] = QDMA_REGO(0x204, 1),
+ [QDMA_REGO_H2C_PIDX] = QDMA_REGO(0x18004, 1),
+ [QDMA_REGO_C2H_PIDX] = QDMA_REGO(0x18008, 1),
+ [QDMA_REGO_INTR_CIDX] = QDMA_REGO(0x18000, 1),
+ [QDMA_REGO_FUNC_ID] = QDMA_REGO(0x12c, 1),
+ [QDMA_REGO_ERR_INT] = QDMA_REGO(0xb04, 1),
+ [QDMA_REGO_ERR_STAT] = QDMA_REGO(0x248, 1),
+};
+
+const struct qdma_reg_field qdma_regfs_default[QDMA_REGF_MAX] = {
+ /* QDMA_REGO_CTXT_DATA fields */
+ [QDMA_REGF_IRQ_ENABLE] = QDMA_REGF(53, 53),
+ [QDMA_REGF_WBK_ENABLE] = QDMA_REGF(52, 52),
+ [QDMA_REGF_WBI_CHECK] = QDMA_REGF(34, 34),
+ [QDMA_REGF_IRQ_ARM] = QDMA_REGF(16, 16),
+ [QDMA_REGF_IRQ_VEC] = QDMA_REGF(138, 128),
+ [QDMA_REGF_IRQ_AGG] = QDMA_REGF(139, 139),
+ [QDMA_REGF_WBI_INTVL_ENABLE] = QDMA_REGF(35, 35),
+ [QDMA_REGF_MRKR_DISABLE] = QDMA_REGF(62, 62),
+ [QDMA_REGF_QUEUE_ENABLE] = QDMA_REGF(32, 32),
+ [QDMA_REGF_QUEUE_MODE] = QDMA_REGF(63, 63),
+ [QDMA_REGF_DESC_BASE] = QDMA_REGF(127, 64),
+ [QDMA_REGF_DESC_SIZE] = QDMA_REGF(49, 48),
+ [QDMA_REGF_RING_ID] = QDMA_REGF(47, 44),
+ [QDMA_REGF_QUEUE_BASE] = QDMA_REGF(11, 0),
+ [QDMA_REGF_QUEUE_MAX] = QDMA_REGF(44, 32),
+ [QDMA_REGF_FUNCTION_ID] = QDMA_REGF(24, 17),
+ [QDMA_REGF_INTR_AGG_BASE] = QDMA_REGF(66, 15),
+ [QDMA_REGF_INTR_VECTOR] = QDMA_REGF(11, 1),
+ [QDMA_REGF_INTR_SIZE] = QDMA_REGF(69, 67),
+ [QDMA_REGF_INTR_VALID] = QDMA_REGF(0, 0),
+ [QDMA_REGF_INTR_COLOR] = QDMA_REGF(14, 14),
+ [QDMA_REGF_INTR_FUNCTION_ID] = QDMA_REGF(125, 114),
+ /* QDMA_REGO_CTXT_CMD fields */
+ [QDMA_REGF_CMD_INDX] = QDMA_REGF(19, 7),
+ [QDMA_REGF_CMD_CMD] = QDMA_REGF(6, 5),
+ [QDMA_REGF_CMD_TYPE] = QDMA_REGF(4, 1),
+ [QDMA_REGF_CMD_BUSY] = QDMA_REGF(0, 0),
+ /* QDMA_REGO_QUEUE_COUNT fields */
+ [QDMA_REGF_QUEUE_COUNT] = QDMA_REGF(11, 0),
+ /* QDMA_REGO_ERR_INT fields */
+ [QDMA_REGF_ERR_INT_FUNC] = QDMA_REGF(11, 0),
+ [QDMA_REGF_ERR_INT_VEC] = QDMA_REGF(22, 12),
+ [QDMA_REGF_ERR_INT_ARM] = QDMA_REGF(24, 24),
+};
+
+#endif /* __QDMA_REGS_DEF_H */
diff --git a/drivers/dma/amd/qdma/qdma.c b/drivers/dma/amd/qdma/qdma.c
new file mode 100644
index 000000000000..8fb2d5e1df20
--- /dev/null
+++ b/drivers/dma/amd/qdma/qdma.c
@@ -0,0 +1,1143 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DMA driver for AMD Queue-based DMA Subsystem
+ *
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/amd_qdma.h>
+#include <linux/regmap.h>
+
+#include "qdma.h"
+
+#define CHAN_STR(q) (((q)->dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H")
+#define QDMA_REG_OFF(d, r) ((d)->roffs[r].off)
+
+/* MMIO regmap config for all QDMA registers */
+static const struct regmap_config qdma_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static inline struct qdma_queue *to_qdma_queue(struct dma_chan *chan)
+{
+ return container_of(chan, struct qdma_queue, vchan.chan);
+}
+
+static inline struct qdma_mm_vdesc *to_qdma_vdesc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct qdma_mm_vdesc, vdesc);
+}
+
+static inline u32 qdma_get_intr_ring_idx(struct qdma_device *qdev)
+{
+ u32 idx;
+
+ idx = qdev->qintr_rings[qdev->qintr_ring_idx++].ridx;
+ qdev->qintr_ring_idx %= qdev->qintr_ring_num;
+
+ return idx;
+}
+
+static u64 qdma_get_field(const struct qdma_device *qdev, const u32 *data,
+ enum qdma_reg_fields field)
+{
+ const struct qdma_reg_field *f = &qdev->rfields[field];
+ u16 low_pos, hi_pos, low_bit, hi_bit;
+ u64 value = 0, mask;
+
+ low_pos = f->lsb / BITS_PER_TYPE(*data);
+ hi_pos = f->msb / BITS_PER_TYPE(*data);
+
+ if (low_pos == hi_pos) {
+ low_bit = f->lsb % BITS_PER_TYPE(*data);
+ hi_bit = f->msb % BITS_PER_TYPE(*data);
+ mask = GENMASK(hi_bit, low_bit);
+ value = (data[low_pos] & mask) >> low_bit;
+ } else if (hi_pos == low_pos + 1) {
+ low_bit = f->lsb % BITS_PER_TYPE(*data);
+ hi_bit = low_bit + (f->msb - f->lsb);
+ value = ((u64)data[hi_pos] << BITS_PER_TYPE(*data)) |
+ data[low_pos];
+ mask = GENMASK_ULL(hi_bit, low_bit);
+ value = (value & mask) >> low_bit;
+ } else {
+ hi_bit = f->msb % BITS_PER_TYPE(*data);
+ mask = GENMASK(hi_bit, 0);
+ value = data[hi_pos] & mask;
+ low_bit = f->msb - f->lsb - hi_bit;
+ value <<= low_bit;
+ low_bit -= 32;
+ value |= (u64)data[hi_pos - 1] << low_bit;
+ mask = GENMASK(31, 32 - low_bit);
+ value |= (data[hi_pos - 2] & mask) >> low_bit;
+ }
+
+ return value;
+}
+
+static void qdma_set_field(const struct qdma_device *qdev, u32 *data,
+ enum qdma_reg_fields field, u64 value)
+{
+ const struct qdma_reg_field *f = &qdev->rfields[field];
+ u16 low_pos, hi_pos, low_bit;
+
+ low_pos = f->lsb / BITS_PER_TYPE(*data);
+ hi_pos = f->msb / BITS_PER_TYPE(*data);
+ low_bit = f->lsb % BITS_PER_TYPE(*data);
+
+ data[low_pos++] |= value << low_bit;
+ if (low_pos <= hi_pos)
+ data[low_pos++] |= (u32)(value >> (32 - low_bit));
+ if (low_pos <= hi_pos)
+ data[low_pos] |= (u32)(value >> (64 - low_bit));
+}
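+
+/*
+ * Illustrative sketch (not part of the driver): with the default field
+ * table, QDMA_REGF_DESC_BASE spans bits 127:64 of the 8 x 32-bit
+ * context image, so packing a 64-bit descriptor ring base lands in
+ * data[2] (low word) and data[3] (high word).
+ */
+static inline void qdma_example_pack_desc_base(const struct qdma_device *qdev,
+					       u32 *data, u64 desc_base)
+{
+	/* data[2] = lower 32 bits, data[3] = upper 32 bits of desc_base */
+	qdma_set_field(qdev, data, QDMA_REGF_DESC_BASE, desc_base);
+}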
+
+static inline int qdma_reg_write(const struct qdma_device *qdev,
+ const u32 *data, enum qdma_regs reg)
+{
+ const struct qdma_reg *r = &qdev->roffs[reg];
+ int ret;
+
+ if (r->count > 1)
+ ret = regmap_bulk_write(qdev->regmap, r->off, data, r->count);
+ else
+ ret = regmap_write(qdev->regmap, r->off, *data);
+
+ return ret;
+}
+
+static inline int qdma_reg_read(const struct qdma_device *qdev, u32 *data,
+ enum qdma_regs reg)
+{
+ const struct qdma_reg *r = &qdev->roffs[reg];
+ int ret;
+
+ if (r->count > 1)
+ ret = regmap_bulk_read(qdev->regmap, r->off, data, r->count);
+ else
+ ret = regmap_read(qdev->regmap, r->off, data);
+
+ return ret;
+}
+
+static int qdma_context_cmd_execute(const struct qdma_device *qdev,
+ enum qdma_ctxt_type type,
+ enum qdma_ctxt_cmd cmd, u16 index)
+{
+ u32 value = 0;
+ int ret;
+
+ qdma_set_field(qdev, &value, QDMA_REGF_CMD_INDX, index);
+ qdma_set_field(qdev, &value, QDMA_REGF_CMD_CMD, cmd);
+ qdma_set_field(qdev, &value, QDMA_REGF_CMD_TYPE, type);
+
+ ret = qdma_reg_write(qdev, &value, QDMA_REGO_CTXT_CMD);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout(qdev->regmap,
+ QDMA_REG_OFF(qdev, QDMA_REGO_CTXT_CMD),
+ value,
+ !qdma_get_field(qdev, &value,
+ QDMA_REGF_CMD_BUSY),
+ QDMA_POLL_INTRVL_US,
+ QDMA_POLL_TIMEOUT_US);
+ if (ret) {
+ qdma_err(qdev, "Context command execution timed out");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qdma_context_write_data(const struct qdma_device *qdev,
+ const u32 *data)
+{
+ u32 mask[QDMA_CTXT_REGMAP_LEN];
+ int ret;
+
+ memset(mask, ~0, sizeof(mask));
+
+ ret = qdma_reg_write(qdev, mask, QDMA_REGO_CTXT_MASK);
+ if (ret)
+ return ret;
+
+ ret = qdma_reg_write(qdev, data, QDMA_REGO_CTXT_DATA);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void qdma_prep_sw_desc_context(const struct qdma_device *qdev,
+ const struct qdma_ctxt_sw_desc *ctxt,
+ u32 *data)
+{
+ memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data));
+ qdma_set_field(qdev, data, QDMA_REGF_DESC_BASE, ctxt->desc_base);
+ qdma_set_field(qdev, data, QDMA_REGF_IRQ_VEC, ctxt->vec);
+ qdma_set_field(qdev, data, QDMA_REGF_FUNCTION_ID, qdev->fid);
+
+ qdma_set_field(qdev, data, QDMA_REGF_DESC_SIZE, QDMA_DESC_SIZE_32B);
+ qdma_set_field(qdev, data, QDMA_REGF_RING_ID, QDMA_DEFAULT_RING_ID);
+ qdma_set_field(qdev, data, QDMA_REGF_QUEUE_MODE, QDMA_QUEUE_OP_MM);
+ qdma_set_field(qdev, data, QDMA_REGF_IRQ_ENABLE, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_WBK_ENABLE, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_WBI_CHECK, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_IRQ_ARM, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_IRQ_AGG, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_WBI_INTVL_ENABLE, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_QUEUE_ENABLE, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_MRKR_DISABLE, 1);
+}
+
+static void qdma_prep_intr_context(const struct qdma_device *qdev,
+ const struct qdma_ctxt_intr *ctxt,
+ u32 *data)
+{
+ memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data));
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_AGG_BASE, ctxt->agg_base);
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_VECTOR, ctxt->vec);
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_SIZE, ctxt->size);
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_VALID, ctxt->valid);
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_COLOR, ctxt->color);
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_FUNCTION_ID, qdev->fid);
+}
+
+static void qdma_prep_fmap_context(const struct qdma_device *qdev,
+ const struct qdma_ctxt_fmap *ctxt,
+ u32 *data)
+{
+ memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data));
+ qdma_set_field(qdev, data, QDMA_REGF_QUEUE_BASE, ctxt->qbase);
+ qdma_set_field(qdev, data, QDMA_REGF_QUEUE_MAX, ctxt->qmax);
+}
+
+/*
+ * Program the indirect context register space
+ *
+ * Once the queue is enabled, context is dynamically updated by hardware. Any
+ * modification of the context through this API when the queue is enabled can
+ * result in unexpected behavior. Reading the context when the queue is enabled
+ * is not recommended as it can result in reduced performance.
+ */
+static int qdma_prog_context(struct qdma_device *qdev, enum qdma_ctxt_type type,
+ enum qdma_ctxt_cmd cmd, u16 index, u32 *ctxt)
+{
+ int ret;
+
+ mutex_lock(&qdev->ctxt_lock);
+ if (cmd == QDMA_CTXT_WRITE) {
+ ret = qdma_context_write_data(qdev, ctxt);
+ if (ret)
+ goto failed;
+ }
+
+ ret = qdma_context_cmd_execute(qdev, type, cmd, index);
+ if (ret)
+ goto failed;
+
+ if (cmd == QDMA_CTXT_READ) {
+ ret = qdma_reg_read(qdev, ctxt, QDMA_REGO_CTXT_DATA);
+ if (ret)
+ goto failed;
+ }
+
+failed:
+ mutex_unlock(&qdev->ctxt_lock);
+
+ return ret;
+}
+
+static int qdma_check_queue_status(struct qdma_device *qdev,
+ enum dma_transfer_direction dir, u16 qid)
+{
+ u32 status, data[QDMA_CTXT_REGMAP_LEN] = {0};
+ enum qdma_ctxt_type type;
+ int ret;
+
+ if (dir == DMA_MEM_TO_DEV)
+ type = QDMA_CTXT_DESC_SW_H2C;
+ else
+ type = QDMA_CTXT_DESC_SW_C2H;
+ ret = qdma_prog_context(qdev, type, QDMA_CTXT_READ, qid, data);
+ if (ret)
+ return ret;
+
+ status = qdma_get_field(qdev, data, QDMA_REGF_QUEUE_ENABLE);
+ if (status) {
+ qdma_err(qdev, "queue %d already in use", qid);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int qdma_clear_queue_context(const struct qdma_queue *queue)
+{
+ static const enum qdma_ctxt_type h2c_types[] = {
+ QDMA_CTXT_DESC_SW_H2C,
+ QDMA_CTXT_DESC_HW_H2C,
+ QDMA_CTXT_DESC_CR_H2C,
+ QDMA_CTXT_PFTCH,
+ };
+ static const enum qdma_ctxt_type c2h_types[] = {
+ QDMA_CTXT_DESC_SW_C2H,
+ QDMA_CTXT_DESC_HW_C2H,
+ QDMA_CTXT_DESC_CR_C2H,
+ QDMA_CTXT_PFTCH,
+ };
+ struct qdma_device *qdev = queue->qdev;
+ const enum qdma_ctxt_type *type;
+ int ret, num, i;
+
+ if (queue->dir == DMA_MEM_TO_DEV) {
+ type = h2c_types;
+ num = ARRAY_SIZE(h2c_types);
+ } else {
+ type = c2h_types;
+ num = ARRAY_SIZE(c2h_types);
+ }
+ for (i = 0; i < num; i++) {
+ ret = qdma_prog_context(qdev, type[i], QDMA_CTXT_CLEAR,
+ queue->qid, NULL);
+ if (ret) {
+ qdma_err(qdev, "Failed to clear ctxt %d", type[i]);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int qdma_setup_fmap_context(struct qdma_device *qdev)
+{
+ u32 ctxt[QDMA_CTXT_REGMAP_LEN];
+ struct qdma_ctxt_fmap fmap;
+ int ret;
+
+ ret = qdma_prog_context(qdev, QDMA_CTXT_FMAP, QDMA_CTXT_CLEAR,
+ qdev->fid, NULL);
+ if (ret) {
+ qdma_err(qdev, "Failed clearing context");
+ return ret;
+ }
+
+ fmap.qbase = 0;
+ fmap.qmax = qdev->chan_num * 2;
+ qdma_prep_fmap_context(qdev, &fmap, ctxt);
+ ret = qdma_prog_context(qdev, QDMA_CTXT_FMAP, QDMA_CTXT_WRITE,
+ qdev->fid, ctxt);
+ if (ret)
+		qdma_err(qdev, "Failed to setup fmap, ret %d", ret);
+
+ return ret;
+}
+
+static int qdma_setup_queue_context(struct qdma_device *qdev,
+ const struct qdma_ctxt_sw_desc *sw_desc,
+ enum dma_transfer_direction dir, u16 qid)
+{
+ u32 ctxt[QDMA_CTXT_REGMAP_LEN];
+ enum qdma_ctxt_type type;
+ int ret;
+
+ if (dir == DMA_MEM_TO_DEV)
+ type = QDMA_CTXT_DESC_SW_H2C;
+ else
+ type = QDMA_CTXT_DESC_SW_C2H;
+
+ qdma_prep_sw_desc_context(qdev, sw_desc, ctxt);
+ /* Setup SW descriptor context */
+ ret = qdma_prog_context(qdev, type, QDMA_CTXT_WRITE, qid, ctxt);
+ if (ret)
+		qdma_err(qdev, "Failed to setup SW desc ctxt for queue: %d", qid);
+
+ return ret;
+}
+
+/*
+ * Enable or disable memory-mapped DMA engines
+ * 1: enable, 0: disable
+ */
+static int qdma_sgdma_control(struct qdma_device *qdev, u32 ctrl)
+{
+ int ret;
+
+ ret = qdma_reg_write(qdev, &ctrl, QDMA_REGO_MM_H2C_CTRL);
+ ret |= qdma_reg_write(qdev, &ctrl, QDMA_REGO_MM_C2H_CTRL);
+
+ return ret;
+}
+
+static int qdma_get_hw_info(struct qdma_device *qdev)
+{
+ struct qdma_platdata *pdata = dev_get_platdata(&qdev->pdev->dev);
+ u32 value = 0;
+ int ret;
+
+ ret = qdma_reg_read(qdev, &value, QDMA_REGO_QUEUE_COUNT);
+ if (ret)
+ return ret;
+
+ value = qdma_get_field(qdev, &value, QDMA_REGF_QUEUE_COUNT) + 1;
+ if (pdata->max_mm_channels * 2 > value) {
+ qdma_err(qdev, "not enough hw queues %d", value);
+ return -EINVAL;
+ }
+ qdev->chan_num = pdata->max_mm_channels;
+
+ ret = qdma_reg_read(qdev, &qdev->fid, QDMA_REGO_FUNC_ID);
+ if (ret)
+ return ret;
+
+ qdma_info(qdev, "max channel %d, function id %d",
+ qdev->chan_num, qdev->fid);
+
+ return 0;
+}
+
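+/*
+ * Dynamic queue pointer updates: the PIDX write also sets
+ * QDMA_QUEUE_ARM_BIT so the queue interrupt is re-armed along with the
+ * new producer index, and the CIDX write carries the interrupt ring
+ * index in its upper 16 bits.
+ */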
+static inline int qdma_update_pidx(const struct qdma_queue *queue, u16 pidx)
+{
+ struct qdma_device *qdev = queue->qdev;
+
+ return regmap_write(qdev->regmap, queue->pidx_reg,
+ pidx | QDMA_QUEUE_ARM_BIT);
+}
+
+static inline int qdma_update_cidx(const struct qdma_queue *queue,
+ u16 ridx, u16 cidx)
+{
+ struct qdma_device *qdev = queue->qdev;
+
+ return regmap_write(qdev->regmap, queue->cidx_reg,
+ ((u32)ridx << 16) | cidx);
+}
+
+/**
+ * qdma_free_vdesc - Free descriptor
+ * @vdesc: Virtual DMA descriptor
+ */
+static void qdma_free_vdesc(struct virt_dma_desc *vdesc)
+{
+ struct qdma_mm_vdesc *vd = to_qdma_vdesc(vdesc);
+
+ kfree(vd);
+}
+
+static int qdma_alloc_queues(struct qdma_device *qdev,
+ enum dma_transfer_direction dir)
+{
+ struct qdma_queue *q, **queues;
+ u32 i, pidx_base;
+ int ret;
+
+ if (dir == DMA_MEM_TO_DEV) {
+ queues = &qdev->h2c_queues;
+ pidx_base = QDMA_REG_OFF(qdev, QDMA_REGO_H2C_PIDX);
+ } else {
+ queues = &qdev->c2h_queues;
+ pidx_base = QDMA_REG_OFF(qdev, QDMA_REGO_C2H_PIDX);
+ }
+
+ *queues = devm_kcalloc(&qdev->pdev->dev, qdev->chan_num, sizeof(*q),
+ GFP_KERNEL);
+ if (!*queues)
+ return -ENOMEM;
+
+ for (i = 0; i < qdev->chan_num; i++) {
+ ret = qdma_check_queue_status(qdev, dir, i);
+ if (ret)
+ return ret;
+
+ q = &(*queues)[i];
+ q->ring_size = QDMA_DEFAULT_RING_SIZE;
+ q->idx_mask = q->ring_size - 2;
+ q->qdev = qdev;
+ q->dir = dir;
+ q->qid = i;
+ q->pidx_reg = pidx_base + i * QDMA_DMAP_REG_STRIDE;
+ q->cidx_reg = QDMA_REG_OFF(qdev, QDMA_REGO_INTR_CIDX) +
+ i * QDMA_DMAP_REG_STRIDE;
+ q->vchan.desc_free = qdma_free_vdesc;
+ vchan_init(&q->vchan, &qdev->dma_dev);
+ }
+
+ return 0;
+}
+
+static int qdma_device_verify(struct qdma_device *qdev)
+{
+ u32 value;
+ int ret;
+
+ ret = regmap_read(qdev->regmap, QDMA_IDENTIFIER_REGOFF, &value);
+ if (ret)
+ return ret;
+
+ value = FIELD_GET(QDMA_IDENTIFIER_MASK, value);
+ if (value != QDMA_IDENTIFIER) {
+ qdma_err(qdev, "Invalid identifier");
+ return -ENODEV;
+ }
+ qdev->rfields = qdma_regfs_default;
+ qdev->roffs = qdma_regos_default;
+
+ return 0;
+}
+
+static int qdma_device_setup(struct qdma_device *qdev)
+{
+ u32 ring_sz = QDMA_DEFAULT_RING_SIZE;
+ int ret = 0;
+
+ ret = qdma_setup_fmap_context(qdev);
+ if (ret) {
+		qdma_err(qdev, "Failed to setup fmap context");
+ return ret;
+ }
+
+ /* Setup global ring buffer size at QDMA_DEFAULT_RING_ID index */
+ ret = qdma_reg_write(qdev, &ring_sz, QDMA_REGO_RING_SIZE);
+ if (ret) {
+ qdma_err(qdev, "Failed to setup ring %d of size %ld",
+ QDMA_DEFAULT_RING_ID, QDMA_DEFAULT_RING_SIZE);
+ return ret;
+ }
+
+ /* Enable memory-mapped DMA engine in both directions */
+ ret = qdma_sgdma_control(qdev, 1);
+ if (ret) {
+		qdma_err(qdev, "Failed to enable SGDMA, err %d", ret);
+ return ret;
+ }
+
+ ret = qdma_alloc_queues(qdev, DMA_MEM_TO_DEV);
+ if (ret) {
+ qdma_err(qdev, "Failed to alloc H2C queues, ret %d", ret);
+ return ret;
+ }
+
+ ret = qdma_alloc_queues(qdev, DMA_DEV_TO_MEM);
+ if (ret) {
+ qdma_err(qdev, "Failed to alloc C2H queues, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * qdma_free_queue_resources() - Free queue resources
+ * @chan: DMA channel
+ */
+static void qdma_free_queue_resources(struct dma_chan *chan)
+{
+ struct qdma_queue *queue = to_qdma_queue(chan);
+ struct qdma_device *qdev = queue->qdev;
+ struct qdma_platdata *pdata;
+
+ qdma_clear_queue_context(queue);
+ vchan_free_chan_resources(&queue->vchan);
+ pdata = dev_get_platdata(&qdev->pdev->dev);
+ dma_free_coherent(pdata->dma_dev, queue->ring_size * QDMA_MM_DESC_SIZE,
+ queue->desc_base, queue->dma_desc_base);
+}
+
+/**
+ * qdma_alloc_queue_resources() - Allocate queue resources
+ * @chan: DMA channel
+ */
+static int qdma_alloc_queue_resources(struct dma_chan *chan)
+{
+ struct qdma_queue *queue = to_qdma_queue(chan);
+ struct qdma_device *qdev = queue->qdev;
+ struct qdma_ctxt_sw_desc desc;
+ struct qdma_platdata *pdata;
+ size_t size;
+ int ret;
+
+ ret = qdma_clear_queue_context(queue);
+ if (ret)
+ return ret;
+
+ pdata = dev_get_platdata(&qdev->pdev->dev);
+ size = queue->ring_size * QDMA_MM_DESC_SIZE;
+ queue->desc_base = dma_alloc_coherent(pdata->dma_dev, size,
+ &queue->dma_desc_base,
+ GFP_KERNEL);
+ if (!queue->desc_base) {
+ qdma_err(qdev, "Failed to allocate descriptor ring");
+ return -ENOMEM;
+ }
+
+ /* Setup SW descriptor queue context for DMA memory map */
+ desc.vec = qdma_get_intr_ring_idx(qdev);
+ desc.desc_base = queue->dma_desc_base;
+ ret = qdma_setup_queue_context(qdev, &desc, queue->dir, queue->qid);
+ if (ret) {
+ qdma_err(qdev, "Failed to setup SW desc ctxt for %s",
+ chan->name);
+ dma_free_coherent(pdata->dma_dev, size, queue->desc_base,
+ queue->dma_desc_base);
+ return ret;
+ }
+
+ queue->pidx = 0;
+ queue->cidx = 0;
+
+ return 0;
+}
+
+static bool qdma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct qdma_queue *queue = to_qdma_queue(chan);
+ struct qdma_queue_info *info = param;
+
+ return info->dir == queue->dir;
+}
+
+static int qdma_xfer_start(struct qdma_queue *queue)
+{
+ struct qdma_device *qdev = queue->qdev;
+ int ret;
+
+ if (!vchan_next_desc(&queue->vchan))
+ return 0;
+
+	qdma_dbg(qdev, "Txn kickoff with PIDX %d for %s%d",
+ queue->issued_vdesc->pidx, CHAN_STR(queue), queue->qid);
+
+ ret = qdma_update_pidx(queue, queue->issued_vdesc->pidx);
+ if (ret) {
+ qdma_err(qdev, "Failed to update PIDX to %d for %s queue: %d",
+ queue->pidx, CHAN_STR(queue), queue->qid);
+ }
+
+ return ret;
+}
+
+static void qdma_issue_pending(struct dma_chan *chan)
+{
+ struct qdma_queue *queue = to_qdma_queue(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->vchan.lock, flags);
+ if (vchan_issue_pending(&queue->vchan)) {
+ if (queue->submitted_vdesc) {
+ queue->issued_vdesc = queue->submitted_vdesc;
+ queue->submitted_vdesc = NULL;
+ }
+ qdma_xfer_start(queue);
+ }
+
+ spin_unlock_irqrestore(&queue->vchan.lock, flags);
+}
+
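+/*
+ * Fetch the next free MM descriptor slot, or NULL when the ring is
+ * full.  idx_mask is ring_size - 2: QDMA_DEFAULT_RING_SIZE is a power
+ * of two plus one (the extra slot being reserved for the hardware
+ * writeback/status entry), so the usable indices wrap at a power-of-two
+ * boundary and one slot is always left free to tell a full ring from an
+ * empty one.
+ */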
+static struct qdma_mm_desc *qdma_get_desc(struct qdma_queue *q)
+{
+ struct qdma_mm_desc *desc;
+
+ if (((q->pidx + 1) & q->idx_mask) == q->cidx)
+ return NULL;
+
+ desc = q->desc_base + q->pidx;
+ q->pidx = (q->pidx + 1) & q->idx_mask;
+
+ return desc;
+}
+
+static int qdma_hw_enqueue(struct qdma_queue *q, struct qdma_mm_vdesc *vdesc)
+{
+ struct qdma_mm_desc *desc;
+ struct scatterlist *sg;
+ u64 addr, *src, *dst;
+ u32 rest, len;
+ int ret = 0;
+ u32 i;
+
+ if (!vdesc->sg_len)
+ return 0;
+
+ if (q->dir == DMA_MEM_TO_DEV) {
+ dst = &vdesc->dev_addr;
+ src = &addr;
+ } else {
+ dst = &addr;
+ src = &vdesc->dev_addr;
+ }
+
+ for_each_sg(vdesc->sgl, sg, vdesc->sg_len, i) {
+ addr = sg_dma_address(sg) + vdesc->sg_off;
+ rest = sg_dma_len(sg) - vdesc->sg_off;
+ while (rest) {
+ len = min_t(u32, rest, QDMA_MM_DESC_MAX_LEN);
+ desc = qdma_get_desc(q);
+ if (!desc) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ desc->src_addr = cpu_to_le64(*src);
+ desc->dst_addr = cpu_to_le64(*dst);
+ desc->len = cpu_to_le32(len);
+
+ vdesc->dev_addr += len;
+ vdesc->sg_off += len;
+ vdesc->pending_descs++;
+ addr += len;
+ rest -= len;
+ }
+ vdesc->sg_off = 0;
+ }
+out:
+ vdesc->sg_len -= i;
+ vdesc->pidx = q->pidx;
+ return ret;
+}
+
+static void qdma_fill_pending_vdesc(struct qdma_queue *q)
+{
+ struct virt_dma_chan *vc = &q->vchan;
+ struct qdma_mm_vdesc *vdesc = NULL;
+ struct virt_dma_desc *vd;
+ int ret;
+
+ if (!list_empty(&vc->desc_issued)) {
+ vd = &q->issued_vdesc->vdesc;
+ list_for_each_entry_from(vd, &vc->desc_issued, node) {
+ vdesc = to_qdma_vdesc(vd);
+ ret = qdma_hw_enqueue(q, vdesc);
+ if (ret) {
+ q->issued_vdesc = vdesc;
+ return;
+ }
+ }
+ q->issued_vdesc = vdesc;
+ }
+
+ if (list_empty(&vc->desc_submitted))
+ return;
+
+ if (q->submitted_vdesc)
+ vd = &q->submitted_vdesc->vdesc;
+ else
+ vd = list_first_entry(&vc->desc_submitted, typeof(*vd), node);
+
+ list_for_each_entry_from(vd, &vc->desc_submitted, node) {
+ vdesc = to_qdma_vdesc(vd);
+ ret = qdma_hw_enqueue(q, vdesc);
+ if (ret)
+ break;
+ }
+ q->submitted_vdesc = vdesc;
+}
+
+static dma_cookie_t qdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct virt_dma_chan *vc = to_virt_chan(tx->chan);
+ struct qdma_queue *q = to_qdma_queue(&vc->chan);
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ vd = container_of(tx, struct virt_dma_desc, tx);
+ spin_lock_irqsave(&vc->lock, flags);
+ cookie = dma_cookie_assign(tx);
+
+ list_move_tail(&vd->node, &vc->desc_submitted);
+ qdma_fill_pending_vdesc(q);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ return cookie;
+}
+
+static struct dma_async_tx_descriptor *
+qdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct qdma_queue *q = to_qdma_queue(chan);
+ struct dma_async_tx_descriptor *tx;
+ struct qdma_mm_vdesc *vdesc;
+
+ vdesc = kzalloc(sizeof(*vdesc), GFP_NOWAIT);
+ if (!vdesc)
+ return NULL;
+ vdesc->sgl = sgl;
+ vdesc->sg_len = sg_len;
+ if (dir == DMA_MEM_TO_DEV)
+ vdesc->dev_addr = q->cfg.dst_addr;
+ else
+ vdesc->dev_addr = q->cfg.src_addr;
+
+ tx = vchan_tx_prep(&q->vchan, &vdesc->vdesc, flags);
+ tx->tx_submit = qdma_tx_submit;
+
+ return tx;
+}
+
+static int qdma_device_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct qdma_queue *q = to_qdma_queue(chan);
+
+ memcpy(&q->cfg, cfg, sizeof(*cfg));
+
+ return 0;
+}
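+
+/*
+ * Illustrative sketch (not part of the driver): how a client would
+ * drive one of these MM channels through the standard dmaengine slave
+ * API.  The channel name "h2c" is hypothetical and would come from the
+ * qdma_platdata device_map of the parent device; sgl is assumed to be
+ * already DMA-mapped for the device.
+ */
+static int qdma_example_write_to_card(struct device *dev,
+				      struct scatterlist *sgl, int nents,
+				      dma_addr_t card_addr)
+{
+	struct dma_slave_config cfg = { .dst_addr = card_addr };
+	struct dma_async_tx_descriptor *tx;
+	struct dma_chan *chan;
+	dma_cookie_t cookie;
+	int ret = 0;
+
+	chan = dma_request_chan(dev, "h2c");	/* hypothetical slave name */
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
+
+	dmaengine_slave_config(chan, &cfg);
+	tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
+				     DMA_PREP_INTERRUPT);
+	if (!tx) {
+		ret = -EIO;
+		goto out;
+	}
+
+	cookie = dmaengine_submit(tx);
+	dma_async_issue_pending(chan);
+	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
+		ret = -EIO;
+out:
+	dma_release_channel(chan);
+	return ret;
+}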
+
+static int qdma_arm_err_intr(const struct qdma_device *qdev)
+{
+ u32 value = 0;
+
+ qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_FUNC, qdev->fid);
+ qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_VEC, qdev->err_irq_idx);
+ qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_ARM, 1);
+
+ return qdma_reg_write(qdev, &value, QDMA_REGO_ERR_INT);
+}
+
+static irqreturn_t qdma_error_isr(int irq, void *data)
+{
+ struct qdma_device *qdev = data;
+ u32 err_stat = 0;
+ int ret;
+
+ ret = qdma_reg_read(qdev, &err_stat, QDMA_REGO_ERR_STAT);
+ if (ret) {
+ qdma_err(qdev, "read error state failed, ret %d", ret);
+ goto out;
+ }
+
+ qdma_err(qdev, "global error %d", err_stat);
+ ret = qdma_reg_write(qdev, &err_stat, QDMA_REGO_ERR_STAT);
+ if (ret)
+ qdma_err(qdev, "clear error state failed, ret %d", ret);
+
+out:
+ qdma_arm_err_intr(qdev);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qdma_queue_isr(int irq, void *data)
+{
+ struct qdma_intr_ring *intr = data;
+ struct qdma_queue *q = NULL;
+ struct qdma_device *qdev;
+ u32 index, comp_desc;
+ u64 intr_ent;
+ u8 color;
+ int ret;
+ u16 qid;
+
+ qdev = intr->qdev;
+ index = intr->cidx;
+ while (1) {
+ struct virt_dma_desc *vd;
+ struct qdma_mm_vdesc *vdesc;
+ unsigned long flags;
+ u32 cidx;
+
+ intr_ent = le64_to_cpu(intr->base[index]);
+ color = FIELD_GET(QDMA_INTR_MASK_COLOR, intr_ent);
+ if (color != intr->color)
+ break;
+
+ qid = FIELD_GET(QDMA_INTR_MASK_QID, intr_ent);
+ if (FIELD_GET(QDMA_INTR_MASK_TYPE, intr_ent))
+ q = qdev->c2h_queues;
+ else
+ q = qdev->h2c_queues;
+ q += qid;
+
+ cidx = FIELD_GET(QDMA_INTR_MASK_CIDX, intr_ent);
+
+ spin_lock_irqsave(&q->vchan.lock, flags);
+ comp_desc = (cidx - q->cidx) & q->idx_mask;
+
+ vd = vchan_next_desc(&q->vchan);
+ if (!vd)
+ goto skip;
+
+ vdesc = to_qdma_vdesc(vd);
+ while (comp_desc > vdesc->pending_descs) {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ comp_desc -= vdesc->pending_descs;
+ vd = vchan_next_desc(&q->vchan);
+ vdesc = to_qdma_vdesc(vd);
+ }
+ vdesc->pending_descs -= comp_desc;
+ if (!vdesc->pending_descs && QDMA_VDESC_QUEUED(vdesc)) {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ }
+ q->cidx = cidx;
+
+ qdma_fill_pending_vdesc(q);
+ qdma_xfer_start(q);
+
+skip:
+ spin_unlock_irqrestore(&q->vchan.lock, flags);
+
+ /*
+ * Wrap the index value and flip the expected color value if
+ * interrupt aggregation PIDX has wrapped around.
+ */
+ index++;
+ index &= QDMA_INTR_RING_IDX_MASK;
+ if (!index)
+ intr->color = !intr->color;
+ }
+
+ /*
+ * Update the software interrupt aggregation ring CIDX if a valid entry
+ * was found.
+ */
+ if (q) {
+ qdma_dbg(qdev, "update intr ring%d %d", intr->ridx, index);
+
+ /*
+ * Record the last read index of status descriptor from the
+ * interrupt aggregation ring.
+ */
+ intr->cidx = index;
+
+ ret = qdma_update_cidx(q, intr->ridx, index);
+ if (ret) {
+ qdma_err(qdev, "Failed to update IRQ CIDX");
+ return IRQ_NONE;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int qdma_init_error_irq(struct qdma_device *qdev)
+{
+ struct device *dev = &qdev->pdev->dev;
+ int ret;
+ u32 vec;
+
+ vec = qdev->queue_irq_start - 1;
+
+ ret = devm_request_threaded_irq(dev, vec, NULL, qdma_error_isr,
+ IRQF_ONESHOT, "amd-qdma-error", qdev);
+ if (ret) {
+ qdma_err(qdev, "Failed to request error IRQ vector: %d", vec);
+ return ret;
+ }
+
+ ret = qdma_arm_err_intr(qdev);
+ if (ret)
+ qdma_err(qdev, "Failed to arm err interrupt, ret %d", ret);
+
+ return ret;
+}
+
+static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
+{
+ struct qdma_platdata *pdata = dev_get_platdata(&qdev->pdev->dev);
+ struct device *dev = &qdev->pdev->dev;
+ u32 ctxt[QDMA_CTXT_REGMAP_LEN];
+ struct qdma_intr_ring *ring;
+ struct qdma_ctxt_intr intr_ctxt;
+ u32 vector;
+ int ret, i;
+
+ qdev->qintr_ring_num = qdev->queue_irq_num;
+ qdev->qintr_rings = devm_kcalloc(dev, qdev->qintr_ring_num,
+ sizeof(*qdev->qintr_rings),
+ GFP_KERNEL);
+ if (!qdev->qintr_rings)
+ return -ENOMEM;
+
+ vector = qdev->queue_irq_start;
+ for (i = 0; i < qdev->qintr_ring_num; i++, vector++) {
+ ring = &qdev->qintr_rings[i];
+ ring->qdev = qdev;
+ ring->msix_id = qdev->err_irq_idx + i + 1;
+ ring->ridx = i;
+ ring->color = 1;
+ ring->base = dmam_alloc_coherent(pdata->dma_dev,
+ QDMA_INTR_RING_SIZE,
+ &ring->dev_base, GFP_KERNEL);
+ if (!ring->base) {
+ qdma_err(qdev, "Failed to alloc intr ring %d", i);
+ return -ENOMEM;
+ }
+ intr_ctxt.agg_base = QDMA_INTR_RING_BASE(ring->dev_base);
+ intr_ctxt.size = (QDMA_INTR_RING_SIZE - 1) / 4096;
+ intr_ctxt.vec = ring->msix_id;
+ intr_ctxt.valid = true;
+ intr_ctxt.color = true;
+ ret = qdma_prog_context(qdev, QDMA_CTXT_INTR_COAL,
+ QDMA_CTXT_CLEAR, ring->ridx, NULL);
+ if (ret) {
+ qdma_err(qdev, "Failed clear intr ctx, ret %d", ret);
+ return ret;
+ }
+
+ qdma_prep_intr_context(qdev, &intr_ctxt, ctxt);
+ ret = qdma_prog_context(qdev, QDMA_CTXT_INTR_COAL,
+ QDMA_CTXT_WRITE, ring->ridx, ctxt);
+ if (ret) {
+ qdma_err(qdev, "Failed setup intr ctx, ret %d", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(dev, vector, NULL,
+ qdma_queue_isr, IRQF_ONESHOT,
+ "amd-qdma-queue", ring);
+ if (ret) {
+ qdma_err(qdev, "Failed to request irq %d", vector);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int qdma_intr_init(struct qdma_device *qdev)
+{
+ int ret;
+
+ ret = qdma_init_error_irq(qdev);
+ if (ret) {
+ qdma_err(qdev, "Failed to init error IRQs, ret %d", ret);
+ return ret;
+ }
+
+ ret = qdmam_alloc_qintr_rings(qdev);
+ if (ret) {
+ qdma_err(qdev, "Failed to init queue IRQs, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void amd_qdma_remove(struct platform_device *pdev)
+{
+ struct qdma_device *qdev = platform_get_drvdata(pdev);
+
+ qdma_sgdma_control(qdev, 0);
+ dma_async_device_unregister(&qdev->dma_dev);
+
+ mutex_destroy(&qdev->ctxt_lock);
+}
+
+static int amd_qdma_probe(struct platform_device *pdev)
+{
+ struct qdma_platdata *pdata = dev_get_platdata(&pdev->dev);
+ struct qdma_device *qdev;
+ struct resource *res;
+ void __iomem *regs;
+ int ret;
+
+ qdev = devm_kzalloc(&pdev->dev, sizeof(*qdev), GFP_KERNEL);
+ if (!qdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, qdev);
+ qdev->pdev = pdev;
+ mutex_init(&qdev->ctxt_lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ qdma_err(qdev, "Failed to get IRQ resource");
+ ret = -ENODEV;
+ goto failed;
+ }
+ qdev->err_irq_idx = pdata->irq_index;
+ qdev->queue_irq_start = res->start + 1;
+ qdev->queue_irq_num = resource_size(res) - 1;
+
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(regs)) {
+ ret = PTR_ERR(regs);
+ qdma_err(qdev, "Failed to map IO resource, err %d", ret);
+ goto failed;
+ }
+
+ qdev->regmap = devm_regmap_init_mmio(&pdev->dev, regs,
+ &qdma_regmap_config);
+ if (IS_ERR(qdev->regmap)) {
+ ret = PTR_ERR(qdev->regmap);
+ qdma_err(qdev, "Regmap init failed, err %d", ret);
+ goto failed;
+ }
+
+ ret = qdma_device_verify(qdev);
+ if (ret)
+ goto failed;
+
+ ret = qdma_get_hw_info(qdev);
+ if (ret)
+ goto failed;
+
+ INIT_LIST_HEAD(&qdev->dma_dev.channels);
+
+ ret = qdma_device_setup(qdev);
+ if (ret)
+ goto failed;
+
+ ret = qdma_intr_init(qdev);
+ if (ret) {
+ qdma_err(qdev, "Failed to initialize IRQs %d", ret);
+ goto failed_disable_engine;
+ }
+
+ dma_cap_set(DMA_SLAVE, qdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_PRIVATE, qdev->dma_dev.cap_mask);
+
+ qdev->dma_dev.dev = &pdev->dev;
+ qdev->dma_dev.filter.map = pdata->device_map;
+ qdev->dma_dev.filter.mapcnt = qdev->chan_num * 2;
+ qdev->dma_dev.filter.fn = qdma_filter_fn;
+ qdev->dma_dev.device_alloc_chan_resources = qdma_alloc_queue_resources;
+ qdev->dma_dev.device_free_chan_resources = qdma_free_queue_resources;
+ qdev->dma_dev.device_prep_slave_sg = qdma_prep_device_sg;
+ qdev->dma_dev.device_config = qdma_device_config;
+ qdev->dma_dev.device_issue_pending = qdma_issue_pending;
+ qdev->dma_dev.device_tx_status = dma_cookie_status;
+ qdev->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+
+ ret = dma_async_device_register(&qdev->dma_dev);
+ if (ret) {
+ qdma_err(qdev, "Failed to register AMD QDMA: %d", ret);
+ goto failed_disable_engine;
+ }
+
+ return 0;
+
+failed_disable_engine:
+ qdma_sgdma_control(qdev, 0);
+failed:
+ mutex_destroy(&qdev->ctxt_lock);
+ qdma_err(qdev, "Failed to probe AMD QDMA driver");
+ return ret;
+}
+
+static struct platform_driver amd_qdma_driver = {
+ .driver = {
+ .name = "amd-qdma",
+ },
+ .probe = amd_qdma_probe,
+ .remove = amd_qdma_remove,
+};
+
+module_platform_driver(amd_qdma_driver);
+
+MODULE_DESCRIPTION("AMD QDMA driver");
+MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/amd/qdma/qdma.h b/drivers/dma/amd/qdma/qdma.h
new file mode 100644
index 000000000000..94089f1f0c11
--- /dev/null
+++ b/drivers/dma/amd/qdma/qdma.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * DMA header for AMD Queue-based DMA Subsystem
+ *
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef __QDMA_H
+#define __QDMA_H
+
+#include <linux/bitfield.h>
+#include <linux/dmaengine.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "../../virt-dma.h"
+
+#define DISABLE 0
+#define ENABLE 1
+
+#define QDMA_MIN_IRQ 3
+#define QDMA_INTR_NAME_MAX_LEN 30
+#define QDMA_INTR_PREFIX "amd-qdma"
+
+#define QDMA_IDENTIFIER 0x1FD3
+#define QDMA_DEFAULT_RING_SIZE (BIT(10) + 1)
+#define QDMA_DEFAULT_RING_ID 0
+#define QDMA_POLL_INTRVL_US 10 /* 10us */
+#define QDMA_POLL_TIMEOUT_US (500 * 1000) /* 500ms */
+#define QDMA_DMAP_REG_STRIDE 16
+#define QDMA_CTXT_REGMAP_LEN 8 /* 8 regs */
+#define QDMA_MM_DESC_SIZE 32 /* Bytes */
+#define QDMA_MM_DESC_LEN_BITS 28
+#define QDMA_MM_DESC_MAX_LEN (BIT(QDMA_MM_DESC_LEN_BITS) - 1)
+#define QDMA_MIN_DMA_ALLOC_SIZE 4096
+#define QDMA_INTR_RING_SIZE BIT(13)
+#define QDMA_INTR_RING_IDX_MASK GENMASK(9, 0)
+#define QDMA_INTR_RING_BASE(_addr) ((_addr) >> 12)
+
+#define QDMA_IDENTIFIER_REGOFF 0x0
+#define QDMA_IDENTIFIER_MASK GENMASK(31, 16)
+#define QDMA_QUEUE_ARM_BIT BIT(16)
+
+#define qdma_err(qdev, fmt, args...) \
+ dev_err(&(qdev)->pdev->dev, fmt, ##args)
+
+#define qdma_dbg(qdev, fmt, args...) \
+ dev_dbg(&(qdev)->pdev->dev, fmt, ##args)
+
+#define qdma_info(qdev, fmt, args...) \
+ dev_info(&(qdev)->pdev->dev, fmt, ##args)
+
+enum qdma_reg_fields {
+ QDMA_REGF_IRQ_ENABLE,
+ QDMA_REGF_WBK_ENABLE,
+ QDMA_REGF_WBI_CHECK,
+ QDMA_REGF_IRQ_ARM,
+ QDMA_REGF_IRQ_VEC,
+ QDMA_REGF_IRQ_AGG,
+ QDMA_REGF_WBI_INTVL_ENABLE,
+ QDMA_REGF_MRKR_DISABLE,
+ QDMA_REGF_QUEUE_ENABLE,
+ QDMA_REGF_QUEUE_MODE,
+ QDMA_REGF_DESC_BASE,
+ QDMA_REGF_DESC_SIZE,
+ QDMA_REGF_RING_ID,
+ QDMA_REGF_CMD_INDX,
+ QDMA_REGF_CMD_CMD,
+ QDMA_REGF_CMD_TYPE,
+ QDMA_REGF_CMD_BUSY,
+ QDMA_REGF_QUEUE_COUNT,
+ QDMA_REGF_QUEUE_MAX,
+ QDMA_REGF_QUEUE_BASE,
+ QDMA_REGF_FUNCTION_ID,
+ QDMA_REGF_INTR_AGG_BASE,
+ QDMA_REGF_INTR_VECTOR,
+ QDMA_REGF_INTR_SIZE,
+ QDMA_REGF_INTR_VALID,
+ QDMA_REGF_INTR_COLOR,
+ QDMA_REGF_INTR_FUNCTION_ID,
+ QDMA_REGF_ERR_INT_FUNC,
+ QDMA_REGF_ERR_INT_VEC,
+ QDMA_REGF_ERR_INT_ARM,
+ QDMA_REGF_MAX
+};
+
+enum qdma_regs {
+ QDMA_REGO_CTXT_DATA,
+ QDMA_REGO_CTXT_CMD,
+ QDMA_REGO_CTXT_MASK,
+ QDMA_REGO_MM_H2C_CTRL,
+ QDMA_REGO_MM_C2H_CTRL,
+ QDMA_REGO_QUEUE_COUNT,
+ QDMA_REGO_RING_SIZE,
+ QDMA_REGO_H2C_PIDX,
+ QDMA_REGO_C2H_PIDX,
+ QDMA_REGO_INTR_CIDX,
+ QDMA_REGO_FUNC_ID,
+ QDMA_REGO_ERR_INT,
+ QDMA_REGO_ERR_STAT,
+ QDMA_REGO_MAX
+};
+
+struct qdma_reg_field {
+ u16 lsb; /* Least significant bit of field */
+ u16 msb; /* Most significant bit of field */
+};
+
+struct qdma_reg {
+ u32 off;
+ u32 count;
+};
+
+#define QDMA_REGF(_msb, _lsb) { \
+ .lsb = (_lsb), \
+ .msb = (_msb), \
+}
+
+#define QDMA_REGO(_off, _count) { \
+ .off = (_off), \
+ .count = (_count), \
+}
+
+enum qdma_desc_size {
+ QDMA_DESC_SIZE_8B,
+ QDMA_DESC_SIZE_16B,
+ QDMA_DESC_SIZE_32B,
+ QDMA_DESC_SIZE_64B,
+};
+
+enum qdma_queue_op_mode {
+ QDMA_QUEUE_OP_STREAM,
+ QDMA_QUEUE_OP_MM,
+};
+
+enum qdma_ctxt_type {
+ QDMA_CTXT_DESC_SW_C2H,
+ QDMA_CTXT_DESC_SW_H2C,
+ QDMA_CTXT_DESC_HW_C2H,
+ QDMA_CTXT_DESC_HW_H2C,
+ QDMA_CTXT_DESC_CR_C2H,
+ QDMA_CTXT_DESC_CR_H2C,
+ QDMA_CTXT_WRB,
+ QDMA_CTXT_PFTCH,
+ QDMA_CTXT_INTR_COAL,
+ QDMA_CTXT_RSVD,
+ QDMA_CTXT_HOST_PROFILE,
+ QDMA_CTXT_TIMER,
+ QDMA_CTXT_FMAP,
+ QDMA_CTXT_FNC_STS,
+};
+
+enum qdma_ctxt_cmd {
+ QDMA_CTXT_CLEAR,
+ QDMA_CTXT_WRITE,
+ QDMA_CTXT_READ,
+ QDMA_CTXT_INVALIDATE,
+ QDMA_CTXT_MAX
+};
+
+struct qdma_ctxt_sw_desc {
+ u64 desc_base;
+ u16 vec;
+};
+
+struct qdma_ctxt_intr {
+ u64 agg_base;
+ u16 vec;
+ u32 size;
+ bool valid;
+ bool color;
+};
+
+struct qdma_ctxt_fmap {
+ u16 qbase;
+ u16 qmax;
+};
+
+struct qdma_device;
+
+struct qdma_mm_desc {
+ __le64 src_addr;
+ __le32 len;
+ __le32 reserved1;
+ __le64 dst_addr;
+ __le64 reserved2;
+} __packed;
+
+struct qdma_mm_vdesc {
+ struct virt_dma_desc vdesc;
+ struct qdma_queue *queue;
+ struct scatterlist *sgl;
+ u64 sg_off;
+ u32 sg_len;
+ u64 dev_addr;
+ u32 pidx;
+ u32 pending_descs;
+ struct dma_slave_config cfg;
+};
+
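+/*
+ * A vdesc counts as fully queued once qdma_hw_enqueue() has consumed
+ * all of its scatterlist entries (sg_len == 0); the queue ISR then
+ * completes it once pending_descs drains to zero.
+ */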
+#define QDMA_VDESC_QUEUED(vdesc) (!(vdesc)->sg_len)
+
+struct qdma_queue {
+ struct qdma_device *qdev;
+ struct virt_dma_chan vchan;
+ enum dma_transfer_direction dir;
+ struct dma_slave_config cfg;
+ struct qdma_mm_desc *desc_base;
+ struct qdma_mm_vdesc *submitted_vdesc;
+ struct qdma_mm_vdesc *issued_vdesc;
+ dma_addr_t dma_desc_base;
+ u32 pidx_reg;
+ u32 cidx_reg;
+ u32 ring_size;
+ u32 idx_mask;
+ u16 qid;
+ u32 pidx;
+ u32 cidx;
+};
+
+struct qdma_intr_ring {
+ struct qdma_device *qdev;
+ __le64 *base;
+ dma_addr_t dev_base;
+ char msix_name[QDMA_INTR_NAME_MAX_LEN];
+ u32 msix_vector;
+ u16 msix_id;
+ u32 ring_size;
+ u16 ridx;
+ u16 cidx;
+ u8 color;
+};
+
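+/*
+ * Layout of one 64-bit entry in the interrupt aggregation ring;
+ * qdma_queue_isr() decodes the color, queue type (H2C/C2H), queue id
+ * and consumer index from these fields.
+ */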
+#define QDMA_INTR_MASK_PIDX GENMASK_ULL(15, 0)
+#define QDMA_INTR_MASK_CIDX GENMASK_ULL(31, 16)
+#define QDMA_INTR_MASK_DESC_COLOR GENMASK_ULL(32, 32)
+#define QDMA_INTR_MASK_STATE GENMASK_ULL(34, 33)
+#define QDMA_INTR_MASK_ERROR GENMASK_ULL(36, 35)
+#define QDMA_INTR_MASK_TYPE GENMASK_ULL(38, 38)
+#define QDMA_INTR_MASK_QID GENMASK_ULL(62, 39)
+#define QDMA_INTR_MASK_COLOR GENMASK_ULL(63, 63)
+
+struct qdma_device {
+ struct platform_device *pdev;
+ struct dma_device dma_dev;
+ struct regmap *regmap;
+ struct mutex ctxt_lock; /* protect ctxt registers */
+ const struct qdma_reg_field *rfields;
+ const struct qdma_reg *roffs;
+ struct qdma_queue *h2c_queues;
+ struct qdma_queue *c2h_queues;
+ struct qdma_intr_ring *qintr_rings;
+ u32 qintr_ring_num;
+ u32 qintr_ring_idx;
+ u32 chan_num;
+ u32 queue_irq_start;
+ u32 queue_irq_num;
+ u32 err_irq_idx;
+ u32 fid;
+};
+
+extern const struct qdma_reg qdma_regos_default[QDMA_REGO_MAX];
+extern const struct qdma_reg_field qdma_regfs_default[QDMA_REGF_MAX];
+
+#endif /* __QDMA_H */