Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/idma64.c                4
-rw-r--r--  drivers/dma/idxd/cdev.c            82
-rw-r--r--  drivers/dma/idxd/debugfs.c          4
-rw-r--r--  drivers/dma/idxd/device.c           8
-rw-r--r--  drivers/dma/idxd/idxd.h             5
-rw-r--r--  drivers/dma/idxd/init.c             6
-rw-r--r--  drivers/dma/idxd/irq.c              4
-rw-r--r--  drivers/dma/idxd/perfmon.c          9
-rw-r--r--  drivers/dma/idxd/registers.h        3
-rw-r--r--  drivers/dma/idxd/sysfs.c           27
-rw-r--r--  drivers/dma/owl-dma.c               4
-rw-r--r--  drivers/dma/pl330.c                 3
-rw-r--r--  drivers/dma/tegra186-gpc-dma.c      3
-rw-r--r--  drivers/dma/xilinx/xdma-regs.h      3
-rw-r--r--  drivers/dma/xilinx/xdma.c          42
-rw-r--r--  drivers/dma/xilinx/xilinx_dpdma.c  13
16 files changed, 173 insertions, 47 deletions
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 78a938969d7d..1398814d8fbb 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -171,6 +171,10 @@ static irqreturn_t idma64_irq(int irq, void *dev)
u32 status_err;
unsigned short i;
+ /* Since IRQ may be shared, check if DMA controller is powered on */
+ if (status == GENMASK(31, 0))
+ return IRQ_NONE;
+
dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
/* Check if we have any interrupt from the DMA controller */
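A read of all ones from a memory-mapped register usually means the device is powered down or absent from the bus, which is why the hunk above treats 0xffffffff as "not our interrupt" on a shared line. A minimal sketch of the pattern, with hypothetical foo_* names (only GENMASK(), readl() and the IRQ return codes are real kernel APIs):

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;
	u32 status = readl(foo->regs + FOO_STATUS);

	/* All ones: device powered off or gone; let other handlers run. */
	if (status == GENMASK(31, 0))
		return IRQ_NONE;

	/* ... normal interrupt handling ... */
	return IRQ_HANDLED;
}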
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 8078ab9acfbc..39935071174a 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -342,7 +342,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
if (!evl)
return;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = status.tail;
h = status.head;
@@ -354,9 +354,8 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
set_bit(h, evl->bmap);
h = (h + 1) % size;
}
- spin_unlock(&evl->lock);
-
drain_workqueue(wq->wq);
+ mutex_unlock(&evl->lock);
}
static int idxd_cdev_release(struct inode *node, struct file *filep)
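The spin_lock to mutex_lock conversion in this hunk is not cosmetic: drain_workqueue() can sleep, and the patch moves it inside the critical section so no new event-log work for the PASID can be queued while draining. Sleeping while holding a spinlock is forbidden, so the lock has to become a mutex. Schematically:

	/* Invalid: drain_workqueue() may sleep; spinlock holders must not. */
	spin_lock(&evl_lock);
	drain_workqueue(wq);
	spin_unlock(&evl_lock);

	/* Valid: a mutex holder is allowed to sleep. */
	mutex_lock(&evl_lock);
	drain_workqueue(wq);
	mutex_unlock(&evl_lock);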
@@ -401,6 +400,18 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
int rc;
dev_dbg(&pdev->dev, "%s called\n", __func__);
+
+ /*
+ * Due to an erratum in some of the devices supported by the driver,
+ * direct user submission to the device can be unsafe.
+ * (See the INTEL-SA-01084 security advisory)
+ *
+ * For the devices that exhibit this behavior, require that the user
+ * has CAP_SYS_RAWIO capabilities.
+ */
+ if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
rc = check_vma(wq, vma, __func__);
if (rc < 0)
return rc;
@@ -415,6 +426,70 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_page_prot);
}
+static int idxd_submit_user_descriptor(struct idxd_user_context *ctx,
+ struct dsa_hw_desc __user *udesc)
+{
+ struct idxd_wq *wq = ctx->wq;
+ struct idxd_dev *idxd_dev = &wq->idxd->idxd_dev;
+ const uint64_t comp_addr_align = is_dsa_dev(idxd_dev) ? 0x20 : 0x40;
+ void __iomem *portal = idxd_wq_portal_addr(wq);
+ struct dsa_hw_desc descriptor __aligned(64);
+ int rc;
+
+ rc = copy_from_user(&descriptor, udesc, sizeof(descriptor));
+ if (rc)
+ return -EFAULT;
+
+ /*
+ * DSA devices are capable of indirect ("batch") command submission.
+ * On devices where direct user submissions are not safe, we cannot
+ * allow this since there is no good way for us to verify these
+ * indirect commands.
+ */
+ if (is_dsa_dev(idxd_dev) && descriptor.opcode == DSA_OPCODE_BATCH &&
+ !wq->idxd->user_submission_safe)
+ return -EINVAL;
+ /*
+ * As per the programming specification, the completion address must be
+ * aligned to 32 or 64 bytes. If this is violated the hardware
+ * engine can get very confused (security issue).
+ */
+ if (!IS_ALIGNED(descriptor.completion_addr, comp_addr_align))
+ return -EINVAL;
+
+ if (wq_dedicated(wq))
+ iosubmit_cmds512(portal, &descriptor, 1);
+ else {
+ descriptor.priv = 0;
+ descriptor.pasid = ctx->pasid;
+ rc = idxd_enqcmds(wq, portal, &descriptor);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t len,
+ loff_t *unused)
+{
+ struct dsa_hw_desc __user *udesc = (struct dsa_hw_desc __user *)buf;
+ struct idxd_user_context *ctx = filp->private_data;
+ ssize_t written = 0;
+ int i;
+
+ for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) {
+ int rc = idxd_submit_user_descriptor(ctx, udesc + i);
+
+ if (rc)
+ return written ? written : rc;
+
+ written += sizeof(struct dsa_hw_desc);
+ }
+
+ return written;
+}
+
static __poll_t idxd_cdev_poll(struct file *filp,
struct poll_table_struct *wait)
{
@@ -437,6 +512,7 @@ static const struct file_operations idxd_cdev_fops = {
.open = idxd_cdev_open,
.release = idxd_cdev_release,
.mmap = idxd_cdev_mmap,
+ .write = idxd_cdev_write,
.poll = idxd_cdev_poll,
};
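With the new .write handler in place, userspace submits work by writing an array of struct dsa_hw_desc to the open character device; every sizeof(struct dsa_hw_desc) bytes written is one descriptor. A hedged userspace sketch, assuming a hypothetical already-configured work queue node at /dev/dsa/wq0.0 and ignoring completion-record polling (the descriptor fields and flags are from <uapi/linux/idxd.h>):

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <linux/idxd.h>

static int submit_memmove(int fd, uint64_t src, uint64_t dst,
			  uint32_t size, uint64_t comp_addr)
{
	struct dsa_hw_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.opcode = DSA_OPCODE_MEMMOVE;
	desc.flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
	desc.src_addr = src;
	desc.dst_addr = dst;
	desc.xfer_size = size;
	/* Must be 32-byte aligned on DSA (64 on IAX) or the write fails. */
	desc.completion_addr = comp_addr;

	return write(fd, &desc, sizeof(desc)) == sizeof(desc) ? 0 : -1;
}

Note that on devices marked !user_submission_safe the caller additionally needs CAP_SYS_RAWIO to mmap the portal, and DSA_OPCODE_BATCH descriptors are rejected outright by the write path.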
diff --git a/drivers/dma/idxd/debugfs.c b/drivers/dma/idxd/debugfs.c
index f3f25ee676f3..ad4245cb301d 100644
--- a/drivers/dma/idxd/debugfs.c
+++ b/drivers/dma/idxd/debugfs.c
@@ -66,7 +66,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
if (!evl || !evl->log)
return 0;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = evl_status.tail;
@@ -87,7 +87,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
dump_event_entry(idxd, s, i, &count, processed);
}
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
return 0;
}
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index ecfdf4a8f1f8..c41ef195eeb9 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -775,7 +775,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
goto err_alloc;
}
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
evl->log = addr;
evl->dma = dma_addr;
evl->log_size = size;
@@ -796,7 +796,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
gencfg.evl_en = 1;
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
return 0;
err_alloc:
@@ -819,7 +819,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
if (!gencfg.evl_en)
return;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
gencfg.evl_en = 0;
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
@@ -836,7 +836,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
evl_dma = evl->dma;
evl->log = NULL;
evl->size = IDXD_EVL_SIZE_MIN;
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
dma_free_coherent(dev, evl_log_size, evl_log, evl_dma);
}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index a4099a1e2340..868b724a3b75 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -288,12 +288,13 @@ struct idxd_driver_data {
int evl_cr_off;
int cr_status_off;
int cr_result_off;
+ bool user_submission_safe;
load_device_defaults_fn_t load_device_defaults;
};
struct idxd_evl {
/* Lock to protect event log access. */
- spinlock_t lock;
+ struct mutex lock;
void *log;
dma_addr_t dma;
/* Total size of event log = number of entries * entry size. */
@@ -374,6 +375,8 @@ struct idxd_device {
struct dentry *dbgfs_dir;
struct dentry *dbgfs_evl_file;
+
+ bool user_submission_safe;
};
static inline unsigned int evl_ent_size(struct idxd_device *idxd)
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 4954adc6bb60..a7295943fa22 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -47,6 +47,7 @@ static struct idxd_driver_data idxd_driver_data[] = {
.align = 32,
.dev_type = &dsa_device_type,
.evl_cr_off = offsetof(struct dsa_evl_entry, cr),
+ .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
.cr_status_off = offsetof(struct dsa_completion_record, status),
.cr_result_off = offsetof(struct dsa_completion_record, result),
},
@@ -57,6 +58,7 @@ static struct idxd_driver_data idxd_driver_data[] = {
.align = 64,
.dev_type = &iax_device_type,
.evl_cr_off = offsetof(struct iax_evl_entry, cr),
+ .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
.cr_status_off = offsetof(struct iax_completion_record, status),
.cr_result_off = offsetof(struct iax_completion_record, error_code),
.load_device_defaults = idxd_load_iaa_device_defaults,
@@ -354,7 +356,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
if (!evl)
return -ENOMEM;
- spin_lock_init(&evl->lock);
+ mutex_init(&evl->lock);
evl->size = IDXD_EVL_SIZE_MIN;
idxd_name = dev_name(idxd_confdev(idxd));
@@ -774,6 +776,8 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
idxd->hw.version);
+ idxd->user_submission_safe = data->user_submission_safe;
+
return 0;
err_dev_register:
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 348aa21389a9..8dc029c86551 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -363,7 +363,7 @@ static void process_evl_entries(struct idxd_device *idxd)
evl_status.bits = 0;
evl_status.int_pending = 1;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
/* Clear interrupt pending bit */
iowrite32(evl_status.bits_upper32,
idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
@@ -380,7 +380,7 @@ static void process_evl_entries(struct idxd_device *idxd)
evl_status.head = h;
iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
}
irqreturn_t idxd_misc_thread(int vec, void *data)
diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c
index fdda6d604262..5e94247e1ea7 100644
--- a/drivers/dma/idxd/perfmon.c
+++ b/drivers/dma/idxd/perfmon.c
@@ -528,14 +528,11 @@ static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
-
/* migrate events if there is a valid target */
- if (target < nr_cpu_ids)
+ if (target < nr_cpu_ids) {
cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
- else
- target = -1;
-
- perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+ perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+ }
return 0;
}
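cpumask_any_but() returns a value >= nr_cpu_ids when no other online CPU exists, so its result must be range-checked before use. The old code fell through and handed target = -1 to perf_pmu_migrate_context(); the fix migrates only when a valid target was found:

	unsigned int target = cpumask_any_but(cpu_online_mask, cpu);

	if (target < nr_cpu_ids) {
		/* a CPU remains online: move the PMU context there */
	} else {
		/* last CPU going down: nothing to migrate to */
	}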
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 315c004f58e4..e16dbf9ab324 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -6,9 +6,6 @@
#include <uapi/linux/idxd.h>
/* PCI Config */
-#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25
-#define PCI_DEVICE_ID_INTEL_IAX_SPR0 0x0cfe
-
#define DEVICE_VERSION_1 0x100
#define DEVICE_VERSION_2 0x200
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 7f28f01be672..f706eae0e76b 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1197,12 +1197,35 @@ static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attrib
static struct device_attribute dev_attr_wq_enqcmds_retries =
__ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);
+static ssize_t op_cap_show_common(struct device *dev, char *buf, unsigned long *opcap_bmap)
+{
+ ssize_t pos;
+ int i;
+
+ pos = 0;
+ for (i = IDXD_MAX_OPCAP_BITS/64 - 1; i >= 0; i--) {
+ unsigned long val = opcap_bmap[i];
+
+ /* On systems where direct user submissions are not safe, we need to clear out
+ * the BATCH capability from the capability mask in sysfs since we cannot support
+ * that command on such systems.
+ */
+ if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe)
+ clear_bit(DSA_OPCODE_BATCH % 64, &val);
+
+ pos += sysfs_emit_at(buf, pos, "%*pb", 64, &val);
+ pos += sysfs_emit_at(buf, pos, "%c", i == 0 ? '\n' : ',');
+ }
+
+ return pos;
+}
+
static ssize_t wq_op_config_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct idxd_wq *wq = confdev_to_wq(dev);
- return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, wq->opcap_bmap);
+ return op_cap_show_common(dev, buf, wq->opcap_bmap);
}
static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask)
@@ -1455,7 +1478,7 @@ static ssize_t op_cap_show(struct device *dev,
{
struct idxd_device *idxd = confdev_to_idxd(dev);
- return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, idxd->opcap_bmap);
+ return op_cap_show_common(dev, buf, idxd->opcap_bmap);
}
static DEVICE_ATTR_RO(op_cap);
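For reference, DSA_OPCODE_BATCH is opcode 0x1 in <uapi/linux/idxd.h>, so the clear_bit() above lands on bit 1 of the least-significant 64-bit group (the i == 0 group, printed last); userspace reading op_cap on an affected device simply never sees a capability it cannot use. The group/bit arithmetic, spelled out:

	unsigned int group = DSA_OPCODE_BATCH / 64;	/* 0: last group emitted */
	unsigned int bit   = DSA_OPCODE_BATCH % 64;	/* 1 */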
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 4e76c4ec2d39..e001f4f7aa64 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -250,7 +250,7 @@ static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
else
regval &= ~val;
- writel(val, pchan->base + reg);
+ writel(regval, pchan->base + reg);
}
static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
@@ -274,7 +274,7 @@ static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
else
regval &= ~val;
- writel(val, od->base + reg);
+ writel(regval, od->base + reg);
}
static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
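Both owl-dma fixes are the same classic read-modify-write slip: the merged regval was computed and then discarded, and the bare mask was written back, clobbering every other bit in the register. The intended pattern as a standalone helper (hypothetical names; readl()/writel() are the real accessors):

static void rmw_update(void __iomem *reg, u32 mask, bool set)
{
	u32 regval = readl(reg);

	if (set)
		regval |= mask;
	else
		regval &= ~mask;

	writel(regval, reg);	/* the merged value, not the bare mask */
}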
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 5f6d7f1e095f..ad8e3da1b2cd 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1053,9 +1053,6 @@ static bool _trigger(struct pl330_thread *thrd)
thrd->req_running = idx;
- if (desc->rqtype == DMA_MEM_TO_DEV || desc->rqtype == DMA_DEV_TO_MEM)
- UNTIL(thrd, PL330_STATE_WFP);
-
return true;
}
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index 88547a23825b..3642508e88bb 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -746,6 +746,9 @@ static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
bytes_xfer = dma_desc->bytes_xfer +
sg_req[dma_desc->sg_idx].len - (wcount * 4);
+ if (dma_desc->bytes_req == bytes_xfer)
+ return 0;
+
residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);
return residual;
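The early return guards the modulo arithmetic: once the transfer has completed, bytes_xfer equals bytes_req, and the old expression then reported a whole buffer as still outstanding. A worked illustration of the fixed helper:

static u32 residual(u32 bytes_req, u32 bytes_xfer)
{
	if (bytes_req == bytes_xfer)
		return 0;			/* transfer finished */
	return bytes_req - (bytes_xfer % bytes_req);
}

/* residual(64, 64) -> 0 with the fix; the old code returned 64 - 0 = 64 */
/* residual(64, 16) -> 48 bytes still outstanding */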
diff --git a/drivers/dma/xilinx/xdma-regs.h b/drivers/dma/xilinx/xdma-regs.h
index 98f5f6fb9ff9..6ad08878e938 100644
--- a/drivers/dma/xilinx/xdma-regs.h
+++ b/drivers/dma/xilinx/xdma-regs.h
@@ -117,6 +117,9 @@ struct xdma_hw_desc {
CHAN_CTRL_IE_WRITE_ERROR | \
CHAN_CTRL_IE_DESC_ERROR)
+/* bits of the channel status register */
+#define XDMA_CHAN_STATUS_BUSY BIT(0)
+
#define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START
#define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 170017ff2aad..313b217388fe 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -71,6 +71,8 @@ struct xdma_chan {
enum dma_transfer_direction dir;
struct dma_slave_config cfg;
u32 irq;
+ struct completion last_interrupt;
+ bool stop_requested;
};
/**
@@ -376,6 +378,8 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
return ret;
xchan->busy = true;
+ xchan->stop_requested = false;
+ reinit_completion(&xchan->last_interrupt);
return 0;
}
@@ -387,7 +391,6 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
static int xdma_xfer_stop(struct xdma_chan *xchan)
{
int ret;
- u32 val;
struct xdma_device *xdev = xchan->xdev_hdl;
/* clear run stop bit to prevent any further auto-triggering */
@@ -395,13 +398,7 @@ static int xdma_xfer_stop(struct xdma_chan *xchan)
CHAN_CTRL_RUN_STOP);
if (ret)
return ret;
-
- /* Clear the channel status register */
- ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
- if (ret)
- return ret;
-
- return 0;
+ return ret;
}
/**
@@ -474,6 +471,8 @@ static int xdma_alloc_channels(struct xdma_device *xdev,
xchan->xdev_hdl = xdev;
xchan->base = base + i * XDMA_CHAN_STRIDE;
xchan->dir = dir;
+ xchan->stop_requested = false;
+ init_completion(&xchan->last_interrupt);
ret = xdma_channel_init(xchan);
if (ret)
@@ -521,6 +520,7 @@ static int xdma_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
xdma_chan->busy = false;
+ xdma_chan->stop_requested = true;
vd = vchan_next_desc(&xdma_chan->vchan);
if (vd) {
list_del(&vd->node);
@@ -542,17 +542,26 @@ static int xdma_terminate_all(struct dma_chan *chan)
static void xdma_synchronize(struct dma_chan *chan)
{
struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_device *xdev = xdma_chan->xdev_hdl;
+ int st = 0;
+
+ /* If the engine continues running, wait for the last interrupt */
+ regmap_read(xdev->rmap, xdma_chan->base + XDMA_CHAN_STATUS, &st);
+ if (st & XDMA_CHAN_STATUS_BUSY)
+ wait_for_completion_timeout(&xdma_chan->last_interrupt, msecs_to_jiffies(1000));
vchan_synchronize(&xdma_chan->vchan);
}
/**
- * xdma_fill_descs - Fill hardware descriptors with contiguous memory block addresses
- * @sw_desc: tx descriptor state container
- * @src_addr: Value for a ->src_addr field of a first descriptor
- * @dst_addr: Value for a ->dst_addr field of a first descriptor
- * @size: Total size of a contiguous memory block
- * @filled_descs_num: Number of filled hardware descriptors for corresponding sw_desc
+ * xdma_fill_descs() - Fill hardware descriptors for one contiguous memory chunk.
+ * More than one descriptor will be used if the size is bigger
+ * than XDMA_DESC_BLEN_MAX.
+ * @sw_desc: Descriptor container
+ * @src_addr: First value for the ->src_addr field
+ * @dst_addr: First value for the ->dst_addr field
+ * @size: Size of the contiguous memory block
+ * @filled_descs_num: Index of the first descriptor to take care of in @sw_desc
*/
static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
u64 dst_addr, u32 size, u32 filled_descs_num)
@@ -704,7 +713,7 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
desc_num = 0;
for (i = 0; i < periods; i++) {
desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
- addr += i * period_size;
+ addr += period_size;
}
tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
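The cyclic-address fix replaces an accumulating i * period_size increment with a flat one. Assuming addr is what *src or *dst points at (as in the surrounding function), the old code filled the first two periods at the same offset and let later ones drift quadratically, while the fixed code advances exactly one period per pass:

/* Offsets seen by xdma_fill_descs() with period_size p = 4096:
 *   addr += i * p  ->  0, 0, 4096, 12288, ...  (overlap, then drift)
 *   addr += p      ->  0, 4096, 8192, 12288    (one period each)
 */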
@@ -876,6 +885,9 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
u32 st;
bool repeat_tx;
+ if (xchan->stop_requested)
+ complete(&xchan->last_interrupt);
+
spin_lock(&xchan->vchan.lock);
/* get submitted request */
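Taken together, the xdma changes implement a stop handshake: terminate_all marks stop_requested, the engine's final interrupt completes last_interrupt, and synchronize waits only while the status register still shows the engine busy. Condensed (kernel-style C, names from the patch):

	/* terminate side (under the vchan lock) */
	chan->stop_requested = true;

	/* ISR side */
	if (chan->stop_requested)
		complete(&chan->last_interrupt);

	/* synchronize side */
	regmap_read(rmap, chan->base + XDMA_CHAN_STATUS, &st);
	if (st & XDMA_CHAN_STATUS_BUSY)
		wait_for_completion_timeout(&chan->last_interrupt,
					    msecs_to_jiffies(1000));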
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index b82815e64d24..eb0637d90342 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -214,7 +214,8 @@ struct xilinx_dpdma_tx_desc {
* @running: true if the channel is running
* @first_frame: flag for the first frame of stream
* @video_group: flag if multi-channel operation is needed for video channels
- * @lock: lock to access struct xilinx_dpdma_chan
+ * @lock: lock to access struct xilinx_dpdma_chan. Must be taken before
+ * @vchan.lock, if both are to be held.
* @desc_pool: descriptor allocation pool
* @err_task: error IRQ bottom half handler
* @desc: References to descriptors being processed
@@ -1097,12 +1098,14 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
* Complete the active descriptor, if any, promote the pending
* descriptor to active, and queue the next transfer, if any.
*/
+ spin_lock(&chan->vchan.lock);
if (chan->desc.active)
vchan_cookie_complete(&chan->desc.active->vdesc);
chan->desc.active = pending;
chan->desc.pending = NULL;
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
out:
spin_unlock_irqrestore(&chan->lock, flags);
@@ -1264,10 +1267,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
unsigned long flags;
- spin_lock_irqsave(&chan->vchan.lock, flags);
+ spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
if (vchan_issue_pending(&chan->vchan))
xilinx_dpdma_chan_queue_transfer(chan);
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ spin_unlock(&chan->vchan.lock);
+ spin_unlock_irqrestore(&chan->lock, flags);
}
static int xilinx_dpdma_config(struct dma_chan *dchan,
@@ -1495,7 +1500,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
spin_unlock_irqrestore(&chan->lock, flags);
}
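All three xilinx_dpdma hunks enforce the lock order now spelled out in the kerneldoc: chan->lock is the outer lock and chan->vchan.lock nests inside it, so every path that completes or queues descriptors takes them in the same order and cannot deadlock against the vsync IRQ path. Schematically:

	spin_lock_irqsave(&chan->lock, flags);	/* outer: channel state */
	spin_lock(&chan->vchan.lock);		/* inner: virt-dma descriptor lists */
	/* complete / promote / queue descriptors */
	spin_unlock(&chan->vchan.lock);
	spin_unlock_irqrestore(&chan->lock, flags);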