author     Linus Torvalds <torvalds@linux-foundation.org>  2023-10-13 08:52:57 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-10-13 08:52:57 -0700
commit     3439b2a87edcdb86557b5e64787372c6f280dcba (patch)
tree       683dc123a57382299b2cf82342b3a70d4e194467 /drivers/dma
parent     7c367d8eadc00ff04bfab184ccd07b34ea89661a (diff)
parent     3fa53518ad419bfacceae046a9d8027e4c4c5290 (diff)
Merge tag 'dmaengine-fix-6.6' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine
Pull dmaengine fixes from Vinod Koul:
 "Driver fixes for:

   - stm32 dma residue calculation and chaining

   - stm32 mdma for setting inflight bytes, residue calculation and
     resume abort

   - channel request, channel enable and dma error in fsl_edma

   - runtime pm imbalance in ste_dma40 driver

   - deadlock fix in mediatek driver"

* tag 'dmaengine-fix-6.6' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
  dmaengine: fsl-edma: fix all channels requested when call fsl_edma3_xlate()
  dmaengine: stm32-dma: fix residue in case of MDMA chaining
  dmaengine: stm32-dma: fix stm32_dma_prep_slave_sg in case of MDMA chaining
  dmaengine: stm32-mdma: set in_flight_bytes in case CRQA flag is set
  dmaengine: stm32-mdma: use Link Address Register to compute residue
  dmaengine: stm32-mdma: abort resume if no ongoing transfer
  dmaengine: ste_dma40: Fix PM disable depth imbalance in d40_probe
  dmaengine: mediatek: Fix deadlock caused by synchronize_irq()
  dmaengine: idxd: use spin_lock_irqsave before wait_event_lock_irq
  dmaengine: fsl-edma: fix edma4 channel enable failure on second attempt
  dt-bindings: dmaengine: zynqmp_dma: add xlnx,bus-width required property
  dmaengine: fsl-edma: fix DMA error when enabling sg if 'DONE' bit is set
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/fsl-edma-common.c           25
-rw-r--r--  drivers/dma/fsl-edma-common.h           14
-rw-r--r--  drivers/dma/fsl-edma-main.c              8
-rw-r--r--  drivers/dma/idxd/device.c                5
-rw-r--r--  drivers/dma/mediatek/mtk-uart-apdma.c    3
-rw-r--r--  drivers/dma/ste_dma40.c                  1
-rw-r--r--  drivers/dma/stm32-dma.c                 11
-rw-r--r--  drivers/dma/stm32-mdma.c                33
8 files changed, 76 insertions, 24 deletions
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index a0f5741abcc4..6a3abe5b1790 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -92,8 +92,14 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
edma_writel_chreg(fsl_chan, val, ch_sbr);
- if (flags & FSL_EDMA_DRV_HAS_CHMUX)
- edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
+ if (flags & FSL_EDMA_DRV_HAS_CHMUX) {
+	/*
+	 * ch_mux: with the exception of 0, a write of a source id that is
+	 * already in use is forced to 0 by the hardware, so only program
+	 * the register while it still reads 0.
+	 */
+ if (!edma_readl_chreg(fsl_chan, ch_mux))
+ edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
+ }
val = edma_readl_chreg(fsl_chan, ch_csr);
val |= EDMA_V3_CH_CSR_ERQ;
@@ -448,12 +454,25 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
edma_write_tcdreg(fsl_chan, tcd->dlast_sga, dlast_sga);
+ csr = le16_to_cpu(tcd->csr);
+
if (fsl_chan->is_sw) {
- csr = le16_to_cpu(tcd->csr);
csr |= EDMA_TCD_CSR_START;
tcd->csr = cpu_to_le16(csr);
}
+	/*
+	 * On eDMAv3, CHn_CSR[DONE] must be cleared before TCDn_CSR[ESG] is
+	 * enabled; eDMAv4 has no such requirement.
+	 * Changing MLINK requires clearing CHn_CSR[DONE] on both eDMAv3
+	 * and eDMAv4.
+	 */
+ if (((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_SG) &&
+ (csr & EDMA_TCD_CSR_E_SG)) ||
+ ((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_LINK) &&
+ (csr & EDMA_TCD_CSR_E_LINK)))
+ edma_writel_chreg(fsl_chan, edma_readl_chreg(fsl_chan, ch_csr), ch_csr);
+
edma_write_tcdreg(fsl_chan, tcd->csr, csr);
}
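The ch_mux guard above depends on a hardware contract worth spelling out: the register latches a source id only while it reads 0, and a write of an id already claimed by another channel is forced back to 0. A minimal sketch of that claim pattern, with readl()/writel() standing in for the driver's edma_readl_chreg()/edma_writel_chreg() accessors (the helper itself is illustrative, not part of the driver):

    #include <linux/io.h>
    #include <linux/types.h>

    static bool chmux_claim(void __iomem *mux_reg, u32 srcid)
    {
            if (readl(mux_reg))
                    return true;    /* already programmed, don't rewrite */
            writel(srcid, mux_reg);
            /* A read-back of 0 means the id was in use by another channel. */
            return readl(mux_reg) == srcid;
    }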
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 3cc0cc8fc2d0..40d50cc3d75a 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -183,11 +183,23 @@ struct fsl_edma_desc {
#define FSL_EDMA_DRV_BUS_8BYTE BIT(10)
#define FSL_EDMA_DRV_DEV_TO_DEV BIT(11)
#define FSL_EDMA_DRV_ALIGN_64BYTE BIT(12)
+/* Need to clear CHn_CSR[DONE] before enabling the TCD's ESG */
+#define FSL_EDMA_DRV_CLEAR_DONE_E_SG BIT(13)
+/* Need to clear CHn_CSR[DONE] before enabling the TCD's MAJORELINK */
+#define FSL_EDMA_DRV_CLEAR_DONE_E_LINK BIT(14)
#define FSL_EDMA_DRV_EDMA3 (FSL_EDMA_DRV_SPLIT_REG | \
FSL_EDMA_DRV_BUS_8BYTE | \
FSL_EDMA_DRV_DEV_TO_DEV | \
- FSL_EDMA_DRV_ALIGN_64BYTE)
+ FSL_EDMA_DRV_ALIGN_64BYTE | \
+ FSL_EDMA_DRV_CLEAR_DONE_E_SG | \
+ FSL_EDMA_DRV_CLEAR_DONE_E_LINK)
+
+#define FSL_EDMA_DRV_EDMA4 (FSL_EDMA_DRV_SPLIT_REG | \
+ FSL_EDMA_DRV_BUS_8BYTE | \
+ FSL_EDMA_DRV_DEV_TO_DEV | \
+ FSL_EDMA_DRV_ALIGN_64BYTE | \
+ FSL_EDMA_DRV_CLEAR_DONE_E_LINK)
struct fsl_edma_drvdata {
u32 dmamuxs; /* only used before v3 */
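A rough sketch of how such capability-flag sets are consumed: each SoC's drvdata word is built from the BIT() masks above, and runtime paths test individual bits. The constants mirror the header; the predicate below is hypothetical:

    #include <linux/bits.h>
    #include <linux/types.h>

    #define DRV_CLEAR_DONE_E_SG     BIT(13) /* mirrors FSL_EDMA_DRV_CLEAR_DONE_E_SG */
    #define DRV_CLEAR_DONE_E_LINK   BIT(14) /* mirrors FSL_EDMA_DRV_CLEAR_DONE_E_LINK */

    /* eDMAv3 sets both bits, eDMAv4 only E_LINK, so only v3 forces a
     * DONE clear before scatter-gather is enabled. */
    static inline bool must_clear_done_for_sg(u32 drvflags)
    {
            return drvflags & DRV_CLEAR_DONE_E_SG;
    }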
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 63d48d046f04..8c4ed7012e23 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -154,18 +154,20 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
fsl_chan = to_fsl_edma_chan(chan);
i = fsl_chan - fsl_edma->chans;
- chan = dma_get_slave_channel(chan);
- chan->device->privatecnt++;
fsl_chan->priority = dma_spec->args[1];
fsl_chan->is_rxchan = dma_spec->args[2] & ARGS_RX;
fsl_chan->is_remote = dma_spec->args[2] & ARGS_REMOTE;
fsl_chan->is_multi_fifo = dma_spec->args[2] & ARGS_MULTI_FIFO;
if (!b_chmux && i == dma_spec->args[0]) {
+ chan = dma_get_slave_channel(chan);
+ chan->device->privatecnt++;
mutex_unlock(&fsl_edma->fsl_edma_mutex);
return chan;
} else if (b_chmux && !fsl_chan->srcid) {
/* if the controller supports a channel mux, choose a free channel */
+ chan = dma_get_slave_channel(chan);
+ chan->device->privatecnt++;
fsl_chan->srcid = dma_spec->args[0];
mutex_unlock(&fsl_edma->fsl_edma_mutex);
return chan;
@@ -355,7 +357,7 @@ static struct fsl_edma_drvdata imx93_data3 = {
};
static struct fsl_edma_drvdata imx93_data4 = {
- .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3,
+ .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4,
.chreg_space_sz = 0x8000,
.chreg_off = 0x10000,
.setup_irq = fsl_edma3_irq_init,
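The xlate change above reduces to one rule: take the dmaengine reference only once the candidate channel is definitely the one being returned, so rejected candidates are never consumed. A hedged sketch of that pattern, with the matching logic elided and names illustrative:

    #include <linux/dmaengine.h>

    static struct dma_chan *pick_channel(struct dma_chan *candidate, bool matches)
    {
            if (!matches)
                    return NULL;    /* rejected candidates stay available */

            candidate = dma_get_slave_channel(candidate);
            if (candidate)
                    candidate->device->privatecnt++;
            return candidate;
    }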
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 22d6f4e455b7..8f754f922217 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -477,6 +477,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
union idxd_command_reg cmd;
DECLARE_COMPLETION_ONSTACK(done);
u32 stat;
+ unsigned long flags;
if (idxd_device_is_halted(idxd)) {
dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
@@ -490,7 +491,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
cmd.operand = operand;
cmd.int_req = 1;
- spin_lock(&idxd->cmd_lock);
+ spin_lock_irqsave(&idxd->cmd_lock, flags);
wait_event_lock_irq(idxd->cmd_waitq,
!test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
idxd->cmd_lock);
@@ -507,7 +508,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
* After command submitted, release lock and go to sleep until
* the command completes via interrupt.
*/
- spin_unlock(&idxd->cmd_lock);
+ spin_unlock_irqrestore(&idxd->cmd_lock, flags);
wait_for_completion(&done);
stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
spin_lock(&idxd->cmd_lock);
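The idxd change follows the documented contract of wait_event_lock_irq(): it must be entered with the spinlock held and interrupts disabled, and it drops the lock (re-enabling interrupts) while sleeping. Because cmd_lock can also be taken from interrupt context, the plain spin_lock() had to become spin_lock_irqsave(). A condensed sketch with illustrative names:

    #include <linux/bitops.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    static void submit_one_cmd(spinlock_t *lock, wait_queue_head_t *waitq,
                               unsigned long *flag_word, int busy_bit)
    {
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            /* Sleeps with *lock released, reacquires it before rechecking. */
            wait_event_lock_irq(*waitq, !test_bit(busy_bit, flag_word), *lock);
            set_bit(busy_bit, flag_word);
            /* ... program the command register here ... */
            spin_unlock_irqrestore(lock, flags);
    }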
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index c51dc017b48a..06d12ac39144 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -450,9 +450,8 @@ static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
- synchronize_irq(c->irq);
-
spin_unlock_irqrestore(&c->vc.lock, flags);
+ synchronize_irq(c->irq);
return 0;
}
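The mediatek fix reorders a classic deadlock: synchronize_irq() blocks until the handler returns, and the handler takes vc.lock, so calling it with vc.lock held can never complete. Schematically, with the hardware writes elided and names illustrative:

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    static int pause_sketch(spinlock_t *vc_lock, unsigned int irq)
    {
            unsigned long flags;

            spin_lock_irqsave(vc_lock, flags);
            /* ... clear the enable/interrupt-enable bits ... */
            spin_unlock_irqrestore(vc_lock, flags);

            /* Safe only after the unlock: the handler may need vc_lock. */
            synchronize_irq(irq);
            return 0;
    }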
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 89e82508c133..002833fb1fa0 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3668,6 +3668,7 @@ static int __init d40_probe(struct platform_device *pdev)
regulator_disable(base->lcpa_regulator);
regulator_put(base->lcpa_regulator);
}
+ pm_runtime_disable(base->dev);
report_failure:
d40_err(dev, "probe failed\n");
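The one-line ste_dma40 fix restores the usual probe invariant: every pm_runtime_enable() must be matched by a pm_runtime_disable() on each failure path, otherwise the device's disable depth is left unbalanced for the next bind. In outline (the failing setup step is a stand-in):

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int probe_sketch(struct platform_device *pdev)
    {
            int ret;

            pm_runtime_enable(&pdev->dev);

            ret = -ENODEV;  /* stand-in for a setup step that may fail */
            if (ret) {
                    pm_runtime_disable(&pdev->dev); /* rebalance before bailing */
                    return ret;
            }
            return 0;
    }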
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 5c36811aa134..0b30151fb45c 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -1113,8 +1113,10 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;
/* Activate Double Buffer Mode if DMA triggers STM32 MDMA and more than 1 sg */
- if (chan->trig_mdma && sg_len > 1)
+ if (chan->trig_mdma && sg_len > 1) {
chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
+ chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT;
+ }
for_each_sg(sgl, sg, sg_len, i) {
ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
@@ -1387,11 +1389,12 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
residue = stm32_dma_get_remaining_bytes(chan);
- if (chan->desc->cyclic && !stm32_dma_is_current_sg(chan)) {
+ if ((chan->desc->cyclic || chan->trig_mdma) && !stm32_dma_is_current_sg(chan)) {
n_sg++;
if (n_sg == chan->desc->num_sgs)
n_sg = 0;
- residue = sg_req->len;
+ if (!chan->trig_mdma)
+ residue = sg_req->len;
}
/*
@@ -1401,7 +1404,7 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
* residue = remaining bytes from NDTR + remaining
* periods/sg to be transferred
*/
- if (!chan->desc->cyclic || n_sg != 0)
+ if ((!chan->desc->cyclic && !chan->trig_mdma) || n_sg != 0)
for (i = n_sg; i < desc->num_sgs; i++)
residue += desc->sg_req[i].len;
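The residue logic the two hunks adjust can be read as one formula: bytes still pending in the current scatterlist element (from NDTR) plus the full length of every element not yet started; the MDMA-chaining case merely changes which element counts as "current". A plain restatement, with illustrative types and no hardware access:

    #include <linux/types.h>

    static size_t residue_sketch(size_t ndtr_bytes, const size_t *sg_len,
                                 unsigned int num_sgs, unsigned int next_sg)
    {
            size_t residue = ndtr_bytes;    /* remaining in the current element */
            unsigned int i;

            for (i = next_sg; i < num_sgs; i++)
                    residue += sg_len[i];   /* elements not transferred yet */
            return residue;
    }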
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 0de234022c6d..bae08b3f55c7 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -777,8 +777,6 @@ static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
/* Enable interrupts */
ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE;
- if (sg_len > 1)
- ccr |= STM32_MDMA_CCR_BTIE;
desc->ccr = ccr;
return 0;
@@ -1236,6 +1234,10 @@ static int stm32_mdma_resume(struct dma_chan *c)
unsigned long flags;
u32 status, reg;
+	/* Abort the resume if the transfer was terminated or is still running */
+ if (!chan->desc || (stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & STM32_MDMA_CCR_EN))
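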
+ return -EPERM;
+
hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc;
spin_lock_irqsave(&chan->vchan.lock, flags);
@@ -1316,21 +1318,35 @@ static int stm32_mdma_slave_config(struct dma_chan *c,
static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan,
struct stm32_mdma_desc *desc,
- u32 curr_hwdesc)
+ u32 curr_hwdesc,
+ struct dma_tx_state *state)
{
struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
struct stm32_mdma_hwdesc *hwdesc;
- u32 cbndtr, residue, modulo, burst_size;
+ u32 cisr, clar, cbndtr, residue, modulo, burst_size;
int i;
+ cisr = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
+
residue = 0;
- for (i = curr_hwdesc + 1; i < desc->count; i++) {
+ /* Get the next hw descriptor to process from current transfer */
+ clar = stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id));
+ for (i = desc->count - 1; i >= 0; i--) {
hwdesc = desc->node[i].hwdesc;
+
+ if (hwdesc->clar == clar)
+			break; /* current transfer found, stop accumulating */
+
+		/* Accumulate the residue of unprocessed hw descriptors */
residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr);
}
cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
residue += cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;
+ state->in_flight_bytes = 0;
+ if (chan->chan_config.m2m_hw && (cisr & STM32_MDMA_CISR_CRQA))
+ state->in_flight_bytes = cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;
+
if (!chan->mem_burst)
return residue;
@@ -1360,11 +1376,10 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
vdesc = vchan_find_desc(&chan->vchan, cookie);
if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
- residue = stm32_mdma_desc_residue(chan, chan->desc,
- chan->curr_hwdesc);
+ residue = stm32_mdma_desc_residue(chan, chan->desc, chan->curr_hwdesc, state);
else if (vdesc)
- residue = stm32_mdma_desc_residue(chan,
- to_stm32_mdma_desc(vdesc), 0);
+ residue = stm32_mdma_desc_residue(chan, to_stm32_mdma_desc(vdesc), 0, state);
+
dma_set_residue(state, residue);
spin_unlock_irqrestore(&chan->vchan.lock, flags);
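The new residue computation walks the linked hardware descriptors from the tail, accumulating the byte count of every node the controller has not reached, and stops at the node whose link address matches the live CLAR register. Reduced to its core, with illustrative structures:

    #include <linux/types.h>

    struct hwnode_sketch {
            u32 clar;       /* link address programmed into this node */
            u32 bytes;      /* BNDT byte count of this node */
    };

    static u32 residue_by_clar(const struct hwnode_sketch *node, int count,
                               u32 hw_clar)
    {
            u32 residue = 0;
            int i;

            for (i = count - 1; i >= 0; i--) {
                    if (node[i].clar == hw_clar)
                            break;          /* current transfer found */
                    residue += node[i].bytes;
            }
            return residue; /* caller still adds the live CBNDTR remainder */
    }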