author    Vinod Koul <vkoul@kernel.org>  2022-01-05 12:48:05 +0530
committer Vinod Koul <vkoul@kernel.org>  2022-01-05 12:48:05 +0530
commit    5cb664fbeba0a11592c1d2774833a4cc49d1ca19 (patch)
tree      f5eb47d25d0b90cebbe46d0cbe33bf39bc339eef /drivers/dma
parent    105a8c525675bb7d4d64871f9b2edf39460de881 (diff)
parent    822c9f2b833c53fc67e8adf6f63ecc3ea24d502c (diff)
Merge branch 'fixes' into next
We have a conflict in the idxd driver between 'fixes' and 'next', and there are patches that depend on it, so merge the 'fixes' branch into 'next'.
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c |   4
-rw-r--r--  drivers/dma/dw-edma/dw-edma-pcie.c             |  10
-rw-r--r--  drivers/dma/idxd/irq.c                         |   2
-rw-r--r--  drivers/dma/idxd/submit.c                      |  18
-rw-r--r--  drivers/dma/st_fdma.c                          |   2
-rw-r--r--  drivers/dma/ti/k3-udma.c                       | 157
6 files changed, 129 insertions(+), 64 deletions(-)
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index cd0d745eb071..33baf1591a49 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -373,7 +373,7 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
struct axi_dma_desc *first)
{
u32 priority = chan->chip->dw->hdata->priority[chan->id];
- struct axi_dma_chan_config config;
+ struct axi_dma_chan_config config = {};
u32 irq_mask;
u8 lms = 0; /* Select AXI0 master for LLI fetching */
@@ -391,7 +391,7 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;
config.prior = priority;
config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
- config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
+ config.hs_sel_src = DWAXIDMAC_HS_SEL_HW;
switch (chan->direction) {
case DMA_MEM_TO_DEV:
dw_axi_dma_set_byte_halfword(chan, true);
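
Two related fixes in the hunks above: the on-stack config struct was never zeroed, and a copy-paste slip assigned hs_sel_dst twice, so hs_sel_src was written to hardware as uninitialized stack data. A minimal hedged sketch of why the zero-initializer matters (struct layout and register packing here are illustrative, not the real axi_dma_chan_config):

	struct chan_config {
		unsigned int prior;
		unsigned int hs_sel_src;
		unsigned int hs_sel_dst;
	};

	static void configure(volatile unsigned int *reg)
	{
		struct chan_config cfg = {};	/* every field starts at zero */

		cfg.prior = 1;
		cfg.hs_sel_dst = 1;
		/* If hs_sel_src is never assigned (the copy-paste bug fixed
		 * above), the "= {}" keeps it a deterministic 0 instead of
		 * whatever happened to be on the stack. */
		*reg = (cfg.hs_sel_src << 2) | (cfg.hs_sel_dst << 1) | cfg.prior;
	}
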
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index 198f6cd8ac1b..cee7aa231d7b 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -187,17 +187,9 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
/* DMA configuration */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
+ if (err) {
pci_err(pdev, "DMA mask 64 set failed\n");
return err;
- } else {
- pci_err(pdev, "DMA mask 64 set failed\n");
-
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- pci_err(pdev, "DMA mask 32 set failed\n");
- return err;
- }
}
/* Data structure allocation */
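
The removed branch was doubly wrong: the `!err` test logged a failure on success, and the 32-bit fallback underneath was unreachable dead code. On current kernels a 64-bit mask request only fails when the device cannot do DMA at all, so there is nothing useful to fall back to. A hedged sketch of the resulting probe-time idiom:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	static int sketch_probe_dma(struct pci_dev *pdev)
	{
		int err;

		/* Failure here means the device genuinely cannot do DMA;
		 * retrying with a 32-bit mask would not help. */
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (err) {
			pci_err(pdev, "DMA mask 64 set failed\n");
			return err;
		}
		return 0;
	}
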
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 0b0055a0ad2a..925171e9738c 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -318,10 +318,10 @@ halt:
INIT_WORK(&idxd->work, idxd_device_reinit);
queue_work(idxd->wq, &idxd->work);
} else {
- spin_lock(&idxd->dev_lock);
idxd->state = IDXD_DEV_HALTED;
idxd_wqs_quiesce(idxd);
idxd_wqs_unmap_portal(idxd);
+ spin_lock(&idxd->dev_lock);
idxd_device_clear_state(idxd);
dev_err(&idxd->pdev->dev,
"idxd halted, need %s.\n",
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index 569815a84e95..11ac06be1f0a 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -97,6 +97,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
{
struct idxd_desc *d, *t, *found = NULL;
struct llist_node *head;
+ LIST_HEAD(flist);
desc->completion->status = IDXD_COMP_DESC_ABORT;
/*
@@ -111,7 +112,11 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
found = desc;
continue;
}
- list_add_tail(&desc->list, &ie->work_list);
+
+ if (d->completion->status)
+ list_add_tail(&d->list, &flist);
+ else
+ list_add_tail(&d->list, &ie->work_list);
}
}
@@ -121,6 +126,17 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
if (found)
idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, false);
+
+ /*
+ * Completing the descriptor returns it to the allocator, where it
+ * can be acquired by a different process that may then modify
+ * desc->list. Delete the descriptor from the list first so the
+ * list traversal does not get corrupted by the other process.
+ */
+ list_for_each_entry_safe(d, t, &flist, list) {
+ list_del_init(&d->list);
+ idxd_dma_complete_txd(d, IDXD_COMPLETE_ABORT, true);
+ }
}
/*
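
The new code splits the drained pending list in two: descriptors that already carry a completion status go onto the local flist and are completed after the locked walk, each unlinked first, because completing a descriptor hands it back to the allocator where another thread may reuse it and rewrite desc->list. A hedged, self-contained sketch of that delete-before-complete pattern (the node type and complete_node() are hypothetical):

	#include <linux/list.h>

	struct node {
		struct list_head list;
		/* ... payload ... */
	};

	static void complete_node(struct node *n);	/* may recycle n */

	static void flush_aborted(struct list_head *flist)
	{
		struct node *n, *tmp;

		list_for_each_entry_safe(n, tmp, flist, list) {
			list_del_init(&n->list);  /* unlink before ownership is lost */
			complete_node(n);         /* n may now belong to someone else */
		}
	}
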
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
index 962b6e05287b..d95c421877fb 100644
--- a/drivers/dma/st_fdma.c
+++ b/drivers/dma/st_fdma.c
@@ -874,4 +874,4 @@ MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
-MODULE_ALIAS("platform: " DRIVER_NAME);
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 895dcd0e8b60..4d10837fd859 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -4535,45 +4535,60 @@ static int udma_setup_resources(struct udma_dev *ud)
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
if (IS_ERR(rm_res)) {
bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+ irq_res.sets = 1;
} else {
bitmap_fill(ud->tchan_map, ud->tchan_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->tchan_map,
&rm_res->desc[i], "tchan");
+ irq_res.sets = rm_res->sets;
}
- irq_res.sets = rm_res->sets;
/* rchan and matching default flow ranges */
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
if (IS_ERR(rm_res)) {
bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+ irq_res.sets++;
} else {
bitmap_fill(ud->rchan_map, ud->rchan_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->rchan_map,
&rm_res->desc[i], "rchan");
+ irq_res.sets += rm_res->sets;
}
- irq_res.sets += rm_res->sets;
irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+ if (!irq_res.desc)
+ return -ENOMEM;
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
- for (i = 0; i < rm_res->sets; i++) {
- irq_res.desc[i].start = rm_res->desc[i].start;
- irq_res.desc[i].num = rm_res->desc[i].num;
- irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
- irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+ if (IS_ERR(rm_res)) {
+ irq_res.desc[0].start = 0;
+ irq_res.desc[0].num = ud->tchan_cnt;
+ i = 1;
+ } else {
+ for (i = 0; i < rm_res->sets; i++) {
+ irq_res.desc[i].start = rm_res->desc[i].start;
+ irq_res.desc[i].num = rm_res->desc[i].num;
+ irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
+ irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+ }
}
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
- for (j = 0; j < rm_res->sets; j++, i++) {
- if (rm_res->desc[j].num) {
- irq_res.desc[i].start = rm_res->desc[j].start +
- ud->soc_data->oes.udma_rchan;
- irq_res.desc[i].num = rm_res->desc[j].num;
- }
- if (rm_res->desc[j].num_sec) {
- irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
- ud->soc_data->oes.udma_rchan;
- irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ if (IS_ERR(rm_res)) {
+ irq_res.desc[i].start = 0;
+ irq_res.desc[i].num = ud->rchan_cnt;
+ } else {
+ for (j = 0; j < rm_res->sets; j++, i++) {
+ if (rm_res->desc[j].num) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ ud->soc_data->oes.udma_rchan;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+ }
+ if (rm_res->desc[j].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+ ud->soc_data->oes.udma_rchan;
+ irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ }
}
}
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
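
The same hardening is applied to all three setup functions in this file (udma above, bcdma and pktdma below): tisci_rm->rm_ranges[...] returns an ERR_PTR when no range is reserved for this host, meaning Linux owns the whole resource, and the old code dereferenced rm_res->sets anyway. The rewrite counts one synthetic full-range set for each errored range list, checks the kcalloc() result before filling it, and only walks rm_res->desc[] through a valid pointer. A condensed hedged sketch of the pattern (get_ranges() is hypothetical, and the struct copy condenses the field-by-field copy in the real hunk):

	/* Sketch of the IS_ERR-aware accounting used in the hunks above. */
	rm_res = get_ranges(RM_RANGE_TCHAN);	/* may return ERR_PTR(...) */
	irq_res.sets = IS_ERR(rm_res) ? 1 : rm_res->sets;

	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (!irq_res.desc)
		return -ENOMEM;			/* new: kcalloc can fail */

	if (IS_ERR(rm_res)) {
		irq_res.desc[0].start = 0;	/* one range covering everything */
		irq_res.desc[0].num = ud->tchan_cnt;
	} else {
		for (i = 0; i < rm_res->sets; i++)
			irq_res.desc[i] = rm_res->desc[i];
	}
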
@@ -4691,14 +4706,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
if (IS_ERR(rm_res)) {
bitmap_zero(ud->bchan_map, ud->bchan_cnt);
+ irq_res.sets++;
} else {
bitmap_fill(ud->bchan_map, ud->bchan_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->bchan_map,
&rm_res->desc[i],
"bchan");
+ irq_res.sets += rm_res->sets;
}
- irq_res.sets += rm_res->sets;
}
/* tchan ranges */
@@ -4706,14 +4722,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
if (IS_ERR(rm_res)) {
bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+ irq_res.sets += 2;
} else {
bitmap_fill(ud->tchan_map, ud->tchan_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->tchan_map,
&rm_res->desc[i],
"tchan");
+ irq_res.sets += rm_res->sets * 2;
}
- irq_res.sets += rm_res->sets * 2;
}
/* rchan ranges */
@@ -4721,47 +4738,72 @@ static int bcdma_setup_resources(struct udma_dev *ud)
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
if (IS_ERR(rm_res)) {
bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+ irq_res.sets += 2;
} else {
bitmap_fill(ud->rchan_map, ud->rchan_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->rchan_map,
&rm_res->desc[i],
"rchan");
+ irq_res.sets += rm_res->sets * 2;
}
- irq_res.sets += rm_res->sets * 2;
}
irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+ if (!irq_res.desc)
+ return -ENOMEM;
if (ud->bchan_cnt) {
rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
- for (i = 0; i < rm_res->sets; i++) {
- irq_res.desc[i].start = rm_res->desc[i].start +
- oes->bcdma_bchan_ring;
- irq_res.desc[i].num = rm_res->desc[i].num;
+ if (IS_ERR(rm_res)) {
+ irq_res.desc[0].start = oes->bcdma_bchan_ring;
+ irq_res.desc[0].num = ud->bchan_cnt;
+ i = 1;
+ } else {
+ for (i = 0; i < rm_res->sets; i++) {
+ irq_res.desc[i].start = rm_res->desc[i].start +
+ oes->bcdma_bchan_ring;
+ irq_res.desc[i].num = rm_res->desc[i].num;
+ }
}
}
if (ud->tchan_cnt) {
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
- for (j = 0; j < rm_res->sets; j++, i += 2) {
- irq_res.desc[i].start = rm_res->desc[j].start +
- oes->bcdma_tchan_data;
- irq_res.desc[i].num = rm_res->desc[j].num;
-
- irq_res.desc[i + 1].start = rm_res->desc[j].start +
- oes->bcdma_tchan_ring;
- irq_res.desc[i + 1].num = rm_res->desc[j].num;
+ if (IS_ERR(rm_res)) {
+ irq_res.desc[i].start = oes->bcdma_tchan_data;
+ irq_res.desc[i].num = ud->tchan_cnt;
+ irq_res.desc[i + 1].start = oes->bcdma_tchan_ring;
+ irq_res.desc[i + 1].num = ud->tchan_cnt;
+ i += 2;
+ } else {
+ for (j = 0; j < rm_res->sets; j++, i += 2) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ oes->bcdma_tchan_data;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+
+ irq_res.desc[i + 1].start = rm_res->desc[j].start +
+ oes->bcdma_tchan_ring;
+ irq_res.desc[i + 1].num = rm_res->desc[j].num;
+ }
}
}
if (ud->rchan_cnt) {
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
- for (j = 0; j < rm_res->sets; j++, i += 2) {
- irq_res.desc[i].start = rm_res->desc[j].start +
- oes->bcdma_rchan_data;
- irq_res.desc[i].num = rm_res->desc[j].num;
-
- irq_res.desc[i + 1].start = rm_res->desc[j].start +
- oes->bcdma_rchan_ring;
- irq_res.desc[i + 1].num = rm_res->desc[j].num;
+ if (IS_ERR(rm_res)) {
+ irq_res.desc[i].start = oes->bcdma_rchan_data;
+ irq_res.desc[i].num = ud->rchan_cnt;
+ irq_res.desc[i + 1].start = oes->bcdma_rchan_ring;
+ irq_res.desc[i + 1].num = ud->rchan_cnt;
+ i += 2;
+ } else {
+ for (j = 0; j < rm_res->sets; j++, i += 2) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ oes->bcdma_rchan_data;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+
+ irq_res.desc[i + 1].start = rm_res->desc[j].start +
+ oes->bcdma_rchan_ring;
+ irq_res.desc[i + 1].num = rm_res->desc[j].num;
+ }
}
}
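
bcdma's accounting differs in one respect: each tchan and rchan range contributes two interrupt ranges (a data event and a ring completion event), hence the `* 2` and `+= 2`, while bchan contributes one per range. With made-up counts the bookkeeping works out as follows:

	/* Hedged example with invented numbers: one bchan range from the
	 * firmware, two tchan ranges, and an IS_ERR rchan range list. */
	irq_res.sets = 0;
	irq_res.sets += 1;	/* bchan: ring events only       */
	irq_res.sets += 2 * 2;	/* tchan: data + ring per range  */
	irq_res.sets += 2;	/* rchan IS_ERR: synthetic pair  */
	/* => kcalloc(7, sizeof(*irq_res.desc), ...), and the fill loops
	 * advance i by 1 for bchan and by 2 for tchan/rchan entries.  */
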
@@ -4859,39 +4901,54 @@ static int pktdma_setup_resources(struct udma_dev *ud)
if (IS_ERR(rm_res)) {
/* all rflows are assigned exclusively to Linux */
bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
+ irq_res.sets = 1;
} else {
bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->rflow_in_use,
&rm_res->desc[i], "rflow");
+ irq_res.sets = rm_res->sets;
}
- irq_res.sets = rm_res->sets;
/* tflow ranges */
rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
if (IS_ERR(rm_res)) {
/* all tflows are assigned exclusively to Linux */
bitmap_zero(ud->tflow_map, ud->tflow_cnt);
+ irq_res.sets++;
} else {
bitmap_fill(ud->tflow_map, ud->tflow_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->tflow_map,
&rm_res->desc[i], "tflow");
+ irq_res.sets += rm_res->sets;
}
- irq_res.sets += rm_res->sets;
irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+ if (!irq_res.desc)
+ return -ENOMEM;
rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
- for (i = 0; i < rm_res->sets; i++) {
- irq_res.desc[i].start = rm_res->desc[i].start +
- oes->pktdma_tchan_flow;
- irq_res.desc[i].num = rm_res->desc[i].num;
+ if (IS_ERR(rm_res)) {
+ irq_res.desc[0].start = oes->pktdma_tchan_flow;
+ irq_res.desc[0].num = ud->tflow_cnt;
+ i = 1;
+ } else {
+ for (i = 0; i < rm_res->sets; i++) {
+ irq_res.desc[i].start = rm_res->desc[i].start +
+ oes->pktdma_tchan_flow;
+ irq_res.desc[i].num = rm_res->desc[i].num;
+ }
}
rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
- for (j = 0; j < rm_res->sets; j++, i++) {
- irq_res.desc[i].start = rm_res->desc[j].start +
- oes->pktdma_rchan_flow;
- irq_res.desc[i].num = rm_res->desc[j].num;
+ if (IS_ERR(rm_res)) {
+ irq_res.desc[i].start = oes->pktdma_rchan_flow;
+ irq_res.desc[i].num = ud->rflow_cnt;
+ } else {
+ for (j = 0; j < rm_res->sets; j++, i++) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ oes->pktdma_rchan_flow;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+ }
}
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
kfree(irq_res.desc);