Diffstat (limited to 'drivers/ntb/ntb_transport.c')
-rw-r--r--  drivers/ntb/ntb_transport.c  |  110
1 file changed, 83 insertions(+), 27 deletions(-)
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index d5c5894f252e..e61db11e6ccc 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -102,6 +102,9 @@ struct ntb_queue_entry {
void *buf;
unsigned int len;
unsigned int flags;
+ int retries;
+ int errors;
+ unsigned int tx_index;
struct ntb_transport_qp *qp;
union {
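
A note on the new per-entry fields above: qp->tx_index keeps advancing as later frames are submitted, so a completion that arrives asynchronously from the DMA engine can no longer derive its frame slot from the queue itself. Snapshotting the slot into entry->tx_index at submit time fixes that, while retries and errors track per-entry prep retries and transfer failures. A minimal sketch of how a completion path recomputes its frame address from the snapshot (field names are from the diff; the local is illustrative):

	/* frame base for this entry inside the TX memory window */
	void __iomem *offset = qp->tx_mw +
			       qp->tx_max_frame * entry->tx_index;
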
@@ -259,6 +262,9 @@ enum {
static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;
+static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
+ struct ntb_queue_entry *entry);
+static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset);
static int ntb_transport_bus_match(struct device *dev,
struct device_driver *drv)
@@ -1467,12 +1473,39 @@ static void ntb_transport_rxc_db(unsigned long data)
}
}
-static void ntb_tx_copy_callback(void *data)
+static void ntb_tx_copy_callback(void *data,
+ const struct dmaengine_result *res)
{
struct ntb_queue_entry *entry = data;
struct ntb_transport_qp *qp = entry->qp;
struct ntb_payload_header __iomem *hdr = entry->tx_hdr;
+ /* we need to check DMA results if we are using DMA */
+ if (res) {
+ enum dmaengine_tx_result dma_err = res->result;
+
+ switch (dma_err) {
+ case DMA_TRANS_READ_FAILED:
+ case DMA_TRANS_WRITE_FAILED:
+ entry->errors++;
+ case DMA_TRANS_ABORTED:
+ {
+ void __iomem *offset =
+ qp->tx_mw + qp->tx_max_frame *
+ entry->tx_index;
+
+ /* resubmit via CPU */
+ ntb_memcpy_tx(entry, offset);
+ qp->tx_memcpy++;
+ return;
+ }
+
+ case DMA_TRANS_NOERROR:
+ default:
+ break;
+ }
+ }
+
iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));
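
The callback above follows the dmaengine result-reporting convention: a NULL result means the completion came from the synchronous CPU copy path (ntb_memcpy_tx() below passes NULL), a read or write failure is counted and the frame is replayed through the CPU, and an aborted descriptor is replayed without bumping the error count. A self-contained sketch of the same consumer pattern, with struct my_req and my_done() as illustrative names; the enum values and result struct come from <linux/dmaengine.h>:

	#include <linux/dmaengine.h>

	struct my_req {
		int errors;	/* per-request failure count */
	};

	static void my_done(void *param, const struct dmaengine_result *res)
	{
		struct my_req *req = param;

		/* res == NULL: completion came from a CPU fallback, not a
		 * DMA engine, so there is no hardware status to inspect. */
		if (res) {
			switch (res->result) {
			case DMA_TRANS_READ_FAILED:
			case DMA_TRANS_WRITE_FAILED:
				req->errors++;
				/* fall through: failed frames are replayed */
			case DMA_TRANS_ABORTED:
				/* resubmit via a CPU copy here */
				return;
			case DMA_TRANS_NOERROR:
			default:
				break;
			}
		}
		/* normal completion work goes here */
	}
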
@@ -1507,40 +1540,25 @@ static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
/* Ensure that the data is fully copied out before setting the flags */
wmb();
- ntb_tx_copy_callback(entry);
+ ntb_tx_copy_callback(entry, NULL);
}
-static void ntb_async_tx(struct ntb_transport_qp *qp,
- struct ntb_queue_entry *entry)
+static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
+ struct ntb_queue_entry *entry)
{
- struct ntb_payload_header __iomem *hdr;
struct dma_async_tx_descriptor *txd;
struct dma_chan *chan = qp->tx_dma_chan;
struct dma_device *device;
+ size_t len = entry->len;
+ void *buf = entry->buf;
size_t dest_off, buff_off;
struct dmaengine_unmap_data *unmap;
dma_addr_t dest;
dma_cookie_t cookie;
- void __iomem *offset;
- size_t len = entry->len;
- void *buf = entry->buf;
int retries = 0;
- offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
- hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
- entry->tx_hdr = hdr;
-
- iowrite32(entry->len, &hdr->len);
- iowrite32((u32)qp->tx_pkts, &hdr->ver);
-
- if (!chan)
- goto err;
-
- if (len < copy_bytes)
- goto err;
-
device = chan->device;
- dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
+ dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
buff_off = (size_t)buf & ~PAGE_MASK;
dest_off = (size_t)dest & ~PAGE_MASK;
@@ -1560,8 +1578,9 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
unmap->to_cnt = 1;
for (retries = 0; retries < DMA_RETRIES; retries++) {
- txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0],
- len, DMA_PREP_INTERRUPT);
+ txd = device->device_prep_dma_memcpy(chan, dest,
+ unmap->addr[0], len,
+ DMA_PREP_INTERRUPT);
if (txd)
break;
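
device_prep_dma_memcpy() returns NULL when the channel is temporarily out of descriptors, which is why the prep call above sits in a bounded retry loop (DMA_RETRIES is the driver's existing bound). A sketch of the pattern; the back-off between attempts is an illustrative assumption, since the delay this driver actually uses falls outside the visible hunk:

	for (retries = 0; retries < DMA_RETRIES; retries++) {
		txd = device->device_prep_dma_memcpy(chan, dest,
						     unmap->addr[0], len,
						     DMA_PREP_INTERRUPT);
		if (txd)
			break;
		/* ring exhausted: let completions drain before retrying
		 * (illustrative back-off; needs <linux/delay.h>) */
		usleep_range(50, 100);
	}
	if (!txd)
		goto err_get_unmap;	/* falls back to the CPU copy */
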
@@ -1574,7 +1593,7 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
goto err_get_unmap;
}
- txd->callback = ntb_tx_copy_callback;
+ txd->callback_result = ntb_tx_copy_callback;
txd->callback_param = entry;
dma_set_unmap(txd, unmap);
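
Switching the descriptor from ->callback to ->callback_result is what delivers the dmaengine_result seen in ntb_tx_copy_callback(); the two fields are alternative notification hooks on struct dma_async_tx_descriptor, and only one should be set. A sketch of the registration-and-submit side, hedged from the standard dmaengine flow since only the callback lines are visible in this hunk:

	txd->callback_result = ntb_tx_copy_callback;	/* result-aware hook */
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);	/* unmap pages after completion */

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);	/* drop our ref; txd holds its own */
	dma_async_issue_pending(chan);
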
@@ -1585,14 +1604,48 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
dmaengine_unmap_put(unmap);
dma_async_issue_pending(chan);
- qp->tx_async++;
- return;
+ return 0;
err_set_unmap:
dmaengine_unmap_put(unmap);
err_get_unmap:
dmaengine_unmap_put(unmap);
err:
+ return -ENXIO;
+}
+
+static void ntb_async_tx(struct ntb_transport_qp *qp,
+ struct ntb_queue_entry *entry)
+{
+ struct ntb_payload_header __iomem *hdr;
+ struct dma_chan *chan = qp->tx_dma_chan;
+ void __iomem *offset;
+ int res;
+
+ entry->tx_index = qp->tx_index;
+ offset = qp->tx_mw + qp->tx_max_frame * entry->tx_index;
+ hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
+ entry->tx_hdr = hdr;
+
+ iowrite32(entry->len, &hdr->len);
+ iowrite32((u32)qp->tx_pkts, &hdr->ver);
+
+ if (!chan)
+ goto err;
+
+ if (entry->len < copy_bytes)
+ goto err;
+
+ res = ntb_async_tx_submit(qp, entry);
+ if (res < 0)
+ goto err;
+
+ if (!entry->retries)
+ qp->tx_async++;
+
+ return;
+
+err:
ntb_memcpy_tx(entry, offset);
qp->tx_memcpy++;
}
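
After the split, ntb_async_tx() is pure dispatch: it stamps the payload header, tries DMA only when a channel exists and the frame meets the copy_bytes threshold, and funnels every failure (no channel, small frame, submit error) into the same CPU-memcpy fallback. The !entry->retries guard keeps the tx_async counter honest, counting a frame once even if it is later replayed. The policy, condensed into a sketch (the real function uses the gotos shown above):

	if (chan && entry->len >= copy_bytes &&
	    ntb_async_tx_submit(qp, entry) == 0) {
		if (!entry->retries)	/* count the first submission only */
			qp->tx_async++;
	} else {
		ntb_memcpy_tx(entry, offset);	/* CPU fallback */
		qp->tx_memcpy++;
	}
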
@@ -1970,6 +2023,9 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
entry->buf = data;
entry->len = len;
entry->flags = 0;
+ entry->errors = 0;
+ entry->retries = 0;
+ entry->tx_index = 0;
rc = ntb_process_tx(qp, entry);
if (rc)
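
Resetting errors, retries, and tx_index at enqueue time matters because queue entries are recycled through the queue pair's free list elsewhere in this driver; without the reset, a reused entry would carry stale counters from its previous transfer into the fallback and accounting logic above. The assignments are in the hunk itself; only the comments here are editorial:

	entry->errors = 0;	/* no DMA failures seen yet */
	entry->retries = 0;	/* first submission of this payload */
	entry->tx_index = 0;	/* assigned for real in ntb_async_tx() */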