author    Ido Schimmel <idosch@mellanox.com>  2019-01-18 15:57:55 +0000
committer David S. Miller <davem@davemloft.net>  2019-01-18 15:12:16 -0800
commit    c9ebea04cb1b0a84d08de00f4800f726ca544ad2 (patch)
tree      a887fc5b4e54743594e18721106dff5a7c3c3dbc /drivers/net/ethernet/mellanox/mlxsw/pci.c
parent    20f5248a502b6d41746c02c699ec9791a03a4e27 (diff)
mlxsw: pci: Ring CQ's doorbell before RDQ's
When a packet should be trapped to the CPU the device consumes a WQE (work queue element) from an RDQ (receive descriptor queue) and copies the packet to the address specified in the WQE. The device then tries to post a CQE (completion queue element) that contains various metadata (e.g., ingress port) about the packet to a CQ (completion queue).

In case the device managed to consume a WQE, but did not manage to post the corresponding CQE, it will get stuck. This unlikely situation can be triggered due to the scheme the driver is currently using to process CQEs. The driver will consume up to 512 CQEs at a time and after processing each corresponding WQE it will ring the RDQ's doorbell, letting the device know that a new WQE was posted for it to consume. Only after processing all the CQEs (up to 512), the driver will ring the CQ's doorbell, letting the device know that new ones can be posted.

Fix this by having the driver ring the CQ's doorbell for every processed CQE, but before ringing the RDQ's doorbell. This guarantees that whenever we post a new WQE, there is a corresponding CQE available. Copy the currently processed CQE to prevent the device from overwriting it with a new CQE after ringing the doorbell.

Note that the driver still arms the CQ only after processing all the pending CQEs, so that interrupts for this CQ will only be delivered after the driver finished its processing.

Before commit 8404f6f2e8ed ("mlxsw: pci: Allow to use CQEs of version 1 and version 2") the issue was virtually impossible to trigger since the number of CQEs was twice the number of WQEs and the number of CQEs processed at a time was equal to the number of available WQEs.

Fixes: 8404f6f2e8ed ("mlxsw: pci: Allow to use CQEs of version 1 and version 2")
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Reported-by: Semion Lisyansky <semionl@mellanox.com>
Tested-by: Semion Lisyansky <semionl@mellanox.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
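To illustrate the ordering the commit message describes, here is a minimal, self-contained C sketch of the per-CQE processing loop. It is not the driver code: struct cqe, ring_cq_doorbell(), ring_rdq_doorbell(), handle_rdq_wqe() and process_cq() are hypothetical stand-ins for the mlxsw internals, kept only to show "copy CQE, ring CQ doorbell, then handle the WQE and ring the RDQ doorbell".

#include <stdio.h>
#include <string.h>

#define CQE_SIZE_MAX 64		/* stand-in for MLXSW_PCI_CQE_SIZE_MAX */

struct cqe {
	unsigned char data[CQE_SIZE_MAX];
};

/* Stubs standing in for the real doorbell writes and WQE handling. */
static void ring_cq_doorbell(void)  { puts("CQ doorbell: CQE slot returned to device"); }
static void ring_rdq_doorbell(void) { puts("RDQ doorbell: new WQE posted"); }

static void handle_rdq_wqe(const struct cqe *cqe)
{
	(void)cqe;	/* real code parses metadata, passes the packet up, reposts the WQE */
	puts("handled packet, reposted WQE");
	ring_rdq_doorbell();
}

static void process_cq(const struct cqe *queue, int pending)
{
	struct cqe ncqe;
	int i;

	for (i = 0; i < pending; i++) {
		/* Copy the CQE first: once the CQ doorbell is rung the
		 * device may overwrite this slot with a new completion.
		 */
		memcpy(&ncqe, &queue[i], sizeof(ncqe));

		/* Ring the CQ doorbell before the RDQ doorbell so that a
		 * free CQE always exists for any WQE the device consumes.
		 */
		ring_cq_doorbell();
		handle_rdq_wqe(&ncqe);
	}
	/* The CQ is armed for interrupts only once, after the loop. */
}

int main(void)
{
	struct cqe queue[4];

	memset(queue, 0, sizeof(queue));
	process_cq(queue, 4);
	return 0;
}

The key design point mirrored from the patch is that the per-iteration CQ doorbell precedes any action that can make the device consume another WQE, while arming the CQ for interrupts stays outside the loop.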
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlxsw/pci.c')
-rw-r--r--	drivers/net/ethernet/mellanox/mlxsw/pci.c	12
1 file changed, 7 insertions, 5 deletions
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 66b8098c6fd2..91bf294f7677 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -604,29 +604,31 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
 		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
 		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
 		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
+		char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
+
+		memcpy(ncqe, cqe, q->elem_size);
+		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
 
 		if (sendq) {
 			struct mlxsw_pci_queue *sdq;
 
 			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
 			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
-						 wqe_counter, cqe);
+						 wqe_counter, ncqe);
 			q->u.cq.comp_sdq_count++;
 		} else {
 			struct mlxsw_pci_queue *rdq;
 
 			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
 			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
-						 wqe_counter, q->u.cq.v, cqe);
+						 wqe_counter, q->u.cq.v, ncqe);
 			q->u.cq.comp_rdq_count++;
 		}
 		if (++items == credits)
 			break;
 	}
-	if (items) {
-		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+	if (items)
 		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
-	}
 }
 
 static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)