author	Michael S. Tsirkin <mst@redhat.com>	2021-04-13 01:35:26 -0400
committer	Michael S. Tsirkin <mst@redhat.com>	2021-07-03 04:51:17 -0400
commit	5a2f966d0f3fa0ef6dada7ab9eda74cacee96b8a (patch)
tree	4a35a4b359dbc99a4a6f7d5d43f6513c0a326fc3 /drivers/net/virtio_net.c
parent	6f5312f801836e6af9bcbb0bdb44dc423e129206 (diff)
virtio_net: move tx vq operation under tx queue lock
It's unsafe to operate a vq from multiple threads. Unfortunately this is
exactly what we do when invoking clean tx poll from rx napi. The same
happens with napi-tx even without the opportunistic cleaning from the
receive interrupt: that races with processing the vq in start_xmit.

As a fix, move everything that deals with the vq under the tx lock.

Fixes: b92f1e6751a6 ("virtio-net: transmit napi")
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
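The locking rule the patch enforces can be modeled outside the kernel. Below is a minimal user-space sketch, not kernel code: vq_op, vq_used, txq_lock and the thread names are invented for illustration, and the vq state is reduced to a single counter. It shows why every vq access from the napi path must take the same lock that the xmit path holds:

/* Build with: cc -pthread race_model.c
 * Hypothetical model: two threads share one "vq"; the tx queue lock
 * must cover every vq operation on both sides. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t txq_lock = PTHREAD_MUTEX_INITIALIZER;
static int vq_used; /* stands in for the vq's internal state */

static void vq_op(int delta)
{
	vq_used += delta; /* read-modify-write: racy without txq_lock */
}

static void *xmit_thread(void *arg) /* models start_xmit */
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&txq_lock); /* xmit holds the tx lock */
		vq_op(1);
		pthread_mutex_unlock(&txq_lock);
	}
	return NULL;
}

static void *napi_thread(void *arg) /* models tx napi poll */
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&txq_lock); /* the fix: vq ops under tx lock */
		vq_op(-1); /* clean buffers, re-arm callbacks */
		pthread_mutex_unlock(&txq_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	pthread_create(&a, NULL, xmit_thread, NULL);
	pthread_create(&b, NULL, napi_thread, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("vq_used = %d (always 0 when all vq ops are locked)\n", vq_used);
	return 0;
}

Dropping the lock/unlock pair in napi_thread models the pre-patch behaviour: both threads then perform unsynchronized read-modify-writes on shared state, which is the class of race the patch closes.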
Diffstat (limited to 'drivers/net/virtio_net.c')
 drivers/net/virtio_net.c | 22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4fff7cd24a88..9573f7622ef6 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1592,6 +1592,8 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 	struct virtnet_info *vi = sq->vq->vdev->priv;
 	unsigned int index = vq2txq(sq->vq);
 	struct netdev_queue *txq;
+	int opaque;
+	bool done;
 
 	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
 		/* We don't need to enable cb for XDP */
@@ -1601,10 +1603,28 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 
 	txq = netdev_get_tx_queue(vi->dev, index);
 	__netif_tx_lock(txq, raw_smp_processor_id());
+	virtqueue_disable_cb(sq->vq);
 	free_old_xmit_skbs(sq, true);
+
+	opaque = virtqueue_enable_cb_prepare(sq->vq);
+
+	done = napi_complete_done(napi, 0);
+
+	if (!done)
+		virtqueue_disable_cb(sq->vq);
+
 	__netif_tx_unlock(txq);
 
-	virtqueue_napi_complete(napi, sq->vq, 0);
+	if (done) {
+		if (unlikely(virtqueue_poll(sq->vq, opaque))) {
+			if (napi_schedule_prep(napi)) {
+				__netif_tx_lock(txq, raw_smp_processor_id());
+				virtqueue_disable_cb(sq->vq);
+				__netif_tx_unlock(txq);
+				__napi_schedule(napi);
+			}
+		}
+	}
 
 	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
 		netif_tx_wake_queue(txq);
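For reference, after the patch virtnet_poll_tx() reads roughly as follows. This is a sketch reconstructed from the hunks above; the function header, the body of the XDP branch, and the trailing return are surrounding driver context rather than part of this change, and may differ slightly from the exact tree:

static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{
	struct send_queue *sq = container_of(napi, struct send_queue, napi);
	struct virtnet_info *vi = sq->vq->vdev->priv;
	unsigned int index = vq2txq(sq->vq);
	struct netdev_queue *txq;
	int opaque;
	bool done;

	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
		/* We don't need to enable cb for XDP */
		napi_complete_done(napi, 0);
		return 0;
	}

	txq = netdev_get_tx_queue(vi->dev, index);
	__netif_tx_lock(txq, raw_smp_processor_id());
	virtqueue_disable_cb(sq->vq);	/* every vq op is now under the tx lock */
	free_old_xmit_skbs(sq, true);

	/* Re-arm callbacks and remember where we were in the used ring. */
	opaque = virtqueue_enable_cb_prepare(sq->vq);

	done = napi_complete_done(napi, 0);

	if (!done)
		virtqueue_disable_cb(sq->vq);	/* still polling: keep cbs off */

	__netif_tx_unlock(txq);

	if (done) {
		/* More buffers completed between enable_cb_prepare and
		 * napi_complete_done: reschedule, taking the tx lock again
		 * before touching the vq. */
		if (unlikely(virtqueue_poll(sq->vq, opaque))) {
			if (napi_schedule_prep(napi)) {
				__netif_tx_lock(txq, raw_smp_processor_id());
				virtqueue_disable_cb(sq->vq);
				__netif_tx_unlock(txq);
				__napi_schedule(napi);
			}
		}
	}

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);

	return 0;
}

Note the design choice: napi_complete_done() and the virtqueue_poll() re-check replace the old virtqueue_napi_complete() call, so the only vq operation left outside the tx lock is the read-only poll of the opaque token, and any rescheduling path re-takes the lock before disabling callbacks.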