| author | Jason Gunthorpe <jgg@nvidia.com> | 2023-06-27 14:06:29 -0300 |
|---|---|---|
| committer | Jason Gunthorpe <jgg@nvidia.com> | 2023-06-27 14:06:29 -0300 |
| commit | 5f004bcaee4cb552cf1b46a505f18f08777db7e5 (patch) | |
| tree | c81bdd8e01ba8de9862c8bdb1b210559d4bcf154 /drivers/net/virtio_net.c | |
| parent | 4251f631fdfba0b38e4634510c5950ee157cc069 (diff) | |
| parent | 6995e2de6891c724bfeb2db33d7b87775f913ad1 (diff) | |
Merge tag 'v6.4' into rdma.git for-next
Linux 6.4
Resolve conflicts between rdma rc and next in rxe_cq matching linux-next:
drivers/infiniband/sw/rxe/rxe_cq.c:
https://lore.kernel.org/r/20230622115246.365d30ad@canb.auug.org.au
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Diffstat (limited to 'drivers/net/virtio_net.c')
| -rw-r--r-- | drivers/net/virtio_net.c | 77 |
1 file changed, 52 insertions(+), 25 deletions(-)
```diff
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a12ae26db0e2..486b5849033d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -205,6 +205,8 @@ struct control_buf {
 	__virtio16 vid;
 	__virtio64 offloads;
 	struct virtio_net_ctrl_rss rss;
+	struct virtio_net_ctrl_coal_tx coal_tx;
+	struct virtio_net_ctrl_coal_rx coal_rx;
 };
 
 struct virtnet_info {
@@ -1868,6 +1870,38 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 	return received;
 }
 
+static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
+{
+	virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
+	napi_disable(&vi->rq[qp_index].napi);
+	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
+}
+
+static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
+{
+	struct net_device *dev = vi->dev;
+	int err;
+
+	err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
+			       vi->rq[qp_index].napi.napi_id);
+	if (err < 0)
+		return err;
+
+	err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
+					 MEM_TYPE_PAGE_SHARED, NULL);
+	if (err < 0)
+		goto err_xdp_reg_mem_model;
+
+	virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
+	virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
+
+	return 0;
+
+err_xdp_reg_mem_model:
+	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
+	return err;
+}
+
 static int virtnet_open(struct net_device *dev)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
@@ -1881,22 +1915,20 @@ static int virtnet_open(struct net_device *dev)
 		if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
 			schedule_delayed_work(&vi->refill, 0);
 
-		err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id);
+		err = virtnet_enable_queue_pair(vi, i);
 		if (err < 0)
-			return err;
-
-		err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
-						 MEM_TYPE_PAGE_SHARED, NULL);
-		if (err < 0) {
-			xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
-			return err;
-		}
-
-		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
-		virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
+			goto err_enable_qp;
 	}
 
 	return 0;
+
+err_enable_qp:
+	disable_delayed_refill(vi);
+	cancel_delayed_work_sync(&vi->refill);
+
+	for (i--; i >= 0; i--)
+		virtnet_disable_queue_pair(vi, i);
+	return err;
 }
 
 static int virtnet_poll_tx(struct napi_struct *napi, int budget)
@@ -2305,11 +2337,8 @@ static int virtnet_close(struct net_device *dev)
 	/* Make sure refill_work doesn't re-enable napi! */
 	cancel_delayed_work_sync(&vi->refill);
 
-	for (i = 0; i < vi->max_queue_pairs; i++) {
-		virtnet_napi_tx_disable(&vi->sq[i].napi);
-		napi_disable(&vi->rq[i].napi);
-		xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
-	}
+	for (i = 0; i < vi->max_queue_pairs; i++)
+		virtnet_disable_queue_pair(vi, i);
 
 	return 0;
 }
@@ -2907,12 +2936,10 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
 				       struct ethtool_coalesce *ec)
 {
 	struct scatterlist sgs_tx, sgs_rx;
-	struct virtio_net_ctrl_coal_tx coal_tx;
-	struct virtio_net_ctrl_coal_rx coal_rx;
 
-	coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
-	coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
-	sg_init_one(&sgs_tx, &coal_tx, sizeof(coal_tx));
+	vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+	vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+	sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
 				  VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
@@ -2923,9 +2950,9 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
 	vi->tx_usecs = ec->tx_coalesce_usecs;
 	vi->tx_max_packets = ec->tx_max_coalesced_frames;
 
-	coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
-	coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
-	sg_init_one(&sgs_rx, &coal_rx, sizeof(coal_rx));
+	vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+	vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+	sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
 				  VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
```
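
The virtnet_open() refactor above is a textbook loop-unwind error path: queue pairs are brought up one at a time, and if enabling pair i fails, the new err_enable_qp label cancels the deferred refill work and tears down pairs i-1..0 in reverse order before propagating the error, where the pre-patch code returned immediately and left earlier pairs with NAPI enabled and their xdp_rxq registrations leaked. A minimal userspace sketch of the same pattern, with hypothetical enable_one()/disable_one() helpers standing in for virtnet_enable_queue_pair()/virtnet_disable_queue_pair():

```c
#include <stdio.h>

#define NUM_PAIRS 4

/* Hypothetical stand-in for virtnet_enable_queue_pair(); fails at
 * index 2 to exercise the unwind path. */
static int enable_one(int i)
{
	if (i == 2)
		return -1; /* simulated failure */
	printf("enabled pair %d\n", i);
	return 0;
}

/* Hypothetical stand-in for virtnet_disable_queue_pair(). */
static void disable_one(int i)
{
	printf("disabled pair %d\n", i);
}

static int open_all(void)
{
	int i, err = 0;

	for (i = 0; i < NUM_PAIRS; i++) {
		err = enable_one(i);
		if (err < 0)
			goto err_enable;
	}
	return 0;

err_enable:
	/* Unwind only what was actually enabled, newest first, mirroring
	 * the for (i--; i >= 0; i--) loop in virtnet_open(). */
	for (i--; i >= 0; i--)
		disable_one(i);
	return err;
}

int main(void)
{
	return open_all() < 0 ? 1 : 0;
}
```

Running this prints "enabled pair 0", "enabled pair 1", then "disabled pair 1", "disabled pair 0": each successfully enabled pair is torn down exactly once, and pair 2, whose enable failed, is never disabled.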
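The coalescing hunks move coal_tx/coal_rx from stack locals into the preallocated struct control_buf (vi->ctrl), consistent with how the driver already stages every other control-virtqueue payload: a buffer passed to sg_init_one() and posted to the control queue must stay valid and device-accessible while the command is in flight, which a stack variable is not guaranteed to be (notably with CONFIG_VMAP_STACK, where the stack may be vmalloc'ed). Note also that the fields are stored via cpu_to_le32(), since virtio control structures are little-endian regardless of host byte order. Below is a small portable sketch of what such a conversion does; to_le32() is an illustrative stand-in, not the kernel helper:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the kernel's cpu_to_le32(): return a
 * value whose in-memory byte order is little-endian on any host. */
static uint32_t to_le32(uint32_t v)
{
	uint8_t b[4] = {
		v & 0xff, (v >> 8) & 0xff, (v >> 16) & 0xff, (v >> 24) & 0xff
	};
	uint32_t out;

	memcpy(&out, b, sizeof(out)); /* low-order byte lands first */
	return out;
}

int main(void)
{
	/* e.g. ec->tx_coalesce_usecs = 64, as set with ethtool -C */
	uint32_t wire = to_le32(64);
	const unsigned char *p = (const unsigned char *)&wire;

	/* prints "40 00 00 00" on little- and big-endian hosts alike */
	printf("%02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
	return 0;
}
```

On a little-endian host the conversion is a no-op; on a big-endian host it byte-swaps, so the device sees the same layout either way.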
