author | Jason Gunthorpe <jgg@nvidia.com> | 2024-08-27 10:32:25 -0300
committer | Jason Gunthorpe <jgg@nvidia.com> | 2024-08-28 09:11:29 -0300
commit | 34cd19288161313b6ba24004206ab6a64fdef7c5 (patch)
tree | f83e4dc97edda3e90ddf03d6129f3a458d28c0e2 /drivers/net/virtio_net.c
parent | 04e36fd27a2aebb03ef019debc4df247f2a427c6 (diff)
parent | 10a104c0debbb19a1e45193d5670510216e339ff (diff)
Merge branch 'bnxt_re_variable_wqes' into rdma.git for-next
Selvin Xavier says:
=============
Enable variable size Work Queue entry support for Gen P7
adapters. This makes better use of queue memory and PCI bandwidth,
since the send queue work entries are smaller.
=============
Based on v6.11-rc5 for dependencies.
* bnxt_re_variable_wqes: (829 commits)
RDMA/bnxt_re: Enable variable size WQEs for user space applications
RDMA/bnxt_re: Handle variable WQE support for user applications
RDMA/bnxt_re: Fix the table size for PSN/MSN entries
RDMA/bnxt_re: Get the WQE index from slot index while completing the WQEs
RDMA/bnxt_re: Add support for Variable WQE in Genp7 adapters
Linux 6.11-rc5
...
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r-- | drivers/net/virtio_net.c | 16
1 file changed, 13 insertions, 3 deletions
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 0383a3e136d6..c6af18948092 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2867,8 +2867,8 @@ static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
 	if (err < 0)
 		goto err_xdp_reg_mem_model;
 
-	virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
 	netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, qp_index));
+	virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
 	virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
 
 	return 0;
@@ -3658,6 +3658,9 @@ static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
 {
 	int err;
 
+	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
+		return -EOPNOTSUPP;
+
 	err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
 					    max_usecs, max_packets);
 	if (err)
@@ -3675,6 +3678,9 @@ static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
 {
 	int err;
 
+	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
+		return -EOPNOTSUPP;
+
 	err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
 					    max_usecs, max_packets);
 	if (err)
@@ -3743,7 +3749,11 @@ static int virtnet_set_ringparam(struct net_device *dev,
 			err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i,
 							       vi->intr_coal_tx.max_usecs,
 							       vi->intr_coal_tx.max_packets);
-			if (err)
+
+			/* Don't break the tx resize action if the vq coalescing is not
+			 * supported. The same is true for rx resize below.
+			 */
+			if (err && err != -EOPNOTSUPP)
 				return err;
 		}
 
@@ -3758,7 +3768,7 @@ static int virtnet_set_ringparam(struct net_device *dev,
 							       vi->intr_coal_rx.max_usecs,
 							       vi->intr_coal_rx.max_packets);
 			mutex_unlock(&vi->rq[i].dim_lock);
-			if (err)
+			if (err && err != -EOPNOTSUPP)
 				return err;
 		}
 	}
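
The virtio_net hunks above follow a two-part pattern: a helper declines the optional operation with -EOPNOTSUPP when the VIRTIO_NET_F_VQ_NOTF_COAL feature was not negotiated, and the caller treats exactly that error as non-fatal so a ring resize still completes. The sketch below is a minimal standalone illustration of that pattern, not kernel code; fake_dev, restore_coalescing and resize_ring are hypothetical names invented for the example.

/* Standalone sketch of the -EOPNOTSUPP tolerance pattern shown in the
 * diff above. All names here are hypothetical; this is not kernel code.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_dev {
	bool has_vq_coal;	/* stands in for VIRTIO_NET_F_VQ_NOTF_COAL */
};

/* Helper: refuse the optional per-queue coalescing step when the
 * feature is absent, instead of sending a command the device
 * cannot handle.
 */
static int restore_coalescing(struct fake_dev *dev)
{
	if (!dev->has_vq_coal)
		return -EOPNOTSUPP;
	/* ... send the per-queue coalescing command here ... */
	return 0;
}

/* Caller: the resize must still succeed when coalescing is
 * unsupported, so only errors other than -EOPNOTSUPP abort it.
 */
static int resize_ring(struct fake_dev *dev)
{
	int err;

	/* ... resize the queue here ... */

	err = restore_coalescing(dev);
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

int main(void)
{
	struct fake_dev dev = { .has_vq_coal = false };

	/* Prints 0: the resize succeeds even though coalescing is
	 * unsupported on this device.
	 */
	printf("resize_ring: %d\n", resize_ring(&dev));
	return 0;
}

Keeping the feature check inside the helper means every caller gets the same refusal for free, while each caller decides for itself whether -EOPNOTSUPP is tolerable, which is exactly the split the virtnet_set_ringparam change relies on.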