Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--  drivers/net/virtio_net.c  215
1 file changed, 182 insertions, 33 deletions
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 98dc9b49d56b..fe7f314d65c9 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -22,6 +22,7 @@
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>
+#include <net/netdev_rx_queue.h>
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);
@@ -126,6 +127,11 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
#define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN ARRAY_SIZE(virtnet_rq_stats_desc)
+struct virtnet_interrupt_coalesce {
+ u32 max_packets;
+ u32 max_usecs;
+};
+
/* The dma information of pages allocated at a time. */
struct virtnet_rq_dma {
dma_addr_t addr;
@@ -147,6 +153,8 @@ struct send_queue {
struct virtnet_sq_stats stats;
+ struct virtnet_interrupt_coalesce intr_coal;
+
struct napi_struct napi;
/* Record whether sq is in reset state. */
@@ -164,6 +172,8 @@ struct receive_queue {
struct virtnet_rq_stats stats;
+ struct virtnet_interrupt_coalesce intr_coal;
+
/* Chain pages by the private ptr. */
struct page *pages;
@@ -221,6 +231,7 @@ struct control_buf {
struct virtio_net_ctrl_rss rss;
struct virtio_net_ctrl_coal_tx coal_tx;
struct virtio_net_ctrl_coal_rx coal_rx;
+ struct virtio_net_ctrl_coal_vq coal_vq;
};
struct virtnet_info {
@@ -295,10 +306,8 @@ struct virtnet_info {
u32 speed;
/* Interrupt coalescing settings */
- u32 tx_usecs;
- u32 rx_usecs;
- u32 tx_max_packets;
- u32 rx_max_packets;
+ struct virtnet_interrupt_coalesce intr_coal_tx;
+ struct virtnet_interrupt_coalesce intr_coal_rx;
unsigned long guest_offloads;
unsigned long guest_offloads_capable;
@@ -317,6 +326,14 @@ struct padded_vnet_hdr {
char padding[12];
};
+struct virtio_net_common_hdr {
+ union {
+ struct virtio_net_hdr hdr;
+ struct virtio_net_hdr_mrg_rxbuf mrg_hdr;
+ struct virtio_net_hdr_v1_hash hash_v1_hdr;
+ };
+};
+
static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
@@ -358,9 +375,10 @@ static int rxq2vq(int rxq)
return rxq * 2;
}
-static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
+static inline struct virtio_net_common_hdr *
+skb_vnet_common_hdr(struct sk_buff *skb)
{
- return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
+ return (struct virtio_net_common_hdr *)skb->cb;
}
/*
@@ -483,7 +501,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
unsigned int headroom)
{
struct sk_buff *skb;
- struct virtio_net_hdr_mrg_rxbuf *hdr;
+ struct virtio_net_common_hdr *hdr;
unsigned int copy, hdr_len, hdr_padded_len;
struct page *page_to_free = NULL;
int tailroom, shinfo_size;
@@ -568,7 +586,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
give_pages(rq, page);
ok:
- hdr = skb_vnet_hdr(skb);
+ hdr = skb_vnet_common_hdr(skb);
memcpy(hdr, hdr_p, hdr_len);
if (page_to_free)
put_page(page_to_free);
@@ -1130,7 +1148,7 @@ static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
return NULL;
buf += header_offset;
- memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
+ memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len);
return skb;
}
@@ -1741,7 +1759,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
{
struct net_device *dev = vi->dev;
struct sk_buff *skb;
- struct virtio_net_hdr_mrg_rxbuf *hdr;
+ struct virtio_net_common_hdr *hdr;
if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
@@ -1761,9 +1779,9 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
if (unlikely(!skb))
return;
- hdr = skb_vnet_hdr(skb);
+ hdr = skb_vnet_common_hdr(skb);
if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
- virtio_skb_set_hash((const struct virtio_net_hdr_v1_hash *)hdr, skb);
+ virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2275,7 +2293,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
if (can_push)
hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
else
- hdr = skb_vnet_hdr(skb);
+ hdr = &skb_vnet_common_hdr(skb)->mrg_hdr;
if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
virtio_is_little_endian(vi->vdev), false,
@@ -3226,8 +3244,8 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
return -EINVAL;
/* Save parameters */
- vi->tx_usecs = ec->tx_coalesce_usecs;
- vi->tx_max_packets = ec->tx_max_coalesced_frames;
+ vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
+ vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
@@ -3239,8 +3257,57 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
return -EINVAL;
/* Save parameters */
- vi->rx_usecs = ec->rx_coalesce_usecs;
- vi->rx_max_packets = ec->rx_max_coalesced_frames;
+ vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
+ vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
+
+ return 0;
+}
+
+static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
+ u16 vqn, u32 max_usecs, u32 max_packets)
+{
+ struct scatterlist sgs;
+
+ vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
+ vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
+ vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
+ sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
+
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+ VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
+ &sgs))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
+ struct ethtool_coalesce *ec,
+ u16 queue)
+{
+ int err;
+
+ if (ec->rx_coalesce_usecs || ec->rx_max_coalesced_frames) {
+ err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
+ ec->rx_coalesce_usecs,
+ ec->rx_max_coalesced_frames);
+ if (err)
+ return err;
+ /* Save parameters */
+ vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs;
+ vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames;
+ }
+
+ if (ec->tx_coalesce_usecs || ec->tx_max_coalesced_frames) {
+ err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
+ ec->tx_coalesce_usecs,
+ ec->tx_max_coalesced_frames);
+ if (err)
+ return err;
+ /* Save parameters */
+ vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs;
+ vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames;
+ }
return 0;
}
@@ -3260,22 +3327,42 @@ static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
return 0;
}
+static int virtnet_should_update_vq_weight(int dev_flags, int weight,
+ int vq_weight, bool *should_update)
+{
+ if (weight ^ vq_weight) {
+ if (dev_flags & IFF_UP)
+ return -EBUSY;
+ *should_update = true;
+ }
+
+ return 0;
+}
+
static int virtnet_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct virtnet_info *vi = netdev_priv(dev);
- int ret, i, napi_weight;
+ int ret, queue_number, napi_weight;
bool update_napi = false;
/* Can't change NAPI weight if the link is up */
napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
- if (napi_weight ^ vi->sq[0].napi.weight) {
- if (dev->flags & IFF_UP)
- return -EBUSY;
- else
- update_napi = true;
+ for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) {
+ ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
+ vi->sq[queue_number].napi.weight,
+ &update_napi);
+ if (ret)
+ return ret;
+
+ if (update_napi) {
+ /* All queues that belong to [queue_number, vi->max_queue_pairs] will be
+ * updated for the sake of simplicity, which might not be necessary
+ */
+ break;
+ }
}
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
@@ -3287,8 +3374,8 @@ static int virtnet_set_coalesce(struct net_device *dev,
return ret;
if (update_napi) {
- for (i = 0; i < vi->max_queue_pairs; i++)
- vi->sq[i].napi.weight = napi_weight;
+ for (; queue_number < vi->max_queue_pairs; queue_number++)
+ vi->sq[queue_number].napi.weight = napi_weight;
}
return ret;
@@ -3302,10 +3389,67 @@ static int virtnet_get_coalesce(struct net_device *dev,
struct virtnet_info *vi = netdev_priv(dev);
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
- ec->rx_coalesce_usecs = vi->rx_usecs;
- ec->tx_coalesce_usecs = vi->tx_usecs;
- ec->tx_max_coalesced_frames = vi->tx_max_packets;
- ec->rx_max_coalesced_frames = vi->rx_max_packets;
+ ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs;
+ ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs;
+ ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets;
+ ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets;
+ } else {
+ ec->rx_max_coalesced_frames = 1;
+
+ if (vi->sq[0].napi.weight)
+ ec->tx_max_coalesced_frames = 1;
+ }
+
+ return 0;
+}
+
+static int virtnet_set_per_queue_coalesce(struct net_device *dev,
+ u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ int ret, napi_weight;
+ bool update_napi = false;
+
+ if (queue >= vi->max_queue_pairs)
+ return -EINVAL;
+
+ /* Can't change NAPI weight if the link is up */
+ napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
+ ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
+ vi->sq[queue].napi.weight,
+ &update_napi);
+ if (ret)
+ return ret;
+
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
+ ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue);
+ else
+ ret = virtnet_coal_params_supported(ec);
+
+ if (ret)
+ return ret;
+
+ if (update_napi)
+ vi->sq[queue].napi.weight = napi_weight;
+
+ return 0;
+}
+
+static int virtnet_get_per_queue_coalesce(struct net_device *dev,
+ u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ if (queue >= vi->max_queue_pairs)
+ return -EINVAL;
+
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
+ ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
+ ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
+ ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
+ ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
} else {
ec->rx_max_coalesced_frames = 1;
@@ -3446,6 +3590,8 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.set_link_ksettings = virtnet_set_link_ksettings,
.set_coalesce = virtnet_set_coalesce,
.get_coalesce = virtnet_get_coalesce,
+ .set_per_queue_coalesce = virtnet_set_per_queue_coalesce,
+ .get_per_queue_coalesce = virtnet_get_per_queue_coalesce,
.get_rxfh_key_size = virtnet_get_rxfh_key_size,
.get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
.get_rxfh = virtnet_get_rxfh,
@@ -4128,6 +4274,8 @@ static bool virtnet_validate_features(struct virtio_device *vdev)
VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
"VIRTIO_NET_F_CTRL_VQ") ||
VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
+ "VIRTIO_NET_F_CTRL_VQ") ||
+ VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_VQ_NOTF_COAL,
"VIRTIO_NET_F_CTRL_VQ"))) {
return false;
}
@@ -4295,10 +4443,10 @@ static int virtnet_probe(struct virtio_device *vdev)
}
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
- vi->rx_usecs = 0;
- vi->tx_usecs = 0;
- vi->tx_max_packets = 0;
- vi->rx_max_packets = 0;
+ vi->intr_coal_rx.max_usecs = 0;
+ vi->intr_coal_tx.max_usecs = 0;
+ vi->intr_coal_tx.max_packets = 0;
+ vi->intr_coal_rx.max_packets = 0;
}
if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
@@ -4552,6 +4700,7 @@ static struct virtio_device_id id_table[] = {
VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
+ VIRTIO_NET_F_VQ_NOTF_COAL, \
VIRTIO_NET_F_GUEST_HDRLEN
static unsigned int features[] = {
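
For reference, below is a sketch of the per-virtqueue coalescing control request that virtnet_send_ctrl_coal_vq_cmd() submits over the control virtqueue. The layout is inferred from the field accesses visible in the diff (coal_vq.vqn, coal_vq.coal.max_usecs, coal_vq.coal.max_packets) and from the virtio spec; the authoritative definitions live in include/uapi/linux/virtio_net.h, so treat this as an illustrative reconstruction rather than the canonical header.

/* Inferred layout of the VQ coalescing control structures; verify against
 * include/uapi/linux/virtio_net.h before relying on the exact fields/values.
 */
struct virtio_net_ctrl_coal {
	__le32 max_packets;	/* raise the interrupt after this many packets ... */
	__le32 max_usecs;	/* ... or after this many microseconds, whichever comes first */
};

struct virtio_net_ctrl_coal_vq {
	__le16 vqn;		/* virtqueue index, cf. rxq2vq()/txq2vq() in the driver */
	__le16 reserved;
	struct virtio_net_ctrl_coal coal;
};

/* Command class/code used by virtnet_send_ctrl_coal_vq_cmd(), per the virtio spec */
#define VIRTIO_NET_CTRL_NOTF_COAL		6
#define VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET	2

Once .set_per_queue_coalesce/.get_per_queue_coalesce are wired into virtnet_ethtool_ops as above, userspace can reach them through ethtool's per-queue interface, e.g. ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 10 rx-frames 64 (device name and values are illustrative only).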