Diffstat (limited to 'drivers/net/hyperv')
-rw-r--r--  drivers/net/hyperv/hyperv_net.h    19
-rw-r--r--  drivers/net/hyperv/netvsc.c        73
-rw-r--r--  drivers/net/hyperv/netvsc_bpf.c     2
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c   123
-rw-r--r--  drivers/net/hyperv/netvsc_trace.h   8
-rw-r--r--  drivers/net/hyperv/rndis_filter.c  46
6 files changed, 170 insertions, 101 deletions
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 810977952f95..cb6f5482d203 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -158,7 +158,6 @@ struct hv_netvsc_packet {
u8 cp_partial; /* partial copy into send buffer */
u8 rmsg_size; /* RNDIS header and PPI size */
- u8 rmsg_pgcnt; /* page count of RNDIS header and PPI */
u8 page_buf_cnt;
u16 q_idx;
@@ -464,7 +463,7 @@ struct nvsp_1_message_send_receive_buffer_complete {
* LargeOffset SmallOffset
*/
- struct nvsp_1_receive_buffer_section sections[1];
+ struct nvsp_1_receive_buffer_section sections[];
} __packed;
/*
@@ -882,7 +881,7 @@ struct nvsp_message {
#define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */
#define VRSS_CHANNEL_MAX 64
-#define VRSS_CHANNEL_DEFAULT 8
+#define VRSS_CHANNEL_DEFAULT 16
#define RNDIS_MAX_PKT_DEFAULT 8
#define RNDIS_PKT_ALIGN_DEFAULT 8
@@ -893,6 +892,18 @@ struct nvsp_message {
sizeof(struct nvsp_message))
#define NETVSC_MIN_IN_MSG_SIZE sizeof(struct vmpacket_descriptor)
+/* Maximum # of contiguous data ranges that can make up a transmitted packet.
+ * Typically it's the max SKB fragments plus 2 for the rndis packet and the
+ * linear portion of the SKB. But if MAX_SKB_FRAGS is large, the value may
+ * need to be limited to MAX_PAGE_BUFFER_COUNT, which is the max # of entries
+ * in a GPA direct packet sent to netvsp over VMBus.
+ */
+#if MAX_SKB_FRAGS + 2 < MAX_PAGE_BUFFER_COUNT
+#define MAX_DATA_RANGES (MAX_SKB_FRAGS + 2)
+#else
+#define MAX_DATA_RANGES MAX_PAGE_BUFFER_COUNT
+#endif
+
/* Estimated requestor size:
* out_ring_size/min_out_msg_size + in_ring_size/min_in_msg_size
*/
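Worked numbers for the new macro (hedged: both inputs are config dependent): with the common default CONFIG_MAX_SKB_FRAGS of 17, MAX_SKB_FRAGS + 2 = 19, which is below MAX_PAGE_BUFFER_COUNT (32 in <linux/hyperv.h>), so the first branch is taken; a kernel built with CONFIG_MAX_SKB_FRAGS=45 would overshoot (47) and fall back to the 32-entry cap. A sketch of the invariant as a build-time check:

/* Illustration only, not part of the patch: the bound the #if above
 * guarantees, written as an assertion (<linux/build_bug.h> provides
 * static_assert in kernel code).
 */
static_assert(MAX_DATA_RANGES <= MAX_PAGE_BUFFER_COUNT,
	      "a GPA direct packet holds at most MAX_PAGE_BUFFER_COUNT ranges");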
@@ -1166,6 +1177,8 @@ struct netvsc_device {
u32 max_chn;
u32 num_chn;
+ u32 netvsc_gso_max_size;
+
atomic_t open_chn;
struct work_struct subchan_work;
wait_queue_head_t subchan_open;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 2b6ec979a62f..720104661d7f 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -712,8 +712,13 @@ void netvsc_device_remove(struct hv_device *device)
for (i = 0; i < net_device->num_chn; i++) {
/* See also vmbus_reset_channel_cb(). */
/* only disable enabled NAPI channel */
- if (i < ndev->real_num_rx_queues)
+ if (i < ndev->real_num_rx_queues) {
+ netif_queue_set_napi(ndev, i, NETDEV_QUEUE_TYPE_TX,
+ NULL);
+ netif_queue_set_napi(ndev, i, NETDEV_QUEUE_TYPE_RX,
+ NULL);
napi_disable(&net_device->chan_table[i].napi);
+ }
netif_napi_del(&net_device->chan_table[i].napi);
}
@@ -861,7 +866,8 @@ static void netvsc_send_completion(struct net_device *ndev,
case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
if (msglen < sizeof(struct nvsp_message_header) +
- sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
+ struct_size_t(struct nvsp_1_message_send_receive_buffer_complete,
+ sections, 1)) {
netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
msglen);
return;
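The old check sized against sizeof() of a struct whose trailing member was sections[1]; with that member now a flexible array, struct_size_t() from <linux/overflow.h> restores the same bound. A rough sketch of the equivalent arithmetic (illustration, not the patch's code):

/* struct_size_t(T, sections, 1) is, minus its overflow saturation,
 * the fixed part of T plus room for one flexible-array element --
 * the same value sizeof() produced when the member was sections[1].
 */
size_t min_len = sizeof(struct nvsp_message_header) +
		 sizeof(struct nvsp_1_message_send_receive_buffer_complete) +
		 1 * sizeof(struct nvsp_1_receive_buffer_section);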
@@ -947,8 +953,7 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+ pend_size;
int i;
u32 padding = 0;
- u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
- packet->page_buf_cnt;
+ u32 page_count = packet->cp_partial ? 1 : packet->page_buf_cnt;
u32 remain;
/* Add padding */
@@ -1049,6 +1054,42 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
return 0;
}
+/* Build an "array" of mpb entries describing the data to be transferred
+ * over VMBus. After the desc header fields, each "array" entry is variable
+ * size, and each entry starts after the end of the previous entry. The
+ * "offset" and "len" fields for each entry imply the size of the entry.
+ *
+ * The pfns are in units of HV_HYP_PAGE_SIZE, because all communication with Hyper-V
+ * uses that granularity, even if the system page size of the guest is larger.
+ * Each entry in the input "pb" array must describe a contiguous range of
+ * guest physical memory so that the pfns are sequential if the range crosses
+ * a page boundary. The offset field must be < HV_HYP_PAGE_SIZE.
+ */
+static inline void netvsc_build_mpb_array(struct hv_page_buffer *pb,
+ u32 page_buffer_count,
+ struct vmbus_packet_mpb_array *desc,
+ u32 *desc_size)
+{
+ struct hv_mpb_array *mpb_entry = &desc->range;
+ int i, j;
+
+ for (i = 0; i < page_buffer_count; i++) {
+ u32 offset = pb[i].offset;
+ u32 len = pb[i].len;
+
+ mpb_entry->offset = offset;
+ mpb_entry->len = len;
+
+ for (j = 0; j < HVPFN_UP(offset + len); j++)
+ mpb_entry->pfn_array[j] = pb[i].pfn + j;
+
+ mpb_entry = (struct hv_mpb_array *)&mpb_entry->pfn_array[j];
+ }
+
+ desc->rangecount = page_buffer_count;
+ *desc_size = (char *)mpb_entry - (char *)desc;
+}
+
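To make the variable-size layout concrete, here is a hypothetical two-range input (the pfns, offsets, and lengths are invented for the example; HV_HYP_PAGE_SIZE == 4096):

/* Input:
 *   pb[0] = { .pfn = 100, .offset = 4000, .len = 200 }  crosses a boundary
 *   pb[1] = { .pfn = 200, .offset = 0,    .len = 100 }
 *
 * Output, packed after the desc header:
 *   entry 0: offset=4000 len=200 pfn_array[] = { 100, 101 }
 *            (HVPFN_UP(4000 + 200) == 2, so two sequential pfns)
 *   entry 1: offset=0    len=100 pfn_array[] = { 200 }
 * desc->rangecount == 2; *desc_size runs from desc to the end of entry 1.
 */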
static inline int netvsc_send_pkt(
struct hv_device *device,
struct hv_netvsc_packet *packet,
@@ -1091,8 +1132,11 @@ static inline int netvsc_send_pkt(
packet->dma_range = NULL;
if (packet->page_buf_cnt) {
+ struct vmbus_channel_packet_page_buffer desc;
+ u32 desc_size;
+
if (packet->cp_partial)
- pb += packet->rmsg_pgcnt;
+ pb++;
ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
if (ret) {
@@ -1100,11 +1144,12 @@ static inline int netvsc_send_pkt(
goto exit;
}
- ret = vmbus_sendpacket_pagebuffer(out_channel,
- pb, packet->page_buf_cnt,
- &nvmsg, sizeof(nvmsg),
- req_id);
-
+ netvsc_build_mpb_array(pb, packet->page_buf_cnt,
+ (struct vmbus_packet_mpb_array *)&desc,
+ &desc_size);
+ ret = vmbus_sendpacket_mpb_desc(out_channel,
+ (struct vmbus_packet_mpb_array *)&desc,
+ desc_size, &nvmsg, sizeof(nvmsg), req_id);
if (ret)
netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
} else {
@@ -1253,7 +1298,7 @@ int netvsc_send(struct net_device *ndev,
packet->send_buf_index = section_index;
if (packet->cp_partial) {
- packet->page_buf_cnt -= packet->rmsg_pgcnt;
+ packet->page_buf_cnt--;
packet->total_data_buflen = msd_len + packet->rmsg_size;
} else {
packet->page_buf_cnt = 0;
@@ -1787,6 +1832,10 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
napi_enable(&net_device->chan_table[0].napi);
+ netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX,
+ &net_device->chan_table[0].napi);
+ netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX,
+ &net_device->chan_table[0].napi);
/* Connect with the NetVsp */
ret = netvsc_connect_vsp(device, net_device, device_info);
@@ -1805,6 +1854,8 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
close:
RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
+ netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
+ netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
napi_disable(&net_device->chan_table[0].napi);
/* Now, we can close the channel safely */
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index 4a9522689fa4..e01c5997a551 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -183,7 +183,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
xdp.command = XDP_SETUP_PROG;
xdp.prog = prog;
- ret = vf_netdev->netdev_ops->ndo_bpf(vf_netdev, &xdp);
+ ret = dev_xdp_propagate(vf_netdev, &xdp);
if (ret && prog)
bpf_prog_put(prog);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 11831a1c9762..14a0d04e21ae 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -29,6 +29,7 @@
#include <linux/bpf.h>
#include <net/arp.h>
+#include <net/netdev_lock.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
@@ -325,43 +326,10 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
return txq;
}
-static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len,
- struct hv_page_buffer *pb)
-{
- int j = 0;
-
- hvpfn += offset >> HV_HYP_PAGE_SHIFT;
- offset = offset & ~HV_HYP_PAGE_MASK;
-
- while (len > 0) {
- unsigned long bytes;
-
- bytes = HV_HYP_PAGE_SIZE - offset;
- if (bytes > len)
- bytes = len;
- pb[j].pfn = hvpfn;
- pb[j].offset = offset;
- pb[j].len = bytes;
-
- offset += bytes;
- len -= bytes;
-
- if (offset == HV_HYP_PAGE_SIZE && len) {
- hvpfn++;
- offset = 0;
- j++;
- }
- }
-
- return j + 1;
-}
-
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
struct hv_netvsc_packet *packet,
struct hv_page_buffer *pb)
{
- u32 slots_used = 0;
- char *data = skb->data;
int frags = skb_shinfo(skb)->nr_frags;
int i;
@@ -370,28 +338,27 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
* 2. skb linear data
* 3. skb fragment data
*/
- slots_used += fill_pg_buf(virt_to_hvpfn(hdr),
- offset_in_hvpage(hdr),
- len,
- &pb[slots_used]);
+ pb[0].offset = offset_in_hvpage(hdr);
+ pb[0].len = len;
+ pb[0].pfn = virt_to_hvpfn(hdr);
packet->rmsg_size = len;
- packet->rmsg_pgcnt = slots_used;
- slots_used += fill_pg_buf(virt_to_hvpfn(data),
- offset_in_hvpage(data),
- skb_headlen(skb),
- &pb[slots_used]);
+ pb[1].offset = offset_in_hvpage(skb->data);
+ pb[1].len = skb_headlen(skb);
+ pb[1].pfn = virt_to_hvpfn(skb->data);
for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+ struct hv_page_buffer *cur_pb = &pb[i + 2];
+ u64 pfn = page_to_hvpfn(skb_frag_page(frag));
+ u32 offset = skb_frag_off(frag);
- slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)),
- skb_frag_off(frag),
- skb_frag_size(frag),
- &pb[slots_used]);
+ cur_pb->offset = offset_in_hvpage(offset);
+ cur_pb->len = skb_frag_size(frag);
+ cur_pb->pfn = pfn + (offset >> HV_HYP_PAGE_SHIFT);
}
- return slots_used;
+ return frags + 2;
}
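With fill_pg_buf() gone, init_page_array() no longer splits ranges at HV_HYP_PAGE_SIZE boundaries, so each slot maps 1:1 to a data range. A sketch of the result for a hypothetical skb with two fragments:

/* pb[0] -> RNDIS message (rmsg_size bytes)
 * pb[1] -> skb linear data (skb_headlen(skb) bytes)
 * pb[2] -> frag 0
 * pb[3] -> frag 1
 * return value: frags + 2 == 4. Boundary-crossing ranges are handled
 * later by netvsc_build_mpb_array(), which emits the sequential pfns.
 */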
static int count_skb_frag_slots(struct sk_buff *skb)
@@ -482,7 +449,7 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
struct net_device *vf_netdev;
u32 rndis_msg_size;
u32 hash;
- struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
+ struct hv_page_buffer pb[MAX_DATA_RANGES];
/* If VF is present and up then redirect packets to it.
* Skip the VF if it is marked down or has no carrier.
@@ -987,7 +954,8 @@ struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev)
dev_info->bprog = prog;
}
} else {
- dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
+ dev_info->num_chn = max(VRSS_CHANNEL_DEFAULT,
+ netif_get_num_default_rss_queues());
dev_info->send_sections = NETVSC_DEFAULT_TX;
dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
dev_info->recv_sections = NETVSC_DEFAULT_RX;
@@ -1233,14 +1201,14 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
if (ret)
goto rollback_vf;
- ndev->mtu = mtu;
+ WRITE_ONCE(ndev->mtu, mtu);
ret = netvsc_attach(ndev, device_info);
if (!ret)
goto out;
/* Attempt rollback to original MTU */
- ndev->mtu = orig_mtu;
+ WRITE_ONCE(ndev->mtu, orig_mtu);
if (netvsc_attach(ndev, device_info))
netdev_err(ndev, "restoring mtu failed\n");
@@ -1403,7 +1371,7 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
struct net_device_context *ndc = netdev_priv(ndev);
struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
- struct sockaddr *addr = p;
+ struct sockaddr_storage *addr = p;
int err;
err = eth_prepare_mac_addr_change(ndev, p);
@@ -1419,12 +1387,12 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
return err;
}
- err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
+ err = rndis_filter_set_device_mac(nvdev, addr->__data);
if (!err) {
eth_commit_mac_addr_change(ndev, p);
} else if (vf_netdev) {
/* rollback change on VF */
- memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
+ memcpy(addr->__data, ndev->dev_addr, ETH_ALEN);
dev_set_mac_address(vf_netdev, addr, NULL);
}
@@ -1556,7 +1524,7 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
data[i++] = xdp_tx;
}
- pcpu_sum = kvmalloc_array(num_possible_cpus(),
+ pcpu_sum = kvmalloc_array(nr_cpu_ids,
sizeof(struct netvsc_ethtool_pcpu_stats),
GFP_KERNEL);
if (!pcpu_sum)
@@ -2460,6 +2428,21 @@ static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
} else {
netdev_info(ndev, "Data path switched %s VF: %s\n",
vf_is_up ? "to" : "from", vf_netdev->name);
+
+ /* In Azure, when accelerated networking is enabled, other NICs
+ * such as MANA and MLX are configured as a bond with the netvsc
+ * (failover) NIC. For bonded NICs, the minimum of the members'
+ * maximum packet aggregation sizes is propagated in the stack.
+ * To allow these NICs (MANA/MLX) to use GSO packets up to
+ * GSO_MAX_SIZE, the netvsc NIC must also advertise that size in
+ * the guest. The value is only increased for the netvsc NIC
+ * while the data path is switched over to the VF.
+ */
+ if (vf_is_up)
+ netif_set_tso_max_size(ndev, vf_netdev->tso_max_size);
+ else
+ netif_set_tso_max_size(ndev, netvsc_dev->netvsc_gso_max_size);
}
return NOTIFY_OK;
@@ -2797,6 +2780,31 @@ static struct hv_driver netvsc_drv = {
},
};
+/* Set the VF's namespace to be the same as the synthetic NIC's */
+static void netvsc_event_set_vf_ns(struct net_device *ndev)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ struct net_device *vf_netdev;
+ int ret;
+
+ vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+ if (!vf_netdev)
+ return;
+
+ if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
+ ret = dev_change_net_namespace(vf_netdev, dev_net(ndev),
+ "eth%d");
+ if (ret)
+ netdev_err(vf_netdev,
+ "Cannot move to same namespace as %s: %d\n",
+ ndev->name, ret);
+ else
+ netdev_info(vf_netdev,
+ "Moved VF to namespace with: %s\n",
+ ndev->name);
+ }
+}
+
/*
* On Hyper-V, every VF interface is matched with a corresponding
* synthetic interface. The synthetic interface is presented first
@@ -2809,6 +2817,11 @@ static int netvsc_netdev_event(struct notifier_block *this,
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
int ret = 0;
+ if (event_dev->netdev_ops == &device_ops && event == NETDEV_REGISTER) {
+ netvsc_event_set_vf_ns(event_dev);
+ return NOTIFY_DONE;
+ }
+
ret = check_dev_is_matching_vf(event_dev);
if (ret != 0)
return NOTIFY_DONE;
diff --git a/drivers/net/hyperv/netvsc_trace.h b/drivers/net/hyperv/netvsc_trace.h
index f7585563dea5..05e620cbdd29 100644
--- a/drivers/net/hyperv/netvsc_trace.h
+++ b/drivers/net/hyperv/netvsc_trace.h
@@ -51,7 +51,7 @@ DECLARE_EVENT_CLASS(rndis_msg_class,
__field( u32, msg_len )
),
TP_fast_assign(
- __assign_str(name, ndev->name);
+ __assign_str(name);
__entry->queue = q;
__entry->req_id = msg->msg.init_req.req_id;
__entry->msg_type = msg->ndis_msg_type;
@@ -121,7 +121,7 @@ TRACE_EVENT(nvsp_send,
__field( u32, msg_type )
),
TP_fast_assign(
- __assign_str(name, ndev->name);
+ __assign_str(name);
__entry->msg_type = msg->hdr.msg_type;
),
TP_printk("dev=%s type=%s",
@@ -142,7 +142,7 @@ TRACE_EVENT(nvsp_send_pkt,
__field( u32, section_size )
),
TP_fast_assign(
- __assign_str(name, ndev->name);
+ __assign_str(name);
__entry->qid = chan->offermsg.offer.sub_channel_index;
__entry->channel_type = rpkt->channel_type;
__entry->section_index = rpkt->send_buf_section_index;
@@ -165,7 +165,7 @@ TRACE_EVENT(nvsp_recv,
__field( u32, msg_type )
),
TP_fast_assign(
- __assign_str(name, ndev->name);
+ __assign_str(name);
__entry->qid = chan->offermsg.offer.sub_channel_index;
__entry->msg_type = msg->hdr.msg_type;
),
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index ecc2128ca9b7..9e73959e61ee 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -225,8 +225,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
struct rndis_request *req)
{
struct hv_netvsc_packet *packet;
- struct hv_page_buffer page_buf[2];
- struct hv_page_buffer *pb = page_buf;
+ struct hv_page_buffer pb;
int ret;
/* Setup the packet to send it */
@@ -235,27 +234,14 @@ static int rndis_filter_send_request(struct rndis_device *dev,
packet->total_data_buflen = req->request_msg.msg_len;
packet->page_buf_cnt = 1;
- pb[0].pfn = virt_to_phys(&req->request_msg) >>
- HV_HYP_PAGE_SHIFT;
- pb[0].len = req->request_msg.msg_len;
- pb[0].offset = offset_in_hvpage(&req->request_msg);
-
- /* Add one page_buf when request_msg crossing page boundary */
- if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
- packet->page_buf_cnt++;
- pb[0].len = HV_HYP_PAGE_SIZE -
- pb[0].offset;
- pb[1].pfn = virt_to_phys((void *)&req->request_msg
- + pb[0].len) >> HV_HYP_PAGE_SHIFT;
- pb[1].offset = 0;
- pb[1].len = req->request_msg.msg_len -
- pb[0].len;
- }
+ pb.pfn = virt_to_phys(&req->request_msg) >> HV_HYP_PAGE_SHIFT;
+ pb.len = req->request_msg.msg_len;
+ pb.offset = offset_in_hvpage(&req->request_msg);
trace_rndis_send(dev->ndev, 0, &req->request_msg);
rcu_read_lock_bh();
- ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false);
+ ret = netvsc_send(dev->ndev, packet, NULL, &pb, NULL, false);
rcu_read_unlock_bh();
return ret;
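The page-boundary case the deleted branch handled is now absorbed by netvsc_build_mpb_array(). A sketch with invented numbers (HV_HYP_PAGE_SIZE == 4096):

/* Say request_msg sits at offset 4000 in its page with msg_len == 200.
 * The single range crosses a page boundary, which is fine assuming, as
 * here, physically contiguous backing for the request:
 * netvsc_build_mpb_array() emits pfn and pfn + 1 for it, so the second
 * hv_page_buffer the old code assembled by hand is no longer needed.
 */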
@@ -1269,10 +1255,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
ret = vmbus_open(new_sc, netvsc_ring_bytes,
netvsc_ring_bytes, NULL, 0,
netvsc_channel_cb, nvchan);
- if (ret == 0)
+ if (ret == 0) {
napi_enable(&nvchan->napi);
- else
+ netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
+ &nvchan->napi);
+ netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
+ &nvchan->napi);
+ } else {
netdev_notice(ndev, "sub channel open failed: %d\n", ret);
+ }
if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
wake_up(&nvscdev->subchan_open);
@@ -1351,9 +1342,10 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
struct net_device_context *net_device_ctx = netdev_priv(net);
struct ndis_offload hwcaps;
struct ndis_offload_params offloads;
- unsigned int gso_max_size = GSO_LEGACY_MAX_SIZE;
int ret;
+ nvdev->netvsc_gso_max_size = GSO_LEGACY_MAX_SIZE;
+
/* Find HW offload capabilities */
ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
if (ret != 0)
@@ -1385,8 +1377,8 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
net->hw_features |= NETIF_F_TSO;
- if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
- gso_max_size = hwcaps.lsov2.ip4_maxsz;
+ if (hwcaps.lsov2.ip4_maxsz < nvdev->netvsc_gso_max_size)
+ nvdev->netvsc_gso_max_size = hwcaps.lsov2.ip4_maxsz;
}
if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
@@ -1406,8 +1398,8 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
net->hw_features |= NETIF_F_TSO6;
- if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
- gso_max_size = hwcaps.lsov2.ip6_maxsz;
+ if (hwcaps.lsov2.ip6_maxsz < nvdev->netvsc_gso_max_size)
+ nvdev->netvsc_gso_max_size = hwcaps.lsov2.ip6_maxsz;
}
if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
@@ -1433,7 +1425,7 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
*/
net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;
- netif_set_tso_max_size(net, gso_max_size);
+ netif_set_tso_max_size(net, nvdev->netvsc_gso_max_size);
ret = rndis_filter_set_offload_params(net, nvdev, &offloads);