Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--  drivers/net/xen-netfront.c | 295
1 file changed, 169 insertions(+), 126 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 8b18246ad999..7c2220366623 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -45,7 +45,7 @@
#include <linux/slab.h>
#include <net/ip.h>
#include <linux/bpf.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
#include <linux/bpf_trace.h>
#include <xen/xen.h>
@@ -66,6 +66,10 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
"Maximum number of queues per virtual interface");
+static bool __read_mostly xennet_trusted = true;
+module_param_named(trusted, xennet_trusted, bool, 0644);
+MODULE_PARM_DESC(trusted, "Is the backend trusted");
+
#define XENNET_TIMEOUT (5 * HZ)
static const struct ethtool_ops xennet_ethtool_ops;
@@ -78,8 +82,6 @@ struct netfront_cb {
#define RX_COPY_THRESHOLD 256
-#define GRANT_INVALID_REF 0
-
#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
@@ -175,6 +177,9 @@ struct netfront_info {
/* Is device behaving sane? */
bool broken;
+ /* Should skbs be bounced into a zeroed buffer? */
+ bool bounce;
+
atomic_t rx_gso_checksum_fixup;
};
@@ -224,7 +229,7 @@ static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
{
int i = xennet_rxidx(ri);
grant_ref_t ref = queue->grant_rx_ref[i];
- queue->grant_rx_ref[i] = GRANT_INVALID_REF;
+ queue->grant_rx_ref[i] = INVALID_GRANT_REF;
return ref;
}
@@ -240,7 +245,8 @@ static bool xennet_can_sg(struct net_device *dev)
static void rx_refill_timeout(struct timer_list *t)
{
- struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
+ struct netfront_queue *queue = timer_container_of(queue, t,
+ rx_refill_timer);
napi_schedule(&queue->napi);
}
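
The hunk above only renames a helper: timer_container_of() is the successor of from_timer() and recovers the structure embedding the timer from the timer_list pointer handed to the callback. A minimal sketch of the pattern, with hypothetical names (struct demo_queue, demo_timeout) — an editor's illustration, not part of the patch:

#include <linux/timer.h>

struct demo_queue {
	struct timer_list refill_timer;
	bool need_refill;
};

static void demo_timeout(struct timer_list *t)
{
	/* Same container_of() idiom as the old from_timer() helper. */
	struct demo_queue *q = timer_container_of(q, t, refill_timer);

	q->need_refill = true;
}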
@@ -273,12 +279,14 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
if (unlikely(!skb))
return NULL;
- page = page_pool_dev_alloc_pages(queue->page_pool);
+ page = page_pool_alloc_pages(queue->page_pool,
+ GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
if (unlikely(!page)) {
kfree_skb(skb);
return NULL;
}
skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
+ skb_mark_for_recycle(skb);
/* Align ip header to a 16 bytes boundary */
skb_reserve(skb, NET_IP_ALIGN);
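
The hunk above trades the page_pool_dev_alloc_pages() shorthand for page_pool_alloc_pages() so __GFP_ZERO can be passed (the page is about to be granted to another domain and must not carry stale data), and skb_mark_for_recycle() lets the page flow back into the pool when the skb is freed. A minimal sketch of the surrounding pool lifecycle, with hypothetical sizing — the real parameters live in xennet_create_page_pool(), which this hunk does not show:

#include <linux/numa.h>
#include <net/page_pool/types.h>
#include <net/page_pool/helpers.h>

static struct page_pool *demo_create_pool(struct device *dev)
{
	struct page_pool_params pp_params = {
		.order		= 0,		/* single pages */
		.pool_size	= 256,		/* hypothetical ring depth */
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
	};

	return page_pool_create(&pp_params);	/* ERR_PTR() on failure */
}

static struct page *demo_alloc_zeroed(struct page_pool *pool)
{
	/* Zeroed allocation, as in the hunk above. */
	return page_pool_alloc_pages(pool,
				     GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
}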
@@ -424,17 +432,15 @@ static bool xennet_tx_buf_gc(struct netfront_queue *queue)
queue->tx_link[id] = TX_LINK_NONE;
skb = queue->tx_skbs[id];
queue->tx_skbs[id] = NULL;
- if (unlikely(gnttab_query_foreign_access(
- queue->grant_tx_ref[id]) != 0)) {
+ if (unlikely(!gnttab_end_foreign_access_ref(
+ queue->grant_tx_ref[id]))) {
dev_alert(dev,
"Grant still in use by backend domain\n");
goto err;
}
- gnttab_end_foreign_access_ref(
- queue->grant_tx_ref[id], GNTMAP_readonly);
gnttab_release_grant_reference(
&queue->gref_tx_head, queue->grant_tx_ref[id]);
- queue->grant_tx_ref[id] = GRANT_INVALID_REF;
+ queue->grant_tx_ref[id] = INVALID_GRANT_REF;
queue->grant_tx_page[id] = NULL;
add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
dev_kfree_skb_irq(skb);
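
In the hunk above the old query-then-end sequence becomes a single call: the current gnttab_end_foreign_access_ref() takes only the grant reference (the GNTMAP_readonly argument is gone) and reports whether access could actually be revoked. A short sketch of the defensive pattern, with hypothetical names — an editor's illustration, not part of the patch:

#include <linux/device.h>
#include <xen/grant_table.h>

static grant_ref_t demo_gref_head;	/* head of a pre-allocated ref list */

static bool demo_reclaim_ref(struct device *dev, grant_ref_t ref)
{
	if (!gnttab_end_foreign_access_ref(ref)) {
		/* The backend still has the page mapped: do not recycle the
		 * reference or the page, it stays writable by the backend. */
		dev_alert(dev, "grant %u still in use by backend domain\n", ref);
		return false;
	}

	gnttab_release_grant_reference(&demo_gref_head, ref);
	return true;
}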
@@ -632,8 +638,6 @@ static int xennet_xdp_xmit_one(struct net_device *dev,
tx_stats->packets++;
u64_stats_update_end(&tx_stats->syncp);
- xennet_tx_buf_gc(queue);
-
return 0;
}
@@ -669,6 +673,33 @@ static int xennet_xdp_xmit(struct net_device *dev, int n,
return nxmit;
}
+static struct sk_buff *bounce_skb(const struct sk_buff *skb)
+{
+ unsigned int headerlen = skb_headroom(skb);
+ /* Align size to allocate full pages and avoid contiguous data leaks */
+ unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
+ XEN_PAGE_SIZE);
+ struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
+
+ if (!n)
+ return NULL;
+
+ if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
+ WARN_ONCE(1, "misaligned skb allocated\n");
+ kfree_skb(n);
+ return NULL;
+ }
+
+ /* Set the data pointer */
+ skb_reserve(n, headerlen);
+ /* Set the tail pointer and length */
+ skb_put(n, skb->len);
+
+ BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
+
+ skb_copy_header(n, skb);
+ return n;
+}
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
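
A worked example for bounce_skb() above (editor's illustration with made-up numbers, assuming the usual 4 KiB XEN_PAGE_SIZE): a linear skb with skb_end_offset() == 1728 and data_len == 0 gives size = ALIGN(1728 + 0, 4096) = 4096, so the copied data is confined to one zeroed, page-sized head buffer (the function then verifies the buffer really is XEN_PAGE_SIZE aligned and gives up otherwise). Whatever the backend maps, beyond the copied frame it only ever sees zeros — never neighbouring allocations that happened to share the original skb's pages.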
@@ -722,9 +753,13 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
/* The first req should be at least ETH_HLEN size or the packet will be
* dropped by netback.
+ *
+ * If the backend is not trusted bounce all data to zeroed pages to
+ * avoid exposing contiguous data on the granted page not belonging to
+ * the skb.
*/
- if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
- nskb = skb_copy(skb, GFP_ATOMIC);
+ if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
+ nskb = bounce_skb(skb);
if (!nskb)
goto drop;
dev_consume_skb_any(skb);
@@ -812,9 +847,6 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
tx_stats->packets++;
u64_stats_update_end(&tx_stats->syncp);
- /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
- xennet_tx_buf_gc(queue);
-
if (!netfront_tx_slot_available(queue))
netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
@@ -831,7 +863,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
static int xennet_close(struct net_device *dev)
{
struct netfront_info *np = netdev_priv(dev);
- unsigned int num_queues = dev->real_num_tx_queues;
+ unsigned int num_queues = np->queues ? dev->real_num_tx_queues : 0;
unsigned int i;
struct netfront_queue *queue;
netif_tx_stop_all_queues(np->netdev);
@@ -842,13 +874,38 @@ static int xennet_close(struct net_device *dev)
return 0;
}
+static void xennet_destroy_queues(struct netfront_info *info)
+{
+ unsigned int i;
+
+ if (!info->queues)
+ return;
+
+ for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
+ struct netfront_queue *queue = &info->queues[i];
+
+ if (netif_running(info->netdev))
+ napi_disable(&queue->napi);
+ netif_napi_del(&queue->napi);
+ }
+
+ kfree(info->queues);
+ info->queues = NULL;
+}
+
+static void xennet_uninit(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ xennet_destroy_queues(np);
+}
+
static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
{
unsigned long flags;
spin_lock_irqsave(&queue->rx_cons_lock, flags);
queue->rx.rsp_cons = val;
- queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
+ queue->rx_rsp_unconsumed = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
}
@@ -924,20 +981,27 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_TX:
- get_page(pdata);
xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ trace_xdp_exception(queue->info->netdev, prog, act);
+ break;
+ }
+ get_page(pdata);
err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
- if (unlikely(!err))
+ if (unlikely(err <= 0)) {
+ if (err < 0)
+ trace_xdp_exception(queue->info->netdev, prog, act);
xdp_return_frame_rx_napi(xdpf);
- else if (unlikely(err < 0))
- trace_xdp_exception(queue->info->netdev, prog, act);
+ }
break;
case XDP_REDIRECT:
get_page(pdata);
err = xdp_do_redirect(queue->info->netdev, xdp, prog);
*need_xdp_flush = true;
- if (unlikely(err))
+ if (unlikely(err)) {
trace_xdp_exception(queue->info->netdev, prog, act);
+ xdp_return_buff(xdp);
+ }
break;
case XDP_PASS:
case XDP_DROP:
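
The XDP_TX arm above now copes with two failure modes: xdp_convert_buff_to_frame() can return NULL, and the xmit helper follows ndo_xdp_xmit() semantics, returning the number of frames actually queued, so for a single frame anything <= 0 means it was not sent and the frame (plus the page reference taken for it) must be given back. A compressed sketch with a hypothetical xmit helper — an editor's illustration, not part of the patch:

#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <net/xdp.h>

/* demo_xdp_xmit() stands in for the driver's ndo_xdp_xmit-style helper:
 * it returns how many of the n frames were queued, or a negative errno. */
static int demo_xdp_xmit(struct net_device *dev, int n,
			 struct xdp_frame **frames, u32 flags);

static void demo_handle_xdp_tx(struct net_device *dev, struct bpf_prog *prog,
			       struct xdp_buff *xdp, struct page *page)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
	int sent;

	if (unlikely(!xdpf)) {			/* conversion failed, page untouched */
		trace_xdp_exception(dev, prog, XDP_TX);
		return;
	}

	get_page(page);				/* the frame now pins the page */
	sent = demo_xdp_xmit(dev, 1, &xdpf, 0);
	if (sent <= 0) {
		if (sent < 0)
			trace_xdp_exception(dev, prog, XDP_TX);
		xdp_return_frame_rx_napi(xdpf);	/* releases the extra reference */
	}
}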
@@ -968,7 +1032,6 @@ static int xennet_get_responses(struct netfront_queue *queue,
struct device *dev = &queue->info->netdev->dev;
struct bpf_prog *xdp_prog;
struct xdp_buff xdp;
- unsigned long ret;
int slots = 1;
int err = 0;
u32 verdict;
@@ -987,22 +1050,12 @@ static int xennet_get_responses(struct netfront_queue *queue,
}
for (;;) {
- if (unlikely(rx->status < 0 ||
- rx->offset + rx->status > XEN_PAGE_SIZE)) {
- if (net_ratelimit())
- dev_warn(dev, "rx->offset: %u, size: %d\n",
- rx->offset, rx->status);
- xennet_move_rx_slot(queue, skb, ref);
- err = -EINVAL;
- goto next;
- }
-
/*
* This definitely indicates a bug, either in this driver or in
* the backend driver. In future this should flag the bad
* situation to the system controller to reboot the backend.
*/
- if (ref == GRANT_INVALID_REF) {
+ if (ref == INVALID_GRANT_REF) {
if (net_ratelimit())
dev_warn(dev, "Bad rx response id %d.\n",
rx->id);
@@ -1010,8 +1063,23 @@ static int xennet_get_responses(struct netfront_queue *queue,
goto next;
}
- ret = gnttab_end_foreign_access_ref(ref, 0);
- BUG_ON(!ret);
+ if (unlikely(rx->status < 0 ||
+ rx->offset + rx->status > XEN_PAGE_SIZE)) {
+ if (net_ratelimit())
+ dev_warn(dev, "rx->offset: %u, size: %d\n",
+ rx->offset, rx->status);
+ xennet_move_rx_slot(queue, skb, ref);
+ err = -EINVAL;
+ goto next;
+ }
+
+ if (!gnttab_end_foreign_access_ref(ref)) {
+ dev_alert(dev,
+ "Grant still in use by backend domain\n");
+ queue->info->broken = true;
+ dev_alert(dev, "Disabled for further use\n");
+ return -EINVAL;
+ }
gnttab_release_grant_reference(&queue->gref_rx_head, ref);
@@ -1031,8 +1099,10 @@ static int xennet_get_responses(struct netfront_queue *queue,
}
}
rcu_read_unlock();
-next:
+
__skb_queue_tail(list, skb);
+
+next:
if (!(rx->flags & XEN_NETRXF_more_data))
break;
@@ -1232,6 +1302,10 @@ static int xennet_poll(struct napi_struct *napi, int budget)
&need_xdp_flush);
if (unlikely(err)) {
+ if (queue->info->broken) {
+ spin_unlock(&queue->rx_lock);
+ return 0;
+ }
err:
while ((skb = __skb_dequeue(&tmpq)))
__skb_queue_tail(&errq, skb);
@@ -1308,7 +1382,7 @@ static int xennet_change_mtu(struct net_device *dev, int mtu)
if (mtu > max)
return -EINVAL;
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
return 0;
}
@@ -1325,16 +1399,16 @@ static void xennet_get_stats64(struct net_device *dev,
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ start = u64_stats_fetch_begin(&tx_stats->syncp);
tx_packets = tx_stats->packets;
tx_bytes = tx_stats->bytes;
- } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
do {
- start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ start = u64_stats_fetch_begin(&rx_stats->syncp);
rx_packets = rx_stats->packets;
rx_bytes = rx_stats->bytes;
- } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
tot->rx_packets += rx_packets;
tot->tx_packets += tx_packets;
@@ -1360,10 +1434,9 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
queue->tx_skbs[i] = NULL;
get_page(queue->grant_tx_page[i]);
gnttab_end_foreign_access(queue->grant_tx_ref[i],
- GNTMAP_readonly,
- (unsigned long)page_address(queue->grant_tx_page[i]));
+ queue->grant_tx_page[i]);
queue->grant_tx_page[i] = NULL;
- queue->grant_tx_ref[i] = GRANT_INVALID_REF;
+ queue->grant_tx_ref[i] = INVALID_GRANT_REF;
add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
dev_kfree_skb_irq(skb);
}
@@ -1384,7 +1457,7 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
continue;
ref = queue->grant_rx_ref[id];
- if (ref == GRANT_INVALID_REF)
+ if (ref == INVALID_GRANT_REF)
continue;
page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
@@ -1393,9 +1466,8 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
* foreign access is ended (which may be deferred).
*/
get_page(page);
- gnttab_end_foreign_access(ref, 0,
- (unsigned long)page_address(page));
- queue->grant_rx_ref[id] = GRANT_INVALID_REF;
+ gnttab_end_foreign_access(ref, page);
+ queue->grant_rx_ref[id] = INVALID_GRANT_REF;
kfree_skb(skb);
}
@@ -1473,7 +1545,7 @@ static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
return false;
spin_lock_irqsave(&queue->rx_cons_lock, flags);
- work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
+ work_queued = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
if (work_queued > queue->rx_rsp_unconsumed) {
queue->rx_rsp_unconsumed = work_queued;
*eoi = 0;
@@ -1611,6 +1683,7 @@ static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
static const struct net_device_ops xennet_netdev_ops = {
+ .ndo_uninit = xennet_uninit,
.ndo_open = xennet_open,
.ndo_stop = xennet_close,
.ndo_start_xmit = xennet_start_xmit,
@@ -1675,6 +1748,8 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
* negotiate with the backend regarding supported features.
*/
netdev->features |= netdev->hw_features;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
netdev->ethtool_ops = &xennet_ethtool_ops;
netdev->min_mtu = ETH_MIN_MTU;
@@ -1733,8 +1808,8 @@ static int netfront_probe(struct xenbus_device *dev,
static void xennet_end_access(int ref, void *page)
{
/* This frees the page as a side-effect */
- if (ref != GRANT_INVALID_REF)
- gnttab_end_foreign_access(ref, 0, (unsigned long)page);
+ if (ref != INVALID_GRANT_REF)
+ gnttab_end_foreign_access(ref, virt_to_page(page));
}
static void xennet_disconnect_backend(struct netfront_info *info)
@@ -1747,7 +1822,7 @@ static void xennet_disconnect_backend(struct netfront_info *info)
for (i = 0; i < num_queues && info->queues; ++i) {
struct netfront_queue *queue = &info->queues[i];
- del_timer_sync(&queue->rx_refill_timer);
+ timer_delete_sync(&queue->rx_refill_timer);
if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
unbind_from_irqhandler(queue->tx_irq, queue);
@@ -1770,8 +1845,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
- queue->tx_ring_ref = GRANT_INVALID_REF;
- queue->rx_ring_ref = GRANT_INVALID_REF;
+ queue->tx_ring_ref = INVALID_GRANT_REF;
+ queue->rx_ring_ref = INVALID_GRANT_REF;
queue->tx.sring = NULL;
queue->rx.sring = NULL;
@@ -1796,6 +1871,12 @@ static int netfront_resume(struct xenbus_device *dev)
netif_tx_unlock_bh(info->netdev);
xennet_disconnect_backend(info);
+
+ rtnl_lock();
+ if (info->queues)
+ xennet_destroy_queues(info);
+ rtnl_unlock();
+
return 0;
}
@@ -1896,41 +1977,26 @@ static int setup_netfront(struct xenbus_device *dev,
{
struct xen_netif_tx_sring *txs;
struct xen_netif_rx_sring *rxs;
- grant_ref_t gref;
int err;
- queue->tx_ring_ref = GRANT_INVALID_REF;
- queue->rx_ring_ref = GRANT_INVALID_REF;
+ queue->tx_ring_ref = INVALID_GRANT_REF;
+ queue->rx_ring_ref = INVALID_GRANT_REF;
queue->rx.sring = NULL;
queue->tx.sring = NULL;
- txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
- if (!txs) {
- err = -ENOMEM;
- xenbus_dev_fatal(dev, err, "allocating tx ring page");
+ err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&txs,
+ 1, &queue->tx_ring_ref);
+ if (err)
goto fail;
- }
- SHARED_RING_INIT(txs);
- FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
- err = xenbus_grant_ring(dev, txs, 1, &gref);
- if (err < 0)
- goto grant_tx_ring_fail;
- queue->tx_ring_ref = gref;
+ XEN_FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
- rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
- if (!rxs) {
- err = -ENOMEM;
- xenbus_dev_fatal(dev, err, "allocating rx ring page");
- goto alloc_rx_ring_fail;
- }
- SHARED_RING_INIT(rxs);
- FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
+ err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&rxs,
+ 1, &queue->rx_ring_ref);
+ if (err)
+ goto fail;
- err = xenbus_grant_ring(dev, rxs, 1, &gref);
- if (err < 0)
- goto grant_rx_ring_fail;
- queue->rx_ring_ref = gref;
+ XEN_FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
if (feature_split_evtchn)
err = setup_netfront_split(queue);
@@ -1942,22 +2008,14 @@ static int setup_netfront(struct xenbus_device *dev,
err = setup_netfront_single(queue);
if (err)
- goto alloc_evtchn_fail;
+ goto fail;
return 0;
- /* If we fail to setup netfront, it is safe to just revoke access to
- * granted pages because backend is not accessing it at this point.
- */
-alloc_evtchn_fail:
- gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
-grant_rx_ring_fail:
- free_page((unsigned long)rxs);
-alloc_rx_ring_fail:
- gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
-grant_tx_ring_fail:
- free_page((unsigned long)txs);
-fail:
+ fail:
+ xenbus_teardown_ring((void **)&queue->rx.sring, 1, &queue->rx_ring_ref);
+ xenbus_teardown_ring((void **)&queue->tx.sring, 1, &queue->tx_ring_ref);
+
return err;
}
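
setup_netfront() above replaces the open-coded get_zeroed_page() + SHARED_RING_INIT() + xenbus_grant_ring() sequence, and its ladder of error labels, with xenbus_setup_ring(), which allocates the zeroed ring page and grants it in one step, paired with xenbus_teardown_ring() on a single failure path. A sketch of that pairing for one generic single-page ring, with hypothetical function names — an editor's illustration, not part of the patch:

#include <xen/xenbus.h>
#include <xen/page.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/netif.h>

static int demo_setup_tx_ring(struct xenbus_device *dev,
			      struct xen_netif_tx_front_ring *front,
			      grant_ref_t *gref)
{
	struct xen_netif_tx_sring *sring;
	int err;

	/* Allocates a zeroed page, grants it to the backend, fills *gref. */
	err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&sring,
				1, gref);
	if (err)
		return err;

	XEN_FRONT_RING_INIT(front, sring, XEN_PAGE_SIZE);
	return 0;
}

static void demo_teardown_tx_ring(struct xen_netif_tx_front_ring *front,
				  grant_ref_t *gref)
{
	/* Revokes the grant and frees the page; the error path above relies
	 * on this being safe for a ring that was never set up, i.e. a NULL
	 * sring pointer and *gref == INVALID_GRANT_REF. */
	xenbus_teardown_ring((void **)&front->sring, 1, gref);
}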
@@ -1986,7 +2044,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
queue->tx_pend_queue = TX_LINK_NONE;
for (i = 0; i < NET_TX_RING_SIZE; i++) {
queue->tx_link[i] = i + 1;
- queue->grant_tx_ref[i] = GRANT_INVALID_REF;
+ queue->grant_tx_ref[i] = INVALID_GRANT_REF;
queue->grant_tx_page[i] = NULL;
}
queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
@@ -1994,7 +2052,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
/* Clear out rx_skbs */
for (i = 0; i < NET_RX_RING_SIZE; i++) {
queue->rx_skbs[i] = NULL;
- queue->grant_rx_ref[i] = GRANT_INVALID_REF;
+ queue->grant_rx_ref[i] = INVALID_GRANT_REF;
}
/* A grant for every tx ring slot */
@@ -2103,22 +2161,6 @@ error:
return err;
}
-static void xennet_destroy_queues(struct netfront_info *info)
-{
- unsigned int i;
-
- for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
- struct netfront_queue *queue = &info->queues[i];
-
- if (netif_running(info->netdev))
- napi_disable(&queue->napi);
- netif_napi_del(&queue->napi);
- }
-
- kfree(info->queues);
- info->queues = NULL;
-}
-
static int xennet_create_page_pool(struct netfront_queue *queue)
@@ -2197,8 +2239,7 @@ static int xennet_create_queues(struct netfront_info *info,
return ret;
}
- netif_napi_add(queue->info->netdev, &queue->napi,
- xennet_poll, 64);
+ netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll);
if (netif_running(info->netdev))
napi_enable(&queue->napi);
}
@@ -2228,6 +2269,10 @@ static int talk_to_netback(struct xenbus_device *dev,
info->netdev->irq = 0;
+ /* Check if backend is trusted. */
+ info->bounce = !xennet_trusted ||
+ !xenbus_read_unsigned(dev->nodename, "trusted", 1);
+
/* Check if backend supports multiple queues */
max_queues = xenbus_read_unsigned(info->xbdev->otherend,
"multi-queue-max-queues", 1);
@@ -2395,6 +2440,9 @@ static int xennet_connect(struct net_device *dev)
return err;
if (np->netback_has_xdp_headroom)
pr_info("backend supports XDP headroom\n");
+ if (np->bounce)
+ dev_info(&np->xbdev->dev,
+ "bouncing transmitted data to zeroed pages\n");
/* talk_to_netback() sets the correct number of queues */
num_queues = dev->real_num_tx_queues;
@@ -2430,10 +2478,6 @@ static int xennet_connect(struct net_device *dev)
if (queue->tx_irq != queue->rx_irq)
notify_remote_via_irq(queue->rx_irq);
- spin_lock_irq(&queue->tx_lock);
- xennet_tx_buf_gc(queue);
- spin_unlock_irq(&queue->tx_lock);
-
spin_lock_bh(&queue->rx_lock);
xennet_alloc_rx_buffers(queue);
spin_unlock_bh(&queue->rx_lock);
@@ -2611,7 +2655,7 @@ static void xennet_bus_close(struct xenbus_device *dev)
} while (!ret);
}
-static int xennet_remove(struct xenbus_device *dev)
+static void xennet_remove(struct xenbus_device *dev)
{
struct netfront_info *info = dev_get_drvdata(&dev->dev);
@@ -2627,8 +2671,6 @@ static int xennet_remove(struct xenbus_device *dev)
rtnl_unlock();
}
xennet_free_netdev(info->netdev);
-
- return 0;
}
static const struct xenbus_device_id netfront_ids[] = {
@@ -2654,8 +2696,9 @@ static int __init netif_init(void)
pr_info("Initialising Xen virtual ethernet driver\n");
- /* Allow as many queues as there are CPUs but max. 8 if user has not
- * specified a value.
+ /* Allow the number of queues to match the number of CPUs, but not exceed
+ * the maximum limit. If the user has not specified a value, the default
+ * maximum limit is 8.
*/
if (xennet_max_queues == 0)
xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,