| author | Tvrtko Ursulin <tvrtko.ursulin@intel.com> | 2022-10-03 17:04:02 +0100 |
|---|---|---|
| committer | Tvrtko Ursulin <tvrtko.ursulin@intel.com> | 2022-10-03 17:04:02 +0100 |
| commit | 97acb6a8fcc4e5c2cdc2693a35acdc5a7461aaa3 (patch) | |
| tree | c4f1a18b38d655b7806a72515992bd9aae14ef53 /drivers/net/xen-netfront.c | |
| parent | 6fa964c045a6bc3321a9186e87bfbcfd1059b0f1 (diff) | |
| parent | 7860d720a84c74b2761c6b7995392a798ab0a3cb (diff) | |
Merge drm/drm-next into drm-intel-gt-next
Daniele needs 84d4333c1e28 ("misc/mei: Add NULL check to component match
callback functions") in order to merge the DG2 HuC patches.
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r-- | drivers/net/xen-netfront.c | 172 |
1 file changed, 92 insertions(+), 80 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e2b4a1893a13..27a11cc08c61 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -66,6 +66,10 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644);
 MODULE_PARM_DESC(max_queues,
 		 "Maximum number of queues per virtual interface");
 
+static bool __read_mostly xennet_trusted = true;
+module_param_named(trusted, xennet_trusted, bool, 0644);
+MODULE_PARM_DESC(trusted, "Is the backend trusted");
+
 #define XENNET_TIMEOUT	(5 * HZ)
 
 static const struct ethtool_ops xennet_ethtool_ops;
@@ -78,8 +82,6 @@ struct netfront_cb {
 
 #define RX_COPY_THRESHOLD 256
 
-#define GRANT_INVALID_REF	0
-
 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
 
@@ -175,6 +177,9 @@ struct netfront_info {
 	/* Is device behaving sane? */
 	bool broken;
 
+	/* Should skbs be bounced into a zeroed buffer? */
+	bool bounce;
+
 	atomic_t rx_gso_checksum_fixup;
 };
 
@@ -224,7 +229,7 @@ static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
 {
 	int i = xennet_rxidx(ri);
 	grant_ref_t ref = queue->grant_rx_ref[i];
-	queue->grant_rx_ref[i] = GRANT_INVALID_REF;
+	queue->grant_rx_ref[i] = INVALID_GRANT_REF;
 	return ref;
 }
 
@@ -273,7 +278,8 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
 	if (unlikely(!skb))
 		return NULL;
 
-	page = page_pool_dev_alloc_pages(queue->page_pool);
+	page = page_pool_alloc_pages(queue->page_pool,
+				     GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
 	if (unlikely(!page)) {
 		kfree_skb(skb);
 		return NULL;
@@ -432,7 +438,7 @@ static bool xennet_tx_buf_gc(struct netfront_queue *queue)
 			}
 			gnttab_release_grant_reference(
 				&queue->gref_tx_head, queue->grant_tx_ref[id]);
-			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
+			queue->grant_tx_ref[id] = INVALID_GRANT_REF;
 			queue->grant_tx_page[id] = NULL;
 			add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
 			dev_kfree_skb_irq(skb);
@@ -667,6 +673,33 @@ static int xennet_xdp_xmit(struct net_device *dev, int n,
 	return nxmit;
 }
 
+struct sk_buff *bounce_skb(const struct sk_buff *skb)
+{
+	unsigned int headerlen = skb_headroom(skb);
+	/* Align size to allocate full pages and avoid contiguous data leaks */
+	unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
+				  XEN_PAGE_SIZE);
+	struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
+
+	if (!n)
+		return NULL;
+
+	if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
+		WARN_ONCE(1, "misaligned skb allocated\n");
+		kfree_skb(n);
+		return NULL;
+	}
+
+	/* Set the data pointer */
+	skb_reserve(n, headerlen);
+	/* Set the tail pointer and length */
+	skb_put(n, skb->len);
+
+	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
+
+	skb_copy_header(n, skb);
+	return n;
+}
 
 #define MAX_XEN_SKB_FRAGS	(65536 / XEN_PAGE_SIZE + 1)
 
@@ -720,9 +753,13 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* The first req should be at least ETH_HLEN size or the packet will be
 	 * dropped by netback.
+	 *
+	 * If the backend is not trusted bounce all data to zeroed pages to
+	 * avoid exposing contiguous data on the granted page not belonging to
+	 * the skb.
 	 */
-	if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
-		nskb = skb_copy(skb, GFP_ATOMIC);
+	if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
+		nskb = bounce_skb(skb);
 		if (!nskb)
 			goto drop;
 		dev_consume_skb_any(skb);
@@ -868,7 +905,7 @@ static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
 
 	spin_lock_irqsave(&queue->rx_cons_lock, flags);
 	queue->rx.rsp_cons = val;
-	queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
+	queue->rx_rsp_unconsumed = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
 	spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
 }
 
@@ -1006,22 +1043,12 @@ static int xennet_get_responses(struct netfront_queue *queue,
 	}
 
 	for (;;) {
-		if (unlikely(rx->status < 0 ||
-			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
-			if (net_ratelimit())
-				dev_warn(dev, "rx->offset: %u, size: %d\n",
					 rx->offset, rx->status);
-			xennet_move_rx_slot(queue, skb, ref);
-			err = -EINVAL;
-			goto next;
-		}
-
 		/*
 		 * This definitely indicates a bug, either in this driver or in
 		 * the backend driver. In future this should flag the bad
 		 * situation to the system controller to reboot the backend.
 		 */
-		if (ref == GRANT_INVALID_REF) {
+		if (ref == INVALID_GRANT_REF) {
 			if (net_ratelimit())
 				dev_warn(dev, "Bad rx response id %d.\n",
 					 rx->id);
@@ -1029,6 +1056,16 @@ static int xennet_get_responses(struct netfront_queue *queue,
 			goto next;
 		}
 
+		if (unlikely(rx->status < 0 ||
+			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
+			if (net_ratelimit())
+				dev_warn(dev, "rx->offset: %u, size: %d\n",
+					 rx->offset, rx->status);
+			xennet_move_rx_slot(queue, skb, ref);
+			err = -EINVAL;
+			goto next;
+		}
+
 		if (!gnttab_end_foreign_access_ref(ref)) {
 			dev_alert(dev,
 				  "Grant still in use by backend domain\n");
@@ -1055,8 +1092,10 @@ static int xennet_get_responses(struct netfront_queue *queue,
 			}
 		}
 		rcu_read_unlock();
-next:
+
 		__skb_queue_tail(list, skb);
+
+next:
 		if (!(rx->flags & XEN_NETRXF_more_data))
 			break;
 
@@ -1388,9 +1427,9 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
 		queue->tx_skbs[i] = NULL;
 		get_page(queue->grant_tx_page[i]);
 		gnttab_end_foreign_access(queue->grant_tx_ref[i],
-					  (unsigned long)page_address(queue->grant_tx_page[i]));
+					  queue->grant_tx_page[i]);
 		queue->grant_tx_page[i] = NULL;
-		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
+		queue->grant_tx_ref[i] = INVALID_GRANT_REF;
 		add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
 		dev_kfree_skb_irq(skb);
 	}
@@ -1411,7 +1450,7 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
 			continue;
 
 		ref = queue->grant_rx_ref[id];
-		if (ref == GRANT_INVALID_REF)
+		if (ref == INVALID_GRANT_REF)
 			continue;
 
 		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
@@ -1420,9 +1459,8 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
 		 * foreign access is ended (which may be deferred).
 		 */
 		get_page(page);
-		gnttab_end_foreign_access(ref,
-					  (unsigned long)page_address(page));
-		queue->grant_rx_ref[id] = GRANT_INVALID_REF;
+		gnttab_end_foreign_access(ref, page);
+		queue->grant_rx_ref[id] = INVALID_GRANT_REF;
 
 		kfree_skb(skb);
 	}
@@ -1500,7 +1538,7 @@ static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
 		return false;
 
 	spin_lock_irqsave(&queue->rx_cons_lock, flags);
-	work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
+	work_queued = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
 	if (work_queued > queue->rx_rsp_unconsumed) {
 		queue->rx_rsp_unconsumed = work_queued;
 		*eoi = 0;
@@ -1761,8 +1799,8 @@ static int netfront_probe(struct xenbus_device *dev,
 static void xennet_end_access(int ref, void *page)
 {
 	/* This frees the page as a side-effect */
-	if (ref != GRANT_INVALID_REF)
-		gnttab_end_foreign_access(ref, (unsigned long)page);
+	if (ref != INVALID_GRANT_REF)
+		gnttab_end_foreign_access(ref, virt_to_page(page));
 }
 
 static void xennet_disconnect_backend(struct netfront_info *info)
@@ -1798,8 +1836,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
 		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
 		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
 
-		queue->tx_ring_ref = GRANT_INVALID_REF;
-		queue->rx_ring_ref = GRANT_INVALID_REF;
+		queue->tx_ring_ref = INVALID_GRANT_REF;
+		queue->rx_ring_ref = INVALID_GRANT_REF;
 		queue->tx.sring = NULL;
 		queue->rx.sring = NULL;
 
@@ -1923,42 +1961,27 @@ static int setup_netfront(struct xenbus_device *dev,
 			struct netfront_queue *queue, unsigned int feature_split_evtchn)
 {
 	struct xen_netif_tx_sring *txs;
-	struct xen_netif_rx_sring *rxs = NULL;
-	grant_ref_t gref;
+	struct xen_netif_rx_sring *rxs;
 	int err;
 
-	queue->tx_ring_ref = GRANT_INVALID_REF;
-	queue->rx_ring_ref = GRANT_INVALID_REF;
+	queue->tx_ring_ref = INVALID_GRANT_REF;
+	queue->rx_ring_ref = INVALID_GRANT_REF;
 	queue->rx.sring = NULL;
 	queue->tx.sring = NULL;
 
-	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
-	if (!txs) {
-		err = -ENOMEM;
-		xenbus_dev_fatal(dev, err, "allocating tx ring page");
+	err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&txs,
+				1, &queue->tx_ring_ref);
+	if (err)
 		goto fail;
-	}
-	SHARED_RING_INIT(txs);
-	FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
 
-	err = xenbus_grant_ring(dev, txs, 1, &gref);
-	if (err < 0)
-		goto fail;
-	queue->tx_ring_ref = gref;
+	XEN_FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
 
-	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
-	if (!rxs) {
-		err = -ENOMEM;
-		xenbus_dev_fatal(dev, err, "allocating rx ring page");
+	err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&rxs,
+				1, &queue->rx_ring_ref);
+	if (err)
 		goto fail;
-	}
-	SHARED_RING_INIT(rxs);
-	FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
 
-	err = xenbus_grant_ring(dev, rxs, 1, &gref);
-	if (err < 0)
-		goto fail;
-	queue->rx_ring_ref = gref;
+	XEN_FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
 
 	if (feature_split_evtchn)
 		err = setup_netfront_split(queue);
@@ -1974,24 +1997,10 @@ static int setup_netfront(struct xenbus_device *dev,
 
 	return 0;
 
-	/* If we fail to setup netfront, it is safe to just revoke access to
-	 * granted pages because backend is not accessing it at this point.
-	 */
 fail:
-	if (queue->rx_ring_ref != GRANT_INVALID_REF) {
-		gnttab_end_foreign_access(queue->rx_ring_ref,
-					  (unsigned long)rxs);
-		queue->rx_ring_ref = GRANT_INVALID_REF;
-	} else {
-		free_page((unsigned long)rxs);
-	}
-	if (queue->tx_ring_ref != GRANT_INVALID_REF) {
-		gnttab_end_foreign_access(queue->tx_ring_ref,
-					  (unsigned long)txs);
-		queue->tx_ring_ref = GRANT_INVALID_REF;
-	} else {
-		free_page((unsigned long)txs);
-	}
+	xenbus_teardown_ring((void **)&queue->rx.sring, 1, &queue->rx_ring_ref);
+	xenbus_teardown_ring((void **)&queue->tx.sring, 1, &queue->tx_ring_ref);
+
 	return err;
 }
 
@@ -2020,7 +2029,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
 	queue->tx_pend_queue = TX_LINK_NONE;
 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
 		queue->tx_link[i] = i + 1;
-		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
+		queue->grant_tx_ref[i] = INVALID_GRANT_REF;
 		queue->grant_tx_page[i] = NULL;
 	}
 	queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
@@ -2028,7 +2037,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
 	/* Clear out rx_skbs */
 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
 		queue->rx_skbs[i] = NULL;
-		queue->grant_rx_ref[i] = GRANT_INVALID_REF;
+		queue->grant_rx_ref[i] = INVALID_GRANT_REF;
 	}
 
 	/* A grant for every tx ring slot */
@@ -2246,6 +2255,10 @@ static int talk_to_netback(struct xenbus_device *dev,
 
 	info->netdev->irq = 0;
 
+	/* Check if backend is trusted. */
+	info->bounce = !xennet_trusted ||
+		       !xenbus_read_unsigned(dev->nodename, "trusted", 1);
+
 	/* Check if backend supports multiple queues */
 	max_queues = xenbus_read_unsigned(info->xbdev->otherend,
 					  "multi-queue-max-queues", 1);
@@ -2413,6 +2426,9 @@ static int xennet_connect(struct net_device *dev)
 		return err;
 	if (np->netback_has_xdp_headroom)
 		pr_info("backend supports XDP headroom\n");
+	if (np->bounce)
+		dev_info(&np->xbdev->dev,
+			 "bouncing transmitted data to zeroed pages\n");
 
 	/* talk_to_netback() sets the correct number of queues */
 	num_queues = dev->real_num_tx_queues;
@@ -2448,10 +2464,6 @@ static int xennet_connect(struct net_device *dev)
 		if (queue->tx_irq != queue->rx_irq)
 			notify_remote_via_irq(queue->rx_irq);
 
-		spin_lock_irq(&queue->tx_lock);
-		xennet_tx_buf_gc(queue);
-		spin_unlock_irq(&queue->tx_lock);
-
 		spin_lock_bh(&queue->rx_lock);
 		xennet_alloc_rx_buffers(queue);
 		spin_unlock_bh(&queue->rx_lock);
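Usage note (editor's sketch, not part of the commit above): the `trusted` knob added by this diff is an ordinary module parameter, so it can normally be set at boot with `xen_netfront.trusted=0`, and the per-device xenstore node "trusted" read in talk_to_netback() can override the default per device. Below is a minimal user-space sketch for reading the module-level value, assuming only the standard /sys/module/<module>/parameters layout; the program and path are illustrative, not something shipped with the patch.

```c
/* check_xennet_trusted.c - hypothetical helper: print the current value of
 * the xen_netfront "trusted" module parameter via sysfs. Assumes the usual
 * sysfs layout for module parameters; not part of the patch above.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/module/xen_netfront/parameters/trusted";
	FILE *f = fopen(path, "r");
	char val[8];

	if (!f) {
		perror(path);	/* driver not loaded, or kernel lacks the parameter */
		return 1;
	}
	if (fgets(val, sizeof(val), f))
		printf("xen_netfront.trusted = %s", val);	/* bool params read back as "Y" or "N" */
	fclose(f);
	return 0;
}
```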