author    David Vrabel <david.vrabel@citrix.com>    2016-10-04 10:29:14 +0100
committer David S. Miller <davem@davemloft.net>     2016-10-06 20:37:35 -0400
commit    eb1723a29b9a75dd787510a39096a68dba6cc200 (patch)
tree      7f6c52ddcb5b98accb591c01b94def17d100007e /drivers/net/xen-netback/common.h
parent    fedbc8c132bcf836358103195d8b6df6c03d9daf (diff)
xen-netback: refactor guest rx
Refactor the to-guest (rx) path to:

1. Push responses for completed skbs earlier, reducing latency.

2. Reduce the per-queue memory overhead by greatly reducing the maximum
   number of grant copy ops in each hypercall (from 4352 to 64).  Each
   struct xenvif_queue is now only 44 kB instead of 220 kB.

3. Make the code more maintainable.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
[re-based]
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
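To see where the 4352 figure and the per-queue saving come from, the following sizing sketch may help. It assumes 4 KiB Xen pages and a single-page rx ring of 256 slots (the XEN_PAGE_SIZE and XEN_NETIF_RX_RING_SIZE values are not part of this patch); the derived numbers match the commit message and the constants visible in the diff below.

/* Sizing sketch -- assumes 4 KiB Xen pages and a 256-slot rx ring. */
#define XEN_PAGE_SIZE          4096
#define XEN_NETIF_RX_RING_SIZE 256

/* From common.h: an skb's frags can span at most 65536 bytes. */
#define MAX_XEN_SKB_FRAGS      (65536 / XEN_PAGE_SIZE + 1)                   /* = 17 */

/* Old worst case: one grant copy op per frag per ring slot. */
#define MAX_GRANT_COPY_OPS     (MAX_XEN_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)  /* = 4352 */

/* New fixed batch introduced by this patch. */
#define COPY_BATCH_SIZE        64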
Diffstat (limited to 'drivers/net/xen-netback/common.h')
-rw-r--r--  drivers/net/xen-netback/common.h  23
1 file changed, 10 insertions(+), 13 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 0ba59106b1a5..7d12a388afc6 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -91,13 +91,6 @@ struct xenvif_rx_meta {
*/
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
-/* It's possible for an skb to have a maximal number of frags
- * but still be less than MAX_BUFFER_OFFSET in size. Thus the
- * worst-case number of copy operations is MAX_XEN_SKB_FRAGS per
- * ring slot.
- */
-#define MAX_GRANT_COPY_OPS (MAX_XEN_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
-
#define NETBACK_INVALID_HANDLE -1
/* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
@@ -133,6 +126,14 @@ struct xenvif_stats {
unsigned long tx_frag_overflow;
};
+#define COPY_BATCH_SIZE 64
+
+struct xenvif_copy_state {
+ struct gnttab_copy op[COPY_BATCH_SIZE];
+ RING_IDX idx[COPY_BATCH_SIZE];
+ unsigned int num;
+};
+
struct xenvif_queue { /* Per-queue data for xenvif */
unsigned int id; /* Queue ID, 0-based */
char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
@@ -189,12 +190,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
unsigned long last_rx_time;
bool stalled;
- struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];
-
- /* We create one meta structure per ring request we consume, so
- * the maximum number is the same as the ring size.
- */
- struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
+ struct xenvif_copy_state rx_copy;
/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
unsigned long credit_bytes;
@@ -358,6 +354,7 @@ int xenvif_dealloc_kthread(void *data);
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
+void xenvif_rx_action(struct xenvif_queue *queue);
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
void xenvif_carrier_on(struct xenvif *vif);
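For context, the struct xenvif_copy_state added above is consumed by the refactored rx path (xenvif_rx_action and its helpers, which live outside this header). The fragment below is a minimal, self-contained sketch of the batching pattern it enables: accumulate up to COPY_BATCH_SIZE grant copy ops, remember which rx ring slot each op belongs to, and flush the whole batch in one hypercall before completing those slots. The helper names and the do_copy_hypercall()/push_response() stand-ins are illustrative only, not the actual netback code.

#define COPY_BATCH_SIZE 64

typedef unsigned int RING_IDX;
struct gnttab_copy { int dummy; };      /* stand-in for the real Xen op layout */

struct xenvif_copy_state {
	struct gnttab_copy op[COPY_BATCH_SIZE];
	RING_IDX idx[COPY_BATCH_SIZE];
	unsigned int num;
};

/* Stand-in for issuing the batched GNTTABOP_copy hypercall. */
static void do_copy_hypercall(struct gnttab_copy *ops, unsigned int count)
{
	(void)ops; (void)count;
}

/* Stand-in for writing the rx response for one consumed ring slot. */
static void push_response(RING_IDX idx)
{
	(void)idx;
}

/* Flush all queued copy ops, then complete the affected ring slots. */
static void rx_copy_flush(struct xenvif_copy_state *co)
{
	unsigned int i;

	do_copy_hypercall(co->op, co->num);
	for (i = 0; i < co->num; i++)
		push_response(co->idx[i]);
	co->num = 0;
}

/* Queue one copy op for ring slot @idx, flushing first if the batch is full. */
static void rx_copy_add(struct xenvif_copy_state *co,
			const struct gnttab_copy *op, RING_IDX idx)
{
	if (co->num == COPY_BATCH_SIZE)
		rx_copy_flush(co);

	co->op[co->num] = *op;
	co->idx[co->num] = idx;
	co->num++;
}

Flushing whenever the small batch fills, rather than accumulating up to 4352 ops before a single huge hypercall, is what allows responses to be pushed earlier and the per-queue arrays to shrink, per points 1 and 2 of the commit message.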