author	Shailend Chand <shailend@google.com>	2022-10-29 09:53:22 -0700
committer	David S. Miller <davem@davemloft.net>	2022-11-02 11:52:51 +0000
commit	82fd151d38d9fda714c5bb2e9e79ecd6bdc72da6 (patch)
tree	2182458d2230c574ea1896c6931190e0fcd84818 /drivers/net/ethernet/google/gve/gve_utils.h
parent	d08b0f8f46e45a274fc8c9a5bc92cb9da70d9887 (diff)
gve: Reduce alloc and copy costs in the GQ rx path
Previously, even if just one of the many fragments of a 9k packet required a copy, the whole packet was copied into a freshly allocated 9k-sized linear SKB, which hurt performance. With a pool of pages to copy into, each fragment can be handled independently, reducing how often allocation and copying occur.

Signed-off-by: Shailend Chand <shailend@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
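The approach the commit message describes can be modeled outside the kernel. The sketch below is not the gve driver code: frag_pool, pool_copy, the pool size, and the round-robin page reuse are all illustrative assumptions. It shows each fragment that needs a copy getting its own fixed-size pool page, instead of the whole packet being copied into one freshly allocated linear buffer.

/*
 * Minimal userspace model of the per-fragment copy pool; names and
 * sizing are hypothetical, not taken from the driver.
 */
#include <stdio.h>
#include <string.h>

#define POOL_PAGES 16
#define PAGE_SZ    4096

struct frag_pool {
	unsigned char pages[POOL_PAGES][PAGE_SZ];
	int next;			/* next page to hand out, round-robin */
};

/* Copy a single fragment into a pool page; return the copy or NULL. */
static unsigned char *pool_copy(struct frag_pool *p,
				const unsigned char *frag, size_t len)
{
	unsigned char *dst;

	if (len > PAGE_SZ)
		return NULL;		/* one fragment fits in one page */
	dst = p->pages[p->next];
	p->next = (p->next + 1) % POOL_PAGES;
	memcpy(dst, frag, len);
	return dst;
}

int main(void)
{
	static struct frag_pool pool;	/* zero-initialized */
	unsigned char frag[2048] = { 0xab };

	/* Only this fragment is copied; the other fragments of the
	 * same packet could be attached to the skb as page frags
	 * with no copy at all. */
	unsigned char *copy = pool_copy(&pool, frag, sizeof(frag));
	printf("copied fragment, first byte: 0x%02x\n", copy ? copy[0] : 0);
	return 0;
}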
Diffstat (limited to 'drivers/net/ethernet/google/gve/gve_utils.h')
-rw-r--r--	drivers/net/ethernet/google/gve/gve_utils.h	2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/google/gve/gve_utils.h b/drivers/net/ethernet/google/gve/gve_utils.h
index 6d98e69fd3b8..79595940b351 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.h
+++ b/drivers/net/ethernet/google/gve/gve_utils.h
@@ -19,7 +19,7 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
 struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
 			    struct gve_rx_slot_page_info *page_info, u16 len,
-			    u16 pad, struct gve_rx_ctx *ctx);
+			    u16 pad);
 
 /* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */
 void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info);
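The comment above gve_dec_pagecnt_bias describes a refcount-bias pattern: a large bias is pre-paid on a page so that per-packet accounting is a cheap local decrement. A minimal sketch of that pattern, with a hypothetical struct standing in for gve_rx_slot_page_info and with the recharge of the page's real reference count elided:

#include <limits.h>

/* Stand-in for gve_rx_slot_page_info; illustrative only. */
struct page_info_model {
	int pagecnt_bias;	/* pre-paid references remaining */
};

static void dec_pagecnt_bias_model(struct page_info_model *pi)
{
	if (--pi->pagecnt_bias == 0) {
		/* The real helper would also add INT_MAX to the page's
		 * refcount before resetting the bias. */
		pi->pagecnt_bias = INT_MAX;
	}
}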