Diffstat (limited to 'drivers/net/ethernet/google')
 drivers/net/ethernet/google/gve/gve.h         | 4 +++-
 drivers/net/ethernet/google/gve/gve_ethtool.c | 2 +-
 drivers/net/ethernet/google/gve/gve_main.c    | 4 ++--
 drivers/net/ethernet/google/gve/gve_rx.c      | 9 ++++-----
 drivers/net/ethernet/google/gve/gve_tx.c      | 2 +-
 5 files changed, 11 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index abc0c708b47a..b80349154604 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -49,7 +49,9 @@
/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES 1024
-#define GVE_RX_BUFFER_SIZE_DQO 2048
+#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
+
+#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
#define GVE_XDP_ACTIONS 5
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 233e5946905e..e5397aa1e48f 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -519,7 +519,7 @@ static int gve_set_tunable(struct net_device *netdev,
case ETHTOOL_RX_COPYBREAK:
{
u32 max_copybreak = gve_is_gqi(priv) ?
- (PAGE_SIZE / 2) : priv->data_buffer_size_dqo;
+ GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;
len = *(u32 *)value;
if (len > max_copybreak)
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index cc169748ffa2..619bf63ec935 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1328,7 +1328,7 @@ static int gve_open(struct net_device *dev)
/* Hard code this for now. This may be tuned in the future for
* performance.
*/
- priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO;
+ priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
}
err = gve_create_rings(priv);
if (err)
@@ -1664,7 +1664,7 @@ static int verify_xdp_configuration(struct net_device *dev)
return -EOPNOTSUPP;
}
- if (dev->mtu > (PAGE_SIZE / 2) - sizeof(struct ethhdr) - GVE_RX_PAD) {
+ if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) {
netdev_warn(dev, "XDP is not supported for mtu %d.\n",
dev->mtu);
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 82aa18588049..7a8dc5386fff 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -283,7 +283,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
/* Allocating half-page buffers allows page-flipping which is faster
* than copying or allocating new pages.
*/
- rx->packet_buffer_size = PAGE_SIZE / 2;
+ rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_rx_ctx_clear(&rx->ctx);
gve_rx_add_to_block(priv, idx);
@@ -399,10 +399,10 @@ static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *slot_addr)
{
- const __be64 offset = cpu_to_be64(PAGE_SIZE / 2);
+ const __be64 offset = cpu_to_be64(GVE_DEFAULT_RX_BUFFER_OFFSET);
/* "flip" to other packet buffer on this page */
- page_info->page_offset ^= PAGE_SIZE / 2;
+ page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;
*(slot_addr) ^= offset;
}
@@ -507,8 +507,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
return NULL;
gve_dec_pagecnt_bias(copy_page_info);
- copy_page_info->page_offset += rx->packet_buffer_size;
- copy_page_info->page_offset &= (PAGE_SIZE - 1);
+ copy_page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;
if (copy_page_info->can_flip) {
/* We have used both halves of this copy page, it
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 9f6ffc4a54f0..07ba124780df 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -819,7 +819,7 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
return 0;
}
-#define GVE_TX_START_THRESH PAGE_SIZE
+#define GVE_TX_START_THRESH 4096
static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
u32 to_do, bool try_to_wake)
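
For reference, the page-flipping described by the gve_rx.c comment above toggles a slot's buffer offset between the two 2048-byte halves of a 4 KiB page with an XOR, which is what gve_rx_flip_buff and the copy-pool hunk now both rely on. Below is a minimal userspace sketch of that arithmetic only; the names RX_BUFFER_OFFSET, slot_info and flip_buff are simplified stand-ins for the driver's GVE_DEFAULT_RX_BUFFER_OFFSET, gve_rx_slot_page_info and gve_rx_flip_buff, not the actual driver code.

#include <stdint.h>
#include <stdio.h>

#define RX_BUFFER_OFFSET 2048u	/* stand-in for GVE_DEFAULT_RX_BUFFER_OFFSET */

struct slot_info {		/* stand-in for gve_rx_slot_page_info */
	uint32_t page_offset;	/* 0 or 2048 within one 4 KiB page */
};

/* XOR with 2048 alternates between the two packet buffers on the page:
 * 0 ^ 2048 == 2048 and 2048 ^ 2048 == 0, so repeated calls flip back
 * and forth without any add-and-mask arithmetic.
 */
static void flip_buff(struct slot_info *info)
{
	info->page_offset ^= RX_BUFFER_OFFSET;
}

int main(void)
{
	struct slot_info info = { .page_offset = 0 };
	int i;

	for (i = 0; i < 4; i++) {
		flip_buff(&info);
		printf("flip %d: offset=%u\n", i + 1, info.page_offset);
	}
	return 0;		/* prints 2048, 0, 2048, 0 */
}

On a 4 KiB page this XOR produces the same 0/2048 sequence as the removed add-and-mask pair in gve_rx_copy_to_pool (offset += size; offset &= PAGE_SIZE - 1), but it is written purely in terms of the new buffer-offset constant rather than PAGE_SIZE.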