author	Lorenzo Bianconi <lorenzo@kernel.org>	2020-11-20 18:05:43 +0100
committer	Jakub Kicinski <kuba@kernel.org>	2020-11-24 15:09:41 -0800
commit	eb33f11864fbbcf976ddca4a96fca02d5e4b3733 (patch)
tree	fa8332f4af9368cd774dd9b9a13a4cc1cce11b3d /drivers/net/ethernet/marvell
parent	05c748f7d09ab1104b309a110f61560945f73f8b (diff)
net: mvneta: move skb_shared_info in mvneta_xdp_put_buff caller
Pass the skb_shared_info pointer in from the mvneta_xdp_put_buff caller. This is a preliminary patch to reduce accesses to the skb_shared_info area and so reduce cache misses. Remove the napi parameter from the mvneta_xdp_put_buff signature, since the function always runs in NAPI context.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Reviewed-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
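The shape of the refactor, reduced to a minimal sketch (the demo_* names and struct demo_rxq below are hypothetical stand-ins, not the driver's types; the xdp and page_pool calls are the real kernel APIs the patch relies on):

#include <linux/skbuff.h>
#include <net/page_pool.h>
#include <net/xdp.h>

/* Hypothetical receive-queue wrapper; only the page_pool matters here. */
struct demo_rxq {
	struct page_pool *page_pool;
};

/*
 * After the patch: sinfo arrives from the caller, so this helper no
 * longer dereferences the shared-info area itself, and the old napi
 * flag is gone because the function only ever runs in NAPI context,
 * so page_pool's allow_direct argument is hard-coded to true.
 */
static void demo_xdp_put_buff(struct demo_rxq *rxq, struct xdp_buff *xdp,
			      struct skb_shared_info *sinfo, int sync_len)
{
	int i;

	for (i = 0; i < sinfo->nr_frags; i++)
		page_pool_put_full_page(rxq->page_pool,
					skb_frag_page(&sinfo->frags[i]), true);
	page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
			   sync_len, true);
}

/* Caller side: one shared-info lookup, reused for every drop path. */
static void demo_drop(struct demo_rxq *rxq, struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);

	demo_xdp_put_buff(rxq, xdp, sinfo, -1);
}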
Diffstat (limited to 'drivers/net/ethernet/marvell')
-rw-r--r--	drivers/net/ethernet/marvell/mvneta.c	32
1 file changed, 21 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 60a285f3a55f..17c446b1cb94 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2033,16 +2033,16 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
static void
mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
- struct xdp_buff *xdp, int sync_len, bool napi)
+ struct xdp_buff *xdp, struct skb_shared_info *sinfo,
+ int sync_len)
{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
int i;
for (i = 0; i < sinfo->nr_frags; i++)
page_pool_put_full_page(rxq->page_pool,
- skb_frag_page(&sinfo->frags[i]), napi);
+ skb_frag_page(&sinfo->frags[i]), true);
page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
- sync_len, napi);
+ sync_len, true);
}
static int
@@ -2179,6 +2179,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
struct bpf_prog *prog, struct xdp_buff *xdp,
u32 frame_sz, struct mvneta_stats *stats)
{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
unsigned int len, data_len, sync;
u32 ret, act;
@@ -2199,7 +2200,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
err = xdp_do_redirect(pp->dev, xdp, prog);
if (unlikely(err)) {
- mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
+ mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
ret = MVNETA_XDP_DROPPED;
} else {
ret = MVNETA_XDP_REDIR;
@@ -2210,7 +2211,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
case XDP_TX:
ret = mvneta_xdp_xmit_back(pp, xdp);
if (ret != MVNETA_XDP_TX)
- mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
+ mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
break;
default:
bpf_warn_invalid_xdp_action(act);
@@ -2219,7 +2220,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
trace_xdp_exception(pp->dev, prog, act);
fallthrough;
case XDP_DROP:
- mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
+ mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
ret = MVNETA_XDP_DROPPED;
stats->xdp_drop++;
break;
@@ -2406,7 +2407,10 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
continue;
if (size) {
- mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+ struct skb_shared_info *sinfo;
+
+ sinfo = xdp_get_shared_info_from_buff(&xdp_buf);
+ mvneta_xdp_put_buff(pp, rxq, &xdp_buf, sinfo, -1);
goto next;
}
@@ -2417,8 +2421,10 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status);
if (IS_ERR(skb)) {
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+ struct skb_shared_info *sinfo;
- mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+ sinfo = xdp_get_shared_info_from_buff(&xdp_buf);
+ mvneta_xdp_put_buff(pp, rxq, &xdp_buf, sinfo, -1);
u64_stats_update_begin(&stats->syncp);
stats->es.skb_alloc_error++;
@@ -2438,8 +2444,12 @@ next:
}
rcu_read_unlock();
- if (xdp_buf.data_hard_start)
- mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+ if (xdp_buf.data_hard_start) {
+ struct skb_shared_info *sinfo;
+
+ sinfo = xdp_get_shared_info_from_buff(&xdp_buf);
+ mvneta_xdp_put_buff(pp, rxq, &xdp_buf, sinfo, -1);
+ }
if (ps.xdp_redirect)
xdp_do_flush_map();