| field | value | date |
|---|---|---|
| author | Haiyang Zhang <haiyangz@microsoft.com> | 2021-11-19 16:29:53 -0800 |
| committer | David S. Miller <davem@davemloft.net> | 2021-11-22 13:20:19 +0000 |
| commit | ed5356b53f070dea5dff5a01b740561cb8222199 (patch) | |
| tree | 0a6147703e1a8ab67a697389fe379b6807d78541 /drivers/net/ethernet/microsoft/mana/mana_en.c | |
| parent | b8ac21d210dff3320366495d9c2dec6f93602e6d (diff) | |
net: mana: Add XDP support
Add XDP support to the MANA driver; a minimal example program exercising the supported actions is sketched after the sign-offs below.
Supported XDP actions:
XDP_PASS, XDP_TX, XDP_DROP, XDP_ABORTED
XDP actions not yet supported:
XDP_REDIRECT
Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
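
To illustrate what this enables, here is a minimal, hypothetical XDP program (not part of the commit) that returns each of the actions the driver now handles. The program name, the drop-UDP policy, and the ICMP reflect rule are arbitrary illustrations; such a program would be attached with the usual tooling, e.g. `ip link set dev <iface> xdp obj xdp_prog.o sec xdp`, which ends up in the driver's new `.ndo_bpf` callback.

```c
/* Illustrative only -- not part of the commit.  A minimal XDP program
 * returning the verdicts this driver now handles: XDP_PASS, XDP_TX,
 * XDP_DROP, XDP_ABORTED.
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int mana_xdp_example(struct xdp_md *ctx)
{
        void *data     = (void *)(long)ctx->data;
        void *data_end = (void *)(long)ctx->data_end;
        struct ethhdr *eth = data;
        struct iphdr *iph;

        /* Truncated Ethernet header: signal a driver-visible error. */
        if (data + sizeof(*eth) > data_end)
                return XDP_ABORTED;

        if (eth->h_proto != bpf_htons(ETH_P_IP))
                return XDP_PASS;        /* let the stack handle it */

        iph = data + sizeof(*eth);
        if ((void *)(iph + 1) > data_end)
                return XDP_ABORTED;

        if (iph->protocol == IPPROTO_UDP)
                return XDP_DROP;        /* example policy: drop UDP */

        /* A real reflector would rewrite MAC/IP addresses before XDP_TX;
         * this only demonstrates the verdict the driver now honours.
         */
        if (iph->protocol == IPPROTO_ICMP)
                return XDP_TX;

        return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
```

XDP_REDIRECT is deliberately absent above, since the commit message notes it is not yet supported.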
Diffstat (limited to 'drivers/net/ethernet/microsoft/mana/mana_en.c')
-rw-r--r-- | drivers/net/ethernet/microsoft/mana/mana_en.c | 69
1 file changed, 59 insertions, 10 deletions
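
The diffstat above is limited to mana_en.c, so the XDP entry points this file starts calling (`mana_bpf`, `mana_run_xdp`, `mana_xdp_tx`, `mana_xdp_get`, `mana_chn_setxdp`) are implemented elsewhere in the driver and do not appear in the diff below. As rough orientation only, and assuming the conventional shape of an `.ndo_bpf` handler, the dispatch plausibly looks like this sketch; `mana_xdp_set()` is a stand-in name for whatever helper installs the program on the RX queues.

```c
/* Sketch only -- not the driver's actual code, which is outside this
 * filtered diff.  Shows the conventional shape of an .ndo_bpf handler.
 */
#include <linux/netdevice.h>
#include <linux/bpf.h>

/* Stand-in for the real helper: would validate constraints (e.g. MTU vs.
 * headroom), swap the program pointer on every RX queue, and let the
 * per-channel setup (mana_chn_setxdp() in the diff below) take effect.
 */
static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
                        struct netlink_ext_ack *extack)
{
        return 0;
}

int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
{
        switch (bpf->command) {
        case XDP_SETUP_PROG:
                /* bpf->prog is NULL when user space detaches the program. */
                return mana_xdp_set(ndev, bpf->prog, bpf->extack);
        default:
                return -EOPNOTSUPP;
        }
}
```

With that orientation in place, the mana_en.c changes themselves are: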
```diff
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 72cbf45c42d8..c1d5a374b967 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -125,7 +125,7 @@ frag_err:
         return -ENOMEM;
 }
 
-static int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
         enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
         struct mana_port_context *apc = netdev_priv(ndev);
@@ -378,6 +378,7 @@ static const struct net_device_ops mana_devops = {
         .ndo_start_xmit         = mana_start_xmit,
         .ndo_validate_addr      = eth_validate_addr,
         .ndo_get_stats64        = mana_get_stats64,
+        .ndo_bpf                = mana_bpf,
 };
 
 static void mana_cleanup_port_context(struct mana_port_context *apc)
@@ -906,6 +907,25 @@ static void mana_post_pkt_rxq(struct mana_rxq *rxq)
         WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
 }
 
+static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
+                                      struct xdp_buff *xdp)
+{
+        struct sk_buff *skb = build_skb(buf_va, PAGE_SIZE);
+
+        if (!skb)
+                return NULL;
+
+        if (xdp->data_hard_start) {
+                skb_reserve(skb, xdp->data - xdp->data_hard_start);
+                skb_put(skb, xdp->data_end - xdp->data);
+        } else {
+                skb_reserve(skb, XDP_PACKET_HEADROOM);
+                skb_put(skb, pkt_len);
+        }
+
+        return skb;
+}
+
 static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
                         struct mana_rxq *rxq)
 {
@@ -914,8 +934,10 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
         uint pkt_len = cqe->ppi[0].pkt_len;
         u16 rxq_idx = rxq->rxq_idx;
         struct napi_struct *napi;
+        struct xdp_buff xdp = {};
         struct sk_buff *skb;
         u32 hash_value;
+        u32 act;
 
         rxq->rx_cq.work_done++;
         napi = &rxq->rx_cq.napi;
@@ -925,15 +947,16 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
                 return;
         }
 
-        skb = build_skb(buf_va, PAGE_SIZE);
+        act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
 
-        if (!skb) {
-                free_page((unsigned long)buf_va);
-                ++ndev->stats.rx_dropped;
-                return;
-        }
+        if (act != XDP_PASS && act != XDP_TX)
+                goto drop;
+
+        skb = mana_build_skb(buf_va, pkt_len, &xdp);
+
+        if (!skb)
+                goto drop;
 
-        skb_put(skb, pkt_len);
         skb->dev = napi->dev;
 
         skb->protocol = eth_type_trans(skb, ndev);
@@ -954,12 +977,24 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
                         skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
         }
 
+        if (act == XDP_TX) {
+                skb_set_queue_mapping(skb, rxq_idx);
+                mana_xdp_tx(skb, ndev);
+                return;
+        }
+
         napi_gro_receive(napi, skb);
 
         u64_stats_update_begin(&rx_stats->syncp);
         rx_stats->packets++;
         rx_stats->bytes += pkt_len;
         u64_stats_update_end(&rx_stats->syncp);
+        return;
+
+drop:
+        free_page((unsigned long)buf_va);
+        ++ndev->stats.rx_dropped;
+        return;
 }
 
 static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
@@ -1016,7 +1051,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
         new_page = alloc_page(GFP_ATOMIC);
 
         if (new_page) {
-                da = dma_map_page(dev, new_page, 0, rxq->datasize,
+                da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
                                   DMA_FROM_DEVICE);
 
                 if (dma_mapping_error(dev, da)) {
@@ -1291,6 +1326,9 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
         napi_synchronize(napi);
 
         napi_disable(napi);
+
+        xdp_rxq_info_unreg(&rxq->xdp_rxq);
+
         netif_napi_del(napi);
 
         mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
@@ -1342,7 +1380,8 @@ static int mana_alloc_rx_wqe(struct mana_port_context *apc,
                 if (!page)
                         return -ENOMEM;
 
-                da = dma_map_page(dev, page, 0, rxq->datasize, DMA_FROM_DEVICE);
+                da = dma_map_page(dev, page, XDP_PACKET_HEADROOM, rxq->datasize,
+                                  DMA_FROM_DEVICE);
 
                 if (dma_mapping_error(dev, da)) {
                         __free_page(page);
@@ -1485,6 +1524,12 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
         gc->cq_table[cq->gdma_id] = cq->gdma_cq;
 
         netif_napi_add(ndev, &cq->napi, mana_poll, 1);
+
+        WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
+                                 cq->napi.napi_id));
+        WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
+                                           MEM_TYPE_PAGE_SHARED, NULL));
+
         napi_enable(&cq->napi);
 
         mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
@@ -1650,6 +1695,8 @@ int mana_alloc_queues(struct net_device *ndev)
         if (err)
                 goto destroy_vport;
 
+        mana_chn_setxdp(apc, mana_xdp_get(apc));
+
         return 0;
 
 destroy_vport:
@@ -1698,6 +1745,8 @@ static int mana_dealloc_queues(struct net_device *ndev)
         if (apc->port_is_up)
                 return -EINVAL;
 
+        mana_chn_setxdp(apc, NULL);
+
         /* No packet can be transmitted now since apc->port_is_up is false.
          * There is still a tiny chance that mana_poll_tx_cq() can re-enable
          * a txq because it may not timely see apc->port_is_up being cleared
```
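
For completeness, the per-packet hook used by mana_rx_skb() above, `mana_run_xdp()`, is also outside this filtered view. A plausible sketch, built on the standard kernel XDP helpers (`xdp_init_buff()`, `xdp_prepare_buff()`, `bpf_prog_run_xdp()`) and assuming the attached program is stored per RX queue as `rxq->bpf_prog`, follows; treat the field name and the details as assumptions, not the commit's actual code.

```c
/* Sketch of the per-packet XDP hook called from mana_rx_skb() -- an
 * assumption-based reconstruction, not the commit's code.  rxq->bpf_prog
 * is assumed to hold the attached program.  Because the RX page is now
 * DMA-mapped at offset XDP_PACKET_HEADROOM, packet data starts that many
 * bytes into the buffer at buf_va, which is what xdp_prepare_buff() is
 * told here.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>

#include "mana.h"        /* struct mana_rxq, etc. (driver-private header) */

u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
                 struct xdp_buff *xdp, void *buf_va, uint pkt_len)
{
        struct bpf_prog *prog;
        u32 act = XDP_PASS;

        rcu_read_lock();
        prog = rcu_dereference(rxq->bpf_prog);
        if (!prog)
                goto out;

        /* One page per buffer, packet data XDP_PACKET_HEADROOM bytes in. */
        xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
        xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, false);

        act = bpf_prog_run_xdp(prog, xdp);

        switch (act) {
        case XDP_PASS:
        case XDP_TX:
        case XDP_DROP:
                break;                  /* handled by mana_rx_skb() */
        case XDP_ABORTED:
                trace_xdp_exception(ndev, prog, act);
                break;
        default:
                /* XDP_REDIRECT is not yet supported; unknown verdicts are
                 * warned here and dropped by the caller.
                 */
                bpf_warn_invalid_xdp_action(act);
        }
out:
        rcu_read_unlock();
        return act;
}
```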