author     Michael Chan <michael.chan@broadcom.com>   2017-02-06 16:55:43 -0500
committer  David S. Miller <davem@davemloft.net>      2017-02-07 13:31:00 -0500
commit     38413406277fd060f46855ad527f6f8d4cf2652d (patch)
tree       a948e2a2552b8cb3b44643243d1dcd79d3b6b2a4 /drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
parent     c6d30e8391b85e00eb544e6cf047ee0160ee9938 (diff)
bnxt_en: Add support for XDP_TX action.

Add a dedicated transmit function and transmit completion handler for
XDP. The XDP transmit and completion logic differ from the regular TX
ring: the TX buffer is recycled back to the RX ring when it completes.

v3: Improved the buffer recycling scheme for XDP_TX.
v2: Add trace_xdp_exception(). Add dma_sync.

Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Tested-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
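For orientation before reading the diff: the recycling scheme the message describes reduces to two pieces of index bookkeeping. The transmit side stashes, in each software TX slot, the RX producer value that becomes safe to publish once that packet completes; the completion side may only advance the RX doorbell as far as the value stashed in the last completed slot. Below is a minimal userspace model of that idea under stated simplifications (one ring slot per packet instead of two BDs, hypothetical names such as xmit_xdp() and tx_complete()); it is a sketch, not the driver code.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8	/* power of two, as with the hardware rings */
#define NEXT(idx) (((idx) + 1) & (RING_SIZE - 1))

struct sw_tx_slot {
	uint16_t rx_prod;	/* RX producer to publish when this TX completes */
};

static struct sw_tx_slot tx_ring[RING_SIZE];
static uint16_t tx_prod, tx_cons, rx_prod;

/* Transmit side: record the RX producer value that becomes safe to
 * publish once this packet's buffer is handed back to the RX ring.
 */
static void xmit_xdp(void)
{
	tx_ring[tx_prod].rx_prod = NEXT(rx_prod);
	tx_prod = NEXT(tx_prod);
	rx_prod = NEXT(rx_prod);
}

/* Completion side: the RX producer may only advance as far as the
 * value recorded in the last completed TX slot.
 */
static uint16_t tx_complete(int nr_pkts)
{
	uint16_t last = tx_cons;

	while (nr_pkts--) {
		last = tx_cons;
		tx_cons = NEXT(tx_cons);
	}
	return tx_ring[last].rx_prod;	/* value for the RX doorbell */
}

int main(void)
{
	xmit_xdp();
	xmit_xdp();
	printf("publish rx_prod = %u\n", (unsigned)tx_complete(2));	/* prints 2 */
	return 0;
}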
Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c')
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c  83
1 file changed, 83 insertions(+), 0 deletions(-)
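Both new functions below key off bnxt_tx_avail(), which lives in bnxt.h rather than in this file. For reference, ring availability on a power-of-two ring is, in essence, the following; this is an approximation using the usual mask idiom, not necessarily the exact helper.

#include <stdint.h>

/* Approximate shape of bnxt_tx_avail(): (prod - cons) masked to the
 * ring size is the number of BDs in flight; the rest is available.
 */
static inline uint16_t tx_avail(uint16_t ring_size, uint16_t ring_mask,
				uint16_t prod, uint16_t cons)
{
	return ring_size - ((prod - cons) & ring_mask);
}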
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index b822e461cbde..899c30fb5188 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -19,6 +19,65 @@
#include "bnxt.h"
#include "bnxt_xdp.h"
+static void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+			  dma_addr_t mapping, u32 len, u16 rx_prod)
+{
+	struct bnxt_sw_tx_bd *tx_buf;
+	struct tx_bd_ext *txbd1;
+	struct tx_bd *txbd;
+	u32 flags;
+	u16 prod;
+
+	prod = txr->tx_prod;
+	tx_buf = &txr->tx_buf_ring[prod];
+	tx_buf->rx_prod = rx_prod;
+
+	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
+		(2 << TX_BD_FLAGS_BD_CNT_SHIFT) | TX_BD_FLAGS_COAL_NOW |
+		TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
+	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+	txbd->tx_bd_opaque = prod;
+	txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+	prod = NEXT_TX(prod);
+	txbd1 = (struct tx_bd_ext *)
+		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+	txbd1->tx_bd_hsize_lflags = cpu_to_le32(0);
+	txbd1->tx_bd_mss = cpu_to_le32(0);
+	txbd1->tx_bd_cfa_action = cpu_to_le32(0);
+	txbd1->tx_bd_cfa_meta = cpu_to_le32(0);
+
+	prod = NEXT_TX(prod);
+	txr->tx_prod = prod;
+}
+
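Two things are worth noting about bnxt_xmit_xdp(). Each packet consumes two BDs (BD_CNT is 2: the long TX BD carrying the DMA address, plus an extension BD zeroed because XDP frames need no offload metadata), which is why the XDP_TX case further down refuses to queue when fewer than two BDs are free. And the function does not ring the TX doorbell itself; judging from the BNXT_TX_EVENT flag set in that same case, the doorbell write is deferred to the NAPI poll loop so a burst of XDP_TX packets costs a single MMIO write, roughly as in this sketch (assumed field names from bnxt.h; not code from this patch):

/* Assumed shape of the poll-side flush: one TX doorbell write per
 * NAPI poll when any XDP_TX packets were queued (sketch only).
 */
if (event & BNXT_TX_EVENT)
	writel(DB_KEY_TX | txr->tx_prod, txr->tx_doorbell);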
+void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+{
+	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
+	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+	struct bnxt_sw_tx_bd *tx_buf;
+	u16 tx_cons = txr->tx_cons;
+	u16 last_tx_cons = tx_cons;
+	u16 rx_prod;
+	int i;
+
+	for (i = 0; i < nr_pkts; i++) {
+		last_tx_cons = tx_cons;
+		tx_cons = NEXT_TX(tx_cons);
+		tx_cons = NEXT_TX(tx_cons);
+	}
+	txr->tx_cons = tx_cons;
+	if (bnxt_tx_avail(bp, txr) == bp->tx_ring_size) {
+		rx_prod = rxr->rx_prod;
+	} else {
+		tx_buf = &txr->tx_buf_ring[last_tx_cons];
+		rx_prod = tx_buf->rx_prod;
+	}
+	writel(DB_KEY_RX | rx_prod, rxr->rx_doorbell);
+}
+
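The subtle branch in bnxt_tx_int_xdp() is which RX producer to publish. A worked example with illustrative indices (not taken from the patch):

/*
 * Three XDP_TX packets were queued at consumer indices 0, 2 and 4
 * (two BDs each), each stashing the RX producer safe to publish at
 * its completion:
 *
 *	tx_buf[0].rx_prod = 5
 *	tx_buf[2].rx_prod = 6
 *	tx_buf[4].rx_prod = 7
 *
 * With nr_pkts = 2, the loop leaves last_tx_cons = 2, and the ring is
 * not yet drained (BDs 4-5 still own their RX buffer), so the handler
 * writes tx_buf[2].rx_prod = 6 to the RX doorbell. Only when
 * bnxt_tx_avail() == bp->tx_ring_size, i.e. the ring is fully drained,
 * is the current rxr->rx_prod (7) safe to publish.
 */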
/* returns the following:
 * true  - packet consumed by XDP and new buffer is allocated.
 * false - packet should be passed to the stack.
@@ -27,11 +86,13 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
		 struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
+	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct pci_dev *pdev;
	struct xdp_buff xdp;
	dma_addr_t mapping;
	void *orig_data;
+	u32 tx_avail;
	u32 offset;
	u32 act;
@@ -39,6 +100,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
		return false;

	pdev = bp->pdev;
+	txr = rxr->bnapi->tx_ring;
	rx_buf = &rxr->rx_buf_ring[cons];
	offset = bp->rx_offset;
@@ -54,6 +116,13 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
	act = bpf_prog_run_xdp(xdp_prog, &xdp);
	rcu_read_unlock();

+	tx_avail = bnxt_tx_avail(bp, txr);
+	/* If the tx ring is not full, we must not update the rx producer yet
+	 * because we may still be transmitting on some BDs.
+	 */
+	if (tx_avail != bp->tx_ring_size)
+		*event &= ~BNXT_RX_EVENT;
+
	if (orig_data != xdp.data) {
		offset = xdp.data - xdp.data_hard_start;
		*data_ptr = xdp.data_hard_start + offset;
@@ -63,6 +132,20 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
	case XDP_PASS:
		return false;

+	case XDP_TX:
+		if (tx_avail < 2) {
+			trace_xdp_exception(bp->dev, xdp_prog, act);
+			bnxt_reuse_rx_data(rxr, cons, page);
+			return true;
+		}
+
+		*event = BNXT_TX_EVENT;
+		dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
+					   bp->rx_dir);
+		bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
+			      NEXT_RX(rxr->rx_prod));
+		bnxt_reuse_rx_data(rxr, cons, page);
+		return true;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* Fall thru */