Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c          |   8
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c            |   4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c                    |   1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c                   |  67
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h                   |  37
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c               |  88
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.h               |   4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c                    |  35
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h             |   3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c       |  18
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c         |  10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h         |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c                  |  22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig             |   4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c         | 225
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c      |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c          |  39
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h          |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c             |   4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c             | 282
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h             |  23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h           |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c     |  14
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c       |   2
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c        |   3
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c  |   2
-rw-r--r--  drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c        |  26
-rw-r--r--  drivers/net/ethernet/sfc/ef100_nic.c                        |  27
-rw-r--r--  drivers/net/ethernet/sfc/efx_devlink.c                      |   4
-rw-r--r--  drivers/net/ethernet/sfc/mae.c                              |   4
30 files changed, 545 insertions(+), 419 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index f55d9d9c01a8..3a4b6cb7b7b9 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -77,14 +77,18 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (wol->wolopts) {
device_set_wakeup_enable(kdev, 1);
/* Avoid unbalanced enable_irq_wake calls */
- if (priv->wol_irq_disabled)
+ if (priv->wol_irq_disabled) {
enable_irq_wake(priv->wol_irq);
+ enable_irq_wake(priv->irq0);
+ }
priv->wol_irq_disabled = false;
} else {
device_set_wakeup_enable(kdev, 0);
/* Avoid unbalanced disable_irq_wake calls */
- if (!priv->wol_irq_disabled)
+ if (!priv->wol_irq_disabled) {
disable_irq_wake(priv->wol_irq);
+ disable_irq_wake(priv->irq0);
+ }
priv->wol_irq_disabled = true;
}
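
The hunk above extends the WoL wake enable/disable to irq0 so both interrupt lines keep a balanced wake reference count across suspend cycles. A minimal sketch of the guard pattern it relies on, assuming a hypothetical driver-private struct (standing in for bcmgenet_priv); enable_irq_wake()/disable_irq_wake() are the standard <linux/interrupt.h> helpers and must be called in matched pairs:

#include <linux/interrupt.h>

/* Hypothetical driver state -- stands in for bcmgenet_priv here. */
struct my_priv {
	int wol_irq;
	int irq0;
	bool wol_irq_disabled;
};

/* Keep enable_irq_wake()/disable_irq_wake() balanced by gating every
 * transition on one flag, and apply the same transition to each IRQ
 * that participates in wake-up.
 */
static void wol_set_wake(struct my_priv *priv, bool enable)
{
	if (enable && priv->wol_irq_disabled) {
		enable_irq_wake(priv->wol_irq);
		enable_irq_wake(priv->irq0);
	} else if (!enable && !priv->wol_irq_disabled) {
		disable_irq_wake(priv->wol_irq);
		disable_irq_wake(priv->irq0);
	}
	priv->wol_irq_disabled = !enable;
}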
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 746ccfde7255..a62cffaf6ff1 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -4598,8 +4598,10 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
net_dev->hw_features = net_dev->features;
net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_XSK_ZEROCOPY |
NETDEV_XDP_ACT_NDO_XMIT;
+ if (priv->dpni_attrs.wriop_version >= DPAA2_WRIOP_VERSION(3, 0, 0) &&
+ priv->dpni_attrs.num_queues <= 8)
+ net_dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
if (priv->dpni_attrs.vlan_filter_entries)
net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 8cfc30fc9840..781475480ff2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -627,6 +627,7 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
vsi->next_base_q = ch->base_q;
break;
case ICE_VSI_VF:
+ case ICE_VSI_LB:
break;
default:
ice_vsi_free_arrays(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index c036be5eb35d..dfd22862e926 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -85,7 +85,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
ICE_TX_DESC_CMD_RE;
- tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
+ tx_buf->type = ICE_TX_BUF_DUMMY;
tx_buf->raw_buf = raw_packet;
tx_desc->cmd_type_offset_bsz =
@@ -112,31 +112,29 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
static void
ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
- if (tx_buf->skb) {
- if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) {
- devm_kfree(ring->dev, tx_buf->raw_buf);
- } else if (ice_ring_is_xdp(ring)) {
- if (ring->xsk_pool)
- xsk_buff_free(tx_buf->xdp);
- else
- page_frag_free(tx_buf->raw_buf);
- } else {
- dev_kfree_skb_any(tx_buf->skb);
- }
- if (dma_unmap_len(tx_buf, len))
- dma_unmap_single(ring->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- } else if (dma_unmap_len(tx_buf, len)) {
+ if (dma_unmap_len(tx_buf, len))
dma_unmap_page(ring->dev,
dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len),
DMA_TO_DEVICE);
+
+ switch (tx_buf->type) {
+ case ICE_TX_BUF_DUMMY:
+ devm_kfree(ring->dev, tx_buf->raw_buf);
+ break;
+ case ICE_TX_BUF_SKB:
+ dev_kfree_skb_any(tx_buf->skb);
+ break;
+ case ICE_TX_BUF_XDP_TX:
+ page_frag_free(tx_buf->raw_buf);
+ break;
+ case ICE_TX_BUF_XDP_XMIT:
+ xdp_return_frame(tx_buf->xdpf);
+ break;
+ default:
+ break;
}
tx_buf->next_to_watch = NULL;
- tx_buf->skb = NULL;
+ tx_buf->type = ICE_TX_BUF_EMPTY;
dma_unmap_len_set(tx_buf, len, 0);
/* tx_buf must be completely set up in the transmit path */
}
@@ -269,7 +267,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
DMA_TO_DEVICE);
/* clear tx_buf data */
- tx_buf->skb = NULL;
+ tx_buf->type = ICE_TX_BUF_EMPTY;
dma_unmap_len_set(tx_buf, len, 0);
/* unmap remaining buffers */
@@ -580,7 +578,7 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
case XDP_TX:
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_lock(&xdp_ring->tx_lock);
- ret = __ice_xmit_xdp_ring(xdp, xdp_ring);
+ ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false);
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_unlock(&xdp_ring->tx_lock);
if (ret == ICE_XDP_CONSUMED)
@@ -608,6 +606,25 @@ exit:
}
/**
+ * ice_xmit_xdp_ring - submit frame to XDP ring for transmission
+ * @xdpf: XDP frame that will be converted to XDP buff
+ * @xdp_ring: XDP ring for transmission
+ */
+static int ice_xmit_xdp_ring(const struct xdp_frame *xdpf,
+ struct ice_tx_ring *xdp_ring)
+{
+ struct xdp_buff xdp;
+
+ xdp.data_hard_start = (void *)xdpf;
+ xdp.data = xdpf->data;
+ xdp.data_end = xdp.data + xdpf->len;
+ xdp.frame_sz = xdpf->frame_sz;
+ xdp.flags = xdpf->flags;
+
+ return __ice_xmit_xdp_ring(&xdp, xdp_ring, true);
+}
+
+/**
* ice_xdp_xmit - submit packets to XDP ring for transmission
* @dev: netdev
* @n: number of XDP frames to be transmitted
@@ -652,7 +669,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
for (i = 0; i < n; i++) {
- struct xdp_frame *xdpf = frames[i];
+ const struct xdp_frame *xdpf = frames[i];
int err;
err = ice_xmit_xdp_ring(xdpf, xdp_ring);
@@ -1712,6 +1729,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
DMA_TO_DEVICE);
tx_buf = &tx_ring->tx_buf[i];
+ tx_buf->type = ICE_TX_BUF_FRAG;
}
/* record SW timestamp if HW timestamp is not available */
@@ -2358,6 +2376,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buf[tx_ring->next_to_use];
first->skb = skb;
+ first->type = ICE_TX_BUF_SKB;
first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
first->gso_segs = 1;
first->tx_flags = 0;
@@ -2530,11 +2549,11 @@ void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len),
DMA_TO_DEVICE);
- if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
+ if (tx_buf->type == ICE_TX_BUF_DUMMY)
devm_kfree(tx_ring->dev, tx_buf->raw_buf);
/* clear next_to_watch to prevent false hangs */
- tx_buf->raw_buf = NULL;
+ tx_buf->type = ICE_TX_BUF_EMPTY;
tx_buf->tx_flags = 0;
tx_buf->next_to_watch = NULL;
dma_unmap_len_set(tx_buf, len, 0);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index efa3d378f19e..fff0efe28373 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -121,10 +121,7 @@ static inline int ice_skb_pad(void)
#define ICE_TX_FLAGS_TSO BIT(0)
#define ICE_TX_FLAGS_HW_VLAN BIT(1)
#define ICE_TX_FLAGS_SW_VLAN BIT(2)
-/* ICE_TX_FLAGS_DUMMY_PKT is used to mark dummy packets that should be
- * freed instead of returned like skb packets.
- */
-#define ICE_TX_FLAGS_DUMMY_PKT BIT(3)
+/* Free, was ICE_TX_FLAGS_DUMMY_PKT */
#define ICE_TX_FLAGS_TSYN BIT(4)
#define ICE_TX_FLAGS_IPV4 BIT(5)
#define ICE_TX_FLAGS_IPV6 BIT(6)
@@ -149,22 +146,44 @@ static inline int ice_skb_pad(void)
#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)
+/**
+ * enum ice_tx_buf_type - type of &ice_tx_buf to act on Tx completion
+ * @ICE_TX_BUF_EMPTY: unused OR XSk frame, no action required
+ * @ICE_TX_BUF_DUMMY: dummy Flow Director packet, unmap and kfree()
+ * @ICE_TX_BUF_FRAG: mapped skb OR &xdp_buff frag, only unmap DMA
+ * @ICE_TX_BUF_SKB: &sk_buff, unmap and consume_skb(), update stats
+ * @ICE_TX_BUF_XDP_TX: &xdp_buff, unmap and page_frag_free(), stats
+ * @ICE_TX_BUF_XDP_XMIT: &xdp_frame, unmap and xdp_return_frame(), stats
+ * @ICE_TX_BUF_XSK_TX: &xdp_buff on XSk queue, xsk_buff_free(), stats
+ */
+enum ice_tx_buf_type {
+ ICE_TX_BUF_EMPTY = 0U,
+ ICE_TX_BUF_DUMMY,
+ ICE_TX_BUF_FRAG,
+ ICE_TX_BUF_SKB,
+ ICE_TX_BUF_XDP_TX,
+ ICE_TX_BUF_XDP_XMIT,
+ ICE_TX_BUF_XSK_TX,
+};
+
struct ice_tx_buf {
union {
struct ice_tx_desc *next_to_watch;
u32 rs_idx;
};
union {
- struct sk_buff *skb;
- void *raw_buf; /* used for XDP */
- struct xdp_buff *xdp; /* used for XDP_TX ZC */
+ void *raw_buf; /* used for XDP_TX and FDir rules */
+ struct sk_buff *skb; /* used for .ndo_start_xmit() */
+ struct xdp_frame *xdpf; /* used for .ndo_xdp_xmit() */
+ struct xdp_buff *xdp; /* used for XDP_TX ZC */
};
unsigned int bytecount;
union {
unsigned int gso_segs;
- unsigned int nr_frags; /* used for mbuf XDP */
+ unsigned int nr_frags; /* used for mbuf XDP */
};
- u32 tx_flags;
+ u32 type:16; /* &ice_tx_buf_type */
+ u32 tx_flags:16;
DEFINE_DMA_UNMAP_LEN(len);
DEFINE_DMA_UNMAP_ADDR(dma);
};
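
Packing type and tx_flags as two 16-bit bitfields means the old flag word simply donates half its bits, so struct ice_tx_buf does not grow. A hypothetical compile-time guard for that invariant (not part of this patch) could assert that the highest enum value fits the narrowed field:

#include <linux/build_bug.h>

/* Hypothetical guard (not in this patch): the largest ice_tx_buf_type
 * value must fit the 16-bit `type` bitfield of struct ice_tx_buf.
 */
static_assert(ICE_TX_BUF_XSK_TX < (1U << 16));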
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 9bbed3f14e42..7bc5aa340c7d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -222,18 +222,28 @@ ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
/**
* ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
- * @xdp_ring: XDP Tx ring
+ * @dev: device for DMA mapping
* @tx_buf: Tx buffer to clean
+ * @bq: XDP bulk flush struct
*/
static void
-ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
+ice_clean_xdp_tx_buf(struct device *dev, struct ice_tx_buf *tx_buf,
+ struct xdp_frame_bulk *bq)
{
- dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
dma_unmap_len_set(tx_buf, len, 0);
- xdp_ring->xdp_tx_active--;
- page_frag_free(tx_buf->raw_buf);
- tx_buf->raw_buf = NULL;
+
+ switch (tx_buf->type) {
+ case ICE_TX_BUF_XDP_TX:
+ page_frag_free(tx_buf->raw_buf);
+ break;
+ case ICE_TX_BUF_XDP_XMIT:
+ xdp_return_frame_bulk(tx_buf->xdpf, bq);
+ break;
+ default:
+ break;
+ }
+
+ tx_buf->type = ICE_TX_BUF_EMPTY;
}
/**
@@ -243,11 +253,13 @@ ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
static u32 ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
{
int total_bytes = 0, total_pkts = 0;
+ struct device *dev = xdp_ring->dev;
u32 ntc = xdp_ring->next_to_clean;
struct ice_tx_desc *tx_desc;
u32 cnt = xdp_ring->count;
+ struct xdp_frame_bulk bq;
+ u32 frags, xdp_tx = 0;
u32 ready_frames = 0;
- u32 frags;
u32 idx;
u32 ret;
@@ -261,12 +273,16 @@ static u32 ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
ready_frames = idx + cnt - ntc + 1;
}
- if (!ready_frames)
+ if (unlikely(!ready_frames))
return 0;
ret = ready_frames;
+ xdp_frame_bulk_init(&bq);
+ rcu_read_lock(); /* xdp_return_frame_bulk() */
+
while (ready_frames) {
struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];
+ struct ice_tx_buf *head = tx_buf;
/* bytecount holds size of head + frags */
total_bytes += tx_buf->bytecount;
@@ -274,11 +290,8 @@ static u32 ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
total_pkts++;
/* count head + frags */
ready_frames -= frags + 1;
+ xdp_tx++;
- if (xdp_ring->xsk_pool)
- xsk_buff_free(tx_buf->xdp);
- else
- ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
ntc++;
if (ntc == cnt)
ntc = 0;
@@ -286,15 +299,21 @@ static u32 ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
for (int i = 0; i < frags; i++) {
tx_buf = &xdp_ring->tx_buf[ntc];
- ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+ ice_clean_xdp_tx_buf(dev, tx_buf, &bq);
ntc++;
if (ntc == cnt)
ntc = 0;
}
+
+ ice_clean_xdp_tx_buf(dev, head, &bq);
}
+ xdp_flush_frame_bulk(&bq);
+ rcu_read_unlock();
+
tx_desc->cmd_type_offset_bsz = 0;
xdp_ring->next_to_clean = ntc;
+ xdp_ring->xdp_tx_active -= xdp_tx;
ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);
return ret;
@@ -304,8 +323,10 @@ static u32 ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
* __ice_xmit_xdp_ring - submit frame to XDP ring for transmission
* @xdp: XDP buffer to be placed onto Tx descriptors
* @xdp_ring: XDP ring for transmission
+ * @frame: whether this comes from .ndo_xdp_xmit()
*/
-int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
+int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
+ bool frame)
{
struct skb_shared_info *sinfo = NULL;
u32 size = xdp->data_end - xdp->data;
@@ -321,17 +342,17 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
u32 frag = 0;
free_space = ICE_DESC_UNUSED(xdp_ring);
-
- if (ICE_DESC_UNUSED(xdp_ring) < ICE_RING_QUARTER(xdp_ring))
+ if (free_space < ICE_RING_QUARTER(xdp_ring))
free_space += ice_clean_xdp_irq(xdp_ring);
+ if (unlikely(!free_space))
+ goto busy;
+
if (unlikely(xdp_buff_has_frags(xdp))) {
sinfo = xdp_get_shared_info_from_buff(xdp);
nr_frags = sinfo->nr_frags;
- if (free_space < nr_frags + 1) {
- xdp_ring->ring_stats->tx_stats.tx_busy++;
- return ICE_XDP_CONSUMED;
- }
+ if (free_space < nr_frags + 1)
+ goto busy;
}
tx_desc = ICE_TX_DESC(xdp_ring, ntu);
@@ -349,9 +370,15 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
dma_unmap_len_set(tx_buf, len, size);
dma_unmap_addr_set(tx_buf, dma, dma);
+ if (frame) {
+ tx_buf->type = ICE_TX_BUF_FRAG;
+ } else {
+ tx_buf->type = ICE_TX_BUF_XDP_TX;
+ tx_buf->raw_buf = data;
+ }
+
tx_desc->buf_addr = cpu_to_le64(dma);
tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0);
- tx_buf->raw_buf = data;
ntu++;
if (ntu == cnt)
@@ -372,6 +399,11 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
tx_head->bytecount = xdp_get_buff_len(xdp);
tx_head->nr_frags = nr_frags;
+ if (frame) {
+ tx_head->type = ICE_TX_BUF_XDP_XMIT;
+ tx_head->xdpf = xdp->data_hard_start;
+ }
+
/* update last descriptor from a frame with EOP */
tx_desc->cmd_type_offset_bsz |=
cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S);
@@ -395,19 +427,11 @@ dma_unmap:
ntu--;
}
return ICE_XDP_CONSUMED;
-}
-/**
- * ice_xmit_xdp_ring - submit frame to XDP ring for transmission
- * @xdpf: XDP frame that will be converted to XDP buff
- * @xdp_ring: XDP ring for transmission
- */
-int ice_xmit_xdp_ring(struct xdp_frame *xdpf, struct ice_tx_ring *xdp_ring)
-{
- struct xdp_buff xdp;
+busy:
+ xdp_ring->ring_stats->tx_stats.tx_busy++;
- xdp_convert_frame_to_buff(xdpf, &xdp);
- return __ice_xmit_xdp_ring(&xdp, xdp_ring);
+ return ICE_XDP_CONSUMED;
}
/**
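
The completion path above batches .ndo_xdp_xmit() frame returns through the bulk-free API from <net/xdp.h>. A minimal sketch of that pattern in isolation (the loop body is hypothetical; the init/return/flush sequence and the RCU section around xdp_return_frame_bulk() are the actual API contract):

#include <net/xdp.h>

/* Sketch: bulk-return completed XDP frames. xdp_return_frame_bulk()
 * batches page returns per memory model and must run under RCU;
 * xdp_flush_frame_bulk() releases whatever is still queued.
 */
static void complete_xdp_frames(struct xdp_frame **done, int n)
{
	struct xdp_frame_bulk bq;
	int i;

	xdp_frame_bulk_init(&bq);
	rcu_read_lock(); /* protects mem-model lookups in the bulk path */

	for (i = 0; i < n; i++)
		xdp_return_frame_bulk(done[i], &bq);

	xdp_flush_frame_bulk(&bq);
	rcu_read_unlock();
}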
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index ea977f283c22..115969ecdf7b 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -142,8 +142,8 @@ static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring)
void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx);
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
-int ice_xmit_xdp_ring(struct xdp_frame *xdpf, struct ice_tx_ring *xdp_ring);
-int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
+int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
+ bool frame);
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index b2d96ae5668c..31565bbafa22 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -598,21 +598,6 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
}
/**
- * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
- * @xdp_ring: XDP Tx ring
- * @tx_buf: Tx buffer to clean
- */
-static void
-ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
-{
- page_frag_free(tx_buf->raw_buf);
- xdp_ring->xdp_tx_active--;
- dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
-}
-
-/**
* ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
* @xdp_ring: XDP Tx ring
*/
@@ -629,8 +614,8 @@ static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
- if ((tx_desc->cmd_type_offset_bsz &
- cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
+ if (tx_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
if (last_rs >= ntc)
completed_frames = last_rs - ntc + 1;
else
@@ -649,9 +634,10 @@ static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
for (i = 0; i < completed_frames; i++) {
tx_buf = &xdp_ring->tx_buf[ntc];
- if (tx_buf->raw_buf) {
- ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
- tx_buf->raw_buf = NULL;
+ if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
+ tx_buf->type = ICE_TX_BUF_EMPTY;
+ xsk_buff_free(tx_buf->xdp);
+ xdp_ring->xdp_tx_active--;
} else {
xsk_frames++;
}
@@ -703,6 +689,7 @@ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
tx_buf = &xdp_ring->tx_buf[ntu];
tx_buf->xdp = xdp;
+ tx_buf->type = ICE_TX_BUF_XSK_TX;
tx_desc = ICE_TX_DESC(xdp_ring, ntu);
tx_desc->buf_addr = cpu_to_le64(dma);
tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
@@ -1101,12 +1088,12 @@ void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
while (ntc != ntu) {
struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];
- if (tx_buf->xdp)
+ if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
+ tx_buf->type = ICE_TX_BUF_EMPTY;
xsk_buff_free(tx_buf->xdp);
- else
+ } else {
xsk_frames++;
-
- tx_buf->raw_buf = NULL;
+ }
ntc++;
if (ntc >= xdp_ring->count)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 5eea2b6cf6bd..389663a13d1d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -888,6 +888,9 @@ int rvu_cpt_init(struct rvu *rvu);
int rvu_set_channels_base(struct rvu *rvu);
void rvu_program_channels(struct rvu *rvu);
+/* CN10K NIX */
+void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw);
+
/* CN10K RVU - LMT*/
void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 7dbbc115cde4..4ad9ff025c96 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -538,3 +538,21 @@ void rvu_program_channels(struct rvu *rvu)
rvu_lbk_set_channels(rvu);
rvu_rpm_set_channels(rvu);
}
+
+void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ int blkaddr = nix_hw->blkaddr;
+ u64 cfg;
+
+ /* Set AF vWQE timer interval to an LF-configurable range of
+ * 6.4us to 1.632ms.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_VWQE_TIMER, 0x3FULL);
+
+ /* Enable NIX RX stream and global conditional clock to
+ * avoid multiple frees of NPA buffers.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CFG);
+ cfg |= BIT_ULL(1) | BIT_ULL(2);
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG, cfg);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 89e94569e74c..26e639e57dae 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2058,6 +2058,13 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
int err, restore_tx_en = 0;
u64 cfg;
+ if (!is_rvu_otx2(rvu)) {
+ /* Skip SMQ flush if pkt count is zero */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq));
+ if (!cfg)
+ return 0;
+ }
+
/* enable cgx tx if disabled */
if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -4309,6 +4316,9 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
+ if (!is_rvu_otx2(rvu))
+ rvu_nix_block_cn10k_init(rvu, nix_hw);
+
if (is_block_implemented(hw, blkaddr)) {
err = nix_setup_txschq(rvu, nix_hw, blkaddr);
if (err)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 5437bd20c719..1729b22580ce 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -189,6 +189,7 @@
#define NIX_AF_RX_CFG (0x00D0)
#define NIX_AF_AVG_DELAY (0x00E0)
#define NIX_AF_CINT_DELAY (0x00F0)
+#define NIX_AF_VWQE_TIMER (0x00F8)
#define NIX_AF_RX_MCAST_BASE (0x0100)
#define NIX_AF_RX_MCAST_CFG (0x0110)
#define NIX_AF_RX_MCAST_BUF_BASE (0x0120)
@@ -426,6 +427,7 @@
#define NIX_AF_RX_NPC_MIRROR_DROP (0x4730)
#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16)
#define NIX_AF_LINKX_CFG(a) (0x4010 | (a) << 17)
+#define NIX_AF_MDQX_IN_MD_COUNT(a) (0x14e0 | (a) << 16)
#define NIX_PRIV_AF_INT_CFG (0x8000000)
#define NIX_PRIV_LFX_CFG (0x8000010)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c5758637b7be..2f79378fbf6e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -699,32 +699,32 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
} else {
inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN);
- memset(((void *)(inl + 1)) + skb->len, 0,
+ memset(inl->data + skb->len, 0,
MIN_PKT_LEN - skb->len);
}
- skb_copy_from_linear_data(skb, inl + 1, hlen);
+ skb_copy_from_linear_data(skb, inl->data, hlen);
if (shinfo->nr_frags)
- memcpy(((void *)(inl + 1)) + hlen, fragptr,
+ memcpy(inl->data + hlen, fragptr,
skb_frag_size(&shinfo->frags[0]));
} else {
inl->byte_count = cpu_to_be32(1 << 31 | spc);
if (hlen <= spc) {
- skb_copy_from_linear_data(skb, inl + 1, hlen);
+ skb_copy_from_linear_data(skb, inl->data, hlen);
if (hlen < spc) {
- memcpy(((void *)(inl + 1)) + hlen,
+ memcpy(inl->data + hlen,
fragptr, spc - hlen);
fragptr += spc - hlen;
}
- inl = (void *) (inl + 1) + spc;
- memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
+ inl = (void *)inl->data + spc;
+ memcpy(inl->data, fragptr, skb->len - spc);
} else {
- skb_copy_from_linear_data(skb, inl + 1, spc);
- inl = (void *) (inl + 1) + spc;
- skb_copy_from_linear_data_offset(skb, spc, inl + 1,
+ skb_copy_from_linear_data(skb, inl->data, spc);
+ inl = (void *)inl->data + spc;
+ skb_copy_from_linear_data_offset(skb, spc, inl->data,
hlen - spc);
if (shinfo->nr_frags)
- memcpy(((void *)(inl + 1)) + hlen - spc,
+ memcpy(inl->data + hlen - spc,
fragptr,
skb_frag_size(&shinfo->frags[0]));
}
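
The `(void *)(inl + 1)` arithmetic above is replaced by `inl->data` flexible-array accesses. The struct change itself lands outside drivers/net/ethernet (include/linux/mlx4/qp.h), so it is not in this diffstat; presumably it looks something like this hypothetical reconstruction:

/* Hypothetical shape of the header change this hunk depends on: a
 * flexible array member replaces the `inl + 1` pointer math, letting
 * the compiler bounds-check accesses under CONFIG_FORTIFY_SOURCE.
 */
struct mlx4_wqe_inline_seg {
	__be32 byte_count;
	__u8 data[];	/* inline payload starts right after byte_count */
};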
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 26685fd0fdaa..bb1d7b039a7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -85,7 +85,7 @@ config MLX5_BRIDGE
config MLX5_CLS_ACT
bool "MLX5 TC classifier action support"
- depends on MLX5_ESWITCH && NET_CLS_ACT
+ depends on MLX5_ESWITCH && NET_CLS_ACT && NET_TC_SKB_EXT
default y
help
mlx5 ConnectX offloads support for TC classifier action (NET_CLS_ACT),
@@ -100,7 +100,7 @@ config MLX5_CLS_ACT
config MLX5_TC_CT
bool "MLX5 TC connection tracking offload support"
- depends on MLX5_CLS_ACT && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT
+ depends on MLX5_CLS_ACT && NF_FLOW_TABLE && NET_ACT_CT
default y
help
Say Y here if you want to support offloading connection tracking rules
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 3b590cfe33b8..e24b46953542 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies. */
-#include <net/dst_metadata.h>
#include <linux/netdevice.h>
#include <linux/if_macvlan.h>
#include <linux/list.h>
@@ -665,232 +664,54 @@ void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv)
mlx5e_rep_indr_block_unbind);
}
-static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
- struct mlx5e_tc_update_priv *tc_priv,
- u32 tunnel_id)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct tunnel_match_enc_opts enc_opts = {};
- struct mlx5_rep_uplink_priv *uplink_priv;
- struct mlx5e_rep_priv *uplink_rpriv;
- struct metadata_dst *tun_dst;
- struct tunnel_match_key key;
- u32 tun_id, enc_opts_id;
- struct net_device *dev;
- int err;
-
- enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
- tun_id = tunnel_id >> ENC_OPTS_BITS;
-
- if (!tun_id)
- return true;
-
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- uplink_priv = &uplink_rpriv->uplink_priv;
-
- err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
- if (err) {
- netdev_dbg(priv->netdev,
- "Couldn't find tunnel for tun_id: %d, err: %d\n",
- tun_id, err);
- return false;
- }
-
- if (enc_opts_id) {
- err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
- enc_opts_id, &enc_opts);
- if (err) {
- netdev_dbg(priv->netdev,
- "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
- enc_opts_id, err);
- return false;
- }
- }
-
- if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
- key.enc_ip.tos, key.enc_ip.ttl,
- key.enc_tp.dst, TUNNEL_KEY,
- key32_to_tunnel_id(key.enc_key_id.keyid),
- enc_opts.key.len);
- } else if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
- tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
- key.enc_ip.tos, key.enc_ip.ttl,
- key.enc_tp.dst, 0, TUNNEL_KEY,
- key32_to_tunnel_id(key.enc_key_id.keyid),
- enc_opts.key.len);
- } else {
- netdev_dbg(priv->netdev,
- "Couldn't restore tunnel, unsupported addr_type: %d\n",
- key.enc_control.addr_type);
- return false;
- }
-
- if (!tun_dst) {
- netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
- return false;
- }
-
- tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;
-
- if (enc_opts.key.len)
- ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
- enc_opts.key.data,
- enc_opts.key.len,
- enc_opts.key.dst_opt_type);
-
- skb_dst_set(skb, (struct dst_entry *)tun_dst);
- dev = dev_get_by_index(&init_net, key.filter_ifindex);
- if (!dev) {
- netdev_dbg(priv->netdev,
- "Couldn't find tunnel device with ifindex: %d\n",
- key.filter_ifindex);
- return false;
- }
-
- /* Set fwd_dev so we do dev_put() after datapath */
- tc_priv->fwd_dev = dev;
-
- skb->dev = dev;
-
- return true;
-}
-
-static bool mlx5e_restore_skb_chain(struct sk_buff *skb, u32 chain, u32 reg_c1,
- struct mlx5e_tc_update_priv *tc_priv)
-{
- struct mlx5e_priv *priv = netdev_priv(skb->dev);
- u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;
-
-#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- if (chain) {
- struct mlx5_rep_uplink_priv *uplink_priv;
- struct mlx5e_rep_priv *uplink_rpriv;
- struct tc_skb_ext *tc_skb_ext;
- struct mlx5_eswitch *esw;
- u32 zone_restore_id;
-
- tc_skb_ext = tc_skb_ext_alloc(skb);
- if (!tc_skb_ext) {
- WARN_ON(1);
- return false;
- }
- tc_skb_ext->chain = chain;
- zone_restore_id = reg_c1 & ESW_ZONE_ID_MASK;
- esw = priv->mdev->priv.eswitch;
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- uplink_priv = &uplink_rpriv->uplink_priv;
- if (!mlx5e_tc_ct_restore_flow(uplink_priv->ct_priv, skb,
- zone_restore_id))
- return false;
- }
-#endif /* CONFIG_NET_TC_SKB_EXT */
-
- return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
-}
-
-static void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv)
-{
- if (tc_priv->fwd_dev)
- dev_put(tc_priv->fwd_dev);
-}
-
-static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb,
- struct mlx5_mapped_obj *mapped_obj,
- struct mlx5e_tc_update_priv *tc_priv)
-{
- if (!mlx5e_restore_tunnel(priv, skb, tc_priv, mapped_obj->sample.tunnel_id)) {
- netdev_dbg(priv->netdev,
- "Failed to restore tunnel info for sampled packet\n");
- return;
- }
- mlx5e_tc_sample_skb(skb, mapped_obj);
- mlx5_rep_tc_post_napi_receive(tc_priv);
-}
-
-static bool mlx5e_restore_skb_int_port(struct mlx5e_priv *priv, struct sk_buff *skb,
- struct mlx5_mapped_obj *mapped_obj,
- struct mlx5e_tc_update_priv *tc_priv,
- bool *forward_tx,
- u32 reg_c1)
-{
- u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_rep_uplink_priv *uplink_priv;
- struct mlx5e_rep_priv *uplink_rpriv;
-
- /* Tunnel restore takes precedence over int port restore */
- if (tunnel_id)
- return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
-
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- uplink_priv = &uplink_rpriv->uplink_priv;
-
- if (mlx5e_tc_int_port_dev_fwd(uplink_priv->int_port_priv, skb,
- mapped_obj->int_port_metadata, forward_tx)) {
- /* Set fwd_dev for future dev_put */
- tc_priv->fwd_dev = skb->dev;
-
- return true;
- }
-
- return false;
-}
-
void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
struct sk_buff *skb)
{
- u32 reg_c1 = be32_to_cpu(cqe->ft_metadata);
+ u32 reg_c0, reg_c1, zone_restore_id, tunnel_id;
struct mlx5e_tc_update_priv tc_priv = {};
- struct mlx5_mapped_obj mapped_obj;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5_tc_ct_priv *ct_priv;
+ struct mapping_ctx *mapping_ctx;
struct mlx5_eswitch *esw;
- bool forward_tx = false;
struct mlx5e_priv *priv;
- u32 reg_c0;
- int err;
reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
if (!reg_c0 || reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
goto forward;
- /* If reg_c0 is not equal to the default flow tag then skb->mark
+ /* If mapped_obj_id is not equal to the default flow tag then skb->mark
* is not supported and must be reset back to 0.
*/
skb->mark = 0;
priv = netdev_priv(skb->dev);
esw = priv->mdev->priv.eswitch;
- err = mapping_find(esw->offloads.reg_c0_obj_pool, reg_c0, &mapped_obj);
- if (err) {
- netdev_dbg(priv->netdev,
- "Couldn't find mapped object for reg_c0: %d, err: %d\n",
- reg_c0, err);
- goto free_skb;
- }
+ mapping_ctx = esw->offloads.reg_c0_obj_pool;
+ reg_c1 = be32_to_cpu(cqe->ft_metadata);
+ zone_restore_id = reg_c1 & ESW_ZONE_ID_MASK;
+ tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;
- if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
- if (!mlx5e_restore_skb_chain(skb, mapped_obj.chain, reg_c1, &tc_priv) &&
- !mlx5_ipsec_is_rx_flow(cqe))
- goto free_skb;
- } else if (mapped_obj.type == MLX5_MAPPED_OBJ_SAMPLE) {
- mlx5e_restore_skb_sample(priv, skb, &mapped_obj, &tc_priv);
- goto free_skb;
- } else if (mapped_obj.type == MLX5_MAPPED_OBJ_INT_PORT_METADATA) {
- if (!mlx5e_restore_skb_int_port(priv, skb, &mapped_obj, &tc_priv,
- &forward_tx, reg_c1))
- goto free_skb;
- } else {
- netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ ct_priv = uplink_priv->ct_priv;
+
+ if (!mlx5_ipsec_is_rx_flow(cqe) &&
+ !mlx5e_tc_update_skb(cqe, skb, mapping_ctx, reg_c0, ct_priv, zone_restore_id, tunnel_id,
+ &tc_priv))
goto free_skb;
- }
forward:
- if (forward_tx)
+ if (tc_priv.skb_done)
+ goto free_skb;
+
+ if (tc_priv.forward_tx)
dev_queue_xmit(skb);
else
napi_gro_receive(rq->cq.napi, skb);
- mlx5_rep_tc_post_napi_receive(&tc_priv);
+ if (tc_priv.fwd_dev)
+ dev_put(tc_priv.fwd_dev);
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
index f2c2c752bd1c..558a776359af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
@@ -237,7 +237,7 @@ sample_modify_hdr_get(struct mlx5_core_dev *mdev, u32 obj_id,
int err;
err = mlx5e_tc_match_to_reg_set(mdev, mod_acts, MLX5_FLOW_NAMESPACE_FDB,
- CHAIN_TO_REG, obj_id);
+ MAPPED_OBJ_TO_REG, obj_id);
if (err)
goto err_set_regc0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index f0acb02ffc76..314983bc6f08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -60,6 +60,7 @@ struct mlx5_tc_ct_debugfs {
struct mlx5_tc_ct_priv {
struct mlx5_core_dev *dev;
+ struct mlx5e_priv *priv;
const struct net_device *netdev;
struct mod_hdr_tbl *mod_hdr_tbl;
struct xarray tuple_ids;
@@ -86,7 +87,6 @@ struct mlx5_ct_flow {
struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_flow_handle *pre_ct_rule;
struct mlx5_ct_ft *ft;
- u32 chain_mapping;
};
struct mlx5_ct_zone_rule {
@@ -1558,6 +1558,7 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
attr->ct_attr.zone = act->ct.zone;
attr->ct_attr.ct_action = act->ct.action;
attr->ct_attr.nf_ft = act->ct.flow_table;
+ attr->ct_attr.act_miss_cookie = act->miss_cookie;
return 0;
}
@@ -1895,7 +1896,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
* + ft prio (tc chain) +
* + original match +
* +---------------------+
- * | set chain miss mapping
+ * | set act_miss_cookie mapping
* | set fte_id
* | set tunnel_id
* | do decap
@@ -1940,7 +1941,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_modify_hdr *mod_hdr;
struct mlx5_ct_flow *ct_flow;
- int chain_mapping = 0, err;
+ int act_miss_mapping = 0, err;
struct mlx5_ct_ft *ft;
u16 zone;
@@ -1975,22 +1976,18 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- /* Write chain miss tag for miss in ct table as we
- * don't go though all prios of this chain as normal tc rules
- * miss.
- */
- err = mlx5_chains_get_chain_mapping(ct_priv->chains, attr->chain,
- &chain_mapping);
+ err = mlx5e_tc_action_miss_mapping_get(ct_priv->priv, attr, attr->ct_attr.act_miss_cookie,
+ &act_miss_mapping);
if (err) {
- ct_dbg("Failed to get chain register mapping for chain");
- goto err_get_chain;
+ ct_dbg("Failed to get register mapping for act miss");
+ goto err_get_act_miss;
}
- ct_flow->chain_mapping = chain_mapping;
+ attr->ct_attr.act_miss_mapping = act_miss_mapping;
err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts, ct_priv->ns_type,
- CHAIN_TO_REG, chain_mapping);
+ MAPPED_OBJ_TO_REG, act_miss_mapping);
if (err) {
- ct_dbg("Failed to set chain register mapping");
+ ct_dbg("Failed to set act miss register mapping");
goto err_mapping;
}
@@ -2054,8 +2051,8 @@ err_insert_orig:
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
err_mapping:
mlx5e_mod_hdr_dealloc(pre_mod_acts);
- mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
-err_get_chain:
+ mlx5e_tc_action_miss_mapping_put(ct_priv->priv, attr, act_miss_mapping);
+err_get_act_miss:
kfree(ct_flow->pre_ct_attr);
err_alloc_pre:
mlx5_tc_ct_del_ft_cb(ct_priv, ft);
@@ -2094,7 +2091,7 @@ __mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
mlx5_tc_rule_delete(priv, ct_flow->pre_ct_rule, pre_ct_attr);
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
- mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
+ mlx5e_tc_action_miss_mapping_put(ct_priv->priv, attr, attr->ct_attr.act_miss_mapping);
mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
kfree(ct_flow->pre_ct_attr);
@@ -2191,13 +2188,6 @@ mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv,
const char *err_msg = NULL;
int err = 0;
-#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- /* cannot restore chain ID on HW miss */
-
- err_msg = "tc skb extension missing";
- err = -EOPNOTSUPP;
- goto out_err;
-#endif
if (IS_ERR_OR_NULL(post_act)) {
/* Ignore_flow_level support isn't supported by default for VFs and so post_act
* won't be supported. Skip showing error msg.
@@ -2274,6 +2264,7 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
}
spin_lock_init(&ct_priv->ht_lock);
+ ct_priv->priv = priv;
ct_priv->ns_type = ns_type;
ct_priv->chains = chains;
ct_priv->netdev = priv->netdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 5bbd6b92840f..5c5ddaa83055 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -28,6 +28,8 @@ struct mlx5_ct_attr {
struct mlx5_ct_flow *ct_flow;
struct nf_flowtable *nf_ft;
u32 ct_labels_id;
+ u32 act_miss_mapping;
+ u64 act_miss_cookie;
};
#define zone_to_reg_ct {\
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 15d9932f741d..3f7b63d6616b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1790,7 +1790,7 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
if (mlx5e_cqe_regb_chain(cqe))
- if (!mlx5e_tc_update_skb(cqe, skb)) {
+ if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
dev_kfree_skb_any(skb);
goto free_wqe;
}
@@ -2257,7 +2257,7 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
if (mlx5e_cqe_regb_chain(cqe))
- if (!mlx5e_tc_update_skb(cqe, skb)) {
+ if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
dev_kfree_skb_any(skb);
goto mpwrq_cqe_out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9bbd31e304be..e34d9b5fb504 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -43,6 +43,7 @@
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
+#include <net/dst_metadata.h>
#include "en.h"
#include "en/tc/post_act.h"
#include "en/tc/act_stats.h"
@@ -108,7 +109,7 @@ struct mlx5e_tc_table {
};
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
- [CHAIN_TO_REG] = {
+ [MAPPED_OBJ_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
.moffset = 0,
.mlen = 16,
@@ -135,7 +136,7 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
* into reg_b that is passed to SW since we don't
* jump between steering domains.
*/
- [NIC_CHAIN_TO_REG] = {
+ [NIC_MAPPED_OBJ_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
.moffset = 0,
.mlen = 16,
@@ -1604,7 +1605,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
goto err_get_chain;
err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
- CHAIN_TO_REG, chain_mapping);
+ MAPPED_OBJ_TO_REG, chain_mapping);
if (err)
goto err_reg_set;
@@ -3815,6 +3816,7 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
attr2->parse_attr = parse_attr;
attr2->dest_chain = 0;
attr2->dest_ft = NULL;
+ attr2->act_id_restore_rule = NULL;
if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
attr2->esw_attr->out_count = 0;
@@ -4176,7 +4178,7 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
parse_state->actions |= attr->action;
if (!tc_act->stats_action)
- attr->tc_act_cookies[attr->tc_act_cookies_count++] = act->act_cookie;
+ attr->tc_act_cookies[attr->tc_act_cookies_count++] = act->cookie;
/* Split attr for multi table act if not the last act. */
if (jump_state.jump_target ||
@@ -5604,48 +5606,268 @@ int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
-bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
- struct sk_buff *skb)
+static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_tc_update_priv *tc_priv,
+ u32 tunnel_id)
{
-#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- u32 chain = 0, chain_tag, reg_b, zone_restore_id;
- struct mlx5e_priv *priv = netdev_priv(skb->dev);
- struct mlx5_mapped_obj mapped_obj;
- struct tc_skb_ext *tc_skb_ext;
- struct mlx5e_tc_table *tc;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct tunnel_match_enc_opts enc_opts = {};
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct metadata_dst *tun_dst;
+ struct tunnel_match_key key;
+ u32 tun_id, enc_opts_id;
+ struct net_device *dev;
int err;
- reg_b = be32_to_cpu(cqe->ft_metadata);
- tc = mlx5e_fs_get_tc(priv->fs);
- chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
+ enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
+ tun_id = tunnel_id >> ENC_OPTS_BITS;
+
+ if (!tun_id)
+ return true;
- err = mapping_find(tc->mapping, chain_tag, &mapped_obj);
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
if (err) {
netdev_dbg(priv->netdev,
- "Couldn't find chain for chain tag: %d, err: %d\n",
- chain_tag, err);
+ "Couldn't find tunnel for tun_id: %d, err: %d\n",
+ tun_id, err);
return false;
}
- if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
- chain = mapped_obj.chain;
- tc_skb_ext = tc_skb_ext_alloc(skb);
- if (WARN_ON(!tc_skb_ext))
+ if (enc_opts_id) {
+ err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
+ enc_opts_id, &enc_opts);
+ if (err) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
+ enc_opts_id, err);
return false;
+ }
+ }
+
+ switch (key.enc_control.addr_type) {
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
+ key.enc_ip.tos, key.enc_ip.ttl,
+ key.enc_tp.dst, TUNNEL_KEY,
+ key32_to_tunnel_id(key.enc_key_id.keyid),
+ enc_opts.key.len);
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
+ key.enc_ip.tos, key.enc_ip.ttl,
+ key.enc_tp.dst, 0, TUNNEL_KEY,
+ key32_to_tunnel_id(key.enc_key_id.keyid),
+ enc_opts.key.len);
+ break;
+ default:
+ netdev_dbg(priv->netdev,
+ "Couldn't restore tunnel, unsupported addr_type: %d\n",
+ key.enc_control.addr_type);
+ return false;
+ }
+
+ if (!tun_dst) {
+ netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
+ return false;
+ }
+
+ tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;
+
+ if (enc_opts.key.len)
+ ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
+ enc_opts.key.data,
+ enc_opts.key.len,
+ enc_opts.key.dst_opt_type);
- tc_skb_ext->chain = chain;
+ skb_dst_set(skb, (struct dst_entry *)tun_dst);
+ dev = dev_get_by_index(&init_net, key.filter_ifindex);
+ if (!dev) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find tunnel device with ifindex: %d\n",
+ key.filter_ifindex);
+ return false;
+ }
+
+ /* Set fwd_dev so we do dev_put() after datapath */
+ tc_priv->fwd_dev = dev;
- zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
- ESW_ZONE_ID_MASK;
+ skb->dev = dev;
- if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
- zone_restore_id))
+ return true;
+}
+
+static bool mlx5e_tc_restore_skb_tc_meta(struct sk_buff *skb, struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_mapped_obj *mapped_obj, u32 zone_restore_id,
+ u32 tunnel_id, struct mlx5e_tc_update_priv *tc_priv)
+{
+ struct mlx5e_priv *priv = netdev_priv(skb->dev);
+ struct tc_skb_ext *tc_skb_ext;
+ u64 act_miss_cookie;
+ u32 chain;
+
+ chain = mapped_obj->type == MLX5_MAPPED_OBJ_CHAIN ? mapped_obj->chain : 0;
+ act_miss_cookie = mapped_obj->type == MLX5_MAPPED_OBJ_ACT_MISS ?
+ mapped_obj->act_miss_cookie : 0;
+ if (chain || act_miss_cookie) {
+ if (!mlx5e_tc_ct_restore_flow(ct_priv, skb, zone_restore_id))
return false;
- } else {
+
+ tc_skb_ext = tc_skb_ext_alloc(skb);
+ if (!tc_skb_ext) {
+ WARN_ON(1);
+ return false;
+ }
+
+ if (act_miss_cookie) {
+ tc_skb_ext->act_miss_cookie = act_miss_cookie;
+ tc_skb_ext->act_miss = 1;
+ } else {
+ tc_skb_ext->chain = chain;
+ }
+ }
+
+ if (tc_priv)
+ return mlx5e_tc_restore_tunnel(priv, skb, tc_priv, tunnel_id);
+
+ return true;
+}
+
+static void mlx5e_tc_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5_mapped_obj *mapped_obj,
+ struct mlx5e_tc_update_priv *tc_priv)
+{
+ if (!mlx5e_tc_restore_tunnel(priv, skb, tc_priv, mapped_obj->sample.tunnel_id)) {
+ netdev_dbg(priv->netdev,
+ "Failed to restore tunnel info for sampled packet\n");
+ return;
+ }
+ mlx5e_tc_sample_skb(skb, mapped_obj);
+}
+
+static bool mlx5e_tc_restore_skb_int_port(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5_mapped_obj *mapped_obj,
+ struct mlx5e_tc_update_priv *tc_priv,
+ u32 tunnel_id)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ bool forward_tx = false;
+
+ /* Tunnel restore takes precedence over int port restore */
+ if (tunnel_id)
+ return mlx5e_tc_restore_tunnel(priv, skb, tc_priv, tunnel_id);
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ if (mlx5e_tc_int_port_dev_fwd(uplink_priv->int_port_priv, skb,
+ mapped_obj->int_port_metadata, &forward_tx)) {
+ /* Set fwd_dev for future dev_put */
+ tc_priv->fwd_dev = skb->dev;
+ tc_priv->forward_tx = forward_tx;
+
+ return true;
+ }
+
+ return false;
+}
+
+bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb,
+ struct mapping_ctx *mapping_ctx, u32 mapped_obj_id,
+ struct mlx5_tc_ct_priv *ct_priv,
+ u32 zone_restore_id, u32 tunnel_id,
+ struct mlx5e_tc_update_priv *tc_priv)
+{
+ struct mlx5e_priv *priv = netdev_priv(skb->dev);
+ struct mlx5_mapped_obj mapped_obj;
+ int err;
+
+ err = mapping_find(mapping_ctx, mapped_obj_id, &mapped_obj);
+ if (err) {
+ netdev_dbg(skb->dev,
+ "Couldn't find mapped object for mapped_obj_id: %d, err: %d\n",
+ mapped_obj_id, err);
+ return false;
+ }
+
+ switch (mapped_obj.type) {
+ case MLX5_MAPPED_OBJ_CHAIN:
+ case MLX5_MAPPED_OBJ_ACT_MISS:
+ return mlx5e_tc_restore_skb_tc_meta(skb, ct_priv, &mapped_obj, zone_restore_id,
+ tunnel_id, tc_priv);
+ case MLX5_MAPPED_OBJ_SAMPLE:
+ mlx5e_tc_restore_skb_sample(priv, skb, &mapped_obj, tc_priv);
+ tc_priv->skb_done = true;
+ return true;
+ case MLX5_MAPPED_OBJ_INT_PORT_METADATA:
+ return mlx5e_tc_restore_skb_int_port(priv, skb, &mapped_obj, tc_priv, tunnel_id);
+ default:
netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
return false;
}
-#endif /* CONFIG_NET_TC_SKB_EXT */
- return true;
+ return false;
+}
+
+bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
+{
+ struct mlx5e_priv *priv = netdev_priv(skb->dev);
+ u32 mapped_obj_id, reg_b, zone_restore_id;
+ struct mlx5_tc_ct_priv *ct_priv;
+ struct mapping_ctx *mapping_ctx;
+ struct mlx5e_tc_table *tc;
+
+ reg_b = be32_to_cpu(cqe->ft_metadata);
+ tc = mlx5e_fs_get_tc(priv->fs);
+ mapped_obj_id = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
+ zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
+ ESW_ZONE_ID_MASK;
+ ct_priv = tc->ct;
+ mapping_ctx = tc->mapping;
+
+ return mlx5e_tc_update_skb(cqe, skb, mapping_ctx, mapped_obj_id, ct_priv, zone_restore_id,
+ 0, NULL);
+}
+
+int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
+ u64 act_miss_cookie, u32 *act_miss_mapping)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_mapped_obj mapped_obj = {};
+ struct mapping_ctx *ctx;
+ int err;
+
+ ctx = esw->offloads.reg_c0_obj_pool;
+
+ mapped_obj.type = MLX5_MAPPED_OBJ_ACT_MISS;
+ mapped_obj.act_miss_cookie = act_miss_cookie;
+ err = mapping_add(ctx, &mapped_obj, act_miss_mapping);
+ if (err)
+ return err;
+
+ attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
+ if (IS_ERR(attr->act_id_restore_rule)) {
+ err = PTR_ERR(attr->act_id_restore_rule);
+ goto err_rule;
+ }
+
+ return 0;
+
+err_rule:
+ mapping_remove(ctx, *act_miss_mapping);
+ return err;
+}
+
+void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
+ u32 act_miss_mapping)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mapping_ctx *ctx;
+
+ ctx = esw->offloads.reg_c0_obj_pool;
+ mlx5_del_flow_rules(attr->act_id_restore_rule);
+ mapping_remove(ctx, act_miss_mapping);
}
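
mlx5e_tc_action_miss_mapping_get()/..._put() are consumed by the CT offload path earlier in this diff (tc_ct.c). The pairing contract, condensed from those hunks (hypothetical caller; error labels and rule installation elided):

	/* Map the action-miss cookie before installing the pre-CT rule;
	 * release it with the matching _put() on error or teardown.
	 */
	err = mlx5e_tc_action_miss_mapping_get(priv, attr,
					       attr->ct_attr.act_miss_cookie,
					       &act_miss_mapping);
	if (err)
		return err;
	attr->ct_attr.act_miss_mapping = act_miss_mapping;

	/* ... install rules; then on failure or delete: */
	mlx5e_tc_action_miss_mapping_put(priv, attr, act_miss_mapping);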
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index f6b10bd3368b..adb39e30f90f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -59,6 +59,8 @@ int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
struct mlx5e_tc_update_priv {
struct net_device *fwd_dev;
+ bool skb_done;
+ bool forward_tx;
};
struct mlx5_nic_flow_attr {
@@ -95,6 +97,7 @@ struct mlx5_flow_attr {
struct mlx5_flow_attr *branch_true;
struct mlx5_flow_attr *branch_false;
struct mlx5_flow_attr *jumping_attr;
+ struct mlx5_flow_handle *act_id_restore_rule;
/* keep this union last */
union {
DECLARE_FLEX_ARRAY(struct mlx5_esw_flow_attr, esw_attr);
@@ -225,7 +228,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
enum mlx5e_tc_attr_to_reg {
- CHAIN_TO_REG,
+ MAPPED_OBJ_TO_REG,
VPORT_TO_REG,
TUNNEL_TO_REG,
CTSTATE_TO_REG,
@@ -234,7 +237,7 @@ enum mlx5e_tc_attr_to_reg {
MARK_TO_REG,
LABELS_TO_REG,
FTEID_TO_REG,
- NIC_CHAIN_TO_REG,
+ NIC_MAPPED_OBJ_TO_REG,
NIC_ZONE_RESTORE_TO_REG,
PACKET_COLOR_TO_REG,
};
@@ -368,7 +371,6 @@ struct mlx5e_tc_table *mlx5e_tc_table_alloc(void);
void mlx5e_tc_table_free(struct mlx5e_tc_table *tc);
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{
-#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
u32 chain, reg_b;
reg_b = be32_to_cpu(cqe->ft_metadata);
@@ -379,20 +381,29 @@ static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
if (chain)
return true;
-#endif
return false;
}
-bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb);
+bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb);
+bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb,
+ struct mapping_ctx *mapping_ctx, u32 mapped_obj_id,
+ struct mlx5_tc_ct_priv *ct_priv,
+ u32 zone_restore_id, u32 tunnel_id,
+ struct mlx5e_tc_update_priv *tc_priv);
#else /* CONFIG_MLX5_CLS_ACT */
static inline struct mlx5e_tc_table *mlx5e_tc_table_alloc(void) { return NULL; }
static inline void mlx5e_tc_table_free(struct mlx5e_tc_table *tc) {}
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{ return false; }
static inline bool
-mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
+mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
{ return true; }
#endif
+int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
+ u64 act_miss_cookie, u32 *act_miss_mapping);
+void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
+ u32 act_miss_mapping);
+
#endif /* __MLX5_EN_TC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index fd03f076551b..19e9a77c4633 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -52,12 +52,14 @@ enum mlx5_mapped_obj_type {
MLX5_MAPPED_OBJ_CHAIN,
MLX5_MAPPED_OBJ_SAMPLE,
MLX5_MAPPED_OBJ_INT_PORT_METADATA,
+ MLX5_MAPPED_OBJ_ACT_MISS,
};
struct mlx5_mapped_obj {
enum mlx5_mapped_obj_type type;
union {
u32 chain;
+ u64 act_miss_cookie;
struct {
u32 group_id;
u32 rate;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index df58cba37930..81ed91fee59b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -214,7 +214,7 @@ create_chain_restore(struct fs_chain *chain)
struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5_fs_chains *chains = chain->chains;
- enum mlx5e_tc_attr_to_reg chain_to_reg;
+ enum mlx5e_tc_attr_to_reg mapped_obj_to_reg;
struct mlx5_modify_hdr *mod_hdr;
u32 index;
int err;
@@ -242,7 +242,7 @@ create_chain_restore(struct fs_chain *chain)
chain->id = index;
if (chains->ns == MLX5_FLOW_NAMESPACE_FDB) {
- chain_to_reg = CHAIN_TO_REG;
+ mapped_obj_to_reg = MAPPED_OBJ_TO_REG;
chain->restore_rule = esw_add_restore_rule(esw, chain->id);
if (IS_ERR(chain->restore_rule)) {
err = PTR_ERR(chain->restore_rule);
@@ -253,7 +253,7 @@ create_chain_restore(struct fs_chain *chain)
* since we write the metadata to reg_b
* that is passed to SW directly.
*/
- chain_to_reg = NIC_CHAIN_TO_REG;
+ mapped_obj_to_reg = NIC_MAPPED_OBJ_TO_REG;
} else {
err = -EINVAL;
goto err_rule;
@@ -261,12 +261,12 @@ create_chain_restore(struct fs_chain *chain)
MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, modact, field,
- mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mfield);
+ mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].mfield);
MLX5_SET(set_action_in, modact, offset,
- mlx5e_tc_attr_to_reg_mappings[chain_to_reg].moffset);
+ mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].moffset);
MLX5_SET(set_action_in, modact, length,
- mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen == 32 ?
- 0 : mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen);
+ mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].mlen == 32 ?
+ 0 : mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].mlen);
MLX5_SET(set_action_in, modact, data, chain->id);
mod_hdr = mlx5_modify_header_alloc(chains->dev, chains->ns,
1, modact);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index e91fb205e0b4..594cdcb90b3d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -103,7 +103,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
}
ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
- act->cookie, extack);
+ act->user_cookie, extack);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
return err;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
index a8348437dd87..ded9ab79ccc2 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -83,8 +83,7 @@ static int lan966x_ptp_add_trap(struct lan966x_port *port,
if (err)
goto free_rule;
- err = vcap_set_rule_set_actionset(vrule, VCAP_AFS_BASE_TYPE);
- err |= vcap_rule_add_action_bit(vrule, VCAP_AF_CPU_COPY_ENA, VCAP_BIT_1);
+ err = vcap_rule_add_action_bit(vrule, VCAP_AF_CPU_COPY_ENA, VCAP_BIT_1);
err |= vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE, LAN966X_PMM_REPLACE);
err |= vcap_val_rule(vrule, proto);
if (err)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
index bd10a7189741..f960727ecaee 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
@@ -261,8 +261,6 @@ static int lan966x_tc_flower_add(struct lan966x_port *port,
0);
err |= vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
LAN966X_PMM_REPLACE);
- err |= vcap_set_rule_set_actionset(vrule,
- VCAP_AFS_BASE_TYPE);
if (err)
goto out;
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
index 0a1d4d740567..c07f25e791c7 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
@@ -1876,53 +1876,51 @@ static void vcap_api_next_lookup_basic_test(struct kunit *test)
static void vcap_api_next_lookup_advanced_test(struct kunit *test)
{
- struct vcap_admin admin1 = {
+ struct vcap_admin admin[] = {
+ {
.vtype = VCAP_TYPE_IS0,
.vinst = 0,
.first_cid = 1000000,
.last_cid = 1199999,
.lookups = 6,
.lookups_per_instance = 2,
- };
- struct vcap_admin admin2 = {
+ }, {
.vtype = VCAP_TYPE_IS0,
.vinst = 1,
.first_cid = 1200000,
.last_cid = 1399999,
.lookups = 6,
.lookups_per_instance = 2,
- };
- struct vcap_admin admin3 = {
+ }, {
.vtype = VCAP_TYPE_IS0,
.vinst = 2,
.first_cid = 1400000,
.last_cid = 1599999,
.lookups = 6,
.lookups_per_instance = 2,
- };
- struct vcap_admin admin4 = {
+ }, {
.vtype = VCAP_TYPE_IS2,
.vinst = 0,
.first_cid = 8000000,
.last_cid = 8199999,
.lookups = 4,
.lookups_per_instance = 2,
- };
- struct vcap_admin admin5 = {
+ }, {
.vtype = VCAP_TYPE_IS2,
.vinst = 1,
.first_cid = 8200000,
.last_cid = 8399999,
.lookups = 4,
.lookups_per_instance = 2,
+ }
};
bool ret;
- vcap_test_api_init(&admin1);
- list_add_tail(&admin2.list, &test_vctrl.list);
- list_add_tail(&admin3.list, &test_vctrl.list);
- list_add_tail(&admin4.list, &test_vctrl.list);
- list_add_tail(&admin5.list, &test_vctrl.list);
+ vcap_test_api_init(&admin[0]);
+ list_add_tail(&admin[1].list, &test_vctrl.list);
+ list_add_tail(&admin[2].list, &test_vctrl.list);
+ list_add_tail(&admin[3].list, &test_vctrl.list);
+ list_add_tail(&admin[4].list, &test_vctrl.list);
ret = vcap_is_next_lookup(&test_vctrl, 1000000, 1001000);
KUNIT_EXPECT_EQ(test, false, ret);
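
With the admin instances in one array, the list wiring could plausibly go one step further and loop (hypothetical follow-up, not part of this patch):

	int i;

	vcap_test_api_init(&admin[0]);
	for (i = 1; i < ARRAY_SIZE(admin); i++)
		list_add_tail(&admin[i].list, &test_vctrl.list);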
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index becd21c2325d..4dc643b0d2db 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -399,14 +399,14 @@ static int ef100_filter_table_up(struct efx_nic *efx)
* filter insertion will need to take the lock for read.
*/
up_write(&efx->filter_sem);
-#ifdef CONFIG_SFC_SRIOV
- rc = efx_tc_insert_rep_filters(efx);
+ if (IS_ENABLED(CONFIG_SFC_SRIOV))
+ rc = efx_tc_insert_rep_filters(efx);
+
/* Rep filter failure is nonfatal */
if (rc)
netif_warn(efx, drv, efx->net_dev,
"Failed to insert representor filters, rc %d\n",
rc);
-#endif
return 0;
fail_vlan0:
@@ -419,9 +419,8 @@ fail_unspec:
static void ef100_filter_table_down(struct efx_nic *efx)
{
-#ifdef CONFIG_SFC_SRIOV
- efx_tc_remove_rep_filters(efx);
-#endif
+ if (IS_ENABLED(CONFIG_SFC_SRIOV))
+ efx_tc_remove_rep_filters(efx);
down_write(&efx->filter_sem);
efx_mcdi_filter_del_vlan(efx, 0);
efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
@@ -737,7 +736,6 @@ static unsigned int efx_ef100_recycle_ring_size(const struct efx_nic *efx)
return 10 * EFX_RECYCLE_RING_SIZE_10G;
}
-#ifdef CONFIG_SFC_SRIOV
static int efx_ef100_get_base_mport(struct efx_nic *efx)
{
struct ef100_nic_data *nic_data = efx->nic_data;
@@ -773,7 +771,6 @@ static int efx_ef100_get_base_mport(struct efx_nic *efx)
return 0;
}
-#endif
static int compare_versions(const char *a, const char *b)
{
@@ -1155,10 +1152,9 @@ int ef100_probe_netdev_pf(struct efx_nic *efx)
struct net_device *net_dev = efx->net_dev;
int rc;
- if (!nic_data->grp_mae)
+ if (!IS_ENABLED(CONFIG_SFC_SRIOV) || !nic_data->grp_mae)
return 0;
-#ifdef CONFIG_SFC_SRIOV
rc = efx_init_struct_tc(efx);
if (rc)
return rc;
@@ -1193,7 +1189,6 @@ int ef100_probe_netdev_pf(struct efx_nic *efx)
net_dev->features |= NETIF_F_HW_TC;
efx->fixed_features |= NETIF_F_HW_TC;
}
-#endif
return rc;
}
@@ -1206,12 +1201,11 @@ void ef100_remove(struct efx_nic *efx)
{
struct ef100_nic_data *nic_data = efx->nic_data;
-#ifdef CONFIG_SFC_SRIOV
- if (efx->mae) {
+ if (IS_ENABLED(CONFIG_SFC_SRIOV) && efx->mae) {
efx_ef100_fini_reps(efx);
efx_fini_mae(efx);
}
-#endif
+
efx_mcdi_detach(efx);
efx_mcdi_fini(efx);
if (nic_data)
@@ -1304,9 +1298,8 @@ const struct efx_nic_type ef100_pf_nic_type = {
.update_stats = ef100_update_stats,
.pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
-#ifdef CONFIG_SFC_SRIOV
- .sriov_configure = efx_ef100_sriov_configure,
-#endif
+ .sriov_configure = IS_ENABLED(CONFIG_SFC_SRIOV) ?
+ efx_ef100_sriov_configure : NULL,
/* Per-type bar/size configuration not used on ef100. Location of
* registers is defined by extended capabilities.
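
The conversions above trade `#ifdef` for IS_ENABLED(), which keeps both branches visible to the compiler in every configuration (so the disabled side still gets parsed and type-checked) while the optimizer drops it as dead code. A generic sketch of the trade-off, with a hypothetical CONFIG_FOO and foo_setup():

#include <linux/kconfig.h>

struct device;
int foo_setup(struct device *dev);	/* hypothetical; must exist even when CONFIG_FOO=n */

/* IS_ENABLED(CONFIG_FOO) is a compile-time 0/1, so the branch below is
 * eliminated when CONFIG_FOO is off -- but unlike #ifdef, a declaration
 * (often a static inline stub) must always be in scope.
 */
static int maybe_setup_foo(struct device *dev)
{
	if (!IS_ENABLED(CONFIG_FOO))
		return 0;

	return foo_setup(dev);
}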
diff --git a/drivers/net/ethernet/sfc/efx_devlink.c b/drivers/net/ethernet/sfc/efx_devlink.c
index d2eb6712ba35..381b805659d3 100644
--- a/drivers/net/ethernet/sfc/efx_devlink.c
+++ b/drivers/net/ethernet/sfc/efx_devlink.c
@@ -323,7 +323,7 @@ static void efx_devlink_info_running_v2(struct efx_nic *efx,
GET_VERSION_V2_OUT_SUCFW_BUILD_DATE);
rtc_time64_to_tm(tstamp, &build_date);
#else
- memset(&build_date, 0, sizeof(build_date)
+ memset(&build_date, 0, sizeof(build_date));
#endif
build_id = MCDI_DWORD(outbuf, GET_VERSION_V2_OUT_SUCFW_CHIP_ID);
@@ -655,7 +655,7 @@ static struct devlink_port *ef100_set_devlink_port(struct efx_nic *efx, u32 idx)
"devlink port creation for PF failed.\n");
else
pci_warn(efx->pci_dev,
- "devlink_port creationg for VF %u failed.\n",
+ "devlink_port creation for VF %u failed.\n",
idx);
return NULL;
}
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 6321fd393fc3..2d32abe5f478 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -654,8 +654,8 @@ int efx_mae_enumerate_mports(struct efx_nic *efx)
MAE_MPORT_DESC_VNIC_FUNCTION_INTERFACE);
d->pf_idx = MCDI_STRUCT_WORD(desc,
MAE_MPORT_DESC_VNIC_FUNCTION_PF_IDX);
- d->vf_idx = MCDI_STRUCT_WORD(desc,
- MAE_MPORT_DESC_VNIC_FUNCTION_VF_IDX);
+ d->vf_idx = MCDI_STRUCT_WORD(desc,
+ MAE_MPORT_DESC_VNIC_FUNCTION_VF_IDX);
break;
default:
/* Unknown mport_type, just accept it */