author    Maxim Mikityanskiy <maximmi@mellanox.com>    2020-01-29 14:35:09 +0200
committer Saeed Mahameed <saeedm@mellanox.com>         2020-05-09 01:05:40 -0700
commit    714c88a38bce3477392944d6c0d0bc724abaff09 (patch)
tree      4eda4b4b53543c4e1327d94b4e673979940fa5f3
parent    5546100038eeff96fd0361f9b820405c016f8578 (diff)
net/mlx5e: Split TX acceleration offloads into two phases
After the previous modifications, the offloads are no longer called one by one: the pi is calculated and the wqe is cleared in between the TLS and IPSEC offloads, which doesn't quite fit mlx5e_accel_handle_tx's purpose. This patch splits mlx5e_accel_handle_tx into two functions that correspond to the two logical phases of running offloads:

1. Before fetching a WQE. Here runs the code that can post WQEs on its own, before the main WQE is fetched. It's the main part of TLS offload.

2. After fetching a WQE. Here runs the code that updates the WQE's fields but can't post any more WQEs. It's the minor part of TLS offload that sets the tisn field in the cseg, plus the eseg-based offloads (currently IPSEC; later patches will move the GENEVE and checksum offloads there, too).

This allows mlx5e_xmit to take care of all the actions needed to transmit a packet in the right order, improves the structure of the code and reduces unnecessary operations. The structure will be further improved in the following patches: all eseg-based offloads will be moved to a single place, and reserving space for the main WQE will happen between phase 1 and phase 2 of the offloads, eliminating unneeded data movements.

Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
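For orientation, the resulting two-phase transmit path condenses to the following sketch, assembled from the en_accel.h and en_tx.c hunks below. It is a sketch, not verbatim code: statistics and error counters are omitted, and the out: path is abbreviated.

netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_accel_tx_state accel = {};
	struct mlx5e_tx_wqe *wqe;
	struct mlx5e_txqsq *sq;
	u16 pi;

	sq = priv->txq2sq[skb_get_queue_mapping(skb)];

	/* Phase 1: may post SKBs and WQEs of its own (TLS), so it must
	 * run before the main WQE is fetched.
	 */
	if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
		goto out;

	pi  = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);

	/* Phase 2: may only edit the fetched WQE (cseg.tisn for TLS,
	 * the eseg for IPSEC); it may not post further WQEs.
	 */
	if (unlikely(!mlx5e_accel_tx_finish(priv, sq, skb, wqe, &accel)))
		goto out;

	mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
out:
	return NETDEV_TX_OK; /* schematic: failing handlers consume the skb */
}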
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h   | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c |  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h |  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h       |  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c    |  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c   | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h   |  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c               | 10
8 files changed, 52 insertions(+), 23 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index d286fb09955c..fac145dcf2ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -102,35 +102,44 @@ mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
udp_hdr(skb)->len = htons(payload_len);
}
-static inline bool mlx5e_accel_handle_tx(struct sk_buff *skb,
- struct mlx5e_txqsq *sq,
- struct net_device *dev,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi)
-{
+struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_TLS
- u32 tls_tisn = 0;
+ struct mlx5e_accel_tx_tls_state tls;
#endif
+};
+static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_state *state)
+{
if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
mlx5e_udp_gso_handle_tx_skb(skb);
#ifdef CONFIG_MLX5_EN_TLS
if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
/* May send SKBs and WQEs. */
- if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &tls_tisn)))
+ if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
return false;
}
+#endif
- *pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
- *wqe = MLX5E_TX_FETCH_WQE(sq, *pi);
+ return true;
+}
- (*wqe)->ctrl.tisn = cpu_to_be32(tls_tisn << 8);
+static inline bool mlx5e_accel_tx_finish(struct mlx5e_priv *priv,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
+ struct mlx5e_tx_wqe *wqe,
+ struct mlx5e_accel_tx_state *state)
+{
+#ifdef CONFIG_MLX5_EN_TLS
+ mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls);
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) {
- if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, &(*wqe)->eth, skb)))
+ if (unlikely(!mlx5e_ipsec_handle_tx_skb(priv, &wqe->eth, skb)))
return false;
}
#endif

	return true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 0e1ac3e68c72..824b87ac8f9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -233,11 +233,10 @@ static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
ntohs(mdata->content.tx.seq));
}
-bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
+bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
struct mlx5_wqe_eth_seg *eseg,
struct sk_buff *skb)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct xfrm_offload *xo = xfrm_offload(skb);
struct mlx5e_ipsec_metadata *mdata;
struct mlx5e_ipsec_sa_entry *sa_entry;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index bd6f32aee8d6..ba02643586a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -52,7 +52,7 @@ void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
-bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
+bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
struct mlx5_wqe_eth_seg *eseg,
struct sk_buff *skb);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index 7d9d9420f19d..dabbc5f226ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -9,6 +9,7 @@
#ifdef CONFIG_MLX5_EN_TLS
#include <net/tls.h>
#include "accel/tls.h"
+#include "en_accel/tls_rxtx.h"
#define MLX5E_KTLS_STATIC_UMR_WQE_SZ \
(offsetof(struct mlx5e_umr_wqe, tls_static_params_ctx) + \
@@ -96,7 +97,8 @@ void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx);
bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, u32 *tisn, int datalen);
+ struct sk_buff *skb, int datalen,
+ struct mlx5e_accel_tx_tls_state *state);
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
u32 *dma_fifo_cc);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index b49d7c1e49dc..352b0a3ef0ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -414,7 +414,8 @@ err_out:
}
bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, u32 *tisn, int datalen)
+ struct sk_buff *skb, int datalen,
+ struct mlx5e_accel_tx_tls_state *state)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
struct mlx5e_sq_stats *stats = sq->stats;
@@ -447,7 +448,7 @@ bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *s
priv_tx->expected_seq = seq + datalen;
- *tisn = priv_tx->tisn;
+ state->tls_tisn = priv_tx->tisn;
stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
stats->tls_encrypted_bytes += datalen;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index 8e6b0b0ce2e4..05454a843b28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -258,7 +258,7 @@ err_out:
}
bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, u32 *tisn)
+ struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_tls_offload_context_tx *context;
@@ -279,7 +279,7 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
goto err_out;
if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx))
- return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, tisn, datalen);
+ return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);
skb_seq = ntohl(tcp_hdr(skb)->seq);
context = mlx5e_get_tls_tx_context(tls_ctx);
@@ -302,6 +302,12 @@ err_out:
return false;
}
+void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
+ struct mlx5e_accel_tx_tls_state *state)
+{
+ cseg->tisn = cpu_to_be32(state->tls_tisn << 8);
+}
+
static int tls_update_resync_sn(struct net_device *netdev,
struct sk_buff *skb,
struct mlx5e_tls_metadata *mdata)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
index 3630ed8b1206..a50d0394df0a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
@@ -40,8 +40,14 @@
#include "en.h"
#include "en/txrx.h"
+struct mlx5e_accel_tx_tls_state {
+ u32 tls_tisn;
+};
+
bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, u32 *tisn);
+ struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state);
+void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
+ struct mlx5e_accel_tx_tls_state *state);
void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
u32 *cqe_bcnt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index bb6d3774eafb..f79454746d0d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -383,16 +383,22 @@ err_drop:
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_accel_tx_state accel = {};
struct mlx5e_tx_wqe *wqe;
struct mlx5e_txqsq *sq;
u16 pi;
sq = priv->txq2sq[skb_get_queue_mapping(skb)];
+
+ /* May send SKBs and WQEs. */
+ if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
+ goto out;
+
pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
- /* might send skbs and update wqe and pi */
- if (unlikely(!mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi)))
+ /* May update the WQE, but may not post other WQEs. */
+ if (unlikely(!mlx5e_accel_tx_finish(priv, sq, skb, wqe, &accel)))
goto out;
mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());