Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 243
1 file changed, 184 insertions(+), 59 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 3cbebfba582b..bb11e644d24f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -2,9 +2,12 @@
// Copyright (c) 2020 Mellanox Technologies
#include "en/ptp.h"
+#include "en/health.h"
#include "en/txrx.h"
#include "en/params.h"
#include "en/fs_tt_redirect.h"
+#include <linux/list.h>
+#include <linux/spinlock.h>
struct mlx5e_ptp_fs {
struct mlx5_flow_handle *l2_rule;
@@ -19,6 +22,48 @@ struct mlx5e_ptp_params {
struct mlx5e_rq_param rq_param;
};
+struct mlx5e_ptp_port_ts_cqe_tracker {
+ u8 metadata_id;
+ bool inuse : 1;
+ struct list_head entry;
+};
+
+struct mlx5e_ptp_port_ts_cqe_list {
+ struct mlx5e_ptp_port_ts_cqe_tracker *nodes;
+ struct list_head tracker_list_head;
+ /* Sync list operations in xmit and napi_poll contexts */
+ spinlock_t tracker_list_lock;
+};
+
+static inline void
+mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata)
+{
+ struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata];
+
+ WARN_ON_ONCE(tracker->inuse);
+ tracker->inuse = true;
+ spin_lock(&list->tracker_list_lock);
+ list_add_tail(&tracker->entry, &list->tracker_list_head);
+ spin_unlock(&list->tracker_list_lock);
+}
+
+static void
+mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata)
+{
+ struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata];
+
+ WARN_ON_ONCE(!tracker->inuse);
+ tracker->inuse = false;
+ spin_lock(&list->tracker_list_lock);
+ list_del(&tracker->entry);
+ spin_unlock(&list->tracker_list_lock);
+}
+
+void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata)
+{
+ mlx5e_ptp_port_ts_cqe_list_add(ptpsq->ts_cqe_pending_list, metadata);
+}
+
struct mlx5e_skb_cb_hwtstamp {
ktime_t cqe_hwtstamp;
ktime_t port_hwtstamp;
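
For orientation, the tracker list added above pairs with a metadata freelist and an skb map that live in the ptpsq (declared outside this hunk). A rough sketch of the transmit-side flow follows; the freelist-pop and map-store helper names are assumptions for illustration only, while mlx5e_ptpsq_track_metadata() is the function introduced above.

/* Sketch only: mlx5e_ptp_metadata_fifo_pop() and mlx5e_ptp_metadata_map_put()
 * are assumed helper names, not part of this hunk.
 */
static void example_ptp_track_on_xmit(struct mlx5e_ptpsq *ptpsq,
				      struct sk_buff *skb)
{
	/* Reserve a free metadata id for the timestamped packet. */
	u8 metadata = mlx5e_ptp_metadata_fifo_pop(&ptpsq->metadata_freelist);

	/* Remember which skb owns the id so the port-timestamp CQE can
	 * find it again in mlx5e_ptp_handle_ts_cqe().
	 */
	mlx5e_ptp_metadata_map_put(&ptpsq->metadata_map, skb, metadata);

	/* Mark the id as waiting for its port-timestamp CQE. */
	mlx5e_ptpsq_track_metadata(ptpsq, metadata);
}
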
@@ -79,75 +124,97 @@ void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}
-#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
-
-static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_ci, u16 skb_id)
+static struct sk_buff *
+mlx5e_ptp_metadata_map_lookup(struct mlx5e_ptp_metadata_map *map, u16 metadata)
{
- return (ptpsq->ts_cqe_ctr_mask && (skb_ci != skb_id));
+ return map->data[metadata];
}
-static bool mlx5e_ptp_ts_cqe_ooo(struct mlx5e_ptpsq *ptpsq, u16 skb_id)
+static struct sk_buff *
+mlx5e_ptp_metadata_map_remove(struct mlx5e_ptp_metadata_map *map, u16 metadata)
{
- u16 skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
- u16 skb_pi = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_pc);
+ struct sk_buff *skb;
- if (PTP_WQE_CTR2IDX(skb_id - skb_ci) >= PTP_WQE_CTR2IDX(skb_pi - skb_ci))
- return true;
+ skb = map->data[metadata];
+ map->data[metadata] = NULL;
- return false;
+ return skb;
}
-static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_ci,
- u16 skb_id, int budget)
+static bool mlx5e_ptp_metadata_map_unhealthy(struct mlx5e_ptp_metadata_map *map)
{
- struct skb_shared_hwtstamps hwts = {};
- struct sk_buff *skb;
+ /* Considered beginning unhealthy state if size * 15 / 2^4 cannot be reclaimed. */
+ return map->undelivered_counter > (map->capacity >> 4) * 15;
+}
+
+static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
+ ktime_t port_tstamp)
+{
+ struct mlx5e_ptp_port_ts_cqe_list *cqe_list = ptpsq->ts_cqe_pending_list;
+ ktime_t timeout = ns_to_ktime(MLX5E_PTP_TS_CQE_UNDELIVERED_TIMEOUT);
+ struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
+ struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n;
+
+ spin_lock(&cqe_list->tracker_list_lock);
+ list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) {
+ struct sk_buff *skb =
+ mlx5e_ptp_metadata_map_lookup(metadata_map, pos->metadata_id);
+ ktime_t dma_tstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
- ptpsq->cq_stats->resync_event++;
+ if (!dma_tstamp ||
+ ktime_after(ktime_add(dma_tstamp, timeout), port_tstamp))
+ break;
- while (skb_ci != skb_id) {
- skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
- hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
- skb_tstamp_tx(skb, &hwts);
- ptpsq->cq_stats->resync_cqe++;
- napi_consume_skb(skb, budget);
- skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
+ metadata_map->undelivered_counter++;
+ WARN_ON_ONCE(!pos->inuse);
+ pos->inuse = false;
+ list_del(&pos->entry);
}
+ spin_unlock(&cqe_list->tracker_list_lock);
}
+#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
+
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
struct mlx5_cqe64 *cqe,
int budget)
{
- u16 skb_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
- u16 skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
+ struct mlx5e_ptp_port_ts_cqe_list *pending_cqe_list = ptpsq->ts_cqe_pending_list;
+ u8 metadata_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
+ bool is_err_cqe = !!MLX5E_RX_ERR_CQE(cqe);
struct mlx5e_txqsq *sq = &ptpsq->txqsq;
struct sk_buff *skb;
ktime_t hwtstamp;
- if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
- skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
- ptpsq->cq_stats->err_cqe++;
- goto out;
+ if (likely(pending_cqe_list->nodes[metadata_id].inuse)) {
+ mlx5e_ptp_port_ts_cqe_list_remove(pending_cqe_list, metadata_id);
+ } else {
+ /* Reclaim space in the unlikely event CQE was delivered after
+ * marking it late.
+ */
+ ptpsq->metadata_map.undelivered_counter--;
+ ptpsq->cq_stats->late_cqe++;
}
- if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_ci, skb_id)) {
- if (mlx5e_ptp_ts_cqe_ooo(ptpsq, skb_id)) {
- /* already handled by a previous resync */
- ptpsq->cq_stats->ooo_cqe_drop++;
- return;
- }
- mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_ci, skb_id, budget);
+ skb = mlx5e_ptp_metadata_map_remove(&ptpsq->metadata_map, metadata_id);
+
+ if (unlikely(is_err_cqe)) {
+ ptpsq->cq_stats->err_cqe++;
+ goto out;
}
- skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
hwtstamp, ptpsq->cq_stats);
ptpsq->cq_stats->cqe++;
+ mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
out:
napi_consume_skb(skb, budget);
+ mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist, metadata_id);
+ if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
+ !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+ queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
}
static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
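
As a concrete reading of the threshold in mlx5e_ptp_metadata_map_unhealthy() above (capacity value illustrative): with a metadata map of 256 entries, (256 >> 4) * 15 = 240, so the recovery work queued at the end of mlx5e_ptp_handle_ts_cqe() fires once more than 240 of the 256 ids, i.e. more than 15/16 of the map, are stuck in the undelivered state.
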
@@ -291,36 +358,86 @@ static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
{
- int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);
- struct mlx5_core_dev *mdev = ptpsq->txqsq.mdev;
+ struct mlx5e_ptp_metadata_fifo *metadata_freelist = &ptpsq->metadata_freelist;
+ struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
+ struct mlx5e_ptp_port_ts_cqe_list *cqe_list;
+ int db_sz;
+ int md;
- ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
- GFP_KERNEL, numa);
- if (!ptpsq->skb_fifo.fifo)
+ cqe_list = kvzalloc_node(sizeof(*ptpsq->ts_cqe_pending_list), GFP_KERNEL, numa);
+ if (!cqe_list)
return -ENOMEM;
+ ptpsq->ts_cqe_pending_list = cqe_list;
+
+ db_sz = min_t(u32, mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq),
+ 1 << MLX5_CAP_GEN_2(ptpsq->txqsq.mdev,
+ ts_cqe_metadata_size2wqe_counter));
+ ptpsq->ts_cqe_ctr_mask = db_sz - 1;
+
+ cqe_list->nodes = kvzalloc_node(array_size(db_sz, sizeof(*cqe_list->nodes)),
+ GFP_KERNEL, numa);
+ if (!cqe_list->nodes)
+ goto free_cqe_list;
+ INIT_LIST_HEAD(&cqe_list->tracker_list_head);
+ spin_lock_init(&cqe_list->tracker_list_lock);
+
+ metadata_freelist->data =
+ kvzalloc_node(array_size(db_sz, sizeof(*metadata_freelist->data)),
+ GFP_KERNEL, numa);
+ if (!metadata_freelist->data)
+ goto free_cqe_list_nodes;
+ metadata_freelist->mask = ptpsq->ts_cqe_ctr_mask;
+
+ for (md = 0; md < db_sz; ++md) {
+ cqe_list->nodes[md].metadata_id = md;
+ metadata_freelist->data[md] = md;
+ }
+ metadata_freelist->pc = db_sz;
+
+ metadata_map->data =
+ kvzalloc_node(array_size(db_sz, sizeof(*metadata_map->data)),
+ GFP_KERNEL, numa);
+ if (!metadata_map->data)
+ goto free_metadata_freelist;
+ metadata_map->capacity = db_sz;
- ptpsq->skb_fifo.pc = &ptpsq->skb_fifo_pc;
- ptpsq->skb_fifo.cc = &ptpsq->skb_fifo_cc;
- ptpsq->skb_fifo.mask = wq_sz - 1;
- if (MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
- ptpsq->ts_cqe_ctr_mask =
- (1 << MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter)) - 1;
return 0;
+
+free_metadata_freelist:
+ kvfree(metadata_freelist->data);
+free_cqe_list_nodes:
+ kvfree(cqe_list->nodes);
+free_cqe_list:
+ kvfree(cqe_list);
+ return -ENOMEM;
}
-static void mlx5e_ptp_drain_skb_fifo(struct mlx5e_skb_fifo *skb_fifo)
+static void mlx5e_ptp_drain_metadata_map(struct mlx5e_ptp_metadata_map *map)
{
- while (*skb_fifo->pc != *skb_fifo->cc) {
- struct sk_buff *skb = mlx5e_skb_fifo_pop(skb_fifo);
+ int idx;
+
+ for (idx = 0; idx < map->capacity; ++idx) {
+ struct sk_buff *skb = map->data[idx];
dev_kfree_skb_any(skb);
}
}
-static void mlx5e_ptp_free_traffic_db(struct mlx5e_skb_fifo *skb_fifo)
+static void mlx5e_ptp_free_traffic_db(struct mlx5e_ptpsq *ptpsq)
{
- mlx5e_ptp_drain_skb_fifo(skb_fifo);
- kvfree(skb_fifo->fifo);
+ mlx5e_ptp_drain_metadata_map(&ptpsq->metadata_map);
+ kvfree(ptpsq->metadata_map.data);
+ kvfree(ptpsq->metadata_freelist.data);
+ kvfree(ptpsq->ts_cqe_pending_list->nodes);
+ kvfree(ptpsq->ts_cqe_pending_list);
+}
+
+static void mlx5e_ptpsq_unhealthy_work(struct work_struct *work)
+{
+ struct mlx5e_ptpsq *ptpsq =
+ container_of(work, struct mlx5e_ptpsq, report_unhealthy_work);
+
+ mlx5e_reporter_tx_ptpsq_unhealthy(ptpsq);
}
static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
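
A note on the sizing in mlx5e_ptp_alloc_traffic_db() above, with illustrative numbers: if mlx5_wq_cyc_get_size() reports 1024 WQEs and the ts_cqe_metadata_size2wqe_counter capability reports 8, then db_sz = min(1024, 1 << 8) = 256 and ts_cqe_ctr_mask = 0xff. Both operands are powers of two, so db_sz stays a power of two and the mask can be applied directly by PTP_WQE_CTR2IDX() to fold the CQE's wqe_counter into a metadata id.
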
@@ -348,11 +465,12 @@ static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
if (err)
goto err_free_txqsq;
- err = mlx5e_ptp_alloc_traffic_db(ptpsq,
- dev_to_node(mlx5_core_dma_dev(c->mdev)));
+ err = mlx5e_ptp_alloc_traffic_db(ptpsq, dev_to_node(mlx5_core_dma_dev(c->mdev)));
if (err)
goto err_free_txqsq;
+ INIT_WORK(&ptpsq->report_unhealthy_work, mlx5e_ptpsq_unhealthy_work);
+
return 0;
err_free_txqsq:
@@ -366,7 +484,9 @@ static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
struct mlx5e_txqsq *sq = &ptpsq->txqsq;
struct mlx5_core_dev *mdev = sq->mdev;
- mlx5e_ptp_free_traffic_db(&ptpsq->skb_fifo);
+ if (current_work() != &ptpsq->report_unhealthy_work)
+ cancel_work_sync(&ptpsq->report_unhealthy_work);
+ mlx5e_ptp_free_traffic_db(ptpsq);
cancel_work_sync(&sq->recover_work);
mlx5e_ptp_destroy_sq(mdev, sq->sqn);
mlx5e_free_txqsq_descs(sq);
@@ -534,7 +654,10 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
/* SQ */
if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
- params->log_sq_size = orig->log_sq_size;
+ params->log_sq_size =
+ min(MLX5_CAP_GEN_2(c->mdev, ts_cqe_metadata_size2wqe_counter),
+ MLX5E_PTP_MAX_LOG_SQ_SIZE);
+ params->log_sq_size = min(params->log_sq_size, orig->log_sq_size);
mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
}
/* RQ */
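
Worked example for the SQ sizing change above (numbers illustrative): if ts_cqe_metadata_size2wqe_counter is 8, log_sq_size is first capped at 8 (or at MLX5E_PTP_MAX_LOG_SQ_SIZE if that is smaller); if the original channel used log_sq_size 10, the PTP SQ still ends up with 1 << 8 = 256 WQEs, matching the 256 metadata ids set up in mlx5e_ptp_alloc_traffic_db() so that every in-flight timestamped WQE can hold a unique id.
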
@@ -729,8 +852,10 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
- if (!c || !cparams)
- return -ENOMEM;
+ if (!c || !cparams) {
+ err = -ENOMEM;
+ goto err_free;
+ }
c->priv = priv;
c->mdev = priv->mdev;