author    David S. Miller <davem@davemloft.net>    2020-09-16 15:16:51 -0700
committer David S. Miller <davem@davemloft.net>    2020-09-16 15:16:51 -0700
commit    045e42f3e6b1dd4c78ec82e980894543be86f782 (patch)
tree      44ecd127a985ac10d9b5853de5f410d7aae2bd44 /drivers/net
parent    897dccb8db0d01209c44439ba59d86d3ccbd55ef (diff)
parent    b7cf0806e8783e38f881cae3c56f0869e70b8da2 (diff)
Merge tag 'mlx5-updates-2020-09-15' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux
Saeed Mahameed says:

====================
mlx5-updates-2020-09-15

Various updates to mlx5 driver:

1) Eli adds support for TC trap action.
2) Eran, minor improvements to clock.c code structure.
3) Better handling of error reporting in LAG from Jianbo.
4) IPv6 traffic class (DSCP) header rewrite support from Maor.
5) Ofer Levi adds support for CQE compression of multi-strides packets.
6) Vu enables use of vport metadata by default.
7) Some minor code cleanup.
====================

Reviewed-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h                |  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/health.c         |  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c        |  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c           | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c             | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c             | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 85
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.c               | 66
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.h               |  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c            |  9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c         | 61
11 files changed, 179 insertions, 87 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 4f33658da25a..95aab8b429cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -265,6 +265,7 @@ enum {
MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
+ MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
};
struct mlx5e_cq {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 3dc200bcfabd..69a05da0e3e3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -242,8 +242,8 @@ static int mlx5e_health_rsc_fmsg_binary(struct devlink_fmsg *fmsg,
{
u32 data_size;
+ int err = 0;
u32 offset;
- int err;
for (offset = 0; offset < value_len; offset += data_size) {
data_size = value_len - offset;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 9a6078e89587..0dda80d8bdca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -243,7 +243,6 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
return MLX5E_NUM_PFLAGS;
case ETH_SS_TEST:
return mlx5e_self_test_num(priv);
- fallthrough;
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 26834625556d..b057a6c3a6d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -848,6 +848,13 @@ int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+ /* For CQE compression on striding RQ, use stride index provided by
+ * HW if capability is supported.
+ */
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) &&
+ MLX5_CAP_GEN(c->mdev, mini_cqe_resp_stride_index))
+ __set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &c->rq.state);
+
return 0;
err_destroy_rq:
@@ -2182,6 +2189,7 @@ void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
struct mlx5e_cq_param *param)
{
struct mlx5_core_dev *mdev = priv->mdev;
+ bool hw_stridx = false;
void *cqc = param->cqc;
u8 log_cq_size;
@@ -2189,6 +2197,7 @@ void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
+ hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
log_cq_size = params->log_rq_mtu_frames;
@@ -2196,7 +2205,8 @@ void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
- MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
+ MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
+ MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
MLX5_SET(cqc, cqc, cqe_comp_en, 1);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 7aab69e991a5..c9c82b14060a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -137,8 +137,17 @@ static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
title->check_sum = mini_cqe->checksum;
title->op_own &= 0xf0;
title->op_own |= 0x01 & (cqcc >> wq->fbc.log_sz);
- title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
+ /* state bit set implies linked-list striding RQ wq type and
+ * HW stride index capability supported
+ */
+ if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {
+ title->wqe_counter = mini_cqe->stridx;
+ return;
+ }
+
+ /* HW stride index capability not supported */
+ title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
else
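With CQE compression, only the first ("title") CQE of a session is complete; each following mini CQE carries just a few per-packet fields. On a striding RQ the title's wqe_counter is stale for those packets, so the driver used to re-derive each packet's stride index in software by accumulating the strides consumed per CQE. The mini_cqe_resp_stride_index capability makes hardware report the stride index inside the mini CQE itself, which is what the new state bit selects above. A standalone sketch of the two paths, using simplified stand-in types rather than the real mlx5 structures:

    #include <stdint.h>

    /* Simplified stand-ins for the hardware CQE layouts. */
    struct title_cqe { uint32_t byte_cnt; uint16_t check_sum, wqe_counter; };
    struct mini_cqe  { uint32_t byte_cnt; uint16_t checksum, stridx; };

    static void expand_session(struct title_cqe *title, const struct mini_cqe *mini,
                               int n, int hw_stridx, uint16_t *sw_stride)
    {
        for (int i = 0; i < n; i++) {
            title->byte_cnt  = mini[i].byte_cnt;  /* per-packet fields overwrite */
            title->check_sum = mini[i].checksum;  /* the shared title fields     */
            if (hw_stridx)
                /* MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX: HW supplies the index */
                title->wqe_counter = mini[i].stridx;
            else
                /* legacy path: software tracks a running stride counter (the
                 * real code advances it by the strides each packet consumed) */
                title->wqe_counter = (*sw_stride)++;
            /* the expanded CQE then flows through the normal RX handler */
        }
    }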
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index fd53d101d8fd..817c503693fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2615,6 +2615,7 @@ static struct mlx5_fields fields[] = {
OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
+ OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
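The 0xc00f mask is worth a note. In the IPv6 header, byte 0 is version(4) | traffic class bits 7:4, and byte 1 is traffic class bits 3:0 | flow label bits 19:16; DSCP is the top six bits of the traffic class, so it occupies the low nibble of byte 0 (mask 0x0f) and the top two bits of byte 1 (mask 0xc0), leaving the two ECN bits untouched. Read as a 16-bit little-endian value, those per-byte masks combine to 0xc00f, which appears to be how the constant is formed. A minimal standalone illustration (ipv6_dscp_example is a hypothetical helper, not part of the patch):

    #include <stdint.h>

    /* Extract DSCP = traffic class bits 7:2 from the first two header bytes. */
    static inline uint8_t ipv6_dscp_example(const uint8_t *ip6)
    {
        return (uint8_t)(((ip6[0] & 0x0f) << 2) | (ip6[1] >> 6));
    }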
@@ -3943,6 +3944,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
break;
+ case FLOW_ACTION_TRAP:
+ if (!flow_offload_has_one_action(flow_action)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "action trap is supported as a sole action only");
+ return -EOPNOTSUPP;
+ }
+ action |= (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT);
+ attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ break;
case FLOW_ACTION_MPLS_PUSH:
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
reformat_l2_to_l3_tunnel) ||
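The new FLOW_ACTION_TRAP branch steers matched packets to the eswitch slow path, so they are delivered to software through the representor, with a flow counter attached; per the extack message it is refused when combined with any other action. From userspace this would typically be exercised through tc's gact "trap" control action; a hedged usage example, with eth0 standing in for an mlx5 uplink representor:

    tc filter add dev eth0 ingress protocol ip flower skip_sw action trap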
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index b381cbca5852..b23d20e16495 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1864,18 +1864,6 @@ esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
return true;
}
-static bool
-esw_check_vport_match_metadata_mandatory(const struct mlx5_eswitch *esw)
-{
- return mlx5_core_mp_enabled(esw->dev);
-}
-
-static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
-{
- return esw_check_vport_match_metadata_mandatory(esw) &&
- esw_check_vport_match_metadata_supported(esw);
-}
-
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
u32 num_vports = GENMASK(ESW_VPORT_BITS - 1, 0) - 1;
@@ -1908,9 +1896,6 @@ void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- if (vport->vport == MLX5_VPORT_UPLINK)
- return 0;
-
vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
vport->metadata = vport->default_metadata;
return vport->metadata ? 0 : -ENOSPC;
@@ -1919,26 +1904,56 @@ static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- if (vport->vport == MLX5_VPORT_UPLINK || !vport->default_metadata)
+ if (!vport->default_metadata)
return;
WARN_ON(vport->metadata != vport->default_metadata);
mlx5_esw_match_metadata_free(esw, vport->default_metadata);
}
+static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ int i;
+
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
+ return;
+
+ mlx5_esw_for_all_vports_reverse(esw, i, vport)
+ esw_offloads_vport_metadata_cleanup(esw, vport);
+}
+
+static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ int err;
+ int i;
+
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
+ return 0;
+
+ mlx5_esw_for_all_vports(esw, i, vport) {
+ err = esw_offloads_vport_metadata_setup(esw, vport);
+ if (err)
+ goto metadata_err;
+ }
+
+ return 0;
+
+metadata_err:
+ esw_offloads_metadata_uninit(esw);
+ return err;
+}
+
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
int err;
- err = esw_offloads_vport_metadata_setup(esw, vport);
- if (err)
- goto metadata_err;
-
err = esw_acl_ingress_ofld_setup(esw, vport);
if (err)
- goto ingress_err;
+ return err;
if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
err = esw_acl_egress_ofld_setup(esw, vport);
@@ -1950,9 +1965,6 @@ esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
egress_err:
esw_acl_ingress_ofld_cleanup(esw, vport);
-ingress_err:
- esw_offloads_vport_metadata_cleanup(esw, vport);
-metadata_err:
return err;
}
@@ -1962,22 +1974,14 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
{
esw_acl_egress_ofld_cleanup(vport);
esw_acl_ingress_ofld_cleanup(esw, vport);
- esw_offloads_vport_metadata_cleanup(esw, vport);
}
static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
- int err;
-
- if (esw_use_vport_metadata(esw))
- esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
- err = esw_vport_create_offloads_acl_tables(esw, vport);
- if (err)
- esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
- return err;
+ return esw_vport_create_offloads_acl_tables(esw, vport);
}
static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
@@ -1986,7 +1990,6 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
esw_vport_destroy_offloads_acl_tables(esw, vport);
- esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
@@ -2144,7 +2147,14 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
err = mlx5_esw_host_number_init(esw);
if (err)
- goto err_vport_metadata;
+ goto err_metadata;
+
+ if (esw_check_vport_match_metadata_supported(esw))
+ esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+
+ err = esw_offloads_metadata_init(esw);
+ if (err)
+ goto err_metadata;
err = esw_set_passing_vport_metadata(esw, true);
if (err)
@@ -2178,6 +2188,9 @@ err_uplink:
err_steering_init:
esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
+ esw_offloads_metadata_uninit(esw);
+err_metadata:
+ esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
mlx5_rdma_disable_roce(esw->dev);
mutex_destroy(&esw->offloads.termtbl_mutex);
return err;
@@ -2211,6 +2224,8 @@ void esw_offloads_disable(struct mlx5_eswitch *esw)
esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
+ esw_offloads_metadata_uninit(esw);
+ esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
mlx5_rdma_disable_roce(esw->dev);
mutex_destroy(&esw->offloads.termtbl_mutex);
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
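This refactor pulls per-vport metadata allocation out of the ACL-table setup into a dedicated init/uninit pair, using the usual kernel unwind idiom: set up in order, tear down in reverse on failure. Notably, esw_offloads_metadata_init() recovers by calling the full uninit, which is safe because esw_offloads_vport_metadata_cleanup() returns early for vports whose default_metadata was never allocated. A generic standalone sketch of the idiom (setup_one/cleanup_one are illustrative names, not mlx5 helpers):

    struct item { int initialized; };

    static int  setup_one(struct item *it)   { it->initialized = 1; return 0; }
    static void cleanup_one(struct item *it) { it->initialized = 0; }

    static int init_all(struct item *items, int n)
    {
        int i, err;

        for (i = 0; i < n; i++) {
            err = setup_one(&items[i]);
            if (err)
                goto unwind;
        }
        return 0;

    unwind:
        /* walk back over everything already set up, newest first */
        while (--i >= 0)
            cleanup_one(&items[i]);
        return err;
    }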
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 874c70e8cc54..33081b24f10a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -102,7 +102,7 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
if (ldev->pf[i].netdev == ndev)
return i;
- return -1;
+ return -ENOENT;
}
static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
@@ -271,7 +271,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
bool do_bond, roce_lag;
int err;
- if (!dev0 || !dev1)
+ if (!mlx5_lag_is_ready(ldev))
return;
spin_lock(&lag_lock);
@@ -355,7 +355,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
{
struct net_device *upper = info->upper_dev, *ndev_tmp;
struct netdev_lag_upper_info *lag_upper_info = NULL;
- bool is_bonded;
+ bool is_bonded, is_in_lag, mode_supported;
int bond_status = 0;
int num_slaves = 0;
int idx;
@@ -374,7 +374,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
rcu_read_lock();
for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
- if (idx > -1)
+ if (idx >= 0)
bond_status |= (1 << idx);
num_slaves++;
@@ -391,13 +391,24 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
/* Determine bonding status:
* A device is considered bonded if both its physical ports are slaves
* of the same lag master, and only them.
- * Lag mode must be activebackup or hash.
*/
- is_bonded = (num_slaves == MLX5_MAX_PORTS) &&
- (bond_status == 0x3) &&
- ((tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) ||
- (tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH));
+ is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3;
+ if (!mlx5_lag_is_ready(ldev) && is_in_lag) {
+ NL_SET_ERR_MSG_MOD(info->info.extack,
+ "Can't activate LAG offload, PF is configured with more than 64 VFs");
+ return 0;
+ }
+
+ /* Lag mode must be activebackup or hash. */
+ mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
+ tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH;
+
+ if (is_in_lag && !mode_supported)
+ NL_SET_ERR_MSG_MOD(info->info.extack,
+ "Can't activate LAG offload, TX type isn't supported");
+
+ is_bonded = is_in_lag && mode_supported;
if (tracker->is_bonded != is_bonded) {
tracker->is_bonded = is_bonded;
return 1;
@@ -418,7 +429,7 @@ static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
return 0;
idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
- if (idx == -1)
+ if (idx < 0)
return 0;
/* This information is used to determine virtual to physical
@@ -445,6 +456,10 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
ldev = container_of(this, struct mlx5_lag, nb);
+
+ if (!mlx5_lag_is_ready(ldev) && event == NETDEV_CHANGELOWERSTATE)
+ return NOTIFY_DONE;
+
tracker = ldev->tracker;
switch (event) {
@@ -493,14 +508,14 @@ static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
kfree(ldev);
}
-static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev,
- struct net_device *netdev)
+static int mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
+ struct mlx5_core_dev *dev,
+ struct net_device *netdev)
{
unsigned int fn = PCI_FUNC(dev->pdev->devfn);
if (fn >= MLX5_MAX_PORTS)
- return;
+ return -EPERM;
spin_lock(&lag_lock);
ldev->pf[fn].dev = dev;
@@ -511,6 +526,8 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
dev->priv.lag = ldev;
spin_unlock(&lag_lock);
+
+ return fn;
}
static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
@@ -537,11 +554,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
{
struct mlx5_lag *ldev = NULL;
struct mlx5_core_dev *tmp_dev;
- int err;
+ int i, err;
- if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
- !MLX5_CAP_GEN(dev, lag_master) ||
- (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS))
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
return;
tmp_dev = mlx5_get_next_phys_dev(dev);
@@ -556,7 +571,18 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
}
}
- mlx5_lag_dev_add_pf(ldev, dev, netdev);
+ if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
+ return;
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ tmp_dev = ldev->pf[i].dev;
+ if (!tmp_dev || !MLX5_CAP_GEN(tmp_dev, lag_master) ||
+ MLX5_CAP_GEN(tmp_dev, num_lag_ports) != MLX5_MAX_PORTS)
+ break;
+ }
+
+ if (i >= MLX5_MAX_PORTS)
+ ldev->flags |= MLX5_LAG_FLAG_READY;
if (!ldev->nb.notifier_call) {
ldev->nb.notifier_call = mlx5_lag_netdev_event;
@@ -587,6 +613,8 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
mlx5_lag_dev_remove_pf(ldev, dev);
+ ldev->flags &= ~MLX5_LAG_FLAG_READY;
+
for (i = 0; i < MLX5_MAX_PORTS; i++)
if (ldev->pf[i].dev)
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
index f1068aac6406..8d8cf2d0bc6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
@@ -16,6 +16,7 @@ enum {
MLX5_LAG_FLAG_ROCE = 1 << 0,
MLX5_LAG_FLAG_SRIOV = 1 << 1,
MLX5_LAG_FLAG_MULTIPATH = 1 << 2,
+ MLX5_LAG_FLAG_READY = 1 << 3,
};
#define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV |\
@@ -59,6 +60,12 @@ __mlx5_lag_is_active(struct mlx5_lag *ldev)
return !!(ldev->flags & MLX5_LAG_MODE_FLAGS);
}
+static inline bool
+mlx5_lag_is_ready(struct mlx5_lag *ldev)
+{
+ return ldev->flags & MLX5_LAG_FLAG_READY;
+}
+
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker);
int mlx5_activate_lag(struct mlx5_lag *ldev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index 9e68f5926ab6..88e58ac902de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -11,7 +11,7 @@
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
{
- if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev)
+ if (!mlx5_lag_is_ready(ldev))
return false;
return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
@@ -131,7 +131,12 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
struct net_device *nh_dev = nh->fib_nh_dev;
int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);
- mlx5_lag_set_port_affinity(ldev, ++i);
+ if (i < 0)
+ i = MLX5_LAG_NORMAL_AFFINITY;
+ else
+ ++i;
+
+ mlx5_lag_set_port_affinity(ldev, i);
}
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 2d55b7c22c03..7fc59e01a353 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -150,28 +150,30 @@ static void mlx5_pps_out(struct work_struct *work)
static void mlx5_timestamp_overflow(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
- overflow_work);
+ struct mlx5_core_dev *mdev;
+ struct mlx5_clock *clock;
unsigned long flags;
+ clock = container_of(dwork, struct mlx5_clock, overflow_work);
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
timecounter_read(&clock->tc);
- mlx5_update_clock_info_page(clock->mdev);
+ mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
}
-static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
- const struct timespec64 *ts)
+static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
- ptp_info);
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
u64 ns = timespec64_to_ns(ts);
+ struct mlx5_core_dev *mdev;
unsigned long flags;
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
timecounter_init(&clock->tc, &clock->cycles, ns);
- mlx5_update_clock_info_page(clock->mdev);
+ mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
@@ -180,13 +182,12 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
- ptp_info);
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
- clock);
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
unsigned long flags;
u64 cycles, ns;
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
cycles = mlx5_read_internal_timer(mdev, sts);
ns = timecounter_cyc2time(&clock->tc, cycles);
@@ -199,13 +200,14 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
- ptp_info);
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
unsigned long flags;
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
timecounter_adjtime(&clock->tc, delta);
- mlx5_update_clock_info_page(clock->mdev);
+ mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
@@ -213,12 +215,13 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
- u64 adj;
- u32 diff;
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
unsigned long flags;
int neg_adj = 0;
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
- ptp_info);
+ u32 diff;
+ u64 adj;
+
if (delta < 0) {
neg_adj = 1;
@@ -229,11 +232,12 @@ static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
adj *= delta;
diff = div_u64(adj, 1000000000ULL);
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
timecounter_read(&clock->tc);
clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
clock->nominal_c_mult + diff;
- mlx5_update_clock_info_page(clock->mdev);
+ mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
@@ -437,7 +441,7 @@ static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
static const struct ptp_clock_info mlx5_ptp_clock_info = {
.owner = THIS_MODULE,
- .name = "mlx5_p2p",
+ .name = "mlx5_ptp",
.max_adj = 100000000,
.n_alarm = 0,
.n_ext_ts = 0,
@@ -465,7 +469,8 @@ static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
{
- struct mlx5_core_dev *mdev = clock->mdev;
+ struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
+
u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
u8 mode;
int err;
@@ -538,20 +543,23 @@ static int mlx5_pps_event(struct notifier_block *nb,
unsigned long type, void *data)
{
struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
- struct mlx5_core_dev *mdev = clock->mdev;
struct ptp_clock_event ptp_event;
u64 cycles_now, cycles_delta;
u64 nsec_now, nsec_delta, ns;
struct mlx5_eqe *eqe = data;
int pin = eqe->data.pps.pin;
+ struct mlx5_core_dev *mdev;
struct timespec64 ts;
unsigned long flags;
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
+
switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS:
ptp_event.index = pin;
- ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
- be64_to_cpu(eqe->data.pps.time_stamp));
+ ptp_event.timestamp =
+ mlx5_timecounter_cyc2time(clock,
+ be64_to_cpu(eqe->data.pps.time_stamp));
if (clock->pps_info.enabled) {
ptp_event.type = PTP_CLOCK_PPSUSR;
ptp_event.pps_times.ts_real =
@@ -574,8 +582,8 @@ static int mlx5_pps_event(struct notifier_block *nb,
cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
clock->cycles.mult);
clock->pps_info.start[pin] = cycles_now + cycles_delta;
- schedule_work(&clock->pps_info.out_work);
write_sequnlock_irqrestore(&clock->lock, flags);
+ schedule_work(&clock->pps_info.out_work);
break;
default:
mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
@@ -605,7 +613,6 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
clock->cycles.shift);
clock->nominal_c_mult = clock->cycles.mult;
clock->cycles.mask = CLOCKSOURCE_MASK(41);
- clock->mdev = mdev;
timecounter_init(&clock->tc, &clock->cycles,
ktime_to_ns(ktime_get_real()));
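A recurring change in this clock.c cleanup is dropping the stored clock->mdev back-pointer (removed in the last hunk above) in favor of container_of(): since struct mlx5_clock is embedded in struct mlx5_core_dev, the parent device can always be recomputed from the member's address. A minimal standalone illustration of the pattern (struct dev and struct clk are stand-ins, not the mlx5 types):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct clk { long cycles; };
    struct dev { int id; struct clk clock; };  /* clk embedded, as in mlx5 */

    int main(void)
    {
        struct dev d = { .id = 42 };
        struct clk *c = &d.clock;

        /* recover the containing device from the embedded member's address */
        struct dev *parent = container_of(c, struct dev, clock);
        printf("parent id = %d\n", parent->id);  /* prints 42 */
        return 0;
    }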