path: root/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_LAG_H__
#define __MLX5_LAG_H__

#include <linux/debugfs.h>

#define MLX5_LAG_MAX_HASH_BUCKETS 16
#include "mlx5_core.h"
#include "mp.h"
#include "port_sel.h"
#include "mpesw.h"

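/* Indices of the LAG member ports/PFs. */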
enum {
	MLX5_LAG_P1,
	MLX5_LAG_P2,
};

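/* Bits for mlx5_lag::state_flags. */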
enum {
	MLX5_LAG_FLAG_NDEVS_READY,
};

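/* Bits for mlx5_lag::mode_flags. */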
enum {
	MLX5_LAG_MODE_FLAG_HASH_BASED,
	MLX5_LAG_MODE_FLAG_SHARED_FDB,
	MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
};

enum mlx5_lag_mode {
	MLX5_LAG_MODE_NONE,
	MLX5_LAG_MODE_ROCE,
	MLX5_LAG_MODE_SRIOV,
	MLX5_LAG_MODE_MULTIPATH,
	MLX5_LAG_MODE_MPESW,
};

struct lag_func {
	struct mlx5_core_dev *dev;
	struct net_device    *netdev;
	bool has_drop;
};

/* Used for collection of netdev event info. */
struct lag_tracker {
	enum   netdev_lag_tx_type           tx_type;
	struct netdev_lag_lower_state_info  netdev_state[MLX5_MAX_PORTS];
	unsigned int is_bonded:1;
	unsigned int has_inactive:1;
	enum netdev_lag_hash hash_type;
};

/* LAG data of a ConnectX card.
 * It serves both of the card's physical functions (PFs).
 */
struct mlx5_lag {
	enum mlx5_lag_mode        mode;
	unsigned long		  mode_flags;
	unsigned long		  state_flags;
	u8			  ports;
	u8			  buckets;
	int			  mode_changes_in_progress;
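	/* Virtual-to-physical port mapping, one entry per (port, bucket) pair. */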
	u8			  v2p_map[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS];
	struct kref               ref;
	struct lag_func           pf[MLX5_MAX_PORTS];
	struct lag_tracker        tracker;
	struct workqueue_struct   *wq;
	struct delayed_work       bond_work;
	struct notifier_block     nb;
	struct lag_mp             lag_mp;
	struct mlx5_lag_port_sel  port_sel;
	/* Protect lag fields/state changes */
	struct mutex		  lock;
	struct lag_mpesw	  lag_mpesw;
};

static inline struct mlx5_lag *
mlx5_lag_dev(struct mlx5_core_dev *dev)
{
	return dev->priv.lag;
}

static inline bool
__mlx5_lag_is_active(struct mlx5_lag *ldev)
{
	return ldev->mode != MLX5_LAG_MODE_NONE;
}

static inline bool
mlx5_lag_is_ready(struct mlx5_lag *ldev)
{
	return test_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
}

bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev);
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev);
void mlx5_modify_lag(struct mlx5_lag *ldev,
		     struct lag_tracker *tracker);
int mlx5_activate_lag(struct mlx5_lag *ldev,
		      struct lag_tracker *tracker,
		      enum mlx5_lag_mode mode,
		      bool shared_fdb);
int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
				struct net_device *ndev);

char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags);
void mlx5_infer_tx_enabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
			   u8 *ports, int *num_enabled);

void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev);
void mlx5_ldev_remove_debugfs(struct dentry *dbg);
void mlx5_disable_lag(struct mlx5_lag *ldev);
void mlx5_lag_remove_devices(struct mlx5_lag *ldev);
int mlx5_deactivate_lag(struct mlx5_lag *ldev);
void mlx5_lag_add_devices(struct mlx5_lag *ldev);
struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev);

static inline bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
{
	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
	    !MLX5_CAP_GEN(dev, lag_master) ||
	    MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
	    mlx5_get_dev_index(dev) >= MLX5_MAX_PORTS ||
	    MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
		return false;
	return true;
}

#define mlx5_ldev_for_each(i, start_index, ldev) \
	for (int tmp = start_index; tmp = mlx5_get_next_ldev_func(ldev, tmp), \
	     i = tmp, tmp < MLX5_MAX_PORTS; tmp++)
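
/*
 * Illustrative usage sketch, assuming mlx5_get_next_ldev_func() yields the
 * next index with a populated pf entry; 'i' receives each such index in turn:
 *
 *	int i;
 *
 *	mlx5_ldev_for_each(i, 0, ldev) {
 *		struct mlx5_core_dev *dev = ldev->pf[i].dev;
 *		...
 *	}
 */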

#define mlx5_ldev_for_each_reverse(i, start_index, end_index, ldev)      \
	for (int tmp = start_index, tmp1 = end_index; \
	     tmp = mlx5_get_pre_ldev_func(ldev, tmp, tmp1), \
	     i = tmp, tmp >= tmp1; tmp--)

int mlx5_get_pre_ldev_func(struct mlx5_lag *ldev, int start_idx, int end_idx);
int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx);
int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq);
int mlx5_lag_num_devs(struct mlx5_lag *ldev);
int mlx5_lag_num_netdevs(struct mlx5_lag *ldev);
#endif /* __MLX5_LAG_H__ */