path: root/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
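/*
 * Multi-Port E-Switch (MPESW) LAG mode support.
 *
 * In MPESW mode both uplink ports are handled by a single E-Switch, so
 * steering rules, rather than LAG port selection, decide the egress port.
 * Rules that depend on this mode take a reference via
 * mlx5_lag_add_mpesw_rule(); the first reference activates LAG in MPESW
 * mode and the last mlx5_lag_del_mpesw_rule() deactivates it.
 */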

#include <linux/netdevice.h>
#include <net/nexthop.h>
#include "lag/lag.h"
#include "eswitch.h"
#include "lib/mlx5.h"

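/*
 * Take an MPESW rule reference; the first reference activates LAG in MPESW
 * mode. Fails if LAG is already active in another mode. Called under
 * ldev->lock from mlx5_mpesw_work().
 */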
static int add_mpesw_rule(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	int err;

	if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1)
		return 0;

	if (ldev->mode != MLX5_LAG_MODE_NONE) {
		err = -EINVAL;
		goto out_err;
	}

	err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);
	if (err) {
		mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);
		goto out_err;
	}

	return 0;

out_err:
	atomic_dec(&ldev->lag_mpesw.mpesw_rule_count);
	return err;
}

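/*
 * Drop an MPESW rule reference; the last reference disables LAG, provided
 * it is still in MPESW mode. Called under ldev->lock from mlx5_mpesw_work().
 */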
static void del_mpesw_rule(struct mlx5_lag *ldev)
{
	if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) &&
	    ldev->mode == MLX5_LAG_MODE_MPESW)
		mlx5_disable_lag(ldev);
}

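/* Work handler: run the requested op under ldev->lock and wake the waiter. */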
static void mlx5_mpesw_work(struct work_struct *work)
{
	struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work);
	struct mlx5_lag *ldev = mpesww->lag;

	mutex_lock(&ldev->lock);
	if (mpesww->op == MLX5_MPESW_OP_ENABLE)
		mpesww->result = add_mpesw_rule(ldev);
	else if (mpesww->op == MLX5_MPESW_OP_DISABLE)
		del_mpesw_rule(ldev);
	mutex_unlock(&ldev->lock);

	complete(&mpesww->comp);
}

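/*
 * Queue an enable/disable op on the LAG workqueue and wait for it to finish.
 * The result is reported back through the zero-initialized work struct, so a
 * disable op always returns 0.
 */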
static int mlx5_lag_mpesw_queue_work(struct mlx5_core_dev *dev,
				     enum mpesw_op op)
{
	struct mlx5_lag *ldev = mlx5_lag_dev(dev);
	struct mlx5_mpesw_work_st *work;
	int err = 0;

	if (!ldev)
		return 0;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	INIT_WORK(&work->work, mlx5_mpesw_work);
	init_completion(&work->comp);
	work->op = op;
	work->lag = ldev;

	if (!queue_work(ldev->wq, &work->work)) {
		mlx5_core_warn(dev, "failed to queue mpesw work\n");
		err = -EINVAL;
		goto out;
	}
	wait_for_completion(&work->comp);
	err = work->result;
out:
	kfree(work);
	return err;
}

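/* Exported entry points; the actual work runs on the LAG workqueue. */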
void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
{
	mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_DISABLE);
}

int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
{
	return mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_ENABLE);
}

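/*
 * Refuse TC mirred (redirect/mirror) to a bond master while this device's
 * LAG runs in MPESW mode; the rejection is reported via extack.
 */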
int mlx5_lag_mpesw_do_mirred(struct mlx5_core_dev *mdev,
			     struct net_device *out_dev,
			     struct netlink_ext_ack *extack)
{
	struct mlx5_lag *ldev = mlx5_lag_dev(mdev);

	if (!netif_is_bond_master(out_dev) || !ldev)
		return 0;

	if (ldev->mode != MLX5_LAG_MODE_MPESW)
		return 0;

	NL_SET_ERR_MSG_MOD(extack, "can't forward to bond in mpesw mode");
	return -EOPNOTSUPP;
}

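/* True when a LAG device exists and is currently in MPESW mode. */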
bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev = mlx5_lag_dev(dev);

	return ldev && ldev->mode == MLX5_LAG_MODE_MPESW;
}

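/* Init/teardown of per-LAG MPESW state; no rules may remain at cleanup. */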
void mlx5_lag_mpesw_init(struct mlx5_lag *ldev)
{
	atomic_set(&ldev->lag_mpesw.mpesw_rule_count, 0);
}

void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev)
{
	WARN_ON(atomic_read(&ldev->lag_mpesw.mpesw_rule_count));
}
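
/*
 * Usage sketch (illustrative only; create_rule() below is a hypothetical
 * helper, not a driver API): a caller that needs MPESW semantics holds a
 * rule reference for the lifetime of its rule:
 *
 *	err = mlx5_lag_add_mpesw_rule(dev);
 *	if (err)
 *		return err;
 *	err = create_rule(dev, ...);
 *	if (err)
 *		mlx5_lag_del_mpesw_rule(dev);
 */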