drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2018 Mellanox Technologies. */

#include <net/ip_tunnels.h>
#include <net/vxlan.h>
#include "lib/vxlan.h"
#include "en/tc_tun.h"

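/* VXLAN encap/decap offload requires the corresponding eswitch capability. */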
static bool mlx5e_tc_tun_can_offload_vxlan(struct mlx5e_priv *priv)
{
	return !!MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap);
}

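/* Encap header length to reserve: UDP header plus VXLAN header. */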
static int mlx5e_tc_tun_calc_hlen_vxlan(struct mlx5e_encap_entry *e)
{
	return VXLAN_HLEN;
}

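/* Reject a match on a UDP destination port that has not been registered
 * with the device as a VXLAN port.
 */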
static int mlx5e_tc_tun_check_udp_dport_vxlan(struct mlx5e_priv *priv,
					      struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_match_ports enc_ports;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS))
		return -EOPNOTSUPP;

	flow_rule_match_enc_ports(rule, &enc_ports);

	/* The matched UDP destination port must be registered as a VXLAN port. */
	if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan,
				    be16_to_cpu(enc_ports.key->dst))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Matched UDP dst port is not registered as a VXLAN port");
		netdev_warn(priv->netdev,
			    "UDP port %d is not registered as a VXLAN port\n",
			    be16_to_cpu(enc_ports.key->dst));
		return -EOPNOTSUPP;
	}

	return 0;
}

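/* Parse the generic UDP ports match, then validate the destination port. */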
static int mlx5e_tc_tun_parse_udp_ports_vxlan(struct mlx5e_priv *priv,
					      struct mlx5_flow_spec *spec,
					      struct flow_cls_offload *f,
					      void *headers_c,
					      void *headers_v)
{
	int err;

	err = mlx5e_tc_tun_parse_udp_ports(priv, spec, f, headers_c, headers_v);
	if (err)
		return err;

	return mlx5e_tc_tun_check_udp_dport_vxlan(priv, f);
}

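/* Set up VXLAN encap attributes: the destination UDP port must already be
 * offloaded to the device, and packets are reformatted from L2 to VXLAN.
 */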
static int mlx5e_tc_tun_init_encap_attr_vxlan(struct net_device *tunnel_dev,
					      struct mlx5e_priv *priv,
					      struct mlx5e_encap_entry *e,
					      struct netlink_ext_ack *extack)
{
	int dst_port = be16_to_cpu(e->tun_info->key.tp_dst);

	e->tunnel = &vxlan_tunnel;

	if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "vxlan udp dport was not registered with the HW");
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n",
			    dst_port);
		return -EOPNOTSUPP;
	}

	e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
	return 0;
}

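/* Write the UDP and VXLAN headers of the encap header into @buf and report
 * UDP as the IP payload protocol. GBP metadata carried in the tunnel
 * options, if any, is folded into the VXLAN header.
 */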
static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
					    __u8 *ip_proto,
					    struct mlx5e_encap_entry *e)
{
	const struct ip_tunnel_key *tun_key = &e->tun_info->key;
	__be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
	struct udphdr *udp = (struct udphdr *)(buf);
	const struct vxlan_metadata *md;
	struct vxlanhdr *vxh;

	if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, tun_key->tun_flags) &&
	    e->tun_info->options_len != sizeof(*md))
		return -EOPNOTSUPP;
	vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
	*ip_proto = IPPROTO_UDP;

	udp->dest = tun_key->tp_dst;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(tun_id);
	if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, tun_key->tun_flags)) {
		md = ip_tunnel_info_opts(e->tun_info);
		vxlan_build_gbp_hdr(vxh, md);
	}

	return 0;
}

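/* Translate a VXLAN GBP (Group Based Policy) option match into a match on
 * tunnel_header_0. Only a 32-bit option restricted to VXLAN_GBP_MASK is
 * accepted, and matching requires the tunnel_header_0_1 device capability.
 */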
static int mlx5e_tc_tun_parse_vxlan_gbp_option(struct mlx5e_priv *priv,
					       struct mlx5_flow_spec *spec,
					       struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_match_enc_opts enc_opts;
	void *misc5_c, *misc5_v;
	u32 *gbp, *gbp_mask;

	flow_rule_match_enc_opts(rule, &enc_opts);

	if (memchr_inv(&enc_opts.mask->data, 0, sizeof(enc_opts.mask->data)) &&
	    !MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(priv->mdev, tunnel_header_0_1)) {
		NL_SET_ERR_MSG_MOD(extack, "Matching on VxLAN GBP is not supported");
		return -EOPNOTSUPP;
	}

	if (enc_opts.key->dst_opt_type != IP_TUNNEL_VXLAN_OPT_BIT) {
		NL_SET_ERR_MSG_MOD(extack, "Wrong VxLAN option type: not GBP");
		return -EOPNOTSUPP;
	}

	if (enc_opts.key->len != sizeof(*gbp) ||
	    enc_opts.mask->len != sizeof(*gbp_mask)) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN GBP option/mask len is not 32 bits");
		return -EINVAL;
	}

	gbp = (u32 *)&enc_opts.key->data[0];
	gbp_mask = (u32 *)&enc_opts.mask->data[0];

	if (*gbp_mask & ~VXLAN_GBP_MASK) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "Wrong VxLAN GBP mask (0x%08X)", *gbp_mask);
		return -EINVAL;
	}

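	/* Program the validated GBP key and mask into tunnel_header_0. */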
	misc5_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_5);
	misc5_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_5);
	MLX5_SET(fte_match_set_misc5, misc5_c, tunnel_header_0, *gbp_mask);
	MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_0, *gbp);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;

	return 0;
}

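/* Parse the VXLAN tunnel match: the VNI (tunnel key ID) is matched through
 * the vxlan_vni misc parameter; an optional GBP option is matched through
 * misc parameters 5.
 */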
static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
				    struct mlx5_flow_spec *spec,
				    struct flow_cls_offload *f,
				    void *headers_c,
				    void *headers_v)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_match_enc_keyid enc_keyid;
	void *misc_c, *misc_v;

	misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
		return 0;

	flow_rule_match_enc_keyid(rule, &enc_keyid);

	if (!enc_keyid.mask->keyid)
		return 0;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
		int err;

		err = mlx5e_tc_tun_parse_vxlan_gbp_option(priv, spec, f);
		if (err)
			return err;
	}

	/* Matching on the VNI requires device support for the outer VNI field. */
	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
					ft_field_support.outer_vxlan_vni)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Matching on VXLAN VNI is not supported");
		netdev_warn(priv->netdev,
			    "Matching on VXLAN VNI is not supported\n");
		return -EOPNOTSUPP;
	}

	MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
		 be32_to_cpu(enc_keyid.mask->keyid));
	MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
		 be32_to_cpu(enc_keyid.key->keyid));

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;

	return 0;
}

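/* Encap entries may only be shared when tunnel keys and VXLAN options match. */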
static bool mlx5e_tc_tun_encap_info_equal_vxlan(struct mlx5e_encap_key *a,
						struct mlx5e_encap_key *b)
{
	return mlx5e_tc_tun_encap_info_equal_options(a, b,
						     IP_TUNNEL_VXLAN_OPT_BIT);
}

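/* The underlay egress device is the VXLAN netdev's default remote ifindex. */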
static int mlx5e_tc_tun_get_remote_ifindex(struct net_device *mirred_dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(mirred_dev);
	const struct vxlan_rdst *dst = &vxlan->default_dst;

	return dst->remote_ifindex;
}

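/* TC tunnel offload callbacks for VXLAN. */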
struct mlx5e_tc_tunnel vxlan_tunnel = {
	.tunnel_type          = MLX5E_TC_TUNNEL_TYPE_VXLAN,
	.match_level          = MLX5_MATCH_L4,
	.can_offload          = mlx5e_tc_tun_can_offload_vxlan,
	.calc_hlen            = mlx5e_tc_tun_calc_hlen_vxlan,
	.init_encap_attr      = mlx5e_tc_tun_init_encap_attr_vxlan,
	.generate_ip_tun_hdr  = mlx5e_gen_ip_tunnel_header_vxlan,
	.parse_udp_ports      = mlx5e_tc_tun_parse_udp_ports_vxlan,
	.parse_tunnel         = mlx5e_tc_tun_parse_vxlan,
	.encap_info_equal     = mlx5e_tc_tun_encap_info_equal_vxlan,
	.get_remote_ifindex   = mlx5e_tc_tun_get_remote_ifindex,
};