Diffstat (limited to 'drivers/net/ethernet/mellanox/mlxsw')
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c               |   18
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c |   26
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_env.h           |    3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h                |  351
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c           |  134
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h           |   14
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c     |    9
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c      |   46
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h      |    6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c        |    2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c       |    6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h       |    5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c |   67
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c       |    8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h       |    7
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c    | 2367
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h    |   69
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c |  151
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c      |    8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/switchx2.c           |    1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/trap.h               |    1
21 files changed, 2540 insertions(+), 759 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 1a86535c4968..c67825a68a26 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -160,6 +160,7 @@ struct mlxsw_rx_listener_item {
struct mlxsw_event_listener_item {
struct list_head list;
+ struct mlxsw_core *mlxsw_core;
struct mlxsw_event_listener el;
void *priv;
};
@@ -1117,16 +1118,7 @@ static int mlxsw_core_fw_flash_update(struct mlxsw_core *mlxsw_core,
struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack)
{
- const struct firmware *firmware;
- int err;
-
- err = request_firmware_direct(&firmware, params->file_name, mlxsw_core->bus_info->dev);
- if (err)
- return err;
- err = mlxsw_core_fw_flash(mlxsw_core, firmware, extack);
- release_firmware(firmware);
-
- return err;
+ return mlxsw_core_fw_flash(mlxsw_core, params->fw, extack);
}
static int mlxsw_core_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
@@ -2180,11 +2172,16 @@ static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
void *priv)
{
struct mlxsw_event_listener_item *event_listener_item = priv;
+ struct mlxsw_core *mlxsw_core;
struct mlxsw_reg_info reg;
char *payload;
char *reg_tlv;
char *op_tlv;
+ mlxsw_core = event_listener_item->mlxsw_core;
+ trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
+ skb->data, skb->len);
+
mlxsw_emad_tlv_parse(skb);
op_tlv = mlxsw_emad_op_tlv(skb);
reg_tlv = mlxsw_emad_reg_tlv(skb);
@@ -2234,6 +2231,7 @@ int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
if (!el_item)
return -ENOMEM;
+ el_item->mlxsw_core = mlxsw_core;
el_item->el = *el;
el_item->priv = priv;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index 9f6905fa6b47..f1b09c2f9eda 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -133,10 +133,8 @@ mlxsw_afk_key_info_find(struct mlxsw_afk *mlxsw_afk,
}
struct mlxsw_afk_picker {
- struct {
- DECLARE_BITMAP(element, MLXSW_AFK_ELEMENT_MAX);
- unsigned int total;
- } hits[0];
+ DECLARE_BITMAP(element, MLXSW_AFK_ELEMENT_MAX);
+ unsigned int total;
};
static void mlxsw_afk_picker_count_hits(struct mlxsw_afk *mlxsw_afk,
@@ -154,8 +152,8 @@ static void mlxsw_afk_picker_count_hits(struct mlxsw_afk *mlxsw_afk,
elinst = &block->instances[j];
if (elinst->element == element) {
- __set_bit(element, picker->hits[i].element);
- picker->hits[i].total++;
+ __set_bit(element, picker[i].element);
+ picker[i].total++;
}
}
}
@@ -169,13 +167,13 @@ static void mlxsw_afk_picker_subtract_hits(struct mlxsw_afk *mlxsw_afk,
int i;
int j;
- memcpy(&hits_element, &picker->hits[block_index].element,
+ memcpy(&hits_element, &picker[block_index].element,
sizeof(hits_element));
for (i = 0; i < mlxsw_afk->blocks_count; i++) {
for_each_set_bit(j, hits_element, MLXSW_AFK_ELEMENT_MAX) {
- if (__test_and_clear_bit(j, picker->hits[i].element))
- picker->hits[i].total--;
+ if (__test_and_clear_bit(j, picker[i].element))
+ picker[i].total--;
}
}
}
@@ -188,8 +186,8 @@ static int mlxsw_afk_picker_most_hits_get(struct mlxsw_afk *mlxsw_afk,
int i;
for (i = 0; i < mlxsw_afk->blocks_count; i++) {
- if (picker->hits[i].total > most_hits) {
- most_hits = picker->hits[i].total;
+ if (picker[i].total > most_hits) {
+ most_hits = picker[i].total;
most_index = i;
}
}
@@ -206,7 +204,7 @@ static int mlxsw_afk_picker_key_info_add(struct mlxsw_afk *mlxsw_afk,
if (key_info->blocks_count == mlxsw_afk->max_blocks)
return -EINVAL;
- for_each_set_bit(element, picker->hits[block_index].element,
+ for_each_set_bit(element, picker[block_index].element,
MLXSW_AFK_ELEMENT_MAX) {
key_info->element_to_block[element] = key_info->blocks_count;
mlxsw_afk_element_usage_add(&key_info->elusage, element);
@@ -224,11 +222,9 @@ static int mlxsw_afk_picker(struct mlxsw_afk *mlxsw_afk,
{
struct mlxsw_afk_picker *picker;
enum mlxsw_afk_element element;
- size_t alloc_size;
int err;
- alloc_size = sizeof(picker->hits[0]) * mlxsw_afk->blocks_count;
- picker = kzalloc(alloc_size, GFP_KERNEL);
+ picker = kcalloc(mlxsw_afk->blocks_count, sizeof(*picker), GFP_KERNEL);
if (!picker)
return -ENOMEM;
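
The picker change above flattens struct mlxsw_afk_picker: instead of a single allocation wrapping a zero-length hits[] array of anonymous structs, the picker struct itself is now the per-block element, and an array of them is allocated with kcalloc(), which also guards the count * size multiplication against overflow. A minimal sketch of the resulting pattern (blocks_count stands in for mlxsw_afk->blocks_count):

	struct mlxsw_afk_picker *picker;

	picker = kcalloc(blocks_count, sizeof(*picker), GFP_KERNEL);
	if (!picker)
		return -ENOMEM;
	/* The former picker->hits[i].element / picker->hits[i].total are
	 * now plain array accesses:
	 */
	__set_bit(element, picker[0].element);
	picker[0].total++;
	kfree(picker);
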
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
index 8e36a2634ef5..2b23f8a87862 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -4,6 +4,9 @@
#ifndef _MLXSW_CORE_ENV_H
#define _MLXSW_CORE_ENV_H
+struct ethtool_modinfo;
+struct ethtool_eeprom;
+
int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
int off, int *temp);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 39eff6a57ba2..2a89b3261f00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -581,6 +581,13 @@ mlxsw_reg_sfd_uc_tunnel_pack(char *payload, int rec_index,
mlxsw_reg_sfd_uc_tunnel_protocol_set(payload, rec_index, proto);
}
+enum mlxsw_reg_tunnel_port {
+ MLXSW_REG_TUNNEL_PORT_NVE,
+ MLXSW_REG_TUNNEL_PORT_VPLS,
+ MLXSW_REG_TUNNEL_PORT_FLEX_TUNNEL0,
+ MLXSW_REG_TUNNEL_PORT_FLEX_TUNNEL1,
+};
+
/* SFN - Switch FDB Notification Register
* -------------------------------------------
* The switch provides notifications on newly learned FDB entries and
@@ -738,13 +745,6 @@ MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_protocol, MLXSW_REG_SFN_BASE_LEN, 27,
MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_uip_lsb, MLXSW_REG_SFN_BASE_LEN, 0,
24, MLXSW_REG_SFN_REC_LEN, 0x0C, false);
-enum mlxsw_reg_sfn_tunnel_port {
- MLXSW_REG_SFN_TUNNEL_PORT_NVE,
- MLXSW_REG_SFN_TUNNEL_PORT_VPLS,
- MLXSW_REG_SFN_TUNNEL_FLEX_TUNNEL0,
- MLXSW_REG_SFN_TUNNEL_FLEX_TUNNEL1,
-};
-
/* reg_sfn_uc_tunnel_port
* Tunnel port.
* Reserved on Spectrum.
@@ -821,8 +821,16 @@ static inline void mlxsw_reg_spms_vid_pack(char *payload, u16 vid,
MLXSW_REG_DEFINE(spvid, MLXSW_REG_SPVID_ID, MLXSW_REG_SPVID_LEN);
+/* reg_spvid_tport
+ * Port is tunnel port.
+ * Reserved when SwitchX/-2 or Spectrum-1.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvid, tport, 0x00, 24, 1);
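
Each MLXSW_ITEM32() invocation generates inline accessors named after the register and field; for the tport item above that is, roughly, the pair below, which is how mlxsw_reg_spvid_tport_set() gets called from spectrum_nve_vxlan.c later in this diff:

	/* Conceptual expansion of MLXSW_ITEM32(reg, spvid, tport, ...);
	 * the real macro lives in item.h.
	 */
	static inline u32 mlxsw_reg_spvid_tport_get(const char *buf);
	static inline void mlxsw_reg_spvid_tport_set(char *buf, u32 val);
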
+
/* reg_spvid_local_port
- * Local port number.
+ * When tport = 0: Local port number. Not supported for CPU port.
+ * When tport = 1: Tunnel port.
* Access: Index
*/
MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8);
@@ -834,17 +842,30 @@ MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8);
+/* reg_spvid_et_vlan
+ * EtherType used when a VLAN is pushed at ingress (for untagged
+ * packets or for QinQ push mode).
+ * 0: ether_type0 - (default)
+ * 1: ether_type1
+ * 2: ether_type2 - Reserved when Spectrum-1, supported by Spectrum-2
+ * Ethertype IDs are configured by SVER.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvid, et_vlan, 0x04, 16, 2);
+
/* reg_spvid_pvid
* Port default VID
* Access: RW
*/
MLXSW_ITEM32(reg, spvid, pvid, 0x04, 0, 12);
-static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
+static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid,
+ u8 et_vlan)
{
MLXSW_REG_ZERO(spvid, payload);
mlxsw_reg_spvid_local_port_set(payload, local_port);
mlxsw_reg_spvid_pvid_set(payload, pvid);
+ mlxsw_reg_spvid_et_vlan_set(payload, et_vlan);
}
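
As a hedged usage sketch (local_port and vid are placeholders), programming a PVID that is pushed with the 802.1ad EtherType amounts to passing sver_type 1, matching the ETH_P_8021AD mapping added to spectrum.c below:

	char spvid_pl[MLXSW_REG_SPVID_LEN];
	int err;

	/* et_vlan = 1 selects ether_type1 (802.1ad, per SVER). */
	mlxsw_reg_spvid_pack(spvid_pl, local_port, vid, 1);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
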
/* SPVM - Switch Port VLAN Membership
@@ -1680,6 +1701,109 @@ static inline void mlxsw_reg_svfa_pack(char *payload, u8 local_port,
mlxsw_reg_svfa_vid_set(payload, vid);
}
+/* SPVTR - Switch Port VLAN Stacking Register
+ * ------------------------------------------
+ * The Switch Port VLAN Stacking register configures the VLAN mode of the port
+ * to enable VLAN stacking.
+ */
+#define MLXSW_REG_SPVTR_ID 0x201D
+#define MLXSW_REG_SPVTR_LEN 0x10
+
+MLXSW_REG_DEFINE(spvtr, MLXSW_REG_SPVTR_ID, MLXSW_REG_SPVTR_LEN);
+
+/* reg_spvtr_tport
+ * Port is tunnel port.
+ * Access: Index
+ *
+ * Note: Reserved when SwitchX/-2 or Spectrum-1.
+ */
+MLXSW_ITEM32(reg, spvtr, tport, 0x00, 24, 1);
+
+/* reg_spvtr_local_port
+ * When tport = 0: local port number (Not supported from/to CPU).
+ * When tport = 1: tunnel port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvtr, local_port, 0x00, 16, 8);
+
+/* reg_spvtr_ippe
+ * Ingress Port Prio Mode Update Enable.
+ * When set, the Port Prio Mode is updated with the provided ipprio_mode field.
+ * Reserved on Get operations.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, spvtr, ippe, 0x04, 31, 1);
+
+/* reg_spvtr_ipve
+ * Ingress Port VID Mode Update Enable.
+ * When set, the Ingress Port VID Mode is updated with the provided ipvid_mode
+ * field.
+ * Reserved on Get operations.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, spvtr, ipve, 0x04, 30, 1);
+
+/* reg_spvtr_epve
+ * Egress Port VID Mode Update Enable.
+ * When set, the Egress Port VID Mode is updated with the provided epvid_mode
+ * field.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, spvtr, epve, 0x04, 29, 1);
+
+/* reg_spvtr_ipprio_mode
+ * Ingress Port Priority Mode.
+ * This controls the PCP and DEI of the new outer VLAN
+ * Note: for SwitchX/-2 the DEI is not affected.
+ * 0: use port default PCP and DEI (configured by QPDPC).
+ * 1: use C-VLAN PCP and DEI.
+ * Has no effect when ipvid_mode = 0.
+ * Reserved when tport = 1.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvtr, ipprio_mode, 0x04, 20, 4);
+
+enum mlxsw_reg_spvtr_ipvid_mode {
+ /* IEEE Compliant PVID (default) */
+ MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID,
+ /* Push VLAN (for VLAN stacking, except prio tagged packets) */
+ MLXSW_REG_SPVTR_IPVID_MODE_PUSH_VLAN_FOR_UNTAGGED_PACKET,
+ /* Always push VLAN (also for prio tagged packets) */
+ MLXSW_REG_SPVTR_IPVID_MODE_ALWAYS_PUSH_VLAN,
+};
+
+/* reg_spvtr_ipvid_mode
+ * Ingress Port VLAN-ID Mode.
+ * For Spectrum family, this affects the values of SPVM.i
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvtr, ipvid_mode, 0x04, 16, 4);
+
+enum mlxsw_reg_spvtr_epvid_mode {
+ /* IEEE Compliant VLAN membership */
+ MLXSW_REG_SPVTR_EPVID_MODE_IEEE_COMPLIANT_VLAN_MEMBERSHIP,
+ /* Pop VLAN (for VLAN stacking) */
+ MLXSW_REG_SPVTR_EPVID_MODE_POP_VLAN,
+};
+
+/* reg_spvtr_epvid_mode
+ * Egress Port VLAN-ID Mode.
+ * For Spectrum family, this affects the values of SPVM.e,u,pt.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, spvtr, epvid_mode, 0x04, 0, 4);
+
+static inline void mlxsw_reg_spvtr_pack(char *payload, bool tport,
+ u8 local_port,
+ enum mlxsw_reg_spvtr_ipvid_mode ipvid_mode)
+{
+ MLXSW_REG_ZERO(spvtr, payload);
+ mlxsw_reg_spvtr_tport_set(payload, tport);
+ mlxsw_reg_spvtr_local_port_set(payload, local_port);
+ mlxsw_reg_spvtr_ipvid_mode_set(payload, ipvid_mode);
+ mlxsw_reg_spvtr_ipve_set(payload, true);
+}
+
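Note that the pack helper sets ipve unconditionally, so a single SPVTR write updates the ingress VID mode. The VxLAN code later in this diff uses it for the NVE tunnel port roughly as follows (tport = true means local_port carries a tunnel port ID rather than a front-panel port):

	char spvtr_pl[MLXSW_REG_SPVTR_LEN];
	int err;

	/* Always push a VLAN so decapsulated packets pick up the PVID. */
	mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
			     MLXSW_REG_SPVTR_IPVID_MODE_ALWAYS_PUSH_VLAN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
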
/* SVPE - Switch Virtual-Port Enabling Register
* --------------------------------------------
* Enables port virtualization.
@@ -1857,6 +1981,104 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
}
}
+/* SPVC - Switch Port VLAN Classification Register
+ * -----------------------------------------------
+ * Configures the port to identify packets as untagged / single tagged /
+ * double tagged packets based on the packet EtherTypes.
+ * Ethertype IDs are configured by SVER.
+ */
+#define MLXSW_REG_SPVC_ID 0x2026
+#define MLXSW_REG_SPVC_LEN 0x0C
+
+MLXSW_REG_DEFINE(spvc, MLXSW_REG_SPVC_ID, MLXSW_REG_SPVC_LEN);
+
+/* reg_spvc_local_port
+ * Local port.
+ * Access: Index
+ *
+ * Note: applies both to the Rx port and the Tx port, so if a packet
+ * traverses Rx port i and Tx port j, then port i and port j must have the
+ * same configuration.
+ */
+MLXSW_ITEM32(reg, spvc, local_port, 0x00, 16, 8);
+
+/* reg_spvc_inner_et2
+ * Vlan Tag1 EtherType2 enable.
+ * Packet is initially classified as double VLAN Tag if in addition to
+ * being classified with a tag0 VLAN Tag its tag1 EtherType value is
+ * equal to ether_type2.
+ * 0: disable (default)
+ * 1: enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvc, inner_et2, 0x08, 17, 1);
+
+/* reg_spvc_et2
+ * Vlan Tag0 EtherType2 enable.
+ * Packet is initially classified as VLAN Tag if its tag0 EtherType is
+ * equal to ether_type2.
+ * 0: disable (default)
+ * 1: enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvc, et2, 0x08, 16, 1);
+
+/* reg_spvc_inner_et1
+ * Vlan Tag1 EtherType1 enable.
+ * Packet is initially classified as double VLAN Tag if in addition to
+ * being classified with a tag0 VLAN Tag its tag1 EtherType value is
+ * equal to ether_type1.
+ * 0: disable
+ * 1: enable (default)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvc, inner_et1, 0x08, 9, 1);
+
+/* reg_spvc_et1
+ * Vlan Tag0 EtherType1 enable.
+ * Packet is initially classified as VLAN Tag if its tag0 EtherType is
+ * equal to ether_type1.
+ * 0: disable
+ * 1: enable (default)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvc, et1, 0x08, 8, 1);
+
+/* reg_spvc_inner_et0
+ * Vlan Tag1 EtherType0 enable.
+ * Packet is initially classified as double VLAN Tag if in addition to
+ * being classified with a tag0 VLAN Tag its tag1 EtherType value is
+ * equal to ether_type0.
+ * 0: disable
+ * 1: enable (default)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvc, inner_et0, 0x08, 1, 1);
+
+/* reg_spvc_et0
+ * Vlan Tag0 EtherType0 enable.
+ * Packet is initially classified as VLAN Tag if its tag0 EtherType is
+ * equal to ether_type0.
+ * 0: disable
+ * 1: enable (default)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvc, et0, 0x08, 0, 1);
+
+static inline void mlxsw_reg_spvc_pack(char *payload, u8 local_port, bool et1,
+ bool et0)
+{
+ MLXSW_REG_ZERO(spvc, payload);
+ mlxsw_reg_spvc_local_port_set(payload, local_port);
+ /* Set inner_et1 and inner_et0 to enable identification of double
+ * tagged packets.
+ */
+ mlxsw_reg_spvc_inner_et1_set(payload, 1);
+ mlxsw_reg_spvc_inner_et0_set(payload, 1);
+ mlxsw_reg_spvc_et1_set(payload, et1);
+ mlxsw_reg_spvc_et0_set(payload, et0);
+}
+
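For illustration, the default classification that spectrum.c programs later in this diff (treat only 802.1q as tagged) comes down to et1 = false, et0 = true:

	char spvc_pl[MLXSW_REG_SPVC_LEN];
	int err;

	/* 802.1ad (ether_type1) frames are then handled as untagged. */
	mlxsw_reg_spvc_pack(spvc_pl, local_port, false, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
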
/* CWTP - Congestion WRED ECN TClass Profile
* ----------------------------------------
* Configures the profiles for queues of egress port and traffic class
@@ -7279,10 +7501,11 @@ static inline void mlxsw_reg_ralue_pack4(char *payload,
enum mlxsw_reg_ralxx_protocol protocol,
enum mlxsw_reg_ralue_op op,
u16 virtual_router, u8 prefix_len,
- u32 dip)
+ u32 *dip)
{
mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
- mlxsw_reg_ralue_dip4_set(payload, dip);
+ if (dip)
+ mlxsw_reg_ralue_dip4_set(payload, *dip);
}
static inline void mlxsw_reg_ralue_pack6(char *payload,
@@ -7292,7 +7515,8 @@ static inline void mlxsw_reg_ralue_pack6(char *payload,
const void *dip)
{
mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
- mlxsw_reg_ralue_dip6_memcpy_to(payload, dip);
+ if (dip)
+ mlxsw_reg_ralue_dip6_memcpy_to(payload, dip);
}
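
The dip arguments become pointers so a caller can pack the common part of the key without a destination IP; the "if (dip)" guard suggests this is for flows (presumably the XM path this series prepares for) where the IP is supplied through a different channel. Both call styles, with placeholder values:

	u32 dip = 0xc0000201;	/* hypothetical 192.0.2.1 */

	/* Classic caller: full key including the destination IP. */
	mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4, op,
			      vr_id, 32, &dip);
	/* A caller handling the DIP elsewhere packs only the rest. */
	mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4, op,
			      vr_id, 32, NULL);
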
static inline void
@@ -8245,6 +8469,86 @@ mlxsw_reg_rmft2_ipv6_pack(char *payload, bool v, u16 offset, u16 virtual_router,
mlxsw_reg_rmft2_sip6_mask_memcpy_to(payload, (void *)&sip6_mask);
}
+/* Note that the position of the XRALXX registers violates the rule of
+ * ordering register definitions by ID. However, the XRALXX pack helpers
+ * use the RALXX pack helpers, and the RALXX registers have higher IDs.
+ */
+
+/* XRALTA - XM Router Algorithmic LPM Tree Allocation Register
+ * -----------------------------------------------------------
+ * The XRALTA is used to allocate the XLT LPM trees.
+ *
+ * This register embeds original RALTA register.
+ */
+#define MLXSW_REG_XRALTA_ID 0x7811
+#define MLXSW_REG_XRALTA_LEN 0x08
+#define MLXSW_REG_XRALTA_RALTA_OFFSET 0x04
+
+MLXSW_REG_DEFINE(xralta, MLXSW_REG_XRALTA_ID, MLXSW_REG_XRALTA_LEN);
+
+static inline void mlxsw_reg_xralta_pack(char *payload, bool alloc,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ u8 tree_id)
+{
+ char *ralta_payload = payload + MLXSW_REG_XRALTA_RALTA_OFFSET;
+
+ MLXSW_REG_ZERO(xralta, payload);
+ mlxsw_reg_ralta_pack(ralta_payload, alloc, protocol, tree_id);
+}
+
+/* XRALST - XM Router Algorithmic LPM Structure Tree Register
+ * ----------------------------------------------------------
+ * The XRALST is used to set and query the structure of an XLT LPM tree.
+ *
+ * This register embeds original RALST register.
+ */
+#define MLXSW_REG_XRALST_ID 0x7812
+#define MLXSW_REG_XRALST_LEN 0x108
+#define MLXSW_REG_XRALST_RALST_OFFSET 0x04
+
+MLXSW_REG_DEFINE(xralst, MLXSW_REG_XRALST_ID, MLXSW_REG_XRALST_LEN);
+
+static inline void mlxsw_reg_xralst_pack(char *payload, u8 root_bin, u8 tree_id)
+{
+ char *ralst_payload = payload + MLXSW_REG_XRALST_RALST_OFFSET;
+
+ MLXSW_REG_ZERO(xralst, payload);
+ mlxsw_reg_ralst_pack(ralst_payload, root_bin, tree_id);
+}
+
+static inline void mlxsw_reg_xralst_bin_pack(char *payload, u8 bin_number,
+ u8 left_child_bin,
+ u8 right_child_bin)
+{
+ char *ralst_payload = payload + MLXSW_REG_XRALST_RALST_OFFSET;
+
+ mlxsw_reg_ralst_bin_pack(ralst_payload, bin_number, left_child_bin,
+ right_child_bin);
+}
+
+/* XRALTB - XM Router Algorithmic LPM Tree Binding Register
+ * --------------------------------------------------------
+ * The XRALTB register is used to bind virtual router and protocol
+ * to an allocated LPM tree.
+ *
+ * This register embeds original RALTB register.
+ */
+#define MLXSW_REG_XRALTB_ID 0x7813
+#define MLXSW_REG_XRALTB_LEN 0x08
+#define MLXSW_REG_XRALTB_RALTB_OFFSET 0x04
+
+MLXSW_REG_DEFINE(xraltb, MLXSW_REG_XRALTB_ID, MLXSW_REG_XRALTB_LEN);
+
+static inline void mlxsw_reg_xraltb_pack(char *payload, u16 virtual_router,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ u8 tree_id)
+{
+ char *raltb_payload = payload + MLXSW_REG_XRALTB_RALTB_OFFSET;
+
+ MLXSW_REG_ZERO(xraltb, payload);
+ mlxsw_reg_raltb_pack(raltb_payload, virtual_router, protocol, tree_id);
+}
+
/* MFCR - Management Fan Control Register
* --------------------------------------
* This register controls the settings of the Fan Speed PWM mechanism.
@@ -10314,13 +10618,6 @@ enum mlxsw_reg_tnumt_record_type {
*/
MLXSW_ITEM32(reg, tnumt, record_type, 0x00, 28, 4);
-enum mlxsw_reg_tnumt_tunnel_port {
- MLXSW_REG_TNUMT_TUNNEL_PORT_NVE,
- MLXSW_REG_TNUMT_TUNNEL_PORT_VPLS,
- MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL0,
- MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL1,
-};
-
/* reg_tnumt_tunnel_port
* Tunnel port.
* Access: RW
@@ -10368,7 +10665,7 @@ MLXSW_ITEM32_INDEXED(reg, tnumt, udip_ptr, 0x0C, 0, 24, 0x04, 0x00, false);
static inline void mlxsw_reg_tnumt_pack(char *payload,
enum mlxsw_reg_tnumt_record_type type,
- enum mlxsw_reg_tnumt_tunnel_port tport,
+ enum mlxsw_reg_tunnel_port tport,
u32 underlay_mc_ptr, bool vnext,
u32 next_underlay_mc_ptr,
u8 record_size)
@@ -10532,13 +10829,6 @@ static inline void mlxsw_reg_tndem_pack(char *payload, u8 underlay_ecn,
MLXSW_REG_DEFINE(tnpc, MLXSW_REG_TNPC_ID, MLXSW_REG_TNPC_LEN);
-enum mlxsw_reg_tnpc_tunnel_port {
- MLXSW_REG_TNPC_TUNNEL_PORT_NVE,
- MLXSW_REG_TNPC_TUNNEL_PORT_VPLS,
- MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL0,
- MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL1,
-};
-
/* reg_tnpc_tunnel_port
* Tunnel port.
* Access: Index
@@ -10558,7 +10848,7 @@ MLXSW_ITEM32(reg, tnpc, learn_enable_v6, 0x04, 1, 1);
MLXSW_ITEM32(reg, tnpc, learn_enable_v4, 0x04, 0, 1);
static inline void mlxsw_reg_tnpc_pack(char *payload,
- enum mlxsw_reg_tnpc_tunnel_port tport,
+ enum mlxsw_reg_tunnel_port tport,
bool learn_enable)
{
MLXSW_REG_ZERO(tnpc, payload);
@@ -11127,9 +11417,11 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(slcor),
MLXSW_REG(spmlr),
MLXSW_REG(svfa),
+ MLXSW_REG(spvtr),
MLXSW_REG(svpe),
MLXSW_REG(sfmr),
MLXSW_REG(spvmlr),
+ MLXSW_REG(spvc),
MLXSW_REG(cwtp),
MLXSW_REG(cwtpm),
MLXSW_REG(pgcr),
@@ -11195,6 +11487,9 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rigr2),
MLXSW_REG(recr2),
MLXSW_REG(rmft2),
+ MLXSW_REG(xralta),
+ MLXSW_REG(xralst),
+ MLXSW_REG(xraltb),
MLXSW_REG(mfcr),
MLXSW_REG(mfsc),
MLXSW_REG(mfsm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index b08853f71b2b..df8175cd44ab 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -45,7 +45,7 @@
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2008
-#define MLXSW_SP1_FWREV_SUBMINOR 1310
+#define MLXSW_SP1_FWREV_SUBMINOR 2018
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -62,7 +62,7 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2008
-#define MLXSW_SP2_FWREV_SUBMINOR 1310
+#define MLXSW_SP2_FWREV_SUBMINOR 2018
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
.major = MLXSW_SP2_FWREV_MAJOR,
@@ -77,7 +77,7 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
#define MLXSW_SP3_FWREV_MAJOR 30
#define MLXSW_SP3_FWREV_MINOR 2008
-#define MLXSW_SP3_FWREV_SUBMINOR 1310
+#define MLXSW_SP3_FWREV_SUBMINOR 2018
static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
.major = MLXSW_SP3_FWREV_MAJOR,
@@ -384,13 +384,37 @@ int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
return err;
}
+int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
+{
+ switch (ethtype) {
+ case ETH_P_8021Q:
+ *p_sver_type = 0;
+ break;
+ case ETH_P_8021AD:
+ *p_sver_type = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid)
+ u16 vid, u16 ethtype)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char spvid_pl[MLXSW_REG_SPVID_LEN];
+ u8 sver_type;
+ int err;
+
+ err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
+ if (err)
+ return err;
+
+ mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
+ sver_type);
- mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}
@@ -404,7 +428,8 @@ static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}
-int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ u16 ethtype)
{
int err;
@@ -413,7 +438,7 @@ int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
if (err)
return err;
} else {
- err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
+ err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
if (err)
return err;
err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
@@ -425,7 +450,7 @@ int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
return 0;
err_port_allow_untagged_set:
- __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
+ __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
return err;
}
@@ -1386,6 +1411,19 @@ static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_po
return 0;
}
+int
+mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool is_8021ad_tagged,
+ bool is_8021q_tagged)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char spvc_pl[MLXSW_REG_SPVC_LEN];
+
+ mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
+ is_8021ad_tagged, is_8021q_tagged);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
+}
+
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u8 split_base_local_port,
struct mlxsw_sp_port_mapping *port_mapping)
@@ -1575,7 +1613,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_nve_init;
}
- err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
+ err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
+ ETH_P_8021Q);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
mlxsw_sp_port->local_port);
@@ -1592,6 +1631,16 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
}
mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
+ /* Set SPVC.et0=true and SPVC.et1=false to make the local port treat
+ * only packets with an 802.1q header as tagged packets.
+ */
+ err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
+ local_port);
+ goto err_port_vlan_classification_set;
+ }
+
INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
mlxsw_sp->ptp_ops->shaper_work);
@@ -1618,6 +1667,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
err_register_netdev:
err_port_overheat_init_val_set:
+ mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
+err_port_vlan_classification_set:
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
@@ -1664,6 +1715,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
+ mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
mlxsw_sp_port_nve_fini(mlxsw_sp_port);
@@ -3543,7 +3595,8 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
- struct net_device *lag_dev)
+ struct net_device *lag_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_upper *lag;
@@ -3579,8 +3632,20 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
if (mlxsw_sp_port->default_vlan->fid)
mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
+ /* Join a router interface configured on the LAG, if one exists */
+ err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan,
+ lag_dev, extack);
+ if (err)
+ goto err_router_join;
+
return 0;
+err_router_join:
+ lag->ref_count--;
+ mlxsw_sp_port->lagged = 0;
+ mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
+ mlxsw_sp_port->local_port);
+ mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
if (!lag->ref_count)
mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
@@ -3618,7 +3683,8 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
lag->ref_count--;
/* Make sure untagged frames are allowed to ingress */
- mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
+ ETH_P_8021Q);
}
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -3840,6 +3906,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
struct net_device *upper_dev;
struct mlxsw_sp *mlxsw_sp;
int err = 0;
+ u16 proto;
mlxsw_sp_port = netdev_priv(dev);
mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -3897,6 +3964,36 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
return -EINVAL;
}
+ if (netif_is_bridge_master(upper_dev)) {
+ br_vlan_get_proto(upper_dev, &proto);
+ if (br_vlan_enabled(upper_dev) &&
+ proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
+ NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
+ return -EOPNOTSUPP;
+ }
+ if (vlan_uses_dev(lower_dev) &&
+ br_vlan_enabled(upper_dev) &&
+ proto == ETH_P_8021AD) {
+ NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+ if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
+ struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);
+
+ if (br_vlan_enabled(br_dev)) {
+ br_vlan_get_proto(br_dev, &proto);
+ if (proto == ETH_P_8021AD) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
+ return -EOPNOTSUPP;
+ }
+ }
+ }
+ if (is_vlan_dev(upper_dev) &&
+ ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
+ return -EOPNOTSUPP;
+ }
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
@@ -3913,7 +4010,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
} else if (netif_is_lag_master(upper_dev)) {
if (info->linking) {
err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
- upper_dev);
+ upper_dev, extack);
} else {
mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
mlxsw_sp_port_lag_leave(mlxsw_sp_port,
@@ -4162,6 +4259,7 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
struct netdev_notifier_changeupper_info *info = ptr;
struct netlink_ext_ack *extack;
struct net_device *upper_dev;
+ u16 proto;
if (!mlxsw_sp)
return 0;
@@ -4177,6 +4275,18 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
}
if (!info->linking)
break;
+ if (br_vlan_enabled(br_dev)) {
+ br_vlan_get_proto(br_dev, &proto);
+ if (proto == ETH_P_8021AD) {
+ NL_SET_ERR_MSG_MOD(extack, "Uppers are not supported on top of an 802.1ad bridge");
+ return -EOPNOTSUPP;
+ }
+ }
+ if (is_vlan_dev(upper_dev) &&
+ ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
+ return -EOPNOTSUPP;
+ }
if (netif_is_macvlan(upper_dev) &&
!mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 74b3959b36d4..a6956cfc9cb1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -4,6 +4,7 @@
#ifndef _MLXSW_SPECTRUM_H
#define _MLXSW_SPECTRUM_H
+#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/rhashtable.h>
@@ -427,6 +428,10 @@ int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
int prio, char *ppcnt_pl);
int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool is_up);
+int
+mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool is_8021ad_tagged,
+ bool is_8021q_tagged);
/* spectrum_buffers.c */
struct mlxsw_sp_hdroom_prio {
@@ -579,7 +584,9 @@ int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable);
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
bool learn_enable);
-int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type);
+int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ u16 ethtype);
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
@@ -650,6 +657,10 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
struct net_device *l3_dev,
unsigned long event,
struct netdev_notifier_info *info);
+int
+mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
+ struct net_device *l3_dev,
+ struct netlink_ext_ack *extack);
void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
@@ -1192,6 +1203,7 @@ struct mlxsw_sp_nve_params {
enum mlxsw_sp_nve_type type;
__be32 vni;
const struct net_device *dev;
+ u16 ethertype;
};
extern const struct mlxsw_sp_nve_ops *mlxsw_sp1_nve_ops_arr[];
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index daf029931b5f..ed81d4fa48ac 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -913,7 +913,8 @@ static u64 mlxsw_sp_dpipe_table_adj_size(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router)
if (mlxsw_sp_nexthop_offload(nh) &&
- !mlxsw_sp_nexthop_group_has_ipip(nh))
+ !mlxsw_sp_nexthop_group_has_ipip(nh) &&
+ !mlxsw_sp_nexthop_is_discard(nh))
size++;
return size;
}
@@ -1105,7 +1106,8 @@ start_again:
nh_count = 0;
mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) {
if (!mlxsw_sp_nexthop_offload(nh) ||
- mlxsw_sp_nexthop_group_has_ipip(nh))
+ mlxsw_sp_nexthop_group_has_ipip(nh) ||
+ mlxsw_sp_nexthop_is_discard(nh))
continue;
if (nh_count < nh_skip)
@@ -1186,7 +1188,8 @@ static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable)
mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) {
if (!mlxsw_sp_nexthop_offload(nh) ||
- mlxsw_sp_nexthop_group_has_ipip(nh))
+ mlxsw_sp_nexthop_group_has_ipip(nh) ||
+ mlxsw_sp_nexthop_is_discard(nh))
continue;
mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_size,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index a8525992528f..6ccca39bae84 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -142,9 +142,9 @@ mlxsw_sp_ipip_nexthop_update_gre4(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
}
static int
-mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(struct mlxsw_sp *mlxsw_sp,
- u32 tunnel_index,
- struct mlxsw_sp_ipip_entry *ipip_entry)
+mlxsw_sp_ipip_decap_config_gre4(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ u32 tunnel_index)
{
u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb);
@@ -180,41 +180,6 @@ mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(struct mlxsw_sp *mlxsw_sp,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
}
-static int
-mlxsw_sp_ipip_fib_entry_op_gre4_ralue(struct mlxsw_sp *mlxsw_sp,
- u32 dip, u8 prefix_len, u16 ul_vr_id,
- enum mlxsw_reg_ralue_op op,
- u32 tunnel_index)
-{
- char ralue_pl[MLXSW_REG_RALUE_LEN];
-
- mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4, op,
- ul_vr_id, prefix_len, dip);
- mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl, tunnel_index);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
-}
-
-static int mlxsw_sp_ipip_fib_entry_op_gre4(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_ipip_entry *ipip_entry,
- enum mlxsw_reg_ralue_op op,
- u32 tunnel_index)
-{
- u16 ul_vr_id = mlxsw_sp_ipip_lb_ul_vr_id(ipip_entry->ol_lb);
- __be32 dip;
- int err;
-
- err = mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(mlxsw_sp, tunnel_index,
- ipip_entry);
- if (err)
- return err;
-
- dip = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV4,
- ipip_entry->ol_dev).addr4;
- return mlxsw_sp_ipip_fib_entry_op_gre4_ralue(mlxsw_sp, be32_to_cpu(dip),
- 32, ul_vr_id, op,
- tunnel_index);
-}
-
static bool mlxsw_sp_ipip_tunnel_complete(enum mlxsw_sp_l3proto proto,
const struct net_device *ol_dev)
{
@@ -231,8 +196,7 @@ static bool mlxsw_sp_ipip_tunnel_complete(enum mlxsw_sp_l3proto proto,
}
static bool mlxsw_sp_ipip_can_offload_gre4(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *ol_dev,
- enum mlxsw_sp_l3proto ol_proto)
+ const struct net_device *ol_dev)
{
struct ip_tunnel *tunnel = netdev_priv(ol_dev);
__be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */
@@ -331,7 +295,7 @@ static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = {
.dev_type = ARPHRD_IPGRE,
.ul_proto = MLXSW_SP_L3_PROTO_IPV4,
.nexthop_update = mlxsw_sp_ipip_nexthop_update_gre4,
- .fib_entry_op = mlxsw_sp_ipip_fib_entry_op_gre4,
+ .decap_config = mlxsw_sp_ipip_decap_config_gre4,
.can_offload = mlxsw_sp_ipip_can_offload_gre4,
.ol_loopback_config = mlxsw_sp_ipip_ol_loopback_config_gre4,
.ol_netdev_change = mlxsw_sp_ipip_ol_netdev_change_gre4,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
index bb5c4d4a5872..87bef9880e5e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -43,17 +43,15 @@ struct mlxsw_sp_ipip_ops {
struct mlxsw_sp_ipip_entry *ipip_entry);
bool (*can_offload)(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *ol_dev,
- enum mlxsw_sp_l3proto ol_proto);
+ const struct net_device *ol_dev);
/* Return a configuration for creating an overlay loopback RIF. */
struct mlxsw_sp_rif_ipip_lb_config
(*ol_loopback_config)(struct mlxsw_sp *mlxsw_sp,
const struct net_device *ol_dev);
- int (*fib_entry_op)(struct mlxsw_sp *mlxsw_sp,
+ int (*decap_config)(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
- enum mlxsw_reg_ralue_op op,
u32 tunnel_index);
int (*ol_netdev_change)(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index 47eb751a2570..7846a21555ef 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -15,7 +15,7 @@ struct mlxsw_sp_mr {
struct list_head table_list;
struct mutex table_list_lock; /* Protects table_list */
#define MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL 5000 /* ms */
- unsigned long priv[0];
+ unsigned long priv[];
/* priv has to always be the last item */
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index 54d3e7dcd303..e5ec595593f4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -368,7 +368,7 @@ mlxsw_sp_nve_mc_record_refresh(struct mlxsw_sp_nve_mc_record *mc_record)
next_valid = true;
}
- mlxsw_reg_tnumt_pack(tnumt_pl, type, MLXSW_REG_TNUMT_TUNNEL_PORT_NVE,
+ mlxsw_reg_tnumt_pack(tnumt_pl, type, MLXSW_REG_TUNNEL_PORT_NVE,
mc_record->kvdl_index, next_valid,
next_kvdl_index, mc_record->num_entries);
@@ -798,11 +798,11 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
ops = nve->nve_ops_arr[params->type];
- if (!ops->can_offload(nve, params->dev, extack))
+ if (!ops->can_offload(nve, params, extack))
return -EINVAL;
memset(&config, 0, sizeof(config));
- ops->nve_config(nve, params->dev, &config);
+ ops->nve_config(nve, params, &config);
if (nve->num_nve_tunnels &&
memcmp(&config, &nve->config, sizeof(config))) {
NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
index 12f664f42f21..2796d3659979 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
@@ -18,6 +18,7 @@ struct mlxsw_sp_nve_config {
u32 ul_tb_id;
enum mlxsw_sp_l3proto ul_proto;
union mlxsw_sp_l3addr ul_sip;
+ u16 ethertype;
};
struct mlxsw_sp_nve {
@@ -35,10 +36,10 @@ struct mlxsw_sp_nve {
struct mlxsw_sp_nve_ops {
enum mlxsw_sp_nve_type type;
bool (*can_offload)(const struct mlxsw_sp_nve *nve,
- const struct net_device *dev,
+ const struct mlxsw_sp_nve_params *params,
struct netlink_ext_ack *extack);
void (*nve_config)(const struct mlxsw_sp_nve *nve,
- const struct net_device *dev,
+ const struct mlxsw_sp_nve_params *params,
struct mlxsw_sp_nve_config *config);
int (*init)(struct mlxsw_sp_nve *nve,
const struct mlxsw_sp_nve_config *config);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
index 05517c7feaa5..3e2bb22e9ca6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -22,10 +22,10 @@
VXLAN_F_LEARN)
static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
- const struct net_device *dev,
+ const struct mlxsw_sp_nve_params *params,
struct netlink_ext_ack *extack)
{
- struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_dev *vxlan = netdev_priv(params->dev);
struct vxlan_config *cfg = &vxlan->cfg;
if (cfg->saddr.sa.sa_family != AF_INET) {
@@ -86,11 +86,23 @@ static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
return true;
}
+static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
+ const struct mlxsw_sp_nve_params *params,
+ struct netlink_ext_ack *extack)
+{
+ if (params->ethertype == ETH_P_8021AD) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: 802.1ad bridge is not supported with VxLAN");
+ return false;
+ }
+
+ return mlxsw_sp_nve_vxlan_can_offload(nve, params, extack);
+}
+
static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
- const struct net_device *dev,
+ const struct mlxsw_sp_nve_params *params,
struct mlxsw_sp_nve_config *config)
{
- struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_dev *vxlan = netdev_priv(params->dev);
struct vxlan_config *cfg = &vxlan->cfg;
config->type = MLXSW_SP_NVE_TYPE_VXLAN;
@@ -101,6 +113,7 @@ static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
config->ul_proto = MLXSW_SP_L3_PROTO_IPV4;
config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr;
config->udp_dport = cfg->dst_port;
+ config->ethertype = params->ethertype;
}
static int __mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
@@ -286,7 +299,7 @@ mlxsw_sp_nve_vxlan_clear_offload(const struct net_device *nve_dev, __be32 vni)
const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
.type = MLXSW_SP_NVE_TYPE_VXLAN,
- .can_offload = mlxsw_sp_nve_vxlan_can_offload,
+ .can_offload = mlxsw_sp1_nve_vxlan_can_offload,
.nve_config = mlxsw_sp_nve_vxlan_config,
.init = mlxsw_sp1_nve_vxlan_init,
.fini = mlxsw_sp1_nve_vxlan_fini,
@@ -299,16 +312,35 @@ static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
{
char tnpc_pl[MLXSW_REG_TNPC_LEN];
- mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TNPC_TUNNEL_PORT_NVE,
+ mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TUNNEL_PORT_NVE,
learning_en);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnpc), tnpc_pl);
}
static int
+mlxsw_sp2_nve_decap_ethertype_set(struct mlxsw_sp *mlxsw_sp, u16 ethertype)
+{
+ char spvid_pl[MLXSW_REG_SPVID_LEN] = {};
+ u8 sver_type;
+ int err;
+
+ mlxsw_reg_spvid_tport_set(spvid_pl, true);
+ mlxsw_reg_spvid_local_port_set(spvid_pl,
+ MLXSW_REG_TUNNEL_PORT_NVE);
+ err = mlxsw_sp_ethtype_to_sver_type(ethertype, &sver_type);
+ if (err)
+ return err;
+
+ mlxsw_reg_spvid_et_vlan_set(spvid_pl, sver_type);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
+}
+
+static int
mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_nve_config *config)
{
char tngcr_pl[MLXSW_REG_TNGCR_LEN];
+ char spvtr_pl[MLXSW_REG_SPVTR_LEN];
u16 ul_rif_index;
int err;
@@ -329,8 +361,25 @@ mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_tngcr_write;
+ mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
+ MLXSW_REG_SPVTR_IPVID_MODE_ALWAYS_PUSH_VLAN);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
+ if (err)
+ goto err_spvtr_write;
+
+ err = mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp, config->ethertype);
+ if (err)
+ goto err_decap_ethertype_set;
+
return 0;
+err_decap_ethertype_set:
+ mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
+ MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
+err_spvtr_write:
+ mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
err_tngcr_write:
mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
err_vxlan_learning_set:
@@ -340,8 +389,14 @@ err_vxlan_learning_set:
static void mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
{
+ char spvtr_pl[MLXSW_REG_SPVTR_LEN];
char tngcr_pl[MLXSW_REG_TNGCR_LEN];
+ /* Set default EtherType */
+ mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp, ETH_P_8021Q);
+ mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
+ MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index ca8090a28dec..d6e9ecb14681 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -828,10 +828,10 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
goto err_hashtable_init;
/* Deliver these message types as PTP0. */
- message_type = BIT(MLXSW_SP_PTP_MESSAGE_TYPE_SYNC) |
- BIT(MLXSW_SP_PTP_MESSAGE_TYPE_DELAY_REQ) |
- BIT(MLXSW_SP_PTP_MESSAGE_TYPE_PDELAY_REQ) |
- BIT(MLXSW_SP_PTP_MESSAGE_TYPE_PDELAY_RESP);
+ message_type = BIT(PTP_MSGTYPE_SYNC) |
+ BIT(PTP_MSGTYPE_DELAY_REQ) |
+ BIT(PTP_MSGTYPE_PDELAY_REQ) |
+ BIT(PTP_MSGTYPE_PDELAY_RESP);
err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
message_type);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index 8c386571afce..1d43a3755285 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -11,13 +11,6 @@ struct mlxsw_sp;
struct mlxsw_sp_port;
struct mlxsw_sp_ptp_clock;
-enum {
- MLXSW_SP_PTP_MESSAGE_TYPE_SYNC,
- MLXSW_SP_PTP_MESSAGE_TYPE_DELAY_REQ,
- MLXSW_SP_PTP_MESSAGE_TYPE_PDELAY_REQ,
- MLXSW_SP_PTP_MESSAGE_TYPE_PDELAY_RESP,
-};
-
static inline int mlxsw_sp_ptp_get_ts_info_noptp(struct ethtool_ts_info *info)
{
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 4381f8c6c3fb..d671d961fc33 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -352,6 +352,7 @@ enum mlxsw_sp_fib_entry_type {
MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};
+struct mlxsw_sp_nexthop_group_info;
struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib_entry;
@@ -368,18 +369,71 @@ struct mlxsw_sp_fib_entry_decap {
u32 tunnel_index;
};
+static struct mlxsw_sp_fib_entry_priv *
+mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
+{
+ struct mlxsw_sp_fib_entry_priv *priv;
+
+ if (!ll_ops->fib_entry_priv_size)
+ /* No need to have priv */
+ return NULL;
+
+ priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size, GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+ refcount_set(&priv->refcnt, 1);
+ return priv;
+}
+
+static void
+mlxsw_sp_fib_entry_priv_destroy(struct mlxsw_sp_fib_entry_priv *priv)
+{
+ kfree(priv);
+}
+
+static void mlxsw_sp_fib_entry_priv_hold(struct mlxsw_sp_fib_entry_priv *priv)
+{
+ refcount_inc(&priv->refcnt);
+}
+
+static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
+{
+ if (!priv || !refcount_dec_and_test(&priv->refcnt))
+ return;
+ mlxsw_sp_fib_entry_priv_destroy(priv);
+}
+
+static void mlxsw_sp_fib_entry_op_ctx_priv_hold(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_fib_entry_priv *priv)
+{
+ if (!priv)
+ return;
+ mlxsw_sp_fib_entry_priv_hold(priv);
+ list_add(&priv->list, &op_ctx->fib_entry_priv_list);
+}
+
+static void mlxsw_sp_fib_entry_op_ctx_priv_put_all(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
+{
+ struct mlxsw_sp_fib_entry_priv *priv, *tmp;
+
+ list_for_each_entry_safe(priv, tmp, &op_ctx->fib_entry_priv_list, list)
+ mlxsw_sp_fib_entry_priv_put(priv);
+ INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
+}
+
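A hedged sketch of the intended lifecycle of these priv helpers; the real call sites sit outside the quoted hunks, and example_entry_init() is a hypothetical wrapper:

	static int example_entry_init(struct mlxsw_sp_fib *fib,
				      struct mlxsw_sp_fib_entry *fib_entry,
				      struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
	{
		/* May legitimately return NULL when the low-level ops
		 * declare no fib_entry_priv_size.
		 */
		fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib->ll_ops);
		if (IS_ERR(fib_entry->priv))
			return PTR_ERR(fib_entry->priv);

		/* An operation context holds every priv it touches and
		 * drops all of the holds in bulk once it is flushed.
		 */
		mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv);
		mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);

		/* The final reference goes away with the entry itself. */
		mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
		return 0;
	}
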
struct mlxsw_sp_fib_entry {
struct mlxsw_sp_fib_node *fib_node;
enum mlxsw_sp_fib_entry_type type;
struct list_head nexthop_group_node;
struct mlxsw_sp_nexthop_group *nh_group;
struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
+ struct mlxsw_sp_fib_entry_priv *priv;
};
struct mlxsw_sp_fib4_entry {
struct mlxsw_sp_fib_entry common;
+ struct fib_info *fi;
u32 tb_id;
- u32 prio;
u8 tos;
u8 type;
};
@@ -409,6 +463,7 @@ struct mlxsw_sp_fib {
struct mlxsw_sp_vr *vr;
struct mlxsw_sp_lpm_tree *lpm_tree;
enum mlxsw_sp_l3proto proto;
+ const struct mlxsw_sp_router_ll_ops *ll_ops;
};
struct mlxsw_sp_vr {
@@ -422,12 +477,31 @@ struct mlxsw_sp_vr {
refcount_t ul_rif_refcnt;
};
+static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
+{
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
+ xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
+}
+
+static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
+{
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
+ xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
+}
+
+static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
+{
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
+ xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
+}
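
These three wrappers strip the XRALXX envelope and write the embedded legacy register, which implies an ops table with matching hooks (the struct definition itself is outside the quoted hunks); a hedged sketch of the wiring, inferred from the ll_ops->ralta_write()/ralst_write()/raltb_write() call sites below:

	static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
		.ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
		.ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
		.raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
	};
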
+
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_vr *vr,
enum mlxsw_sp_l3proto proto)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
struct mlxsw_sp_lpm_tree *lpm_tree;
struct mlxsw_sp_fib *fib;
int err;
@@ -443,6 +517,7 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
fib->proto = proto;
fib->vr = vr;
fib->lpm_tree = lpm_tree;
+ fib->ll_ops = ll_ops;
mlxsw_sp_lpm_tree_hold(lpm_tree);
err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
if (err)
@@ -481,33 +556,36 @@ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
}
static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char ralta_pl[MLXSW_REG_RALTA_LEN];
+ char xralta_pl[MLXSW_REG_XRALTA_LEN];
- mlxsw_reg_ralta_pack(ralta_pl, true,
- (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
- lpm_tree->id);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+ mlxsw_reg_xralta_pack(xralta_pl, true,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
+ return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
}
static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char ralta_pl[MLXSW_REG_RALTA_LEN];
+ char xralta_pl[MLXSW_REG_XRALTA_LEN];
- mlxsw_reg_ralta_pack(ralta_pl, false,
- (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
- lpm_tree->id);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+ mlxsw_reg_xralta_pack(xralta_pl, false,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
+ ll_ops->ralta_write(mlxsw_sp, xralta_pl);
}
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_prefix_usage *prefix_usage,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char ralst_pl[MLXSW_REG_RALST_LEN];
+ char xralst_pl[MLXSW_REG_XRALST_LEN];
u8 root_bin = 0;
u8 prefix;
u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
@@ -515,19 +593,20 @@ mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
root_bin = prefix;
- mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
+ mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
if (prefix == 0)
continue;
- mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
- MLXSW_REG_RALST_BIN_NO_CHILD);
+ mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
+ MLXSW_REG_RALST_BIN_NO_CHILD);
last_prefix = prefix;
}
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
+ return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
}
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_prefix_usage *prefix_usage,
enum mlxsw_sp_l3proto proto)
{
@@ -538,12 +617,11 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
if (!lpm_tree)
return ERR_PTR(-EBUSY);
lpm_tree->proto = proto;
- err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
+ err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
if (err)
return ERR_PTR(err);
- err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
- lpm_tree);
+ err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
if (err)
goto err_left_struct_set;
memcpy(&lpm_tree->prefix_usage, prefix_usage,
@@ -554,14 +632,15 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
return lpm_tree;
err_left_struct_set:
- mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
return ERR_PTR(err);
}
static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
}
static struct mlxsw_sp_lpm_tree *
@@ -569,6 +648,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_prefix_usage *prefix_usage,
enum mlxsw_sp_l3proto proto)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
@@ -582,7 +662,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
return lpm_tree;
}
}
- return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
+ return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
}
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
@@ -593,8 +673,11 @@ static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops =
+ mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];
+
if (--lpm_tree->ref_count == 0)
- mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
+ mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
}
#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
@@ -684,23 +767,23 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fib *fib, u8 tree_id)
{
- char raltb_pl[MLXSW_REG_RALTB_LEN];
+ char xraltb_pl[MLXSW_REG_XRALTB_LEN];
- mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
- (enum mlxsw_reg_ralxx_protocol) fib->proto,
- tree_id);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+ mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto,
+ tree_id);
+ return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
}
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fib *fib)
{
- char raltb_pl[MLXSW_REG_RALTB_LEN];
+ char xraltb_pl[MLXSW_REG_XRALTB_LEN];
/* Bind to tree 0 which is default */
- mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
- (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+ mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
+ return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
}
static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
@@ -1270,21 +1353,33 @@ mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
-mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *ul_dev,
+mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
enum mlxsw_sp_l3proto ul_proto,
union mlxsw_sp_l3addr ul_dip)
{
- struct mlxsw_sp_ipip_entry *ipip_entry;
+ struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
+ struct net_device *ul_dev;
+
+ rcu_read_lock();
+
+ ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
+ if (!ul_dev)
+ goto out_unlock;
list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
ipip_list_node)
if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
ul_proto, ul_dip,
ipip_entry))
- return ipip_entry;
+ goto out_unlock;
+
+ rcu_read_unlock();
return NULL;
+
+out_unlock:
+ rcu_read_unlock();
+ return ipip_entry;
}
static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
@@ -1370,11 +1465,7 @@ static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_ipip_ops *ops
= mlxsw_sp->router->ipip_ops_arr[ipipt];
- /* For deciding whether decap should be offloaded, we don't care about
- * overlay protocol, so ask whether either one is supported.
- */
- return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
- ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
+ return ops->can_offload(mlxsw_sp, ol_dev);
}
static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
@@ -2749,10 +2840,11 @@ struct mlxsw_sp_nexthop {
struct list_head neigh_list_node; /* member of neigh entry list */
struct list_head rif_list_node;
struct list_head router_list_node;
- struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
- * this belongs to
- */
+ struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
+ * this nexthop belongs to
+ */
struct rhash_head ht_node;
+ struct neigh_table *neigh_tbl;
struct mlxsw_sp_nexthop_key key;
unsigned char gw_addr[sizeof(struct in6_addr)];
int ifindex;
@@ -2766,9 +2858,10 @@ struct mlxsw_sp_nexthop {
offloaded:1, /* set in case the neigh is actually put into
* KVD linear area of this group.
*/
- update:1; /* set indicates that MAC of this neigh should be
+ update:1, /* set indicates that MAC of this neigh should be
* updated in HW
*/
+ discard:1; /* nexthop is programmed to discard packets */
enum mlxsw_sp_nexthop_type type;
union {
struct mlxsw_sp_neigh_entry *neigh_entry;
@@ -2778,21 +2871,54 @@ struct mlxsw_sp_nexthop {
bool counter_valid;
};
-struct mlxsw_sp_nexthop_group {
- void *priv;
- struct rhash_head ht_node;
- struct list_head fib_list; /* list of fib entries that use this group */
- struct neigh_table *neigh_tbl;
- u8 adj_index_valid:1,
- gateway:1; /* routes using the group use a gateway */
+enum mlxsw_sp_nexthop_group_type {
+ MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
+ MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
+ MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
+};
+
+struct mlxsw_sp_nexthop_group_info {
+ struct mlxsw_sp_nexthop_group *nh_grp;
u32 adj_index;
u16 ecmp_size;
u16 count;
int sum_norm_weight;
+ u8 adj_index_valid:1,
+ gateway:1; /* routes using the group use a gateway */
struct mlxsw_sp_nexthop nexthops[0];
#define nh_rif nexthops[0].rif
};
+struct mlxsw_sp_nexthop_group_vr_key {
+ u16 vr_id;
+ enum mlxsw_sp_l3proto proto;
+};
+
+struct mlxsw_sp_nexthop_group_vr_entry {
+ struct list_head list; /* member in vr_list */
+ struct rhash_head ht_node; /* member in vr_ht */
+ refcount_t ref_count;
+ struct mlxsw_sp_nexthop_group_vr_key key;
+};
+
+struct mlxsw_sp_nexthop_group {
+ struct rhash_head ht_node;
+ struct list_head fib_list; /* list of fib entries that use this group */
+ union {
+ struct {
+ struct fib_info *fi;
+ } ipv4;
+ struct {
+ u32 id;
+ } obj;
+ };
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct list_head vr_list;
+ struct rhashtable vr_ht;
+ enum mlxsw_sp_nexthop_group_type type;
+ bool can_destroy;
+};
+
void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
@@ -2858,18 +2984,18 @@ unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
u32 *p_adj_size, u32 *p_adj_hash_index)
{
- struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
u32 adj_hash_index = 0;
int i;
- if (!nh->offloaded || !nh_grp->adj_index_valid)
+ if (!nh->offloaded || !nhgi->adj_index_valid)
return -EINVAL;
- *p_adj_index = nh_grp->adj_index;
- *p_adj_size = nh_grp->ecmp_size;
+ *p_adj_index = nhgi->adj_index;
+ *p_adj_size = nhgi->ecmp_size;
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
+ for (i = 0; i < nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
if (nh_iter == nh)
break;
@@ -2888,11 +3014,11 @@ struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
{
- struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
int i;
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
+ for (i = 0; i < nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
return true;
@@ -2900,17 +3026,107 @@ bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
return false;
}
-static struct fib_info *
-mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
+bool mlxsw_sp_nexthop_is_discard(const struct mlxsw_sp_nexthop *nh)
+{
+ return nh->discard;
+}
+
+static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
+ .head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
+ .automatic_shrinking = true,
+};
+
+static struct mlxsw_sp_nexthop_group_vr_entry *
+mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
+ const struct mlxsw_sp_fib *fib)
+{
+ struct mlxsw_sp_nexthop_group_vr_key key;
+
+ memset(&key, 0, sizeof(key));
+ key.vr_id = fib->vr->id;
+ key.proto = fib->proto;
+ return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
+ mlxsw_sp_nexthop_group_vr_ht_params);
+}
+
+static int
+mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
+ const struct mlxsw_sp_fib *fib)
+{
+ struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
+ int err;
+
+ vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
+ if (!vr_entry)
+ return -ENOMEM;
+
+ vr_entry->key.vr_id = fib->vr->id;
+ vr_entry->key.proto = fib->proto;
+ refcount_set(&vr_entry->ref_count, 1);
+
+ err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
+ mlxsw_sp_nexthop_group_vr_ht_params);
+ if (err)
+ goto err_hashtable_insert;
+
+ list_add(&vr_entry->list, &nh_grp->vr_list);
+
+ return 0;
+
+err_hashtable_insert:
+ kfree(vr_entry);
+ return err;
+}
+
+static void
+mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
+{
+ list_del(&vr_entry->list);
+ rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
+ mlxsw_sp_nexthop_group_vr_ht_params);
+ kfree(vr_entry);
+}
+
+static int
+mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
+ const struct mlxsw_sp_fib *fib)
{
- return nh_grp->priv;
+ struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
+
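+ /* Several FIB entries may link the group to the same virtual router;
+ * keep one refcounted entry per {VR ID, protocol} pair.
+ */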
+ vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
+ if (vr_entry) {
+ refcount_inc(&vr_entry->ref_count);
+ return 0;
+ }
+
+ return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
+}
+
+static void
+mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
+ const struct mlxsw_sp_fib *fib)
+{
+ struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
+
+ vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
+ if (WARN_ON_ONCE(!vr_entry))
+ return;
+
+ if (!refcount_dec_and_test(&vr_entry->ref_count))
+ return;
+
+ mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
}
struct mlxsw_sp_nexthop_group_cmp_arg {
- enum mlxsw_sp_l3proto proto;
+ enum mlxsw_sp_nexthop_group_type type;
union {
struct fib_info *fi;
struct mlxsw_sp_fib6_entry *fib6_entry;
+ u32 id;
};
};
@@ -2921,10 +3137,10 @@ mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
{
int i;
- for (i = 0; i < nh_grp->count; i++) {
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
const struct mlxsw_sp_nexthop *nh;
- nh = &nh_grp->nexthops[i];
+ nh = &nh_grp->nhgi->nexthops[i];
if (nh->ifindex == ifindex && nh->nh_weight == weight &&
ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
return true;
@@ -2939,7 +3155,7 @@ mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
{
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
- if (nh_grp->count != fib6_entry->nrt6)
+ if (nh_grp->nhgi->count != fib6_entry->nrt6)
return false;
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
@@ -2964,24 +3180,23 @@ mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
- switch (cmp_arg->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
- case MLXSW_SP_L3_PROTO_IPV6:
+ if (nh_grp->type != cmp_arg->type)
+ return 1;
+
+ switch (cmp_arg->type) {
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
+ return cmp_arg->fi != nh_grp->ipv4.fi;
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
cmp_arg->fib6_entry);
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
+ return cmp_arg->id != nh_grp->obj.id;
default:
WARN_ON(1);
return 1;
}
}
-static int
-mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
-{
- return nh_grp->neigh_tbl->family;
-}
-
static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
{
const struct mlxsw_sp_nexthop_group *nh_grp = data;
@@ -2990,18 +3205,20 @@ static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
unsigned int val;
int i;
- switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
- case AF_INET:
- fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
+ switch (nh_grp->type) {
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
+ fi = nh_grp->ipv4.fi;
return jhash(&fi, sizeof(fi), seed);
- case AF_INET6:
- val = nh_grp->count;
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
+ val = nh_grp->nhgi->count;
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
+ nh = &nh_grp->nhgi->nexthops[i];
val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
}
return jhash(&val, sizeof(val), seed);
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
+ return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
default:
WARN_ON(1);
return 0;
@@ -3031,11 +3248,13 @@ mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
{
const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
- switch (cmp_arg->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
+ switch (cmp_arg->type) {
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
- case MLXSW_SP_L3_PROTO_IPV6:
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
+ return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
default:
WARN_ON(1);
return 0;
@@ -3052,8 +3271,8 @@ static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
- if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
- !nh_grp->gateway)
+ if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
+ !nh_grp->nhgi->gateway)
return 0;
return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
@@ -3064,8 +3283,8 @@ static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
- if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
- !nh_grp->gateway)
+ if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
+ !nh_grp->nhgi->gateway)
return;
rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
@@ -3079,7 +3298,7 @@ mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
- cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
+ cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
cmp_arg.fi = fi;
return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
&cmp_arg,
@@ -3092,7 +3311,7 @@ mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
- cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
+ cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
cmp_arg.fib6_entry = fib6_entry;
return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
&cmp_arg,
@@ -3128,7 +3347,8 @@ mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_fib *fib,
+ enum mlxsw_sp_l3proto proto,
+ u16 vr_id,
u32 adj_index, u16 ecmp_size,
u32 new_adj_index,
u16 new_ecmp_size)
@@ -3136,8 +3356,8 @@ static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
char raleu_pl[MLXSW_REG_RALEU_LEN];
mlxsw_reg_raleu_pack(raleu_pl,
- (enum mlxsw_reg_ralxx_protocol) fib->proto,
- fib->vr->id, adj_index, ecmp_size, new_adj_index,
+ (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
+ adj_index, ecmp_size, new_adj_index,
new_ecmp_size);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}
@@ -3146,23 +3366,31 @@ static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp,
u32 old_adj_index, u16 old_ecmp_size)
{
- struct mlxsw_sp_fib_entry *fib_entry;
- struct mlxsw_sp_fib *fib = NULL;
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
+ struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
int err;
- list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
- if (fib == fib_entry->fib_node->fib)
- continue;
- fib = fib_entry->fib_node->fib;
- err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
+ list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
+ err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
+ vr_entry->key.proto,
+ vr_entry->key.vr_id,
old_adj_index,
old_ecmp_size,
- nh_grp->adj_index,
- nh_grp->ecmp_size);
+ nhgi->adj_index,
+ nhgi->ecmp_size);
if (err)
- return err;
+ goto err_mass_update_vr;
}
return 0;
+
+err_mass_update_vr:
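+ /* Roll the already-updated virtual routers back to the old
+ * adjacency index.
+ */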
+ list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
+ mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
+ vr_entry->key.vr_id,
+ nhgi->adj_index,
+ nhgi->ecmp_size,
+ old_adj_index, old_ecmp_size);
+ return err;
}
static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
@@ -3173,8 +3401,12 @@ static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
true, MLXSW_REG_RATR_TYPE_ETHERNET,
- adj_index, neigh_entry->rif);
- mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
+ adj_index, nh->rif->rif_index);
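+ /* Blackhole nexthops have no neighbour to resolve; program the
+ * adjacency entry to discard instead of rewriting to a neighbour MAC.
+ */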
+ if (nh->discard)
+ mlxsw_reg_ratr_trap_action_set(ratr_pl,
+ MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
+ else
+ mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
if (nh->counter_valid)
mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
else
@@ -3229,15 +3461,15 @@ static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_nexthop_group_info *nhgi,
bool reallocate)
{
- u32 adj_index = nh_grp->adj_index; /* base */
+ u32 adj_index = nhgi->adj_index; /* base */
struct mlxsw_sp_nexthop *nh;
int i;
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
+ for (i = 0; i < nhgi->count; i++) {
+ nh = &nhgi->nexthops[i];
if (!nh->should_offload) {
nh->offloaded = 0;
@@ -3337,13 +3569,13 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
}
static void
-mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
+mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
{
int i, g = 0, sum_norm_weight = 0;
struct mlxsw_sp_nexthop *nh;
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
+ for (i = 0; i < nhgi->count; i++) {
+ nh = &nhgi->nexthops[i];
if (!nh->should_offload)
continue;
@@ -3353,8 +3585,8 @@ mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
g = nh->nh_weight;
}
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
+ for (i = 0; i < nhgi->count; i++) {
+ nh = &nhgi->nexthops[i];
if (!nh->should_offload)
continue;
@@ -3362,18 +3594,18 @@ mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
sum_norm_weight += nh->norm_nh_weight;
}
- nh_grp->sum_norm_weight = sum_norm_weight;
+ nhgi->sum_norm_weight = sum_norm_weight;
}
static void
-mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
+mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
{
- int total = nh_grp->sum_norm_weight;
- u16 ecmp_size = nh_grp->ecmp_size;
int i, weight = 0, lower_bound = 0;
+ int total = nhgi->sum_norm_weight;
+ u16 ecmp_size = nhgi->ecmp_size;
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+ for (i = 0; i < nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
int upper_bound;
if (!nh->should_offload)
@@ -3395,8 +3627,8 @@ mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
{
int i;
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
if (nh->offloaded)
nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
@@ -3439,39 +3671,59 @@ mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
}
static void
+mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ /* Do not update the flags if the nexthop group is being destroyed
+ * since:
+ * 1. The nexthop object is being deleted, in which case the flags are
+ * irrelevant.
+ * 2. The nexthop group was replaced by a newer group, in which case
+ * the flags of the nexthop object were already updated based on the
+ * new group.
+ */
+ if (nh_grp->can_destroy)
+ return;
+
+ nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
+ nh_grp->nhgi->adj_index_valid, false);
+}
+
+static void
mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
- switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
- case AF_INET:
+ switch (nh_grp->type) {
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
break;
- case AF_INET6:
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
break;
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
+ mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
+ break;
}
}
-static void
+static int
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
u16 ecmp_size, old_ecmp_size;
struct mlxsw_sp_nexthop *nh;
bool offload_change = false;
u32 adj_index;
bool old_adj_index_valid;
+ int i, err2, err = 0;
u32 old_adj_index;
- int i;
- int err;
- if (!nh_grp->gateway) {
- mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
- return;
- }
+ if (!nhgi->gateway)
+ return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
+ for (i = 0; i < nhgi->count; i++) {
+ nh = &nhgi->nexthops[i];
if (nh->should_offload != nh->offloaded) {
offload_change = true;
@@ -3483,21 +3735,21 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
/* Nothing was added or removed, so no need to reallocate. Just
* update MAC on existing adjacency indexes.
*/
- err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
+ err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
goto set_trap;
}
- return;
+ return 0;
}
- mlxsw_sp_nexthop_group_normalize(nh_grp);
- if (!nh_grp->sum_norm_weight)
+ mlxsw_sp_nexthop_group_normalize(nhgi);
+ if (!nhgi->sum_norm_weight)
/* No neigh of this group is connected, so we just set
* the trap and let everything flow through the kernel.
*/
goto set_trap;
- ecmp_size = nh_grp->sum_norm_weight;
+ ecmp_size = nhgi->sum_norm_weight;
err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
if (err)
/* No valid allocation size available. */
@@ -3512,14 +3764,14 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
goto set_trap;
}
- old_adj_index_valid = nh_grp->adj_index_valid;
- old_adj_index = nh_grp->adj_index;
- old_ecmp_size = nh_grp->ecmp_size;
- nh_grp->adj_index_valid = 1;
- nh_grp->adj_index = adj_index;
- nh_grp->ecmp_size = ecmp_size;
- mlxsw_sp_nexthop_group_rebalance(nh_grp);
- err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
+ old_adj_index_valid = nhgi->adj_index_valid;
+ old_adj_index = nhgi->adj_index;
+ old_ecmp_size = nhgi->ecmp_size;
+ nhgi->adj_index_valid = 1;
+ nhgi->adj_index = adj_index;
+ nhgi->ecmp_size = ecmp_size;
+ mlxsw_sp_nexthop_group_rebalance(nhgi);
+ err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
goto set_trap;
@@ -3536,7 +3788,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
goto set_trap;
}
- return;
+ return 0;
}
err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
@@ -3548,22 +3800,23 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
goto set_trap;
}
- return;
+ return 0;
set_trap:
- old_adj_index_valid = nh_grp->adj_index_valid;
- nh_grp->adj_index_valid = 0;
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
+ old_adj_index_valid = nhgi->adj_index_valid;
+ nhgi->adj_index_valid = 0;
+ for (i = 0; i < nhgi->count; i++) {
+ nh = &nhgi->nexthops[i];
nh->offloaded = 0;
}
- err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
- if (err)
+ err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
+ if (err2)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
if (old_adj_index_valid)
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
- nh_grp->ecmp_size, nh_grp->adj_index);
+ nhgi->ecmp_size, nhgi->adj_index);
+ return err;
}
static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
@@ -3589,10 +3842,9 @@ mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
nh = list_first_entry(&neigh_entry->nexthop_list,
struct mlxsw_sp_nexthop, neigh_list_node);
- n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
+ n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
if (!n) {
- n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
- nh->rif->dev);
+ n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
if (IS_ERR(n))
return PTR_ERR(n);
neigh_event_send(n, NULL);
@@ -3615,7 +3867,7 @@ mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
neigh_release(old_n);
neigh_clone(n);
__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
}
neigh_release(n);
@@ -3652,7 +3904,7 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
list_for_each_entry(nh, &neigh_entry->nexthop_list,
neigh_list_node) {
__mlxsw_sp_nexthop_neigh_update(nh, removing);
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
}
}
@@ -3683,7 +3935,7 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
u8 nud_state, dead;
int err;
- if (!nh->nh_grp->gateway || nh->neigh_entry)
+ if (!nh->nhgi->gateway || nh->neigh_entry)
return 0;
/* Take a reference of neigh here ensuring that neigh would
* not be destroyed before the nexthop entry is deleted.
@@ -3691,10 +3943,9 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
* The reference is taken either in neigh_lookup() or
* in neigh_create() in case n is not found.
*/
- n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
+ n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
if (!n) {
- n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
- nh->rif->dev);
+ n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
if (IS_ERR(n))
return PTR_ERR(n);
neigh_event_send(n, NULL);
@@ -3775,7 +4026,7 @@ static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
{
bool removing;
- if (!nh->nh_grp->gateway || nh->ipip_entry)
+ if (!nh->nhgi->gateway || nh->ipip_entry)
return;
nh->ipip_entry = ipip_entry;
@@ -3807,27 +4058,11 @@ static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
}
-static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh)
-{
- switch (nh->type) {
- case MLXSW_SP_NEXTHOP_TYPE_ETH:
- mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
- mlxsw_sp_nexthop_rif_fini(nh);
- break;
- case MLXSW_SP_NEXTHOP_TYPE_IPIP:
- mlxsw_sp_nexthop_rif_fini(nh);
- mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
- break;
- }
-}
-
-static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh,
- struct fib_nh *fib_nh)
+static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh,
+ const struct net_device *dev)
{
const struct mlxsw_sp_ipip_ops *ipip_ops;
- struct net_device *dev = fib_nh->fib_nh_dev;
struct mlxsw_sp_ipip_entry *ipip_entry;
struct mlxsw_sp_rif *rif;
int err;
@@ -3835,8 +4070,7 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
if (ipip_entry) {
ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
- if (ipip_ops->can_offload(mlxsw_sp, dev,
- MLXSW_SP_L3_PROTO_IPV4)) {
+ if (ipip_ops->can_offload(mlxsw_sp, dev)) {
nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
return 0;
@@ -3860,10 +4094,19 @@ err_neigh_init:
return err;
}
-static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh)
+static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
{
- mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
+ switch (nh->type) {
+ case MLXSW_SP_NEXTHOP_TYPE_ETH:
+ mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_rif_fini(nh);
+ break;
+ case MLXSW_SP_NEXTHOP_TYPE_IPIP:
+ mlxsw_sp_nexthop_rif_fini(nh);
+ mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
+ break;
+ }
}
static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
@@ -3875,7 +4118,7 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
struct in_device *in_dev;
int err;
- nh->nh_grp = nh_grp;
+ nh->nhgi = nh_grp->nhgi;
nh->key.fib_nh = fib_nh;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
nh->nh_weight = fib_nh->fib_nh_weight;
@@ -3883,6 +4126,7 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
nh->nh_weight = 1;
#endif
memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
+ nh->neigh_tbl = &arp_tbl;
err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
if (err)
return err;
@@ -3892,6 +4136,7 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
if (!dev)
return 0;
+ nh->ifindex = dev->ifindex;
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
@@ -3902,7 +4147,7 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
}
rcu_read_unlock();
- err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
+ err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
if (err)
goto err_nexthop_neigh_init;
@@ -3916,7 +4161,7 @@ err_nexthop_neigh_init:
static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
- mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
list_del(&nh->router_list_node);
mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
@@ -3938,14 +4183,14 @@ static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
switch (event) {
case FIB_EVENT_NH_ADD:
- mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
+ mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
break;
case FIB_EVENT_NH_DEL:
- mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
break;
}
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
}
static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
@@ -3968,7 +4213,7 @@ static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
}
__mlxsw_sp_nexthop_neigh_update(nh, removing);
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
}
}
@@ -3991,10 +4236,450 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
}
}
+static int
+mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
+ const struct nh_notifier_single_info *nh,
+ struct netlink_ext_ack *extack)
+{
+ int err = -EINVAL;
+
+ if (nh->is_fdb)
+ NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
+ else if (nh->has_encap)
+ NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
+ else
+ err = 0;
+
+ return err;
+}
+
+static int
+mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
+ const struct nh_notifier_grp_info *nh_grp,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ if (nh_grp->is_fdb) {
+ NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nh_grp->num_nh; i++) {
+ const struct nh_notifier_single_info *nh;
+ int err;
+
+ nh = &nh_grp->nh_entries[i].nh;
+ err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh,
+ extack);
+ if (err)
+ return err;
+
+ /* Device-only nexthops with an IPIP device are programmed as
+ * encapsulating adjacency entries.
+ */
+ if (!nh->gw_family && !nh->is_reject &&
+ !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
+ NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
+ unsigned long event,
+ struct nh_notifier_info *info)
+{
+ if (event != NEXTHOP_EVENT_REPLACE)
+ return 0;
+
+ if (!info->is_grp)
+ return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
+ info->extack);
+ return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp, info->nh_grp,
+ info->extack);
+}
+
+static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
+ const struct nh_notifier_info *info)
+{
+ const struct net_device *dev;
+
+ if (info->is_grp)
+ /* Already validated earlier. */
+ return true;
+
+ dev = info->nh->dev;
+ return info->nh->gw_family || info->nh->is_reject ||
+ mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
+}
+
+static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ u16 lb_rif_index = mlxsw_sp->router->lb_rif_index;
+
+ nh->discard = 1;
+ nh->should_offload = 1;
+ /* While nexthops that discard packets do not forward packets
+ * via an egress RIF, they still need to be programmed using a
+ * valid RIF, so use the loopback RIF created during init.
+ */
+ nh->rif = mlxsw_sp->router->rifs[lb_rif_index];
+}
+
+static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
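+ /* The loopback RIF was assigned directly in blackhole_init and was
+ * never linked to a RIF nexthop list, so only the pointer is cleared.
+ */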
+ nh->rif = NULL;
+ nh->should_offload = 0;
+}
+
+static int
+mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_nexthop *nh,
+ struct nh_notifier_single_info *nh_obj, int weight)
+{
+ struct net_device *dev = nh_obj->dev;
+ int err;
+
+ nh->nhgi = nh_grp->nhgi;
+ nh->nh_weight = weight;
+
+ switch (nh_obj->gw_family) {
+ case AF_INET:
+ memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
+ nh->neigh_tbl = &arp_tbl;
+ break;
+ case AF_INET6:
+ memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
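+ /* nd_tbl is only defined when IPv6 support is built in. */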
+#if IS_ENABLED(CONFIG_IPV6)
+ nh->neigh_tbl = &nd_tbl;
+#endif
+ break;
+ }
+
+ mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
+ list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
+ nh->ifindex = dev->ifindex;
+
+ err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
+ if (err)
+ goto err_type_init;
+
+ if (nh_obj->is_reject)
+ mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);
+
+ return 0;
+
+err_type_init:
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ return err;
+}
+
+static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ if (nh->discard)
+ mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+}
+
+static int
+mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ struct nh_notifier_info *info)
+{
+ unsigned int nhs = info->is_grp ? info->nh_grp->num_nh : 1;
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct mlxsw_sp_nexthop *nh;
+ int err, i;
+
+ nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
+ if (!nhgi)
+ return -ENOMEM;
+ nh_grp->nhgi = nhgi;
+ nhgi->nh_grp = nh_grp;
+ nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
+ nhgi->count = nhs;
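+ /* A single nexthop notification is handled as a group of one,
+ * with a default weight of 1.
+ */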
+ for (i = 0; i < nhgi->count; i++) {
+ struct nh_notifier_single_info *nh_obj;
+ int weight;
+
+ nh = &nhgi->nexthops[i];
+ if (info->is_grp) {
+ nh_obj = &info->nh_grp->nh_entries[i].nh;
+ weight = info->nh_grp->nh_entries[i].weight;
+ } else {
+ nh_obj = info->nh;
+ weight = 1;
+ }
+ err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
+ weight);
+ if (err)
+ goto err_nexthop_obj_init;
+ }
+ err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
+ goto err_group_refresh;
+ }
+
+ return 0;
+
+err_group_refresh:
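+ /* The refresh failed after all nexthops were initialized;
+ * unwind every one of them.
+ */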
+ i = nhgi->count;
+err_nexthop_obj_init:
+ for (i--; i >= 0; i--) {
+ nh = &nhgi->nexthops[i];
+ mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
+ }
+ kfree(nhgi);
+ return err;
+}
+
+static void
+mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
+ int i;
+
+ for (i = nhgi->count - 1; i >= 0; i--) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
+
+ mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
+ }
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ WARN_ON_ONCE(nhgi->adj_index_valid);
+ kfree(nhgi);
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
+ struct nh_notifier_info *info)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp;
+ int err;
+
+ nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
+ if (!nh_grp)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&nh_grp->vr_list);
+ err = rhashtable_init(&nh_grp->vr_ht,
+ &mlxsw_sp_nexthop_group_vr_ht_params);
+ if (err)
+ goto err_nexthop_group_vr_ht_init;
+ INIT_LIST_HEAD(&nh_grp->fib_list);
+ nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
+ nh_grp->obj.id = info->id;
+
+ err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
+ if (err)
+ goto err_nexthop_group_info_init;
+
+ nh_grp->can_destroy = false;
+
+ return nh_grp;
+
+err_nexthop_group_info_init:
+ rhashtable_destroy(&nh_grp->vr_ht);
+err_nexthop_group_vr_ht_init:
+ kfree(nh_grp);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ if (!nh_grp->can_destroy)
+ return;
+ mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
+ WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
+ WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
+ rhashtable_destroy(&nh_grp->vr_ht);
+ kfree(nh_grp);
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
+{
+ struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
+
+ cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
+ cmp_arg.id = id;
+ return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
+ &cmp_arg,
+ mlxsw_sp_nexthop_group_ht_params);
+}
+
+static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
+}
+
+static int
+mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_nexthop_group *old_nh_grp,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
+ struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
+ int err;
+
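+ /* Swap the group info: routes keep using 'old_nh_grp', which now
+ * points at the new info, while the temporary 'nh_grp' is left
+ * holding the old info for later disposal.
+ */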
+ old_nh_grp->nhgi = new_nhgi;
+ new_nhgi->nh_grp = old_nh_grp;
+ nh_grp->nhgi = old_nhgi;
+ old_nhgi->nh_grp = nh_grp;
+
+ if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
+ /* Both the old adjacency index and the new one are valid.
+ * Routes are currently using the old one. Tell the device to
+ * replace the old adjacency index with the new one.
+ */
+ err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
+ old_nhgi->adj_index,
+ old_nhgi->ecmp_size);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
+ goto err_out;
+ }
+ } else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
+ /* The old adjacency index is valid, while the new one is not.
+ * Iterate over all the routes using the group and change them
+ * to trap packets to the CPU.
+ */
+ err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
+ goto err_out;
+ }
+ } else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
+ /* The old adjacency index is invalid, while the new one is.
+ * Iterate over all the routes using the group and change them
+ * to forward packets using the new valid index.
+ */
+ err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
+ goto err_out;
+ }
+ }
+
+ /* Make sure the flags are set / cleared based on the new nexthop group
+ * information.
+ */
+ mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);
+
+ /* At this point 'nh_grp' is just a shell that is not used by anyone
+ * and its nexthop group info is the old info that was just replaced
+ * with the new one. Remove it.
+ */
+ nh_grp->can_destroy = true;
+ mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
+
+ return 0;
+
+err_out:
+ old_nhgi->nh_grp = old_nh_grp;
+ nh_grp->nhgi = new_nhgi;
+ new_nhgi->nh_grp = nh_grp;
+ old_nh_grp->nhgi = old_nhgi;
+ return err;
+}
+
+static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
+ struct nh_notifier_info *info)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
+ struct netlink_ext_ack *extack = info->extack;
+ int err;
+
+ nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
+ if (IS_ERR(nh_grp))
+ return PTR_ERR(nh_grp);
+
+ old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
+ if (!old_nh_grp)
+ err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
+ else
+ err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
+ old_nh_grp, extack);
+
+ if (err) {
+ nh_grp->can_destroy = true;
+ mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
+ }
+
+ return err;
+}
+
+static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
+ struct nh_notifier_info *info)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp;
+
+ nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
+ if (!nh_grp)
+ return;
+
+ nh_grp->can_destroy = true;
+ mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
+
+ /* If the group still has routes using it, then defer the delete
+ * operation until the last route using it is deleted.
+ */
+ if (!list_empty(&nh_grp->fib_list))
+ return;
+ mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
+}
+
+static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct nh_notifier_info *info = ptr;
+ struct mlxsw_sp_router *router;
+ int err = 0;
+
+ router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
+ err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
+ if (err)
+ goto out;
+
+ mutex_lock(&router->lock);
+
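+ /* The nexthop notifier currently runs under RTNL (asserted below);
+ * the router lock serializes against other router code paths.
+ */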
+ ASSERT_RTNL();
+
+ switch (event) {
+ case NEXTHOP_EVENT_REPLACE:
+ err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
+ break;
+ case NEXTHOP_EVENT_DEL:
+ mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&router->lock);
+
+out:
+ return notifier_from_errno(err);
+}
+
static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
struct fib_info *fi)
{
@@ -4004,46 +4689,102 @@ static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
}
+static int
+mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct mlxsw_sp_nexthop *nh;
+ int err, i;
+
+ nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
+ if (!nhgi)
+ return -ENOMEM;
+ nh_grp->nhgi = nhgi;
+ nhgi->nh_grp = nh_grp;
+ nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
+ nhgi->count = nhs;
+ for (i = 0; i < nhgi->count; i++) {
+ struct fib_nh *fib_nh;
+
+ nh = &nhgi->nexthops[i];
+ fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
+ err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
+ if (err)
+ goto err_nexthop4_init;
+ }
+ err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ if (err)
+ goto err_group_refresh;
+
+ return 0;
+
+err_group_refresh:
+ i = nhgi->count;
+err_nexthop4_init:
+ for (i--; i >= 0; i--) {
+ nh = &nhgi->nexthops[i];
+ mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
+ }
+ kfree(nhgi);
+ return err;
+}
+
+static void
+mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
+ int i;
+
+ for (i = nhgi->count - 1; i >= 0; i--) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
+
+ mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
+ }
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ WARN_ON_ONCE(nhgi->adj_index_valid);
+ kfree(nhgi);
+}
+
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
- unsigned int nhs = fib_info_num_path(fi);
struct mlxsw_sp_nexthop_group *nh_grp;
- struct mlxsw_sp_nexthop *nh;
- struct fib_nh *fib_nh;
- int i;
int err;
- nh_grp = kzalloc(struct_size(nh_grp, nexthops, nhs), GFP_KERNEL);
+ nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
if (!nh_grp)
return ERR_PTR(-ENOMEM);
- nh_grp->priv = fi;
+ INIT_LIST_HEAD(&nh_grp->vr_list);
+ err = rhashtable_init(&nh_grp->vr_ht,
+ &mlxsw_sp_nexthop_group_vr_ht_params);
+ if (err)
+ goto err_nexthop_group_vr_ht_init;
INIT_LIST_HEAD(&nh_grp->fib_list);
- nh_grp->neigh_tbl = &arp_tbl;
-
- nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
- nh_grp->count = nhs;
+ nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
+ nh_grp->ipv4.fi = fi;
fib_info_hold(fi);
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
- fib_nh = fib_info_nh(fi, i);
- err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
- if (err)
- goto err_nexthop4_init;
- }
+
+ err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
+ if (err)
+ goto err_nexthop_group_info_init;
+
err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
if (err)
goto err_nexthop_group_insert;
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+
+ nh_grp->can_destroy = true;
+
return nh_grp;
err_nexthop_group_insert:
-err_nexthop4_init:
- for (i--; i >= 0; i--) {
- nh = &nh_grp->nexthops[i];
- mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
- }
+ mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
+err_nexthop_group_info_init:
fib_info_put(fi);
+ rhashtable_destroy(&nh_grp->vr_ht);
+err_nexthop_group_vr_ht_init:
kfree(nh_grp);
return ERR_PTR(err);
}
@@ -4052,17 +4793,13 @@ static void
mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
- struct mlxsw_sp_nexthop *nh;
- int i;
-
+ if (!nh_grp->can_destroy)
+ return;
mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
- mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
- }
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
- WARN_ON_ONCE(nh_grp->adj_index_valid);
- fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
+ mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
+ fib_info_put(nh_grp->ipv4.fi);
+ WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
+ rhashtable_destroy(&nh_grp->vr_ht);
kfree(nh_grp);
}
@@ -4072,12 +4809,21 @@ static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_nexthop_group *nh_grp;
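+ /* Routes using a nexthop object are bound to the group created when
+ * the object was first notified; that group must already exist.
+ */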
+ if (fi->nh) {
+ nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
+ fi->nh->id);
+ if (WARN_ON_ONCE(!nh_grp))
+ return -EINVAL;
+ goto out;
+ }
+
nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
if (!nh_grp) {
nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
if (IS_ERR(nh_grp))
return PTR_ERR(nh_grp);
}
+out:
list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
fib_entry->nh_group = nh_grp;
return 0;
@@ -4091,6 +4837,12 @@ static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
list_del(&fib_entry->nexthop_group_node);
if (!list_empty(&nh_grp->fib_list))
return;
+
+ if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
+ mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
+ return;
+ }
+
mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
}
@@ -4120,9 +4872,9 @@ mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
switch (fib_entry->type) {
case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
- return !!nh_group->adj_index_valid;
+ return !!nh_group->nhgi->adj_index_valid;
case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
- return !!nh_group->nh_rif;
+ return !!nh_group->nhgi->nh_rif;
case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
@@ -4138,8 +4890,8 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
{
int i;
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
struct fib6_info *rt = mlxsw_sp_rt6->rt;
if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
@@ -4156,7 +4908,6 @@ static void
mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
- struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
int dst_len = fib_entry->fib_node->key.prefix_len;
struct mlxsw_sp_fib4_entry *fib4_entry;
@@ -4166,7 +4917,7 @@ mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
common);
- fri.fi = fi;
+ fri.fi = fib4_entry->fi;
fri.tb_id = fib4_entry->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = dst_len;
@@ -4181,7 +4932,6 @@ static void
mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
- struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
int dst_len = fib_entry->fib_node->key.prefix_len;
struct mlxsw_sp_fib4_entry *fib4_entry;
@@ -4189,7 +4939,7 @@ mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
common);
- fri.fi = fi;
+ fri.fi = fib4_entry->fi;
fri.tb_id = fib4_entry->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = dst_len;
@@ -4264,13 +5014,14 @@ mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
switch (op) {
- case MLXSW_REG_RALUE_OP_WRITE_WRITE:
+ case MLXSW_SP_FIB_ENTRY_OP_WRITE:
+ case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
break;
- case MLXSW_REG_RALUE_OP_WRITE_DELETE:
+ case MLXSW_SP_FIB_ENTRY_OP_DELETE:
mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
break;
default:
@@ -4278,33 +5029,133 @@ mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
}
}
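+/* The basic low-level ops stage a single RALUE register payload in the
+ * op context and write it out when the operation is committed.
+ */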
+struct mlxsw_sp_fib_entry_op_ctx_basic {
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+};
+
static void
-mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
- const struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+mlxsw_sp_router_ll_basic_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ enum mlxsw_sp_l3proto proto,
+ enum mlxsw_sp_fib_entry_op op,
+ u16 virtual_router, u8 prefix_len,
+ unsigned char *addr,
+ struct mlxsw_sp_fib_entry_priv *priv)
{
- struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
- enum mlxsw_reg_ralxx_protocol proto;
- u32 *p_dip;
+ struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
+ enum mlxsw_reg_ralxx_protocol ralxx_proto;
+ char *ralue_pl = op_ctx_basic->ralue_pl;
+ enum mlxsw_reg_ralue_op ralue_op;
- proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
+ ralxx_proto = (enum mlxsw_reg_ralxx_protocol) proto;
- switch (fib->proto) {
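+ /* Translate the generic FIB entry op into the matching RALUE
+ * register op.
+ */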
+ switch (op) {
+ case MLXSW_SP_FIB_ENTRY_OP_WRITE:
+ case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
+ ralue_op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
+ break;
+ case MLXSW_SP_FIB_ENTRY_OP_DELETE:
+ ralue_op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ switch (proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- p_dip = (u32 *) fib_entry->fib_node->key.addr;
- mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
- fib_entry->fib_node->key.prefix_len,
- *p_dip);
+ mlxsw_reg_ralue_pack4(ralue_pl, ralxx_proto, ralue_op,
+ virtual_router, prefix_len, (u32 *) addr);
break;
case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
- fib_entry->fib_node->key.prefix_len,
- fib_entry->fib_node->key.addr);
+ mlxsw_reg_ralue_pack6(ralue_pl, ralxx_proto, ralue_op,
+ virtual_router, prefix_len, addr);
break;
}
}
-static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp, u16 rif_index)
+static void
+mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ enum mlxsw_reg_ralue_trap_action trap_action,
+ u16 trap_id, u32 adjacency_index, u16 ecmp_size)
+{
+ struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
+
+ mlxsw_reg_ralue_act_remote_pack(op_ctx_basic->ralue_pl, trap_action,
+ trap_id, adjacency_index, ecmp_size);
+}
+
+static void
+mlxsw_sp_router_ll_basic_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ enum mlxsw_reg_ralue_trap_action trap_action,
+ u16 trap_id, u16 local_erif)
+{
+ struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
+
+ mlxsw_reg_ralue_act_local_pack(op_ctx_basic->ralue_pl, trap_action,
+ trap_id, local_erif);
+}
+
+static void
+mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
+{
+ struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
+
+ mlxsw_reg_ralue_act_ip2me_pack(op_ctx_basic->ralue_pl);
+}
+
+static void
+mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ u32 tunnel_ptr)
+{
+ struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
+
+ mlxsw_reg_ralue_act_ip2me_tun_pack(op_ctx_basic->ralue_pl, tunnel_ptr);
+}
+
+static int
+mlxsw_sp_router_ll_basic_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ bool *postponed_for_bulk)
+{
+ struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
+ op_ctx_basic->ralue_pl);
+}
+
+static bool
+mlxsw_sp_router_ll_basic_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
+{
+ return true;
+}
+
+static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_sp_fib_entry_op op)
+{
+ struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
+
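+ /* Hold the entry's priv in the op context; it is released once the
+ * (possibly bulked) commit completes.
+ */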
+ mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv);
+ fib->ll_ops->fib_entry_pack(op_ctx, fib->proto, op, fib->vr->id,
+ fib_entry->fib_node->key.prefix_len,
+ fib_entry->fib_node->key.addr,
+ fib_entry->priv);
+}
+
+static int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ const struct mlxsw_sp_router_ll_ops *ll_ops)
+{
+ bool postponed_for_bulk = false;
+ int err;
+
+ err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk);
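+ /* When the low-level ops postpone the write for bulking, the priv
+ * references taken at pack time are kept until the bulk is flushed.
+ */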
+ if (!postponed_for_bulk)
+ mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
+ return err;
+}
+
+static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp)
{
enum mlxsw_reg_ratr_trap_action trap_action;
char ratr_pl[MLXSW_REG_RATR_LEN];
@@ -4318,11 +5169,13 @@ static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp, u16 rif_index)
if (err)
return err;
- trap_action = MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS;
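+ /* Trap packets hitting the discard entry to the CPU (RTR_EGRESS0)
+ * instead of silently dropping them, so that traffic routed via
+ * unresolved nexthops can still be handled.
+ */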
+ trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
MLXSW_REG_RATR_TYPE_ETHERNET,
- mlxsw_sp->router->adj_discard_index, rif_index);
+ mlxsw_sp->router->adj_discard_index,
+ mlxsw_sp->router->lb_rif_index);
mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
+ mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
if (err)
goto err_ratr_write;
@@ -4338,11 +5191,13 @@ err_ratr_write:
}
static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
- char ralue_pl[MLXSW_REG_RALUE_LEN];
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
enum mlxsw_reg_ralue_trap_action trap_action;
u16 trap_id = 0;
u32 adjacency_index = 0;
@@ -4355,12 +5210,10 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
*/
if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
- adjacency_index = fib_entry->nh_group->adj_index;
- ecmp_size = fib_entry->nh_group->ecmp_size;
- } else if (!nh_group->adj_index_valid && nh_group->count &&
- nh_group->nh_rif) {
- err = mlxsw_sp_adj_discard_write(mlxsw_sp,
- nh_group->nh_rif->rif_index);
+ adjacency_index = nhgi->adj_index;
+ ecmp_size = nhgi->ecmp_size;
+ } else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) {
+ err = mlxsw_sp_adj_discard_write(mlxsw_sp);
if (err)
return err;
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
@@ -4371,19 +5224,20 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
- mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
- mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
- adjacency_index, ecmp_size);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
+ ll_ops->fib_entry_act_remote_pack(op_ctx, trap_action, trap_id,
+ adjacency_index, ecmp_size);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
- struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
+ struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
enum mlxsw_reg_ralue_trap_action trap_action;
- char ralue_pl[MLXSW_REG_RALUE_LEN];
u16 trap_id = 0;
u16 rif_index = 0;
@@ -4395,111 +5249,124 @@ static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
- mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
- mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
- rif_index);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
+ ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif_index);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
- char ralue_pl[MLXSW_REG_RALUE_LEN];
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
- mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
- mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
+ ll_ops->fib_entry_act_ip2me_pack(op_ctx);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
enum mlxsw_reg_ralue_trap_action trap_action;
- char ralue_pl[MLXSW_REG_RALUE_LEN];
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
- mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
- mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
+ ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, 0, 0);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int
mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
enum mlxsw_reg_ralue_trap_action trap_action;
- char ralue_pl[MLXSW_REG_RALUE_LEN];
u16 trap_id;
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
- mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
- mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
+ ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, 0);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int
mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
const struct mlxsw_sp_ipip_ops *ipip_ops;
+ int err;
if (WARN_ON(!ipip_entry))
return -EINVAL;
ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
- return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
- fib_entry->decap.tunnel_index);
+ err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
+ fib_entry->decap.tunnel_index);
+ if (err)
+ return err;
+
+ mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
+ ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
+ fib_entry->decap.tunnel_index);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
- char ralue_pl[MLXSW_REG_RALUE_LEN];
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
- mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
- mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
- fib_entry->decap.tunnel_index);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
+ ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
+ fib_entry->decap.tunnel_index);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
switch (fib_entry->type) {
case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
- return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, op_ctx, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
- return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_local(mlxsw_sp, op_ctx, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
- return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, op_ctx, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
- return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, op_ctx, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
- return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
- op);
+ return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, op_ctx, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
- return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
- fib_entry, op);
+ return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, op_ctx, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
- return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, op_ctx, fib_entry, op);
}
return -EINVAL;
}
static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
- int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
+ int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, op);
if (err)
return err;
@@ -4509,18 +5376,35 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
return err;
}
+static int __mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ bool is_new)
+{
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
+ is_new ? MLXSW_SP_FIB_ENTRY_OP_WRITE :
+ MLXSW_SP_FIB_ENTRY_OP_UPDATE);
+}
+
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
- return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
- MLXSW_REG_RALUE_OP_WRITE_WRITE);
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
+
+ mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
+ return __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, false);
}
static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry)
{
- return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
- MLXSW_REG_RALUE_OP_WRITE_DELETE);
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
+
+ if (!ll_ops->fib_entry_is_committed(fib_entry->priv))
+ return 0;
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
+ MLXSW_SP_FIB_ENTRY_OP_DELETE);
}
static int
@@ -4528,17 +5412,17 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
const struct fib_entry_notifier_info *fen_info,
struct mlxsw_sp_fib_entry *fib_entry)
{
- struct net_device *dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
+ struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
struct mlxsw_sp_router *router = mlxsw_sp->router;
u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
+ int ifindex = nhgi->nexthops[0].ifindex;
struct mlxsw_sp_ipip_entry *ipip_entry;
- struct fib_info *fi = fen_info->fi;
switch (fen_info->type) {
case RTN_LOCAL:
- ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
- MLXSW_SP_L3_PROTO_IPV4, dip);
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
+ MLXSW_SP_L3_PROTO_IPV4, dip);
if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
@@ -4571,7 +5455,7 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
return 0;
case RTN_UNICAST:
- if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
+ if (nhgi->gateway)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
else
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
@@ -4608,15 +5492,27 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
return ERR_PTR(-ENOMEM);
fib_entry = &fib4_entry->common;
- err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
- if (err)
- goto err_fib4_entry_type_set;
+ fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
+ if (IS_ERR(fib_entry->priv)) {
+ err = PTR_ERR(fib_entry->priv);
+ goto err_fib_entry_priv_create;
+ }
err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
if (err)
goto err_nexthop4_group_get;
- fib4_entry->prio = fen_info->fi->fib_priority;
+ err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
+ fib_node->fib);
+ if (err)
+ goto err_nexthop_group_vr_link;
+
+ err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
+ if (err)
+ goto err_fib4_entry_type_set;
+
+ fib4_entry->fi = fen_info->fi;
+ fib_info_hold(fib4_entry->fi);
fib4_entry->tb_id = fen_info->tb_id;
fib4_entry->type = fen_info->type;
fib4_entry->tos = fen_info->tos;
@@ -4625,9 +5521,13 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
return fib4_entry;
-err_nexthop4_group_get:
- mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib_entry);
err_fib4_entry_type_set:
+ mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
+err_nexthop_group_vr_link:
+ mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
+err_nexthop4_group_get:
+ mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
+err_fib_entry_priv_create:
kfree(fib4_entry);
return ERR_PTR(err);
}
@@ -4635,8 +5535,14 @@ err_fib4_entry_type_set:
static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib4_entry *fib4_entry)
{
- mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
+ struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
+
+ fib_info_put(fib4_entry->fi);
mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common);
+ mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
+ fib_node->fib);
+ mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
+ mlxsw_sp_fib_entry_priv_put(fib4_entry->common.priv);
kfree(fib4_entry);
}
@@ -4665,8 +5571,7 @@ mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
if (fib4_entry->tb_id == fen_info->tb_id &&
fib4_entry->tos == fen_info->tos &&
fib4_entry->type == fen_info->type &&
- mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
- fen_info->fi)
+ fib4_entry->fi == fen_info->fi)
return fib4_entry;
return NULL;
@@ -4875,14 +5780,16 @@ static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
+ bool is_new = !fib_node->fib_entry;
int err;
fib_node->fib_entry = fib_entry;
- err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+ err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, is_new);
if (err)
goto err_fib_entry_update;
@@ -4893,14 +5800,25 @@ err_fib_entry_update:
return err;
}
-static void
-mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
+static int __mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
+ int err;
- mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
+ err = mlxsw_sp_fib_entry_del(mlxsw_sp, op_ctx, fib_entry);
fib_node->fib_entry = NULL;
+ return err;
+}
+
+static void mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
+
+ mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
+ __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, fib_entry);
}
static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
@@ -4922,6 +5840,7 @@ static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
static int
mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
const struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
@@ -4955,7 +5874,7 @@ mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
}
replaced = fib_node->fib_entry;
- err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib4_entry->common);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
goto err_fib_node_entry_link;
@@ -4980,23 +5899,26 @@ err_fib4_entry_create:
return err;
}
-static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
- struct fib_entry_notifier_info *fen_info)
+static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib4_entry *fib4_entry;
struct mlxsw_sp_fib_node *fib_node;
+ int err;
if (mlxsw_sp->router->aborted)
- return;
+ return 0;
fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
if (!fib4_entry)
- return;
+ return 0;
fib_node = fib4_entry->common.fib_node;
- mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
+ err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib4_entry->common);
mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return err;
}
static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
@@ -5047,7 +5969,8 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
- fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
+ if (!mlxsw_sp_rt6->rt->nh)
+ fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
kfree(mlxsw_sp_rt6);
}
@@ -5081,51 +6004,6 @@ static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
}
-static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop_group *nh_grp,
- struct mlxsw_sp_nexthop *nh,
- const struct fib6_info *rt)
-{
- const struct mlxsw_sp_ipip_ops *ipip_ops;
- struct mlxsw_sp_ipip_entry *ipip_entry;
- struct net_device *dev = rt->fib6_nh->fib_nh_dev;
- struct mlxsw_sp_rif *rif;
- int err;
-
- ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
- if (ipip_entry) {
- ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
- if (ipip_ops->can_offload(mlxsw_sp, dev,
- MLXSW_SP_L3_PROTO_IPV6)) {
- nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
- mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
- return 0;
- }
- }
-
- nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- if (!rif)
- return 0;
- mlxsw_sp_nexthop_rif_init(nh, rif);
-
- err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
- if (err)
- goto err_nexthop_neigh_init;
-
- return 0;
-
-err_nexthop_neigh_init:
- mlxsw_sp_nexthop_rif_fini(nh);
- return err;
-}
-
-static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh)
-{
- mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
-}
-
static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp,
struct mlxsw_sp_nexthop *nh,
@@ -5133,9 +6011,12 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
{
struct net_device *dev = rt->fib6_nh->fib_nh_dev;
- nh->nh_grp = nh_grp;
+ nh->nhgi = nh_grp->nhgi;
nh->nh_weight = rt->fib6_nh->fib_nh_weight;
memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
+#if IS_ENABLED(CONFIG_IPV6)
+ nh->neigh_tbl = &nd_tbl;
+#endif
mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
@@ -5144,13 +6025,13 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
return 0;
nh->ifindex = dev->ifindex;
- return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
+ return mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
}
static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
- mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
list_del(&nh->router_list_node);
mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
}
@@ -5162,51 +6043,105 @@ static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
}
-static struct mlxsw_sp_nexthop_group *
-mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib6_entry *fib6_entry)
+static int
+mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
{
- struct mlxsw_sp_nexthop_group *nh_grp;
+ struct mlxsw_sp_nexthop_group_info *nhgi;
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
struct mlxsw_sp_nexthop *nh;
- int i = 0;
- int err;
+ int err, i;
- nh_grp = kzalloc(struct_size(nh_grp, nexthops, fib6_entry->nrt6),
- GFP_KERNEL);
- if (!nh_grp)
- return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&nh_grp->fib_list);
-#if IS_ENABLED(CONFIG_IPV6)
- nh_grp->neigh_tbl = &nd_tbl;
-#endif
+ nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
+ GFP_KERNEL);
+ if (!nhgi)
+ return -ENOMEM;
+ nh_grp->nhgi = nhgi;
+ nhgi->nh_grp = nh_grp;
mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
struct mlxsw_sp_rt6, list);
- nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
- nh_grp->count = fib6_entry->nrt6;
- for (i = 0; i < nh_grp->count; i++) {
+ nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
+ nhgi->count = fib6_entry->nrt6;
+ for (i = 0; i < nhgi->count; i++) {
struct fib6_info *rt = mlxsw_sp_rt6->rt;
- nh = &nh_grp->nexthops[i];
+ nh = &nhgi->nexthops[i];
err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
if (err)
goto err_nexthop6_init;
mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
}
+ err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ if (err)
+ goto err_group_refresh;
+
+ return 0;
+
+err_group_refresh:
+ i = nhgi->count;
+err_nexthop6_init:
+ for (i--; i >= 0; i--) {
+ nh = &nhgi->nexthops[i];
+ mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
+ }
+ kfree(nhgi);
+ return err;
+}
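
The error path above is the usual goto-unwind idiom: on a mid-loop failure, i indexes the first nexthop that was not initialized, so teardown runs from i - 1 back to 0, and a failure after the loop first resets i to the full count. Here is a self-contained sketch of the pattern, with illustrative names only:

    #include <stdio.h>

    #define COUNT 4

    static int item_init(int i)
    {
            printf("init %d\n", i);
            return i == 2 ? -1 : 0; /* simulate a failure on the third item */
    }

    static void item_fini(int i)
    {
            printf("fini %d\n", i);
    }

    static int group_init(void)
    {
            int err, i;

            for (i = 0; i < COUNT; i++) {
                    err = item_init(i);
                    if (err)
                            goto err_item_init;
            }
            /* A later step failing would do: i = COUNT; goto err_item_init; */
            return 0;

    err_item_init:
            for (i--; i >= 0; i--)
                    item_fini(i);
            return err;
    }

    int main(void)
    {
            return group_init() ? 1 : 0;
    }
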
+
+static void
+mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
+ int i;
+
+ for (i = nhgi->count - 1; i >= 0; i--) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
+
+ mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
+ }
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ WARN_ON_ONCE(nhgi->adj_index_valid);
+ kfree(nhgi);
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp;
+ int err;
+
+ nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
+ if (!nh_grp)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&nh_grp->vr_list);
+ err = rhashtable_init(&nh_grp->vr_ht,
+ &mlxsw_sp_nexthop_group_vr_ht_params);
+ if (err)
+ goto err_nexthop_group_vr_ht_init;
+ INIT_LIST_HEAD(&nh_grp->fib_list);
+ nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
+
+ err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
+ if (err)
+ goto err_nexthop_group_info_init;
err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
if (err)
goto err_nexthop_group_insert;
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ nh_grp->can_destroy = true;
+
return nh_grp;
err_nexthop_group_insert:
-err_nexthop6_init:
- for (i--; i >= 0; i--) {
- nh = &nh_grp->nexthops[i];
- mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
- }
+ mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
+err_nexthop_group_info_init:
+ rhashtable_destroy(&nh_grp->vr_ht);
+err_nexthop_group_vr_ht_init:
kfree(nh_grp);
return ERR_PTR(err);
}
@@ -5215,24 +6150,29 @@ static void
mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
- struct mlxsw_sp_nexthop *nh;
- int i = nh_grp->count;
-
+ if (!nh_grp->can_destroy)
+ return;
mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
- for (i--; i >= 0; i--) {
- nh = &nh_grp->nexthops[i];
- mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
- }
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
- WARN_ON(nh_grp->adj_index_valid);
+ mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
+ WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
+ rhashtable_destroy(&nh_grp->vr_ht);
kfree(nh_grp);
}
static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib6_entry *fib6_entry)
{
+ struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
struct mlxsw_sp_nexthop_group *nh_grp;
+ if (rt->nh) {
+ nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
+ rt->nh->id);
+ if (WARN_ON_ONCE(!nh_grp))
+ return -EINVAL;
+ goto out;
+ }
+
nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
if (!nh_grp) {
nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
@@ -5240,15 +6180,16 @@ static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(nh_grp);
}
- list_add_tail(&fib6_entry->common.nexthop_group_node,
- &nh_grp->fib_list);
- fib6_entry->common.nh_group = nh_grp;
-
/* The route and the nexthop are described by the same struct, so we
 	 * need to update the nexthop offload indication for the new route.
*/
__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
+out:
+ list_add_tail(&fib6_entry->common.nexthop_group_node,
+ &nh_grp->fib_list);
+ fib6_entry->common.nh_group = nh_grp;
+
return 0;
}
@@ -5260,16 +6201,24 @@ static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
list_del(&fib_entry->nexthop_group_node);
if (!list_empty(&nh_grp->fib_list))
return;
+
+ if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
+ mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
+ return;
+ }
+
mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
}
-static int
-mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib6_entry *fib6_entry)
+static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
{
struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
+ struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
int err;
+ mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
fib6_entry->common.nh_group = NULL;
list_del(&fib6_entry->common.nexthop_group_node);
@@ -5277,11 +6226,17 @@ mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_nexthop6_group_get;
+ err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
+ fib_node->fib);
+ if (err)
+ goto err_nexthop_group_vr_link;
+
/* In case this entry is offloaded, then the adjacency index
* currently associated with it in the device's table is that
* of the old group. Start using the new one instead.
*/
- err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
+ err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx,
+ &fib6_entry->common, false);
if (err)
goto err_fib_entry_update;
@@ -5291,16 +6246,21 @@ mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
return 0;
err_fib_entry_update:
+ mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
+ fib_node->fib);
+err_nexthop_group_vr_link:
mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
err_nexthop6_group_get:
list_add_tail(&fib6_entry->common.nexthop_group_node,
&old_nh_grp->fib_list);
fib6_entry->common.nh_group = old_nh_grp;
+ mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
return err;
}
static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib6_entry *fib6_entry,
struct fib6_info **rt_arr, unsigned int nrt6)
{
@@ -5318,7 +6278,7 @@ mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
fib6_entry->nrt6++;
}
- err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
+ err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
if (err)
goto err_nexthop6_group_update;
@@ -5339,6 +6299,7 @@ err_rt6_create:
static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib6_entry *fib6_entry,
struct fib6_info **rt_arr, unsigned int nrt6)
{
@@ -5356,26 +6317,20 @@ mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
}
- mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
+ mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
}
static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
const struct fib6_info *rt)
{
- /* Packets hitting RTF_REJECT routes need to be discarded by the
- * stack. We can rely on their destination device not having a
- * RIF (it's the loopback device) and can thus use action type
- * local, which will cause them to be trapped with a lower
- * priority than packets that need to be locally received.
- */
if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
else if (rt->fib6_type == RTN_BLACKHOLE)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
else if (rt->fib6_flags & RTF_REJECT)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
- else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
+ else if (fib_entry->nh_group->nhgi->gateway)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
else
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
@@ -5409,6 +6364,12 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
return ERR_PTR(-ENOMEM);
fib_entry = &fib6_entry->common;
+ fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
+ if (IS_ERR(fib_entry->priv)) {
+ err = PTR_ERR(fib_entry->priv);
+ goto err_fib_entry_priv_create;
+ }
+
INIT_LIST_HEAD(&fib6_entry->rt6_list);
for (i = 0; i < nrt6; i++) {
@@ -5421,16 +6382,23 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
fib6_entry->nrt6++;
}
- mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
-
err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
if (err)
goto err_nexthop6_group_get;
+ err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
+ fib_node->fib);
+ if (err)
+ goto err_nexthop_group_vr_link;
+
+ mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
+
fib_entry->fib_node = fib_node;
return fib6_entry;
+err_nexthop_group_vr_link:
+ mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
err_nexthop6_group_get:
i = nrt6;
err_rt6_create:
@@ -5441,6 +6409,8 @@ err_rt6_create:
list_del(&mlxsw_sp_rt6->list);
mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
}
+ mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
+err_fib_entry_priv_create:
kfree(fib6_entry);
return ERR_PTR(err);
}
@@ -5448,9 +6418,14 @@ err_rt6_create:
static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib6_entry *fib6_entry)
{
+ struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
+
+ mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
+ fib_node->fib);
mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
WARN_ON(fib6_entry->nrt6);
+ mlxsw_sp_fib_entry_priv_put(fib6_entry->common.priv);
kfree(fib6_entry);
}
@@ -5508,8 +6483,8 @@ static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
}
static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
- struct fib6_info **rt_arr,
- unsigned int nrt6)
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct fib6_info **rt_arr, unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
struct mlxsw_sp_fib_entry *replaced;
@@ -5548,7 +6523,7 @@ static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
}
replaced = fib_node->fib_entry;
- err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib6_entry->common);
if (err)
goto err_fib_node_entry_link;
@@ -5572,8 +6547,8 @@ err_fib6_entry_create:
}
static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
- struct fib6_info **rt_arr,
- unsigned int nrt6)
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct fib6_info **rt_arr, unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
@@ -5604,8 +6579,7 @@ static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
fib6_entry = container_of(fib_node->fib_entry,
struct mlxsw_sp_fib6_entry, common);
- err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
- nrt6);
+ err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
if (err)
goto err_fib6_entry_nexthop_add;
@@ -5616,19 +6590,20 @@ err_fib6_entry_nexthop_add:
return err;
}
-static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
- struct fib6_info **rt_arr,
- unsigned int nrt6)
+static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct fib6_info **rt_arr, unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
struct fib6_info *rt = rt_arr[0];
+ int err;
if (mlxsw_sp->router->aborted)
- return;
+ return 0;
if (mlxsw_sp_fib6_rt_should_ignore(rt))
- return;
+ return 0;
/* Multipath routes are first added to the FIB trie and only then
* notified. If we vetoed the addition, we will get a delete
@@ -5637,58 +6612,66 @@ static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
*/
fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
if (!fib6_entry)
- return;
+ return 0;
/* If not all the nexthops are deleted, then only reduce the nexthop
* group.
*/
if (nrt6 != fib6_entry->nrt6) {
- mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
- nrt6);
- return;
+ mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
+ return 0;
}
fib_node = fib6_entry->common.fib_node;
- mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
+ err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib6_entry->common);
mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return err;
}
static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_reg_ralxx_protocol proto,
+ enum mlxsw_sp_l3proto proto,
u8 tree_id)
{
- char ralta_pl[MLXSW_REG_RALTA_LEN];
- char ralst_pl[MLXSW_REG_RALST_LEN];
+ const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
+ enum mlxsw_reg_ralxx_protocol ralxx_proto =
+ (enum mlxsw_reg_ralxx_protocol) proto;
+ struct mlxsw_sp_fib_entry_priv *priv;
+ char xralta_pl[MLXSW_REG_XRALTA_LEN];
+ char xralst_pl[MLXSW_REG_XRALST_LEN];
int i, err;
- mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+ mlxsw_reg_xralta_pack(xralta_pl, true, ralxx_proto, tree_id);
+ err = ll_ops->ralta_write(mlxsw_sp, xralta_pl);
if (err)
return err;
- mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
+ mlxsw_reg_xralst_pack(xralst_pl, 0xff, tree_id);
+ err = ll_ops->ralst_write(mlxsw_sp, xralst_pl);
if (err)
return err;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
- char raltb_pl[MLXSW_REG_RALTB_LEN];
- char ralue_pl[MLXSW_REG_RALUE_LEN];
+ char xraltb_pl[MLXSW_REG_XRALTB_LEN];
- mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
- raltb_pl);
+ mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
+ mlxsw_reg_xraltb_pack(xraltb_pl, vr->id, ralxx_proto, tree_id);
+ err = ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
if (err)
return err;
- mlxsw_reg_ralue_pack(ralue_pl, proto,
- MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
- mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
- ralue_pl);
+ priv = mlxsw_sp_fib_entry_priv_create(ll_ops);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ ll_ops->fib_entry_pack(op_ctx, proto, MLXSW_SP_FIB_ENTRY_OP_WRITE,
+ vr->id, 0, NULL, priv);
+ ll_ops->fib_entry_act_ip2me_pack(op_ctx);
+ err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, NULL);
+ mlxsw_sp_fib_entry_priv_put(priv);
if (err)
return err;
}
@@ -5784,7 +6767,7 @@ mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
- enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
+ enum mlxsw_sp_l3proto proto = MLXSW_SP_L3_PROTO_IPV4;
int err;
err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
@@ -5796,7 +6779,7 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
* packets that don't match any routes are trapped to the CPU.
*/
- proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
+ proto = MLXSW_SP_L3_PROTO_IPV6;
return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
MLXSW_SP_LPM_TREE_MIN + 1);
}
@@ -5901,15 +6884,15 @@ static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}
-struct mlxsw_sp_fib6_event_work {
+struct mlxsw_sp_fib6_event {
struct fib6_info **rt_arr;
unsigned int nrt6;
};
-struct mlxsw_sp_fib_event_work {
- struct work_struct work;
+struct mlxsw_sp_fib_event {
+ struct list_head list; /* node in fib queue */
union {
- struct mlxsw_sp_fib6_event_work fib6_work;
+ struct mlxsw_sp_fib6_event fib6_event;
struct fib_entry_notifier_info fen_info;
struct fib_rule_notifier_info fr_info;
struct fib_nh_notifier_info fnh_info;
@@ -5918,11 +6901,12 @@ struct mlxsw_sp_fib_event_work {
};
struct mlxsw_sp *mlxsw_sp;
unsigned long event;
+ int family;
};
static int
-mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
- struct fib6_entry_notifier_info *fen6_info)
+mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
+ struct fib6_entry_notifier_info *fen6_info)
{
struct fib6_info *rt = fen6_info->rt;
struct fib6_info **rt_arr;
@@ -5936,8 +6920,8 @@ mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
if (!rt_arr)
return -ENOMEM;
- fib6_work->rt_arr = rt_arr;
- fib6_work->nrt6 = nrt6;
+ fib6_event->rt_arr = rt_arr;
+ fib6_event->nrt6 = nrt6;
rt_arr[0] = rt;
fib6_info_hold(rt);
@@ -5959,170 +6943,232 @@ mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
}
static void
-mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
+mlxsw_sp_router_fib6_event_fini(struct mlxsw_sp_fib6_event *fib6_event)
{
int i;
- for (i = 0; i < fib6_work->nrt6; i++)
- mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
- kfree(fib6_work->rt_arr);
+ for (i = 0; i < fib6_event->nrt6; i++)
+ mlxsw_sp_rt6_release(fib6_event->rt_arr[i]);
+ kfree(fib6_event->rt_arr);
}
-static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
+static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_fib_event *fib_event)
{
- struct mlxsw_sp_fib_event_work *fib_work =
- container_of(work, struct mlxsw_sp_fib_event_work, work);
- struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
int err;
- mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
- switch (fib_work->event) {
+ switch (fib_event->event) {
case FIB_EVENT_ENTRY_REPLACE:
- err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
- &fib_work->fen_info);
- if (err)
+ err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
+ if (err) {
+ mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
mlxsw_sp_router_fib_abort(mlxsw_sp);
- fib_info_put(fib_work->fen_info.fi);
+ }
+ fib_info_put(fib_event->fen_info.fi);
break;
case FIB_EVENT_ENTRY_DEL:
- mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
- fib_info_put(fib_work->fen_info.fi);
+ err = mlxsw_sp_router_fib4_del(mlxsw_sp, op_ctx, &fib_event->fen_info);
+ if (err)
+ mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
+ fib_info_put(fib_event->fen_info.fi);
break;
case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
- mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
- fib_work->fnh_info.fib_nh);
- fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
+ mlxsw_sp_nexthop4_event(mlxsw_sp, fib_event->event, fib_event->fnh_info.fib_nh);
+ fib_info_put(fib_event->fnh_info.fib_nh->nh_parent);
break;
}
- mutex_unlock(&mlxsw_sp->router->lock);
- kfree(fib_work);
}
-static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
+static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_fib_event *fib_event)
{
- struct mlxsw_sp_fib_event_work *fib_work =
- container_of(work, struct mlxsw_sp_fib_event_work, work);
- struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
int err;
- mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
- switch (fib_work->event) {
+ switch (fib_event->event) {
case FIB_EVENT_ENTRY_REPLACE:
- err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
- fib_work->fib6_work.rt_arr,
- fib_work->fib6_work.nrt6);
- if (err)
+ err = mlxsw_sp_router_fib6_replace(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
+ fib_event->fib6_event.nrt6);
+ if (err) {
+ mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
mlxsw_sp_router_fib_abort(mlxsw_sp);
- mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
+ }
+ mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
break;
case FIB_EVENT_ENTRY_APPEND:
- err = mlxsw_sp_router_fib6_append(mlxsw_sp,
- fib_work->fib6_work.rt_arr,
- fib_work->fib6_work.nrt6);
- if (err)
+ err = mlxsw_sp_router_fib6_append(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
+ fib_event->fib6_event.nrt6);
+ if (err) {
+ mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
mlxsw_sp_router_fib_abort(mlxsw_sp);
- mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
+ }
+ mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
break;
case FIB_EVENT_ENTRY_DEL:
- mlxsw_sp_router_fib6_del(mlxsw_sp,
- fib_work->fib6_work.rt_arr,
- fib_work->fib6_work.nrt6);
- mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
+ err = mlxsw_sp_router_fib6_del(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
+ fib_event->fib6_event.nrt6);
+ if (err)
+ mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
+ mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
break;
}
- mutex_unlock(&mlxsw_sp->router->lock);
- kfree(fib_work);
}
-static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
+static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_event *fib_event)
{
- struct mlxsw_sp_fib_event_work *fib_work =
- container_of(work, struct mlxsw_sp_fib_event_work, work);
- struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
bool replace;
int err;
rtnl_lock();
mutex_lock(&mlxsw_sp->router->lock);
- switch (fib_work->event) {
+ switch (fib_event->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_ADD:
- replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
+ replace = fib_event->event == FIB_EVENT_ENTRY_REPLACE;
- err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
- replace);
+ err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
if (err)
mlxsw_sp_router_fib_abort(mlxsw_sp);
- mr_cache_put(fib_work->men_info.mfc);
+ mr_cache_put(fib_event->men_info.mfc);
break;
case FIB_EVENT_ENTRY_DEL:
- mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
- mr_cache_put(fib_work->men_info.mfc);
+ mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_event->men_info);
+ mr_cache_put(fib_event->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD:
err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
- &fib_work->ven_info);
+ &fib_event->ven_info);
if (err)
mlxsw_sp_router_fib_abort(mlxsw_sp);
- dev_put(fib_work->ven_info.dev);
+ dev_put(fib_event->ven_info.dev);
break;
case FIB_EVENT_VIF_DEL:
- mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
- &fib_work->ven_info);
- dev_put(fib_work->ven_info.dev);
+ mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_event->ven_info);
+ dev_put(fib_event->ven_info.dev);
break;
}
mutex_unlock(&mlxsw_sp->router->lock);
rtnl_unlock();
- kfree(fib_work);
}
-static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
+static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
+{
+ struct mlxsw_sp_router *router = container_of(work, struct mlxsw_sp_router, fib_event_work);
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx = router->ll_op_ctx;
+ struct mlxsw_sp *mlxsw_sp = router->mlxsw_sp;
+ struct mlxsw_sp_fib_event *next_fib_event;
+ struct mlxsw_sp_fib_event *fib_event;
+ int last_family = AF_UNSPEC;
+ LIST_HEAD(fib_event_queue);
+
+ spin_lock_bh(&router->fib_event_queue_lock);
+ list_splice_init(&router->fib_event_queue, &fib_event_queue);
+ spin_unlock_bh(&router->fib_event_queue_lock);
+
+	/* The router lock is held here to make sure the per-instance
+	 * operation context is not used concurrently while FIB4/6
+	 * events are being processed.
+	 */
+ mutex_lock(&router->lock);
+ mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
+ list_for_each_entry_safe(fib_event, next_fib_event,
+ &fib_event_queue, list) {
+		/* Check if the next entry in the queue exists and is of the
+		 * same type (family and event) as the current one. In that
+		 * case it is permitted to bulk multiple FIB entries into a
+		 * single register write.
+		 */
+ op_ctx->bulk_ok = !list_is_last(&fib_event->list, &fib_event_queue) &&
+ fib_event->family == next_fib_event->family &&
+ fib_event->event == next_fib_event->event;
+
+		/* If the family of this entry differs from that of the
+		 * previous one, the context needs to be reinitialized now;
+		 * indicate that. Note that since last_family is initialized
+		 * to AF_UNSPEC, this always happens for the first entry
+		 * processed in the work.
+		 */
+ if (fib_event->family != last_family)
+ op_ctx->initialized = false;
+
+ switch (fib_event->family) {
+ case AF_INET:
+ mlxsw_sp_router_fib4_event_process(mlxsw_sp, op_ctx,
+ fib_event);
+ break;
+ case AF_INET6:
+ mlxsw_sp_router_fib6_event_process(mlxsw_sp, op_ctx,
+ fib_event);
+ break;
+ case RTNL_FAMILY_IP6MR:
+ case RTNL_FAMILY_IPMR:
+			/* Unlock here because FIBMR takes the lock again
+			 * under RTNL. The per-instance operation context
+			 * is not used by FIBMR.
+			 */
+ mutex_unlock(&router->lock);
+ mlxsw_sp_router_fibmr_event_process(mlxsw_sp,
+ fib_event);
+ mutex_lock(&router->lock);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ last_family = fib_event->family;
+ kfree(fib_event);
+ cond_resched();
+ }
+ WARN_ON_ONCE(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
+ mutex_unlock(&router->lock);
+}
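
The work above makes two per-event decisions: whether the next queued event may be bulked with the current one, and whether the operation context must be reinitialized because the address family changed. The following userspace sketch models just those decisions; struct ev and the sample queue are illustrative, not driver structures.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <sys/socket.h> /* AF_UNSPEC, AF_INET, AF_INET6 */

    struct ev { int family; int event; };

    int main(void)
    {
            struct ev queue[] = {
                    { AF_INET, 0 }, { AF_INET, 0 }, { AF_INET6, 0 },
            };
            size_t n = sizeof(queue) / sizeof(queue[0]);
            int last_family = AF_UNSPEC;

            for (size_t i = 0; i < n; i++) {
                    /* Bulking is allowed only with a same-family,
                     * same-event successor, mirroring the computation
                     * of op_ctx->bulk_ok above.
                     */
                    bool bulk_ok = i + 1 < n &&
                                   queue[i + 1].family == queue[i].family &&
                                   queue[i + 1].event == queue[i].event;
                    /* The first iteration always reinitializes, since
                     * last_family starts out as AF_UNSPEC.
                     */
                    bool reinit = queue[i].family != last_family;

                    printf("event %zu: bulk_ok=%d reinit=%d\n",
                           i, bulk_ok, reinit);
                    last_family = queue[i].family;
            }
            return 0;
    }
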
+
+static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event *fib_event,
struct fib_notifier_info *info)
{
struct fib_entry_notifier_info *fen_info;
struct fib_nh_notifier_info *fnh_info;
- switch (fib_work->event) {
+ switch (fib_event->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_DEL:
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
- fib_work->fen_info = *fen_info;
+ fib_event->fen_info = *fen_info;
/* Take reference on fib_info to prevent it from being
- * freed while work is queued. Release it afterwards.
+		 * freed while the event is queued. Release it afterwards.
*/
- fib_info_hold(fib_work->fen_info.fi);
+ fib_info_hold(fib_event->fen_info.fi);
break;
case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
fnh_info = container_of(info, struct fib_nh_notifier_info,
info);
- fib_work->fnh_info = *fnh_info;
- fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
+ fib_event->fnh_info = *fnh_info;
+ fib_info_hold(fib_event->fnh_info.fib_nh->nh_parent);
break;
}
}
-static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
+static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
struct fib_notifier_info *info)
{
struct fib6_entry_notifier_info *fen6_info;
int err;
- switch (fib_work->event) {
+ switch (fib_event->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_APPEND:
case FIB_EVENT_ENTRY_DEL:
fen6_info = container_of(info, struct fib6_entry_notifier_info,
info);
- err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
- fen6_info);
+ err = mlxsw_sp_router_fib6_event_init(&fib_event->fib6_event,
+ fen6_info);
if (err)
return err;
break;
@@ -6132,20 +7178,20 @@ static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
}
static void
-mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
+mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event *fib_event,
struct fib_notifier_info *info)
{
- switch (fib_work->event) {
+ switch (fib_event->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_ADD:
case FIB_EVENT_ENTRY_DEL:
- memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
- mr_cache_hold(fib_work->men_info.mfc);
+ memcpy(&fib_event->men_info, info, sizeof(fib_event->men_info));
+ mr_cache_hold(fib_event->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD:
case FIB_EVENT_VIF_DEL:
- memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
- dev_hold(fib_work->ven_info.dev);
+ memcpy(&fib_event->ven_info, info, sizeof(fib_event->ven_info));
+ dev_hold(fib_event->ven_info.dev);
break;
}
}
@@ -6202,7 +7248,7 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
- struct mlxsw_sp_fib_event_work *fib_work;
+ struct mlxsw_sp_fib_event *fib_event;
struct fib_notifier_info *info = ptr;
struct mlxsw_sp_router *router;
int err;
@@ -6234,55 +7280,43 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
return notifier_from_errno(-EINVAL);
}
- if (fen_info->fi->nh) {
- NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
- return notifier_from_errno(-EINVAL);
- }
- } else if (info->family == AF_INET6) {
- struct fib6_entry_notifier_info *fen6_info;
-
- fen6_info = container_of(info,
- struct fib6_entry_notifier_info,
- info);
- if (fen6_info->rt->nh) {
- NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported");
- return notifier_from_errno(-EINVAL);
- }
}
break;
}
- fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
- if (!fib_work)
+ fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
+ if (!fib_event)
return NOTIFY_BAD;
- fib_work->mlxsw_sp = router->mlxsw_sp;
- fib_work->event = event;
+ fib_event->mlxsw_sp = router->mlxsw_sp;
+ fib_event->event = event;
+ fib_event->family = info->family;
switch (info->family) {
case AF_INET:
- INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
- mlxsw_sp_router_fib4_event(fib_work, info);
+ mlxsw_sp_router_fib4_event(fib_event, info);
break;
case AF_INET6:
- INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
- err = mlxsw_sp_router_fib6_event(fib_work, info);
+ err = mlxsw_sp_router_fib6_event(fib_event, info);
if (err)
goto err_fib_event;
break;
case RTNL_FAMILY_IP6MR:
case RTNL_FAMILY_IPMR:
- INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
- mlxsw_sp_router_fibmr_event(fib_work, info);
+ mlxsw_sp_router_fibmr_event(fib_event, info);
break;
}
- mlxsw_core_schedule_work(&fib_work->work);
+ /* Enqueue the event and trigger the work */
+ spin_lock_bh(&router->fib_event_queue_lock);
+ list_add_tail(&fib_event->list, &router->fib_event_queue);
+ spin_unlock_bh(&router->fib_event_queue_lock);
+ mlxsw_core_schedule_work(&router->fib_event_work);
return NOTIFY_DONE;
err_fib_event:
- kfree(fib_work);
+ kfree(fib_event);
return NOTIFY_BAD;
}
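
The notifier/work split above follows a common pattern: the atomic notifier allocates an event, appends it to a queue under a lock and schedules the work, while the work splices the whole queue out under the lock and processes it unlocked. A toy userspace analogue follows, using a pthread mutex instead of a spinlock and a singly-linked queue instead of a kernel list_head; all names are illustrative.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct ev { struct ev *next; int id; };

    static struct ev *queue_head;
    static struct ev **queue_tail = &queue_head;
    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

    static void enqueue(struct ev *ev)
    {
            pthread_mutex_lock(&queue_lock);
            ev->next = NULL;
            *queue_tail = ev;
            queue_tail = &ev->next;
            pthread_mutex_unlock(&queue_lock);
            /* Here the driver would schedule the work item. */
    }

    static void drain(void)
    {
            struct ev *ev, *next;

            /* Splice the whole queue out under the lock... */
            pthread_mutex_lock(&queue_lock);
            ev = queue_head;
            queue_head = NULL;
            queue_tail = &queue_head;
            pthread_mutex_unlock(&queue_lock);

            /* ...and process it without the lock held. */
            for (; ev; ev = next) {
                    next = ev->next;
                    printf("processing event %d\n", ev->id);
                    free(ev);
            }
    }

    int main(void)
    {
            for (int i = 0; i < 3; i++) {
                    struct ev *ev = malloc(sizeof(*ev));

                    if (!ev)
                            break;
                    ev->id = i;
                    enqueue(ev);
            }
            drain();
            return 0;
    }
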
@@ -6671,9 +7705,9 @@ static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
}
static int
-mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
- struct net_device *l3_dev,
- struct netlink_ext_ack *extack)
+__mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
+ struct net_device *l3_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -6738,6 +7772,27 @@ __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
mlxsw_sp_rif_subport_put(rif);
}
+int
+mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
+ struct net_device *l3_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_rif *rif;
+ int err = 0;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+ if (!rif)
+ goto out;
+
+ err = __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
+ extack);
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return err;
+}
+
void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
@@ -6762,8 +7817,8 @@ static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
switch (event) {
case NETDEV_UP:
- return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
- l3_dev, extack);
+ return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
+ l3_dev, extack);
case NETDEV_DOWN:
__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
break;
@@ -6831,6 +7886,15 @@ static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
switch (event) {
case NETDEV_UP:
+ if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
+ u16 proto;
+
+ br_vlan_get_proto(l3_dev, &proto);
+ if (proto == ETH_P_8021AD) {
+ NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
+ return -EOPNOTSUPP;
+ }
+ }
rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
if (IS_ERR(rif))
return PTR_ERR(rif);
@@ -8057,6 +9121,69 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
+static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
+ .ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
+ .ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
+ .raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
+ .fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic),
+ .fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack,
+ .fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack,
+ .fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack,
+ .fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack,
+ .fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack,
+ .fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit,
+ .fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed,
+};
+
+static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router)
+{
+ size_t max_size = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
+ size_t size = router->proto_ll_ops[i]->fib_entry_op_ctx_size;
+
+ if (size > max_size)
+ max_size = size;
+ }
+ router->ll_op_ctx = kzalloc(sizeof(*router->ll_op_ctx) + max_size,
+ GFP_KERNEL);
+ if (!router->ll_op_ctx)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&router->ll_op_ctx->fib_entry_priv_list);
+ return 0;
+}
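
Since both protocols share one context, the allocation above is sized for the largest per-protocol private area. A small userspace sketch of the same sizing computation is below; the struct layout mirrors the flexible-array member, but the names are hypothetical.

    #include <stdio.h>
    #include <stdlib.h>

    struct op_ctx {
            int initialized;
            unsigned long ll_priv[]; /* flexible array, sized at alloc time */
    };

    static struct op_ctx *op_ctx_alloc(const size_t *priv_sizes, int count)
    {
            size_t max_size = 0;
            int i;

            for (i = 0; i < count; i++)
                    if (priv_sizes[i] > max_size)
                            max_size = priv_sizes[i];

            /* One zeroed allocation serves every protocol. */
            return calloc(1, sizeof(struct op_ctx) + max_size);
    }

    int main(void)
    {
            size_t sizes[] = { 16, 64 }; /* e.g. per-protocol ctx sizes */
            struct op_ctx *ctx = op_ctx_alloc(sizes, 2);

            printf("allocated: %s\n", ctx ? "ok" : "failed");
            free(ctx);
            return 0;
    }
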
+
+static void mlxsw_sp_router_ll_op_ctx_fini(struct mlxsw_sp_router *router)
+{
+ WARN_ON(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
+ kfree(router->ll_op_ctx);
+}
+
+static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp)
+{
+ u16 lb_rif_index;
+ int err;
+
+ /* Create a generic loopback RIF associated with the main table
+ * (default VRF). Any table can be used, but the main table exists
+ * anyway, so we do not waste resources.
+ */
+ err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN,
+ &lb_rif_index);
+ if (err)
+ return err;
+
+ mlxsw_sp->router->lb_rif_index = lb_rif_index;
+
+ return 0;
+}
+
+static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->router->lb_rif_index);
+}
+
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack)
{
@@ -8070,6 +9197,13 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp->router = router;
router->mlxsw_sp = mlxsw_sp;
+ router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = &mlxsw_sp_router_ll_basic_ops;
+ router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;
+
+ err = mlxsw_sp_router_ll_op_ctx_init(router);
+ if (err)
+ goto err_ll_op_ctx_init;
+
INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
@@ -8106,6 +9240,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_vrs_init;
+ err = mlxsw_sp_lb_rif_init(mlxsw_sp);
+ if (err)
+ goto err_lb_rif_init;
+
err = mlxsw_sp_neigh_init(mlxsw_sp);
if (err)
goto err_neigh_init;
@@ -8118,6 +9256,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_dscp_init;
+ INIT_WORK(&router->fib_event_work, mlxsw_sp_router_fib_event_work);
+ INIT_LIST_HEAD(&router->fib_event_queue);
+ spin_lock_init(&router->fib_event_queue_lock);
+
router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
err = register_inetaddr_notifier(&router->inetaddr_nb);
if (err)
@@ -8134,6 +9276,14 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_register_netevent_notifier;
+ mlxsw_sp->router->nexthop_nb.notifier_call =
+ mlxsw_sp_nexthop_obj_event;
+ err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->nexthop_nb,
+ extack);
+ if (err)
+ goto err_register_nexthop_notifier;
+
mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->fib_nb,
@@ -8144,6 +9294,9 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_register_fib_notifier:
+ unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->nexthop_nb);
+err_register_nexthop_notifier:
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
unregister_inet6addr_notifier(&router->inet6addr_nb);
@@ -8151,10 +9304,13 @@ err_register_inet6addr_notifier:
unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
mlxsw_core_flush_owq();
+ WARN_ON(!list_empty(&router->fib_event_queue));
err_dscp_init:
err_mp_hash_init:
mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
+ mlxsw_sp_lb_rif_fini(mlxsw_sp);
+err_lb_rif_init:
mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
mlxsw_sp_mr_fini(mlxsw_sp);
@@ -8171,6 +9327,8 @@ err_ipips_init:
err_rifs_init:
__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
+ mlxsw_sp_router_ll_op_ctx_fini(router);
+err_ll_op_ctx_init:
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
return err;
@@ -8180,11 +9338,15 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->fib_nb);
+ unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->nexthop_nb);
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
mlxsw_core_flush_owq();
+ WARN_ON(!list_empty(&mlxsw_sp->router->fib_event_queue));
mlxsw_sp_neigh_fini(mlxsw_sp);
+ mlxsw_sp_lb_rif_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
mlxsw_sp_mr_fini(mlxsw_sp);
mlxsw_sp_lpm_fini(mlxsw_sp);
@@ -8193,6 +9355,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_ipips_fini(mlxsw_sp);
mlxsw_sp_rifs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
+ mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
}
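Every step added to mlxsw_sp_router_init() gains a matching err_* label, and mlxsw_sp_router_fini() tears everything down in exactly the reverse order of initialization. A compact sketch of this unwind idiom, with hypothetical step_*()/undo_*() pairs standing in for the real init/fini calls:

#include <linux/errno.h>

static int step_a(void) { return 0; }	/* e.g. ll_op_ctx_init */
static int step_b(void) { return 0; }	/* e.g. lb_rif_init */
static int step_c(void) { return 0; }	/* e.g. notifier registration */
static void undo_a(void) { }
static void undo_b(void) { }

static int sketch_init(void)
{
	int err;

	err = step_a();
	if (err)
		return err;

	err = step_b();
	if (err)
		goto err_step_b;

	err = step_c();
	if (err)
		goto err_step_c;

	return 0;

	/* Labels are named after the step that failed; each undoes only
	 * what completed before it, in reverse order.
	 */
err_step_c:
	undo_b();
err_step_b:
	undo_a();
	return err;
}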
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 8418dc3ae967..d8aed866af21 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -15,6 +15,26 @@ struct mlxsw_sp_router_nve_decap {
u8 valid:1;
};
+struct mlxsw_sp_fib_entry_op_ctx {
+	u8 bulk_ok:1, /* Indicates to the low-level op that it is ok to
+		       * bulk this entry together with the next one in
+		       * the queue.
+		       */
+	   initialized:1; /* Bit that the low-level op sets once the
+			   * context priv has been initialized.
+			   */
+ struct list_head fib_entry_priv_list;
+ unsigned long ll_priv[];
+};
+
+static inline void
+mlxsw_sp_fib_entry_op_ctx_clear(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
+{
+ WARN_ON_ONCE(!list_empty(&op_ctx->fib_entry_priv_list));
+ memset(op_ctx, 0, sizeof(*op_ctx));
+ INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
+}
+
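The op_ctx ends in a flexible array member, so a single allocation carries both the generic context and the low-level private area whose size the active ll_ops advertises through fib_entry_op_ctx_size (see below). Presumably mlxsw_sp_router_ll_op_ctx_init() does something close to this sketch (sketch_op_ctx_alloc is an illustrative name):

#include <linux/list.h>
#include <linux/slab.h>

static struct mlxsw_sp_fib_entry_op_ctx *
sketch_op_ctx_alloc(size_t ll_ctx_size)
{
	struct mlxsw_sp_fib_entry_op_ctx *op_ctx;

	/* One allocation: generic header plus ll_priv[] scratch space */
	op_ctx = kzalloc(sizeof(*op_ctx) + ll_ctx_size, GFP_KERNEL);
	if (!op_ctx)
		return NULL;

	INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
	return op_ctx;
}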
struct mlxsw_sp_router {
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif **rifs;
@@ -38,6 +58,7 @@ struct mlxsw_sp_router {
struct list_head nexthop_neighs_list;
struct list_head ipip_list;
bool aborted;
+ struct notifier_block nexthop_nb;
struct notifier_block fib_nb;
struct notifier_block netevent_nb;
struct notifier_block inetaddr_nb;
@@ -48,6 +69,53 @@ struct mlxsw_sp_router {
bool adj_discard_index_valid;
struct mlxsw_sp_router_nve_decap nve_decap_config;
struct mutex lock; /* Protects shared router resources */
+ struct work_struct fib_event_work;
+ struct list_head fib_event_queue;
+ spinlock_t fib_event_queue_lock; /* Protects fib event queue list */
+ /* One set of ops for each protocol: IPv4 and IPv6 */
+ const struct mlxsw_sp_router_ll_ops *proto_ll_ops[MLXSW_SP_L3_PROTO_MAX];
+ struct mlxsw_sp_fib_entry_op_ctx *ll_op_ctx;
+ u16 lb_rif_index;
+};
+
+struct mlxsw_sp_fib_entry_priv {
+ refcount_t refcnt;
+ struct list_head list; /* Member in op_ctx->fib_entry_priv_list */
+ unsigned long priv[];
+};
+
+enum mlxsw_sp_fib_entry_op {
+ MLXSW_SP_FIB_ENTRY_OP_WRITE,
+ MLXSW_SP_FIB_ENTRY_OP_UPDATE,
+ MLXSW_SP_FIB_ENTRY_OP_DELETE,
+};
+
+/* Low-level router ops. These abstract the different register sets
+ * used to work with ordinary and XM trees and FIB entries.
+ */
+struct mlxsw_sp_router_ll_ops {
+ int (*ralta_write)(struct mlxsw_sp *mlxsw_sp, char *xralta_pl);
+ int (*ralst_write)(struct mlxsw_sp *mlxsw_sp, char *xralst_pl);
+ int (*raltb_write)(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl);
+ size_t fib_entry_op_ctx_size;
+ size_t fib_entry_priv_size;
+ void (*fib_entry_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ enum mlxsw_sp_l3proto proto, enum mlxsw_sp_fib_entry_op op,
+ u16 virtual_router, u8 prefix_len, unsigned char *addr,
+ struct mlxsw_sp_fib_entry_priv *priv);
+ void (*fib_entry_act_remote_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ enum mlxsw_reg_ralue_trap_action trap_action,
+ u16 trap_id, u32 adjacency_index, u16 ecmp_size);
+ void (*fib_entry_act_local_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ enum mlxsw_reg_ralue_trap_action trap_action,
+ u16 trap_id, u16 local_erif);
+ void (*fib_entry_act_ip2me_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx);
+ void (*fib_entry_act_ip2me_tun_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ u32 tunnel_ptr);
+ int (*fib_entry_commit)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ bool *postponed_for_bulk);
+ bool (*fib_entry_is_committed)(struct mlxsw_sp_fib_entry_priv *priv);
};
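All FIB-entry work funnels through these callbacks, so callers select the ops by protocol and never touch the RALUE/XRALUE registers directly. A hypothetical call site, assuming the definitions above and an op_ctx that has already been cleared:

static int sketch_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				u16 virtual_router, u8 prefix_len,
				unsigned char *addr,
				struct mlxsw_sp_fib_entry_priv *priv)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops =
		mlxsw_sp->router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4];
	bool postponed_for_bulk;

	/* Pack the delete request into the context... */
	ll_ops->fib_entry_pack(op_ctx, MLXSW_SP_L3_PROTO_IPV4,
			       MLXSW_SP_FIB_ENTRY_OP_DELETE,
			       virtual_router, prefix_len, addr, priv);
	/* ...and commit; with bulk_ok set, the op may be merged with
	 * the next queued entry instead of being written immediately.
	 */
	return ll_ops->fib_entry_commit(mlxsw_sp, op_ctx,
					&postponed_for_bulk);
}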
struct mlxsw_sp_rif_ipip_lb;
@@ -129,6 +197,7 @@ int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
u32 *p_adj_size, u32 *p_adj_hash_index);
struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh);
bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh);
+bool mlxsw_sp_nexthop_is_discard(const struct mlxsw_sp_nexthop *nh);
#define mlxsw_sp_nexthop_for_each(nh, router) \
for (nh = mlxsw_sp_nexthop_next(router, NULL); nh; \
nh = mlxsw_sp_nexthop_next(router, nh))
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 6501ce94ace5..cea42f6ed89b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -41,6 +41,7 @@ struct mlxsw_sp_bridge {
DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
+ const struct mlxsw_sp_bridge_ops *bridge_8021ad_ops;
};
struct mlxsw_sp_bridge_device {
@@ -228,8 +229,14 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
bridge_device->mrouter = br_multicast_router(br_dev);
INIT_LIST_HEAD(&bridge_device->ports_list);
if (vlan_enabled) {
+ u16 proto;
+
bridge->vlan_enabled_exists = true;
- bridge_device->ops = bridge->bridge_8021q_ops;
+ br_vlan_get_proto(br_dev, &proto);
+ if (proto == ETH_P_8021AD)
+ bridge_device->ops = bridge->bridge_8021ad_ops;
+ else
+ bridge_device->ops = bridge->bridge_8021q_ops;
} else {
bridge_device->ops = bridge->bridge_8021d_ops;
}
@@ -757,6 +764,25 @@ static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
return -EINVAL;
}
+static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ struct net_device *orig_dev,
+ u16 vlan_proto)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ if (!switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_device))
+ return -EINVAL;
+
+ netdev_err(bridge_device->dev, "VLAN protocol can't be changed on existing bridge\n");
+ return -EINVAL;
+}
+
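The handler exploits switchdev's two-phase commit: unsupported changes are vetoed in the prepare phase, so the commit phase can never fail. The general shape, sketched with a hypothetical sketch_change_supported() predicate:

#include <linux/errno.h>
#include <net/switchdev.h>

static bool sketch_change_supported(void)
{
	return false;	/* like the VLAN protocol: never changeable */
}

static int sketch_attr_set(struct switchdev_trans *trans)
{
	/* Prepare phase: the only place an error may be returned */
	if (switchdev_trans_ph_prepare(trans))
		return sketch_change_supported() ? 0 : -EINVAL;

	/* Commit phase: apply the change validated above */
	return 0;
}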
static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
struct net_device *orig_dev,
@@ -926,6 +952,11 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
attr->orig_dev,
attr->u.vlan_filtering);
break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
+ err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port, trans,
+ attr->orig_dev,
+ attr->u.vlan_protocol);
+ break;
case SWITCHDEV_ATTR_ID_PORT_MROUTER:
err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
attr->orig_dev,
@@ -1129,6 +1160,7 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
u16 old_pvid = mlxsw_sp_port->pvid;
+ u16 proto;
int err;
	/* The only valid scenario in which a port-vlan already exists is if
@@ -1152,7 +1184,8 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
goto err_port_vlan_set;
- err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
+ br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
+ err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
if (err)
goto err_port_pvid_set;
@@ -1164,7 +1197,7 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
err_port_vlan_bridge_join:
- mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid, proto);
err_port_pvid_set:
mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
@@ -1821,13 +1854,15 @@ mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
{
u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ u16 proto;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
if (WARN_ON(!mlxsw_sp_port_vlan))
return;
mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
- mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
+ br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
@@ -1975,10 +2010,9 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
}
static int
-mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
- struct mlxsw_sp_bridge_port *bridge_port,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct netlink_ext_ack *extack)
+mlxsw_sp_bridge_vlan_aware_port_join(struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
{
if (is_vlan_dev(bridge_port->dev)) {
NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
@@ -1992,19 +2026,37 @@ mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
return 0;
}
+static int
+mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
+{
+ return mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
+ extack);
+}
+
+static void
+mlxsw_sp_bridge_vlan_aware_port_leave(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ /* Make sure untagged frames are allowed to ingress */
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
+ ETH_P_8021Q);
+}
+
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
struct mlxsw_sp_port *mlxsw_sp_port)
{
- /* Make sure untagged frames are allowed to ingress */
- mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
+ mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
}
static int
-mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
- const struct net_device *vxlan_dev, u16 vid,
- struct netlink_ext_ack *extack)
+mlxsw_sp_bridge_vlan_aware_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev,
+ u16 vid, u16 ethertype,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
@@ -2012,6 +2064,7 @@ mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
.type = MLXSW_SP_NVE_TYPE_VXLAN,
.vni = vxlan->cfg.vni,
.dev = vxlan_dev,
+ .ethertype = ethertype,
};
struct mlxsw_sp_fid *fid;
int err;
@@ -2050,6 +2103,15 @@ err_vni_exists:
return err;
}
+static int
+mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev, u16 vid,
+ struct netlink_ext_ack *extack)
+{
+ return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
+ vid, ETH_P_8021Q, extack);
+}
+
static struct net_device *
mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
{
@@ -2180,6 +2242,7 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
.type = MLXSW_SP_NVE_TYPE_VXLAN,
.vni = vxlan->cfg.vni,
.dev = vxlan_dev,
+ .ethertype = ETH_P_8021Q,
};
struct mlxsw_sp_fid *fid;
int err;
@@ -2246,6 +2309,57 @@ static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
.fid_vid = mlxsw_sp_bridge_8021d_fid_vid,
};
+static int
+mlxsw_sp_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, false);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
+ extack);
+ if (err)
+ goto err_bridge_vlan_aware_port_join;
+
+ return 0;
+
+err_bridge_vlan_aware_port_join:
+ mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
+ return err;
+}
+
+static void
+mlxsw_sp_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
+ mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
+}
+
+static int
+mlxsw_sp_bridge_8021ad_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev, u16 vid,
+ struct netlink_ext_ack *extack)
+{
+ return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
+ vid, ETH_P_8021AD, extack);
+}
+
+static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021ad_ops = {
+ .port_join = mlxsw_sp_bridge_8021ad_port_join,
+ .port_leave = mlxsw_sp_bridge_8021ad_port_leave,
+ .vxlan_join = mlxsw_sp_bridge_8021ad_vxlan_join,
+ .fid_get = mlxsw_sp_bridge_8021q_fid_get,
+ .fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup,
+ .fid_vid = mlxsw_sp_bridge_8021q_fid_vid,
+};
+
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
struct net_device *br_dev,
@@ -3206,8 +3320,8 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
if (!fid) {
if (!flag_untagged || !flag_pvid)
return 0;
- return mlxsw_sp_bridge_8021q_vxlan_join(bridge_device,
- vxlan_dev, vid, extack);
+ return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev,
+ vid, extack);
}
/* Second case: FID is associated with the VNI and the VLAN associated
@@ -3246,16 +3360,14 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
if (!flag_untagged)
return 0;
- err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
- extack);
+ err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, extack);
if (err)
goto err_vxlan_join;
return 0;
err_vxlan_join:
- mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, old_vid,
- NULL);
+ bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, old_vid, NULL);
return err;
}
@@ -3507,6 +3619,7 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
+ bridge->bridge_8021ad_ops = &mlxsw_sp_bridge_8021ad_ops;
return mlxsw_sp_fdb_init(mlxsw_sp);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 433f14ade464..4ef12e3e021a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -617,7 +617,7 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
TRAP_TO_CPU),
MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV6, L3_EXCEPTIONS,
TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER3, L3_EXCEPTIONS,
+ MLXSW_SP_RXL_EXCEPTION(RTR_EGRESS0, L3_EXCEPTIONS,
TRAP_EXCEPTION_TO_CPU),
},
},
@@ -1007,6 +1007,12 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
false),
},
},
+ {
+ .trap = MLXSW_SP_TRAP_DROP(BLACKHOLE_NEXTHOP, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ROUTER3, L3_DISCARDS),
+ },
+ },
};
static struct mlxsw_sp_trap_policer_item *
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 5023d91269f4..40e2e79d4517 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 57f9e24602d0..9e070ab3ed76 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -52,6 +52,7 @@ enum {
MLXSW_TRAP_ID_RTR_INGRESS1 = 0x71,
MLXSW_TRAP_ID_IPV6_PIM = 0x79,
MLXSW_TRAP_ID_IPV6_VRRP = 0x7A,
+ MLXSW_TRAP_ID_RTR_EGRESS0 = 0x80,
MLXSW_TRAP_ID_IPV4_BGP = 0x88,
MLXSW_TRAP_ID_IPV6_BGP = 0x89,
MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A,