16 files changed, 177 insertions, 293 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index c3c8a7148723..382d02f6619c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -813,7 +813,8 @@ static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
 	op_mod = MLX5_GET(mbox_in, in, op_mod);
 	uid = MLX5_GET(mbox_in, in, uid);
 
-	if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
+	if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY &&
+	    opcode != MLX5_CMD_OP_CREATE_UCTX)
 		mlx5_cmd_out_err(dev, opcode, op_mod, out);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c
index 17325c5d6516..cf60f0a3ff23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c
@@ -47,6 +47,7 @@ void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl)
 
 void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl)
 {
+	WARN_ON(!hash_empty(tbl->hlist));
 	mutex_destroy(&tbl->lock);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 585bdc8383ee..53d2979e9457 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -411,9 +411,14 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
 {
 	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+	u8 log_wqe_size, log_stride_size;
 
-	return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
-	       mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+	log_wqe_size = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
+	log_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+	WARN(log_wqe_size < log_stride_size,
+	     "Log WQE size %u < log stride size %u (page shift %u, umr mode %d, xsk on? %d)\n",
+	     log_wqe_size, log_stride_size, page_shift, umr_mode, !!xsk);
+	return log_wqe_size - log_stride_size;
 }
 
 u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)
@@ -581,11 +586,16 @@ int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
 	bool unaligned = xsk ? xsk->unaligned : false;
 	u16 max_mtu_pkts;
 
-	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
+	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode)) {
+		mlx5_core_err(mdev, "Striding RQ for XSK can't be activated with page_shift %u and umr_mode %d\n",
+			      page_shift, umr_mode);
 		return -EOPNOTSUPP;
+	}
 
-	if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
+	if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk)) {
+		mlx5_core_err(mdev, "Striding RQ linear mode for XSK can't be activated with current params\n");
 		return -EINVAL;
+	}
 
 	/* Current RQ length is too big for the given frame size, the
 	 * needed number of WQEs exceeds the maximum.
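Note: the WARN added to mlx5e_mpwqe_get_log_num_strides() guards a u8 subtraction that would otherwise wrap silently on underflow. A standalone sketch of the failure mode the check catches (the values are hypothetical, chosen only to demonstrate the wrap):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t log_wqe_size = 14;    /* hypothetical: 16 KB WQE */
	uint8_t log_stride_size = 16; /* hypothetical bad config: 64 KB stride */

	/* Integer promotion yields 14 - 16 == -2; storing it back in a u8
	 * wraps to 254, i.e. an absurd 2^254 strides per WQE, which is why
	 * the driver now WARNs before returning the difference. */
	uint8_t log_num_strides = log_wqe_size - log_stride_size;

	printf("log_num_strides = %u\n", log_num_strides);
	return 0;
}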
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index 2b7fd1c0e643..f575646d2f50 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -95,8 +95,6 @@ struct mlx5e_tc_flow {
 	 */
 	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
 	struct mlx5e_tc_flow *peer_flow;
-	struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
-	struct mlx5e_mod_hdr_handle *slow_mh; /* attached mod header instance for slow path */
 	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
 	struct list_head hairpin; /* flows sharing the same hairpin */
 	struct list_head peer; /* flows with peer flow */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index e6f64d890fb3..83bb0811e774 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -745,8 +745,6 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
 		if (err)
 			goto out;
 
-		esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value,
-						      misc_parameters.vxlan_vni);
 		esw_attr->rx_tun_attr->decap_vport = vport_num;
 	} else if (netif_is_ovs_master(attr.route_dev) && mlx5e_tc_int_port_supported(esw)) {
 		int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 2aaf8ab857b8..780224fd67a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -1349,7 +1349,8 @@ static void mlx5e_invalidate_encap(struct mlx5e_priv *priv,
 			mlx5e_tc_unoffload_from_slow_path(esw, flow);
 		else
 			mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr);
-		mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
+
+		mlx5e_tc_detach_mod_hdr(priv, flow, attr);
 		attr->modify_hdr = NULL;
 
 		esw_attr->dests[flow->tmp_entry_index].flags &=
@@ -1405,7 +1406,7 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
 			continue;
 		}
 
-		err = mlx5e_tc_add_flow_mod_hdr(priv, flow, attr);
+		err = mlx5e_tc_attach_mod_hdr(priv, flow, attr);
 		if (err) {
 			mlx5_core_warn(priv->mdev, "Failed to update flow mod_hdr err=%d",
 				       err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 853f312cd757..5578f92f7e0f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -445,7 +445,7 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
 static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
 {
-	WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < mlx5e_get_max_sq_wqebbs(mdev));
+	WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < (u16)mlx5e_get_max_sq_wqebbs(mdev));
 
 	/* A WQE must not cross the page boundary, hence two conditions:
 	 * 1. Its size must not exceed the page size.
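Note: for context on the mlx5e_stop_room_for_wqe() hunk, the rule the truncated comment describes is that a WQE of X WQEBBs may need up to X - 1 WQEBBs of NOP padding so it never straddles a page boundary, giving a reserve of 2 * X - 1. A minimal sketch of that arithmetic (constants assumed for illustration, not taken verbatim from the driver):

#include <assert.h>
#include <stdint.h>

#define SEND_WQE_BB 64u   /* basic block size in bytes, as in mlx5 */
#define PAGE_SZ     4096u

static uint16_t stop_room_for_wqe(uint16_t wqe_size_bbs)
{
	/* Condition 1: the WQE itself must fit in one page. */
	assert(wqe_size_bbs <= PAGE_SZ / SEND_WQE_BB);

	/* Worst case, only wqe_size_bbs - 1 blocks remain before the page
	 * boundary and must be padded with NOPs, so reserve 2 * X - 1. */
	return 2 * wqe_size_bbs - 1;
}

int main(void)
{
	return stop_room_for_wqe(16) == 31 ? 0 : 1;
}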
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index ff03c43833bb..81a567e17264 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -7,6 +7,18 @@
 #include "en/health.h"
 #include <net/xdp_sock_drv.h>
 
+static int mlx5e_legacy_rq_validate_xsk(struct mlx5_core_dev *mdev,
+					struct mlx5e_params *params,
+					struct mlx5e_xsk_param *xsk)
+{
+	if (!mlx5e_rx_is_linear_skb(mdev, params, xsk)) {
+		mlx5_core_err(mdev, "Legacy RQ linear mode for XSK can't be activated with current params\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 /* The limitation of 2048 can be altered, but shouldn't go beyond the minimal
  * stride size of striding RQ.
  */
@@ -17,8 +29,11 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
 			      struct mlx5_core_dev *mdev)
 {
 	/* AF_XDP doesn't support frames larger than PAGE_SIZE. */
-	if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
+	if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
+		mlx5_core_err(mdev, "XSK chunk size %u out of bounds [%u, %lu]\n", xsk->chunk_size,
+			      MLX5E_MIN_XSK_CHUNK_SIZE, PAGE_SIZE);
 		return false;
+	}
 
 	/* frag_sz is different for regular and XSK RQs, so ensure that linear
 	 * SKB mode is possible.
@@ -27,7 +42,7 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 		return !mlx5e_mpwrq_validate_xsk(mdev, params, xsk);
 	default: /* MLX5_WQ_TYPE_CYCLIC */
-		return mlx5e_rx_is_linear_skb(mdev, params, xsk);
+		return !mlx5e_legacy_rq_validate_xsk(mdev, params, xsk);
 	}
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 0c04a5e7c274..c8377b4c8c8e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -646,36 +646,36 @@ get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
 	       &tc->mod_hdr;
 }
 
-static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
-				struct mlx5e_tc_flow *flow,
-				struct mlx5e_tc_flow_parse_attr *parse_attr)
+int mlx5e_tc_attach_mod_hdr(struct mlx5e_priv *priv,
+			    struct mlx5e_tc_flow *flow,
+			    struct mlx5_flow_attr *attr)
 {
-	struct mlx5_modify_hdr *modify_hdr;
 	struct mlx5e_mod_hdr_handle *mh;
 
 	mh = mlx5e_mod_hdr_attach(priv->mdev,
 				  get_mod_hdr_table(priv, flow),
 				  mlx5e_get_flow_namespace(flow),
-				  &parse_attr->mod_hdr_acts);
+				  &attr->parse_attr->mod_hdr_acts);
 	if (IS_ERR(mh))
 		return PTR_ERR(mh);
 
-	modify_hdr = mlx5e_mod_hdr_get(mh);
-	flow->attr->modify_hdr = modify_hdr;
-	flow->mh = mh;
+	WARN_ON(attr->modify_hdr);
+	attr->modify_hdr = mlx5e_mod_hdr_get(mh);
+	attr->mh = mh;
 
 	return 0;
 }
 
-static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
-				 struct mlx5e_tc_flow *flow)
+void mlx5e_tc_detach_mod_hdr(struct mlx5e_priv *priv,
+			     struct mlx5e_tc_flow *flow,
+			     struct mlx5_flow_attr *attr)
 {
 	/* flow wasn't fully initialized */
-	if (!flow->mh)
+	if (!attr->mh)
 		return;
 
 	mlx5e_mod_hdr_detach(priv->mdev,
 			     get_mod_hdr_table(priv, flow),
-			     flow->mh);
-	flow->mh = NULL;
+			     attr->mh);
+	attr->mh = NULL;
 }
 
 static
@@ -1433,7 +1433,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 	}
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
-		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
+		err = mlx5e_tc_attach_mod_hdr(priv, flow, attr);
 		if (err)
 			return err;
 	}
@@ -1493,7 +1493,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
 		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
-		mlx5e_detach_mod_hdr(priv, flow);
+		mlx5e_tc_detach_mod_hdr(priv, flow, attr);
 	}
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
@@ -1604,7 +1604,7 @@ skip_restore:
 		goto err_offload;
 	}
 
-	flow->slow_mh = mh;
+	flow->attr->slow_mh = mh;
 	flow->chain_mapping = chain_mapping;
 	flow_flag_set(flow, SLOW);
 
@@ -1629,6 +1629,7 @@ err_get_chain:
 void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
 				       struct mlx5e_tc_flow *flow)
 {
+	struct mlx5e_mod_hdr_handle *slow_mh = flow->attr->slow_mh;
 	struct mlx5_flow_attr *slow_attr;
 
 	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
@@ -1641,16 +1642,16 @@ void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
 	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 	slow_attr->esw_attr->split_count = 0;
 	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
-	if (flow->slow_mh) {
+	if (slow_mh) {
 		slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
-		slow_attr->modify_hdr = mlx5e_mod_hdr_get(flow->slow_mh);
+		slow_attr->modify_hdr = mlx5e_mod_hdr_get(slow_mh);
 	}
 	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
-	if (flow->slow_mh) {
-		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), flow->slow_mh);
+	if (slow_mh) {
+		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), slow_mh);
 		mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping);
 		flow->chain_mapping = 0;
-		flow->slow_mh = NULL;
+		flow->attr->slow_mh = NULL;
 	}
 	flow_flag_clear(flow, SLOW);
 	kfree(slow_attr);
@@ -1761,26 +1762,6 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
 	return err;
 }
 
-int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
-			      struct mlx5e_tc_flow *flow,
-			      struct mlx5_flow_attr *attr)
-{
-	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
-	struct mlx5_modify_hdr *mod_hdr;
-
-	mod_hdr = mlx5_modify_header_alloc(priv->mdev,
-					   mlx5e_get_flow_namespace(flow),
-					   mod_hdr_acts->num_actions,
-					   mod_hdr_acts->actions);
-	if (IS_ERR(mod_hdr))
-		return PTR_ERR(mod_hdr);
-
-	WARN_ON(attr->modify_hdr);
-	attr->modify_hdr = mod_hdr;
-
-	return 0;
-}
-
 static int
 set_encap_dests(struct mlx5e_priv *priv,
 		struct mlx5e_tc_flow *flow,
@@ -1900,7 +1881,6 @@ verify_attr_actions(u32 actions, struct netlink_ext_ack *extack)
 static int
 post_process_attr(struct mlx5e_tc_flow *flow,
 		  struct mlx5_flow_attr *attr,
-		  bool is_post_act_attr,
 		  struct netlink_ext_ack *extack)
 {
 	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
@@ -1922,27 +1902,21 @@ post_process_attr(struct mlx5e_tc_flow *flow,
 	}
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
-		if (vf_tun || is_post_act_attr) {
-			err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr);
-			if (err)
-				goto err_out;
-		} else {
-			err = mlx5e_attach_mod_hdr(flow->priv, flow, attr->parse_attr);
-			if (err)
-				goto err_out;
-		}
+		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr);
+		if (err)
+			goto err_out;
 	}
 
 	if (attr->branch_true &&
 	    attr->branch_true->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
-		err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr->branch_true);
+		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_true);
 		if (err)
 			goto err_out;
 	}
 
 	if (attr->branch_false &&
 	    attr->branch_false->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
-		err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr->branch_false);
+		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_false);
 		if (err)
 			goto err_out;
 	}
@@ -2056,7 +2030,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 		esw_attr->int_port = int_port;
 	}
 
-	err = post_process_attr(flow, attr, false, extack);
+	err = post_process_attr(flow, attr, extack);
 	if (err)
 		goto err_out;
 
@@ -2141,10 +2115,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
 		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
-		if (vf_tun && attr->modify_hdr)
-			mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
-		else
-			mlx5e_detach_mod_hdr(priv, flow);
+		mlx5e_tc_detach_mod_hdr(priv, flow, attr);
 	}
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
@@ -2624,13 +2595,13 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 		err = mlx5e_tc_set_attr_rx_tun(flow, spec);
 		if (err)
 			return err;
-	} else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
+	} else if (tunnel) {
 		struct mlx5_flow_spec *tmp_spec;
 
 		tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
 		if (!tmp_spec) {
-			NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec");
-			netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec");
+			NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for tunnel tmp spec");
+			netdev_warn(priv->netdev, "Failed to allocate memory for tunnel tmp spec");
 			return -ENOMEM;
 		}
 		memcpy(tmp_spec, spec, sizeof(*tmp_spec));
@@ -3963,7 +3934,7 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
 		if (err)
 			goto out_free;
 
-		err = post_process_attr(flow, attr, true, extack);
+		err = post_process_attr(flow, attr, extack);
 		if (err)
 			goto out_free;
 
@@ -4530,8 +4501,7 @@ mlx5_free_flow_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
 		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
-		if (attr->modify_hdr)
-			mlx5_modify_header_dealloc(flow->priv->mdev, attr->modify_hdr);
+		mlx5e_tc_detach_mod_hdr(flow->priv, flow, attr);
 	}
 }
 
@@ -4653,9 +4623,6 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 	if (err)
 		goto err_free;
 
-	/* always set IP version for indirect table handling */
-	flow->attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
-
 	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
 	if (err)
 		goto err_free;
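Note: the en_tc.c refactor above moves mod-header bookkeeping from the flow onto the attribute and funnels every caller through one attach/detach pair backed by shared, refcounted handles. A stripped-down sketch of that get/put pattern (names and types are illustrative, not the driver's):

#include <stdlib.h>

struct mod_hdr_handle {
	int refcnt;                 /* shared by flows with identical actions */
	/* ... the hardware modify-header object would live here ... */
};

struct flow_attr {
	struct mod_hdr_handle *mh;  /* per-attribute, as in the patch */
};

static int attach_mod_hdr(struct flow_attr *attr, struct mod_hdr_handle *shared)
{
	if (attr->mh)               /* mirrors the WARN_ON(attr->modify_hdr) */
		return -1;
	shared->refcnt++;
	attr->mh = shared;
	return 0;
}

static void detach_mod_hdr(struct flow_attr *attr)
{
	if (!attr->mh)              /* flow wasn't fully initialized */
		return;
	if (--attr->mh->refcnt == 0)
		free(attr->mh);
	attr->mh = NULL;
}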
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 50af70ef22f3..ce516dc7f3fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -71,6 +71,8 @@ struct mlx5_flow_attr {
 	u32 action;
 	struct mlx5_fc *counter;
 	struct mlx5_modify_hdr *modify_hdr;
+	struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
+	struct mlx5e_mod_hdr_handle *slow_mh; /* attached mod header instance for slow path */
 	struct mlx5_ct_attr ct_attr;
 	struct mlx5e_sample_attr sample_attr;
 	struct mlx5e_meter_attr meter_attr;
@@ -82,7 +84,6 @@ struct mlx5_flow_attr {
 	struct mlx5_flow_table *dest_ft;
 	u8 inner_match_level;
 	u8 outer_match_level;
-	u8 ip_version;
 	u8 tun_ip_version;
 	int tunnel_id; /* mapped tunnel id */
 	u32 flags;
@@ -134,7 +135,6 @@ struct mlx5_rx_tun_attr {
 		__be32 v4;
 		struct in6_addr v6;
 	} dst_ip; /* Valid if decap_vport is not zero */
-	u32 vni;
 };
 
 #define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
@@ -285,9 +285,13 @@ int mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
 					 enum mlx5e_tc_attr_to_reg type,
 					 u32 data);
 
-int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
-			      struct mlx5e_tc_flow *flow,
-			      struct mlx5_flow_attr *attr);
+int mlx5e_tc_attach_mod_hdr(struct mlx5e_priv *priv,
+			    struct mlx5e_tc_flow *flow,
+			    struct mlx5_flow_attr *attr);
+
+void mlx5e_tc_detach_mod_hdr(struct mlx5e_priv *priv,
+			     struct mlx5e_tc_flow *flow,
+			     struct mlx5_flow_attr *attr);
 
 void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
 			    struct flow_match_basic *match, bool outer,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
index c9a91158e99c..9959e9fd15a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
@@ -16,18 +16,12 @@
 #include "lib/fs_chains.h"
 #include "en/mod_hdr.h"
 
-#define MLX5_ESW_INDIR_TABLE_SIZE 128
-#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX (MLX5_ESW_INDIR_TABLE_SIZE - 2)
+#define MLX5_ESW_INDIR_TABLE_SIZE 2
+#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX (MLX5_ESW_INDIR_TABLE_SIZE - 2)
 #define MLX5_ESW_INDIR_TABLE_FWD_IDX (MLX5_ESW_INDIR_TABLE_SIZE - 1)
 
 struct mlx5_esw_indir_table_rule {
-	struct list_head list;
 	struct mlx5_flow_handle *handle;
-	union {
-		__be32 v4;
-		struct in6_addr v6;
-	} dst_ip;
-	u32 vni;
 	struct mlx5_modify_hdr *mh;
 	refcount_t refcnt;
 };
@@ -38,12 +32,10 @@ struct mlx5_esw_indir_table_entry {
 	struct mlx5_flow_group *recirc_grp;
 	struct mlx5_flow_group *fwd_grp;
 	struct mlx5_flow_handle *fwd_rule;
-	struct list_head recirc_rules;
-	int recirc_cnt;
+	struct mlx5_esw_indir_table_rule *recirc_rule;
 	int fwd_ref;
 
 	u16 vport;
-	u8 ip_version;
 };
 
 struct mlx5_esw_indir_table {
@@ -89,7 +81,6 @@ mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
 	return esw_attr->in_rep->vport == MLX5_VPORT_UPLINK &&
 		vf_sf_vport &&
 		esw->dev == dest_mdev &&
-		attr->ip_version &&
 		attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE;
 }
 
@@ -101,27 +92,8 @@ mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr)
 	return esw_attr->rx_tun_attr ? esw_attr->rx_tun_attr->decap_vport : 0;
 }
 
-static struct mlx5_esw_indir_table_rule *
-mlx5_esw_indir_table_rule_lookup(struct mlx5_esw_indir_table_entry *e,
-				 struct mlx5_esw_flow_attr *attr)
-{
-	struct mlx5_esw_indir_table_rule *rule;
-
-	list_for_each_entry(rule, &e->recirc_rules, list)
-		if (rule->vni == attr->rx_tun_attr->vni &&
-		    !memcmp(&rule->dst_ip, &attr->rx_tun_attr->dst_ip,
-			    sizeof(attr->rx_tun_attr->dst_ip)))
-			goto found;
-	return NULL;
-
-found:
-	refcount_inc(&rule->refcnt);
-	return rule;
-}
-
 static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
 					 struct mlx5_flow_attr *attr,
-					 struct mlx5_flow_spec *spec,
 					 struct mlx5_esw_indir_table_entry *e)
 {
 	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
@@ -130,73 +102,18 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
 	struct mlx5_flow_destination dest = {};
 	struct mlx5_esw_indir_table_rule *rule;
 	struct mlx5_flow_act flow_act = {};
-	struct mlx5_flow_spec *rule_spec;
 	struct mlx5_flow_handle *handle;
 	int err = 0;
 	u32 data;
 
-	rule = mlx5_esw_indir_table_rule_lookup(e, esw_attr);
-	if (rule)
+	if (e->recirc_rule) {
+		refcount_inc(&e->recirc_rule->refcnt);
 		return 0;
-
-	if (e->recirc_cnt == MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX)
-		return -EINVAL;
-
-	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
-	if (!rule_spec)
-		return -ENOMEM;
-
-	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
-	if (!rule) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
-					   MLX5_MATCH_MISC_PARAMETERS |
-					   MLX5_MATCH_MISC_PARAMETERS_2;
-	if (MLX5_CAP_FLOWTABLE_NIC_RX(esw->dev, ft_field_support.outer_ip_version)) {
-		MLX5_SET(fte_match_param, rule_spec->match_criteria,
-			 outer_headers.ip_version, 0xf);
-		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version,
-			 attr->ip_version);
-	} else if (attr->ip_version) {
-		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
-				 outer_headers.ethertype);
-		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ethertype,
-			 (attr->ip_version == 4 ? ETH_P_IP : ETH_P_IPV6));
-	} else {
-		err = -EOPNOTSUPP;
-		goto err_ethertype;
 	}
 
-	if (attr->ip_version == 4) {
-		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
-				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
-		MLX5_SET(fte_match_param, rule_spec->match_value,
-			 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
-			 ntohl(esw_attr->rx_tun_attr->dst_ip.v4));
-	} else if (attr->ip_version == 6) {
-		int len = sizeof(struct in6_addr);
-
-		memset(MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
-				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
-		       0xff, len);
-		memcpy(MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
-				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
-		       &esw_attr->rx_tun_attr->dst_ip.v6, len);
-	}
-
-	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
-			 misc_parameters.vxlan_vni);
-	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters.vxlan_vni,
-		 MLX5_GET(fte_match_param, spec->match_value, misc_parameters.vxlan_vni));
-
-	MLX5_SET(fte_match_param, rule_spec->match_criteria,
-		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
-	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
-		 mlx5_eswitch_get_vport_metadata_for_match(esw_attr->in_mdev->priv.eswitch,
-							   MLX5_VPORT_UPLINK));
+	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+	if (!rule)
+		return -ENOMEM;
 
 	/* Modify flow source to recirculate packet */
 	data = mlx5_eswitch_get_vport_metadata_for_set(esw, esw_attr->rx_tun_attr->decap_vport);
@@ -219,13 +136,14 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
 
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
 	flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
+	flow_act.fg = e->recirc_grp;
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 	dest.ft = mlx5_chains_get_table(chains, 0, 1, 0);
 	if (IS_ERR(dest.ft)) {
 		err = PTR_ERR(dest.ft);
 		goto err_table;
 	}
-	handle = mlx5_add_flow_rules(e->ft, rule_spec, &flow_act, &dest, 1);
+	handle = mlx5_add_flow_rules(e->ft, NULL, &flow_act, &dest, 1);
 	if (IS_ERR(handle)) {
 		err = PTR_ERR(handle);
 		goto err_handle;
@@ -233,14 +151,10 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
 
 	mlx5e_mod_hdr_dealloc(&mod_acts);
 	rule->handle = handle;
-	rule->vni = esw_attr->rx_tun_attr->vni;
 	rule->mh = flow_act.modify_hdr;
-	memcpy(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip,
-	       sizeof(esw_attr->rx_tun_attr->dst_ip));
 	refcount_set(&rule->refcnt, 1);
-	list_add(&rule->list, &e->recirc_rules);
-	e->recirc_cnt++;
-	goto out;
+	e->recirc_rule = rule;
+	return 0;
 
 err_handle:
 	mlx5_chains_put_table(chains, 0, 1, 0);
@@ -250,89 +164,44 @@ err_mod_hdr_alloc:
 err_mod_hdr_regc1:
 	mlx5e_mod_hdr_dealloc(&mod_acts);
 err_mod_hdr_regc0:
-err_ethertype:
 	kfree(rule);
-out:
-	kvfree(rule_spec);
 	return err;
 }
 
 static void mlx5_esw_indir_table_rule_put(struct mlx5_eswitch *esw,
-					  struct mlx5_flow_attr *attr,
 					  struct mlx5_esw_indir_table_entry *e)
 {
-	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+	struct mlx5_esw_indir_table_rule *rule = e->recirc_rule;
 	struct mlx5_fs_chains *chains = esw_chains(esw);
-	struct mlx5_esw_indir_table_rule *rule;
 
-	list_for_each_entry(rule, &e->recirc_rules, list)
-		if (rule->vni == esw_attr->rx_tun_attr->vni &&
-		    !memcmp(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip,
-			    sizeof(esw_attr->rx_tun_attr->dst_ip)))
-			goto found;
-
-	return;
+	if (!rule)
+		return;
 
-found:
 	if (!refcount_dec_and_test(&rule->refcnt))
 		return;
 
 	mlx5_del_flow_rules(rule->handle);
 	mlx5_chains_put_table(chains, 0, 1, 0);
 	mlx5_modify_header_dealloc(esw->dev, rule->mh);
-	list_del(&rule->list);
 	kfree(rule);
-	e->recirc_cnt--;
+	e->recirc_rule = NULL;
 }
 
-static int mlx5_create_indir_recirc_group(struct mlx5_eswitch *esw,
-					  struct mlx5_flow_attr *attr,
-					  struct mlx5_flow_spec *spec,
-					  struct mlx5_esw_indir_table_entry *e)
+static int mlx5_create_indir_recirc_group(struct mlx5_esw_indir_table_entry *e)
 {
 	int err = 0, inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	u32 *in, *match;
+	u32 *in;
 
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
-	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
-		 MLX5_MATCH_MISC_PARAMETERS | MLX5_MATCH_MISC_PARAMETERS_2);
-	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
-
-	if (MLX5_CAP_FLOWTABLE_NIC_RX(esw->dev, ft_field_support.outer_ip_version))
-		MLX5_SET(fte_match_param, match, outer_headers.ip_version, 0xf);
-	else
-		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ethertype);
-
-	if (attr->ip_version == 4) {
-		MLX5_SET_TO_ONES(fte_match_param, match,
-				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
-	} else if (attr->ip_version == 6) {
-		memset(MLX5_ADDR_OF(fte_match_param, match,
-				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
-		       0xff, sizeof(struct in6_addr));
-	} else {
-		err = -EOPNOTSUPP;
-		goto out;
-	}
-
-	MLX5_SET_TO_ONES(fte_match_param, match, misc_parameters.vxlan_vni);
-	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
-		 mlx5_eswitch_get_vport_metadata_mask());
 	MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
-	MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX);
+	MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_RECIRC_IDX);
 	e->recirc_grp = mlx5_create_flow_group(e->ft, in);
-	if (IS_ERR(e->recirc_grp)) {
+	if (IS_ERR(e->recirc_grp))
 		err = PTR_ERR(e->recirc_grp);
-		goto out;
-	}
 
-	INIT_LIST_HEAD(&e->recirc_rules);
-	e->recirc_cnt = 0;
-
-out:
 	kvfree(in);
 	return err;
 }
@@ -343,19 +212,12 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
 	int err = 0, inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
 	struct mlx5_flow_destination dest = {};
 	struct mlx5_flow_act flow_act = {};
-	struct mlx5_flow_spec *spec;
 	u32 *in;
 
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
-	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
-	if (!spec) {
-		kvfree(in);
-		return -ENOMEM;
-	}
-
 	/* Hold one entry */
 	MLX5_SET(create_flow_group_in, in, start_flow_index, MLX5_ESW_INDIR_TABLE_FWD_IDX);
 	MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_FWD_IDX);
@@ -366,25 +228,25 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
 	}
 
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+	flow_act.fg = e->fwd_grp;
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
 	dest.vport.num = e->vport;
 	dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
 	dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
-	e->fwd_rule = mlx5_add_flow_rules(e->ft, spec, &flow_act, &dest, 1);
+	e->fwd_rule = mlx5_add_flow_rules(e->ft, NULL, &flow_act, &dest, 1);
 	if (IS_ERR(e->fwd_rule)) {
 		mlx5_destroy_flow_group(e->fwd_grp);
 		err = PTR_ERR(e->fwd_rule);
 	}
 
 err_out:
-	kvfree(spec);
 	kvfree(in);
 	return err;
 }
 
 static struct mlx5_esw_indir_table_entry *
 mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
-				  struct mlx5_flow_spec *spec, u16 vport, bool decap)
+				  u16 vport, bool decap)
 {
 	struct mlx5_flow_table_attr ft_attr = {};
 	struct mlx5_flow_namespace *root_ns;
@@ -412,15 +274,14 @@ mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_att
 	}
 	e->ft = ft;
 	e->vport = vport;
-	e->ip_version = attr->ip_version;
 	e->fwd_ref = !decap;
 
-	err = mlx5_create_indir_recirc_group(esw, attr, spec, e);
+	err = mlx5_create_indir_recirc_group(e);
 	if (err)
 		goto recirc_grp_err;
 
 	if (decap) {
-		err = mlx5_esw_indir_table_rule_get(esw, attr, spec, e);
+		err = mlx5_esw_indir_table_rule_get(esw, attr, e);
 		if (err)
 			goto recirc_rule_err;
 	}
@@ -430,13 +291,13 @@ mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_att
 		goto fwd_grp_err;
 
 	hash_add(esw->fdb_table.offloads.indir->table, &e->hlist,
-		 vport << 16 | attr->ip_version);
+		 vport << 16);
 
 	return e;
 
 fwd_grp_err:
 	if (decap)
-		mlx5_esw_indir_table_rule_put(esw, attr, e);
+		mlx5_esw_indir_table_rule_put(esw, e);
 recirc_rule_err:
 	mlx5_destroy_flow_group(e->recirc_grp);
 recirc_grp_err:
@@ -447,13 +308,13 @@ tbl_err:
 }
 
 static struct mlx5_esw_indir_table_entry *
-mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport, u8 ip_version)
+mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport)
 {
 	struct mlx5_esw_indir_table_entry *e;
-	u32 key = vport << 16 | ip_version;
+	u32 key = vport << 16;
 
 	hash_for_each_possible(esw->fdb_table.offloads.indir->table, e, hlist, key)
-		if (e->vport == vport && e->ip_version == ip_version)
+		if (e->vport == vport)
 			return e;
 
 	return NULL;
@@ -461,24 +322,23 @@ mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport, u8 ip_ver
 
 struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
 						 struct mlx5_flow_attr *attr,
-						 struct mlx5_flow_spec *spec,
 						 u16 vport, bool decap)
 {
 	struct mlx5_esw_indir_table_entry *e;
 	int err;
 
 	mutex_lock(&esw->fdb_table.offloads.indir->lock);
-	e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version);
+	e = mlx5_esw_indir_table_entry_lookup(esw, vport);
 	if (e) {
 		if (!decap) {
 			e->fwd_ref++;
 		} else {
-			err = mlx5_esw_indir_table_rule_get(esw, attr, spec, e);
+			err = mlx5_esw_indir_table_rule_get(esw, attr, e);
 			if (err)
 				goto out_err;
 		}
 	} else {
-		e = mlx5_esw_indir_table_entry_create(esw, attr, spec, vport, decap);
+		e = mlx5_esw_indir_table_entry_create(esw, attr, vport, decap);
 		if (IS_ERR(e)) {
 			err = PTR_ERR(e);
 			esw_warn(esw->dev, "Failed to create indirection table, err %d.\n", err);
@@ -494,22 +354,21 @@ out_err:
 }
 
 void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
-			      struct mlx5_flow_attr *attr,
 			      u16 vport, bool decap)
 {
 	struct mlx5_esw_indir_table_entry *e;
 
 	mutex_lock(&esw->fdb_table.offloads.indir->lock);
-	e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version);
+	e = mlx5_esw_indir_table_entry_lookup(esw, vport);
 	if (!e)
 		goto out;
 
 	if (!decap)
 		e->fwd_ref--;
 	else
-		mlx5_esw_indir_table_rule_put(esw, attr, e);
+		mlx5_esw_indir_table_rule_put(esw, e);
 
-	if (e->fwd_ref || e->recirc_cnt)
+	if (e->fwd_ref || e->recirc_rule)
 		goto out;
 
 	hash_del(&e->hlist);
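Note: with the VNI and dst_ip matching dropped, a vport needs at most one recirculation rule, so the per-entry rule list and its lookup collapse into a single cached, refcounted pointer and the table shrinks from 128 slots to 2. An illustrative reduction of the new lifecycle (simplified, not the driver's code):

#include <errno.h>
#include <stdlib.h>

struct indir_rule {
	int refcnt;
	/* ... steering rule handle and modify header ... */
};

struct indir_entry {
	struct indir_rule *recirc_rule; /* NULL until the first decap user */
	int fwd_ref;
};

static int rule_get(struct indir_entry *e)
{
	if (e->recirc_rule) {           /* reuse the single shared rule */
		e->recirc_rule->refcnt++;
		return 0;
	}
	e->recirc_rule = calloc(1, sizeof(*e->recirc_rule));
	if (!e->recirc_rule)
		return -ENOMEM;
	e->recirc_rule->refcnt = 1;     /* the real code also programs the rule */
	return 0;
}

static void rule_put(struct indir_entry *e)
{
	if (!e->recirc_rule || --e->recirc_rule->refcnt)
		return;
	free(e->recirc_rule);           /* the real code also tears the rule down */
	e->recirc_rule = NULL;
}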
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
index 21d56b49d14b..036f5b3a341b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
@@ -13,10 +13,8 @@ mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir);
 
 struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
 						 struct mlx5_flow_attr *attr,
-						 struct mlx5_flow_spec *spec,
 						 u16 vport, bool decap);
 void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
-			      struct mlx5_flow_attr *attr,
 			      u16 vport, bool decap);
 
 bool
@@ -44,7 +42,6 @@ mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir)
 static inline struct mlx5_flow_table *
 mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
 			 struct mlx5_flow_attr *attr,
-			 struct mlx5_flow_spec *spec,
 			 u16 vport, bool decap)
 {
 	return ERR_PTR(-EOPNOTSUPP);
@@ -52,7 +49,6 @@ mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
 
 static inline void
 mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
-			 struct mlx5_flow_attr *attr,
 			 u16 vport, bool decap)
 {
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index bbb6dab3b21f..6f11b46ee79a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1250,7 +1250,7 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
 		if (err)
 			return err;
 	} else {
-		esw_warn(dev, "engress ACL is not supported by FW\n");
+		esw_warn(dev, "egress ACL is not supported by FW\n");
 	}
 
 	if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
@@ -1406,9 +1406,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
 	mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
 	if (clear_vf)
 		mlx5_eswitch_clear_vf_vports_info(esw);
-	/* If disabling sriov in switchdev mode, free meta rules here
-	 * because it depends on num_vfs.
-	 */
+
 	if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
 		struct devlink *devlink = priv_to_devlink(esw->dev);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index c981fa77f439..5fb9d5e99734 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -179,15 +179,14 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
 
 static int
 esw_setup_decap_indir(struct mlx5_eswitch *esw,
-		      struct mlx5_flow_attr *attr,
-		      struct mlx5_flow_spec *spec)
+		      struct mlx5_flow_attr *attr)
 {
 	struct mlx5_flow_table *ft;
 
 	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
 		return -EOPNOTSUPP;
 
-	ft = mlx5_esw_indir_table_get(esw, attr, spec,
+	ft = mlx5_esw_indir_table_get(esw, attr,
 				      mlx5_esw_indir_table_decap_vport(attr), true);
 	return PTR_ERR_OR_ZERO(ft);
 }
@@ -197,7 +196,7 @@ esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
 			struct mlx5_flow_attr *attr)
 {
 	if (mlx5_esw_indir_table_decap_vport(attr))
-		mlx5_esw_indir_table_put(esw, attr,
+		mlx5_esw_indir_table_put(esw,
 					 mlx5_esw_indir_table_decap_vport(attr),
 					 true);
 }
@@ -235,7 +234,6 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
 		  struct mlx5_flow_act *flow_act,
 		  struct mlx5_eswitch *esw,
 		  struct mlx5_flow_attr *attr,
-		  struct mlx5_flow_spec *spec,
 		  int i)
 {
 	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
@@ -243,7 +241,7 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
 	dest[i].ft = attr->dest_ft;
 
 	if (mlx5_esw_indir_table_decap_vport(attr))
-		return esw_setup_decap_indir(esw, attr, spec);
+		return esw_setup_decap_indir(esw, attr);
 	return 0;
 }
 
@@ -298,7 +296,7 @@ static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_
 			mlx5_chains_put_table(chains, 0, 1, 0);
 		else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
 						     esw_attr->dests[i].mdev))
-			mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport,
+			mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport,
 						 false);
 }
 
@@ -384,7 +382,6 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
 		      struct mlx5_flow_act *flow_act,
 		      struct mlx5_eswitch *esw,
 		      struct mlx5_flow_attr *attr,
-		      struct mlx5_flow_spec *spec,
 		      bool ignore_flow_lvl,
 		      int *i)
 {
@@ -399,7 +396,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
 			flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
 		dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 
-		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec,
+		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
 						       esw_attr->dests[j].rep->vport, false);
 		if (IS_ERR(dest[*i].ft)) {
 			err = PTR_ERR(dest[*i].ft);
@@ -408,7 +405,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
 	}
 
 	if (mlx5_esw_indir_table_decap_vport(attr)) {
-		err = esw_setup_decap_indir(esw, attr, spec);
+		err = esw_setup_decap_indir(esw, attr);
 		if (err)
 			goto err_indir_tbl_get;
 	}
@@ -511,14 +508,14 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
 		err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
 		(*i)++;
 	} else if (esw_is_indir_table(esw, attr)) {
-		err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
+		err = esw_setup_indir_table(dest, flow_act, esw, attr, true, i);
 	} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
 		err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
 	} else {
 		*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
 
 		if (attr->dest_ft) {
-			err = esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
+			err = esw_setup_ft_dest(dest, flow_act, esw, attr, *i);
 			(*i)++;
 		} else if (attr->dest_chain) {
 			err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
@@ -727,7 +724,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 	for (i = 0; i < esw_attr->split_count; i++) {
 		if (esw_is_indir_table(esw, attr))
-			err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i);
+			err = esw_setup_indir_table(dest, &flow_act, esw, attr, false, &i);
 		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
 			err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains,
 							       attr, &i);
@@ -3575,9 +3572,9 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 	if (IS_ERR(esw))
 		return PTR_ERR(esw);
 
-	down_write(&esw->mode_lock);
+	down_read(&esw->mode_lock);
 	err = esw_mode_to_devlink(esw->mode, mode);
-	up_write(&esw->mode_lock);
+	up_read(&esw->mode_lock);
 	return err;
 }
 
@@ -3675,9 +3672,9 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
 	if (IS_ERR(esw))
 		return PTR_ERR(esw);
 
-	down_write(&esw->mode_lock);
+	down_read(&esw->mode_lock);
 	err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
-	up_write(&esw->mode_lock);
+	up_read(&esw->mode_lock);
 	return err;
 }
 
@@ -3749,9 +3746,9 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
 	if (IS_ERR(esw))
 		return PTR_ERR(esw);
 
-	down_write(&esw->mode_lock);
+	down_read(&esw->mode_lock);
 	*encap = esw->offloads.encap;
-	up_write(&esw->mode_lock);
+	up_read(&esw->mode_lock);
 	return 0;
 }
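Note: the three devlink hunks above only read eswitch state, so holding mode_lock for writing serialized the getters against each other for no benefit; down_read() lets them run concurrently while the set paths keep down_write(). The same pattern in miniature, with a pthread rwlock standing in for the kernel rwsem:

#include <pthread.h>

static pthread_rwlock_t mode_lock = PTHREAD_RWLOCK_INITIALIZER;
static int esw_mode;

int eswitch_mode_get(void)
{
	int mode;

	pthread_rwlock_rdlock(&mode_lock);  /* shared: was exclusive before */
	mode = esw_mode;
	pthread_rwlock_unlock(&mode_lock);
	return mode;
}

void eswitch_mode_set(int mode)
{
	pthread_rwlock_wrlock(&mode_lock);  /* setters stay exclusive */
	esw_mode = mode;
	pthread_rwlock_unlock(&mode_lock);
}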
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 69318b143268..75510a12ab02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -69,6 +69,13 @@ enum {
 	MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS	= BIT(0xa),
 };
 
+enum {
+	MLX5_MTUTC_OPERATION_ADJUST_TIME_MIN          = S16_MIN,
+	MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX          = S16_MAX,
+	MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MIN = -200000,
+	MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX = 200000,
+};
+
 static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
 {
 	return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
@@ -86,6 +93,22 @@ static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
 	return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
 }
 
+static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta)
+{
+	s64 min = MLX5_MTUTC_OPERATION_ADJUST_TIME_MIN;
+	s64 max = MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
+
+	if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range)) {
+		min = MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MIN;
+		max = MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX;
+	}
+
+	if (delta < min || delta > max)
+		return false;
+
+	return true;
+}
+
 static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
 {
 	u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
@@ -288,8 +311,8 @@ static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
 	if (!mlx5_modify_mtutc_allowed(mdev))
 		return 0;
 
-	/* HW time adjustment range is s16. If out of range, settime instead */
-	if (delta < S16_MIN || delta > S16_MAX) {
+	/* HW time adjustment range is checked. If out of range, settime instead */
+	if (!mlx5_is_mtutc_time_adj_cap(mdev, delta)) {
 		struct timespec64 ts;
 		s64 ns;
 
@@ -326,6 +349,19 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 	return 0;
 }
 
+static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
+{
+	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+	struct mlx5_core_dev *mdev;
+
+	mdev = container_of(clock, struct mlx5_core_dev, clock);
+
+	if (!mlx5_is_mtutc_time_adj_cap(mdev, delta))
+		return -ERANGE;
+
+	return mlx5_ptp_adjtime(ptp, delta);
+}
+
 static int mlx5_ptp_adjfreq_real_time(struct mlx5_core_dev *mdev, s32 freq)
 {
 	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
@@ -688,6 +724,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
 	.n_pins		= 0,
 	.pps		= 0,
 	.adjfine	= mlx5_ptp_adjfine,
+	.adjphase	= mlx5_ptp_adjphase,
 	.adjtime	= mlx5_ptp_adjtime,
 	.gettimex64	= mlx5_ptp_gettimex,
 	.settime64	= mlx5_ptp_settime,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index a84bdeeed2c6..0b102c651fe2 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -9941,7 +9941,9 @@ struct mlx5_ifc_pcam_reg_bits {
 };
 
 struct mlx5_ifc_mcam_enhanced_features_bits {
-	u8         reserved_at_0[0x5d];
+	u8         reserved_at_0[0x51];
+	u8         mtutc_time_adjustment_extended_range[0x1];
+	u8         reserved_at_52[0xb];
 	u8         mcia_32dwords[0x1];
 	u8         out_pulse_duration_ns[0x1];
 	u8         npps_period[0x1];
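Note: the new .adjphase callback is reached from userspace through clock_adjtime(2) with ADJ_OFFSET on the PTP clock; per the constants above, deltas outside ±32767 ns (or ±200000 ns when mtutc_time_adjustment_extended_range is set) return -ERANGE instead of falling back to settime. A hedged userspace sketch (/dev/ptp0 and the FD_TO_CLOCKID macro follow the kernel's testptp.c conventions and are assumptions here):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/timex.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct timex tx = {
		.modes  = ADJ_OFFSET | ADJ_NANO, /* ADJ_OFFSET drives adjphase */
		.offset = 20000,                 /* ns; within the base range */
	};
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (clock_adjtime(FD_TO_CLOCKID(fd), &tx) < 0)
		perror("clock_adjtime");
	close(fd);
	return 0;
}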