Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_switch.c')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c  | 1667
1 file changed, 816 insertions, 851 deletions
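One conversion that recurs throughout the hunks below is replacing single-element admin queue buffers that were sized with struct_size() and heap-allocated with kzalloc()/devm_kzalloc() by on-stack buffers declared with DEFINE_RAW_FLEX() and sized with __struct_size() (see the ice_aq_alloc_free_vsi_list(), ice_alloc_recipe(), ice_alloc_res_cntr() and ice_free_res_cntr() hunks). The sketch below is illustrative only and contrasts the two patterns: struct demo, demo_send() and the demo_alloc_one_*() functions are made-up stand-ins rather than driver code, while struct_size(), DEFINE_RAW_FLEX() and __struct_size() are the real helpers from <linux/overflow.h>.

/* --- illustrative sketch, not part of the diff --- */
#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_elem {
	u16 sw_resp;
};

struct demo {
	u16 num_elems;
	struct demo_elem elem[];	/* flexible array member */
};

/* hypothetical stand-in for sending an admin queue command */
static int demo_send(struct demo *buf, u16 buf_len)
{
	return 0;
}

/* old pattern: size with struct_size(), allocate on the heap, free on exit */
static int demo_alloc_one_old(void)
{
	struct demo *buf;
	u16 buf_len = struct_size(buf, elem, 1);
	int err;

	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf->num_elems = 1;
	err = demo_send(buf, buf_len);

	kfree(buf);
	return err;
}

/* new pattern: zero-initialized on-stack buffer, nothing to allocate or free */
static int demo_alloc_one_new(void)
{
	DEFINE_RAW_FLEX(struct demo, buf, elem, 1);
	u16 buf_len = __struct_size(buf);

	buf->num_elems = 1;
	return demo_send(buf, buf_len);
}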
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 9b762f7972ce..84848f0123e7 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -3,6 +3,7 @@
 
 #include "ice_lib.h"
 #include "ice_switch.h"
+#include "ice_trace.h"
 
 #define ICE_ETH_DA_OFFSET		0
 #define ICE_ETH_ETHTYPE_OFFSET		12
@@ -20,12 +21,11 @@
  * byte 0 = 0x2: to identify it as locally administered DA MAC
  * byte 6 = 0x2: to identify it as locally administered SA MAC
  * byte 12 = 0x81 & byte 13 = 0x00:
- *	In case of VLAN filter first two bytes defines ether type (0x8100)
- *	and remaining two bytes are placeholder for programming a given VLAN ID
- *	In case of Ether type filter it is treated as header without VLAN tag
- *	and byte 12 and 13 is used to program a given Ether type instead
+ *	In case of VLAN filter first two bytes defines ether type (0x8100)
+ *	and remaining two bytes are placeholder for programming a given VLAN ID
+ *	In case of Ether type filter it is treated as header without VLAN tag
+ *	and byte 12 and 13 is used to program a given Ether type instead
  */
-#define DUMMY_ETH_HDR_LEN		16
 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
 							0x2, 0, 0, 0, 0, 0,
 							0x81, 0, 0, 0};
@@ -43,6 +43,7 @@ enum {
 	ICE_PKT_KMALLOC		= BIT(9),
 	ICE_PKT_PPPOE		= BIT(10),
 	ICE_PKT_L2TPV3		= BIT(11),
+	ICE_PKT_PFCP		= BIT(12),
 };
 
 struct ice_dummy_pkt_offsets {
@@ -1111,6 +1112,77 @@ ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
 	0x00, 0x00,
 };
 
+ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv4) = {
+	{ ICE_MAC_OFOS,		0 },
+	{ ICE_ETYPE_OL,		12 },
+	{ ICE_IPV4_OFOS,	14 },
+	{ ICE_UDP_ILOS,		34 },
+	{ ICE_PFCP,		42 },
+	{ ICE_PROTOCOL_LAST,	0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv4) = {
+	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+
+	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
+
+	0x45, 0x00, 0x00, 0x2c, /* ICE_IPV4_OFOS 14 */
+	0x00, 0x01, 0x00, 0x00,
+	0x00, 0x11, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+
+	0x00, 0x00, 0x22, 0x65, /* ICE_UDP_ILOS 34 */
+	0x00, 0x18, 0x00, 0x00,
+
+	0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 42 */
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+
+	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv6) = {
+	{ ICE_MAC_OFOS,		0 },
+	{ ICE_ETYPE_OL,		12 },
+	{ ICE_IPV6_OFOS,	14 },
+	{ ICE_UDP_ILOS,		54 },
+	{ ICE_PFCP,		62 },
+	{ ICE_PROTOCOL_LAST,	0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv6) = {
+	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+
+	0x86, 0xdd,		/* ICE_ETYPE_OL 12 */
+
+	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+
+	0x00, 0x00, 0x22, 0x65, /* ICE_UDP_ILOS 54 */
+	0x00, 0x18, 0x00, 0x00,
+
+	0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 62 */
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+
+	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
+};
+
 ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = {
 	{ ICE_MAC_OFOS,		0 },
 	{ ICE_ETYPE_OL,		12 },
@@ -1344,6 +1416,8 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
 	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
 	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
 	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
+	ICE_PKT_PROFILE(pfcp_session_ipv6, ICE_PKT_PFCP | ICE_PKT_OUTER_IPV6),
+	ICE_PKT_PROFILE(pfcp_session_ipv4, ICE_PKT_PFCP),
 	ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 |
 					ICE_PKT_INNER_UDP),
 	ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6),
@@ -1369,14 +1443,6 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
 	ICE_PKT_PROFILE(tcp, 0),
 };
 
-#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l)	struct_size((s), hdr_data, (l))
-#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s)	\
-	ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
-#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s)	\
-	ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
-#define ICE_SW_RULE_LG_ACT_SIZE(s, n)		struct_size((s), act, (n))
-#define ICE_SW_RULE_VSI_LIST_SIZE(s, n)		struct_size((s), vsi, (n))
-
 /* this is a recipe to profile association bitmap */
 static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
 		      ICE_MAX_NUM_PROFILES);
@@ -1406,7 +1472,6 @@ int ice_init_def_sw_recp(struct ice_hw *hw)
 		recps[i].root_rid = i;
 		INIT_LIST_HEAD(&recps[i].filt_rules);
 		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
-		INIT_LIST_HEAD(&recps[i].rg_list);
 		mutex_init(&recps[i].filt_rule_lock);
 	}
 
@@ -1446,11 +1511,11 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
 		  struct ice_sq_cd *cd)
 {
 	struct ice_aqc_get_sw_cfg *cmd;
-	struct ice_aq_desc desc;
+	struct libie_aq_desc desc;
 	int status;
 
 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
-	cmd = &desc.params.get_sw_conf;
+	cmd = libie_aq_raw(&desc);
 	cmd->element = cpu_to_le16(*req_desc);
 
 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@@ -1476,11 +1541,11 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
 {
 	struct ice_aqc_add_update_free_vsi_resp *res;
 	struct ice_aqc_add_get_update_free_vsi *cmd;
-	struct ice_aq_desc desc;
+	struct libie_aq_desc desc;
 	int status;
 
-	cmd = &desc.params.vsi_cmd;
-	res = &desc.params.add_update_free_vsi_res;
+	cmd = libie_aq_raw(&desc);
+	res = libie_aq_raw(&desc);
 
 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
 
@@ -1491,7 +1556,7 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
 
 	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
 
-	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+	desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
 
 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
 				 sizeof(vsi_ctx->info), cd);
@@ -1520,11 +1585,11 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
 {
 	struct ice_aqc_add_update_free_vsi_resp *resp;
 	struct ice_aqc_add_get_update_free_vsi *cmd;
-	struct ice_aq_desc desc;
+	struct libie_aq_desc desc;
 	int status;
 
-	cmd = &desc.params.vsi_cmd;
-	resp = &desc.params.add_update_free_vsi_res;
+	cmd = libie_aq_raw(&desc);
+	resp = libie_aq_raw(&desc);
 
 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
 
@@ -1555,17 +1620,17 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
 {
 	struct ice_aqc_add_update_free_vsi_resp *resp;
 	struct ice_aqc_add_get_update_free_vsi *cmd;
-	struct ice_aq_desc desc;
+	struct libie_aq_desc desc;
 	int status;
 
-	cmd = &desc.params.vsi_cmd;
-	resp = &desc.params.add_update_free_vsi_res;
+	cmd = libie_aq_raw(&desc);
+	resp = libie_aq_raw(&desc);
 
 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
 
 	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
 
-	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+	desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
 
 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cd); @@ -1636,21 +1701,16 @@ ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi) */ static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle) { - struct ice_vsi_ctx *vsi; + struct ice_vsi_ctx *vsi = ice_get_vsi_ctx(hw, vsi_handle); u8 i; - vsi = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi) return; ice_for_each_traffic_class(i) { - if (vsi->lan_q_ctx[i]) { - devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]); - vsi->lan_q_ctx[i] = NULL; - } - if (vsi->rdma_q_ctx[i]) { - devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]); - vsi->rdma_q_ctx[i] = NULL; - } + devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]); + vsi->lan_q_ctx[i] = NULL; + devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]); + vsi->rdma_q_ctx[i] = NULL; } } @@ -1780,18 +1840,36 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, int ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable) { - struct ice_vsi_ctx *ctx; + struct ice_vsi_ctx *ctx, *cached_ctx; + int status; + + cached_ctx = ice_get_vsi_ctx(hw, vsi_handle); + if (!cached_ctx) + return -ENOENT; - ctx = ice_get_vsi_ctx(hw, vsi_handle); + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) - return -EIO; + return -ENOMEM; + + ctx->info.q_opt_rss = cached_ctx->info.q_opt_rss; + ctx->info.q_opt_tc = cached_ctx->info.q_opt_tc; + ctx->info.q_opt_flags = cached_ctx->info.q_opt_flags; + + ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); if (enable) ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN; else ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN; - return ice_update_vsi(hw, vsi_handle, ctx, NULL); + status = ice_update_vsi(hw, vsi_handle, ctx, NULL); + if (!status) { + cached_ctx->info.q_opt_flags = ctx->info.q_opt_flags; + cached_ctx->info.valid_sections |= ctx->info.valid_sections; + } + + kfree(ctx); + return status; } /** @@ -1808,15 +1886,11 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type, enum ice_adminq_opc opc) { - struct ice_aqc_alloc_free_res_elem *sw_buf; + DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1); + u16 buf_len = __struct_size(sw_buf); struct ice_aqc_res_elem *vsi_ele; - u16 buf_len; int status; - buf_len = struct_size(sw_buf, elem, 1); - sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); - if (!sw_buf) - return -ENOMEM; sw_buf->num_elems = cpu_to_le16(1); if (lkup_type == ICE_SW_LKUP_MAC || @@ -1825,31 +1899,34 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || lkup_type == ICE_SW_LKUP_PROMISC || lkup_type == ICE_SW_LKUP_PROMISC_VLAN || - lkup_type == ICE_SW_LKUP_DFLT) { + lkup_type == ICE_SW_LKUP_DFLT || + lkup_type == ICE_SW_LKUP_LAST) { sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP); } else if (lkup_type == ICE_SW_LKUP_VLAN) { - sw_buf->res_type = - cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE); + if (opc == ice_aqc_opc_alloc_res) + sw_buf->res_type = + cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE | + ICE_AQC_RES_TYPE_FLAG_SHARED); + else + sw_buf->res_type = + cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE); } else { - status = -EINVAL; - goto ice_aq_alloc_free_vsi_list_exit; + return -EINVAL; } if (opc == ice_aqc_opc_free_res) sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id); - status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL); + status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, opc); if (status) - goto ice_aq_alloc_free_vsi_list_exit; + return status; if (opc 
== ice_aqc_opc_alloc_res) { vsi_ele = &sw_buf->elem[0]; *vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp); } -ice_aq_alloc_free_vsi_list_exit: - devm_kfree(ice_hw_to_dev(hw), sw_buf); - return status; + return 0; } /** @@ -1867,7 +1944,8 @@ int ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd) { - struct ice_aq_desc desc; + struct ice_aqc_sw_rules *cmd; + struct libie_aq_desc desc; int status; if (opc != ice_aqc_opc_add_sw_rules && @@ -1876,15 +1954,24 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, opc); + cmd = libie_aq_raw(&desc); - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - desc.params.sw_rules.num_rules_fltr_entry_index = - cpu_to_le16(num_rules); + desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD); + cmd->num_rules_fltr_entry_index = cpu_to_le16(num_rules); status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd); if (opc != ice_aqc_opc_add_sw_rules && - hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) + hw->adminq.sq_last_status == LIBIE_AQ_RC_ENOENT) status = -ENOENT; + if (!status) { + if (opc == ice_aqc_opc_add_sw_rules) + hw->switch_info->rule_cnt += num_rules; + else if (opc == ice_aqc_opc_remove_sw_rules) + hw->switch_info->rule_cnt -= num_rules; + } + + trace_ice_aq_sw_rules(hw->switch_info); + return status; } @@ -1897,20 +1984,20 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, * * Add(0x0290) */ -static int +int ice_aq_add_recipe(struct ice_hw *hw, struct ice_aqc_recipe_data_elem *s_recipe_list, u16 num_recipes, struct ice_sq_cd *cd) { struct ice_aqc_add_get_recipe *cmd; - struct ice_aq_desc desc; + struct libie_aq_desc desc; u16 buf_size; - cmd = &desc.params.add_get_recipe; + cmd = libie_aq_raw(&desc); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe); cmd->num_sub_recipes = cpu_to_le16(num_recipes); - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD); buf_size = num_recipes * sizeof(*s_recipe_list); @@ -1934,20 +2021,20 @@ ice_aq_add_recipe(struct ice_hw *hw, * The caller must supply enough space in s_recipe_list to hold all possible * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES. 
*/ -static int +int ice_aq_get_recipe(struct ice_hw *hw, struct ice_aqc_recipe_data_elem *s_recipe_list, u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd) { struct ice_aqc_add_get_recipe *cmd; - struct ice_aq_desc desc; + struct libie_aq_desc desc; u16 buf_size; int status; if (*num_recipes != ICE_MAX_NUM_RECIPES) return -EINVAL; - cmd = &desc.params.add_get_recipe; + cmd = libie_aq_raw(&desc); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe); cmd->return_index = cpu_to_le16(recipe_root); @@ -2023,24 +2110,24 @@ error_out: * ice_aq_map_recipe_to_profile - Map recipe to packet profile * @hw: pointer to the HW struct * @profile_id: package profile ID to associate the recipe with - * @r_bitmap: Recipe bitmap filled in and need to be returned as response + * @r_assoc: Recipe bitmap filled in and need to be returned as response * @cd: pointer to command details structure or NULL * Recipe to profile association (0x0291) */ -static int -ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, +int +ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc, struct ice_sq_cd *cd) { struct ice_aqc_recipe_to_profile *cmd; - struct ice_aq_desc desc; + struct libie_aq_desc desc; - cmd = &desc.params.recipe_to_profile; + cmd = libie_aq_raw(&desc); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile); cmd->profile_id = cpu_to_le16(profile_id); /* Set the recipe ID bit in the bitmask to let the device know which * profile we are associating the recipe to */ - memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc)); + cmd->recipe_assoc = cpu_to_le64(r_assoc); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } @@ -2049,59 +2136,141 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, * ice_aq_get_recipe_to_profile - Map recipe to packet profile * @hw: pointer to the HW struct * @profile_id: package profile ID to associate the recipe with - * @r_bitmap: Recipe bitmap filled in and need to be returned as response + * @r_assoc: Recipe bitmap filled in and need to be returned as response * @cd: pointer to command details structure or NULL * Associate profile ID with given recipe (0x0293) */ -static int -ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, +int +ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc, struct ice_sq_cd *cd) { struct ice_aqc_recipe_to_profile *cmd; - struct ice_aq_desc desc; + struct libie_aq_desc desc; int status; - cmd = &desc.params.recipe_to_profile; + cmd = libie_aq_raw(&desc); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile); cmd->profile_id = cpu_to_le16(profile_id); status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (!status) - memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc)); + *r_assoc = le64_to_cpu(cmd->recipe_assoc); return status; } /** + * ice_init_chk_recipe_reuse_support - check if recipe reuse is supported + * @hw: pointer to the hardware structure + */ +void ice_init_chk_recipe_reuse_support(struct ice_hw *hw) +{ + struct ice_nvm_info *nvm = &hw->flash.nvm; + + hw->recp_reuse = (nvm->major == 0x4 && nvm->minor >= 0x30) || + nvm->major > 0x4; +} + +/** * ice_alloc_recipe - add recipe resource * @hw: pointer to the hardware structure * @rid: recipe ID returned as response to AQ call */ -static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) +int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) { - struct ice_aqc_alloc_free_res_elem *sw_buf; - u16 buf_len; + 
DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1); + u16 buf_len = __struct_size(sw_buf); + u16 res_type; int status; - buf_len = struct_size(sw_buf, elem, 1); - sw_buf = kzalloc(buf_len, GFP_KERNEL); - if (!sw_buf) - return -ENOMEM; - sw_buf->num_elems = cpu_to_le16(1); - sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE << - ICE_AQC_RES_TYPE_S) | - ICE_AQC_RES_TYPE_FLAG_SHARED); - status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, - ice_aqc_opc_alloc_res, NULL); - if (!status) + res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, ICE_AQC_RES_TYPE_RECIPE); + if (hw->recp_reuse) + res_type |= ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED; + else + res_type |= ICE_AQC_RES_TYPE_FLAG_SHARED; + sw_buf->res_type = cpu_to_le16(res_type); + status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, + ice_aqc_opc_alloc_res); + if (!status) { *rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp); - kfree(sw_buf); + hw->switch_info->recp_cnt++; + } return status; } /** + * ice_free_recipe_res - free recipe resource + * @hw: pointer to the hardware structure + * @rid: recipe ID to free + * + * Return: 0 on success, and others on error + */ +static int ice_free_recipe_res(struct ice_hw *hw, u16 rid) +{ + int status; + + status = ice_free_hw_res(hw, ICE_AQC_RES_TYPE_RECIPE, 1, &rid); + if (!status) + hw->switch_info->recp_cnt--; + + return status; +} + +/** + * ice_release_recipe_res - disassociate and free recipe resource + * @hw: pointer to the hardware structure + * @recp: the recipe struct resource to unassociate and free + * + * Return: 0 on success, and others on error + */ +static int ice_release_recipe_res(struct ice_hw *hw, + struct ice_sw_recipe *recp) +{ + DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); + struct ice_switch_info *sw = hw->switch_info; + u64 recp_assoc; + u32 rid, prof; + int status; + + for_each_set_bit(rid, recp->r_bitmap, ICE_MAX_NUM_RECIPES) { + for_each_set_bit(prof, recipe_to_profile[rid], + ICE_MAX_NUM_PROFILES) { + status = ice_aq_get_recipe_to_profile(hw, prof, + &recp_assoc, + NULL); + if (status) + return status; + + bitmap_from_arr64(r_bitmap, &recp_assoc, + ICE_MAX_NUM_RECIPES); + bitmap_andnot(r_bitmap, r_bitmap, recp->r_bitmap, + ICE_MAX_NUM_RECIPES); + bitmap_to_arr64(&recp_assoc, r_bitmap, + ICE_MAX_NUM_RECIPES); + ice_aq_map_recipe_to_profile(hw, prof, + recp_assoc, NULL); + + clear_bit(rid, profile_to_recipe[prof]); + clear_bit(prof, recipe_to_profile[rid]); + } + + status = ice_free_recipe_res(hw, rid); + if (status) + return status; + + sw->recp_list[rid].recp_created = false; + sw->recp_list[rid].adv_rule = false; + memset(&sw->recp_list[rid].lkup_exts, 0, + sizeof(sw->recp_list[rid].lkup_exts)); + clear_bit(rid, recp->r_bitmap); + } + + return 0; +} + +/** * ice_get_recp_to_prof_map - updates recipe to profile mapping * @hw: pointer to hardware structure * @@ -2112,6 +2281,7 @@ static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) static void ice_get_recp_to_prof_map(struct ice_hw *hw) { DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); + u64 recp_assoc; u16 i; for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) { @@ -2119,8 +2289,9 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw) bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES); bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES); - if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL)) + if (ice_aq_get_recipe_to_profile(hw, i, &recp_assoc, NULL)) continue; + bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES); bitmap_copy(profile_to_recipe[i], r_bitmap, 
ICE_MAX_NUM_RECIPES); for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES) @@ -2129,25 +2300,12 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw) } /** - * ice_collect_result_idx - copy result index values - * @buf: buffer that contains the result index - * @recp: the recipe struct to copy data into - */ -static void -ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf, - struct ice_sw_recipe *recp) -{ - if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN) - set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN, - recp->res_idxs); -} - -/** * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries * @hw: pointer to hardware structure * @recps: struct that we need to populate * @rid: recipe ID that we are populating * @refresh_required: true if we should get recipe to profile mapping from FW + * @is_add: flag of adding recipe * * This function is used to populate all the necessary entries into our * bookkeeping so that we have a current list of all the recipes that are @@ -2155,7 +2313,7 @@ ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf, */ static int ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, - bool *refresh_required) + bool *refresh_required, bool is_add) { DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS); struct ice_aqc_recipe_data_elem *tmp; @@ -2199,18 +2357,10 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, for (sub_recps = 0; sub_recps < num_recps; sub_recps++) { struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps]; - struct ice_recp_grp_entry *rg_entry; u8 i, prof, idx, prot = 0; bool is_root; u16 off = 0; - rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry), - GFP_KERNEL); - if (!rg_entry) { - status = -ENOMEM; - goto err_unroll; - } - idx = root_bufs.recipe_indx; is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT; @@ -2223,11 +2373,8 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, prof = find_first_bit(recipe_to_profile[idx], ICE_MAX_NUM_PROFILES); for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) { - u8 lkup_indx = root_bufs.content.lkup_indx[i + 1]; - - rg_entry->fv_idx[i] = lkup_indx; - rg_entry->fv_mask[i] = - le16_to_cpu(root_bufs.content.mask[i + 1]); + u8 lkup_indx = root_bufs.content.lkup_indx[i]; + u16 lkup_mask = le16_to_cpu(root_bufs.content.mask[i]); /* If the recipe is a chained recipe then all its * child recipe's result will have a result index. @@ -2238,38 +2385,38 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a * valid offset value. 
*/ - if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) || - rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE || - rg_entry->fv_idx[i] == 0) + if (!lkup_indx || + (lkup_indx & ICE_AQ_RECIPE_LKUP_IGNORE) || + test_bit(lkup_indx, + hw->switch_info->prof_res_bm[prof])) continue; - ice_find_prot_off(hw, ICE_BLK_SW, prof, - rg_entry->fv_idx[i], &prot, &off); + ice_find_prot_off(hw, ICE_BLK_SW, prof, lkup_indx, + &prot, &off); lkup_exts->fv_words[fv_word_idx].prot_id = prot; lkup_exts->fv_words[fv_word_idx].off = off; - lkup_exts->field_mask[fv_word_idx] = - rg_entry->fv_mask[i]; + lkup_exts->field_mask[fv_word_idx] = lkup_mask; fv_word_idx++; } - /* populate rg_list with the data from the child entry of this - * recipe - */ - list_add(&rg_entry->l_entry, &recps[rid].rg_list); /* Propagate some data to the recipe database */ - recps[idx].is_root = !!is_root; recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority; + recps[idx].need_pass_l2 = !!(root_bufs.content.act_ctrl & + ICE_AQ_RECIPE_ACT_NEED_PASS_L2); + recps[idx].allow_pass_l2 = !!(root_bufs.content.act_ctrl & + ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2); bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS); if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) { - recps[idx].chain_idx = root_bufs.content.result_indx & - ~ICE_AQ_RECIPE_RESULT_EN; - set_bit(recps[idx].chain_idx, recps[idx].res_idxs); - } else { - recps[idx].chain_idx = ICE_INVAL_CHAIN_IND; + set_bit(root_bufs.content.result_indx & + ~ICE_AQ_RECIPE_RESULT_EN, recps[idx].res_idxs); } - if (!is_root) + if (!is_root) { + if (hw->recp_reuse && is_add) + recps[idx].recp_created = true; + continue; + } /* Only do the following for root recipes entries */ memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap, @@ -2281,19 +2428,11 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, /* Complete initialization of the root recipe entry */ lkup_exts->n_val_words = fv_word_idx; - recps[rid].big_recp = (num_recps > 1); - recps[rid].n_grp_count = (u8)num_recps; - recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp, - recps[rid].n_grp_count * sizeof(*recps[rid].root_buf), - GFP_KERNEL); - if (!recps[rid].root_buf) { - status = -ENOMEM; - goto err_unroll; - } /* Copy result indexes */ bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS); - recps[rid].recp_created = true; + if (is_add) + recps[rid].recp_created = true; err_unroll: kfree(tmp); @@ -2444,6 +2583,18 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi) fi->lan_en = true; } } + + if (fi->flag & ICE_FLTR_TX_ONLY) + fi->lan_en = false; +} + +/** + * ice_fill_eth_hdr - helper to copy dummy_eth_hdr into supplied buffer + * @eth_hdr: pointer to buffer to populate + */ +void ice_fill_eth_hdr(u8 *eth_hdr) +{ + memcpy(eth_hdr, dummy_eth_header, DUMMY_ETH_HDR_LEN); } /** @@ -2483,25 +2634,24 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, switch (f_info->fltr_act) { case ICE_FWD_TO_VSI: - act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & - ICE_SINGLE_ACT_VSI_ID_M; + act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, + f_info->fwd_id.hw_vsi_id); if (f_info->lkup_type != ICE_SW_LKUP_VLAN) act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT; break; case ICE_FWD_TO_VSI_LIST: act |= ICE_SINGLE_ACT_VSI_LIST; - act |= (f_info->fwd_id.vsi_list_id << - ICE_SINGLE_ACT_VSI_LIST_ID_S) & - ICE_SINGLE_ACT_VSI_LIST_ID_M; + act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_LIST_ID_M, + f_info->fwd_id.vsi_list_id); if (f_info->lkup_type != 
ICE_SW_LKUP_VLAN) act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT; break; case ICE_FWD_TO_Q: act |= ICE_SINGLE_ACT_TO_Q; - act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & - ICE_SINGLE_ACT_Q_INDEX_M; + act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M, + f_info->fwd_id.q_id); break; case ICE_DROP_PACKET: act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP | @@ -2511,10 +2661,9 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, q_rgn = f_info->qgrp_size > 0 ? (u8)ilog2(f_info->qgrp_size) : 0; act |= ICE_SINGLE_ACT_TO_Q; - act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & - ICE_SINGLE_ACT_Q_INDEX_M; - act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) & - ICE_SINGLE_ACT_Q_REGION_M; + act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M, + f_info->fwd_id.q_id); + act |= FIELD_PREP(ICE_SINGLE_ACT_Q_REGION_M, q_rgn); break; default: return; @@ -2640,7 +2789,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, m_ent->fltr_info.fwd_id.hw_vsi_id; act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT; - act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M; + act |= FIELD_PREP(ICE_LG_ACT_VSI_LIST_ID_M, id); if (m_ent->vsi_count > 1) act |= ICE_LG_ACT_VSI_LIST; lg_act->act[0] = cpu_to_le32(act); @@ -2648,16 +2797,15 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, /* Second action descriptor type */ act = ICE_LG_ACT_GENERIC; - act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M; + act |= FIELD_PREP(ICE_LG_ACT_GENERIC_VALUE_M, 1); lg_act->act[1] = cpu_to_le32(act); - act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX << - ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M; + act = FIELD_PREP(ICE_LG_ACT_GENERIC_OFFSET_M, + ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX); /* Third action Marker value */ act |= ICE_LG_ACT_GENERIC; - act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) & - ICE_LG_ACT_GENERIC_VALUE_M; + act |= FIELD_PREP(ICE_LG_ACT_GENERIC_VALUE_M, sw_marker); lg_act->act[2] = cpu_to_le32(act); @@ -2666,9 +2814,9 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, ice_aqc_opc_update_sw_rules); /* Update the action to point to the large action ID */ - rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR | - ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) & - ICE_SINGLE_ACT_PTR_VAL_M)); + act = ICE_SINGLE_ACT_PTR; + act |= FIELD_PREP(ICE_SINGLE_ACT_PTR_VAL_M, l_id); + rx_tx->act = cpu_to_le32(act); /* Use the filter rule ID of the previously created rule with single * act. Once the update happens, hardware will treat this as large @@ -2751,7 +2899,8 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || lkup_type == ICE_SW_LKUP_PROMISC || lkup_type == ICE_SW_LKUP_PROMISC_VLAN || - lkup_type == ICE_SW_LKUP_DFLT) + lkup_type == ICE_SW_LKUP_DFLT || + lkup_type == ICE_SW_LKUP_LAST) rule_type = remove ? 
ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR : ICE_AQC_SW_RULES_T_VSI_LIST_SET; else if (lkup_type == ICE_SW_LKUP_VLAN) @@ -2998,7 +3147,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, u16 vsi_handle_arr[2]; /* A rule already exists with the new VSI being added */ - if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id) + if (cur_fltr->vsi_handle == new_fltr->vsi_handle) return -EEXIST; vsi_handle_arr[0] = cur_fltr->vsi_handle; @@ -3046,7 +3195,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, /* A rule already exists with the new VSI being added */ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) - return 0; + return -EEXIST; /* Update the previously created VSI list set with * the new VSI ID passed in @@ -3105,7 +3254,7 @@ ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info) * handle element. This can be extended further to search VSI list with more * than 1 vsi_count. Returns pointer to VSI list entry if found. */ -static struct ice_vsi_list_map_info * +struct ice_vsi_list_map_info * ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle, u16 *vsi_list_id) { @@ -3387,54 +3536,6 @@ exit: } /** - * ice_mac_fltr_exist - does this MAC filter exist for given VSI - * @hw: pointer to the hardware structure - * @mac: MAC address to be checked (for MAC filter) - * @vsi_handle: check MAC filter for this VSI - */ -bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle) -{ - struct ice_fltr_mgmt_list_entry *entry; - struct list_head *rule_head; - struct ice_switch_info *sw; - struct mutex *rule_lock; /* Lock to protect filter rule list */ - u16 hw_vsi_id; - - if (!ice_is_vsi_valid(hw, vsi_handle)) - return false; - - hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); - sw = hw->switch_info; - rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; - if (!rule_head) - return false; - - rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; - mutex_lock(rule_lock); - list_for_each_entry(entry, rule_head, list_entry) { - struct ice_fltr_info *f_info = &entry->fltr_info; - u8 *mac_addr = &f_info->l_data.mac.mac_addr[0]; - - if (is_zero_ether_addr(mac_addr)) - continue; - - if (f_info->flag != ICE_FLTR_TX || - f_info->src_id != ICE_SRC_ID_VSI || - f_info->lkup_type != ICE_SW_LKUP_MAC || - f_info->fltr_act != ICE_FWD_TO_VSI || - hw_vsi_id != f_info->fwd_id.hw_vsi_id) - continue; - - if (ether_addr_equal(mac, mac_addr)) { - mutex_unlock(rule_lock); - return true; - } - } - mutex_unlock(rule_lock); - return false; -} - -/** * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI * @hw: pointer to the hardware structure * @vlan_id: VLAN ID @@ -3861,6 +3962,7 @@ ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set, } else if (f_info.flag & ICE_FLTR_TX) { f_info.src_id = ICE_SRC_ID_VSI; f_info.src = hw_vsi_id; + f_info.flag |= ICE_FLTR_TX_ONLY; } f_list_entry.fltr_info = f_info; @@ -4460,29 +4562,19 @@ int ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 *counter_id) { - struct ice_aqc_alloc_free_res_elem *buf; - u16 buf_len; + DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1); + u16 buf_len = __struct_size(buf); int status; - /* Allocate resource */ - buf_len = struct_size(buf, elem, 1); - buf = kzalloc(buf_len, GFP_KERNEL); - if (!buf) - return -ENOMEM; - buf->num_elems = cpu_to_le16(num_items); - buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & - ICE_AQC_RES_TYPE_M) | alloc_shared); + buf->res_type = cpu_to_le16(FIELD_PREP(ICE_AQC_RES_TYPE_M, type) | + 
alloc_shared); - status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, - ice_aqc_opc_alloc_res, NULL); + status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res); if (status) - goto exit; + return status; *counter_id = le16_to_cpu(buf->elem[0].e.sw_resp); - -exit: - kfree(buf); return status; } @@ -4498,27 +4590,54 @@ int ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 counter_id) { - struct ice_aqc_alloc_free_res_elem *buf; - u16 buf_len; + DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1); + u16 buf_len = __struct_size(buf); int status; - /* Free resource */ - buf_len = struct_size(buf, elem, 1); - buf = kzalloc(buf_len, GFP_KERNEL); - if (!buf) - return -ENOMEM; - buf->num_elems = cpu_to_le16(num_items); - buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & - ICE_AQC_RES_TYPE_M) | alloc_shared); + buf->res_type = cpu_to_le16(FIELD_PREP(ICE_AQC_RES_TYPE_M, type) | + alloc_shared); buf->elem[0].e.sw_resp = cpu_to_le16(counter_id); - status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, - ice_aqc_opc_free_res, NULL); + status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res); if (status) ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n"); - kfree(buf); + return status; +} + +#define ICE_PROTOCOL_ENTRY(id, ...) { \ + .prot_type = id, \ + .offs = {__VA_ARGS__}, \ +} + +/** + * ice_share_res - set a resource as shared or dedicated + * @hw: hw struct of original owner of resource + * @type: resource type + * @shared: is the resource being set to shared + * @res_id: resource id (descriptor) + */ +int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id) +{ + DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1); + u16 buf_len = __struct_size(buf); + u16 res_type; + int status; + + buf->num_elems = cpu_to_le16(1); + res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, type); + if (shared) + res_type |= ICE_AQC_RES_TYPE_FLAG_SHARED; + + buf->res_type = cpu_to_le16(res_type); + buf->elem[0].e.sw_resp = cpu_to_le16(res_id); + status = ice_aq_alloc_free_res(hw, buf, buf_len, + ice_aqc_opc_share_res); + if (status) + ice_debug(hw, ICE_DBG_SW, "Could not set resource type %u id %u to %s\n", + type, res_id, shared ? "SHARED" : "DEDICATED"); + return status; } @@ -4532,29 +4651,39 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, * structure is added to that union. 
*/ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = { - { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } }, - { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } }, - { ICE_ETYPE_OL, { 0 } }, - { ICE_ETYPE_IL, { 0 } }, - { ICE_VLAN_OFOS, { 2, 0 } }, - { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } }, - { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } }, - { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, - 26, 28, 30, 32, 34, 36, 38 } }, - { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, - 26, 28, 30, 32, 34, 36, 38 } }, - { ICE_TCP_IL, { 0, 2 } }, - { ICE_UDP_OF, { 0, 2 } }, - { ICE_UDP_ILOS, { 0, 2 } }, - { ICE_VXLAN, { 8, 10, 12, 14 } }, - { ICE_GENEVE, { 8, 10, 12, 14 } }, - { ICE_NVGRE, { 0, 2, 4, 6 } }, - { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } }, - { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } }, - { ICE_PPPOE, { 0, 2, 4, 6 } }, - { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } }, - { ICE_VLAN_EX, { 2, 0 } }, - { ICE_VLAN_IN, { 2, 0 } }, + ICE_PROTOCOL_ENTRY(ICE_MAC_OFOS, 0, 2, 4, 6, 8, 10, 12), + ICE_PROTOCOL_ENTRY(ICE_MAC_IL, 0, 2, 4, 6, 8, 10, 12), + ICE_PROTOCOL_ENTRY(ICE_ETYPE_OL, 0), + ICE_PROTOCOL_ENTRY(ICE_ETYPE_IL, 0), + ICE_PROTOCOL_ENTRY(ICE_VLAN_OFOS, 2, 0), + ICE_PROTOCOL_ENTRY(ICE_IPV4_OFOS, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18), + ICE_PROTOCOL_ENTRY(ICE_IPV4_IL, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18), + ICE_PROTOCOL_ENTRY(ICE_IPV6_OFOS, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, + 20, 22, 24, 26, 28, 30, 32, 34, 36, 38), + ICE_PROTOCOL_ENTRY(ICE_IPV6_IL, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, + 22, 24, 26, 28, 30, 32, 34, 36, 38), + ICE_PROTOCOL_ENTRY(ICE_TCP_IL, 0, 2), + ICE_PROTOCOL_ENTRY(ICE_UDP_OF, 0, 2), + ICE_PROTOCOL_ENTRY(ICE_UDP_ILOS, 0, 2), + ICE_PROTOCOL_ENTRY(ICE_VXLAN, 8, 10, 12, 14), + ICE_PROTOCOL_ENTRY(ICE_GENEVE, 8, 10, 12, 14), + ICE_PROTOCOL_ENTRY(ICE_NVGRE, 0, 2, 4, 6), + ICE_PROTOCOL_ENTRY(ICE_GTP, 8, 10, 12, 14, 16, 18, 20, 22), + ICE_PROTOCOL_ENTRY(ICE_GTP_NO_PAY, 8, 10, 12, 14), + ICE_PROTOCOL_ENTRY(ICE_PFCP, 8, 10, 12, 14, 16, 18, 20, 22), + ICE_PROTOCOL_ENTRY(ICE_PPPOE, 0, 2, 4, 6), + ICE_PROTOCOL_ENTRY(ICE_L2TPV3, 0, 2, 4, 6, 8, 10), + ICE_PROTOCOL_ENTRY(ICE_VLAN_EX, 2, 0), + ICE_PROTOCOL_ENTRY(ICE_VLAN_IN, 2, 0), + ICE_PROTOCOL_ENTRY(ICE_HW_METADATA, + ICE_SOURCE_PORT_MDID_OFFSET, + ICE_PTYPE_MDID_OFFSET, + ICE_PACKET_LENGTH_MDID_OFFSET, + ICE_SOURCE_VSI_MDID_OFFSET, + ICE_PKT_VLAN_MDID_OFFSET, + ICE_PKT_TUNNEL_MDID_OFFSET, + ICE_PKT_TCP_MDID_OFFSET, + ICE_PKT_ERROR_MDID_OFFSET), }; static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { @@ -4575,23 +4704,26 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { { ICE_NVGRE, ICE_GRE_OF_HW }, { ICE_GTP, ICE_UDP_OF_HW }, { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW }, + { ICE_PFCP, ICE_UDP_ILOS_HW }, { ICE_PPPOE, ICE_PPPOE_HW }, { ICE_L2TPV3, ICE_L2TPV3_HW }, { ICE_VLAN_EX, ICE_VLAN_OF_HW }, { ICE_VLAN_IN, ICE_VLAN_OL_HW }, + { ICE_HW_METADATA, ICE_META_DATA_ID_HW }, }; /** * ice_find_recp - find a recipe * @hw: pointer to the hardware structure * @lkup_exts: extension sequence to match - * @tun_type: type of recipe tunnel + * @rinfo: information regarding the rule e.g. priority and action info + * @is_add: flag of adding recipe * * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found. 
*/ static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, - enum ice_sw_tunnel_type tun_type) + const struct ice_adv_rule_info *rinfo, bool is_add) { bool refresh_required = true; struct ice_sw_recipe *recp; @@ -4605,16 +4737,12 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, * entry update it in our SW bookkeeping and continue with the * matching. */ - if (!recp[i].recp_created) + if (hw->recp_reuse) { if (ice_get_recp_frm_fw(hw, hw->switch_info->recp_list, i, - &refresh_required)) + &refresh_required, is_add)) continue; - - /* Skip inverse action recipes */ - if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl & - ICE_AQ_RECIPE_ACT_INV_ACT) - continue; + } /* if number of words we are looking for match */ if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) { @@ -4652,9 +4780,13 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, } /* If for "i"th recipe the found was never set to false * then it means we found our match - * Also tun type of recipe needs to be checked + * Also tun type and *_pass_l2 of recipe needs to be + * checked */ - if (found && recp[i].tun_type == tun_type) + if (found && recp[i].tun_type == rinfo->tun_type && + recp[i].need_pass_l2 == rinfo->need_pass_l2 && + recp[i].allow_pass_l2 == rinfo->allow_pass_l2 && + recp[i].priority == rinfo->priority) return i; /* Return the recipe ID */ } } @@ -4737,110 +4869,55 @@ ice_fill_valid_words(struct ice_adv_lkup_elem *rule, } /** - * ice_create_first_fit_recp_def - Create a recipe grouping - * @hw: pointer to the hardware structure - * @lkup_exts: an array of protocol header extractions - * @rg_list: pointer to a list that stores new recipe groups - * @recp_cnt: pointer to a variable that stores returned number of recipe groups - * - * Using first fit algorithm, take all the words that are still not done - * and start grouping them in 4-word groups. Each group makes up one - * recipe. - */ -static int -ice_create_first_fit_recp_def(struct ice_hw *hw, - struct ice_prot_lkup_ext *lkup_exts, - struct list_head *rg_list, - u8 *recp_cnt) -{ - struct ice_pref_recipe_group *grp = NULL; - u8 j; - - *recp_cnt = 0; - - /* Walk through every word in the rule to check if it is not done. If so - * then this word needs to be part of a new recipe. - */ - for (j = 0; j < lkup_exts->n_val_words; j++) - if (!test_bit(j, lkup_exts->done)) { - if (!grp || - grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) { - struct ice_recp_grp_entry *entry; - - entry = devm_kzalloc(ice_hw_to_dev(hw), - sizeof(*entry), - GFP_KERNEL); - if (!entry) - return -ENOMEM; - list_add(&entry->l_entry, rg_list); - grp = &entry->r_group; - (*recp_cnt)++; - } - - grp->pairs[grp->n_val_pairs].prot_id = - lkup_exts->fv_words[j].prot_id; - grp->pairs[grp->n_val_pairs].off = - lkup_exts->fv_words[j].off; - grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j]; - grp->n_val_pairs++; - } - - return 0; -} - -/** * ice_fill_fv_word_index - fill in the field vector indices for a recipe group * @hw: pointer to the hardware structure - * @fv_list: field vector with the extraction sequence information - * @rg_list: recipe groupings with protocol-offset pairs + * @rm: recipe management list entry * * Helper function to fill in the field vector indices for protocol-offset * pairs. These indexes are then ultimately programmed into a recipe. 
*/ static int -ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list, - struct list_head *rg_list) +ice_fill_fv_word_index(struct ice_hw *hw, struct ice_sw_recipe *rm) { struct ice_sw_fv_list_entry *fv; - struct ice_recp_grp_entry *rg; struct ice_fv_word *fv_ext; + u8 i; - if (list_empty(fv_list)) - return 0; + if (list_empty(&rm->fv_list)) + return -EINVAL; - fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry, + fv = list_first_entry(&rm->fv_list, struct ice_sw_fv_list_entry, list_entry); fv_ext = fv->fv_ptr->ew; - list_for_each_entry(rg, rg_list, l_entry) { - u8 i; - - for (i = 0; i < rg->r_group.n_val_pairs; i++) { - struct ice_fv_word *pr; - bool found = false; - u16 mask; - u8 j; - - pr = &rg->r_group.pairs[i]; - mask = rg->r_group.mask[i]; + /* Add switch id as the first word. */ + rm->fv_idx[0] = ICE_AQ_SW_ID_LKUP_IDX; + rm->fv_mask[0] = ICE_AQ_SW_ID_LKUP_MASK; + rm->n_ext_words++; - for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) - if (fv_ext[j].prot_id == pr->prot_id && - fv_ext[j].off == pr->off) { - found = true; + for (i = 1; i < rm->n_ext_words; i++) { + struct ice_fv_word *fv_word = &rm->ext_words[i - 1]; + u16 fv_mask = rm->word_masks[i - 1]; + bool found = false; + u8 j; - /* Store index of field vector */ - rg->fv_idx[i] = j; - rg->fv_mask[i] = mask; - break; - } + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) { + if (fv_ext[j].prot_id == fv_word->prot_id && + fv_ext[j].off == fv_word->off) { + found = true; - /* Protocol/offset could not be found, caller gave an - * invalid pair - */ - if (!found) - return -EINVAL; + /* Store index of field vector */ + rm->fv_idx[i] = j; + rm->fv_mask[i] = fv_mask; + break; + } } + + /* Protocol/offset could not be found, caller gave an invalid + * pair. + */ + if (!found) + return -EINVAL; } return 0; @@ -4914,6 +4991,73 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles, } /** + * ice_calc_recp_cnt - calculate number of recipes based on word count + * @word_cnt: number of lookup words + * + * Word count should include switch ID word and regular lookup words. + * Returns: number of recipes required to fit @word_cnt, including extra recipes + * needed for recipe chaining (if needed). + */ +static int ice_calc_recp_cnt(u8 word_cnt) +{ + /* All words fit in a single recipe, no need for chaining. */ + if (word_cnt <= ICE_NUM_WORDS_RECIPE) + return 1; + + /* Recipe chaining required. Result indexes are fitted right after + * regular lookup words. In some cases a new recipe must be added in + * order to fit result indexes. + * + * While the word count increases, every 5 words an extra recipe needs + * to be added. However, by adding a recipe, one word for its result + * index must also be added, therefore every 4 words recipe count + * increases by 1. This calculation does not apply to word count == 1, + * which is handled above. 
+ */ + return (word_cnt + 2) / (ICE_NUM_WORDS_RECIPE - 1); +} + +static void fill_recipe_template(struct ice_aqc_recipe_data_elem *recp, u16 rid, + const struct ice_sw_recipe *rm) +{ + int i; + + recp->recipe_indx = rid; + recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_PRUNE_INDX_M; + + for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) { + recp->content.lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE; + recp->content.mask[i] = cpu_to_le16(0); + } + + set_bit(rid, (unsigned long *)recp->recipe_bitmap); + recp->content.act_ctrl_fwd_priority = rm->priority; + + if (rm->need_pass_l2) + recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2; + + if (rm->allow_pass_l2) + recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2; +} + +static void bookkeep_recipe(struct ice_sw_recipe *recipe, + struct ice_aqc_recipe_data_elem *r, + const struct ice_sw_recipe *rm) +{ + memcpy(recipe->r_bitmap, r->recipe_bitmap, sizeof(recipe->r_bitmap)); + + recipe->priority = r->content.act_ctrl_fwd_priority; + recipe->tun_type = rm->tun_type; + recipe->need_pass_l2 = rm->need_pass_l2; + recipe->allow_pass_l2 = rm->allow_pass_l2; + recipe->recp_created = true; +} + +/* For memcpy in ice_add_sw_recipe. */ +static_assert(sizeof_field(struct ice_aqc_recipe_data_elem, recipe_bitmap) == + sizeof_field(struct ice_sw_recipe, r_bitmap)); + +/** * ice_add_sw_recipe - function to call AQ calls to create switch recipe * @hw: pointer to hardware structure * @rm: recipe management list entry @@ -4923,381 +5067,145 @@ static int ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, unsigned long *profiles) { + struct ice_aqc_recipe_data_elem *buf __free(kfree) = NULL; DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS); - struct ice_aqc_recipe_data_elem *tmp; - struct ice_aqc_recipe_data_elem *buf; - struct ice_recp_grp_entry *entry; - u16 free_res_idx; - u16 recipe_count; - u8 chain_idx; - u8 recps = 0; + struct ice_aqc_recipe_data_elem *root; + struct ice_sw_recipe *recipe; + u16 free_res_idx, rid; + int lookup = 0; + int recp_cnt; int status; + int word; + int i; + + recp_cnt = ice_calc_recp_cnt(rm->n_ext_words); - /* When more than one recipe are required, another recipe is needed to - * chain them together. Matching a tunnel metadata ID takes up one of - * the match fields in the chaining recipe reducing the number of - * chained recipes by one. 
- */ - /* check number of free result indices */ bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS); + bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES); + + /* Check number of free result indices */ free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm); ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n", - free_res_idx, rm->n_grp_count); + free_res_idx, recp_cnt); - if (rm->n_grp_count > 1) { - if (rm->n_grp_count > free_res_idx) - return -ENOSPC; - - rm->n_grp_count++; - } - - if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE) + /* Last recipe doesn't need result index */ + if (recp_cnt - 1 > free_res_idx) return -ENOSPC; - tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); - if (!tmp) - return -ENOMEM; - - buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf), - GFP_KERNEL); - if (!buf) { - status = -ENOMEM; - goto err_mem; - } + if (recp_cnt > ICE_MAX_CHAIN_RECIPE_RES) + return -E2BIG; - bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES); - recipe_count = ICE_MAX_NUM_RECIPES; - status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC, - NULL); - if (status || recipe_count == 0) - goto err_unroll; + buf = kcalloc(recp_cnt, sizeof(*buf), GFP_KERNEL); + if (!buf) + return -ENOMEM; - /* Allocate the recipe resources, and configure them according to the - * match fields from protocol headers and extracted field vectors. + /* Setup the non-root subrecipes. These do not contain lookups for other + * subrecipes results. Set associated recipe only to own recipe index. + * Each non-root subrecipe needs a free result index from FV. + * + * Note: only done if there is more than one recipe. */ - chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS); - list_for_each_entry(entry, &rm->rg_list, l_entry) { - u8 i; - - status = ice_alloc_recipe(hw, &entry->rid); - if (status) - goto err_unroll; - - /* Clear the result index of the located recipe, as this will be - * updated, if needed, later in the recipe creation process. - */ - tmp[0].content.result_indx = 0; - - buf[recps] = tmp[0]; - buf[recps].recipe_indx = (u8)entry->rid; - /* if the recipe is a non-root recipe RID should be programmed - * as 0 for the rules to be applied correctly. - */ - buf[recps].content.rid = 0; - memset(&buf[recps].content.lkup_indx, 0, - sizeof(buf[recps].content.lkup_indx)); - - /* All recipes use look-up index 0 to match switch ID. */ - buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; - buf[recps].content.mask[0] = - cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); - /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask - * to be 0 - */ - for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { - buf[recps].content.lkup_indx[i] = 0x80; - buf[recps].content.mask[i] = 0; - } - - for (i = 0; i < entry->r_group.n_val_pairs; i++) { - buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i]; - buf[recps].content.mask[i + 1] = - cpu_to_le16(entry->fv_mask[i]); - } - - if (rm->n_grp_count > 1) { - /* Checks to see if there really is a valid result index - * that can be used. 
- */ - if (chain_idx >= ICE_MAX_FV_WORDS) { - ice_debug(hw, ICE_DBG_SW, "No chain index available\n"); - status = -ENOSPC; - goto err_unroll; - } - - entry->chain_idx = chain_idx; - buf[recps].content.result_indx = - ICE_AQ_RECIPE_RESULT_EN | - ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) & - ICE_AQ_RECIPE_RESULT_DATA_M); - clear_bit(chain_idx, result_idx_bm); - chain_idx = find_first_bit(result_idx_bm, - ICE_MAX_FV_WORDS); - } + for (i = 0; i < recp_cnt - 1; i++) { + struct ice_aqc_recipe_content *content; + u8 result_idx; - /* fill recipe dependencies */ - bitmap_zero((unsigned long *)buf[recps].recipe_bitmap, - ICE_MAX_NUM_RECIPES); - set_bit(buf[recps].recipe_indx, - (unsigned long *)buf[recps].recipe_bitmap); - buf[recps].content.act_ctrl_fwd_priority = rm->priority; - recps++; - } - - if (rm->n_grp_count == 1) { - rm->root_rid = buf[0].recipe_indx; - set_bit(buf[0].recipe_indx, rm->r_bitmap); - buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT; - if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) { - memcpy(buf[0].recipe_bitmap, rm->r_bitmap, - sizeof(buf[0].recipe_bitmap)); - } else { - status = -EINVAL; - goto err_unroll; - } - /* Applicable only for ROOT_RECIPE, set the fwd_priority for - * the recipe which is getting created if specified - * by user. Usually any advanced switch filter, which results - * into new extraction sequence, ended up creating a new recipe - * of type ROOT and usually recipes are associated with profiles - * Switch rule referreing newly created recipe, needs to have - * either/or 'fwd' or 'join' priority, otherwise switch rule - * evaluation will not happen correctly. In other words, if - * switch rule to be evaluated on priority basis, then recipe - * needs to have priority, otherwise it will be evaluated last. - */ - buf[0].content.act_ctrl_fwd_priority = rm->priority; - } else { - struct ice_recp_grp_entry *last_chain_entry; - u16 rid, i; - - /* Allocate the last recipe that will chain the outcomes of the - * other recipes together - */ status = ice_alloc_recipe(hw, &rid); if (status) - goto err_unroll; + return status; - buf[recps].recipe_indx = (u8)rid; - buf[recps].content.rid = (u8)rid; - buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT; - /* the new entry created should also be part of rg_list to - * make sure we have complete recipe - */ - last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw), - sizeof(*last_chain_entry), - GFP_KERNEL); - if (!last_chain_entry) { - status = -ENOMEM; - goto err_unroll; - } - last_chain_entry->rid = rid; - memset(&buf[recps].content.lkup_indx, 0, - sizeof(buf[recps].content.lkup_indx)); - /* All recipes use look-up index 0 to match switch ID. */ - buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; - buf[recps].content.mask[0] = - cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); - for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { - buf[recps].content.lkup_indx[i] = - ICE_AQ_RECIPE_LKUP_IGNORE; - buf[recps].content.mask[i] = 0; - } + fill_recipe_template(&buf[i], rid, rm); - i = 1; - /* update r_bitmap with the recp that is used for chaining */ - set_bit(rid, rm->r_bitmap); - /* this is the recipe that chains all the other recipes so it - * should not have a chaining ID to indicate the same + result_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS); + /* Check if there really is a valid result index that can be + * used. 
 */
- last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
- list_for_each_entry(entry, &rm->rg_list, l_entry) {
- last_chain_entry->fv_idx[i] = entry->chain_idx;
- buf[recps].content.lkup_indx[i] = entry->chain_idx;
- buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
- set_bit(entry->rid, rm->r_bitmap);
- }
- list_add(&last_chain_entry->l_entry, &rm->rg_list);
- if (sizeof(buf[recps].recipe_bitmap) >=
- sizeof(rm->r_bitmap)) {
- memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
- sizeof(buf[recps].recipe_bitmap));
- } else {
- status = -EINVAL;
- goto err_unroll;
+ if (result_idx >= ICE_MAX_FV_WORDS) {
+ ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
+ return -ENOSPC;
 }
- buf[recps].content.act_ctrl_fwd_priority = rm->priority;
+ clear_bit(result_idx, result_idx_bm);
- recps++;
- rm->root_rid = (u8)rid;
- }
- status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
- if (status)
- goto err_unroll;
+ content = &buf[i].content;
+ content->result_indx = ICE_AQ_RECIPE_RESULT_EN |
+ FIELD_PREP(ICE_AQ_RECIPE_RESULT_DATA_M,
+ result_idx);
- status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
- ice_release_change_lock(hw);
- if (status)
- goto err_unroll;
+ /* Set recipe association to be used for root recipe */
+ set_bit(rid, rm->r_bitmap);
- /* Every recipe that just got created add it to the recipe
- * book keeping list
- */
- list_for_each_entry(entry, &rm->rg_list, l_entry) {
- struct ice_switch_info *sw = hw->switch_info;
- bool is_root, idx_found = false;
- struct ice_sw_recipe *recp;
- u16 idx, buf_idx = 0;
-
- /* find buffer index for copying some data */
- for (idx = 0; idx < rm->n_grp_count; idx++)
- if (buf[idx].recipe_indx == entry->rid) {
- buf_idx = idx;
- idx_found = true;
- }
+ word = 0;
+ while (lookup < rm->n_ext_words &&
+ word < ICE_NUM_WORDS_RECIPE) {
+ content->lkup_indx[word] = rm->fv_idx[lookup];
+ content->mask[word] = cpu_to_le16(rm->fv_mask[lookup]);
- if (!idx_found) {
- status = -EIO;
- goto err_unroll;
+ lookup++;
+ word++;
 }
- recp = &sw->recp_list[entry->rid];
- is_root = (rm->root_rid == entry->rid);
- recp->is_root = is_root;
-
- recp->root_rid = entry->rid;
- recp->big_recp = (is_root && rm->n_grp_count > 1);
-
- memcpy(&recp->ext_words, entry->r_group.pairs,
- entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));
-
- memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
- sizeof(recp->r_bitmap));
-
- /* Copy non-result fv index values and masks to recipe. This
- * call will also update the result recipe bitmask.
- */
- ice_collect_result_idx(&buf[buf_idx], recp);
-
- /* for non-root recipes, also copy to the root, this allows
- * easier matching of a complete chained recipe
- */
- if (!is_root)
- ice_collect_result_idx(&buf[buf_idx],
- &sw->recp_list[rm->root_rid]);
-
- recp->n_ext_words = entry->r_group.n_val_pairs;
- recp->chain_idx = entry->chain_idx;
- recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
- recp->n_grp_count = rm->n_grp_count;
- recp->tun_type = rm->tun_type;
- recp->recp_created = true;
+ recipe = &hw->switch_info->recp_list[rid];
+ set_bit(result_idx, recipe->res_idxs);
+ bookkeep_recipe(recipe, &buf[i], rm);
 }
- rm->root_buf = buf;
- kfree(tmp);
- return status;
-err_unroll:
-err_mem:
- kfree(tmp);
- devm_kfree(ice_hw_to_dev(hw), buf);
- return status;
-}
+ /* Setup the root recipe */
+ status = ice_alloc_recipe(hw, &rid);
+ if (status)
+ return status;
-/**
- * ice_create_recipe_group - creates recipe group
- * @hw: pointer to hardware structure
- * @rm: recipe management list entry
- * @lkup_exts: lookup elements
- */
-static int
-ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
- struct ice_prot_lkup_ext *lkup_exts)
-{
- u8 recp_count = 0;
- int status;
+ recipe = &hw->switch_info->recp_list[rid];
+ root = &buf[recp_cnt - 1];
+ fill_recipe_template(root, rid, rm);
- rm->n_grp_count = 0;
+ /* Set recipe association, use previously set bitmap and own rid */
+ set_bit(rid, rm->r_bitmap);
+ memcpy(root->recipe_bitmap, rm->r_bitmap, sizeof(root->recipe_bitmap));
- /* Create recipes for words that are marked not done by packing them
- * as best fit.
+ /* For non-root recipes rid should be 0, for root it should be correct
+ * rid value ored with 0x80 (is root bit).
 */
- status = ice_create_first_fit_recp_def(hw, lkup_exts,
- &rm->rg_list, &recp_count);
- if (!status) {
- rm->n_grp_count += recp_count;
- rm->n_ext_words = lkup_exts->n_val_words;
- memcpy(&rm->ext_words, lkup_exts->fv_words,
- sizeof(rm->ext_words));
- memcpy(rm->word_masks, lkup_exts->field_mask,
- sizeof(rm->word_masks));
- }
+ root->content.rid = rid | ICE_AQ_RECIPE_ID_IS_ROOT;
- return status;
-}
+ /* Fill remaining lookups in root recipe */
+ word = 0;
+ while (lookup < rm->n_ext_words &&
+ word < ICE_NUM_WORDS_RECIPE /* should always be true */) {
+ root->content.lkup_indx[word] = rm->fv_idx[lookup];
+ root->content.mask[word] = cpu_to_le16(rm->fv_mask[lookup]);
-/**
- * ice_tun_type_match_word - determine if tun type needs a match mask
- * @tun_type: tunnel type
- * @mask: mask to be used for the tunnel
- */
-static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
-{
- switch (tun_type) {
- case ICE_SW_TUN_GENEVE:
- case ICE_SW_TUN_VXLAN:
- case ICE_SW_TUN_NVGRE:
- case ICE_SW_TUN_GTPU:
- case ICE_SW_TUN_GTPC:
- *mask = ICE_TUN_FLAG_MASK;
- return true;
-
- default:
- *mask = 0;
- return false;
+ lookup++;
+ word++;
 }
-}
-
-/**
- * ice_add_special_words - Add words that are not protocols, such as metadata
- * @rinfo: other information regarding the rule e.g. priority and action info
- * @lkup_exts: lookup word structure
- * @dvm_ena: is double VLAN mode enabled
- */
-static int
-ice_add_special_words(struct ice_adv_rule_info *rinfo,
- struct ice_prot_lkup_ext *lkup_exts, bool dvm_ena)
-{
- u16 mask;
- /* If this is a tunneled packet, then add recipe index to match the
- * tunnel bit in the packet metadata flags.
- */
- if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
- if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
- u8 word = lkup_exts->n_val_words++;
+ /* Fill result indexes as lookups */
+ i = 0;
+ while (i < recp_cnt - 1 &&
+ word < ICE_NUM_WORDS_RECIPE /* should always be true */) {
+ root->content.lkup_indx[word] = buf[i].content.result_indx &
+ ~ICE_AQ_RECIPE_RESULT_EN;
+ root->content.mask[word] = cpu_to_le16(0xffff);
+ /* For bookkeeping, it is needed to mark FV index as used for
+ * intermediate result.
+ */
+ set_bit(root->content.lkup_indx[word], recipe->res_idxs);
- lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
- lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
- lkup_exts->field_mask[word] = mask;
- } else {
- return -ENOSPC;
- }
+ i++;
+ word++;
 }
- if (rinfo->vlan_type != 0 && dvm_ena) {
- if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
- u8 word = lkup_exts->n_val_words++;
+ rm->root_rid = rid;
+ bookkeep_recipe(&hw->switch_info->recp_list[rid], root, rm);
- lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
- lkup_exts->fv_words[word].off = ICE_VLAN_FLAG_MDID_OFF;
- lkup_exts->field_mask[word] =
- ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK;
- } else {
- return -ENOSPC;
- }
- }
+ /* Program the recipe */
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ice_aq_add_recipe(hw, buf, recp_cnt, NULL);
+ ice_release_change_lock(hw);
+ if (status)
+ return status;
 return 0;
 }
@@ -5335,6 +5243,9 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
 case ICE_SW_TUN_GTPC:
 prof_type = ICE_PROF_TUN_GTPC;
 break;
+ case ICE_SW_TUN_PFCP:
+ prof_type = ICE_PROF_TUN_PFCP;
+ break;
 case ICE_SW_TUN_AND_NON_TUN:
 default:
 prof_type = ICE_PROF_ALL;
@@ -5345,6 +5256,49 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
 }
 /**
+ * ice_subscribe_recipe - subscribe to an existing recipe
+ * @hw: pointer to the hardware structure
+ * @rid: recipe ID to subscribe to
+ *
+ * Return: 0 on success, and others on error
+ */
+static int ice_subscribe_recipe(struct ice_hw *hw, u16 rid)
+{
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+ u16 buf_len = __struct_size(sw_buf);
+ u16 res_type;
+ int status;
+
+ /* Prepare buffer to allocate resource */
+ sw_buf->num_elems = cpu_to_le16(1);
+ res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, ICE_AQC_RES_TYPE_RECIPE) |
+ ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED |
+ ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_CTL;
+ sw_buf->res_type = cpu_to_le16(res_type);
+
+ sw_buf->elem[0].e.sw_resp = cpu_to_le16(rid);
+
+ status = ice_aq_alloc_free_res(hw, sw_buf, buf_len,
+ ice_aqc_opc_alloc_res);
+
+ return status;
+}
+
+/**
+ * ice_subscribable_recp_shared - share an existing subscribable recipe
+ * @hw: pointer to the hardware structure
+ * @rid: recipe ID to subscribe to
+ */
+static void ice_subscribable_recp_shared(struct ice_hw *hw, u16 rid)
+{
+ struct ice_sw_recipe *recps = hw->switch_info->recp_list;
+ u16 sub_rid;
+
+ for_each_set_bit(sub_rid, recps[rid].r_bitmap, ICE_MAX_NUM_RECIPES)
+ ice_subscribe_recipe(hw, sub_rid);
+}
+
+/**
 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
 * @hw: pointer to hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
@@ -5360,12 +5314,11 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
 DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
 struct ice_prot_lkup_ext *lkup_exts;
- struct ice_recp_grp_entry *r_entry;
 struct ice_sw_fv_list_entry *fvit;
- struct ice_recp_grp_entry *r_tmp;
 struct ice_sw_fv_list_entry *tmp;
 struct ice_sw_recipe *rm;
 int status = 0;
+ u16 rid_tmp;
 u8 i;
 if (!lkups_cnt)
@@ -5403,7 +5356,6 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 * headers being programmed.
 */
 INIT_LIST_HEAD(&rm->fv_list);
- INIT_LIST_HEAD(&rm->rg_list);
 /* Get bitmap of field vectors (profiles) that are compatible with the
 * rule request; only these will be searched in the subsequent call to
@@ -5415,27 +5367,21 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 if (status)
 goto err_unroll;
- /* Create any special protocol/offset pairs, such as looking at tunnel
- * bits by extracting metadata
- */
- status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw));
- if (status)
- goto err_free_lkup_exts;
-
- /* Group match words into recipes using preferred recipe grouping
- * criteria.
- */
- status = ice_create_recipe_group(hw, rm, lkup_exts);
- if (status)
- goto err_unroll;
+ /* Copy FV words and masks from lkup_exts to recipe struct. */
+ rm->n_ext_words = lkup_exts->n_val_words;
+ memcpy(rm->ext_words, lkup_exts->fv_words, sizeof(rm->ext_words));
+ memcpy(rm->word_masks, lkup_exts->field_mask, sizeof(rm->word_masks));
 /* set the recipe priority if specified */
 rm->priority = (u8)rinfo->priority;
+ rm->need_pass_l2 = rinfo->need_pass_l2;
+ rm->allow_pass_l2 = rinfo->allow_pass_l2;
+
 /* Find offsets from the field vector. Pick the first one for all the
 * recipes.
 */
- status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
+ status = ice_fill_fv_word_index(hw, rm);
 if (status)
 goto err_unroll;
@@ -5447,10 +5393,14 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 }
 /* Look for a recipe which matches our requested fv / mask list */
- *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
- if (*rid < ICE_MAX_NUM_RECIPES)
+ *rid = ice_find_recp(hw, lkup_exts, rinfo, true);
+ if (*rid < ICE_MAX_NUM_RECIPES) {
 /* Success if found a recipe that match the existing criteria */
+ if (hw->recp_reuse)
+ ice_subscribable_recp_shared(hw, *rid);
+
 goto err_unroll;
+ }
 rm->tun_type = rinfo->tun_type;
 /* Recipe we need does not exist, add a recipe */
@@ -5463,26 +5413,28 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 */
 list_for_each_entry(fvit, &rm->fv_list, list_entry) {
 DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ u64 recp_assoc;
 u16 j;
 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
- (u8 *)r_bitmap, NULL);
+ &recp_assoc, NULL);
 if (status)
- goto err_unroll;
+ goto err_free_recipe;
+ bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);
 bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap, ICE_MAX_NUM_RECIPES);
 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
 if (status)
- goto err_unroll;
+ goto err_free_recipe;
+ bitmap_to_arr64(&recp_assoc, r_bitmap, ICE_MAX_NUM_RECIPES);
 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
- (u8 *)r_bitmap,
- NULL);
+ recp_assoc, NULL);
 ice_release_change_lock(hw);
 if (status)
- goto err_unroll;
+ goto err_free_recipe;
 /* Update profile to recipe bitmap array */
 bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
@@ -5496,20 +5448,22 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 *rid = rm->root_rid;
 memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts, sizeof(*lkup_exts));
-err_unroll:
- list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
- list_del(&r_entry->l_entry);
- devm_kfree(ice_hw_to_dev(hw), r_entry);
+ goto err_unroll;
+
+err_free_recipe:
+ if (hw->recp_reuse) {
+ for_each_set_bit(rid_tmp, rm->r_bitmap, ICE_MAX_NUM_RECIPES) {
+ if (!ice_free_recipe_res(hw, rid_tmp))
+ clear_bit(rid_tmp, rm->r_bitmap);
+ }
 }
+err_unroll:
 list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
 list_del(&fvit->list_entry);
 devm_kfree(ice_hw_to_dev(hw), fvit);
 }
- if (rm->root_buf)
- devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
-
 kfree(rm);
 err_free_lkup_exts:
@@ -5623,6 +5577,9 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
 case ICE_SW_TUN_VXLAN:
 match |= ICE_PKT_TUN_UDP;
 break;
+ case ICE_SW_TUN_PFCP:
+ match |= ICE_PKT_PFCP;
+ break;
 default:
 break;
 }
@@ -5707,6 +5664,10 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
 * was already checked when search for the dummy packet
 */
 type = lkups[i].type;
+ /* metadata isn't present in the packet */
+ if (type == ICE_HW_METADATA)
+ continue;
+
 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
 if (type == offsets[j].type) {
 offset = offsets[j].offset;
@@ -5759,6 +5720,9 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
 case ICE_GTP:
 len = sizeof(struct ice_udp_gtp_hdr);
 break;
+ case ICE_PFCP:
+ len = sizeof(struct ice_pfcp_hdr);
+ break;
 case ICE_PPPOE:
 len = sizeof(struct ice_pppoe_hdr);
 break;
@@ -5842,16 +5806,21 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
 /**
 * ice_fill_adv_packet_vlan - fill dummy packet with VLAN tag type
+ * @hw: pointer to hw structure
 * @vlan_type: VLAN tag type
 * @pkt: dummy packet to fill in
 * @offsets: offset info for the dummy packet
 */
 static int
-ice_fill_adv_packet_vlan(u16 vlan_type, u8 *pkt,
+ice_fill_adv_packet_vlan(struct ice_hw *hw, u16 vlan_type, u8 *pkt,
 const struct ice_dummy_pkt_offsets *offsets)
 {
 u16 i;
+ /* Check if there is something to do */
+ if (!vlan_type || !ice_is_dvm_ena(hw))
+ return 0;
+
 /* Find VLAN header and insert VLAN TPID */
 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
 if (offsets[i].type == ICE_VLAN_OFOS ||
@@ -5870,6 +5839,17 @@ ice_fill_adv_packet_vlan(u16 vlan_type, u8 *pkt,
 return -EIO;
 }
+static bool ice_rules_equal(const struct ice_adv_rule_info *first,
+ const struct ice_adv_rule_info *second)
+{
+ return first->sw_act.flag == second->sw_act.flag &&
+ first->tun_type == second->tun_type &&
+ first->vlan_type == second->vlan_type &&
+ first->src_vsi == second->src_vsi &&
+ first->need_pass_l2 == second->need_pass_l2 &&
+ first->allow_pass_l2 == second->allow_pass_l2;
+}
+
 /**
 * ice_find_adv_rule_entry - Search a rule entry
 * @hw: pointer to the hardware structure
@@ -5903,9 +5883,7 @@ ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 lkups_matched = false;
 break;
 }
- if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
- rinfo->tun_type == list_itr->rule_info.tun_type &&
- rinfo->vlan_type == list_itr->rule_info.vlan_type &&
+ if (ice_rules_equal(rinfo, &list_itr->rule_info) &&
 lkups_matched)
 return list_itr;
 }
@@ -6001,7 +5979,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
 /* A rule already exists with the new VSI being added */
 if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
- return 0;
+ return -EEXIST;
 /* Update the previously created VSI list set with
 * the new VSI ID passed in
@@ -6021,6 +5999,33 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
 return status;
 }
+void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup)
+{
+ lkup->type = ICE_HW_METADATA;
+ lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID21] |=
+ cpu_to_be16(ICE_PKT_TUNNEL_MASK);
+}
+
+void ice_rule_add_direction_metadata(struct ice_adv_lkup_elem *lkup)
+{
+ lkup->type = ICE_HW_METADATA;
+ lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID20] |=
+ cpu_to_be16(ICE_PKT_FROM_NETWORK);
+}
+
+void ice_rule_add_vlan_metadata(struct ice_adv_lkup_elem *lkup)
+{
+ lkup->type = ICE_HW_METADATA;
+ lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID20] |=
+ cpu_to_be16(ICE_PKT_VLAN_MASK);
+}
+
+void ice_rule_add_src_vsi_metadata(struct ice_adv_lkup_elem *lkup)
+{
+ lkup->type = ICE_HW_METADATA;
+ lkup->m_u.metadata.source_vsi = cpu_to_be16(ICE_MDID_SOURCE_VSI_MASK);
+}
+
 /**
 * ice_add_adv_rule - helper function to create an advanced switch rule
 * @hw: pointer to the hardware structure
@@ -6088,7 +6093,9 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
- rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) {
+ rinfo->sw_act.fltr_act == ICE_DROP_PACKET ||
+ rinfo->sw_act.fltr_act == ICE_MIRROR_PACKET ||
+ rinfo->sw_act.fltr_act == ICE_NOP)) {
 status = -EIO;
 goto free_pkt_profile;
 }
@@ -6099,10 +6106,16 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 goto free_pkt_profile;
 }
- if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
+ if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
+ rinfo->sw_act.fltr_act == ICE_MIRROR_PACKET ||
+ rinfo->sw_act.fltr_act == ICE_NOP) {
 rinfo->sw_act.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
- if (rinfo->sw_act.flag & ICE_FLTR_TX)
+ }
+
+ if (rinfo->src_vsi)
+ rinfo->sw_act.src = ice_get_hw_vsi_num(hw, rinfo->src_vsi);
+ else
 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
@@ -6134,56 +6147,69 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 status = -ENOMEM;
 goto free_pkt_profile;
 }
- if (!rinfo->flags_info.act_valid) {
- act |= ICE_SINGLE_ACT_LAN_ENABLE;
- act |= ICE_SINGLE_ACT_LB_ENABLE;
- } else {
- act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
- ICE_SINGLE_ACT_LB_ENABLE);
+
+ if (rinfo->sw_act.fltr_act != ICE_MIRROR_PACKET) {
+ if (!rinfo->flags_info.act_valid) {
+ act |= ICE_SINGLE_ACT_LAN_ENABLE;
+ act |= ICE_SINGLE_ACT_LB_ENABLE;
+ } else {
+ act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
+ ICE_SINGLE_ACT_LB_ENABLE);
+ }
 }
 switch (rinfo->sw_act.fltr_act) {
 case ICE_FWD_TO_VSI:
- act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
- ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
+ rinfo->sw_act.fwd_id.hw_vsi_id);
 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
 break;
 case ICE_FWD_TO_Q:
 act |= ICE_SINGLE_ACT_TO_Q;
- act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
- ICE_SINGLE_ACT_Q_INDEX_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M,
+ rinfo->sw_act.fwd_id.q_id);
 break;
 case ICE_FWD_TO_QGRP:
 q_rgn = rinfo->sw_act.qgrp_size > 0 ? (u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
 act |= ICE_SINGLE_ACT_TO_Q;
- act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
- ICE_SINGLE_ACT_Q_INDEX_M;
- act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
- ICE_SINGLE_ACT_Q_REGION_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M,
+ rinfo->sw_act.fwd_id.q_id);
+ act |= FIELD_PREP(ICE_SINGLE_ACT_Q_REGION_M, q_rgn);
 break;
 case ICE_DROP_PACKET:
 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP | ICE_SINGLE_ACT_VALID_BIT;
 break;
+ case ICE_MIRROR_PACKET:
+ act |= ICE_SINGLE_ACT_OTHER_ACTS;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
+ rinfo->sw_act.fwd_id.hw_vsi_id);
+ break;
+ case ICE_NOP:
+ act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
+ rinfo->sw_act.fwd_id.hw_vsi_id);
+ act &= ~ICE_SINGLE_ACT_VALID_BIT;
+ break;
 default:
 status = -EIO;
 goto err_ice_add_adv_rule;
 }
- /* set the rule LOOKUP type based on caller specified 'Rx'
- * instead of hardcoding it to be either LOOKUP_TX/RX
+ /* If there is no matching criteria for direction there
+ * is only one difference between Rx and Tx:
+ * - get switch id base on VSI number from source field (Tx)
+ * - get switch id base on port number (Rx)
 *
- * for 'Rx' set the source to be the port number
- * for 'Tx' set the source to be the source HW VSI number (determined
- * by caller)
+ * If matching on direction metadata is chose rule direction is
+ * extracted from type value set here.
 */
- if (rinfo->rx) {
- s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
- s_rule->src = cpu_to_le16(hw->port_info->lport);
- } else {
+ if (rinfo->sw_act.flag & ICE_FLTR_TX) {
 s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
 s_rule->src = cpu_to_le16(rinfo->sw_act.src);
+ } else {
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ s_rule->src = cpu_to_le16(hw->port_info->lport);
 }
 s_rule->recipe_id = cpu_to_le16(rid);
@@ -6193,22 +6219,16 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 if (status)
 goto err_ice_add_adv_rule;
- if (rinfo->tun_type != ICE_NON_TUN &&
- rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
- status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
- s_rule->hdr_data,
- profile->offsets);
- if (status)
- goto err_ice_add_adv_rule;
- }
+ status = ice_fill_adv_packet_tun(hw, rinfo->tun_type, s_rule->hdr_data,
+ profile->offsets);
+ if (status)
+ goto err_ice_add_adv_rule;
- if (rinfo->vlan_type != 0 && ice_is_dvm_ena(hw)) {
- status = ice_fill_adv_packet_vlan(rinfo->vlan_type,
- s_rule->hdr_data,
- profile->offsets);
- if (status)
- goto err_ice_add_adv_rule;
- }
+ status = ice_fill_adv_packet_vlan(hw, rinfo->vlan_type,
+ s_rule->hdr_data,
+ profile->offsets);
+ if (status)
+ goto err_ice_add_adv_rule;
 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
@@ -6304,8 +6324,6 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
 if (!itr->vsi_list_info ||
 !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
 continue;
- /* Clearing it so that the logic can add it back */
- clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
 f_entry.fltr_info.vsi_handle = vsi_handle;
 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
 /* update the src in case it is VSI num */
@@ -6451,14 +6469,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 return -EIO;
 }
- /* Create any special protocol/offset pairs, such as looking at tunnel
- * bits by extracting metadata
- */
- status = ice_add_special_words(rinfo, &lkup_exts, ice_is_dvm_ena(hw));
- if (status)
- return status;
-
- rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
+ rid = ice_find_recp(hw, &lkup_exts, rinfo, false);
 /* If did not find a recipe that match the existing criteria */
 if (rid == ICE_MAX_NUM_RECIPES)
 return -EINVAL;
@@ -6502,14 +6513,21 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 ice_aqc_opc_remove_sw_rules, NULL);
 if (!status || status == -ENOENT) {
 struct ice_switch_info *sw = hw->switch_info;
+ struct ice_sw_recipe *r_list = sw->recp_list;
 mutex_lock(rule_lock);
 list_del(&list_elem->list_entry);
 devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
 devm_kfree(ice_hw_to_dev(hw), list_elem);
 mutex_unlock(rule_lock);
- if (list_empty(&sw->recp_list[rid].filt_rules))
- sw->recp_list[rid].adv_rule = false;
+ if (list_empty(&r_list[rid].filt_rules)) {
+ r_list[rid].adv_rule = false;
+
+ /* All rules for this recipe are now removed */
+ if (hw->recp_reuse)
+ ice_release_recipe_res(hw,
+ &r_list[rid]);
+ }
 }
 kfree(s_rule);
 }
@@ -6552,59 +6570,6 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw,
 }
 /**
- * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
- * given VSI handle
- * @hw: pointer to the hardware structure
- * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
- *
- * This function is used to remove all the rules for a given VSI and as soon
- * as removing a rule fails, it will return immediately with the error code,
- * else it will return success.
- */
-int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
-{
- struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
- struct ice_vsi_list_map_info *map_info;
- struct ice_adv_rule_info rinfo;
- struct list_head *list_head;
- struct ice_switch_info *sw;
- int status;
- u8 rid;
-
- sw = hw->switch_info;
- for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
- if (!sw->recp_list[rid].recp_created)
- continue;
- if (!sw->recp_list[rid].adv_rule)
- continue;
-
- list_head = &sw->recp_list[rid].filt_rules;
- list_for_each_entry_safe(list_itr, tmp_entry, list_head,
- list_entry) {
- rinfo = list_itr->rule_info;
-
- if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
- map_info = list_itr->vsi_list_info;
- if (!map_info)
- continue;
-
- if (!test_bit(vsi_handle, map_info->vsi_map))
- continue;
- } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
- continue;
- }
-
- rinfo.sw_act.vsi_handle = vsi_handle;
- status = ice_rem_adv_rule(hw, list_itr->lkups,
- list_itr->lkups_cnt, &rinfo);
- if (status)
- return status;
- }
- }
- return 0;
-}
-
-/**
 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: driver VSI handle
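Note on the exported ice_rule_add_*_metadata() helpers added in this diff: a caller builds an array of struct ice_adv_lkup_elem, uses a helper to request a metadata match (instead of open-coding an ICE_HW_METADATA lookup), and passes the array to ice_add_adv_rule() together with a struct ice_adv_rule_info. The sketch below is illustrative only and is not part of the patch; the function name, the single-lookup rule, the GTP-U tunnel type and the forward-to-VSI action are assumptions chosen for the example.

/* Hypothetical caller-side sketch (not from this patch): match tunnelled
 * traffic via the metadata helper and forward it to a VSI.
 */
static int example_add_tunnel_rule(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_adv_lkup_elem lkups[1] = {};
	struct ice_rule_query_data added = {};
	struct ice_adv_rule_info rinfo = {};

	/* lookup 0: match the "packet came through a tunnel" metadata flag */
	ice_rule_add_tunnel_metadata(&lkups[0]);

	rinfo.tun_type = ICE_SW_TUN_GTPU;
	rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
	rinfo.sw_act.vsi_handle = vsi_handle;
	rinfo.sw_act.flag = ICE_FLTR_RX;

	return ice_add_adv_rule(hw, lkups, ARRAY_SIZE(lkups), &rinfo, &added);
}

The TC flower offload path in the driver follows the same pattern, deciding per filter which metadata words to add before calling ice_add_adv_rule().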

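Several hunks above also convert open-coded shift-and-mask action encoding to FIELD_PREP() from linux/bitfield.h. A minimal sketch of the equivalence, assuming the driver's existing ICE_SINGLE_ACT_VSI_ID_S/_M definitions (the helper function itself is illustrative, not part of the patch):

#include <linux/bitfield.h>

/* FIELD_PREP() derives the shift from the mask, so the explicit _S shift
 * define is no longer needed at the call site; both expressions below
 * produce the same VSI ID bits of the action word.
 */
static u32 example_encode_vsi_act(u32 hw_vsi_id)
{
	u32 old_way = (hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
		      ICE_SINGLE_ACT_VSI_ID_M;
	u32 new_way = FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, hw_vsi_id);

	/* old_way == new_way for any hw_vsi_id */
	return new_way | ICE_SINGLE_ACT_VSI_FORWARDING |
	       ICE_SINGLE_ACT_VALID_BIT;
}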