Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_tc_lib.c')
 -rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.c | 630
 1 file changed, 503 insertions(+), 127 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index faba0f857cd9..fb9ea7f8ef44 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -7,19 +7,26 @@
 #include "ice_lib.h"
 #include "ice_protocol_type.h"
 
+#define ICE_TC_METADATA_LKUP_IDX 0
+
 /**
  * ice_tc_count_lkups - determine lookup count for switch filter
  * @flags: TC-flower flags
- * @headers: Pointer to TC flower filter header structure
  * @fltr: Pointer to outer TC filter structure
  *
- * Determine lookup count based on TC flower input for switch filter.
+ * Return: lookup count based on TC flower input for a switch filter.
  */
-static int
-ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
-		   struct ice_tc_flower_fltr *fltr)
+static int ice_tc_count_lkups(u32 flags, struct ice_tc_flower_fltr *fltr)
 {
-	int lkups_cnt = 0;
+	int lkups_cnt = 1; /* 0th lookup is metadata */
+
+	/* Always add metadata as the 0th lookup. Included elements:
+	 * - Direction flag (always present)
+	 * - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified)
+	 * - Tunnel flag (present if tunnel)
+	 */
+	if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
+		lkups_cnt++;
 
 	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
 		lkups_cnt++;
@@ -27,7 +34,10 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
 	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
 		lkups_cnt++;
 
-	if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS)
+	if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS)
+		lkups_cnt++;
+
+	if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS)
 		lkups_cnt++;
 
 	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
@@ -130,6 +140,8 @@ ice_proto_type_from_tunnel(enum ice_tunnel_type type)
 		return ICE_GTP;
 	case TNL_GTPC:
 		return ICE_GTP_NO_PAY;
+	case TNL_PFCP:
+		return ICE_PFCP;
 	default:
 		return 0;
 	}
@@ -149,6 +161,8 @@ ice_sw_type_from_tunnel(enum ice_tunnel_type type)
 		return ICE_SW_TUN_GTPU;
 	case TNL_GTPC:
 		return ICE_SW_TUN_GTPC;
+	case TNL_PFCP:
+		return ICE_SW_TUN_PFCP;
 	default:
 		return ICE_NON_TUN;
 	}
@@ -168,10 +182,9 @@ static u16 ice_check_supported_vlan_tpid(u16 vlan_tpid)
 
 static int
 ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
-			 struct ice_adv_lkup_elem *list)
+			 struct ice_adv_lkup_elem *list, int i)
 {
 	struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers;
-	int i = 0;
 
 	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) {
 		u32 tenant_id;
@@ -212,8 +225,7 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
 		i++;
 	}
 
-	if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS &&
-	    (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
+	if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS) {
 		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
 
 		if (fltr->gtp_pdu_info_masks.pdu_type) {
@@ -230,6 +242,22 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
 		i++;
 	}
 
+	if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS) {
+		struct ice_pfcp_hdr *hdr_h, *hdr_m;
+
+		hdr_h = &list[i].h_u.pfcp_hdr;
+		hdr_m = &list[i].m_u.pfcp_hdr;
+		list[i].type = ICE_PFCP;
+
+		hdr_h->flags = fltr->pfcp_meta_keys.type;
+		hdr_m->flags = fltr->pfcp_meta_masks.type & 0x01;
+
+		hdr_h->seid = fltr->pfcp_meta_keys.seid;
+		hdr_m->seid = fltr->pfcp_meta_masks.seid;
+
+		i++;
+	}
+
 	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
 		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
 		list[i].type = ice_proto_type_from_ipv4(false);
@@ -320,6 +348,9 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
 		i++;
 	}
 
+	/* always fill matching on tunneled packets in metadata */
+	ice_rule_add_tunnel_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
+
 	return i;
 }
 
@@ -346,16 +377,27 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
 	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
 	bool inner = false;
 	u16 vlan_tpid = 0;
-	int i = 0;
+	int i = 1; /* 0th lookup is metadata */
 
 	rule_info->vlan_type = vlan_tpid;
 
+	/* Always add direction metadata */
+	ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
+
+	if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
+		ice_rule_add_src_vsi_metadata(&list[i]);
+		i++;
+	}
+
 	rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
 	if (tc_fltr->tunnel_type != TNL_LAST) {
-		i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list);
+		i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
 
-		headers = &tc_fltr->inner_headers;
-		inner = true;
+		/* PFCP is considered non-tunneled - don't swap headers. */
+		if (tc_fltr->tunnel_type != TNL_PFCP) {
+			headers = &tc_fltr->inner_headers;
+			inner = true;
+		}
 	}
 
 	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
@@ -390,10 +432,6 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
 	/* copy VLAN info */
 	if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO)) {
-		vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
-		rule_info->vlan_type =
-				ice_check_supported_vlan_tpid(vlan_tpid);
-
 		if (flags & ICE_TC_FLWR_FIELD_CVLAN)
 			list[i].type = ICE_VLAN_EX;
 		else
@@ -418,6 +456,14 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
 		i++;
 	}
 
+	if (flags & ICE_TC_FLWR_FIELD_VLAN_TPID) {
+		vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
+		rule_info->vlan_type =
+				ice_check_supported_vlan_tpid(vlan_tpid);
+
+		ice_rule_add_vlan_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
+	}
+
 	if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO)) {
 		list[i].type = ICE_VLAN_IN;
@@ -605,6 +651,8 @@ static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
 	 */
 	if (netif_is_gtp(tunnel_dev))
 		return TNL_GTPU;
+	if (netif_is_pfcp(tunnel_dev))
+		return TNL_PFCP;
 	return TNL_LAST;
 }
 
@@ -613,32 +661,96 @@ bool ice_is_tunnel_supported(struct net_device *dev)
 	return ice_tc_tun_get_type(dev) != TNL_LAST;
 }
 
-static int
-ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
-			    struct flow_action_entry *act)
+static bool ice_tc_is_dev_uplink(struct net_device *dev)
+{
+	return netif_is_ice(dev) || ice_is_tunnel_supported(dev);
+}
+
+static int ice_tc_setup_action(struct net_device *filter_dev,
+			       struct ice_tc_flower_fltr *fltr,
+			       struct net_device *target_dev,
+			       enum ice_sw_fwd_act_type action)
 {
 	struct ice_repr *repr;
 
+	if (action != ICE_FWD_TO_VSI && action != ICE_MIRROR_PACKET) {
+		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action to setup provided");
+		return -EINVAL;
+	}
+
+	fltr->action.fltr_act = action;
+
+	if (ice_is_port_repr_netdev(filter_dev) &&
+	    ice_is_port_repr_netdev(target_dev) &&
+	    fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
+		repr = ice_netdev_to_repr(target_dev);
+
+		fltr->dest_vsi = repr->src_vsi;
+	} else if (ice_is_port_repr_netdev(filter_dev) &&
+		   ice_tc_is_dev_uplink(target_dev) &&
+		   fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
+		repr = ice_netdev_to_repr(filter_dev);
+
+		fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
+	} else if (ice_tc_is_dev_uplink(filter_dev) &&
+		   ice_is_port_repr_netdev(target_dev) &&
+		   fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
+		repr = ice_netdev_to_repr(target_dev);
+
+		fltr->dest_vsi = repr->src_vsi;
+	} else {
+		NL_SET_ERR_MSG_MOD(fltr->extack,
+				   "The action is not supported for this netdevice");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+ice_tc_setup_drop_action(struct net_device *filter_dev,
+			 struct ice_tc_flower_fltr *fltr)
+{
+	fltr->action.fltr_act = ICE_DROP_PACKET;
+
+	if (!ice_tc_is_dev_uplink(filter_dev) &&
+	    !(ice_is_port_repr_netdev(filter_dev) &&
+	      fltr->direction == ICE_ESWITCH_FLTR_INGRESS)) {
+		NL_SET_ERR_MSG_MOD(fltr->extack,
+				   "The action is not supported for this netdevice");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
+				       struct ice_tc_flower_fltr *fltr,
+				       struct flow_action_entry *act)
+{
+	int err;
+
 	switch (act->id) {
 	case FLOW_ACTION_DROP:
-		fltr->action.fltr_act = ICE_DROP_PACKET;
+		err = ice_tc_setup_drop_action(filter_dev, fltr);
+		if (err)
+			return err;
+
 		break;
 
 	case FLOW_ACTION_REDIRECT:
-		fltr->action.fltr_act = ICE_FWD_TO_VSI;
+		err = ice_tc_setup_action(filter_dev, fltr,
+					  act->dev, ICE_FWD_TO_VSI);
+		if (err)
+			return err;
 
-		if (ice_is_port_repr_netdev(act->dev)) {
-			repr = ice_netdev_to_repr(act->dev);
+		break;
 
-			fltr->dest_vsi = repr->src_vsi;
-			fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
-		} else if (netif_is_ice(act->dev) ||
-			   ice_is_tunnel_supported(act->dev)) {
-			fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
-		} else {
-			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
-			return -EINVAL;
-		}
+	case FLOW_ACTION_MIRRED:
+		err = ice_tc_setup_action(filter_dev, fltr,
+					  act->dev, ICE_MIRROR_PACKET);
+		if (err)
+			return err;
 
 		break;
@@ -650,10 +762,157 @@ ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
 	return 0;
 }
 
+static bool ice_is_fltr_lldp(struct ice_tc_flower_fltr *fltr)
+{
+	return fltr->outer_headers.l2_key.n_proto == htons(ETH_P_LLDP);
+}
+
+static bool ice_is_fltr_pf_tx_lldp(struct ice_tc_flower_fltr *fltr)
+{
+	struct ice_vsi *vsi = fltr->src_vsi, *uplink;
+
+	if (!ice_is_switchdev_running(vsi->back))
+		return false;
+
+	uplink = vsi->back->eswitch.uplink_vsi;
+	return vsi == uplink && fltr->action.fltr_act == ICE_DROP_PACKET &&
+	       ice_is_fltr_lldp(fltr) &&
+	       fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+	       fltr->flags == ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
+}
+
+static bool ice_is_fltr_vf_tx_lldp(struct ice_tc_flower_fltr *fltr)
+{
+	struct ice_vsi *vsi = fltr->src_vsi, *uplink;
+
+	uplink = vsi->back->eswitch.uplink_vsi;
+	return fltr->src_vsi->type == ICE_VSI_VF && ice_is_fltr_lldp(fltr) &&
+	       fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+	       fltr->dest_vsi == uplink;
+}
+
+static struct ice_tc_flower_fltr *
+ice_find_pf_tx_lldp_fltr(struct ice_pf *pf)
+{
+	struct ice_tc_flower_fltr *fltr;
+
+	hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
+		if (ice_is_fltr_pf_tx_lldp(fltr))
+			return fltr;
+
+	return NULL;
+}
+
+static bool ice_any_vf_lldp_tx_ena(struct ice_pf *pf)
+{
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	ice_for_each_vf(pf, bkt, vf)
+		if (vf->lldp_tx_ena)
+			return true;
+
+	return false;
+}
+
+int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit)
+{
+	struct ice_rule_query_data remove_entry = {
+		.rid = vsi->vf->lldp_recipe_id,
+		.rule_id = vsi->vf->lldp_rule_id,
+		.vsi_handle = vsi->idx,
+	};
+	struct ice_pf *pf = vsi->back;
+	int err;
+
+	if (vsi->vf->lldp_tx_ena)
+		return 0;
+
+	if (!deinit && !ice_find_pf_tx_lldp_fltr(vsi->back))
+		return -EINVAL;
+
+	if (!deinit && ice_any_vf_lldp_tx_ena(pf))
+		return -EINVAL;
+
+	err = ice_rem_adv_rule_by_id(&pf->hw, &remove_entry);
+	if (!err)
+		vsi->vf->lldp_tx_ena = true;
+
+	return err;
+}
+
+int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init)
+{
+	struct ice_rule_query_data rule_added;
+	struct ice_adv_rule_info rinfo = {
+		.priority = 7,
+		.src_vsi = vsi->idx,
+		.sw_act = {
+			.src = vsi->idx,
+			.flag = ICE_FLTR_TX,
+			.fltr_act = ICE_DROP_PACKET,
+			.vsi_handle = vsi->idx,
+		},
+		.flags_info.act_valid = true,
+	};
+	struct ice_adv_lkup_elem list[3];
+	struct ice_pf *pf = vsi->back;
+	int err;
+
+	if (!init && !vsi->vf->lldp_tx_ena)
+		return 0;
+
+	memset(list, 0, sizeof(list));
+	ice_rule_add_direction_metadata(&list[0]);
+	ice_rule_add_src_vsi_metadata(&list[1]);
+	list[2].type = ICE_ETYPE_OL;
+	list[2].h_u.ethertype.ethtype_id = htons(ETH_P_LLDP);
+	list[2].m_u.ethertype.ethtype_id = htons(0xFFFF);
+
+	err = ice_add_adv_rule(&pf->hw, list, ARRAY_SIZE(list), &rinfo,
+			       &rule_added);
+	if (err) {
+		dev_err(&pf->pdev->dev,
+			"Failed to add an LLDP rule to VSI 0x%X: %d\n",
+			vsi->idx, err);
+	} else {
+		vsi->vf->lldp_recipe_id = rule_added.rid;
+		vsi->vf->lldp_rule_id = rule_added.rule_id;
+		vsi->vf->lldp_tx_ena = false;
+	}
+
+	return err;
+}
+
+static void ice_handle_add_pf_lldp_drop_rule(struct ice_vsi *vsi)
+{
+	struct ice_tc_flower_fltr *fltr;
+	struct ice_pf *pf = vsi->back;
+
+	hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node) {
+		if (!ice_is_fltr_vf_tx_lldp(fltr))
+			continue;
+		ice_pass_vf_tx_lldp(fltr->src_vsi, true);
+		break;
+	}
+}
+
+static void ice_handle_del_pf_lldp_drop_rule(struct ice_pf *pf)
+{
+	int i;
+
+	/* Make the VF LLDP fwd to uplink rule dormant */
+	ice_for_each_vsi(pf, i) {
+		struct ice_vsi *vf_vsi = pf->vsi[i];
+
+		if (vf_vsi && vf_vsi->type == ICE_VSI_VF)
+			ice_drop_vf_tx_lldp(vf_vsi, false);
+	}
+}
+
 static int
 ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
 {
-	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
 	struct ice_adv_rule_info rule_info = { 0 };
 	struct ice_rule_query_data rule_added;
 	struct ice_hw *hw = &vsi->back->hw;
@@ -663,12 +922,15 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
 	int ret;
 	int i;
 
-	if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
+	if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT) {
 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
 		return -EOPNOTSUPP;
 	}
 
-	lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
+	if (ice_is_fltr_vf_tx_lldp(fltr))
+		return ice_pass_vf_tx_lldp(vsi, false);
+
+	lkups_cnt = ice_tc_count_lkups(flags, fltr);
 	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
 	if (!list)
 		return -ENOMEM;
@@ -679,10 +941,6 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
 		goto exit;
 	}
 
-	/* egress traffic is always redirect to uplink */
-	if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
-		fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;
-
 	rule_info.sw_act.fltr_act = fltr->action.fltr_act;
 	if (fltr->action.fltr_act != ICE_DROP_PACKET)
 		rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
@@ -693,32 +951,62 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
 	 * results into order of switch rule evaluation.
 	 */
 	rule_info.priority = 7;
+	rule_info.flags_info.act_valid = true;
 
 	if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
+		/* Uplink to VF */
 		rule_info.sw_act.flag |= ICE_FLTR_RX;
 		rule_info.sw_act.src = hw->pf_id;
-		rule_info.rx = true;
-	} else {
+		rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
+	} else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+		   !fltr->dest_vsi && vsi == vsi->back->eswitch.uplink_vsi) {
+		/* PF to Uplink */
+		rule_info.sw_act.flag |= ICE_FLTR_TX;
+		rule_info.sw_act.src = vsi->idx;
+	} else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+		   fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) {
+		/* VF to Uplink */
 		rule_info.sw_act.flag |= ICE_FLTR_TX;
 		rule_info.sw_act.src = vsi->idx;
-		rule_info.rx = false;
 		rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
-		rule_info.flags_info.act_valid = true;
+		/* This is a specific case. The destination VSI index is
+		 * overwritten by the source VSI index. This type of filter
+		 * should allow the packet to go to the LAN, not to the
+		 * VSI passed here. It should set LAN_EN bit only. However,
+		 * the VSI must be a valid one. Setting source VSI index
+		 * here is safe. Even if the result from switch is set LAN_EN
+		 * and LB_EN (which normally will pass the packet to this VSI)
+		 * packet won't be seen on the VSI, because local loopback is
+		 * turned off.
+		 */
+		rule_info.sw_act.vsi_handle = vsi->idx;
+	} else {
+		/* VF to VF */
+		rule_info.sw_act.flag |= ICE_FLTR_TX;
+		rule_info.sw_act.src = vsi->idx;
+		rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
 	}
 
 	/* specify the cookie as filter_rule_id */
 	rule_info.fltr_rule_id = fltr->cookie;
+	rule_info.src_vsi = vsi->idx;
 
 	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
 	if (ret == -EEXIST) {
 		NL_SET_ERR_MSG_MOD(fltr->extack,
 				   "Unable to add filter because it already exist");
 		ret = -EINVAL;
 		goto exit;
+	} else if (ret == -ENOSPC) {
+		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter: insufficient space available.");
+		goto exit;
 	} else if (ret) {
 		NL_SET_ERR_MSG_MOD(fltr->extack,
 				   "Unable to add filter due to error");
 		goto exit;
 	}
 
+	if (ice_is_fltr_pf_tx_lldp(fltr))
+		ice_handle_add_pf_lldp_drop_rule(vsi);
+
 	/* store the output params, which are needed later for removing
 	 * advanced switch filter
 	 */
@@ -734,17 +1022,16 @@ exit:
 /**
  * ice_locate_vsi_using_queue - locate VSI using queue (forward to queue action)
  * @vsi: Pointer to VSI
- * @tc_fltr: Pointer to tc_flower_filter
+ * @queue: Queue index
  *
- * Locate the VSI using specified queue. When ADQ is not enabled, always
- * return input VSI, otherwise locate corresponding VSI based on per channel
- * offset and qcount
+ * Locate the VSI using specified "queue". When ADQ is not enabled,
+ * always return input VSI, otherwise locate corresponding
+ * VSI based on per channel "offset" and "qcount"
  */
-static struct ice_vsi *
-ice_locate_vsi_using_queue(struct ice_vsi *vsi,
-			   struct ice_tc_flower_fltr *tc_fltr)
+struct ice_vsi *
+ice_locate_vsi_using_queue(struct ice_vsi *vsi, int queue)
 {
-	int num_tc, tc, queue;
+	int num_tc, tc;
 
 	/* if ADQ is not active, passed VSI is the candidate VSI */
 	if (!ice_is_adq_active(vsi->back))
@@ -754,7 +1041,6 @@ ice_locate_vsi_using_queue(struct ice_vsi *vsi,
 	 * upon queue number)
 	 */
 	num_tc = vsi->mqprio_qopt.qopt.num_tc;
-	queue = tc_fltr->action.fwd.q.queue;
 
 	for (tc = 0; tc < num_tc; tc++) {
 		int qcount = vsi->mqprio_qopt.qopt.count[tc];
@@ -792,10 +1078,11 @@ static struct ice_vsi *
 ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
 {
 	struct ice_rx_ring *ring = NULL;
-	struct ice_vsi *ch_vsi = NULL;
+	struct ice_vsi *dest_vsi = NULL;
 	struct ice_pf *pf = vsi->back;
 	struct device *dev;
 	u32 tc_class;
+	int q;
 
 	dev = ice_pf_to_dev(pf);
@@ -810,7 +1097,7 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
 			return ERR_PTR(-EOPNOTSUPP);
 		}
 		/* Locate ADQ VSI depending on hw_tc number */
-		ch_vsi = vsi->tc_map_vsi[tc_class];
+		dest_vsi = vsi->tc_map_vsi[tc_class];
 		break;
 	case ICE_FWD_TO_Q:
 		/* Locate the Rx queue */
@@ -824,7 +1111,8 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
 		/* Determine destination VSI even though the action is
 		 * FWD_TO_QUEUE, because QUEUE is associated with VSI
 		 */
-		ch_vsi = tc_fltr->dest_vsi;
+		q = tc_fltr->action.fwd.q.queue;
+		dest_vsi = ice_locate_vsi_using_queue(vsi, q);
 		break;
 	default:
 		dev_err(dev,
@@ -832,13 +1120,13 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
 			tc_fltr->action.fltr_act);
 		return ERR_PTR(-EINVAL);
 	}
-	/* Must have valid ch_vsi (it could be main VSI or ADQ VSI) */
-	if (!ch_vsi) {
+	/* Must have valid dest_vsi (it could be main VSI or ADQ VSI) */
+	if (!dest_vsi) {
 		dev_err(dev,
 			"Unable to add filter because specified destination VSI doesn't exist\n");
 		return ERR_PTR(-EINVAL);
 	}
-	return ch_vsi;
+	return dest_vsi;
 }
 
 /**
@@ -853,14 +1141,13 @@ static int
 ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
 			   struct ice_tc_flower_fltr *tc_fltr)
 {
-	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
 	struct ice_adv_rule_info rule_info = {0};
 	struct ice_rule_query_data rule_added;
 	struct ice_adv_lkup_elem *list;
 	struct ice_pf *pf = vsi->back;
 	struct ice_hw *hw = &pf->hw;
 	u32 flags = tc_fltr->flags;
-	struct ice_vsi *ch_vsi;
+	struct ice_vsi *dest_vsi;
 	struct device *dev;
 	u16 lkups_cnt = 0;
 	u16 l4_proto = 0;
@@ -883,11 +1170,13 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
 	}
 
 	/* validate forwarding action VSI and queue */
-	ch_vsi = ice_tc_forward_action(vsi, tc_fltr);
-	if (IS_ERR(ch_vsi))
-		return PTR_ERR(ch_vsi);
+	if (ice_is_forward_action(tc_fltr->action.fltr_act)) {
+		dest_vsi = ice_tc_forward_action(vsi, tc_fltr);
+		if (IS_ERR(dest_vsi))
+			return PTR_ERR(dest_vsi);
+	}
 
-	lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
+	lkups_cnt = ice_tc_count_lkups(flags, tc_fltr);
 	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
 	if (!list)
 		return -ENOMEM;
@@ -904,10 +1193,9 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
 
 	switch (tc_fltr->action.fltr_act) {
 	case ICE_FWD_TO_VSI:
-		rule_info.sw_act.vsi_handle = ch_vsi->idx;
+		rule_info.sw_act.vsi_handle = dest_vsi->idx;
 		rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
 		rule_info.sw_act.src = hw->pf_id;
-		rule_info.rx = true;
 		dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
 			tc_fltr->action.fwd.tc.tc_class,
 			rule_info.sw_act.vsi_handle, lkups_cnt);
@@ -915,22 +1203,26 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
 	case ICE_FWD_TO_Q:
 		/* HW queue number in global space */
 		rule_info.sw_act.fwd_id.q_id = tc_fltr->action.fwd.q.hw_queue;
-		rule_info.sw_act.vsi_handle = ch_vsi->idx;
+		rule_info.sw_act.vsi_handle = dest_vsi->idx;
 		rule_info.priority = ICE_SWITCH_FLTR_PRIO_QUEUE;
 		rule_info.sw_act.src = hw->pf_id;
-		rule_info.rx = true;
 		dev_dbg(dev, "add switch rule action to forward to queue:%u (HW queue %u), lkups_cnt:%u\n",
 			tc_fltr->action.fwd.q.queue,
 			tc_fltr->action.fwd.q.hw_queue, lkups_cnt);
 		break;
-	default:
-		rule_info.sw_act.flag |= ICE_FLTR_TX;
-		/* In case of Tx (LOOKUP_TX), src needs to be src VSI */
-		rule_info.sw_act.src = vsi->idx;
-		/* 'Rx' is false, direction of rule(LOOKUPTRX) */
-		rule_info.rx = false;
+	case ICE_DROP_PACKET:
+		if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
+			rule_info.sw_act.flag |= ICE_FLTR_TX;
+			rule_info.sw_act.src = vsi->idx;
+		} else {
+			rule_info.sw_act.flag |= ICE_FLTR_RX;
+			rule_info.sw_act.src = hw->pf_id;
+		}
 		rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
 		break;
+	default:
+		ret = -EOPNOTSUPP;
+		goto exit;
 	}
 
 	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
@@ -939,6 +1231,10 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
 			"Unable to add filter because it already exist");
 		ret = -EINVAL;
 		goto exit;
+	} else if (ret == -ENOSPC) {
+		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
+				   "Unable to add filter: insufficient space available.");
+		goto exit;
 	} else if (ret) {
 		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
 				   "Unable to add filter due to error");
@@ -953,11 +1249,11 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
 	tc_fltr->dest_vsi_handle = rule_added.vsi_handle;
 	if (tc_fltr->action.fltr_act == ICE_FWD_TO_VSI ||
 	    tc_fltr->action.fltr_act == ICE_FWD_TO_Q) {
-		tc_fltr->dest_vsi = ch_vsi;
+		tc_fltr->dest_vsi = dest_vsi;
 		/* keep track of advanced switch filter for
 		 * destination VSI
 		 */
-		ch_vsi->num_chnl_fltr++;
+		dest_vsi->num_chnl_fltr++;
 
 		/* keeps track of channel filters for PF VSI */
 		if (vsi->type == ICE_VSI_PF &&
@@ -978,6 +1274,10 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
 			tc_fltr->action.fwd.q.hw_queue, rule_added.rid,
 			rule_added.rule_id);
 		break;
+	case ICE_DROP_PACKET:
+		dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is drop, rid %u, rule_id %u\n",
+			lkups_cnt, flags, rule_added.rid, rule_added.rule_id);
+		break;
 	default:
 		break;
 	}
@@ -1228,6 +1528,7 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
 		      struct ice_tc_flower_fltr *fltr)
 {
 	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
+	struct netlink_ext_ack *extack = fltr->extack;
 	struct flow_match_control enc_control;
 
 	fltr->tunnel_type = ice_tc_tun_get_type(dev);
@@ -1248,6 +1549,9 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
 
 	flow_rule_match_enc_control(rule, &enc_control);
 
+	if (flow_rule_has_enc_control_flags(enc_control.mask->flags, extack))
+		return -EOPNOTSUPP;
+
 	if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
 		struct flow_match_ipv4_addrs match;
@@ -1285,7 +1589,8 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
 		}
 	}
 
-	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) &&
+	    (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
 		struct flow_match_enc_opts match;
 
 		flow_rule_match_enc_opts(rule, &match);
@@ -1296,7 +1601,21 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
 		memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
 		       sizeof(struct gtp_pdu_session_info));
 
-		fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS;
+		fltr->flags |= ICE_TC_FLWR_FIELD_GTP_OPTS;
+	}
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) &&
+	    fltr->tunnel_type == TNL_PFCP) {
+		struct flow_match_enc_opts match;
+
+		flow_rule_match_enc_opts(rule, &match);
+
+		memcpy(&fltr->pfcp_meta_keys, match.key->data,
+		       sizeof(struct pfcp_metadata));
+		memcpy(&fltr->pfcp_meta_masks, match.mask->data,
+		       sizeof(struct pfcp_metadata));
+
+		fltr->flags |= ICE_TC_FLWR_FIELD_PFCP_OPTS;
 	}
 
 	return 0;
@@ -1308,11 +1627,16 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
  * @filter_dev: Pointer to device on which filter is being added
  * @f: Pointer to struct flow_cls_offload
  * @fltr: Pointer to filter structure
+ * @ingress: if the rule is added to an ingress block
+ *
+ * Return: 0 if the flower was parsed successfully, -EINVAL if the flower
+ * cannot be parsed, -EOPNOTSUPP if such filter cannot be configured
+ * for the given VSI.
  */
 static int
 ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
 		     struct flow_cls_offload *f,
-		     struct ice_tc_flower_fltr *fltr)
+		     struct ice_tc_flower_fltr *fltr, bool ingress)
 {
 	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
@@ -1323,24 +1647,24 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
 	dissector = rule->match.dissector;
 
 	if (dissector->used_keys &
-	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
-	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
-	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
-	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
-	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
-	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
-	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
-	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
-	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
-	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
-	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
-	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
-	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
-	      BIT(FLOW_DISSECTOR_KEY_IP) |
-	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
-	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
-	      BIT(FLOW_DISSECTOR_KEY_PPPOE) |
-	      BIT(FLOW_DISSECTOR_KEY_L2TPV3))) {
+	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_PPPOE) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_L2TPV3))) {
 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
 		return -EOPNOTSUPP;
 	}
@@ -1357,15 +1681,22 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
 			return err;
 		}
 
-		/* header pointers should point to the inner headers, outer
-		 * header were already set by ice_parse_tunnel_attr
-		 */
-		headers = &fltr->inner_headers;
+		/* PFCP is considered non-tunneled - don't swap headers. */
+		if (fltr->tunnel_type != TNL_PFCP) {
+			/* Header pointers should point to the inner headers,
+			 * outer header were already set by
+			 * ice_parse_tunnel_attr().
+			 */
+			headers = &fltr->inner_headers;
+		}
 	} else if (dissector->used_keys &
-		  (BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
-		   BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
-		   BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
-		   BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
+		  (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
+		   BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
+		   BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+		   BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+		   BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
+		   BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+		   BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
 		NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
 		return -EOPNOTSUPP;
 	} else {
@@ -1389,6 +1720,20 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
 		fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
 	}
 
+	if (!ingress) {
+		bool switchdev =
+			ice_is_eswitch_mode_switchdev(vsi->back);
+
+		if (switchdev != (n_proto_key == ETH_P_LLDP)) {
+			NL_SET_ERR_MSG_FMT_MOD(fltr->extack,
+					       "%sLLDP filtering is not supported on egress in %s mode",
+					       switchdev ? "Non-" : "",
+					       switchdev ? "switchdev" :
+							   "legacy");
+			return -EOPNOTSUPP;
+		}
+	}
+
 	headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
 	headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
 	headers->l3_key.ip_proto = match.key->ip_proto;
@@ -1448,12 +1793,14 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
 		if (match.mask->vlan_priority) {
 			fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_PRIO;
 			headers->vlan_hdr.vlan_prio =
-				cpu_to_be16((match.key->vlan_priority <<
-					     VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
+				be16_encode_bits(match.key->vlan_priority,
+						 VLAN_PRIO_MASK);
 		}
 
-		if (match.mask->vlan_tpid)
+		if (match.mask->vlan_tpid) {
 			headers->vlan_hdr.vlan_tpid = match.key->vlan_tpid;
+			fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_TPID;
+		}
 	}
 
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
@@ -1482,8 +1829,8 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
 		if (match.mask->vlan_priority) {
 			fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN_PRIO;
 			headers->cvlan_hdr.vlan_prio =
-				cpu_to_be16((match.key->vlan_priority <<
-					     VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
+				be16_encode_bits(match.key->vlan_priority,
+						 VLAN_PRIO_MASK);
 		}
 	}
@@ -1509,6 +1856,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
 
 		flow_rule_match_control(rule, &match);
 
 		addr_type = match.key->addr_type;
+
+		if (flow_rule_has_control_flags(match.mask->flags,
+						fltr->extack))
+			return -EOPNOTSUPP;
 	}
 
 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -1558,6 +1909,14 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
 			return -EINVAL;
 		}
 	}
+
+	/* Ingress filter on representor results in an egress filter in HW
+	 * and vice versa
+	 */
+	ingress = ice_is_port_repr_netdev(filter_dev) ? !ingress : ingress;
+	fltr->direction = ingress ? ICE_ESWITCH_FLTR_INGRESS :
+				    ICE_ESWITCH_FLTR_EGRESS;
+
 	return 0;
 }
 
@@ -1681,7 +2040,7 @@ ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
 	struct ice_vsi *ch_vsi = NULL;
 	u16 queue = act->rx_queue;
 
-	if (queue > vsi->num_rxq) {
+	if (queue >= vsi->num_rxq) {
 		NL_SET_ERR_MSG_MOD(fltr->extack,
 				   "Unable to add filter because specified queue is invalid");
 		return -EINVAL;
@@ -1694,7 +2053,7 @@ ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
 	/* If ADQ is configured, and the queue belongs to ADQ VSI, then prepare
 	 * ADQ switch filter
 	 */
-	ch_vsi = ice_locate_vsi_using_queue(vsi, fltr);
+	ch_vsi = ice_locate_vsi_using_queue(vsi, fltr->action.fwd.q.queue);
 	if (!ch_vsi)
 		return -EINVAL;
 	fltr->dest_vsi = ch_vsi;
@@ -1712,6 +2071,9 @@ ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
 	case FLOW_ACTION_RX_QUEUE_MAPPING:
 		/* forward to queue */
 		return ice_tc_forward_to_queue(vsi, fltr, act);
+	case FLOW_ACTION_DROP:
+		fltr->action.fltr_act = ICE_DROP_PACKET;
+		return 0;
 	default:
 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported TC action");
 		return -EOPNOTSUPP;
@@ -1720,16 +2082,17 @@ ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
 
 /**
  * ice_parse_tc_flower_actions - Parse the actions for a TC filter
+ * @filter_dev: Pointer to device on which filter is being added
  * @vsi: Pointer to VSI
  * @cls_flower: Pointer to TC flower offload structure
  * @fltr: Pointer to TC flower filter structure
  *
 * Parse the actions for a TC filter
 */
-static int
-ice_parse_tc_flower_actions(struct ice_vsi *vsi,
-			    struct flow_cls_offload *cls_flower,
-			    struct ice_tc_flower_fltr *fltr)
+static int ice_parse_tc_flower_actions(struct net_device *filter_dev,
+				       struct ice_vsi *vsi,
+				       struct flow_cls_offload *cls_flower,
+				       struct ice_tc_flower_fltr *fltr)
 {
 	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
 	struct flow_action *flow_action = &rule->action;
@@ -1744,7 +2107,7 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi,
 
 	flow_action_for_each(i, act, flow_action) {
 		if (ice_is_eswitch_mode_switchdev(vsi->back))
-			err = ice_eswitch_tc_parse_action(fltr, act);
+			err = ice_eswitch_tc_parse_action(filter_dev, fltr, act);
 		else
 			err = ice_tc_parse_action(vsi, fltr, act);
 		if (err)
@@ -1767,6 +2130,12 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
 	struct ice_pf *pf = vsi->back;
 	int err;
 
+	if (ice_is_fltr_pf_tx_lldp(fltr))
+		ice_handle_del_pf_lldp_drop_rule(pf);
+
+	if (ice_is_fltr_vf_tx_lldp(fltr))
+		return ice_drop_vf_tx_lldp(vsi, false);
+
 	rule_rem.rid = fltr->rid;
 	rule_rem.rule_id = fltr->rule_id;
 	rule_rem.vsi_handle = fltr->dest_vsi_handle;
@@ -1803,14 +2172,18 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
 * @vsi: Pointer to VSI
 * @f: Pointer to flower offload structure
 * @__fltr: Pointer to struct ice_tc_flower_fltr
+ * @ingress: if the rule is added to an ingress block
 *
 * This function parses TC-flower input fields, parses action,
 * and adds a filter.
+ *
+ * Return: 0 if the filter was successfully added,
+ * negative error code otherwise.
 */
 static int
 ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
 		struct flow_cls_offload *f,
-		struct ice_tc_flower_fltr **__fltr)
+		struct ice_tc_flower_fltr **__fltr, bool ingress)
 {
 	struct ice_tc_flower_fltr *fltr;
 	int err;
@@ -1827,11 +2200,11 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
 	fltr->src_vsi = vsi;
 	INIT_HLIST_NODE(&fltr->tc_flower_node);
 
-	err = ice_parse_cls_flower(netdev, vsi, f, fltr);
+	err = ice_parse_cls_flower(netdev, vsi, f, fltr, ingress);
 	if (err < 0)
 		goto err;
 
-	err = ice_parse_tc_flower_actions(vsi, f, fltr);
+	err = ice_parse_tc_flower_actions(netdev, vsi, f, fltr);
 	if (err < 0)
 		goto err;
@@ -1870,10 +2243,13 @@ ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
 * @netdev: Pointer to filter device
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to flower offload structure
+ * @ingress: if the rule is added to an ingress block
+ *
+ * Return: 0 if the flower was successfully added,
+ * negative error code otherwise.
 */
-int
-ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
-		   struct flow_cls_offload *cls_flower)
+int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
+		       struct flow_cls_offload *cls_flower, bool ingress)
 {
 	struct netlink_ext_ack *extack = cls_flower->common.extack;
 	struct net_device *vsi_netdev = vsi->netdev;
@@ -1908,7 +2284,7 @@ ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
 	}
 
 	/* prep and add TC-flower filter in HW */
-	err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
+	err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr, ingress);
 	if (err)
 		return err;
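
The central convention this patch establishes is that lookup index 0 of every advanced rule is reserved for metadata: direction is always written there, egress rules additionally consume one lookup for the source VSI, and tunnel and VLAN-TPID matches OR further flags into the same metadata element. The sketch below condenses that calling pattern from the ice_tc_count_lkups() and ice_tc_fill_rules() hunks above; it is illustrative only, not part of the patch. The function name ice_build_lkup_list_sketch and the out_list parameter are hypothetical, and error handling for the protocol lookups that would follow is omitted.

	/* A minimal sketch, assuming the helpers and fields shown in the diff:
	 * count lookups first (index 0 is always metadata), allocate the
	 * list, then fill the metadata before any protocol lookups.
	 */
	static int ice_build_lkup_list_sketch(struct ice_tc_flower_fltr *fltr,
					      struct ice_adv_lkup_elem **out_list)
	{
		struct ice_adv_lkup_elem *list;
		int i = 1; /* 0th lookup is metadata */
		int cnt;

		cnt = ice_tc_count_lkups(fltr->flags, fltr);
		list = kcalloc(cnt, sizeof(*list), GFP_ATOMIC);
		if (!list)
			return -ENOMEM;

		/* Direction metadata is always present at index 0 ... */
		ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);

		/* ... an egress rule also matches on the source VSI ... */
		if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
			ice_rule_add_src_vsi_metadata(&list[i++]);

		/* ... and tunnel rules fold the tunnel flag into the same
		 * metadata element rather than consuming another slot.
		 */
		if (fltr->tunnel_type != TNL_LAST)
			ice_rule_add_tunnel_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);

		*out_list = list;
		return i; /* next free lookup index for protocol matches */
	}

Counting and filling are kept symmetric on purpose: ice_tc_count_lkups() starts at 1 and ice_tc_fill_rules() starts filling at i = 1, so the metadata slot can never be overwritten by a protocol lookup.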
