Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_eswitch.c')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.c | 783
1 file changed, 355 insertions(+), 428 deletions(-)
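
The diff below reworks the ice driver's switchdev support: the dedicated switchdev control VSI and the per-VF ring/vector remapping are removed, and each port representor instead transmits through the uplink netdev by attaching a METADATA_HW_PORT_MUX dst to the skb. What follows is a minimal sketch of that slow-path Tx pattern, based only on calls visible in the diff; the example_* names and the standalone layout are illustrative assumptions, not the driver's actual code.

/*
 * Sketch (kernel-module context) of the Tx pattern used by the reworked
 * ice_eswitch_port_start_xmit(): the representor keeps a HW-port-mux dst
 * naming the target VSI and the uplink netdev, and every skb sent on the
 * representor is redirected through that uplink device.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/dst_metadata.h>

struct example_repr {
	struct net_device *netdev;	/* representor netdev */
	struct metadata_dst *dst;	/* HW port mux metadata */
};

/* Done once when the representor is set up (cf. ice_eswitch_setup_repr). */
static int example_repr_init_dst(struct example_repr *repr, u16 vsi_num,
				 struct net_device *uplink)
{
	repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
	if (!repr->dst)
		return -ENOMEM;

	repr->dst->u.port_info.port_id = vsi_num;   /* target VSI number */
	repr->dst->u.port_info.lower_dev = uplink;  /* device that really sends */
	netif_keep_dst(uplink);
	return 0;
}

/* Per-packet path (cf. the reworked ice_eswitch_port_start_xmit). */
static netdev_tx_t example_repr_xmit(struct sk_buff *skb,
				     struct example_repr *repr)
{
	int ret;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)repr->dst);
	skb_dst_set(skb, (struct dst_entry *)repr->dst);
	skb->dev = repr->dst->u.port_info.lower_dev;

	/* The uplink's xmit path reads this dst to build the Tx context
	 * descriptor (cf. ice_eswitch_set_target_vsi in the diff).
	 */
	ret = dev_queue_xmit(skb);
	return ret;
}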
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index f9f15acae90a..2e4f0969035f 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -4,108 +4,14 @@ #include "ice.h" #include "ice_lib.h" #include "ice_eswitch.h" +#include "ice_eswitch_br.h" #include "ice_fltr.h" #include "ice_repr.h" -#include "ice_devlink.h" +#include "devlink/devlink.h" #include "ice_tc_lib.h" /** - * ice_eswitch_add_vf_mac_rule - add adv rule with VF's MAC - * @pf: pointer to PF struct - * @vf: pointer to VF struct - * @mac: VF's MAC address - * - * This function adds advanced rule that forwards packets with - * VF's MAC address (src MAC) to the corresponding switchdev ctrl VSI queue. - */ -int -ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac) -{ - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; - struct ice_adv_rule_info rule_info = { 0 }; - struct ice_adv_lkup_elem *list; - struct ice_hw *hw = &pf->hw; - const u16 lkups_cnt = 1; - int err; - - list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); - if (!list) - return -ENOMEM; - - list[0].type = ICE_MAC_OFOS; - ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac); - eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr); - - rule_info.sw_act.flag |= ICE_FLTR_TX; - rule_info.sw_act.vsi_handle = ctrl_vsi->idx; - rule_info.sw_act.fltr_act = ICE_FWD_TO_Q; - rule_info.rx = false; - rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id + - ctrl_vsi->rxq_map[vf->vf_id]; - rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE; - rule_info.flags_info.act_valid = true; - rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN; - - err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, - vf->repr->mac_rule); - if (err) - dev_err(ice_pf_to_dev(pf), "Unable to add VF mac rule in switchdev mode for VF %d", - vf->vf_id); - else - vf->repr->rule_added = true; - - kfree(list); - return err; -} - -/** - * ice_eswitch_replay_vf_mac_rule - replay adv rule with VF's MAC - * @vf: pointer to vF struct - * - * This function replays VF's MAC rule after reset. - */ -void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf) -{ - int err; - - if (!ice_is_switchdev_running(vf->pf)) - return; - - if (is_valid_ether_addr(vf->hw_lan_addr.addr)) { - err = ice_eswitch_add_vf_mac_rule(vf->pf, vf, - vf->hw_lan_addr.addr); - if (err) { - dev_err(ice_pf_to_dev(vf->pf), "Failed to add MAC %pM for VF %d\n, error %d\n", - vf->hw_lan_addr.addr, vf->vf_id, err); - return; - } - vf->num_mac++; - - ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr); - } -} - -/** - * ice_eswitch_del_vf_mac_rule - delete adv rule with VF's MAC - * @vf: pointer to the VF struct - * - * Delete the advanced rule that was used to forward packets with the VF's MAC - * address (src MAC) to the corresponding switchdev ctrl VSI queue. 
- */ -void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf) -{ - if (!ice_is_switchdev_running(vf->pf)) - return; - - if (!vf->repr->rule_added) - return; - - ice_rem_adv_rule_by_id(&vf->pf->hw, vf->repr->mac_rule); - vf->repr->rule_added = false; -} - -/** - * ice_eswitch_setup_env - configure switchdev HW filters + * ice_eswitch_setup_env - configure eswitch HW filters * @pf: pointer to PF struct * * This function adds HW filters configuration specific for switchdev @@ -113,234 +19,192 @@ void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf) */ static int ice_eswitch_setup_env(struct ice_pf *pf) { - struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; - struct net_device *uplink_netdev = uplink_vsi->netdev; - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi; + struct net_device *netdev = uplink_vsi->netdev; + bool if_running = netif_running(netdev); struct ice_vsi_vlan_ops *vlan_ops; - bool rule_added = false; - vlan_ops = ice_get_compat_vsi_vlan_ops(ctrl_vsi); - if (vlan_ops->dis_stripping(ctrl_vsi)) - return -ENODEV; + if (if_running && !test_and_set_bit(ICE_VSI_DOWN, uplink_vsi->state)) + if (ice_down(uplink_vsi)) + return -ENODEV; ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx); + ice_vsi_cfg_sw_lldp(uplink_vsi, true, false); - netif_addr_lock_bh(uplink_netdev); - __dev_uc_unsync(uplink_netdev, NULL); - __dev_mc_unsync(uplink_netdev, NULL); - netif_addr_unlock_bh(uplink_netdev); + netif_addr_lock_bh(netdev); + __dev_uc_unsync(netdev, NULL); + __dev_mc_unsync(netdev, NULL); + netif_addr_unlock_bh(netdev); if (ice_vsi_add_vlan_zero(uplink_vsi)) + goto err_vlan_zero; + + if (ice_set_dflt_vsi(uplink_vsi)) goto err_def_rx; - if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) { - if (ice_set_dflt_vsi(uplink_vsi)) - goto err_def_rx; - rule_added = true; - } + if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true, + ICE_FLTR_TX)) + goto err_def_tx; - if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override)) - goto err_override_uplink; + vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi); + if (vlan_ops->dis_rx_filtering(uplink_vsi)) + goto err_vlan_filtering; - if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override)) - goto err_override_control; + if (ice_vsi_update_local_lb(uplink_vsi, true)) + goto err_override_local_lb; + + if (if_running && ice_up(uplink_vsi)) + goto err_up; return 0; -err_override_control: - ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); -err_override_uplink: - if (rule_added) - ice_clear_dflt_vsi(uplink_vsi); +err_up: + ice_vsi_update_local_lb(uplink_vsi, false); +err_override_local_lb: + vlan_ops->ena_rx_filtering(uplink_vsi); +err_vlan_filtering: + ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false, + ICE_FLTR_TX); +err_def_tx: + ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false, + ICE_FLTR_RX); err_def_rx: + ice_vsi_del_vlan_zero(uplink_vsi); +err_vlan_zero: ice_fltr_add_mac_and_broadcast(uplink_vsi, uplink_vsi->port_info->mac.perm_addr, ICE_FWD_TO_VSI); + if (if_running) + ice_up(uplink_vsi); + return -ENODEV; } /** - * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI - * @pf: pointer to PF struct - * - * In switchdev number of allocated Tx/Rx rings is equal. - * - * This function fills q_vectors structures associated with representor and - * move each ring pairs to port representor netdevs. 
Each port representor - * will have dedicated 1 Tx/Rx ring pair, so number of rings pair is equal to - * number of VFs. + * ice_eswitch_release_repr - clear PR VSI configuration + * @pf: poiner to PF struct + * @repr: pointer to PR */ -static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf) +static void +ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr) { - struct ice_vsi *vsi = pf->switchdev.control_vsi; - int q_id; - - ice_for_each_txq(vsi, q_id) { - struct ice_q_vector *q_vector; - struct ice_tx_ring *tx_ring; - struct ice_rx_ring *rx_ring; - struct ice_repr *repr; - struct ice_vf *vf; - - vf = ice_get_vf_by_id(pf, q_id); - if (WARN_ON(!vf)) - continue; - - repr = vf->repr; - q_vector = repr->q_vector; - tx_ring = vsi->tx_rings[q_id]; - rx_ring = vsi->rx_rings[q_id]; - - q_vector->vsi = vsi; - q_vector->reg_idx = vsi->q_vectors[0]->reg_idx; - - q_vector->num_ring_tx = 1; - q_vector->tx.tx_ring = tx_ring; - tx_ring->q_vector = q_vector; - tx_ring->next = NULL; - tx_ring->netdev = repr->netdev; - /* In switchdev mode, from OS stack perspective, there is only - * one queue for given netdev, so it needs to be indexed as 0. - */ - tx_ring->q_index = 0; + struct ice_vsi *vsi = repr->src_vsi; - q_vector->num_ring_rx = 1; - q_vector->rx.rx_ring = rx_ring; - rx_ring->q_vector = q_vector; - rx_ring->next = NULL; - rx_ring->netdev = repr->netdev; + /* Skip representors that aren't configured */ + if (!repr->dst) + return; - ice_put_vf(vf); - } + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); + metadata_dst_free(repr->dst); + repr->dst = NULL; + ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, + ICE_FWD_TO_VSI); } /** - * ice_eswitch_release_reprs - clear PR VSIs configuration - * @pf: poiner to PF struct - * @ctrl_vsi: pointer to switchdev control VSI + * ice_eswitch_setup_repr - configure PR to run in switchdev mode + * @pf: pointer to PF struct + * @repr: pointer to PR struct */ -static void -ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi) +static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr) { - struct ice_vf *vf; - unsigned int bkt; + struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi; + struct ice_vsi *vsi = repr->src_vsi; + struct metadata_dst *dst; - lockdep_assert_held(&pf->vfs.table_lock); - - ice_for_each_vf(pf, bkt, vf) { - struct ice_vsi *vsi = vf->repr->src_vsi; + repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, + GFP_KERNEL); + if (!repr->dst) + return -ENOMEM; - /* Skip VFs that aren't configured */ - if (!vf->repr->dst) - continue; + netif_keep_dst(uplink_vsi->netdev); - ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); - metadata_dst_free(vf->repr->dst); - vf->repr->dst = NULL; - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, - ICE_FWD_TO_VSI); + dst = repr->dst; + dst->u.port_info.port_id = vsi->vsi_num; + dst->u.port_info.lower_dev = uplink_vsi->netdev; - netif_napi_del(&vf->repr->q_vector->napi); - } + return 0; } /** - * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode - * @pf: pointer to PF struct + * ice_eswitch_cfg_vsi - configure VSI to work in slow-path + * @vsi: VSI structure of representee + * @mac: representee MAC + * + * Return: 0 on success, non-zero on error. 
*/ -static int ice_eswitch_setup_reprs(struct ice_pf *pf) +int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac) { - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; - int max_vsi_num = 0; - struct ice_vf *vf; - unsigned int bkt; - - lockdep_assert_held(&pf->vfs.table_lock); - - ice_for_each_vf(pf, bkt, vf) { - struct ice_vsi *vsi = vf->repr->src_vsi; - - ice_remove_vsi_fltr(&pf->hw, vsi->idx); - vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, - GFP_KERNEL); - if (!vf->repr->dst) { - ice_fltr_add_mac_and_broadcast(vsi, - vf->hw_lan_addr.addr, - ICE_FWD_TO_VSI); - goto err; - } - - if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) { - ice_fltr_add_mac_and_broadcast(vsi, - vf->hw_lan_addr.addr, - ICE_FWD_TO_VSI); - metadata_dst_free(vf->repr->dst); - vf->repr->dst = NULL; - goto err; - } - - if (ice_vsi_add_vlan_zero(vsi)) { - ice_fltr_add_mac_and_broadcast(vsi, - vf->hw_lan_addr.addr, - ICE_FWD_TO_VSI); - metadata_dst_free(vf->repr->dst); - vf->repr->dst = NULL; - ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); - goto err; - } - - if (max_vsi_num < vsi->vsi_num) - max_vsi_num = vsi->vsi_num; - - netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, - ice_napi_poll); + int err; - netif_keep_dst(vf->repr->netdev); - } + ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx); - ice_for_each_vf(pf, bkt, vf) { - struct ice_repr *repr = vf->repr; - struct ice_vsi *vsi = repr->src_vsi; - struct metadata_dst *dst; + err = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof); + if (err) + goto err_update_security; - dst = repr->dst; - dst->u.port_info.port_id = vsi->vsi_num; - dst->u.port_info.lower_dev = repr->netdev; - ice_repr_set_traffic_vsi(repr, ctrl_vsi); - } + err = ice_vsi_add_vlan_zero(vsi); + if (err) + goto err_vlan_zero; return 0; -err: - ice_eswitch_release_reprs(pf, ctrl_vsi); +err_vlan_zero: + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); +err_update_security: + ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI); - return -ENODEV; + return err; +} + +/** + * ice_eswitch_decfg_vsi - unroll changes done to VSI for switchdev + * @vsi: VSI structure of representee + * @mac: representee MAC + */ +void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac) +{ + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); + ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI); } /** - * ice_eswitch_update_repr - reconfigure VF port representor - * @vsi: VF VSI for which port representor is configured + * ice_eswitch_update_repr - reconfigure port representor + * @repr_id: representor ID + * @vsi: VSI for which port representor is configured */ -void ice_eswitch_update_repr(struct ice_vsi *vsi) +void ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; struct ice_repr *repr; - struct ice_vf *vf; - int ret; + int err; if (!ice_is_switchdev_running(pf)) return; - vf = vsi->vf; - repr = vf->repr; + repr = xa_load(&pf->eswitch.reprs, *repr_id); + if (!repr) + return; + repr->src_vsi = vsi; repr->dst->u.port_info.port_id = vsi->vsi_num; - ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof); - if (ret) { - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI); - dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", - vsi->vf->vf_id); + if (repr->br_port) + repr->br_port->vsi = vsi; + + err = ice_eswitch_cfg_vsi(vsi, repr->parent_mac); + if (err) + dev_err(ice_pf_to_dev(pf), "Failed to update VSI of port representor %d", + repr->id); + + 
/* The VSI number is different, reload the PR with new id */ + if (repr->id != vsi->vsi_num) { + xa_erase(&pf->eswitch.reprs, repr->id); + repr->id = vsi->vsi_num; + if (xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL)) + dev_err(ice_pf_to_dev(pf), "Failed to reload port representor %d", + repr->id); + *repr_id = repr->id; } } @@ -354,28 +218,23 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi) netdev_tx_t ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) { - struct ice_netdev_priv *np; - struct ice_repr *repr; - struct ice_vsi *vsi; - - np = netdev_priv(netdev); - vsi = np->vsi; - - if (ice_is_reset_in_progress(vsi->back->state) || - test_bit(ICE_VF_DIS, vsi->back->state)) - return NETDEV_TX_BUSY; + struct ice_repr *repr = ice_netdev_to_repr(netdev); + unsigned int len = skb->len; + int ret; - repr = ice_netdev_to_repr(netdev); skb_dst_drop(skb); dst_hold((struct dst_entry *)repr->dst); skb_dst_set(skb, (struct dst_entry *)repr->dst); - skb->queue_mapping = repr->vf->vf_id; + skb->dev = repr->dst->u.port_info.lower_dev; - return ice_start_xmit(skb, netdev); + ret = dev_queue_xmit(skb); + ice_repr_inc_tx_stats(repr, len, ret); + + return ret; } /** - * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor + * ice_eswitch_set_target_vsi - set eswitch context in Tx context descriptor * @skb: pointer to send buffer * @off: pointer to offload struct */ @@ -387,18 +246,22 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb, u64 cd_cmd, dst_vsi; if (!dst) { + struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); + + if (unlikely(eth->h_proto == htons(ETH_P_LLDP))) + return; cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S; off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX); } else { cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S; - dst_vsi = ((u64)dst->u.port_info.port_id << - ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M; + dst_vsi = FIELD_PREP(ICE_TXD_CTX_QW1_VSI_M, + dst->u.port_info.port_id); off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX; } } /** - * ice_eswitch_release_env - clear switchdev HW filters + * ice_eswitch_release_env - clear eswitch HW filters * @pf: pointer to PF struct * * This function removes HW filters configuration specific for switchdev @@ -406,71 +269,21 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb, */ static void ice_eswitch_release_env(struct ice_pf *pf) { - struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi; + struct ice_vsi_vlan_ops *vlan_ops; - ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); - ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); - ice_clear_dflt_vsi(uplink_vsi); + vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi); + + ice_vsi_update_local_lb(uplink_vsi, false); + vlan_ops->ena_rx_filtering(uplink_vsi); + ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false, + ICE_FLTR_TX); + ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false, + ICE_FLTR_RX); ice_fltr_add_mac_and_broadcast(uplink_vsi, uplink_vsi->port_info->mac.perm_addr, ICE_FWD_TO_VSI); -} - -/** - * ice_eswitch_vsi_setup - configure switchdev control VSI - * @pf: pointer to PF structure - * @pi: pointer to port_info structure - */ -static struct ice_vsi * -ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) -{ - return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, NULL, NULL); -} - -/** - * 
ice_eswitch_napi_del - remove NAPI handle for all port representors - * @pf: pointer to PF structure - */ -static void ice_eswitch_napi_del(struct ice_pf *pf) -{ - struct ice_vf *vf; - unsigned int bkt; - - lockdep_assert_held(&pf->vfs.table_lock); - - ice_for_each_vf(pf, bkt, vf) - netif_napi_del(&vf->repr->q_vector->napi); -} - -/** - * ice_eswitch_napi_enable - enable NAPI for all port representors - * @pf: pointer to PF structure - */ -static void ice_eswitch_napi_enable(struct ice_pf *pf) -{ - struct ice_vf *vf; - unsigned int bkt; - - lockdep_assert_held(&pf->vfs.table_lock); - - ice_for_each_vf(pf, bkt, vf) - napi_enable(&vf->repr->q_vector->napi); -} - -/** - * ice_eswitch_napi_disable - disable NAPI for all port representors - * @pf: pointer to PF structure - */ -static void ice_eswitch_napi_disable(struct ice_pf *pf) -{ - struct ice_vf *vf; - unsigned int bkt; - - lockdep_assert_held(&pf->vfs.table_lock); - - ice_for_each_vf(pf, bkt, vf) - napi_disable(&vf->repr->q_vector->napi); + ice_vsi_cfg_sw_lldp(uplink_vsi, true, true); } /** @@ -479,58 +292,45 @@ static void ice_eswitch_napi_disable(struct ice_pf *pf) */ static int ice_eswitch_enable_switchdev(struct ice_pf *pf) { - struct ice_vsi *ctrl_vsi; + struct ice_vsi *uplink_vsi; - pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info); - if (!pf->switchdev.control_vsi) + uplink_vsi = ice_get_main_vsi(pf); + if (!uplink_vsi) return -ENODEV; - ctrl_vsi = pf->switchdev.control_vsi; - pf->switchdev.uplink_vsi = ice_get_main_vsi(pf); - if (!pf->switchdev.uplink_vsi) - goto err_vsi; - - if (ice_eswitch_setup_env(pf)) - goto err_vsi; - - if (ice_repr_add_for_all_vfs(pf)) - goto err_repr_add; + if (netif_is_any_bridge_port(uplink_vsi->netdev)) { + dev_err(ice_pf_to_dev(pf), + "Uplink port cannot be a bridge port\n"); + return -EINVAL; + } - if (ice_eswitch_setup_reprs(pf)) - goto err_setup_reprs; + pf->eswitch.uplink_vsi = uplink_vsi; - ice_eswitch_remap_rings_to_vectors(pf); + if (ice_eswitch_setup_env(pf)) + return -ENODEV; - if (ice_vsi_open(ctrl_vsi)) - goto err_setup_reprs; + if (ice_eswitch_br_offloads_init(pf)) + goto err_br_offloads; - ice_eswitch_napi_enable(pf); + pf->eswitch.is_running = true; return 0; -err_setup_reprs: - ice_repr_rem_from_all_vfs(pf); -err_repr_add: +err_br_offloads: ice_eswitch_release_env(pf); -err_vsi: - ice_vsi_release(ctrl_vsi); return -ENODEV; } /** - * ice_eswitch_disable_switchdev - disable switchdev resources + * ice_eswitch_disable_switchdev - disable eswitch resources * @pf: pointer to PF structure */ static void ice_eswitch_disable_switchdev(struct ice_pf *pf) { - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; - - ice_eswitch_napi_disable(pf); + ice_eswitch_br_offloads_deinit(pf); ice_eswitch_release_env(pf); - ice_rem_adv_rule_for_vsi(&pf->hw, ctrl_vsi->idx); - ice_eswitch_release_reprs(pf, ctrl_vsi); - ice_vsi_release(ctrl_vsi); - ice_repr_rem_from_all_vfs(pf); + + pf->eswitch.is_running = false; } /** @@ -558,12 +358,20 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode, case DEVLINK_ESWITCH_MODE_LEGACY: dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy", pf->hw.pf_id); + xa_destroy(&pf->eswitch.reprs); NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy"); break; case DEVLINK_ESWITCH_MODE_SWITCHDEV: { + if (ice_is_adq_active(pf)) { + dev_err(ice_pf_to_dev(pf), "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. 
tc qdisc del dev $PF root"); + NL_SET_ERR_MSG_MOD(extack, "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root"); + return -EOPNOTSUPP; + } + dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev", pf->hw.pf_id); + xa_init(&pf->eswitch.reprs); NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev"); break; } @@ -602,107 +410,226 @@ bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf) } /** - * ice_eswitch_release - cleanup eswitch + * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors * @pf: pointer to PF structure */ -void ice_eswitch_release(struct ice_pf *pf) +static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf) { - if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY) + struct ice_repr *repr; + unsigned long id; + + if (test_bit(ICE_DOWN, pf->state)) return; - ice_eswitch_disable_switchdev(pf); - pf->switchdev.is_running = false; + xa_for_each(&pf->eswitch.reprs, id, repr) + ice_repr_start_tx_queues(repr); } /** - * ice_eswitch_configure - configure eswitch + * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors * @pf: pointer to PF structure */ -int ice_eswitch_configure(struct ice_pf *pf) +void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) +{ + struct ice_repr *repr; + unsigned long id; + + if (test_bit(ICE_DOWN, pf->state)) + return; + + xa_for_each(&pf->eswitch.reprs, id, repr) + ice_repr_stop_tx_queues(repr); +} + +static void ice_eswitch_stop_reprs(struct ice_pf *pf) { - int status; + ice_eswitch_stop_all_tx_queues(pf); +} + +static void ice_eswitch_start_reprs(struct ice_pf *pf) +{ + ice_eswitch_start_all_tx_queues(pf); +} + +static int +ice_eswitch_attach(struct ice_pf *pf, struct ice_repr *repr, unsigned long *id) +{ + int err; - if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running) + if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY) return 0; - status = ice_eswitch_enable_switchdev(pf); - if (status) - return status; + if (xa_empty(&pf->eswitch.reprs)) { + err = ice_eswitch_enable_switchdev(pf); + if (err) + return err; + } + + ice_eswitch_stop_reprs(pf); + + err = repr->ops.add(repr); + if (err) + goto err_create_repr; + + err = ice_eswitch_setup_repr(pf, repr); + if (err) + goto err_setup_repr; + + err = xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL); + if (err) + goto err_xa_alloc; + + *id = repr->id; + + ice_eswitch_start_reprs(pf); - pf->switchdev.is_running = true; return 0; + +err_xa_alloc: + ice_eswitch_release_repr(pf, repr); +err_setup_repr: + repr->ops.rem(repr); +err_create_repr: + if (xa_empty(&pf->eswitch.reprs)) + ice_eswitch_disable_switchdev(pf); + ice_eswitch_start_reprs(pf); + + return err; } /** - * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors + * ice_eswitch_attach_vf - attach VF to a eswitch * @pf: pointer to PF structure + * @vf: pointer to VF structure to be attached + * + * During attaching port representor for VF is created. + * + * Return: zero on success or an error code on failure. 
*/ -static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf) +int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf) { - struct ice_vf *vf; - unsigned int bkt; + struct devlink *devlink = priv_to_devlink(pf); + struct ice_repr *repr; + int err; - lockdep_assert_held(&pf->vfs.table_lock); + if (!ice_is_eswitch_mode_switchdev(pf)) + return 0; - if (test_bit(ICE_DOWN, pf->state)) - return; + repr = ice_repr_create_vf(vf); + if (IS_ERR(repr)) + return PTR_ERR(repr); - ice_for_each_vf(pf, bkt, vf) { - if (vf->repr) - ice_repr_start_tx_queues(vf->repr); - } + devl_lock(devlink); + err = ice_eswitch_attach(pf, repr, &vf->repr_id); + if (err) + ice_repr_destroy(repr); + devl_unlock(devlink); + + return err; } /** - * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors + * ice_eswitch_attach_sf - attach SF to a eswitch * @pf: pointer to PF structure + * @sf: pointer to SF structure to be attached + * + * During attaching port representor for SF is created. + * + * Return: zero on success or an error code on failure. */ -void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) +int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf) { - struct ice_vf *vf; - unsigned int bkt; + struct ice_repr *repr = ice_repr_create_sf(sf); + int err; - lockdep_assert_held(&pf->vfs.table_lock); + if (IS_ERR(repr)) + return PTR_ERR(repr); - if (test_bit(ICE_DOWN, pf->state)) - return; + err = ice_eswitch_attach(pf, repr, &sf->repr_id); + if (err) + ice_repr_destroy(repr); + + return err; +} + +static void ice_eswitch_detach(struct ice_pf *pf, struct ice_repr *repr) +{ + ice_eswitch_stop_reprs(pf); + repr->ops.rem(repr); + + xa_erase(&pf->eswitch.reprs, repr->id); - ice_for_each_vf(pf, bkt, vf) { - if (vf->repr) - ice_repr_stop_tx_queues(vf->repr); + if (xa_empty(&pf->eswitch.reprs)) + ice_eswitch_disable_switchdev(pf); + + ice_eswitch_release_repr(pf, repr); + ice_repr_destroy(repr); + + if (xa_empty(&pf->eswitch.reprs)) { + struct devlink *devlink = priv_to_devlink(pf); + + /* since all port representors are destroyed, there is + * no point in keeping the nodes + */ + ice_devlink_rate_clear_tx_topology(ice_get_main_vsi(pf)); + devl_rate_nodes_destroy(devlink); + } else { + ice_eswitch_start_reprs(pf); } } /** - * ice_eswitch_rebuild - rebuild eswitch + * ice_eswitch_detach_vf - detach VF from a eswitch * @pf: pointer to PF structure + * @vf: pointer to VF structure to be detached */ -int ice_eswitch_rebuild(struct ice_pf *pf) +void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf) { - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; - int status; + struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id); + struct devlink *devlink = priv_to_devlink(pf); - ice_eswitch_napi_disable(pf); - ice_eswitch_napi_del(pf); + if (!repr) + return; - status = ice_eswitch_setup_env(pf); - if (status) - return status; + devl_lock(devlink); + ice_eswitch_detach(pf, repr); + devl_unlock(devlink); +} - status = ice_eswitch_setup_reprs(pf); - if (status) - return status; +/** + * ice_eswitch_detach_sf - detach SF from a eswitch + * @pf: pointer to PF structure + * @sf: pointer to SF structure to be detached + */ +void ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf) +{ + struct ice_repr *repr = xa_load(&pf->eswitch.reprs, sf->repr_id); - ice_eswitch_remap_rings_to_vectors(pf); + if (!repr) + return; - ice_replay_tc_fltrs(pf); + ice_eswitch_detach(pf, repr); +} - status = ice_vsi_open(ctrl_vsi); - if (status) - return status; +/** + * 
ice_eswitch_get_target - get netdev based on src_vsi from descriptor + * @rx_ring: ring used to receive the packet + * @rx_desc: descriptor used to get src_vsi value + * + * Get src_vsi value from descriptor and load correct representor. If it isn't + * found return rx_ring->netdev. + */ +struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc) +{ + struct ice_eswitch *eswitch = &rx_ring->vsi->back->eswitch; + struct ice_32b_rx_flex_desc_nic_2 *desc; + struct ice_repr *repr; - ice_eswitch_napi_enable(pf); - ice_eswitch_start_all_tx_queues(pf); + desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc; + repr = xa_load(&eswitch->reprs, le16_to_cpu(desc->src_vsi)); + if (!repr) + return rx_ring->netdev; - return 0; + return repr->netdev; } |
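The hunks above also replace the per-VF representor bookkeeping with an xarray (pf->eswitch.reprs) keyed by VSI number, which is what lets ice_eswitch_get_target() map a descriptor's src_vsi field straight to a representor netdev. Below is a minimal sketch of that pattern, assuming only the kernel xarray API; the example_* structures are illustrative, not the driver's definitions beyond what the diff shows.

/*
 * Sketch of xarray-based representor lookup: representors are stored
 * keyed by the VSI number of the representee, so attach, detach and the
 * Rx hot-path lookup all use the same index.
 */
#include <linux/netdevice.h>
#include <linux/xarray.h>

struct example_eswitch {
	struct xarray reprs;		/* VSI number -> representor */
};

struct example_repr {
	unsigned long id;		/* VSI number of the representee */
	struct net_device *netdev;
};

static void example_eswitch_init(struct example_eswitch *esw)
{
	xa_init(&esw->reprs);
}

/* cf. ice_eswitch_attach(): fails if the VSI number is already in use. */
static int example_eswitch_add_repr(struct example_eswitch *esw,
				    struct example_repr *repr)
{
	return xa_insert(&esw->reprs, repr->id, repr, GFP_KERNEL);
}

/* cf. ice_eswitch_detach(). */
static void example_eswitch_del_repr(struct example_eswitch *esw,
				     struct example_repr *repr)
{
	xa_erase(&esw->reprs, repr->id);
}

/* cf. ice_eswitch_get_target(): the descriptor's src_vsi is the key. */
static struct net_device *
example_eswitch_get_target(struct example_eswitch *esw, u16 src_vsi,
			   struct net_device *fallback)
{
	struct example_repr *repr = xa_load(&esw->reprs, src_vsi);

	return repr ? repr->netdev : fallback;
}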
