Diffstat (limited to 'drivers/net/ethernet/intel/iavf/iavf_main.c')
-rw-r--r--   drivers/net/ethernet/intel/iavf/iavf_main.c | 505
1 file changed, 432 insertions, 73 deletions
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 335fd13e86f7..6faa62bced3a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright(c) 2013 - 2018 Intel Corporation. */
 
+#include <linux/net/intel/libie/rx.h>
+
 #include "iavf.h"
 #include "iavf_prototype.h"
 /* All iavf tracepoints are defined by the include below, which must
@@ -43,8 +45,9 @@ static const struct pci_device_id iavf_pci_tbl[] = {
 
 MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
 MODULE_ALIAS("i40evf");
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
+MODULE_IMPORT_NS("LIBETH");
+MODULE_IMPORT_NS("LIBIE");
 MODULE_LICENSE("GPL v2");
 
 static const struct net_device_ops iavf_netdev_ops;
@@ -714,40 +717,10 @@ static void iavf_configure_tx(struct iavf_adapter *adapter)
 **/
 static void iavf_configure_rx(struct iavf_adapter *adapter)
 {
-    unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
     struct iavf_hw *hw = &adapter->hw;
-    int i;
 
-    /* Legacy Rx will always default to a 2048 buffer size. */
-#if (PAGE_SIZE < 8192)
-    if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
-        struct net_device *netdev = adapter->netdev;
-
-        /* For jumbo frames on systems with 4K pages we have to use
-         * an order 1 page, so we might as well increase the size
-         * of our Rx buffer to make better use of the available space
-         */
-        rx_buf_len = IAVF_RXBUFFER_3072;
-
-        /* We use a 1536 buffer size for configurations with
-         * standard Ethernet mtu.  On x86 this gives us enough room
-         * for shared info and 192 bytes of padding.
-         */
-        if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
-            (netdev->mtu <= ETH_DATA_LEN))
-            rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
-    }
-#endif
-
-    for (i = 0; i < adapter->num_active_queues; i++) {
+    for (u32 i = 0; i < adapter->num_active_queues; i++)
         adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
-        adapter->rx_rings[i].rx_buf_len = rx_buf_len;
-
-        if (adapter->flags & IAVF_FLAG_LEGACY_RX)
-            clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
-        else
-            set_ring_build_skb_enabled(&adapter->rx_rings[i]);
-    }
 }
 
 /**
@@ -800,6 +773,11 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
         f->state = IAVF_VLAN_ADD;
         adapter->num_vlan_filters++;
         iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
+    } else if (f->state == IAVF_VLAN_REMOVE) {
+        /* IAVF_VLAN_REMOVE means that VLAN wasn't yet removed.
+         * We can safely only change the state here.
+         */
+        f->state = IAVF_VLAN_ACTIVE;
     }
 
 clearout:
@@ -820,8 +798,18 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
 
     f = iavf_find_vlan(adapter, vlan);
     if (f) {
-        f->state = IAVF_VLAN_REMOVE;
-        iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
+        /* IAVF_ADD_VLAN means that VLAN wasn't even added yet.
+         * Remove it from the list.
+         */
+        if (f->state == IAVF_VLAN_ADD) {
+            list_del(&f->list);
+            kfree(f);
+            adapter->num_vlan_filters--;
+        } else {
+            f->state = IAVF_VLAN_REMOVE;
+            iavf_schedule_aq_request(adapter,
+                                     IAVF_FLAG_AQ_DEL_VLAN_FILTER);
+        }
     }
 
     spin_unlock_bh(&adapter->mac_vlan_list_lock);
@@ -1207,7 +1195,7 @@ static void iavf_napi_enable_all(struct iavf_adapter *adapter)
 
         q_vector = &adapter->q_vectors[q_idx];
         napi = &q_vector->napi;
-        napi_enable(napi);
+        napi_enable_locked(napi);
     }
 }
 
@@ -1223,7 +1211,7 @@ static void iavf_napi_disable_all(struct iavf_adapter *adapter)
 
     for (q_idx = 0; q_idx < q_vectors; q_idx++) {
         q_vector = &adapter->q_vectors[q_idx];
-        napi_disable(&q_vector->napi);
+        napi_disable_locked(&q_vector->napi);
     }
 }
 
@@ -1615,7 +1603,6 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter)
         rx_ring = &adapter->rx_rings[i];
         rx_ring->queue_index = i;
         rx_ring->netdev = adapter->netdev;
-        rx_ring->dev = &adapter->pdev->dev;
         rx_ring->count = adapter->rx_desc_count;
         rx_ring->itr_setting = IAVF_ITR_RX_DEF;
     }
@@ -1828,8 +1815,8 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
         q_vector->v_idx = q_idx;
         q_vector->reg_idx = q_idx;
         cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
-        netif_napi_add(adapter->netdev, &q_vector->napi,
-                       iavf_napi_poll);
+        netif_napi_add_locked(adapter->netdev, &q_vector->napi,
+                              iavf_napi_poll);
     }
 
     return 0;
@@ -1855,7 +1842,7 @@ static void iavf_free_q_vectors(struct iavf_adapter *adapter)
     for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
         struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
 
-        netif_napi_del(&q_vector->napi);
+        netif_napi_del_locked(&q_vector->napi);
     }
     kfree(adapter->q_vectors);
     adapter->q_vectors = NULL;
@@ -1996,12 +1983,16 @@ err:
 static void iavf_finish_config(struct work_struct *work)
 {
     struct iavf_adapter *adapter;
+    bool locks_released = false;
     int pairs, err;
 
     adapter = container_of(work, struct iavf_adapter, finish_config);
 
-    /* Always take RTNL first to prevent circular lock dependency */
+    /* Always take RTNL first to prevent circular lock dependency;
+     * The dev->lock is needed to update the queue number
+     */
     rtnl_lock();
+    netdev_lock(adapter->netdev);
     mutex_lock(&adapter->crit_lock);
 
     if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
@@ -2013,26 +2004,34 @@ static void iavf_finish_config(struct work_struct *work)
 
     switch (adapter->state) {
     case __IAVF_DOWN:
+        /* Set the real number of queues when reset occurs while
+         * state == __IAVF_DOWN
+         */
+        pairs = adapter->num_active_queues;
+        netif_set_real_num_rx_queues(adapter->netdev, pairs);
+        netif_set_real_num_tx_queues(adapter->netdev, pairs);
+
         if (adapter->netdev->reg_state != NETREG_REGISTERED) {
+            mutex_unlock(&adapter->crit_lock);
+            netdev_unlock(adapter->netdev);
+            locks_released = true;
             err = register_netdevice(adapter->netdev);
             if (err) {
                 dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
                         err);
 
                 /* go back and try again.*/
+                mutex_lock(&adapter->crit_lock);
                 iavf_free_rss(adapter);
                 iavf_free_misc_irq(adapter);
                 iavf_reset_interrupt_capability(adapter);
                 iavf_change_state(adapter,
                                   __IAVF_INIT_CONFIG_ADAPTER);
+                mutex_unlock(&adapter->crit_lock);
                 goto out;
             }
         }
-
-        /* Set the real number of queues when reset occurs while
-         * state == __IAVF_DOWN
-         */
-        fallthrough;
+        break;
     case __IAVF_RUNNING:
         pairs = adapter->num_active_queues;
         netif_set_real_num_rx_queues(adapter->netdev, pairs);
@@ -2044,7 +2043,10 @@ static void iavf_finish_config(struct work_struct *work)
     }
 
 out:
-    mutex_unlock(&adapter->crit_lock);
+    if (!locks_released) {
+        mutex_unlock(&adapter->crit_lock);
+        netdev_unlock(adapter->netdev);
+    }
     rtnl_unlock();
 }
 
@@ -2113,6 +2115,21 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
         return 0;
     }
 
+    if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW) {
+        iavf_cfg_queues_bw(adapter);
+        return 0;
+    }
+
+    if (adapter->aq_required & IAVF_FLAG_AQ_GET_QOS_CAPS) {
+        iavf_get_qos_caps(adapter);
+        return 0;
+    }
+
+    if (adapter->aq_required & IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE) {
+        iavf_cfg_queues_quanta_size(adapter);
+        return 0;
+    }
+
     if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
         iavf_configure_queues(adapter);
         return 0;
@@ -2170,19 +2187,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
         iavf_add_cloud_filter(adapter);
         return 0;
     }
-
-    if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
-        iavf_del_cloud_filter(adapter);
-        return 0;
-    }
     if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
         iavf_del_cloud_filter(adapter);
         return 0;
     }
-    if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
-        iavf_add_cloud_filter(adapter);
-        return 0;
-    }
     if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
         iavf_add_fdir_filter(adapter);
         return IAVF_SUCCESS;
@@ -2651,9 +2659,8 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
     iavf_set_ethtool_ops(netdev);
     netdev->watchdog_timeo = 5 * HZ;
 
-    /* MTU range: 68 - 9710 */
     netdev->min_mtu = ETH_MIN_MTU;
-    netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
+    netdev->max_mtu = LIBIE_MAX_MTU;
 
     if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
         dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
@@ -2708,6 +2715,9 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
     /* request initial VLAN offload settings */
     iavf_set_vlan_offload_features(adapter, 0, netdev->features);
 
+    if (QOS_ALLOWED(adapter))
+        adapter->aq_required |= IAVF_FLAG_AQ_GET_QOS_CAPS;
+
     iavf_schedule_finish_config(adapter);
     return;
 
@@ -2729,12 +2739,16 @@ static void iavf_watchdog_task(struct work_struct *work)
     struct iavf_adapter *adapter = container_of(work,
                                                 struct iavf_adapter,
                                                 watchdog_task.work);
+    struct net_device *netdev = adapter->netdev;
     struct iavf_hw *hw = &adapter->hw;
     u32 reg_val;
 
+    netdev_lock(netdev);
     if (!mutex_trylock(&adapter->crit_lock)) {
-        if (adapter->state == __IAVF_REMOVE)
+        if (adapter->state == __IAVF_REMOVE) {
+            netdev_unlock(netdev);
             return;
+        }
 
         goto restart_watchdog;
     }
@@ -2746,30 +2760,35 @@ static void iavf_watchdog_task(struct work_struct *work)
     case __IAVF_STARTUP:
         iavf_startup(adapter);
         mutex_unlock(&adapter->crit_lock);
+        netdev_unlock(netdev);
        queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                           msecs_to_jiffies(30));
        return;
    case __IAVF_INIT_VERSION_CHECK:
        iavf_init_version_check(adapter);
        mutex_unlock(&adapter->crit_lock);
+        netdev_unlock(netdev);
        queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                           msecs_to_jiffies(30));
        return;
    case __IAVF_INIT_GET_RESOURCES:
        iavf_init_get_resources(adapter);
        mutex_unlock(&adapter->crit_lock);
+        netdev_unlock(netdev);
        queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                           msecs_to_jiffies(1));
        return;
    case __IAVF_INIT_EXTENDED_CAPS:
        iavf_init_process_extended_caps(adapter);
        mutex_unlock(&adapter->crit_lock);
+        netdev_unlock(netdev);
        queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                           msecs_to_jiffies(1));
        return;
    case __IAVF_INIT_CONFIG_ADAPTER:
        iavf_init_config_adapter(adapter);
        mutex_unlock(&adapter->crit_lock);
+        netdev_unlock(netdev);
        queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                           msecs_to_jiffies(1));
        return;
@@ -2781,6 +2800,7 @@ static void iavf_watchdog_task(struct work_struct *work)
             * as it can loop forever
             */
            mutex_unlock(&adapter->crit_lock);
+            netdev_unlock(netdev);
            return;
        }
        if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
@@ -2789,6 +2809,7 @@ static void iavf_watchdog_task(struct work_struct *work)
            adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
            iavf_shutdown_adminq(hw);
            mutex_unlock(&adapter->crit_lock);
+            netdev_unlock(netdev);
            queue_delayed_work(adapter->wq,
                               &adapter->watchdog_task, (5 * HZ));
            return;
@@ -2796,6 +2817,7 @@ static void iavf_watchdog_task(struct work_struct *work)
        /* Try again from failed step*/
        iavf_change_state(adapter, adapter->last_state);
        mutex_unlock(&adapter->crit_lock);
+        netdev_unlock(netdev);
        queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
        return;
    case __IAVF_COMM_FAILED:
@@ -2808,6 +2830,7 @@ static void iavf_watchdog_task(struct work_struct *work)
            iavf_change_state(adapter, __IAVF_INIT_FAILED);
            adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
            mutex_unlock(&adapter->crit_lock);
+            netdev_unlock(netdev);
            return;
        }
        reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
@@ -2827,12 +2850,14 @@ static void iavf_watchdog_task(struct work_struct *work)
            adapter->aq_required = 0;
            adapter->current_op = VIRTCHNL_OP_UNKNOWN;
            mutex_unlock(&adapter->crit_lock);
+            netdev_unlock(netdev);
            queue_delayed_work(adapter->wq,
                               &adapter->watchdog_task,
                               msecs_to_jiffies(10));
            return;
    case __IAVF_RESETTING:
        mutex_unlock(&adapter->crit_lock);
+        netdev_unlock(netdev);
        queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                           HZ * 2);
        return;
@@ -2863,6 +2888,7 @@ static void iavf_watchdog_task(struct work_struct *work)
    case __IAVF_REMOVE:
    default:
        mutex_unlock(&adapter->crit_lock);
+        netdev_unlock(netdev);
        return;
    }
 
@@ -2874,6 +2900,7 @@ static void iavf_watchdog_task(struct work_struct *work)
        dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
        iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
        mutex_unlock(&adapter->crit_lock);
+        netdev_unlock(netdev);
        queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                           HZ * 2);
        return;
@@ -2881,6 +2908,7 @@ static void iavf_watchdog_task(struct work_struct *work)
    mutex_unlock(&adapter->crit_lock);
 
restart_watchdog:
+    netdev_unlock(netdev);
    if (adapter->state >= __IAVF_DOWN)
        queue_work(adapter->wq, &adapter->adminq_task);
    if (adapter->aq_required)
@@ -2957,6 +2985,30 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
 }
 
 /**
+ * iavf_reconfig_qs_bw - Call-back task to handle hardware reset
+ * @adapter: board private structure
+ *
+ * After a reset, the shaper parameters of queues need to be replayed again.
+ * Since the net_shaper object inside TX rings persists across reset,
+ * set the update flag for all queues so that the virtchnl message is triggered
+ * for all queues.
+ **/
+static void iavf_reconfig_qs_bw(struct iavf_adapter *adapter)
+{
+    int i, num = 0;
+
+    for (i = 0; i < adapter->num_active_queues; i++)
+        if (adapter->tx_rings[i].q_shaper.bw_min ||
+            adapter->tx_rings[i].q_shaper.bw_max) {
+            adapter->tx_rings[i].q_shaper_update = true;
+            num++;
+        }
+
+    if (num)
+        adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
+}
+
+/**
  * iavf_reset_task - Call-back task to handle hardware reset
  * @work: pointer to work_struct
  *
@@ -2982,10 +3034,12 @@ static void iavf_reset_task(struct work_struct *work)
    /* When device is being removed it doesn't make sense to run the reset
     * task, just return in such a case.
     */
+    netdev_lock(netdev);
    if (!mutex_trylock(&adapter->crit_lock)) {
        if (adapter->state != __IAVF_REMOVE)
            queue_work(adapter->wq, &adapter->reset_task);
 
+        netdev_unlock(netdev);
        return;
    }
 
@@ -3033,6 +3087,7 @@ static void iavf_reset_task(struct work_struct *work)
            reg_val);
        iavf_disable_vf(adapter);
        mutex_unlock(&adapter->crit_lock);
+        netdev_unlock(netdev);
        return; /* Do not attempt to reinit. It's dead, Jim. */
    }
 
@@ -3162,6 +3217,8 @@ continue_reset:
 
        iavf_up_complete(adapter);
        iavf_irq_enable(adapter, true);
+
+        iavf_reconfig_qs_bw(adapter);
    } else {
        iavf_change_state(adapter, __IAVF_DOWN);
        wake_up(&adapter->down_waitqueue);
@@ -3171,6 +3228,7 @@ continue_reset:
    wake_up(&adapter->reset_waitqueue);
    mutex_unlock(&adapter->crit_lock);
+    netdev_unlock(netdev);
 
    return;
 
reset_err:
@@ -3181,6 +3239,7 @@ reset_err:
 
    iavf_disable_vf(adapter);
    mutex_unlock(&adapter->crit_lock);
+    netdev_unlock(netdev);
    dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
 }
 
@@ -3512,6 +3571,34 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
 }
 
 /**
+ * iavf_is_tc_config_same - Compare the mqprio TC config with the
+ * TC config already configured on this adapter.
+ * @adapter: board private structure
+ * @mqprio_qopt: TC config received from kernel.
+ *
+ * This function compares the TC config received from the kernel
+ * with the config already configured on the adapter.
+ *
+ * Return: True if configuration is same, false otherwise.
+ **/
+static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
+                                   struct tc_mqprio_qopt *mqprio_qopt)
+{
+    struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
+    int i;
+
+    if (adapter->num_tc != mqprio_qopt->num_tc)
+        return false;
+
+    for (i = 0; i < adapter->num_tc; i++) {
+        if (ch[i].count != mqprio_qopt->count[i] ||
+            ch[i].offset != mqprio_qopt->offset[i])
+            return false;
+    }
+    return true;
+}
+
+/**
  * __iavf_setup_tc - configure multiple traffic classes
  * @netdev: network interface device structure
  * @type_data: tc offload data
@@ -3568,7 +3655,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
        if (ret)
            return ret;
        /* Return if same TC config is requested */
-        if (adapter->num_tc == num_tc)
+        if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
            return 0;
        adapter->num_tc = num_tc;
 
@@ -3624,8 +3711,10 @@ exit:
    if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
        return 0;
 
+    netdev_lock(netdev);
    netif_set_real_num_rx_queues(netdev, total_qps);
    netif_set_real_num_tx_queues(netdev, total_qps);
+    netdev_unlock(netdev);
 
    return ret;
 }
@@ -3766,6 +3855,10 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
 
        flow_rule_match_control(rule, &match);
        addr_type = match.key->addr_type;
+
+        if (flow_rule_has_control_flags(match.mask->flags,
+                                        f->common.extack))
+            return -EOPNOTSUPP;
    }
 
    if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -4019,7 +4112,7 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter,
 
 /**
  * iavf_setup_tc_cls_flower - flower classifier offloads
- * @adapter: board private structure
+ * @adapter: pointer to iavf adapter structure
  * @cls_flower: pointer to flow_cls_offload struct with flow info
  */
 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
@@ -4038,6 +4131,154 @@ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
 }
 
 /**
+ * iavf_add_cls_u32 - Add U32 classifier offloads
+ * @adapter: pointer to iavf adapter structure
+ * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int iavf_add_cls_u32(struct iavf_adapter *adapter,
+                            struct tc_cls_u32_offload *cls_u32)
+{
+    struct netlink_ext_ack *extack = cls_u32->common.extack;
+    struct virtchnl_fdir_rule *rule_cfg;
+    struct virtchnl_filter_action *vact;
+    struct virtchnl_proto_hdrs *hdrs;
+    struct ethhdr *spec_h, *mask_h;
+    const struct tc_action *act;
+    struct iavf_fdir_fltr *fltr;
+    struct tcf_exts *exts;
+    unsigned int q_index;
+    int i, status = 0;
+    int off_base = 0;
+
+    if (cls_u32->knode.link_handle) {
+        NL_SET_ERR_MSG_MOD(extack, "Linking not supported");
+        return -EOPNOTSUPP;
+    }
+
+    fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
+    if (!fltr)
+        return -ENOMEM;
+
+    rule_cfg = &fltr->vc_add_msg.rule_cfg;
+    hdrs = &rule_cfg->proto_hdrs;
+    hdrs->count = 0;
+
+    /* The parser lib at the PF expects the packet starting with MAC hdr */
+    switch (ntohs(cls_u32->common.protocol)) {
+    case ETH_P_802_3:
+        break;
+    case ETH_P_IP:
+        spec_h = (struct ethhdr *)hdrs->raw.spec;
+        mask_h = (struct ethhdr *)hdrs->raw.mask;
+        spec_h->h_proto = htons(ETH_P_IP);
+        mask_h->h_proto = htons(0xFFFF);
+        off_base += ETH_HLEN;
+        break;
+    default:
+        NL_SET_ERR_MSG_MOD(extack, "Only 802_3 and ip filter protocols are supported");
+        status = -EOPNOTSUPP;
+        goto free_alloc;
+    }
+
+    for (i = 0; i < cls_u32->knode.sel->nkeys; i++) {
+        __be32 val, mask;
+        int off;
+
+        off = off_base + cls_u32->knode.sel->keys[i].off;
+        val = cls_u32->knode.sel->keys[i].val;
+        mask = cls_u32->knode.sel->keys[i].mask;
+
+        if (off >= sizeof(hdrs->raw.spec)) {
+            NL_SET_ERR_MSG_MOD(extack, "Input exceeds maximum allowed.");
+            status = -EINVAL;
+            goto free_alloc;
+        }
+
+        memcpy(&hdrs->raw.spec[off], &val, sizeof(val));
+        memcpy(&hdrs->raw.mask[off], &mask, sizeof(mask));
+        hdrs->raw.pkt_len = off + sizeof(val);
+    }
+
+    /* Only one action is allowed */
+    rule_cfg->action_set.count = 1;
+    vact = &rule_cfg->action_set.actions[0];
+    exts = cls_u32->knode.exts;
+
+    tcf_exts_for_each_action(i, act, exts) {
+        /* FDIR queue */
+        if (is_tcf_skbedit_rx_queue_mapping(act)) {
+            q_index = tcf_skbedit_rx_queue_mapping(act);
+            if (q_index >= adapter->num_active_queues) {
+                status = -EINVAL;
+                goto free_alloc;
+            }
+
+            vact->type = VIRTCHNL_ACTION_QUEUE;
+            vact->act_conf.queue.index = q_index;
+            break;
+        }
+
+        /* Drop */
+        if (is_tcf_gact_shot(act)) {
+            vact->type = VIRTCHNL_ACTION_DROP;
+            break;
+        }
+
+        /* Unsupported action */
+        NL_SET_ERR_MSG_MOD(extack, "Unsupported action.");
+        status = -EOPNOTSUPP;
+        goto free_alloc;
+    }
+
+    fltr->vc_add_msg.vsi_id = adapter->vsi.id;
+    fltr->cls_u32_handle = cls_u32->knode.handle;
+    return iavf_fdir_add_fltr(adapter, fltr);
+
+free_alloc:
+    kfree(fltr);
+    return status;
+}
+
+/**
+ * iavf_del_cls_u32 - Delete U32 classifier offloads
+ * @adapter: pointer to iavf adapter structure
+ * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int iavf_del_cls_u32(struct iavf_adapter *adapter,
+                            struct tc_cls_u32_offload *cls_u32)
+{
+    return iavf_fdir_del_fltr(adapter, true, cls_u32->knode.handle);
+}
+
+/**
+ * iavf_setup_tc_cls_u32 - U32 filter offloads
+ * @adapter: pointer to iavf adapter structure
+ * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int iavf_setup_tc_cls_u32(struct iavf_adapter *adapter,
+                                 struct tc_cls_u32_offload *cls_u32)
+{
+    if (!TC_U32_SUPPORT(adapter) || !FDIR_FLTR_SUPPORT(adapter))
+        return -EOPNOTSUPP;
+
+    switch (cls_u32->command) {
+    case TC_CLSU32_NEW_KNODE:
+    case TC_CLSU32_REPLACE_KNODE:
+        return iavf_add_cls_u32(adapter, cls_u32);
+    case TC_CLSU32_DELETE_KNODE:
+        return iavf_del_cls_u32(adapter, cls_u32);
+    default:
+        return -EOPNOTSUPP;
+    }
+}
+
+/**
  * iavf_setup_tc_block_cb - block callback for tc
  * @type: type of offload
  * @type_data: offload data
@@ -4056,6 +4297,8 @@ static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
    switch (type) {
    case TC_SETUP_CLSFLOWER:
        return iavf_setup_tc_cls_flower(cb_priv, type_data);
+    case TC_SETUP_CLSU32:
+        return iavf_setup_tc_cls_u32(cb_priv, type_data);
    default:
        return -EOPNOTSUPP;
    }
@@ -4141,14 +4384,17 @@ static int iavf_open(struct net_device *netdev)
        return -EIO;
    }
 
+    netdev_lock(netdev);
    while (!mutex_trylock(&adapter->crit_lock)) {
        /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
         * is already taken and iavf_open is called from an upper
         * device's notifier reacting on NETDEV_REGISTER event.
         * We have to leave here to avoid dead lock.
         */
-        if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
+        if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) {
+            netdev_unlock(netdev);
            return -EBUSY;
+        }
 
        usleep_range(500, 1000);
    }
@@ -4197,6 +4443,7 @@ static int iavf_open(struct net_device *netdev)
    iavf_irq_enable(adapter, true);
 
    mutex_unlock(&adapter->crit_lock);
+    netdev_unlock(netdev);
 
    return 0;
 
@@ -4209,6 +4456,7 @@ err_setup_tx:
    iavf_free_all_tx_resources(adapter);
err_unlock:
    mutex_unlock(&adapter->crit_lock);
+    netdev_unlock(netdev);
    return err;
 }
 
@@ -4230,10 +4478,12 @@ static int iavf_close(struct net_device *netdev)
    u64 aq_to_restore;
    int status;
 
+    netdev_lock(netdev);
    mutex_lock(&adapter->crit_lock);
 
    if (adapter->state <= __IAVF_DOWN_PENDING) {
        mutex_unlock(&adapter->crit_lock);
+        netdev_unlock(netdev);
        return 0;
    }
 
@@ -4267,6 +4517,7 @@ static int iavf_close(struct net_device *netdev)
    iavf_free_traffic_irqs(adapter);
 
    mutex_unlock(&adapter->crit_lock);
+    netdev_unlock(netdev);
 
    /* We explicitly don't free resources here because the hardware is
     * still active and can DMA into memory. Resources are cleared in
@@ -4305,7 +4556,7 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
 
    netdev_dbg(netdev, "changing MTU from %d to %d\n",
               netdev->mtu, new_mtu);
-    netdev->mtu = new_mtu;
+    WRITE_ONCE(netdev->mtu, new_mtu);
 
    if (netif_running(netdev)) {
        iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
@@ -4338,8 +4589,8 @@ static void iavf_disable_fdir(struct iavf_adapter *adapter)
            fdir->state == IAVF_FDIR_FLTR_INACTIVE) {
            /* Delete filters not registered in PF */
            list_del(&fdir->list);
+            iavf_dec_fdir_active_fltr(adapter, fdir);
            kfree(fdir);
-            adapter->fdir_active_fltr--;
        } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
                   fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
                   fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
@@ -4423,12 +4674,12 @@ static netdev_features_t iavf_features_check(struct sk_buff *skb,
        features &= ~NETIF_F_GSO_MASK;
 
    /* MACLEN can support at most 63 words */
-    len = skb_network_header(skb) - skb->data;
+    len = skb_network_offset(skb);
    if (len & ~(63 * 2))
        goto out_err;
 
    /* IPLEN and EIPLEN can support at most 127 dwords */
-    len = skb_transport_header(skb) - skb_network_header(skb);
+    len = skb_network_header_len(skb);
    if (len & ~(127 * 4))
        goto out_err;
 
@@ -4749,6 +5000,98 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
    return iavf_fix_strip_features(adapter, features);
 }
 
+static int
+iavf_verify_shaper(struct net_shaper_binding *binding,
+                   const struct net_shaper *shaper,
+                   struct netlink_ext_ack *extack)
+{
+    struct iavf_adapter *adapter = netdev_priv(binding->netdev);
+    u64 vf_max;
+
+    if (shaper->handle.scope == NET_SHAPER_SCOPE_QUEUE) {
+        vf_max = adapter->qos_caps->cap[0].shaper.peak;
+        if (vf_max && shaper->bw_max > vf_max) {
+            NL_SET_ERR_MSG_FMT(extack, "Max rate (%llu) of queue %d can't exceed max TX rate of VF (%llu kbps)",
+                               shaper->bw_max, shaper->handle.id,
+                               vf_max);
+            return -EINVAL;
+        }
+    }
+    return 0;
+}
+
+static int
+iavf_shaper_set(struct net_shaper_binding *binding,
+                const struct net_shaper *shaper,
+                struct netlink_ext_ack *extack)
+{
+    struct iavf_adapter *adapter = netdev_priv(binding->netdev);
+    const struct net_shaper_handle *handle = &shaper->handle;
+    struct iavf_ring *tx_ring;
+    int ret = 0;
+
+    mutex_lock(&adapter->crit_lock);
+    if (handle->id >= adapter->num_active_queues)
+        goto unlock;
+
+    ret = iavf_verify_shaper(binding, shaper, extack);
+    if (ret)
+        goto unlock;
+
+    tx_ring = &adapter->tx_rings[handle->id];
+
+    tx_ring->q_shaper.bw_min = div_u64(shaper->bw_min, 1000);
+    tx_ring->q_shaper.bw_max = div_u64(shaper->bw_max, 1000);
+    tx_ring->q_shaper_update = true;
+
+    adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
+
+unlock:
+    mutex_unlock(&adapter->crit_lock);
+    return ret;
+}
+
+static int iavf_shaper_del(struct net_shaper_binding *binding,
+                           const struct net_shaper_handle *handle,
+                           struct netlink_ext_ack *extack)
+{
+    struct iavf_adapter *adapter = netdev_priv(binding->netdev);
+    struct iavf_ring *tx_ring;
+
+    mutex_lock(&adapter->crit_lock);
+    if (handle->id >= adapter->num_active_queues)
+        goto unlock;
+
+    tx_ring = &adapter->tx_rings[handle->id];
+    tx_ring->q_shaper.bw_min = 0;
+    tx_ring->q_shaper.bw_max = 0;
+    tx_ring->q_shaper_update = true;
+
+    adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
+
+unlock:
+    mutex_unlock(&adapter->crit_lock);
+    return 0;
+}
+
+static void iavf_shaper_cap(struct net_shaper_binding *binding,
+                            enum net_shaper_scope scope,
+                            unsigned long *flags)
+{
+    if (scope != NET_SHAPER_SCOPE_QUEUE)
+        return;
+
+    *flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MIN) |
+             BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX) |
+             BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS);
+}
+
+static const struct net_shaper_ops iavf_shaper_ops = {
+    .set = iavf_shaper_set,
+    .delete = iavf_shaper_del,
+    .capabilities = iavf_shaper_cap,
+};
+
 static const struct net_device_ops iavf_netdev_ops = {
    .ndo_open = iavf_open,
    .ndo_stop = iavf_close,
@@ -4764,6 +5107,7 @@ static const struct net_device_ops iavf_netdev_ops = {
    .ndo_fix_features = iavf_fix_features,
    .ndo_set_features = iavf_set_features,
    .ndo_setup_tc = iavf_setup_tc,
+    .net_shaper_ops = &iavf_shaper_ops,
 };
 
 /**
@@ -4849,9 +5193,11 @@ int iavf_process_config(struct iavf_adapter *adapter)
    /* get HW VLAN features that can be toggled */
    hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);
 
-    /* Enable cloud filter if ADQ is supported */
-    if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
+    /* Enable HW TC offload if ADQ or tc U32 is supported */
+    if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ ||
+        TC_U32_SUPPORT(adapter))
        hw_features |= NETIF_F_HW_TC;
+
    if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
        hw_features |= NETIF_F_GSO_UDP_L4;
 
@@ -4908,7 +5254,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    struct net_device *netdev;
    struct iavf_adapter *adapter = NULL;
    struct iavf_hw *hw = NULL;
-    int err;
+    int err, len;
 
    err = pci_enable_device(pdev);
    if (err)
@@ -4976,6 +5322,13 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    hw->bus.func = PCI_FUNC(pdev->devfn);
    hw->bus.bus_id = pdev->bus->number;
 
+    len = struct_size(adapter->qos_caps, cap, IAVF_MAX_QOS_TC_NUM);
+    adapter->qos_caps = kzalloc(len, GFP_KERNEL);
+    if (!adapter->qos_caps) {
+        err = -ENOMEM;
+        goto err_alloc_qos_cap;
+    }
+
    /* set up the locks for the AQ, do this only once in probe
     * and destroy them only once in remove
     */
@@ -5014,6 +5367,8 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    /* Initialization goes on in the work. Do not add more of it below. */
    return 0;
 
+err_alloc_qos_cap:
+    iounmap(hw->hw_addr);
err_ioremap:
    destroy_workqueue(adapter->wq);
err_alloc_wq:
@@ -5032,13 +5387,14 @@ err_dma:
 *
 * Called when the system (VM) is entering sleep/suspend.
 **/
-static int __maybe_unused iavf_suspend(struct device *dev_d)
+static int iavf_suspend(struct device *dev_d)
 {
    struct net_device *netdev = dev_get_drvdata(dev_d);
    struct iavf_adapter *adapter = netdev_priv(netdev);
 
    netif_device_detach(netdev);
 
+    netdev_lock(netdev);
    mutex_lock(&adapter->crit_lock);
 
    if (netif_running(netdev)) {
@@ -5050,6 +5406,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d)
    iavf_reset_interrupt_capability(adapter);
 
    mutex_unlock(&adapter->crit_lock);
+    netdev_unlock(netdev);
 
    return 0;
 }
@@ -5060,7 +5417,7 @@
 *
 * Called when the system (VM) is resumed from sleep/suspend.
 **/
-static int __maybe_unused iavf_resume(struct device *dev_d)
+static int iavf_resume(struct device *dev_d)
 {
    struct pci_dev *pdev = to_pci_dev(dev_d);
    struct iavf_adapter *adapter;
@@ -5148,6 +5505,7 @@ static void iavf_remove(struct pci_dev *pdev)
    if (netdev->reg_state == NETREG_REGISTERED)
        unregister_netdev(netdev);
 
+    netdev_lock(netdev);
    mutex_lock(&adapter->crit_lock);
    dev_info(&adapter->pdev->dev, "Removing device\n");
    iavf_change_state(adapter, __IAVF_REMOVE);
@@ -5184,6 +5542,7 @@ static void iavf_remove(struct pci_dev *pdev)
    mutex_destroy(&hw->aq.asq_mutex);
    mutex_unlock(&adapter->crit_lock);
    mutex_destroy(&adapter->crit_lock);
+    netdev_unlock(netdev);
 
    iounmap(hw->hw_addr);
    pci_release_regions(pdev);
@@ -5247,14 +5606,14 @@ static void iavf_shutdown(struct pci_dev *pdev)
        pci_set_power_state(pdev, PCI_D3hot);
 }
 
-static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
 
 static struct pci_driver iavf_driver = {
    .name      = iavf_driver_name,
    .id_table  = iavf_pci_tbl,
    .probe     = iavf_probe,
    .remove    = iavf_remove,
-    .driver.pm = &iavf_pm_ops,
+    .driver.pm = pm_sleep_ptr(&iavf_pm_ops),
    .shutdown  = iavf_shutdown,
 };