Diffstat (limited to 'drivers/net/ethernet/intel/i40e/i40e_main.c')
-rw-r--r--	drivers/net/ethernet/intel/i40e/i40e_main.c	| 3921
1 file changed, 2152 insertions, 1769 deletions
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 861e59a350bd..d8192aa23254 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,19 +1,23 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright(c) 2013 - 2021 Intel Corporation. */
 
-#include <linux/etherdevice.h>
-#include <linux/of_net.h>
-#include <linux/pci.h>
-#include <linux/bpf.h>
 #include <generated/utsrelease.h>
 #include <linux/crash_dump.h>
+#include <linux/net/intel/libie/pctype.h>
+#include <linux/if_bridge.h>
+#include <linux/if_macvlan.h>
+#include <linux/module.h>
+#include <net/pkt_cls.h>
+#include <net/xdp_sock_drv.h>
 /* Local includes */
 #include "i40e.h"
+#include "i40e_devids.h"
 #include "i40e_diag.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_virtchnl_pf.h"
 #include "i40e_xsk.h"
-#include <net/udp_tunnel.h>
-#include <net/xdp_sock_drv.h>
+
 /* All i40e tracepoints are defined by the include below, which
  * must be included exactly once across the whole kernel with
  * CREATE_TRACE_POINTS defined
@@ -66,6 +70,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_BC), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
@@ -77,6 +82,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
@@ -93,23 +99,59 @@ static int debug = -1;
 module_param(debug, uint, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
 
-MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
+MODULE_IMPORT_NS("LIBIE");
+MODULE_IMPORT_NS("LIBIE_ADMINQ");
 MODULE_LICENSE("GPL v2");
 
 static struct workqueue_struct *i40e_wq;
 
+static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
+				  struct net_device *netdev, int delta)
+{
+	struct netdev_hw_addr_list *ha_list;
+	struct netdev_hw_addr *ha;
+
+	if (!f || !netdev)
+		return;
+
+	if (is_unicast_ether_addr(f->macaddr) || is_link_local_ether_addr(f->macaddr))
+		ha_list = &netdev->uc;
+	else
+		ha_list = &netdev->mc;
+
+	netdev_hw_addr_list_for_each(ha, ha_list) {
+		if (ether_addr_equal(ha->addr, f->macaddr)) {
+			ha->refcount += delta;
+			if (ha->refcount <= 0)
+				ha->refcount = 1;
+			break;
+		}
+	}
+}
+
 /**
- * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
+ * i40e_hw_to_dev - get device pointer from the hardware structure
+ * @hw: pointer to the device HW structure
+ **/
+struct device *i40e_hw_to_dev(struct i40e_hw *hw)
+{
+	struct i40e_pf *pf = i40e_hw_to_pf(hw);
+
+	return &pf->pdev->dev;
+}
+
+/**
+ * i40e_allocate_dma_mem - OS specific memory alloc for shared code
  * @hw: pointer to the HW structure
  * @mem: ptr to mem struct to fill out
  * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
-int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
-			    u64 size, u32 alignment)
+int i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
+			  u64 size, u32 alignment)
 {
-	struct i40e_pf *pf = (struct i40e_pf *)hw->back;
+	struct i40e_pf *pf = i40e_hw_to_pf(hw);
 
 	mem->size = ALIGN(size, alignment);
 	mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
@@ -121,13 +163,13 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
 }
 
 /**
- * i40e_free_dma_mem_d - OS specific memory free for shared code
+ * i40e_free_dma_mem - OS specific memory free for shared code
  * @hw: pointer to the HW structure
  * @mem: ptr to mem struct to free
 **/
-int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+int i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem)
 {
-	struct i40e_pf *pf = (struct i40e_pf *)hw->back;
+	struct i40e_pf *pf = i40e_hw_to_pf(hw);
 
 	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
 	mem->va = NULL;
@@ -138,13 +180,13 @@ int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
 }
 
 /**
- * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
+ * i40e_allocate_virt_mem - OS specific memory alloc for shared code
  * @hw: pointer to the HW structure
  * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
-int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
-			     u32 size)
+int i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem,
+			   u32 size)
 {
 	mem->size = size;
 	mem->va = kzalloc(size, GFP_KERNEL);
@@ -156,11 +198,11 @@ int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
 }
 
 /**
- * i40e_free_virt_mem_d - OS specific memory free for shared code
+ * i40e_free_virt_mem - OS specific memory free for shared code
  * @hw: pointer to the HW structure
  * @mem: ptr to mem struct to free
 **/
-int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
+int i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem)
 {
 	/* it's ok to kfree a NULL pointer */
 	kfree(mem->va);
@@ -178,10 +220,6 @@ int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
  * @id: an owner id to stick on the items assigned
  *
  * Returns the base item index of the lump, or negative for error
- *
- * The search_hint trick and lack of advanced fit-finding only work
- * because we're highly likely to have all the same size lump requests.
- * Linear search time and any fragmentation should be minimal.
 **/
 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
 			 u16 needed, u16 id)
@@ -196,8 +234,21 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
 		return -EINVAL;
 	}
 
-	/* start the linear search with an imperfect hint */
-	i = pile->search_hint;
+	/* Allocate last queue in the pile for FDIR VSI queue
+	 * so it doesn't fragment the qp_pile
+	 */
+	if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
+		if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
+			dev_err(&pf->pdev->dev,
+				"Cannot allocate queue %d for I40E_VSI_FDIR\n",
+				pile->num_entries - 1);
+			return -ENOMEM;
+		}
+		pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
+		return pile->num_entries - 1;
+	}
+
+	i = 0;
 	while (i < pile->num_entries) {
 		/* skip already allocated entries */
 		if (pile->list[i] & I40E_PILE_VALID_BIT) {
@@ -216,7 +267,6 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
 			for (j = 0; j < needed; j++)
 				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
 			ret = i;
-			pile->search_hint = i + j;
 			break;
 		}
 
@@ -239,7 +289,7 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
 {
 	int valid_id = (id | I40E_PILE_VALID_BIT);
 	int count = 0;
-	int i;
+	u16 i;
 
 	if (!pile || index >= pile->num_entries)
 		return -EINVAL;
@@ -251,8 +301,6 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
 		count++;
 	}
 
-	if (count && index < pile->search_hint)
-		pile->search_hint = index;
 
 	return count;
 }
@@ -264,11 +312,12 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
 **/
 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
 {
+	struct i40e_vsi *vsi;
 	int i;
 
-	for (i = 0; i < pf->num_alloc_vsi; i++)
-		if (pf->vsi[i] && (pf->vsi[i]->id == id))
-			return pf->vsi[i];
+	i40e_pf_for_each_vsi(pf, i, vsi)
+		if (vsi->id == id)
+			return vsi;
 
 	return NULL;
 }
@@ -331,7 +380,7 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 	if (tx_ring) {
 		head = i40e_get_head(tx_ring);
 		/* Read interrupt register */
-		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+		if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
 			val = rd32(&pf->hw,
 			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
 						 tx_ring->vsi->base_vector - 1));
@@ -359,7 +408,9 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
 		break;
 	default:
-		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
+		set_bit(__I40E_DOWN_REQUESTED, pf->state);
+		set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
 		break;
 	}
 
@@ -391,10 +442,10 @@ static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
 	unsigned int start;
 
 	do {
-		start = u64_stats_fetch_begin_irq(&ring->syncp);
+		start = u64_stats_fetch_begin(&ring->syncp);
 		packets = ring->stats.packets;
 		bytes = ring->stats.bytes;
-	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+	} while (u64_stats_fetch_retry(&ring->syncp, start));
 
 	stats->tx_packets += packets;
 	stats->tx_bytes += bytes;
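[Note: the hunks above drop the _irq suffix from the u64_stats helpers, a kernel-wide rename. A minimal sketch of the reader/writer pattern these calls implement, using a hypothetical ring_stats struct (only the <linux/u64_stats_sync.h> API is taken from the diff):

	#include <linux/u64_stats_sync.h>

	struct ring_stats {
		u64 packets;
		u64 bytes;
		struct u64_stats_sync syncp;	/* init once with u64_stats_init() */
	};

	/* Writer side, e.g. in the NAPI poll loop: updates are bracketed so
	 * a 32-bit reader never observes a torn 64-bit counter.
	 */
	static void ring_stats_add(struct ring_stats *s, u64 pkts, u64 len)
	{
		u64_stats_update_begin(&s->syncp);
		s->packets += pkts;
		s->bytes += len;
		u64_stats_update_end(&s->syncp);
	}

	/* Reader side: retry until a consistent snapshot was taken. */
	static void ring_stats_read(struct ring_stats *s, u64 *pkts, u64 *len)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			*pkts = s->packets;
			*len = s->bytes;
		} while (u64_stats_fetch_retry(&s->syncp, start));
	}

On 64-bit kernels both sides compile down to plain loads and stores; the seqcount only exists on 32-bit builds.]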
@@ -444,10 +495,10 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
 		if (!ring)
 			continue;
 		do {
-			start = u64_stats_fetch_begin_irq(&ring->syncp);
+			start = u64_stats_fetch_begin(&ring->syncp);
 			packets = ring->stats.packets;
 			bytes = ring->stats.bytes;
-		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+		} while (u64_stats_fetch_retry(&ring->syncp, start));
 
 		stats->rx_packets += packets;
 		stats->rx_bytes += bytes;
@@ -461,6 +512,7 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
 	stats->tx_dropped = vsi_stats->tx_dropped;
 	stats->rx_errors = vsi_stats->rx_errors;
 	stats->rx_dropped = vsi_stats->rx_dropped;
+	stats->rx_missed_errors = vsi_stats->rx_missed_errors;
 	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
 	stats->rx_length_errors = vsi_stats->rx_length_errors;
 }
@@ -503,29 +555,65 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
 **/
 void i40e_pf_reset_stats(struct i40e_pf *pf)
 {
+	struct i40e_veb *veb;
 	int i;
 
 	memset(&pf->stats, 0, sizeof(pf->stats));
 	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
 	pf->stat_offsets_loaded = false;
 
-	for (i = 0; i < I40E_MAX_VEB; i++) {
-		if (pf->veb[i]) {
-			memset(&pf->veb[i]->stats, 0,
-			       sizeof(pf->veb[i]->stats));
-			memset(&pf->veb[i]->stats_offsets, 0,
-			       sizeof(pf->veb[i]->stats_offsets));
-			memset(&pf->veb[i]->tc_stats, 0,
-			       sizeof(pf->veb[i]->tc_stats));
-			memset(&pf->veb[i]->tc_stats_offsets, 0,
-			       sizeof(pf->veb[i]->tc_stats_offsets));
-			pf->veb[i]->stat_offsets_loaded = false;
-		}
+	i40e_pf_for_each_veb(pf, i, veb) {
+		memset(&veb->stats, 0, sizeof(veb->stats));
+		memset(&veb->stats_offsets, 0, sizeof(veb->stats_offsets));
+		memset(&veb->tc_stats, 0, sizeof(veb->tc_stats));
+		memset(&veb->tc_stats_offsets, 0, sizeof(veb->tc_stats_offsets));
+		veb->stat_offsets_loaded = false;
 	}
 	pf->hw_csum_rx_error = 0;
 }
 
 /**
+ * i40e_compute_pci_to_hw_id - compute index from PCI function.
+ * @vsi: ptr to the VSI to read from.
+ * @hw: ptr to the hardware info.
+ **/
+static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw)
+{
+	int pf_count = i40e_get_pf_count(hw);
+
+	if (vsi->type == I40E_VSI_SRIOV)
+		return (hw->port * BIT(7)) / pf_count + vsi->vf_id;
+
+	return hw->port + BIT(7);
+}
+
+/**
+ * i40e_stat_update64 - read and update a 64 bit stat from the chip.
+ * @hw: ptr to the hardware info.
+ * @hireg: the high 32 bit reg to read.
+ * @loreg: the low 32 bit reg to read.
+ * @offset_loaded: has the initial offset been loaded yet.
+ * @offset: ptr to current offset value.
+ * @stat: ptr to the stat.
+ *
+ * Since the device stats are not reset at PFReset, they will not
+ * be zeroed when the driver starts. We'll save the first values read
+ * and use them as offsets to be subtracted from the raw values in order
+ * to report stats that count from zero.
+ **/
+static void i40e_stat_update64(struct i40e_hw *hw, u32 hireg, u32 loreg,
+			       bool offset_loaded, u64 *offset, u64 *stat)
+{
+	u64 new_data;
+
+	new_data = rd64(hw, loreg);
+
+	if (!offset_loaded || new_data < *offset)
+		*offset = new_data;
+	*stat = new_data - *offset;
+}
+
+/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
@@ -597,6 +685,30 @@ static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
 }
 
 /**
+ * i40e_stats_update_rx_discards - update rx_discards.
+ * @vsi: ptr to the VSI to be updated.
+ * @hw: ptr to the hardware info.
+ * @stat_idx: VSI's stat_counter_idx.
+ * @offset_loaded: ptr to the VSI's stat_offsets_loaded.
+ * @stat_offset: ptr to stat_offset to store first read of specific register.
+ * @stat: ptr to VSI's stat to be updated.
+ **/
+static void
+i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw,
+			      int stat_idx, bool offset_loaded,
+			      struct i40e_eth_stats *stat_offset,
+			      struct i40e_eth_stats *stat)
+{
+	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded,
+			   &stat_offset->rx_discards, &stat->rx_discards);
+	i40e_stat_update64(hw,
+			   I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)),
+			   I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)),
+			   offset_loaded, &stat_offset->rx_discards_other,
+			   &stat->rx_discards_other);
+}
+
+/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
@@ -615,9 +727,6 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
 	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
 			   vsi->stat_offsets_loaded,
 			   &oes->tx_errors, &es->tx_errors);
-	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
-			   vsi->stat_offsets_loaded,
-			   &oes->rx_discards, &es->rx_discards);
 	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
 			   vsi->stat_offsets_loaded,
 			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
@@ -655,6 +764,10 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
 			   I40E_GLV_BPTCL(stat_idx),
 			   vsi->stat_offsets_loaded,
 			   &oes->tx_broadcast, &es->tx_broadcast);
+
+	i40e_stats_update_rx_discards(vsi, hw, stat_idx,
+				      vsi->stat_offsets_loaded, oes, es);
+
 	vsi->stat_offsets_loaded = true;
 }
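[Note: the new i40e_stat_update64 above can simply re-baseline when a reading is smaller than the saved offset, because a 64-bit register will not wrap in practice. The driver's existing 48-bit variant, referenced but not shown in this diff, must instead allow for one wrap between reads. A simplified sketch of that logic (not a verbatim copy; BIT_ULL comes from <linux/bits.h>, and the register read is factored out as a plain parameter):

	/* Report a counter relative to its first-seen value, tolerating
	 * one wrap of the 48-bit hardware register between reads.
	 */
	static void stat_update48(u64 new_data, bool offset_loaded,
				  u64 *offset, u64 *stat)
	{
		if (!offset_loaded)
			*offset = new_data;	/* first read becomes the baseline */
		if (new_data >= *offset)
			*stat = new_data - *offset;
		else
			/* the register wrapped past the baseline */
			*stat = (new_data + BIT_ULL(48)) - *offset;
		*stat &= 0xFFFFFFFFFFFFULL;	/* keep the result within 48 bits */
	}
]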
@@ -749,18 +862,19 @@ void i40e_update_veb_stats(struct i40e_veb *veb)
 **/
 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 {
+	u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy;
 	struct i40e_pf *pf = vsi->back;
 	struct rtnl_link_stats64 *ons;
 	struct rtnl_link_stats64 *ns;   /* netdev stats */
 	struct i40e_eth_stats *oes;
 	struct i40e_eth_stats *es;     /* device's eth stats */
-	u32 tx_restart, tx_busy;
+	u64 tx_restart, tx_busy;
 	struct i40e_ring *p;
-	u32 rx_page, rx_buf;
 	u64 bytes, packets;
 	unsigned int start;
 	u64 tx_linearize;
 	u64 tx_force_wb;
+	u64 tx_stopped;
 	u64 rx_p, rx_b;
 	u64 tx_p, tx_b;
 	u16 q;
@@ -780,8 +894,13 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	rx_b = rx_p = 0;
 	tx_b = tx_p = 0;
 	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
+	tx_stopped = 0;
 	rx_page = 0;
 	rx_buf = 0;
+	rx_reuse = 0;
+	rx_alloc = 0;
+	rx_waive = 0;
+	rx_busy = 0;
 	rcu_read_lock();
 	for (q = 0; q < vsi->num_queue_pairs; q++) {
 		/* locate Tx ring */
@@ -790,16 +909,17 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 			continue;
 
 		do {
-			start = u64_stats_fetch_begin_irq(&p->syncp);
+			start = u64_stats_fetch_begin(&p->syncp);
 			packets = p->stats.packets;
 			bytes = p->stats.bytes;
-		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
+		} while (u64_stats_fetch_retry(&p->syncp, start));
 		tx_b += bytes;
 		tx_p += packets;
 		tx_restart += p->tx_stats.restart_queue;
 		tx_busy += p->tx_stats.tx_busy;
 		tx_linearize += p->tx_stats.tx_linearize;
 		tx_force_wb += p->tx_stats.tx_force_wb;
+		tx_stopped += p->tx_stats.tx_stopped;
 
 		/* locate Rx ring */
 		p = READ_ONCE(vsi->rx_rings[q]);
@@ -807,14 +927,18 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 			continue;
 
 		do {
-			start = u64_stats_fetch_begin_irq(&p->syncp);
+			start = u64_stats_fetch_begin(&p->syncp);
 			packets = p->stats.packets;
 			bytes = p->stats.bytes;
-		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
+		} while (u64_stats_fetch_retry(&p->syncp, start));
 		rx_b += bytes;
 		rx_p += packets;
 		rx_buf += p->rx_stats.alloc_buff_failed;
 		rx_page += p->rx_stats.alloc_page_failed;
+		rx_reuse += p->rx_stats.page_reuse_count;
+		rx_alloc += p->rx_stats.page_alloc_count;
+		rx_waive += p->rx_stats.page_waive_count;
+		rx_busy += p->rx_stats.page_busy_count;
 
 		if (i40e_enabled_xdp_vsi(vsi)) {
 			/* locate XDP ring */
@@ -823,10 +947,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 				continue;
 
 			do {
-				start = u64_stats_fetch_begin_irq(&p->syncp);
+				start = u64_stats_fetch_begin(&p->syncp);
 				packets = p->stats.packets;
 				bytes = p->stats.bytes;
-			} while (u64_stats_fetch_retry_irq(&p->syncp, start));
+			} while (u64_stats_fetch_retry(&p->syncp, start));
 			tx_b += bytes;
 			tx_p += packets;
 			tx_restart += p->tx_stats.restart_queue;
@@ -840,8 +964,13 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	vsi->tx_busy = tx_busy;
 	vsi->tx_linearize = tx_linearize;
 	vsi->tx_force_wb = tx_force_wb;
+	vsi->tx_stopped = tx_stopped;
 	vsi->rx_page_failed = rx_page;
 	vsi->rx_buf_failed = rx_buf;
+	vsi->rx_page_reuse = rx_reuse;
+	vsi->rx_page_alloc = rx_alloc;
+	vsi->rx_page_waive = rx_waive;
+	vsi->rx_page_busy = rx_busy;
 
 	ns->rx_packets = rx_p;
 	ns->rx_bytes = rx_b;
@@ -854,13 +983,15 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	ns->tx_errors = es->tx_errors;
 	ons->multicast = oes->rx_multicast;
 	ns->multicast = es->rx_multicast;
-	ons->rx_dropped = oes->rx_discards;
-	ns->rx_dropped = es->rx_discards;
+	ons->rx_dropped = oes->rx_discards_other;
+	ns->rx_dropped = es->rx_discards_other;
+	ons->rx_missed_errors = oes->rx_discards;
+	ns->rx_missed_errors = es->rx_discards;
 	ons->tx_dropped = oes->tx_discards;
 	ns->tx_dropped = es->tx_discards;
 
 	/* pull in a couple PF stats if this is the main vsi */
-	if (vsi == pf->vsi[pf->lan_vsi]) {
+	if (vsi->type == I40E_VSI_MAIN) {
 		ns->rx_crc_errors = pf->stats.crc_errors;
 		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
 		ns->rx_length_errors = pf->stats.rx_length_errors;
@@ -1070,11 +1201,9 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
 	val = rd32(hw, I40E_PRTPM_EEE_STAT);
 	nsd->tx_lpi_status =
-		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
-			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
+		FIELD_GET(I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK, val);
 	nsd->rx_lpi_status =
-		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
-			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
+		FIELD_GET(I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK, val);
 	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
 			   pf->stat_offsets_loaded,
 			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
@@ -1082,13 +1211,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
 			   pf->stat_offsets_loaded,
 			   &osd->rx_lpi_count, &nsd->rx_lpi_count);
 
-	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
+	if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
 	    !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
 		nsd->fd_sb_status = true;
 	else
 		nsd->fd_sb_status = false;
 
-	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
+	if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
 	    !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
 		nsd->fd_atr_status = true;
 	else
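[Note: the EEE hunk above is one of many in this commit replacing open-coded mask-and-shift pairs with the <linux/bitfield.h> helpers, which derive the shift from the mask at compile time. A minimal sketch of the idiom with a made-up register layout (DEMO_STATUS_MASK is hypothetical):

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define DEMO_STATUS_MASK	GENMASK(9, 4)	/* hypothetical 6-bit field */

	static u32 demo_roundtrip(u32 reg)
	{
		/* FIELD_GET: (reg & mask) >> shift, shift inferred from the mask */
		u32 field = FIELD_GET(DEMO_STATUS_MASK, reg);

		/* FIELD_PREP is the inverse: place a value into the field position */
		return FIELD_PREP(DEMO_STATUS_MASK, field);
	}

Both macros require a compile-time-constant mask and check that the value fits the field, which is what makes them safer than the hand-written SHIFT/MASK pairs being deleted.]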
@@ -1107,27 +1236,49 @@ void i40e_update_stats(struct i40e_vsi *vsi)
 {
 	struct i40e_pf *pf = vsi->back;
 
-	if (vsi == pf->vsi[pf->lan_vsi])
+	if (vsi->type == I40E_VSI_MAIN)
 		i40e_update_pf_stats(pf);
 
 	i40e_update_vsi_stats(vsi);
 }
 
 /**
- * i40e_count_filters - counts VSI mac filters
+ * i40e_count_all_filters - counts VSI MAC filters
  * @vsi: the VSI to be searched
  *
- * Returns count of mac filters
- **/
-int i40e_count_filters(struct i40e_vsi *vsi)
+ * Return: count of MAC filters in any state.
+ */
+int i40e_count_all_filters(struct i40e_vsi *vsi)
+{
+	struct i40e_mac_filter *f;
+	struct hlist_node *h;
+	int bkt, cnt = 0;
+
+	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
+		cnt++;
+
+	return cnt;
+}
+
+/**
+ * i40e_count_active_filters - counts active VSI MAC filters
+ * @vsi: the VSI to be searched
+ *
+ * Return: count of active MAC filters.
+ */
+int i40e_count_active_filters(struct i40e_vsi *vsi)
 {
 	struct i40e_mac_filter *f;
 	struct hlist_node *h;
 	int bkt;
 	int cnt = 0;
 
-	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
-		++cnt;
+	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+		if (f->state == I40E_FILTER_NEW ||
+		    f->state == I40E_FILTER_NEW_SYNC ||
+		    f->state == I40E_FILTER_ACTIVE)
+			++cnt;
+	}
 
 	return cnt;
 }
@@ -1311,6 +1462,8 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
 
 			new->f = add_head;
 			new->state = add_head->state;
+			if (add_head->state == I40E_FILTER_NEW)
+				add_head->state = I40E_FILTER_NEW_SYNC;
 
 			/* Add the new filter to the tmp list */
 			hlist_add_head(&new->hlist, tmp_add_list);
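[Note: both counters walk vsi->mac_filter_hash with the <linux/hashtable.h> iterators. A minimal self-contained sketch of the pattern with a hypothetical filter type — the _safe variant matters because the driver sometimes unlinks entries mid-walk:

	#include <linux/hashtable.h>
	#include <linux/if_ether.h>

	struct demo_filter {
		u8 macaddr[ETH_ALEN];
		struct hlist_node hlist;
	};

	static DEFINE_HASHTABLE(demo_hash, 8);	/* 2^8 buckets */

	static int demo_count(void)
	{
		struct demo_filter *f;
		struct hlist_node *h;
		int bkt, cnt = 0;

		/* safe variant: f may be hash_del()'d inside the loop body
		 * without breaking the iteration
		 */
		hash_for_each_safe(demo_hash, bkt, h, f, hlist)
			cnt++;

		return cnt;
	}
]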
@@ -1328,6 +1481,116 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
 }
 
 /**
+ * i40e_get_vf_new_vlan - Get new vlan id on a vf
+ * @vsi: the vsi to configure
+ * @new_mac: new mac filter to be added
+ * @f: existing mac filter, replaced with new_mac->f if new_mac is not NULL
+ * @vlan_filters: the number of active VLAN filters
+ * @trusted: flag if the VF is trusted
+ *
+ * Get new VLAN id based on current VLAN filters, trust, PVID
+ * and vf-vlan-prune-disable flag.
+ *
+ * Returns the value of the new vlan filter or
+ * the old value if no new filter is needed.
+ */
+static s16 i40e_get_vf_new_vlan(struct i40e_vsi *vsi,
+				struct i40e_new_mac_filter *new_mac,
+				struct i40e_mac_filter *f,
+				int vlan_filters,
+				bool trusted)
+{
+	s16 pvid = le16_to_cpu(vsi->info.pvid);
+	struct i40e_pf *pf = vsi->back;
+	bool is_any;
+
+	if (new_mac)
+		f = new_mac->f;
+
+	if (pvid && f->vlan != pvid)
+		return pvid;
+
+	is_any = (trusted ||
+		  !test_bit(I40E_FLAG_VF_VLAN_PRUNING_ENA, pf->flags));
+
+	if ((vlan_filters && f->vlan == I40E_VLAN_ANY) ||
+	    (!is_any && !vlan_filters && f->vlan == I40E_VLAN_ANY) ||
+	    (is_any && !vlan_filters && f->vlan == 0)) {
+		if (is_any)
+			return I40E_VLAN_ANY;
+		else
+			return 0;
+	}
+
+	return f->vlan;
+}
+
+/**
+ * i40e_correct_vf_mac_vlan_filters - Correct non-VLAN VF filters if necessary
+ * @vsi: the vsi to configure
+ * @tmp_add_list: list of filters ready to be added
+ * @tmp_del_list: list of filters ready to be deleted
+ * @vlan_filters: the number of active VLAN filters
+ * @trusted: flag if the VF is trusted
+ *
+ * Correct VF VLAN filters based on current VLAN filters, trust, PVID
+ * and vf-vlan-prune-disable flag.
+ *
+ * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
+ *
+ * This function is only expected to be called from within
+ * i40e_sync_vsi_filters.
+ *
+ * NOTE: This function expects to be called while under the
+ * mac_filter_hash_lock
+ */
+static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi,
+					    struct hlist_head *tmp_add_list,
+					    struct hlist_head *tmp_del_list,
+					    int vlan_filters,
+					    bool trusted)
+{
+	struct i40e_mac_filter *f, *add_head;
+	struct i40e_new_mac_filter *new_mac;
+	struct hlist_node *h;
+	int bkt, new_vlan;
+
+	hlist_for_each_entry(new_mac, tmp_add_list, hlist) {
+		new_mac->f->vlan = i40e_get_vf_new_vlan(vsi, new_mac, NULL,
+							vlan_filters, trusted);
+	}
+
+	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+		new_vlan = i40e_get_vf_new_vlan(vsi, NULL, f, vlan_filters,
+						trusted);
+		if (new_vlan != f->vlan) {
+			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
+			if (!add_head)
+				return -ENOMEM;
+			/* Create a temporary i40e_new_mac_filter */
+			new_mac = kzalloc(sizeof(*new_mac), GFP_ATOMIC);
+			if (!new_mac)
+				return -ENOMEM;
+			new_mac->f = add_head;
+			new_mac->state = add_head->state;
+			if (add_head->state == I40E_FILTER_NEW)
+				add_head->state = I40E_FILTER_NEW_SYNC;
+
+			/* Add the new filter to the tmp list */
+			hlist_add_head(&new_mac->hlist, tmp_add_list);
+
+			/* Put the original filter into the delete list */
+			f->state = I40E_FILTER_REMOVE;
+			hash_del(&f->hlist);
+			hlist_add_head(&f->hlist, tmp_del_list);
+		}
+	}
+
+	vsi->has_vlan_filter = !!vlan_filters;
+	return 0;
+}
+
+/**
  * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
  * @vsi: the PF Main VSI - inappropriate for any other VSI
  * @macaddr: the MAC address
@@ -1423,9 +1686,8 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
  * @vsi: VSI to remove from
  * @f: the filter to remove from the list
  *
- * This function should be called instead of i40e_del_filter only if you know
- * the exact filter you will remove already, such as via i40e_find_filter or
- * i40e_find_mac.
+ * This function requires you've found the exact filter you will remove
+ * already, such as via i40e_find_filter or i40e_find_mac.
  *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
@@ -1455,29 +1717,6 @@ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
 }
 
 /**
- * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
- * @vsi: the VSI to be searched
- * @macaddr: the MAC address
- * @vlan: the VLAN
- *
- * NOTE: This function is expected to be called with mac_filter_hash_lock
- * being held.
- * ANOTHER NOTE: This function MUST be called from within the context of
- * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
- * instead of list_for_each_entry().
- **/
-void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
-{
-	struct i40e_mac_filter *f;
-
-	if (!vsi || !macaddr)
-		return;
-
-	f = i40e_find_filter(vsi, macaddr, vlan);
-	__i40e_del_filter(vsi, f);
-}
-
-/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
@@ -1496,6 +1735,7 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
 	struct hlist_node *h;
 	int bkt;
 
+	lockdep_assert_held(&vsi->mac_filter_hash_lock);
 	if (vsi->info.pvid)
 		return i40e_add_filter(vsi, macaddr,
 				       le16_to_cpu(vsi->info.pvid));
@@ -1563,12 +1803,6 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
-	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
-		netdev_info(netdev, "already using mac address %pM\n",
-			    addr->sa_data);
-		return 0;
-	}
-
 	if (test_bit(__I40E_DOWN, pf->state) ||
 	    test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
 		return -EADDRNOTAVAIL;
@@ -1587,19 +1821,19 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 	 */
 	spin_lock_bh(&vsi->mac_filter_hash_lock);
 	i40e_del_mac_filter(vsi, netdev->dev_addr);
-	ether_addr_copy(netdev->dev_addr, addr->sa_data);
+	eth_hw_addr_set(netdev, addr->sa_data);
 	i40e_add_mac_filter(vsi, netdev->dev_addr);
 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
 
 	if (vsi->type == I40E_VSI_MAIN) {
-		i40e_status ret;
+		int ret;
 
 		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
 						addr->sa_data, NULL);
 		if (ret)
-			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
-				    i40e_stat_str(hw, ret),
-				    i40e_aq_str(hw, hw->aq.asq_last_status));
+			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %pe, AQ ret %s\n",
+				    ERR_PTR(ret),
+				    libie_aq_str(hw->aq.asq_last_status));
 	}
 
 	/* schedule our worker thread which will take care of
@@ -1629,9 +1863,9 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
 		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
 		if (ret) {
 			dev_info(&pf->pdev->dev,
-				 "Cannot set RSS key, err %s aq_err %s\n",
-				 i40e_stat_str(hw, ret),
-				 i40e_aq_str(hw, hw->aq.asq_last_status));
+				 "Cannot set RSS key, err %pe aq_err %s\n",
+				 ERR_PTR(ret),
+				 libie_aq_str(hw->aq.asq_last_status));
 			return ret;
 		}
 	}
@@ -1641,9 +1875,9 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
 		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
 		if (ret) {
 			dev_info(&pf->pdev->dev,
-				 "Cannot set RSS lut, err %s aq_err %s\n",
-				 i40e_stat_str(hw, ret),
-				 i40e_aq_str(hw, hw->aq.asq_last_status));
+				 "Cannot set RSS lut, err %pe aq_err %s\n",
+				 ERR_PTR(ret),
+				 libie_aq_str(hw->aq.asq_last_status));
 			return ret;
 		}
 	}
@@ -1661,7 +1895,7 @@ static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
 	u8 *lut;
 	int ret;
 
-	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
+	if (!test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps))
 		return 0;
 	if (!vsi->rss_size)
 		vsi->rss_size = min_t(int, pf->alloc_rss_size,
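[Note: the i40e_set_mac hunk above swaps ether_addr_copy() for eth_hw_addr_set(), required since netdev->dev_addr became const and all writes must go through the core so address tracking stays coherent. A minimal sketch of the conversion, with a hypothetical handler (helpers from <linux/etherdevice.h>):

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>

	static int demo_set_mac(struct net_device *netdev, const u8 *mac)
	{
		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		/* Old style, now rejected because dev_addr is const:
		 *	ether_addr_copy(netdev->dev_addr, mac);
		 * New style: let the core update its rbtree of device addresses.
		 */
		eth_hw_addr_set(netdev, mac);
		return 0;
	}
]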
@@ -1790,6 +2024,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
 				     bool is_add)
 {
 	struct i40e_pf *pf = vsi->back;
+	u16 num_tc_qps = 0;
 	u16 sections = 0;
 	u8 netdev_tc = 0;
 	u16 numtc = 1;
@@ -1797,14 +2032,38 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
 	u8 offset;
 	u16 qmap;
 	int i;
-	u16 num_tc_qps = 0;
 
 	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
 	offset = 0;
+	/* zero out queue mapping, it will get updated on the end of the function */
+	memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping));
+
+	if (vsi->type == I40E_VSI_MAIN) {
+		/* This code helps add more queue to the VSI if we have
+		 * more cores than RSS can support, the higher cores will
+		 * be served by ATR or other filters. Furthermore, the
+		 * non-zero req_queue_pairs says that user requested a new
+		 * queue count via ethtool's set_channels, so use this
+		 * value for queues distribution across traffic classes.
+		 * We need at least one queue pair for the interface
+		 * to be usable as we see in else statement.
+		 */
+		if (vsi->req_queue_pairs > 0)
+			vsi->num_queue_pairs = vsi->req_queue_pairs;
+		else if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
+			vsi->num_queue_pairs = pf->num_lan_msix;
+		else
+			vsi->num_queue_pairs = 1;
+	}
 
 	/* Number of queues per enabled TC */
-	num_tc_qps = vsi->alloc_queue_pairs;
-	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+	if (vsi->type == I40E_VSI_MAIN ||
+	    (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0))
+		num_tc_qps = vsi->num_queue_pairs;
+	else
+		num_tc_qps = vsi->alloc_queue_pairs;
+
+	if (enabled_tc && test_bit(I40E_FLAG_DCB_ENA, vsi->back->flags)) {
 		/* Find numtc from enabled TC bitmap */
 		for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 			if (enabled_tc & BIT(i)) /* TC is enabled */
@@ -1823,7 +2082,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
 	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
 
 	/* Do not allow use more TC queue pairs than MSI-X vectors exist */
-	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+	if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
 		num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
 
 	/* Setup queue offset/count for all TCs for given VSI */
@@ -1835,8 +2094,10 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
 
 		switch (vsi->type) {
 		case I40E_VSI_MAIN:
-			if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
-					   I40E_FLAG_FD_ATR_ENABLED)) ||
+			if ((!test_bit(I40E_FLAG_FD_SB_ENA,
+				       pf->flags) &&
+			     !test_bit(I40E_FLAG_FD_ATR_ENA,
+				       pf->flags)) ||
 			    vsi->tc_config.enabled_tc != 1) {
 				qcount = min_t(int, pf->alloc_rss_size,
 					       num_tc_qps);
@@ -1881,15 +2142,11 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
 		}
 		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
 	}
-
-	/* Set actual Tx/Rx queue pairs */
-	vsi->num_queue_pairs = offset;
-	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
-		if (vsi->req_queue_pairs > 0)
-			vsi->num_queue_pairs = vsi->req_queue_pairs;
-		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
-			vsi->num_queue_pairs = pf->num_lan_msix;
-	}
+	/* Do not change previously set num_queue_pairs for PFs and VFs */
+	if ((vsi->type == I40E_VSI_MAIN && numtc != 1) ||
+	    (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) ||
+	    (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV))
+		vsi->num_queue_pairs = offset;
 
 	/* Scheduler section valid can only be set for ADD VSI */
 	if (is_add) {
@@ -2019,6 +2276,7 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
 	hlist_for_each_entry_safe(new, h, from, hlist) {
 		/* We can simply free the wrapper structure */
 		hlist_del(&new->hlist);
+		netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
 		kfree(new);
 	}
 }
@@ -2101,19 +2359,18 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
 			  int num_del, int *retval)
 {
 	struct i40e_hw *hw = &vsi->back->hw;
-	i40e_status aq_ret;
-	int aq_err;
+	enum libie_aq_err aq_status;
+	int aq_ret;
 
-	aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
-	aq_err = hw->aq.asq_last_status;
+	aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
+					   &aq_status);
 
 	/* Explicitly ignore and do not report when firmware returns ENOENT */
-	if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
+	if (aq_ret && !(aq_status == LIBIE_AQ_RC_ENOENT)) {
 		*retval = -EIO;
 		dev_info(&vsi->back->pdev->dev,
-			 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
-			 vsi_name, i40e_stat_str(hw, aq_ret),
-			 i40e_aq_str(hw, aq_err));
+			 "ignoring delete macvlan error on %s, err %pe, aq_err %s\n",
+			 vsi_name, ERR_PTR(aq_ret), libie_aq_str(aq_status));
 	}
 }
 
@@ -2136,10 +2393,10 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
 			  int num_add)
 {
 	struct i40e_hw *hw = &vsi->back->hw;
-	int aq_err, fcnt;
+	enum libie_aq_err aq_status;
+	int fcnt;
 
-	i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
-	aq_err = hw->aq.asq_last_status;
+	i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status);
 	fcnt = i40e_update_filter_state(num_add, list, add_head);
 
 	if (fcnt != num_add) {
@@ -2147,17 +2404,17 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
 			set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
 			dev_warn(&vsi->back->pdev->dev,
 				 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
-				 i40e_aq_str(hw, aq_err), vsi_name);
+				 libie_aq_str(aq_status), vsi_name);
 		} else if (vsi->type == I40E_VSI_SRIOV ||
 			   vsi->type == I40E_VSI_VMDQ1 ||
 			   vsi->type == I40E_VSI_VMDQ2) {
 			dev_warn(&vsi->back->pdev->dev,
 				 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
-				 i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
+				 libie_aq_str(aq_status), vsi_name, vsi_name);
 		} else {
 			dev_warn(&vsi->back->pdev->dev,
 				 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
-				 i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
+				 libie_aq_str(aq_status), vsi_name, vsi->type);
 		}
 	}
 }
@@ -2174,13 +2431,14 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
  *
 * Returns status indicating success or failure;
 **/
-static i40e_status
+static int
 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
 			  struct i40e_mac_filter *f)
 {
-	bool enable = f->state == I40E_FILTER_NEW;
+	bool enable = f->state == I40E_FILTER_NEW ||
+		      f->state == I40E_FILTER_NEW_SYNC;
 	struct i40e_hw *hw = &vsi->back->hw;
-	i40e_status aq_ret;
+	int aq_ret;
 
 	if (f->vlan == I40E_VLAN_ANY) {
 		aq_ret = i40e_aq_set_vsi_broadcast(hw,
@@ -2199,8 +2457,7 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
 		set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
 		dev_warn(&vsi->back->pdev->dev,
 			 "Error %s, forcing overflow promiscuous on %s\n",
-			 i40e_aq_str(hw, hw->aq.asq_last_status),
-			 vsi_name);
+			 libie_aq_str(hw->aq.asq_last_status), vsi_name);
 	}
 
 	return aq_ret;
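[Note: throughout this commit the driver-private i40e_stat_str() strings are replaced by printk's %pe specifier, which prints a symbolic errno name when handed an ERR_PTR-encoded value; AQ firmware statuses go through the shared libie_aq_str() helper instead. A minimal sketch of the %pe idiom (demo_report is hypothetical):

	#include <linux/device.h>
	#include <linux/err.h>

	static void demo_report(struct device *dev, int err)
	{
		if (err)
			/* With CONFIG_SYMBOLIC_ERRNAME=y this logs e.g.
			 * "command failed, err -EIO"; otherwise the number.
			 */
			dev_info(dev, "command failed, err %pe\n", ERR_PTR(err));
	}
]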
@@ -2217,13 +2474,13 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
 **/
 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
 {
-	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+	struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
 	struct i40e_hw *hw = &pf->hw;
-	i40e_status aq_ret;
+	int aq_ret;
 
 	if (vsi->type == I40E_VSI_MAIN &&
-	    pf->lan_veb != I40E_NO_VEB &&
-	    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
+	    i40e_pf_get_main_veb(pf) &&
+	    !test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
 		/* set defport ON for Main VSI instead of true promisc
 		 * this way we will get all unicast/multicast and VLAN
 		 * promisc behavior but will not get VF or VMDq traffic
@@ -2239,9 +2496,9 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
 						  NULL);
 		if (aq_ret) {
 			dev_info(&pf->pdev->dev,
-				 "Set default VSI failed, err %s, aq_err %s\n",
-				 i40e_stat_str(hw, aq_ret),
-				 i40e_aq_str(hw, hw->aq.asq_last_status));
+				 "Set default VSI failed, err %pe, aq_err %s\n",
+				 ERR_PTR(aq_ret),
+				 libie_aq_str(hw->aq.asq_last_status));
 		}
 	} else {
 		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
@@ -2251,9 +2508,9 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
 						  true);
 		if (aq_ret) {
 			dev_info(&pf->pdev->dev,
-				 "set unicast promisc failed, err %s, aq_err %s\n",
-				 i40e_stat_str(hw, aq_ret),
-				 i40e_aq_str(hw, hw->aq.asq_last_status));
+				 "set unicast promisc failed, err %pe, aq_err %s\n",
+				 ERR_PTR(aq_ret),
+				 libie_aq_str(hw->aq.asq_last_status));
 		}
 		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
 						  hw,
@@ -2261,9 +2518,9 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
 						  promisc, NULL);
 		if (aq_ret) {
 			dev_info(&pf->pdev->dev,
-				 "set multicast promisc failed, err %s, aq_err %s\n",
-				 i40e_stat_str(hw, aq_ret),
-				 i40e_aq_str(hw, hw->aq.asq_last_status));
+				 "set multicast promisc failed, err %pe, aq_err %s\n",
+				 ERR_PTR(aq_ret),
+				 libie_aq_str(hw->aq.asq_last_status));
 		}
 	}
 
@@ -2292,12 +2549,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 	unsigned int vlan_filters = 0;
 	char vsi_name[16] = "PF";
 	int filter_list_len = 0;
-	i40e_status aq_ret = 0;
 	u32 changed_flags = 0;
 	struct hlist_node *h;
 	struct i40e_pf *pf;
 	int num_add = 0;
 	int num_del = 0;
+	int aq_ret = 0;
 	int retval = 0;
 	u16 cmd_flags;
 	int list_size;
@@ -2352,6 +2609,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 
 			/* Add it to the hash list */
 			hlist_add_head(&new->hlist, &tmp_add_list);
+			f->state = I40E_FILTER_NEW_SYNC;
 		}
 
 		/* Count the number of active (current and new) VLAN
@@ -2362,10 +2620,18 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 				vlan_filters++;
 		}
 
-		retval = i40e_correct_mac_vlan_filters(vsi,
-						       &tmp_add_list,
-						       &tmp_del_list,
-						       vlan_filters);
+		if (vsi->type != I40E_VSI_SRIOV)
+			retval = i40e_correct_mac_vlan_filters
+				(vsi, &tmp_add_list, &tmp_del_list,
+				 vlan_filters);
+		else if (pf->vf)
+			retval = i40e_correct_vf_mac_vlan_filters
+				(vsi, &tmp_add_list, &tmp_del_list,
+				 vlan_filters, pf->vf[vsi->vf_id].trusted);
+
+		hlist_for_each_entry(new, &tmp_add_list, hlist)
+			netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
+
 		if (retval)
 			goto err_no_memory_locked;
 
@@ -2495,9 +2761,11 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 		spin_lock_bh(&vsi->mac_filter_hash_lock);
 		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
 			/* Only update the state if we're still NEW */
-			if (new->f->state == I40E_FILTER_NEW)
+			if (new->f->state == I40E_FILTER_NEW ||
+			    new->f->state == I40E_FILTER_NEW_SYNC)
 				new->f->state = new->state;
 			hlist_del(&new->hlist);
+			netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
 			kfree(new);
 		}
 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -2530,7 +2798,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 	}
 
 	/* if the VF is not trusted do not do promisc */
-	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
+	if (vsi->type == I40E_VSI_SRIOV && pf->vf &&
+	    !pf->vf[vsi->vf_id].trusted) {
 		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
 		goto out;
 	}
@@ -2556,10 +2825,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 			retval = i40e_aq_rc_to_posix(aq_ret,
 						     hw->aq.asq_last_status);
 			dev_info(&pf->pdev->dev,
-				 "set multi promisc failed on %s, err %s aq_err %s\n",
+				 "set multi promisc failed on %s, err %pe aq_err %s\n",
 				 vsi_name,
-				 i40e_stat_str(hw, aq_ret),
-				 i40e_aq_str(hw, hw->aq.asq_last_status));
+				 ERR_PTR(aq_ret),
+				 libie_aq_str(hw->aq.asq_last_status));
 		} else {
 			dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
 				 cur_multipromisc ? "entering" : "leaving");
 		}
@@ -2576,11 +2845,11 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 			retval = i40e_aq_rc_to_posix(aq_ret,
 						     hw->aq.asq_last_status);
 			dev_info(&pf->pdev->dev,
-				 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
+				 "Setting promiscuous %s failed on %s, err %pe aq_err %s\n",
 				 cur_promisc ? "on" : "off",
 				 vsi_name,
-				 i40e_stat_str(hw, aq_ret),
-				 i40e_aq_str(hw, hw->aq.asq_last_status));
+				 ERR_PTR(aq_ret),
+				 libie_aq_str(hw->aq.asq_last_status));
 		}
 	}
 out:
@@ -2610,6 +2879,7 @@ err_no_memory_locked:
 **/
 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
 {
+	struct i40e_vsi *vsi;
 	int v;
 
 	if (!pf)
@@ -2621,10 +2891,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
 		return;
 	}
 
-	for (v = 0; v < pf->num_alloc_vsi; v++) {
-		if (pf->vsi[v] &&
-		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
-			int ret = i40e_sync_vsi_filters(pf->vsi[v]);
+	i40e_pf_for_each_vsi(pf, v, vsi) {
+		if ((vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
+		    !test_bit(__I40E_VSI_RELEASING, vsi->state)) {
+			int ret = i40e_sync_vsi_filters(vsi);
 
 			if (ret) {
 				/* come back and try again later */
@@ -2637,15 +2907,35 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
 }
 
 /**
- * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * i40e_calculate_vsi_rx_buf_len - Calculates buffer length
+ *
+ * @vsi: VSI to calculate rx_buf_len from
+ */
+static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi)
+{
+	if (!vsi->netdev || test_bit(I40E_FLAG_LEGACY_RX_ENA, vsi->back->flags))
+		return SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048);
+
+	return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048;
+}
+
+/**
+ * i40e_max_vsi_frame_size - returns the maximum allowed frame size for VSI
 * @vsi: the vsi
+ * @xdp_prog: XDP program
 **/
-static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
+static int i40e_max_vsi_frame_size(struct i40e_vsi *vsi,
+				   struct bpf_prog *xdp_prog)
 {
-	if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
-		return I40E_RXBUFFER_2048;
+	u16 rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi);
+	u16 chain_len;
+
+	if (xdp_prog && !xdp_prog->aux->xdp_has_frags)
+		chain_len = 1;
 	else
-		return I40E_RXBUFFER_3072;
+		chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
+
+	return min_t(u16, rx_buf_len * chain_len, I40E_MAX_RXBUFFER);
 }
 
 /**
@@ -2660,17 +2950,18 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
 	struct i40e_pf *pf = vsi->back;
+	int frame_size;
 
-	if (i40e_enabled_xdp_vsi(vsi)) {
-		int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-
-		if (frame_size > i40e_max_xdp_frame_size(vsi))
-			return -EINVAL;
+	frame_size = i40e_max_vsi_frame_size(vsi, vsi->xdp_prog);
+	if (new_mtu > frame_size - I40E_PACKET_HDR_PAD) {
+		netdev_err(netdev, "Error changing mtu to %d, Max is %d\n",
+			   new_mtu, frame_size - I40E_PACKET_HDR_PAD);
+		return -EINVAL;
 	}
 
 	netdev_dbg(netdev, "changing MTU from %d to %d\n",
 		   netdev->mtu, new_mtu);
-	netdev->mtu = new_mtu;
+	WRITE_ONCE(netdev->mtu, new_mtu);
 	if (netif_running(netdev))
 		i40e_vsi_reinit_locked(vsi);
 	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
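[Note: i40e_max_vsi_frame_size() above caps the supported frame at what the Rx path can actually chain: a single buffer when the loaded XDP program cannot handle frags, otherwise the hardware's chained-buffer limit, never exceeding the absolute Rx maximum. A worked sketch of the arithmetic — the numeric values below are illustrative assumptions, not taken from this diff:

	/* Illustrative values only: a 3072-byte Rx buffer, a chain limit of
	 * 5 buffers, and a 9728-byte absolute hardware maximum.
	 */
	#define DEMO_RX_BUF_LEN		3072
	#define DEMO_MAX_CHAIN		5
	#define DEMO_MAX_RXBUFFER	9728

	static unsigned int demo_max_frame(bool xdp_without_frags)
	{
		unsigned int chain = xdp_without_frags ? 1 : DEMO_MAX_CHAIN;

		/* XDP without frag support: 3072 * 1 = 3072-byte frames at most.
		 * Otherwise: min(3072 * 5, 9728) = 9728, the hardware ceiling.
		 */
		return min_t(unsigned int,
			     DEMO_RX_BUF_LEN * chain, DEMO_MAX_RXBUFFER);
	}

i40e_change_mtu() then subtracts the L2 header/FCS/VLAN padding from that frame size to get the largest acceptable MTU.]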
@@ -2679,34 +2970,13 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
 }
 
 /**
- * i40e_ioctl - Access the hwtstamp interface
- * @netdev: network interface device structure
- * @ifr: interface request data
- * @cmd: ioctl command
- **/
-int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
-	struct i40e_netdev_priv *np = netdev_priv(netdev);
-	struct i40e_pf *pf = np->vsi->back;
-
-	switch (cmd) {
-	case SIOCGHWTSTAMP:
-		return i40e_ptp_get_ts_config(pf, ifr);
-	case SIOCSHWTSTAMP:
-		return i40e_ptp_set_ts_config(pf, ifr);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
 {
 	struct i40e_vsi_context ctxt;
-	i40e_status ret;
+	int ret;
 
 	/* Don't modify stripping options if a port VLAN is active */
 	if (vsi->info.pvid)
@@ -2726,10 +2996,9 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
 	if (ret) {
 		dev_info(&vsi->back->pdev->dev,
-			 "update vlan stripping failed, err %s aq_err %s\n",
-			 i40e_stat_str(&vsi->back->hw, ret),
-			 i40e_aq_str(&vsi->back->hw,
-				     vsi->back->hw.aq.asq_last_status));
+			 "update vlan stripping failed, err %pe aq_err %s\n",
+			 ERR_PTR(ret),
+			 libie_aq_str(vsi->back->hw.aq.asq_last_status));
 	}
 }
 
@@ -2740,7 +3009,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
 {
 	struct i40e_vsi_context ctxt;
-	i40e_status ret;
+	int ret;
 
 	/* Don't modify stripping options if a port VLAN is active */
 	if (vsi->info.pvid)
@@ -2761,10 +3030,9 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
 	if (ret) {
 		dev_info(&vsi->back->pdev->dev,
-			 "update vlan stripping failed, err %s aq_err %s\n",
-			 i40e_stat_str(&vsi->back->hw, ret),
-			 i40e_aq_str(&vsi->back->hw,
-				     vsi->back->hw.aq.asq_last_status));
+			 "update vlan stripping failed, err %pe aq_err %s\n",
+			 ERR_PTR(ret),
+			 libie_aq_str(vsi->back->hw.aq.asq_last_status));
 	}
 }
 
@@ -2788,8 +3056,21 @@ int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
 	int bkt;
 
 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
-		if (f->state == I40E_FILTER_REMOVE)
+		/* If we're asked to add a filter that has been marked for
+		 * removal, it is safe to simply restore it to active state.
+		 * __i40e_del_filter will have simply deleted any filters which
+		 * were previously marked NEW or FAILED, so if it is currently
+		 * marked REMOVE it must have previously been ACTIVE. Since we
+		 * haven't yet run the sync filters task, just restore this
+		 * filter to the ACTIVE state so that the sync task leaves it
+		 * in place.
+		 */
+		if (f->state == I40E_FILTER_REMOVE && f->vlan == vid) {
+			f->state = I40E_FILTER_ACTIVE;
+			continue;
+		} else if (f->state == I40E_FILTER_REMOVE) {
 			continue;
+		}
 		add_f = i40e_add_filter(vsi, f->macaddr, vid);
 		if (!add_f) {
 			dev_info(&vsi->back->pdev->dev,
@@ -2980,7 +3261,7 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
 {
 	struct i40e_vsi_context ctxt;
-	i40e_status ret;
+	int ret;
 
 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
 	vsi->info.pvid = cpu_to_le16(vid);
@@ -2993,10 +3274,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
 	if (ret) {
 		dev_info(&vsi->back->pdev->dev,
-			 "add pvid failed, err %s aq_err %s\n",
-			 i40e_stat_str(&vsi->back->hw, ret),
-			 i40e_aq_str(&vsi->back->hw,
-				     vsi->back->hw.aq.asq_last_status));
+			 "add pvid failed, err %pe aq_err %s\n",
+			 ERR_PTR(ret),
+			 libie_aq_str(vsi->back->hw.aq.asq_last_status));
 		return -ENOENT;
 	}
 
@@ -3157,15 +3437,15 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
 	u16 pf_q = vsi->base_queue + ring->queue_index;
 	struct i40e_hw *hw = &vsi->back->hw;
 	struct i40e_hmc_obj_txq tx_ctx;
-	i40e_status err = 0;
 	u32 qtx_ctl = 0;
+	int err = 0;
 
 	if (ring_is_xdp(ring))
 		ring->xsk_pool = i40e_xsk_pool(ring);
 
 	/* some ATR related tx ring init */
-	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
-		ring->atr_sample_rate = vsi->back->atr_sample_rate;
+	if (test_bit(I40E_FLAG_FD_ATR_ENA, vsi->back->flags)) {
+		ring->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
 		ring->atr_count = 0;
 	} else {
 		ring->atr_sample_rate = 0;
@@ -3180,9 +3460,11 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
 	tx_ctx.new_context = 1;
 	tx_ctx.base = (ring->dma / 128);
 	tx_ctx.qlen = ring->count;
-	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
-					       I40E_FLAG_FD_ATR_ENABLED));
-	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
+	if (test_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags) ||
+	    test_bit(I40E_FLAG_FD_ATR_ENA, vsi->back->flags))
+		tx_ctx.fd_ena = 1;
+	if (test_bit(I40E_FLAG_PTP_ENA, vsi->back->flags))
+		tx_ctx.timesync_ena = 1;
 	/* FDIR VSI tx ring can still use RS bit and writebacks */
 	if (vsi->type != I40E_VSI_FDIR)
 		tx_ctx.head_wb_ena = 1;
@@ -3234,21 +3516,19 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
 		else
 			return -EINVAL;
 
-		qtx_ctl |= (ring->ch->vsi_number <<
-			    I40E_QTX_CTL_VFVM_INDX_SHIFT) &
-			     I40E_QTX_CTL_VFVM_INDX_MASK;
+		qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK,
+				      ring->ch->vsi_number);
 	} else {
 		if (vsi->type == I40E_VSI_VMDQ2) {
 			qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
-			qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
-				    I40E_QTX_CTL_VFVM_INDX_MASK;
+			qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK,
+					      vsi->id);
 		} else {
 			qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
 		}
 	}
 
-	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
-		    I40E_QTX_CTL_PF_INDX_MASK);
+	qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK, hw->pf_id);
 	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
 	i40e_flush(hw);
 
@@ -3282,54 +3562,59 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	u16 pf_q = vsi->base_queue + ring->queue_index;
 	struct i40e_hw *hw = &vsi->back->hw;
 	struct i40e_hmc_obj_rxq rx_ctx;
-	i40e_status err = 0;
+	int err = 0;
 	bool ok;
-	int ret;
 
 	bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
 
 	/* clear the context structure first */
 	memset(&rx_ctx, 0, sizeof(rx_ctx));
 
-	if (ring->vsi->type == I40E_VSI_MAIN)
-		xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+	ring->rx_buf_len = vsi->rx_buf_len;
+
+	/* XDP RX-queue info only needed for RX rings exposed to XDP */
+	if (ring->vsi->type != I40E_VSI_MAIN)
+		goto skip;
+
+	if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+		err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+					 ring->queue_index,
+					 ring->q_vector->napi.napi_id,
+					 ring->rx_buf_len);
+		if (err)
+			return err;
+	}
 
-	kfree(ring->rx_bi);
 	ring->xsk_pool = i40e_xsk_pool(ring);
 	if (ring->xsk_pool) {
-		ret = i40e_alloc_rx_bi_zc(ring);
-		if (ret)
-			return ret;
-		ring->rx_buf_len =
-			xsk_pool_get_rx_frame_size(ring->xsk_pool);
-		/* For AF_XDP ZC, we disallow packets to span on
-		 * multiple buffers, thus letting us skip that
-		 * handling in the fast-path.
-		 */
-		chain_len = 1;
-		ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+		xdp_rxq_info_unreg(&ring->xdp_rxq);
+		ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+		err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+					 ring->queue_index,
+					 ring->q_vector->napi.napi_id,
+					 ring->rx_buf_len);
+		if (err)
+			return err;
+		err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 						 MEM_TYPE_XSK_BUFF_POOL,
 						 NULL);
-		if (ret)
-			return ret;
+		if (err)
+			return err;
 		dev_info(&vsi->back->pdev->dev,
 			 "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
 			 ring->queue_index);
 
 	} else {
-		ret = i40e_alloc_rx_bi(ring);
-		if (ret)
-			return ret;
-		ring->rx_buf_len = vsi->rx_buf_len;
-		if (ring->vsi->type == I40E_VSI_MAIN) {
-			ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
-							 MEM_TYPE_PAGE_SHARED,
-							 NULL);
-			if (ret)
-				return ret;
-		}
+		err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+						 MEM_TYPE_PAGE_SHARED,
+						 NULL);
+		if (err)
+			return err;
 	}
 
+skip:
+	xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);
+
 	rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
 				    BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
 
@@ -3375,10 +3660,16 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	}
 
 	/* configure Rx buffer alignment */
-	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+	if (!vsi->netdev || test_bit(I40E_FLAG_LEGACY_RX_ENA, vsi->back->flags)) {
+		if (I40E_2K_TOO_SMALL_WITH_PADDING) {
+			dev_info(&vsi->back->pdev->dev,
+				 "2k Rx buffer is too small to fit standard MTU and skb_shared_info\n");
+			return -EOPNOTSUPP;
+		}
 		clear_ring_build_skb_enabled(ring);
-	else
+	} else {
 		set_ring_build_skb_enabled(ring);
+	}
 
 	ring->rx_offset = i40e_rx_offset(ring);
 
@@ -3439,20 +3730,16 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
 	int err = 0;
 	u16 i;
 
-	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
-		vsi->max_frame = I40E_MAX_RXBUFFER;
-		vsi->rx_buf_len = I40E_RXBUFFER_2048;
+	vsi->max_frame = i40e_max_vsi_frame_size(vsi, vsi->xdp_prog);
+	vsi->rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi);
+
 #if (PAGE_SIZE < 8192)
-	} else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
-		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
-		vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+	if (vsi->netdev && !I40E_2K_TOO_SMALL_WITH_PADDING &&
+	    vsi->netdev->mtu <= ETH_DATA_LEN) {
 		vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
-#endif
-	} else {
-		vsi->max_frame = I40E_MAX_RXBUFFER;
-		vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
-				   I40E_RXBUFFER_2048;
+		vsi->max_frame = vsi->rx_buf_len;
 	}
+#endif
 
 	/* set up individual rings */
 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
@@ -3471,7 +3758,7 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
 	u16 qoffset, qcount;
 	int i, n;
 
-	if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+	if (!test_bit(I40E_FLAG_DCB_ENA, vsi->back->flags)) {
 		/* Reset the TC information */
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
 			rx_ring = vsi->rx_rings[i];
@@ -3538,7 +3825,7 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
 	struct i40e_pf *pf = vsi->back;
 	struct hlist_node *node;
 
-	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+	if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags))
 		return;
 
 	/* Reset FDir counters as we're replaying all existing filters */
@@ -3604,10 +3891,16 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
 			  q_vector->tx.target_itr >> 1);
 		q_vector->tx.current_itr = q_vector->tx.target_itr;
 
+		/* Set ITR for software interrupts triggered after exiting
+		 * busy-loop polling.
+		 */
+		wr32(hw, I40E_PFINT_ITRN(I40E_SW_ITR, vector - 1),
+		     I40E_ITR_20K);
+
 		wr32(hw, I40E_PFINT_RATEN(vector - 1),
 		     i40e_intrl_usec_to_reg(vsi->int_rate_limit));
 
-		/* Linked list for the queuepairs assigned to this vector */
+		/* beginning of linked list for RX queue assigned to this vector */
 		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
 		for (q = 0; q < q_vector->num_ringpairs; q++) {
 			u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
@@ -3623,6 +3916,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
 			wr32(hw, I40E_QINT_RQCTL(qp), val);
 
 			if (has_xdp) {
+				/* TX queue with next queue set to TX */
 				val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
 				      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
 				      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
@@ -3632,7 +3926,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
 
 				wr32(hw, I40E_QINT_TQCTL(nextqp), val);
 			}
-
+			/* TX queue with next RX or end of linked list */
 			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
 			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
 			      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
@@ -3675,10 +3969,10 @@ static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
 	      I40E_PFINT_ICR0_ENA_VFLR_MASK |
 	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
 
-	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
+	if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags))
 		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
 
-	if (pf->flags & I40E_FLAG_PTP)
+	if (test_bit(I40E_FLAG_PTP_ENA, pf->flags))
 		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
 
 	wr32(hw, I40E_PFINT_ICR0_ENA, val);
@@ -3701,7 +3995,6 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
 	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
 	struct i40e_pf *pf = vsi->back;
 	struct i40e_hw *hw = &pf->hw;
-	u32 val;
 
 	/* set the ITR configuration */
 	q_vector->rx.next_update = jiffies + 1;
@@ -3718,28 +4011,20 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
 	wr32(hw, I40E_PFINT_LNKLST0, 0);
 
-	/* Associate the queue pair to the vector and enable the queue int */
-	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
-	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
-	      (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
-	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
-
-	wr32(hw, I40E_QINT_RQCTL(0), val);
+	/* Associate the queue pair to the vector and enable the queue
+	 * interrupt RX queue in linked list with next queue set to TX
+	 */
+	wr32(hw, I40E_QINT_RQCTL(0), I40E_QINT_RQCTL_VAL(nextqp, 0, TX));
 
 	if (i40e_enabled_xdp_vsi(vsi)) {
-		val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
-		      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
-		      (I40E_QUEUE_TYPE_TX
-		       << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
-
-		wr32(hw, I40E_QINT_TQCTL(nextqp), val);
+		/* TX queue in linked list with next queue set to TX */
+		wr32(hw, I40E_QINT_TQCTL(nextqp),
+		     I40E_QINT_TQCTL_VAL(nextqp, 0, TX));
 	}
 
-	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
-	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
-	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
-
-	wr32(hw, I40E_QINT_TQCTL(0), val);
+	/* last TX queue so the next RX queue doesn't matter */
+	wr32(hw, I40E_QINT_TQCTL(0),
+	     I40E_QINT_TQCTL_VAL(I40E_QUEUE_END_OF_LIST, 0, RX));
 	i40e_flush(hw);
 }
 
@@ -3866,6 +4151,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
 		}
 
 		/* register for affinity change notifications */
+		q_vector->irq_num = irq_num;
 		q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
 		q_vector->affinity_notify.release = i40e_irq_affinity_release;
 		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
@@ -3873,10 +4159,10 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
 		 *
 		 * get_cpu_mask returns a static constant mask with
 		 * a permanent lifetime so it's ok to pass to
-		 * irq_set_affinity_hint without making a copy.
+		 * irq_update_affinity_hint without making a copy.
 		 */
 		cpu = cpumask_local_spread(q_vector->v_idx, -1);
-		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+		irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
 	}
 
 	vsi->irqs_ready = true;
@@ -3887,8 +4173,8 @@ free_queue_irqs:
 		vector--;
 		irq_num = pf->msix_entries[base + vector].vector;
 		irq_set_affinity_notifier(irq_num, NULL);
-		irq_set_affinity_hint(irq_num, NULL);
-		free_irq(irq_num, &vsi->q_vectors[vector]);
+		irq_update_affinity_hint(irq_num, NULL);
+		free_irq(irq_num, vsi->q_vectors[vector]);
 	}
 	return err;
 }
@@ -3922,7 +4208,7 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
 	}
 
 	/* disable each interrupt */
-	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+	if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
 		for (i = vsi->base_vector;
 		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
 			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
@@ -3948,7 +4234,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
 	struct i40e_pf *pf = vsi->back;
 	int i;
 
-	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+	if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
 		for (i = 0; i < vsi->num_q_vectors; i++)
 			i40e_irq_dynamic_enable(vsi, i);
 	} else {
@@ -3969,8 +4255,7 @@ static void i40e_free_misc_vector(struct i40e_pf *pf)
 	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
 	i40e_flush(&pf->hw);
 
-	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
-		synchronize_irq(pf->msix_entries[0].vector);
+	if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
 		free_irq(pf->msix_entries[0].vector, pf);
 		clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
 	}
@@ -4005,7 +4290,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
 	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
 		pf->sw_int_count++;
 
-	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
+	if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags) &&
 	    (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
 		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
 		dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
@@ -4014,7 +4299,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
 
 	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
 	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
-		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+		struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
i40e_q_vector *q_vector = vsi->q_vectors[0]; /* We do not have a way to disarm Queue causes while leaving @@ -4056,8 +4341,7 @@ static irqreturn_t i40e_intr(int irq, void *data) set_bit(__I40E_RESET_INTR_RECEIVED, pf->state); ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK; val = rd32(hw, I40E_GLGEN_RSTAT); - val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) - >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; + val = FIELD_GET(I40E_GLGEN_RSTAT_RESET_TYPE_MASK, val); if (val == I40E_RESET_CORER) { pf->corer_count++; } else if (val == I40E_RESET_GLOBR) { @@ -4079,10 +4363,13 @@ static irqreturn_t i40e_intr(int irq, void *data) if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); - if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { - icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; + if (prttsyn_stat & I40E_PRTTSYN_STAT_0_EVENT0_MASK) + schedule_work(&pf->ptp_extts0_work); + + if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) i40e_ptp_tx_hwtstamp(pf); - } + + icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; } /* If a critical error is pending we have no choice but to reset the @@ -4195,7 +4482,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) i += tx_ring->count; tx_ring->next_to_clean = i; - if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx); return budget > 0; @@ -4308,9 +4595,9 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) struct i40e_pf *pf = vsi->back; int err; - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) err = i40e_vsi_request_irq_msix(vsi, basename); - else if (pf->flags & I40E_FLAG_MSI_ENABLED) + else if (test_bit(I40E_FLAG_MSI_ENA, pf->flags)) err = request_irq(pf->pdev->irq, i40e_intr, 0, pf->int_name, pf); else @@ -4342,7 +4629,7 @@ static void i40e_netpoll(struct net_device *netdev) if (test_bit(__I40E_VSI_DOWN, vsi->state)) return; - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { for (i = 0; i < vsi->num_q_vectors; i++) i40e_msix_clean_rings(0, vsi->q_vectors[i]); } else { @@ -4454,11 +4741,10 @@ int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, } /** - * i40e_vsi_control_tx - Start or stop a VSI's rings + * i40e_vsi_enable_tx - Start a VSI's rings * @vsi: the VSI being configured - * @enable: start or stop the rings **/ -static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) +static int i40e_vsi_enable_tx(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int i, pf_q, ret = 0; @@ -4467,7 +4753,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q, - false /*is xdp*/, enable); + false /*is xdp*/, true); if (ret) break; @@ -4476,7 +4762,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q + vsi->alloc_queue_pairs, - true /*is xdp*/, enable); + true /*is xdp*/, true); if (ret) break; } @@ -4574,32 +4860,25 @@ int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable) } /** - * i40e_vsi_control_rx - Start or stop a VSI's rings + * i40e_vsi_enable_rx - Start a VSI's rings * @vsi: the VSI being configured - * @enable: start or stop the rings **/ -static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) +static int i40e_vsi_enable_rx(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; 
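The FIELD_GET() conversions in this hunk replace the driver's open-coded "(val & MASK) >> SHIFT" pairs. A minimal sketch of the pattern, using a hypothetical register field (DEMO_RESET_TYPE_MASK is a stand-in, not the real I40E_GLGEN_RSTAT layout):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* hypothetical 3-bit field in bits 6:4 of a status register */
#define DEMO_RESET_TYPE_MASK	GENMASK(6, 4)

static inline u32 demo_reset_type(u32 rstat)
{
	/* FIELD_GET() masks and shifts in one step; the shift count is
	 * derived from the mask at compile time, so the companion
	 * *_SHIFT macro becomes unnecessary.
	 */
	return FIELD_GET(DEMO_RESET_TYPE_MASK, rstat);
}

With GENMASK(6, 4) == 0x70, demo_reset_type(0x35) evaluates to 3, the same result the removed mask-and-shift expressions would produce.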
int i, pf_q, ret = 0; pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { - ret = i40e_control_wait_rx_q(pf, pf_q, enable); + ret = i40e_control_wait_rx_q(pf, pf_q, true); if (ret) { dev_info(&pf->pdev->dev, - "VSI seid %d Rx ring %d %sable timeout\n", - vsi->seid, pf_q, (enable ? "en" : "dis")); + "VSI seid %d Rx ring %d enable timeout\n", + vsi->seid, pf_q); break; } } - /* Due to HW errata, on Rx disable only, the register can indicate done - * before it really is. Needs 50ms to be sure - */ - if (!enable) - mdelay(50); - return ret; } @@ -4612,29 +4891,43 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi) int ret = 0; /* do rx first for enable and last for disable */ - ret = i40e_vsi_control_rx(vsi, true); + ret = i40e_vsi_enable_rx(vsi); if (ret) return ret; - ret = i40e_vsi_control_tx(vsi, true); + ret = i40e_vsi_enable_tx(vsi); return ret; } +#define I40E_DISABLE_TX_GAP_MSEC 50 + /** * i40e_vsi_stop_rings - Stop a VSI's rings * @vsi: the VSI being configured **/ void i40e_vsi_stop_rings(struct i40e_vsi *vsi) { + struct i40e_pf *pf = vsi->back; + u32 pf_q, tx_q_end, rx_q_end; + /* When port TX is suspended, don't wait */ if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state)) return i40e_vsi_stop_rings_no_wait(vsi); - /* do rx first for enable and last for disable - * Ignore return value, we need to shutdown whatever we can - */ - i40e_vsi_control_tx(vsi, false); - i40e_vsi_control_rx(vsi, false); + tx_q_end = vsi->base_queue + + vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 2 : 1); + for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++) + i40e_pre_tx_queue_cfg(&pf->hw, pf_q, false); + + rx_q_end = vsi->base_queue + vsi->num_queue_pairs; + for (pf_q = vsi->base_queue; pf_q < rx_q_end; pf_q++) + i40e_control_rx_q(pf, pf_q, false); + + msleep(I40E_DISABLE_TX_GAP_MSEC); + for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++) + wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0); + + i40e_vsi_wait_queues_disabled(vsi); } /** @@ -4672,7 +4965,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) u32 val, qp; int i; - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { if (!vsi->q_vectors) return; @@ -4695,8 +4988,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) /* clear the affinity notifier in the IRQ descriptor */ irq_set_affinity_notifier(irq_num, NULL); /* remove our suggested affinity mask for this IRQ */ - irq_set_affinity_hint(irq_num, NULL); - synchronize_irq(irq_num); + irq_update_affinity_hint(irq_num, NULL); free_irq(irq_num, vsi->q_vectors[i]); /* Tear down the interrupt queue link list @@ -4707,8 +4999,8 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) * next_q field of the registers. 
*/ val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); - qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) - >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; + qp = FIELD_GET(I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK, + val); val |= I40E_QUEUE_END_OF_LIST << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); @@ -4730,8 +5022,8 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) val = rd32(hw, I40E_QINT_TQCTL(qp)); - next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK) - >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT; + next = FIELD_GET(I40E_QINT_TQCTL_NEXTQ_INDX_MASK, + val); val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | I40E_QINT_TQCTL_MSIX0_INDX_MASK | @@ -4749,8 +5041,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) free_irq(pf->pdev->irq, pf); val = rd32(hw, I40E_PFINT_LNKLST0); - qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) - >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; + qp = FIELD_GET(I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK, val); val |= I40E_QUEUE_END_OF_LIST << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; wr32(hw, I40E_PFINT_LNKLST0, val); @@ -4835,16 +5126,17 @@ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) static void i40e_reset_interrupt_capability(struct i40e_pf *pf) { /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { pci_disable_msix(pf->pdev); kfree(pf->msix_entries); pf->msix_entries = NULL; kfree(pf->irq_pile); pf->irq_pile = NULL; - } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { + } else if (test_bit(I40E_FLAG_MSI_ENA, pf->flags)) { pci_disable_msi(pf->pdev); } - pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); + clear_bit(I40E_FLAG_MSI_ENA, pf->flags); + clear_bit(I40E_FLAG_MSIX_ENA, pf->flags); } /** @@ -4856,17 +5148,20 @@ static void i40e_reset_interrupt_capability(struct i40e_pf *pf) **/ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) { + struct i40e_vsi *vsi; int i; - i40e_free_misc_vector(pf); + if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) + i40e_free_misc_vector(pf); i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector, I40E_IWARP_IRQ_PILE_ID); i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); - for (i = 0; i < pf->num_alloc_vsi; i++) - if (pf->vsi[i]) - i40e_vsi_free_q_vectors(pf->vsi[i]); + + i40e_pf_for_each_vsi(pf, i, vsi) + i40e_vsi_free_q_vectors(vsi); + i40e_reset_interrupt_capability(pf); } @@ -4963,12 +5258,11 @@ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) **/ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) { + struct i40e_vsi *vsi; int v; - for (v = 0; v < pf->num_alloc_vsi; v++) { - if (pf->vsi[v]) - i40e_quiesce_vsi(pf->vsi[v]); - } + i40e_pf_for_each_vsi(pf, v, vsi) + i40e_quiesce_vsi(vsi); } /** @@ -4977,12 +5271,11 @@ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) **/ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) { + struct i40e_vsi *vsi; int v; - for (v = 0; v < pf->num_alloc_vsi; v++) { - if (pf->vsi[v]) - i40e_unquiesce_vsi(pf->vsi[v]); - } + i40e_pf_for_each_vsi(pf, v, vsi) + i40e_unquiesce_vsi(vsi); } /** @@ -5043,14 +5336,13 @@ wait_rx: **/ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf) { + struct i40e_vsi *vsi; int v, ret = 0; - for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { - if (pf->vsi[v]) { - ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); - if (ret) - break; - } + i40e_pf_for_each_vsi(pf, v, vsi) { + ret = i40e_vsi_wait_queues_disabled(vsi); + if (ret) + break; } return ret; @@ -5157,7 +5449,7 @@ static u8 
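The loops converted above, previously "for (v = 0; v < pf->num_alloc_vsi; v++) if (pf->vsi[v]) ...", now use the i40e_pf_for_each_vsi() iterator. Its real definition lives in i40e.h, not in this file; the following is only a sketch of the likely shape, with demo_* names standing in for the driver's own:

/* illustrative only -- folds the NULL check for unallocated VSI
 * slots into the iterator, so the loop body never sees a NULL vsi
 */
#define demo_pf_for_each_vsi(pf, i, vsi) \
	for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++) \
		if (((vsi) = (pf)->vsi[(i)]))

Call sites then read as a single statement, e.g. demo_pf_for_each_vsi(pf, v, vsi) i40e_quiesce_vsi(vsi); the cost of the bare trailing if is the usual if-without-else hazard, which iterator macros of this kind commonly sidestep by inverting the test or adding an empty else.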
i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) **/ static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); u8 num_tc = vsi->mqprio_qopt.qopt.num_tc; u8 enabled_tc = 1, i; @@ -5174,21 +5466,22 @@ static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf) **/ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) { - struct i40e_hw *hw = &pf->hw; u8 i, enabled_tc = 1; u8 num_tc = 0; - struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; - if (pf->flags & I40E_FLAG_TC_MQPRIO) - return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc; + if (i40e_is_tc_mqprio_enabled(pf)) { + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); + + return vsi->mqprio_qopt.qopt.num_tc; + } /* If neither MQPRIO nor DCB is enabled, then always use single TC */ - if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) + if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) return 1; /* SFP mode will be enabled for all TCs on port */ - if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) - return i40e_dcb_get_num_tc(dcbcfg); + if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags)) + return i40e_dcb_get_num_tc(&pf->hw.local_dcbx_config); /* MFP mode return count of enabled TCs for this PF */ if (pf->hw.func_caps.iscsi) @@ -5211,17 +5504,17 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) **/ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) { - if (pf->flags & I40E_FLAG_TC_MQPRIO) + if (i40e_is_tc_mqprio_enabled(pf)) return i40e_mqprio_get_enabled_tc(pf); /* If neither MQPRIO nor DCB is enabled for this PF then just return * default TC */ - if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) + if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) return I40E_DEFAULT_TRAFFIC_CLASS; /* SFP mode we want PF to be enabled for all TCs */ - if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) + if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags)) return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); /* MFP enabled and iSCSI PF type */ @@ -5243,17 +5536,17 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; - i40e_status ret; u32 tc_bw_max; + int ret; int i; /* Get the VSI level BW configuration */ ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); if (ret) { dev_info(&pf->pdev->dev, - "couldn't get PF vsi bw config, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "couldn't get PF vsi bw config, err %pe aq_err %s\n", + ERR_PTR(ret), + libie_aq_str(pf->hw.aq.asq_last_status)); return -EINVAL; } @@ -5262,9 +5555,9 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) NULL); if (ret) { dev_info(&pf->pdev->dev, - "couldn't get PF vsi ets bw config, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "couldn't get PF vsi ets bw config, err %pe aq_err %s\n", + ERR_PTR(ret), + libie_aq_str(pf->hw.aq.asq_last_status)); return -EINVAL; } @@ -5304,13 +5597,13 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, { struct i40e_aqc_configure_vsi_tc_bw_data bw_data; struct i40e_pf *pf = vsi->back; - i40e_status ret; + int ret; int i; /* There is no need to reset BW when mqprio mode is on. 
*/ - if (pf->flags & I40E_FLAG_TC_MQPRIO) + if (i40e_is_tc_mqprio_enabled(pf)) return 0; - if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) { + if (!vsi->mqprio_qopt.qopt.hw && !test_bit(I40E_FLAG_DCB_ENA, pf->flags)) { ret = i40e_set_bw_limit(vsi, vsi->seid, 0); if (ret) dev_info(&pf->pdev->dev, @@ -5380,7 +5673,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) vsi->tc_config.tc_info[i].qoffset); } - if (pf->flags & I40E_FLAG_TC_MQPRIO) + if (i40e_is_tc_mqprio_enabled(pf)) return; /* Assign UP2TC map for the VSI */ @@ -5413,6 +5706,58 @@ static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi, } /** + * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI + * @vsi: the VSI being reconfigured + * @vsi_offset: offset from main VF VSI + */ +int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset) +{ + struct i40e_vsi_context ctxt = {}; + struct i40e_pf *pf; + struct i40e_hw *hw; + int ret; + + if (!vsi) + return -EINVAL; + pf = vsi->back; + hw = &pf->hw; + + ctxt.seid = vsi->seid; + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; + ctxt.flags = I40E_AQ_VSI_TYPE_VF; + ctxt.info = vsi->info; + + i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc, + false); + if (vsi->reconfig_rss) { + vsi->rss_size = min_t(int, pf->alloc_rss_size, + vsi->num_queue_pairs); + ret = i40e_vsi_config_rss(vsi); + if (ret) { + dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n"); + return ret; + } + vsi->reconfig_rss = false; + } + + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret) { + dev_info(&pf->pdev->dev, "Update vsi config failed, err %pe aq_err %s\n", + ERR_PTR(ret), + libie_aq_str(hw->aq.asq_last_status)); + return ret; + } + /* update the local VSI info with updated queue map */ + i40e_vsi_update_queue_map(vsi, &ctxt); + vsi->info.valid_sections = 0; + + return ret; +} + +/** * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map * @vsi: VSI to be configured * @enabled_tc: TC bitmap @@ -5456,9 +5801,9 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) &bw_config, NULL); if (ret) { dev_info(&pf->pdev->dev, - "Failed querying vsi bw info, err %s aq_err %s\n", - i40e_stat_str(hw, ret), - i40e_aq_str(hw, hw->aq.asq_last_status)); + "Failed querying vsi bw info, err %pe aq_err %s\n", + ERR_PTR(ret), + libie_aq_str(hw->aq.asq_last_status)); goto out; } if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) { @@ -5489,7 +5834,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; ctxt.info = vsi->info; - if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) { + if (i40e_is_tc_mqprio_enabled(pf)) { ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc); if (ret) goto out; @@ -5511,7 +5856,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) } vsi->reconfig_rss = false; } - if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { + if (test_bit(I40E_FLAG_IWARP_ENA, vsi->back->flags)) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA; @@ -5523,9 +5868,9 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, - "Update vsi tc config failed, err %s aq_err %s\n", - 
i40e_stat_str(hw, ret), - i40e_aq_str(hw, hw->aq.asq_last_status)); + "Update vsi tc config failed, err %pe aq_err %s\n", + ERR_PTR(ret), + libie_aq_str(hw->aq.asq_last_status)); goto out; } /* update the local VSI info with updated queue map */ @@ -5536,9 +5881,9 @@ ret = i40e_vsi_get_bw_info(vsi); if (ret) { dev_info(&pf->pdev->dev, - "Failed updating vsi bw info, err %s aq_err %s\n", - i40e_stat_str(hw, ret), - i40e_aq_str(hw, hw->aq.asq_last_status)); + "Failed updating vsi bw info, err %pe aq_err %s\n", + ERR_PTR(ret), + libie_aq_str(hw->aq.asq_last_status)); goto out; } @@ -5549,6 +5894,28 @@ out: } /** + * i40e_vsi_reconfig_tc - Reconfigure VSI Tx Scheduler for stored TC map + * @vsi: VSI to be reconfigured + * + * This reconfigures a particular VSI for TCs that are mapped to the + * TC bitmap stored previously for the VSI. + * + * Context: It is expected that the VSI queues have been quiesced before + * calling this function. + * + * Return: 0 on success, negative value on failure + **/ +static int i40e_vsi_reconfig_tc(struct i40e_vsi *vsi) +{ + u8 enabled_tc; + + enabled_tc = vsi->tc_config.enabled_tc; + vsi->tc_config.enabled_tc = 0; + + return i40e_vsi_config_tc(vsi, enabled_tc); +} + +/** * i40e_get_link_speed - Returns link speed for the interface * @vsi: VSI to be configured * @@ -5574,6 +5941,26 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi) } /** + * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to Mbits + * @vsi: Pointer to vsi structure + * @max_tx_rate: max TX rate in bytes to be converted into Mbits + * + * Helper function to convert units before they are sent to set the BW limit + **/ +static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate) +{ + if (max_tx_rate < I40E_BW_MBPS_DIVISOR) { + dev_warn(&vsi->back->pdev->dev, + "Setting max tx rate to minimum usable value of 50Mbps.\n"); + max_tx_rate = I40E_BW_CREDIT_DIVISOR; + } else { + do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); + } + + return max_tx_rate; +} + +/** * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate * @vsi: VSI to be configured * @seid: seid of the channel/VSI * @@ -5595,10 +5982,10 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate) max_tx_rate, seid); return -EINVAL; } - if (max_tx_rate && max_tx_rate < 50) { + if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) { dev_warn(&pf->pdev->dev, "Setting max tx rate to minimum usable value of 50Mbps.\n"); - max_tx_rate = 50; + max_tx_rate = I40E_BW_CREDIT_DIVISOR; } /* Tx rate credits are in values of 50Mbps, 0 is disabled */ @@ -5608,9 +5995,9 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate) I40E_MAX_BW_INACTIVE_ACCUM, NULL); if (ret) dev_err(&pf->pdev->dev, - "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n", - max_tx_rate, seid, i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %pe aq_err %s\n", + max_tx_rate, seid, ERR_PTR(ret), + libie_aq_str(pf->hw.aq.asq_last_status)); return ret; } @@ -5622,8 +6009,8 @@ **/ static void i40e_remove_queue_channels(struct i40e_vsi *vsi) { - enum i40e_admin_queue_err last_aq_status; struct i40e_cloud_filter *cfilter; + enum libie_aq_err last_aq_status; struct i40e_channel *ch, *ch_tmp; struct i40e_pf *pf = vsi->back; struct hlist_node *node; @@ -5684,9 +6071,9 @@ static void
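i40e_bw_bytes_to_mbits() and the I40E_BW_CREDIT_DIVISOR substitution above encode the same unit chain: mqprio hands the driver a rate in bytes per second, while the admin queue takes the limit in 50 Mbps credits. A self-contained sketch of that arithmetic; the divisor values (125000 bytes/s per Mbit/s, 50 Mbps per credit) are assumptions consistent with the "50Mbps" comments above, not values stated in this diff, and all demo_* names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define DEMO_BW_MBPS_DIVISOR	125000ULL /* bytes/s in 1 Mbit/s */
#define DEMO_BW_CREDIT_DIVISOR	50ULL	  /* Mbps per scheduler credit */

/* folds the helper's byte->Mbit conversion and the 50 Mbps floor
 * applied by i40e_set_bw_limit() into a single function
 */
static uint64_t demo_bytes_to_credits(uint64_t bytes_per_sec)
{
	uint64_t mbps = bytes_per_sec / DEMO_BW_MBPS_DIVISOR;

	if (mbps < DEMO_BW_CREDIT_DIVISOR) /* clamp up to one credit */
		mbps = DEMO_BW_CREDIT_DIVISOR;
	return mbps / DEMO_BW_CREDIT_DIVISOR;
}

int main(void)
{
	/* 37500000 B/s = 300 Mbit/s -> 6 credits of 50 Mbps */
	printf("%llu\n",
	       (unsigned long long)demo_bytes_to_credits(37500000ULL));
	return 0;
}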
i40e_remove_queue_channels(struct i40e_vsi *vsi) last_aq_status = pf->hw.aq.asq_last_status; if (ret) dev_info(&pf->pdev->dev, - "Failed to delete cloud filter, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, last_aq_status)); + "Failed to delete cloud filter, err %pe aq_err %s\n", + ERR_PTR(ret), + libie_aq_str(last_aq_status)); kfree(cfilter); } @@ -5703,24 +6090,6 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi) } /** - * i40e_is_any_channel - channel exist or not - * @vsi: ptr to VSI to which channels are associated with - * - * Returns true or false if channel(s) exist for associated VSI or not - **/ -static bool i40e_is_any_channel(struct i40e_vsi *vsi) -{ - struct i40e_channel *ch, *ch_tmp; - - list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { - if (ch->initialized) - return true; - } - - return false; -} - -/** * i40e_get_max_queues_for_channel * @vsi: ptr to VSI to which channels are associated with * @@ -5837,9 +6206,9 @@ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size) ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); if (ret) { dev_info(&pf->pdev->dev, - "Cannot set RSS lut, err %s aq_err %s\n", - i40e_stat_str(hw, ret), - i40e_aq_str(hw, hw->aq.asq_last_status)); + "Cannot set RSS lut, err %pe aq_err %s\n", + ERR_PTR(ret), + libie_aq_str(hw->aq.asq_last_status)); kfree(lut); return ret; } @@ -5922,7 +6291,7 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid, if (ch->type == I40E_VSI_VMDQ2) ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; - if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) { + if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id = @@ -5936,10 +6305,9 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid, ret = i40e_aq_add_vsi(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, - "add new vsi failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + "add new vsi failed, err %pe aq_err %s\n", + ERR_PTR(ret), + libie_aq_str(pf->hw.aq.asq_last_status)); return -ENOENT; } @@ -5968,7 +6336,7 @@ static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch, u8 *bw_share) { struct i40e_aqc_configure_vsi_tc_bw_data bw_data; - i40e_status ret; + int ret; int i; memset(&bw_data, 0, sizeof(bw_data)); @@ -6004,9 +6372,9 @@ static int i40e_channel_config_tx_ring(struct i40e_pf *pf, struct i40e_vsi *vsi, struct i40e_channel *ch) { - i40e_status ret; - int i; u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0}; + int ret; + int i; /* Enable ETS TCs with equal BW Share for now across all VSIs */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { @@ -6108,6 +6476,7 @@ static inline int i40e_setup_hw_channel(struct i40e_pf *pf, static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi, struct i40e_channel *ch) { + struct i40e_vsi *main_vsi; u8 vsi_type; u16 seid; int ret; @@ -6121,7 +6490,8 @@ static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi, } /* underlying switching element */ - seid = pf->vsi[pf->lan_vsi]->uplink_seid; + main_vsi = i40e_pf_get_main_vsi(pf); + seid = main_vsi->uplink_seid; /* create channel (VSI), configure TX rings */ ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type); @@ -6180,12 +6550,10 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi) ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags, pf->last_sw_conf_valid_flags, mode, NULL); - if (ret && 
hw->aq.asq_last_status != I40E_AQ_RC_ESRCH) + if (ret && hw->aq.asq_last_status != LIBIE_AQ_RC_ESRCH) dev_err(&pf->pdev->dev, - "couldn't set switch config bits, err %s aq_err %s\n", - i40e_stat_str(hw, ret), - i40e_aq_str(hw, - hw->aq.asq_last_status)); + "couldn't set switch config bits, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status)); return ret; } @@ -6226,26 +6594,15 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi, /* By default we are in VEPA mode, if this is the first VF/VMDq * VSI to be added switch to VEB mode. */ - if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) || - (!i40e_is_any_channel(vsi))) { - if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) { - dev_dbg(&pf->pdev->dev, - "Failed to create channel. Override queues (%u) not power of 2\n", - vsi->tc_config.tc_info[0].qcount); - return -EINVAL; - } - if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { - pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { + set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); - if (vsi->type == I40E_VSI_MAIN) { - if (pf->flags & I40E_FLAG_TC_MQPRIO) - i40e_do_reset(pf, I40E_PF_RESET_FLAG, - true); - else - i40e_do_reset_safe(pf, - I40E_PF_RESET_FLAG); - } + if (vsi->type == I40E_VSI_MAIN) { + if (i40e_is_tc_mqprio_enabled(pf)) + i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); + else + i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); } /* now onwards for main VSI, number of queues will be value * of TC0's queue count @@ -6353,6 +6710,9 @@ static int i40e_configure_queue_channels(struct i40e_vsi *vsi) vsi->tc_seid_map[i] = ch->seid; } } + + /* reset to reconfigure TX queue contexts */ + i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true); return ret; err_free: @@ -6391,9 +6751,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) &bw_data, NULL); if (ret) { dev_info(&pf->pdev->dev, - "VEB bw config failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "VEB bw config failed, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status)); goto out; } @@ -6401,9 +6760,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) ret = i40e_veb_get_bw_info(veb); if (ret) { dev_info(&pf->pdev->dev, - "Failed getting veb bw config, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "Failed getting veb bw config, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status)); } out: @@ -6421,51 +6779,48 @@ out: **/ static void i40e_dcb_reconfigure(struct i40e_pf *pf) { + struct i40e_vsi *vsi; + struct i40e_veb *veb; u8 tc_map = 0; int ret; - u8 v; + int v; /* Enable the TCs available on PF to all VEBs */ tc_map = i40e_pf_get_tc_map(pf); if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS) return; - for (v = 0; v < I40E_MAX_VEB; v++) { - if (!pf->veb[v]) - continue; - ret = i40e_veb_config_tc(pf->veb[v], tc_map); + i40e_pf_for_each_veb(pf, v, veb) { + ret = i40e_veb_config_tc(veb, tc_map); if (ret) { dev_info(&pf->pdev->dev, "Failed configuring TC for VEB seid=%d\n", - pf->veb[v]->seid); + veb->seid); /* Will try to configure as many components */ } } /* Update each VSI */ - for (v = 0; v < pf->num_alloc_vsi; v++) { - if (!pf->vsi[v]) - continue; - + i40e_pf_for_each_vsi(pf, v, vsi) { /* - Enable all TCs for the LAN VSI * - For all others keep them at TC0 for now */ - if (v == pf->lan_vsi) + if (vsi->type == I40E_VSI_MAIN) tc_map = i40e_pf_get_tc_map(pf); else tc_map = I40E_DEFAULT_TRAFFIC_CLASS; - 
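Most of this hunk is the mechanical flag conversion: pf->flags stops being a plain integer and becomes a bitmap, so "flags & BIT" reads become test_bit(), "|=" becomes set_bit(), and "&= ~" becomes clear_bit(). A minimal sketch of the destination pattern with a hypothetical flag set (the real enum and its *_ENA names live in the driver headers, not here):

#include <linux/bitmap.h>
#include <linux/bitops.h>

enum demo_pf_flags {
	DEMO_FLAG_DCB_ENA,
	DEMO_FLAG_MSIX_ENA,
	DEMO_PF_FLAGS_NBITS,	/* must be last */
};

struct demo_pf {
	DECLARE_BITMAP(flags, DEMO_PF_FLAGS_NBITS);
};

static void demo_flags_usage(struct demo_pf *pf)
{
	set_bit(DEMO_FLAG_MSIX_ENA, pf->flags);		 /* was: flags |= BIT  */
	if (test_bit(DEMO_FLAG_DCB_ENA, pf->flags))	 /* was: flags & BIT   */
		clear_bit(DEMO_FLAG_DCB_ENA, pf->flags); /* was: flags &= ~BIT */
}

Unlike the old read-modify-write on a shared word, each *_bit() operation is atomic per bit, and the enum can grow past 64 flags without touching any call site.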
ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); + ret = i40e_vsi_config_tc(vsi, tc_map); if (ret) { dev_info(&pf->pdev->dev, "Failed configuring TC for VSI seid=%d\n", - pf->vsi[v]->seid); + vsi->seid); /* Will try to configure as many components */ } else { /* Re-configure VSI vectors based on updated TC map */ - i40e_vsi_map_rings_to_vectors(pf->vsi[v]); - if (pf->vsi[v]->netdev) - i40e_dcbnl_set_all(pf->vsi[v]); + i40e_vsi_map_rings_to_vectors(vsi); + if (vsi->netdev) + i40e_dcbnl_set_all(vsi); } } } @@ -6485,9 +6840,9 @@ static int i40e_resume_port_tx(struct i40e_pf *pf) ret = i40e_aq_resume_port_tx(hw, NULL); if (ret) { dev_info(&pf->pdev->dev, - "Resume Port Tx failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "Resume Port Tx failed, err %pe aq_err %s\n", + ERR_PTR(ret), + libie_aq_str(pf->hw.aq.asq_last_status)); /* Schedule PF reset to recover */ set_bit(__I40E_PF_RESET_REQUESTED, pf->state); i40e_service_event_schedule(pf); @@ -6510,9 +6865,8 @@ static int i40e_suspend_port_tx(struct i40e_pf *pf) ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL); if (ret) { dev_info(&pf->pdev->dev, - "Suspend Port Tx failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "Suspend Port Tx failed, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status)); /* Schedule PF reset to recover */ set_bit(__I40E_PF_RESET_REQUESTED, pf->state); i40e_service_event_schedule(pf); @@ -6550,9 +6904,8 @@ static int i40e_hw_set_dcb_config(struct i40e_pf *pf, ret = i40e_set_dcb_config(&pf->hw); if (ret) { dev_info(&pf->pdev->dev, - "Set DCB Config failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "Set DCB Config failed, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status)); goto out; } @@ -6647,9 +7000,9 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg) if (need_reconfig) { /* Enable DCB tagging only when more than one TC */ if (new_numtc > 1) - pf->flags |= I40E_FLAG_DCB_ENABLED; + set_bit(I40E_FLAG_DCB_ENA, pf->flags); else - pf->flags &= ~I40E_FLAG_DCB_ENABLED; + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); set_bit(__I40E_PORT_SUSPENDED, pf->state); /* Reconfiguration needed quiesce all VSIs */ @@ -6667,9 +7020,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg) i40e_aqc_opc_modify_switching_comp_ets, NULL); if (ret) { dev_info(&pf->pdev->dev, - "Modify Port ETS failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "Modify Port ETS failed, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status)); goto out; } @@ -6689,7 +7041,9 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg) /* Configure Rx Packet Buffers in HW */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu; + struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf); + + mfs_tc[i] = main_vsi->netdev->mtu; mfs_tc[i] += I40E_PACKET_HDR_PAD; } @@ -6705,9 +7059,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg) ret = i40e_aq_dcb_updated(&pf->hw, NULL); if (ret) { dev_info(&pf->pdev->dev, - "DCB Updated failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "DCB Updated failed, err %pe aq_err %s\n", + ERR_PTR(ret), 
libie_aq_str(pf->hw.aq.asq_last_status)); goto out; } @@ -6739,7 +7092,7 @@ out: set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); } /* registers are set, lets apply */ - if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) + if (test_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, pf->hw.caps)) ret = i40e_hw_set_dcb_config(pf, new_cfg); } @@ -6760,7 +7113,7 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf) struct i40e_hw *hw = &pf->hw; int err; - if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) { + if (test_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, pf->hw.caps)) { /* Update the local cached instance with TC0 ETS */ memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config)); pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING; @@ -6789,9 +7142,8 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf) i40e_aqc_opc_enable_switching_comp_ets, NULL); if (err) { dev_info(&pf->pdev->dev, - "Enable Port ETS failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, err), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "Enable Port ETS failed, err %pe aq_err %s\n", + ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status)); err = -ENOENT; goto out; } @@ -6821,12 +7173,12 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) /* Do not enable DCB for SW1 and SW2 images even if the FW is capable * Also do not enable DCBx if FW LLDP agent is disabled */ - if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) { + if (test_bit(I40E_HW_CAP_NO_DCB_SUPPORT, pf->hw.caps)) { dev_info(&pf->pdev->dev, "DCB is not supported.\n"); - err = I40E_NOT_SUPPORTED; + err = -EOPNOTSUPP; goto out; } - if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) { + if (test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)) { dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n"); err = i40e_dcb_sw_default_config(pf); if (err) { @@ -6837,8 +7189,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; /* at init capable but disabled */ - pf->flags |= I40E_FLAG_DCB_CAPABLE; - pf->flags &= ~I40E_FLAG_DCB_ENABLED; + set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); goto out; } err = i40e_init_dcb(hw, true); @@ -6853,25 +7205,24 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE; - pf->flags |= I40E_FLAG_DCB_CAPABLE; + set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); /* Enable DCB tagging only when more than one TC * or explicitly disable if only one TC */ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) - pf->flags |= I40E_FLAG_DCB_ENABLED; + set_bit(I40E_FLAG_DCB_ENA, pf->flags); else - pf->flags &= ~I40E_FLAG_DCB_ENABLED; + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); dev_dbg(&pf->pdev->dev, "DCBX offload is supported for this PF.\n"); } - } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) { + } else if (pf->hw.aq.asq_last_status == LIBIE_AQ_RC_EPERM) { dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n"); - pf->flags |= I40E_FLAG_DISABLE_FW_LLDP; + set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags); } else { dev_info(&pf->pdev->dev, - "Query for DCB configuration failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, err), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "Query for DCB configuration failed, err %pe aq_err %s\n", + ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status)); } out: @@ -6879,6 +7230,26 @@ out: } #endif /* CONFIG_I40E_DCB */ +static void i40e_print_link_message_eee(struct i40e_vsi *vsi, + const char *speed, const char *fc) +{ + struct ethtool_keee kedata; + + memzero_explicit(&kedata, 
sizeof(kedata)); + if (vsi->netdev->ethtool_ops->get_eee) + vsi->netdev->ethtool_ops->get_eee(vsi->netdev, &kedata); + + if (!linkmode_empty(kedata.supported)) + netdev_info(vsi->netdev, + "NIC Link is Up, %sbps Full Duplex, Flow Control: %s, EEE: %s\n", + speed, fc, + kedata.eee_enabled ? "Enabled" : "Disabled"); + else + netdev_info(vsi->netdev, + "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n", + speed, fc); +} + /** * i40e_print_link_message - print link up or down * @vsi: the VSI for which link needs a message @@ -7010,9 +7381,7 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", speed, req_fec, fec, an, fc); } else { - netdev_info(vsi->netdev, - "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n", - speed, fc); + i40e_print_link_message_eee(vsi, speed, fc); } } @@ -7026,7 +7395,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi) struct i40e_pf *pf = vsi->back; int err; - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) i40e_vsi_configure_msix(vsi); else i40e_configure_msi_and_legacy(vsi); @@ -7088,15 +7457,15 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) * @pf: board private structure * @is_up: whether the link state should be forced up or down **/ -static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) +static int i40e_force_link_state(struct i40e_pf *pf, bool is_up) { struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_aq_set_phy_config config = {0}; bool non_zero_phy_type = is_up; struct i40e_hw *hw = &pf->hw; - i40e_status err; u64 mask; u8 speed; + int err; /* Card might've been put in an unstable state by other drivers * and applications, which causes incorrect speed values being @@ -7108,9 +7477,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) NULL); if (err) { dev_err(&pf->pdev->dev, - "failed to get phy cap., ret = %s last_status = %s\n", - i40e_stat_str(hw, err), - i40e_aq_str(hw, hw->aq.asq_last_status)); + "failed to get phy cap., ret = %pe last_status = %s\n", + ERR_PTR(err), libie_aq_str(hw->aq.asq_last_status)); return err; } speed = abilities.link_speed; @@ -7120,9 +7488,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) NULL); if (err) { dev_err(&pf->pdev->dev, - "failed to get phy cap., ret = %s last_status = %s\n", - i40e_stat_str(hw, err), - i40e_aq_str(hw, hw->aq.asq_last_status)); + "failed to get phy cap., ret = %pe last_status = %s\n", + ERR_PTR(err), libie_aq_str(hw->aq.asq_last_status)); return err; } @@ -7130,10 +7497,10 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) * and its speed values are OK, no need for a flap * if non_zero_phy_type was set, still need to force up */ - if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) + if (test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) non_zero_phy_type = true; else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0) - return I40E_SUCCESS; + return 0; /* To force link we need to set bits for all supported PHY types, * but there are now more than 32, so we need to split the bitmap @@ -7146,7 +7513,7 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) non_zero_phy_type ? 
(u8)((mask >> 32) & 0xff) : 0; /* Copy the old settings, except of phy_type */ config.abilities = abilities.abilities; - if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) { + if (test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) { if (is_up) config.abilities |= I40E_AQ_PHY_ENABLE_LINK; else @@ -7165,9 +7532,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) if (err) { dev_err(&pf->pdev->dev, - "set phy config ret = %s last_status = %s\n", - i40e_stat_str(&pf->hw, err), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "set phy config ret = %pe last_status = %s\n", + ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status)); return err; } @@ -7184,7 +7550,7 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) i40e_aq_set_link_restart_an(hw, is_up, NULL); - return I40E_SUCCESS; + return 0; } /** @@ -7196,8 +7562,8 @@ int i40e_up(struct i40e_vsi *vsi) int err; if (vsi->type == I40E_VSI_MAIN && - (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED || - vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) + (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) || + test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, vsi->back->flags))) i40e_force_link_state(vsi->back, true); err = i40e_vsi_configure(vsi); @@ -7225,8 +7591,8 @@ void i40e_down(struct i40e_vsi *vsi) i40e_vsi_disable_irq(vsi); i40e_vsi_stop_rings(vsi); if (vsi->type == I40E_VSI_MAIN && - (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED || - vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) + (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) || + test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, vsi->back->flags))) i40e_force_link_state(vsi->back, false); i40e_napi_disable_all(vsi); @@ -7280,6 +7646,8 @@ static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi, } if (vsi->num_queue_pairs < (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) { + dev_err(&vsi->back->pdev->dev, + "Failed to create traffic channel, insufficient number of queues.\n"); return -EINVAL; } if (sum_max_rate > i40e_get_link_speed(vsi)) { @@ -7327,11 +7695,11 @@ static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi) * This function deletes a mac filter on the channel VSI which serves as the * macvlan. Returns 0 on success. **/ -static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid, - const u8 *macaddr, int *aq_err) +static int i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid, + const u8 *macaddr, int *aq_err) { struct i40e_aqc_remove_macvlan_element_data element; - i40e_status status; + int status; memset(&element, 0, sizeof(element)); ether_addr_copy(element.mac_addr, macaddr); @@ -7353,12 +7721,12 @@ static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid, * This function adds a mac filter on the channel VSI which serves as the * macvlan. Returns 0 on success. 
**/ -static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid, - const u8 *macaddr, int *aq_err) +static int i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid, + const u8 *macaddr, int *aq_err) { struct i40e_aqc_add_macvlan_element_data element; - i40e_status status; u16 cmd_flags = 0; + int status; ether_addr_copy(element.mac_addr, macaddr); element.vlan_tag = 0; @@ -7448,42 +7816,43 @@ static void i40e_free_macvlan_channels(struct i40e_vsi *vsi) static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev, struct i40e_fwd_adapter *fwd) { + struct i40e_channel *ch = NULL, *ch_tmp, *iter; int ret = 0, num_tc = 1, i, aq_err; - struct i40e_channel *ch, *ch_tmp; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; - if (list_empty(&vsi->macvlan_list)) - return -EINVAL; - /* Go through the list and find an available channel */ - list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { - if (!i40e_is_channel_macvlan(ch)) { - ch->fwd = fwd; + list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) { + if (!i40e_is_channel_macvlan(iter)) { + iter->fwd = fwd; /* record configuration for macvlan interface in vdev */ for (i = 0; i < num_tc; i++) netdev_bind_sb_channel_queue(vsi->netdev, vdev, i, - ch->num_queue_pairs, - ch->base_queue); - for (i = 0; i < ch->num_queue_pairs; i++) { + iter->num_queue_pairs, + iter->base_queue); + for (i = 0; i < iter->num_queue_pairs; i++) { struct i40e_ring *tx_ring, *rx_ring; u16 pf_q; - pf_q = ch->base_queue + i; + pf_q = iter->base_queue + i; /* Get to TX ring ptr */ tx_ring = vsi->tx_rings[pf_q]; - tx_ring->ch = ch; + tx_ring->ch = iter; /* Get the RX ring ptr */ rx_ring = vsi->rx_rings[pf_q]; - rx_ring->ch = ch; + rx_ring->ch = iter; } + ch = iter; break; } } + if (!ch) + return -EINVAL; + /* Guarantee all rings are updated before we update the * MAC address filter. 
*/ @@ -7503,9 +7872,8 @@ static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev, rx_ring->netdev = NULL; } dev_info(&pf->pdev->dev, - "Error adding mac filter on macvlan err %s, aq_err %s\n", - i40e_stat_str(hw, ret), - i40e_aq_str(hw, aq_err)); + "Error adding mac filter on macvlan err %pe, aq_err %s\n", + ERR_PTR(ret), libie_aq_str(aq_err)); netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n"); } @@ -7576,9 +7944,8 @@ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt, ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, - "Update vsi tc config failed, err %s aq_err %s\n", - i40e_stat_str(hw, ret), - i40e_aq_str(hw, hw->aq.asq_last_status)); + "Update vsi tc config failed, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status)); return ret; } /* update the local VSI info with updated queue map */ @@ -7629,11 +7996,11 @@ static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev) struct i40e_fwd_adapter *fwd; int avail_macvlan, ret; - if ((pf->flags & I40E_FLAG_DCB_ENABLED)) { + if (test_bit(I40E_FLAG_DCB_ENA, pf->flags)) { netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n"); return ERR_PTR(-EINVAL); } - if ((pf->flags & I40E_FLAG_TC_MQPRIO)) { + if (i40e_is_tc_mqprio_enabled(pf)) { netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n"); return ERR_PTR(-EINVAL); } @@ -7792,9 +8159,8 @@ static void i40e_fwd_del(struct net_device *netdev, void *vdev) ch->fwd = NULL; } else { dev_info(&pf->pdev->dev, - "Error deleting mac filter on macvlan err %s, aq_err %s\n", - i40e_stat_str(hw, ret), - i40e_aq_str(hw, aq_err)); + "Error deleting mac filter on macvlan err %pe, aq_err %s\n", + ERR_PTR(ret), libie_aq_str(aq_err)); } break; } @@ -7824,23 +8190,23 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data) hw = mqprio_qopt->qopt.hw; mode = mqprio_qopt->mode; if (!hw) { - pf->flags &= ~I40E_FLAG_TC_MQPRIO; + clear_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags); memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); goto config_tc; } /* Check if MFP enabled */ - if (pf->flags & I40E_FLAG_MFP_ENABLED) { + if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { netdev_info(netdev, "Configuring TC not supported in MFP mode\n"); return ret; } switch (mode) { case TC_MQPRIO_MODE_DCB: - pf->flags &= ~I40E_FLAG_TC_MQPRIO; + clear_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags); /* Check if DCB enabled to continue */ - if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { + if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) { netdev_info(netdev, "DCB is not enabled for adapter\n"); return ret; @@ -7854,20 +8220,20 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data) } break; case TC_MQPRIO_MODE_CHANNEL: - if (pf->flags & I40E_FLAG_DCB_ENABLED) { + if (test_bit(I40E_FLAG_DCB_ENA, pf->flags)) { netdev_info(netdev, "Full offload of TC Mqprio options is not supported when DCB is enabled\n"); return ret; } - if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) + if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) return ret; ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt); if (ret) return ret; memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); - pf->flags |= I40E_FLAG_TC_MQPRIO; - pf->flags &= ~I40E_FLAG_DCB_ENABLED; + set_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); break; default: return -EINVAL; @@ -7886,7 +8252,7 @@ config_tc: /* Quiesce VSI queues */ i40e_quiesce_vsi(vsi); - if (!hw && !(pf->flags & 
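The i40e_fwd_ring_up() rework above follows the kernel-wide rule that a list_for_each_entry() cursor must not be dereferenced once the loop has run off the end: a dedicated "iter" walks the list, "ch" is assigned only on a genuine match, and the post-loop "!ch" test is therefore well defined. The pattern reduced to a sketch with hypothetical types:

#include <linux/list.h>

struct demo_channel {
	struct list_head list;
	bool in_use;
};

/* return the first free channel or NULL -- never the list head */
static struct demo_channel *demo_find_free(struct list_head *channels)
{
	struct demo_channel *ch = NULL, *iter;

	list_for_each_entry(iter, channels, list) {
		if (!iter->in_use) {
			ch = iter;	/* publish only a real match */
			break;
		}
	}
	/* if the loop fell off the end, iter points at memory around
	 * the list head, so only ch may be used from here on
	 */
	return ch;
}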
I40E_FLAG_TC_MQPRIO)) + if (!hw && !i40e_is_tc_mqprio_enabled(pf)) i40e_remove_queue_channels(vsi); /* Configure VSI for enabled TCs */ @@ -7896,17 +8262,25 @@ config_tc: vsi->seid); need_reset = true; goto exit; - } else { - dev_info(&vsi->back->pdev->dev, - "Setup channel (id:%u) utilizing num_queues %d\n", - vsi->seid, vsi->tc_config.tc_info[0].qcount); + } else if (enabled_tc && + (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) { + netdev_info(netdev, + "Failed to create channel. Override queues (%u) not power of 2\n", + vsi->tc_config.tc_info[0].qcount); + ret = -EINVAL; + need_reset = true; + goto exit; } - if (pf->flags & I40E_FLAG_TC_MQPRIO) { + dev_info(&vsi->back->pdev->dev, + "Setup channel (id:%u) utilizing num_queues %d\n", + vsi->seid, vsi->tc_config.tc_info[0].qcount); + + if (i40e_is_tc_mqprio_enabled(pf)) { if (vsi->mqprio_qopt.max_rate[0]) { - u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; + u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, + vsi->mqprio_qopt.max_rate[0]); - do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); if (!ret) { u64 credits = max_tx_rate; @@ -8020,7 +8394,7 @@ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, }; if (filter->flags >= ARRAY_SIZE(flag_table)) - return I40E_ERR_CONFIG; + return -EIO; memset(&cld_filter, 0, sizeof(cld_filter)); @@ -8184,15 +8558,15 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, u8 field_flags = 0; if (dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_VLAN) | - BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_PORTS) | - BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { - dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n", + ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID))) { + dev_err(&pf->pdev->dev, "Unsupported key used: 0x%llx\n", dissector->used_keys); return -EOPNOTSUPP; } @@ -8234,7 +8608,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, } else { dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n", match.mask->dst); - return I40E_ERR_CONFIG; + return -EIO; } } @@ -8244,7 +8618,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, } else { dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n", match.mask->src); - return I40E_ERR_CONFIG; + return -EIO; } } ether_addr_copy(filter->dst_mac, match.key->dst); @@ -8262,7 +8636,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, } else { dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n", match.mask->vlan_id); - return I40E_ERR_CONFIG; + return -EIO; } } @@ -8274,6 +8648,10 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, flow_rule_match_control(rule, &match); addr_type = match.key->addr_type; + + if (flow_rule_has_control_flags(match.mask->flags, + f->common.extack)) + return -EOPNOTSUPP; } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { @@ -8286,7 +8664,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, } else { dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n", &match.mask->dst); - return I40E_ERR_CONFIG; + return -EIO; } } @@ -8296,13 +8674,13 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, } else { dev_err(&pf->pdev->dev, "Bad ip src mask 
%pI4b\n", &match.mask->src); - return I40E_ERR_CONFIG; + return -EIO; } } if (field_flags & I40E_CLOUD_FIELD_TEN_ID) { dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n"); - return I40E_ERR_CONFIG; + return -EIO; } filter->dst_ipv4 = match.key->dst; filter->src_ipv4 = match.key->src; @@ -8320,7 +8698,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, ipv6_addr_loopback(&match.key->src)) { dev_err(&pf->pdev->dev, "Bad ipv6, addr is LOOPBACK\n"); - return I40E_ERR_CONFIG; + return -EIO; } if (!ipv6_addr_any(&match.mask->dst) || !ipv6_addr_any(&match.mask->src)) @@ -8342,7 +8720,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, } else { dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n", be16_to_cpu(match.mask->src)); - return I40E_ERR_CONFIG; + return -EIO; } } @@ -8352,7 +8730,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, } else { dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n", be16_to_cpu(match.mask->dst)); - return I40E_ERR_CONFIG; + return -EIO; } } @@ -8427,6 +8805,11 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi, return -EOPNOTSUPP; } + if (!tc) { + dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination"); + return -EINVAL; + } + if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) return -EBUSY; @@ -8438,11 +8821,11 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi, return -EINVAL; } - if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) { + if (test_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags)) { dev_err(&vsi->back->pdev->dev, "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n"); - vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED; - vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER; + clear_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags); + clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, vsi->back->flags); } filter = kzalloc(sizeof(*filter), GFP_KERNEL); @@ -8466,9 +8849,8 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi, err = i40e_add_del_cloud_filter(vsi, filter, true); if (err) { - dev_err(&pf->pdev->dev, - "Failed to add cloud filter, err %s\n", - i40e_stat_str(&pf->hw, err)); + dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n", + err); goto err; } @@ -8532,18 +8914,18 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi, kfree(filter); if (err) { dev_err(&pf->pdev->dev, - "Failed to delete cloud filter, err %s\n", - i40e_stat_str(&pf->hw, err)); + "Failed to delete cloud filter, err %pe\n", + ERR_PTR(err)); return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status); } pf->num_cloud_filters--; if (!pf->num_cloud_filters) - if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) && - !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) { - pf->flags |= I40E_FLAG_FD_SB_ENABLED; - pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER; - pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; + if (test_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags) && + !test_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags)) { + set_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags); + clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); } return 0; } @@ -8653,6 +9035,27 @@ int i40e_open(struct net_device *netdev) } /** + * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues + * @vsi: vsi structure + * + * This updates netdev's number of tx/rx queues + * + * Returns status of setting tx/rx queues + **/ +static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi) +{ + int ret; + + ret = 
netif_set_real_num_rx_queues(vsi->netdev, + vsi->num_queue_pairs); + if (ret) + return ret; + + return netif_set_real_num_tx_queues(vsi->netdev, + vsi->num_queue_pairs); +} + +/** * i40e_vsi_open - * @vsi: the VSI to open * @@ -8688,13 +9091,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi) goto err_setup_rx; /* Notify the stack of the actual queue counts. */ - err = netif_set_real_num_tx_queues(vsi->netdev, - vsi->num_queue_pairs); - if (err) - goto err_set_queues; - - err = netif_set_real_num_rx_queues(vsi->netdev, - vsi->num_queue_pairs); + err = i40e_netif_set_realnum_tx_rx_queues(vsi); if (err) goto err_set_queues; @@ -8725,7 +9122,7 @@ err_setup_rx: i40e_vsi_free_rx_resources(vsi); err_setup_tx: i40e_vsi_free_tx_resources(vsi); - if (vsi == pf->vsi[pf->lan_vsi]) + if (vsi->type == I40E_VSI_MAIN) i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); return err; @@ -8766,47 +9163,47 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf) i40e_reset_fdir_filter_cnt(pf); /* Reprogram the default input set for TCP/IPv4 */ - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP, I40E_L3_SRC_MASK | I40E_L3_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); /* Reprogram the default input set for TCP/IPv6 */ - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP, I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); /* Reprogram the default input set for UDP/IPv4 */ - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP, I40E_L3_SRC_MASK | I40E_L3_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); /* Reprogram the default input set for UDP/IPv6 */ - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP, I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); /* Reprogram the default input set for SCTP/IPv4 */ - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP, I40E_L3_SRC_MASK | I40E_L3_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); /* Reprogram the default input set for SCTP/IPv6 */ - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP, I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); /* Reprogram the default input set for Other/IPv4 */ - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER, I40E_L3_SRC_MASK | I40E_L3_DST_MASK); - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_FRAG_IPV4, I40E_L3_SRC_MASK | I40E_L3_DST_MASK); /* Reprogram the default input set for Other/IPv6 */ - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER, I40E_L3_SRC_MASK | I40E_L3_DST_MASK); - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_FRAG_IPV6, I40E_L3_SRC_MASK | I40E_L3_DST_MASK); } @@ -8829,11 +9226,11 @@ static void i40e_cloud_filter_exit(struct i40e_pf *pf) } pf->num_cloud_filters = 0; - if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) && - !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) { - pf->flags |= I40E_FLAG_FD_SB_ENABLED; - 
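i40e_netif_set_realnum_tx_rx_queues() above wraps the two stack notifications so i40e_vsi_open() keeps a single error path; it also sets the RX count first, the reverse of the old open-coded order. A sketch of the call pair on its own (demo_* naming is illustrative; the netif_* functions are the real networking-core API):

#include <linux/netdevice.h>

static int demo_set_queue_counts(struct net_device *netdev,
				 unsigned int pairs)
{
	int err;

	/* either call fails with -EINVAL if 'pairs' exceeds the
	 * queue counts the netdev was allocated with
	 */
	err = netif_set_real_num_rx_queues(netdev, pairs);
	if (err)
		return err;

	return netif_set_real_num_tx_queues(netdev, pairs);
}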
pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER; - pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; + if (test_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags) && + !test_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags)) { + set_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags); + clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); } } @@ -8870,7 +9267,9 @@ int i40e_close(struct net_device *netdev) **/ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) { + struct i40e_vsi *vsi; u32 val; + int i; /* do the biggest reset indicated */ if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) { @@ -8921,34 +9320,25 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) i40e_prep_for_reset(pf); i40e_reset_and_rebuild(pf, true, lock_acquired); dev_info(&pf->pdev->dev, - pf->flags & I40E_FLAG_DISABLE_FW_LLDP ? + test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags) ? "FW LLDP is disabled\n" : "FW LLDP is enabled\n"); } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) { - int v; - /* Find the VSI(s) that requested a re-init */ - dev_info(&pf->pdev->dev, - "VSI reinit requested\n"); - for (v = 0; v < pf->num_alloc_vsi; v++) { - struct i40e_vsi *vsi = pf->vsi[v]; + dev_info(&pf->pdev->dev, "VSI reinit requested\n"); - if (vsi != NULL && - test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED, + i40e_pf_for_each_vsi(pf, i, vsi) { + if (test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED, vsi->state)) - i40e_vsi_reinit_locked(pf->vsi[v]); + i40e_vsi_reinit_locked(vsi); } } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) { - int v; - /* Find the VSI(s) that needs to be brought down */ dev_info(&pf->pdev->dev, "VSI down requested\n"); - for (v = 0; v < pf->num_alloc_vsi; v++) { - struct i40e_vsi *vsi = pf->vsi[v]; - if (vsi != NULL && - test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED, + i40e_pf_for_each_vsi(pf, i, vsi) { + if (test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state)) { set_bit(__I40E_VSI_DOWN, vsi->state); i40e_down(vsi); @@ -9024,8 +9414,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf, static int i40e_handle_lldp_event(struct i40e_pf *pf, struct i40e_arq_event_info *e) { - struct i40e_aqc_lldp_get_mib *mib = - (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; + struct i40e_aqc_lldp_get_mib *mib = libie_aq_raw(&e->desc); struct i40e_hw *hw = &pf->hw; struct i40e_dcbx_config tmp_dcbx_cfg; bool need_reconfig = false; @@ -9036,12 +9425,12 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, if (I40E_IS_X710TL_DEVICE(hw->device_id) && (hw->phy.link_info.link_speed & ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) && - !(pf->flags & I40E_FLAG_DCB_CAPABLE)) + !test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) /* let firmware decide if the DCB should be disabled */ - pf->flags |= I40E_FLAG_DCB_CAPABLE; + set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); /* Not DCB capable or capability disabled */ - if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) + if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) return ret; /* Ignore if event is not for Nearest Bridge */ @@ -9077,13 +9466,12 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) { dev_warn(&pf->pdev->dev, "DCB is not supported for X710-T*L 2.5/5G speeds\n"); - pf->flags &= ~I40E_FLAG_DCB_CAPABLE; + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); } else { dev_info(&pf->pdev->dev, - "Failed querying DCB configuration data from firmware, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + 
"Failed querying DCB configuration data from firmware, err %pe aq_err %s\n", + ERR_PTR(ret), + libie_aq_str(pf->hw.aq.asq_last_status)); } goto exit; } @@ -9105,9 +9493,9 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, /* Enable DCB tagging only when more than one TC */ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) - pf->flags |= I40E_FLAG_DCB_ENABLED; + set_bit(I40E_FLAG_DCB_ENA, pf->flags); else - pf->flags &= ~I40E_FLAG_DCB_ENABLED; + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); set_bit(__I40E_PORT_SUSPENDED, pf->state); /* Reconfiguration needed quiesce all VSIs */ @@ -9164,8 +9552,7 @@ void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags) static void i40e_handle_lan_overflow_event(struct i40e_pf *pf, struct i40e_arq_event_info *e) { - struct i40e_aqc_lan_overflow *data = - (struct i40e_aqc_lan_overflow *)&e->desc.params.raw; + struct i40e_aqc_lan_overflow *data = libie_aq_raw(&e->desc); u32 queue = le32_to_cpu(data->prtdcb_rupto); u32 qtx_ctl = le32_to_cpu(data->otx_ctl); struct i40e_hw *hw = &pf->hw; @@ -9175,31 +9562,18 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf, dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n", queue, qtx_ctl); - /* Queue belongs to VF, find the VF and issue VF reset */ - if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK) - >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) { - vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK) - >> I40E_QTX_CTL_VFVM_INDX_SHIFT); - vf_id -= hw->func_caps.vf_base_id; - vf = &pf->vf[vf_id]; - i40e_vc_notify_vf_reset(vf); - /* Allow VF to process pending reset notification */ - msleep(20); - i40e_reset_vf(vf, false); - } -} - -/** - * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters - * @pf: board private structure - **/ -u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf) -{ - u32 val, fcnt_prog; + if (FIELD_GET(I40E_QTX_CTL_PFVF_Q_MASK, qtx_ctl) != + I40E_QTX_CTL_VF_QUEUE) + return; - val = rd32(&pf->hw, I40E_PFQF_FDSTAT); - fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK); - return fcnt_prog; + /* Queue belongs to VF, find the VF and issue VF reset */ + vf_id = FIELD_GET(I40E_QTX_CTL_VFVM_INDX_MASK, qtx_ctl); + vf_id -= hw->func_caps.vf_base_id; + vf = &pf->vf[vf_id]; + i40e_vc_notify_vf_reset(vf); + /* Allow VF to process pending reset notification */ + msleep(20); + i40e_reset_vf(vf, false); } /** @@ -9212,8 +9586,7 @@ u32 i40e_get_current_fd_count(struct i40e_pf *pf) val = rd32(&pf->hw, I40E_PFQF_FDSTAT); fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) + - ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >> - I40E_PFQF_FDSTAT_BEST_CNT_SHIFT); + FIELD_GET(I40E_PFQF_FDSTAT_BEST_CNT_MASK, val); return fcnt_prog; } @@ -9227,8 +9600,7 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf) val = rd32(&pf->hw, I40E_GLQF_FDCNT_0); fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) + - ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >> - I40E_GLQF_FDCNT_0_BESTCNT_SHIFT); + FIELD_GET(I40E_GLQF_FDCNT_0_BESTCNT_MASK, val); return fcnt_prog; } @@ -9239,7 +9611,7 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf) static void i40e_reenable_fdir_sb(struct i40e_pf *pf) { if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) - if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); } @@ -9256,11 +9628,11 @@ static void i40e_reenable_fdir_atr(struct i40e_pf *pf) * settings. 
It is safe to restore the default input set * because there are no active TCPv4 filter rules. */ - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP, I40E_L3_SRC_MASK | I40E_L3_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) && (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n"); } @@ -9425,7 +9797,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); } else { /* replay sideband filters */ - i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); + i40e_fdir_filter_restore(i40e_pf_get_main_vsi(pf)); if (!disable_atr && !pf->fd_tcp4_filter_cnt) clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); @@ -9503,6 +9875,7 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) **/ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) { + struct i40e_vsi *vsi; struct i40e_pf *pf; int i; @@ -9510,15 +9883,10 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) return; pf = veb->pf; - /* depth first... */ - for (i = 0; i < I40E_MAX_VEB; i++) - if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) - i40e_veb_link_event(pf->veb[i], link_up); - - /* ... now the local VSIs */ - for (i = 0; i < pf->num_alloc_vsi; i++) - if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) - i40e_vsi_link_event(pf->vsi[i], link_up); + /* Send link event to contained VSIs */ + i40e_pf_for_each_vsi(pf, i, vsi) + if (vsi->uplink_seid == veb->seid) + i40e_vsi_link_event(vsi, link_up); } /** @@ -9527,10 +9895,11 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) **/ static void i40e_link_event(struct i40e_pf *pf) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); + struct i40e_veb *veb = i40e_pf_get_main_veb(pf); u8 new_link_speed, old_link_speed; - i40e_status status; bool new_link, old_link; + int status; #ifdef CONFIG_I40E_DCB int err; #endif /* CONFIG_I40E_DCB */ @@ -9541,11 +9910,11 @@ static void i40e_link_event(struct i40e_pf *pf) status = i40e_get_link_status(&pf->hw, &new_link); /* On success, disable temp link polling */ - if (status == I40E_SUCCESS) { + if (status == 0) { clear_bit(__I40E_TEMP_LINK_POLLING, pf->state); } else { /* Enable link polling temporarily until i40e_get_link_status - * returns I40E_SUCCESS + * returns 0 */ set_bit(__I40E_TEMP_LINK_POLLING, pf->state); dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", @@ -9562,20 +9931,23 @@ static void i40e_link_event(struct i40e_pf *pf) new_link == netif_carrier_ok(vsi->netdev))) return; + if (!new_link && old_link) + pf->link_down_events++; + i40e_print_link_message(vsi, new_link); /* Notify the base of the switch tree connected to * the link. Floating VEBs are not notified. 
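/* FIELD_GET(MASK, reg), adopted in the hunks above, extracts a bit
 * field at the position implied by its mask, replacing open-coded
 * "(reg & MASK) >> SHIFT" pairs. A self-contained sketch of its
 * semantics (the kernel macro adds compile-time mask checks; the mask
 * values below are illustrative, not the real register layout): */
#include <stdio.h>

#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctzl(mask))

#define QTX_CTL_PFVF_Q_MASK	0x00000003UL
#define QTX_CTL_VFVM_INDX_MASK	0x0003ff80UL

int main(void)
{
	unsigned long qtx_ctl = 0x00004081UL;

	printf("q_type=%lu vf_index=%lu\n",
	       FIELD_GET(QTX_CTL_PFVF_Q_MASK, qtx_ctl),
	       FIELD_GET(QTX_CTL_VFVM_INDX_MASK, qtx_ctl));
	return 0;
}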
*/ - if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) - i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); + if (veb) + i40e_veb_link_event(veb, new_link); else i40e_vsi_link_event(vsi, new_link); if (pf->vf) i40e_vc_notify_link_state(pf); - if (pf->flags & I40E_FLAG_PTP) + if (test_bit(I40E_FLAG_PTP_ENA, pf->flags)) i40e_ptp_set_increment(pf); #ifdef CONFIG_I40E_DCB if (new_link == old_link) @@ -9592,13 +9964,13 @@ static void i40e_link_event(struct i40e_pf *pf) memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg)); err = i40e_dcb_sw_default_config(pf); if (err) { - pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | - I40E_FLAG_DCB_ENABLED); + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); } else { pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; - pf->flags |= I40E_FLAG_DCB_CAPABLE; - pf->flags &= ~I40E_FLAG_DCB_ENABLED; + set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); } } #endif /* CONFIG_I40E_DCB */ @@ -9610,6 +9982,8 @@ static void i40e_link_event(struct i40e_pf *pf) **/ static void i40e_watchdog_subtask(struct i40e_pf *pf) { + struct i40e_vsi *vsi; + struct i40e_veb *veb; int i; /* if interface is down do nothing */ @@ -9623,22 +9997,21 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) return; pf->service_timer_previous = jiffies; - if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) || + if (test_bit(I40E_FLAG_LINK_POLLING_ENA, pf->flags) || test_bit(__I40E_TEMP_LINK_POLLING, pf->state)) i40e_link_event(pf); /* Update the stats for active netdevs so the network stack * can look at updated numbers whenever it cares to */ - for (i = 0; i < pf->num_alloc_vsi; i++) - if (pf->vsi[i] && pf->vsi[i]->netdev) - i40e_update_stats(pf->vsi[i]); + i40e_pf_for_each_vsi(pf, i, vsi) + if (vsi->netdev) + i40e_update_stats(vsi); - if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) { + if (test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags)) { /* Update the stats for the active switching components */ - for (i = 0; i < I40E_MAX_VEB; i++) - if (pf->veb[i]) - i40e_update_veb_stats(pf->veb[i]); + i40e_pf_for_each_veb(pf, i, veb) + i40e_update_veb_stats(veb); } i40e_ptp_rx_hang(pf); @@ -9699,8 +10072,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf) static void i40e_handle_link_event(struct i40e_pf *pf, struct i40e_arq_event_info *e) { - struct i40e_aqc_get_link_status *status = - (struct i40e_aqc_get_link_status *)&e->desc.params.raw; + struct i40e_aqc_get_link_status *status = libie_aq_raw(&e->desc); /* Do a new status request to re-enable LSE reporting * and load new status information into the hw struct @@ -9723,7 +10095,7 @@ static void i40e_handle_link_event(struct i40e_pf *pf, if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && (!(status->link_info & I40E_AQ_LINK_UP)) && - (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) { + (!test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))) { dev_err(&pf->pdev->dev, "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n"); dev_err(&pf->pdev->dev, @@ -9741,9 +10113,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) struct i40e_arq_event_info event; struct i40e_hw *hw = &pf->hw; u16 pending, i = 0; - i40e_status ret; u16 opcode; u32 oldval; + int ret; u32 val; /* Do not run clean AQ when PF reset fails */ @@ -9751,7 +10123,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) return; /* check for error indications */ - val = rd32(&pf->hw, pf->hw.aq.arq.len); + val = 
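/* The link-event path above gains pf->link_down_events, incremented
 * only on a true up-to-down transition rather than on every poll. A
 * small sketch of that edge detection: */
#include <stdbool.h>
#include <stdio.h>

struct pf_state {
	bool old_link;
	unsigned long link_down_events;
};

static void link_event(struct pf_state *pf, bool new_link)
{
	if (!new_link && pf->old_link)
		pf->link_down_events++;	/* falling edge only */
	pf->old_link = new_link;
}

int main(void)
{
	struct pf_state pf = { .old_link = true };
	bool seq[] = { true, false, false, true, false };

	for (unsigned int i = 0; i < sizeof(seq) / sizeof(seq[0]); i++)
		link_event(&pf, seq[i]);
	printf("link_down_events=%lu\n", pf.link_down_events);	/* 2 */
	return 0;
}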
rd32(&pf->hw, I40E_PF_ARQLEN); oldval = val; if (val & I40E_PF_ARQLEN_ARQVFE_MASK) { if (hw->debug_mask & I40E_DEBUG_AQ) @@ -9770,9 +10142,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK; } if (oldval != val) - wr32(&pf->hw, pf->hw.aq.arq.len, val); + wr32(&pf->hw, I40E_PF_ARQLEN, val); - val = rd32(&pf->hw, pf->hw.aq.asq.len); + val = rd32(&pf->hw, I40E_PF_ATQLEN); oldval = val; if (val & I40E_PF_ATQLEN_ATQVFE_MASK) { if (pf->hw.debug_mask & I40E_DEBUG_AQ) @@ -9790,7 +10162,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK; } if (oldval != val) - wr32(&pf->hw, pf->hw.aq.asq.len, val); + wr32(&pf->hw, I40E_PF_ATQLEN, val); event.buf_len = I40E_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); @@ -9799,7 +10171,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) do { ret = i40e_clean_arq_element(hw, &event, &pending); - if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) + if (ret == -EALREADY) break; else if (ret) { dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); @@ -9850,9 +10222,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) opcode); break; } - } while (i++ < pf->adminq_work_limit); + } while (i++ < I40E_AQ_WORK_LIMIT); - if (i < pf->adminq_work_limit) + if (i < I40E_AQ_WORK_LIMIT) clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); /* re-enable Admin queue interrupt cause */ @@ -9897,7 +10269,7 @@ static void i40e_verify_eeprom(struct i40e_pf *pf) **/ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); struct i40e_vsi_context ctxt; int ret; @@ -9907,9 +10279,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, - "couldn't get PF vsi config, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "couldn't get PF vsi config, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status)); return; } ctxt.flags = I40E_AQ_VSI_TYPE_PF; @@ -9919,9 +10290,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, - "update vsi switch failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "update vsi switch failed, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status)); } } @@ -9933,7 +10303,7 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) **/ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); struct i40e_vsi_context ctxt; int ret; @@ -9943,9 +10313,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, - "couldn't get PF vsi config, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "couldn't get PF vsi config, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status)); return; } ctxt.flags = I40E_AQ_VSI_TYPE_PF; @@ -9955,9 +10324,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, - "update vsi switch failed, err %s 
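/* The AdminQ service loop above drains at most I40E_AQ_WORK_LIMIT
 * events per pass, stops early when the queue reports "no work" (now a
 * plain -EALREADY), and clears the pending flag only if it fully
 * drained. Userspace sketch of that bounded-drain shape (limit value
 * and queue stub are illustrative): */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define AQ_WORK_LIMIT	66

static int queue_depth = 5;

static int clean_arq_element(void)
{
	if (!queue_depth)
		return -EALREADY;	/* nothing left to process */
	queue_depth--;
	return 0;
}

int main(void)
{
	bool event_pending = true;
	unsigned int i = 0;

	do {
		if (clean_arq_element() == -EALREADY)
			break;
		/* ... dispatch on the event opcode here ... */
	} while (i++ < AQ_WORK_LIMIT);

	if (i < AQ_WORK_LIMIT)
		event_pending = false;	/* drained: re-enable interrupt */

	printf("pending=%d remaining=%d\n", event_pending, queue_depth);
	return 0;
}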
aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "update vsi switch failed, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status)); } } @@ -9983,89 +10351,84 @@ static void i40e_config_bridge_mode(struct i40e_veb *veb) } /** - * i40e_reconstitute_veb - rebuild the VEB and anything connected to it + * i40e_reconstitute_veb - rebuild the VEB and VSIs connected to it * @veb: pointer to the VEB instance * - * This is a recursive function that first builds the attached VSIs then - * recurses in to build the next layer of VEB. We track the connections - * through our own index numbers because the seid's from the HW could - * change across the reset. + * This is a function that builds the attached VSIs. We track the connections + * through our own index numbers because the seid's from the HW could change + * across the reset. **/ static int i40e_reconstitute_veb(struct i40e_veb *veb) { struct i40e_vsi *ctl_vsi = NULL; struct i40e_pf *pf = veb->pf; - int v, veb_idx; - int ret; + struct i40e_vsi *vsi; + int v, ret; - /* build VSI that owns this VEB, temporarily attached to base VEB */ - for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { - if (pf->vsi[v] && - pf->vsi[v]->veb_idx == veb->idx && - pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { - ctl_vsi = pf->vsi[v]; - break; - } - } - if (!ctl_vsi) { - dev_info(&pf->pdev->dev, - "missing owner VSI for veb_idx %d\n", veb->idx); - ret = -ENOENT; - goto end_reconstitute; + /* As we do not maintain PV (port virtualizer) switch element then + * there can be only one non-floating VEB that have uplink to MAC SEID + * and its control VSI is the main one. + */ + if (WARN_ON(veb->uplink_seid && veb->uplink_seid != pf->mac_seid)) { + dev_err(&pf->pdev->dev, + "Invalid uplink SEID for VEB %d\n", veb->idx); + return -ENOENT; } - if (ctl_vsi != pf->vsi[pf->lan_vsi]) - ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; - ret = i40e_add_vsi(ctl_vsi); - if (ret) { - dev_info(&pf->pdev->dev, - "rebuild of veb_idx %d owner VSI failed: %d\n", - veb->idx, ret); - goto end_reconstitute; + + if (veb->uplink_seid == pf->mac_seid) { + /* Check that the LAN VSI has VEB owning flag set */ + ctl_vsi = i40e_pf_get_main_vsi(pf); + + if (WARN_ON(ctl_vsi->veb_idx != veb->idx || + !(ctl_vsi->flags & I40E_VSI_FLAG_VEB_OWNER))) { + dev_err(&pf->pdev->dev, + "Invalid control VSI for VEB %d\n", veb->idx); + return -ENOENT; + } + + /* Add the control VSI to switch */ + ret = i40e_add_vsi(ctl_vsi); + if (ret) { + dev_err(&pf->pdev->dev, + "Rebuild of owner VSI for VEB %d failed: %d\n", + veb->idx, ret); + return ret; + } + + i40e_vsi_reset_stats(ctl_vsi); } - i40e_vsi_reset_stats(ctl_vsi); /* create the VEB in the switch and move the VSI onto the VEB */ ret = i40e_add_veb(veb, ctl_vsi); if (ret) - goto end_reconstitute; + return ret; - if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) - veb->bridge_mode = BRIDGE_MODE_VEB; - else - veb->bridge_mode = BRIDGE_MODE_VEPA; - i40e_config_bridge_mode(veb); + if (veb->uplink_seid) { + if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) + veb->bridge_mode = BRIDGE_MODE_VEB; + else + veb->bridge_mode = BRIDGE_MODE_VEPA; + i40e_config_bridge_mode(veb); + } /* create the remaining VSIs attached to this VEB */ - for (v = 0; v < pf->num_alloc_vsi; v++) { - if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) + i40e_pf_for_each_vsi(pf, v, vsi) { + if (vsi == ctl_vsi) continue; - if (pf->vsi[v]->veb_idx == veb->idx) { - struct i40e_vsi *vsi = pf->vsi[v]; - + if (vsi->veb_idx == veb->idx) { 
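/* The rewritten i40e_reconstitute_veb() below is no longer recursive:
 * without port-virtualizer elements, the only legal shapes are a
 * floating VEB (uplink SEID 0) or one VEB uplinked to the MAC SEID
 * whose control VSI is the main LAN VSI. A toy sketch of just that
 * validation (return codes and fields are illustrative): */
#include <stdio.h>

struct veb {
	int uplink_seid;
	int idx;
};

static int reconstitute_veb(const struct veb *veb, int mac_seid,
			    int main_vsi_veb_idx)
{
	if (veb->uplink_seid && veb->uplink_seid != mac_seid)
		return -2;	/* -ENOENT in the driver */
	if (veb->uplink_seid == mac_seid && main_vsi_veb_idx != veb->idx)
		return -2;	/* main VSI must own this VEB */
	/* ... add control VSI, add VEB, re-add remaining VSIs ... */
	return 0;
}

int main(void)
{
	struct veb good = { .uplink_seid = 100, .idx = 0 };
	struct veb bad = { .uplink_seid = 7, .idx = 1 };

	printf("good=%d bad=%d\n",
	       reconstitute_veb(&good, 100, 0),
	       reconstitute_veb(&bad, 100, 0));
	return 0;
}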
vsi->uplink_seid = veb->seid; ret = i40e_add_vsi(vsi); if (ret) { dev_info(&pf->pdev->dev, "rebuild of vsi_idx %d failed: %d\n", v, ret); - goto end_reconstitute; + return ret; } i40e_vsi_reset_stats(vsi); } } - /* create any VEBs attached to this VEB - RECURSION */ - for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { - if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { - pf->veb[veb_idx]->uplink_seid = veb->seid; - ret = i40e_reconstitute_veb(pf->veb[veb_idx]); - if (ret) - break; - } - } - -end_reconstitute: return ret; } @@ -10077,12 +10440,12 @@ end_reconstitute: static int i40e_get_capabilities(struct i40e_pf *pf, enum i40e_admin_queue_opc list_type) { - struct i40e_aqc_list_capabilities_element_resp *cap_buf; + struct libie_aqc_list_caps_elem *cap_buf; u16 data_size; int buf_len; int err; - buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); + buf_len = 40 * sizeof(struct libie_aqc_list_caps_elem); do { cap_buf = kzalloc(buf_len, GFP_KERNEL); if (!cap_buf) @@ -10095,15 +10458,14 @@ static int i40e_get_capabilities(struct i40e_pf *pf, /* data loaded, buffer no longer needed */ kfree(cap_buf); - if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { + if (pf->hw.aq.asq_last_status == LIBIE_AQ_RC_ENOMEM) { /* retry with a larger buffer */ buf_len = data_size; - } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { + } else if (pf->hw.aq.asq_last_status != LIBIE_AQ_RC_OK || err) { dev_info(&pf->pdev->dev, - "capability discovery failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, err), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + "capability discovery failed, err %pe aq_err %s\n", + ERR_PTR(err), + libie_aq_str(pf->hw.aq.asq_last_status)); return -ENODEV; } } while (err); @@ -10157,7 +10519,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi); **/ static void i40e_fdir_sb_setup(struct i40e_pf *pf) { - struct i40e_vsi *vsi; + struct i40e_vsi *main_vsi, *vsi; /* quick workaround for an NVM issue that leaves a critical register * uninitialized @@ -10174,7 +10536,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf) wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); } - if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) return; /* find existing VSI and see if it needs configuring */ @@ -10182,12 +10544,12 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf) /* create a new VSI if none exists */ if (!vsi) { - vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, - pf->vsi[pf->lan_vsi]->seid, 0); + main_vsi = i40e_pf_get_main_vsi(pf); + vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, main_vsi->seid, 0); if (!vsi) { dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); - pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; - pf->flags |= I40E_FLAG_FD_SB_INACTIVE; + clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); return; } } @@ -10222,7 +10584,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid) struct i40e_cloud_filter *cfilter; struct i40e_pf *pf = vsi->back; struct hlist_node *node; - i40e_status ret; + int ret; /* Add cloud filters back if they exist */ hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list, @@ -10238,10 +10600,9 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid) if (ret) { dev_dbg(&pf->pdev->dev, - "Failed to rebuild cloud filter, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + "Failed to rebuild cloud filter, err %pe aq_err %s\n", + ERR_PTR(ret), + 
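/* i40e_get_capabilities() above keeps its grow-and-retry loop:
 * allocate a guessed buffer, issue the list-capabilities query, and if
 * firmware answers ENOMEM retry once with the size it reported.
 * Userspace sketch of that loop (sizes and return codes are
 * illustrative): */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RC_OK		0
#define RC_ENOMEM	9

static int aq_list_caps(void *buf, size_t len, size_t *needed)
{
	const size_t real_size = 1024;

	*needed = real_size;
	if (len < real_size)
		return RC_ENOMEM;
	memset(buf, 0, real_size);
	return RC_OK;
}

int main(void)
{
	size_t buf_len = 40 * 24;	/* initial guess: 40 elements */
	size_t needed;
	int rc;

	do {
		void *buf = calloc(1, buf_len);

		if (!buf)
			return 1;
		rc = aq_list_caps(buf, buf_len, &needed);
		free(buf);	/* data consumed, buffer no longer needed */
		if (rc == RC_ENOMEM)
			buf_len = needed;	/* retry with reported size */
		else if (rc != RC_OK)
			return 1;
	} while (rc != RC_OK);

	printf("caps read with buf_len=%zu\n", buf_len);
	return 0;
}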
libie_aq_str(pf->hw.aq.asq_last_status)); return ret; } } @@ -10257,7 +10618,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid) static int i40e_rebuild_channels(struct i40e_vsi *vsi) { struct i40e_channel *ch, *ch_tmp; - i40e_status ret; + int ret; if (list_empty(&vsi->ch_list)) return 0; @@ -10310,6 +10671,21 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi) } /** + * i40e_clean_xps_state - clean xps state for every tx_ring + * @vsi: ptr to the VSI + **/ +static void i40e_clean_xps_state(struct i40e_vsi *vsi) +{ + int i; + + if (vsi->tx_rings) + for (i = 0; i < vsi->num_queue_pairs; i++) + if (vsi->tx_rings[i]) + clear_bit(__I40E_TX_XPS_INIT_DONE, + vsi->tx_rings[i]->state); +} + +/** * i40e_prep_for_reset - prep for the core to reset * @pf: board private structure * @@ -10318,7 +10694,8 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi) static void i40e_prep_for_reset(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; - i40e_status ret = 0; + struct i40e_vsi *vsi; + int ret = 0; u32 v; clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state); @@ -10332,9 +10709,9 @@ static void i40e_prep_for_reset(struct i40e_pf *pf) /* quiesce the VSIs and their queues that are not already DOWN */ i40e_pf_quiesce_all_vsi(pf); - for (v = 0; v < pf->num_alloc_vsi; v++) { - if (pf->vsi[v]) - pf->vsi[v]->seid = 0; + i40e_pf_for_each_vsi(pf, v, vsi) { + i40e_clean_xps_state(vsi); + vsi->seid = 0; } i40e_shutdown_adminq(&pf->hw); @@ -10365,7 +10742,7 @@ static void i40e_send_version(struct i40e_pf *pf) dv.minor_version = 0xff; dv.build_version = 0xff; dv.subbuild_version = 0; - strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string)); + strscpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string)); i40e_aq_send_driver_version(&pf->hw, &dv, NULL); } @@ -10410,7 +10787,9 @@ static void i40e_get_oem_version(struct i40e_hw *hw) &gen_snap); i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET, &release); - hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release; + hw->nvm.oem_ver = + FIELD_PREP(I40E_OEM_GEN_MASK | I40E_OEM_SNAP_MASK, gen_snap) | + FIELD_PREP(I40E_OEM_RELEASE_MASK, release); hw->nvm.eetrack = I40E_OEM_EETRACK_ID; } @@ -10421,7 +10800,7 @@ static void i40e_get_oem_version(struct i40e_hw *hw) static int i40e_reset(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; - i40e_status ret; + int ret; ret = i40e_pf_reset(hw); if (ret) { @@ -10443,43 +10822,35 @@ static int i40e_reset(struct i40e_pf *pf) **/ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) { - int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state); - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf); + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); struct i40e_hw *hw = &pf->hw; - i40e_status ret; + struct i40e_veb *veb; + int ret; u32 val; int v; if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && - i40e_check_recovery_mode(pf)) { - i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev); - } + is_recovery_mode_reported) + i40e_set_ethtool_ops(vsi->netdev); if (test_bit(__I40E_DOWN, pf->state) && - !test_bit(__I40E_RECOVERY_MODE, pf->state) && - !old_recovery_mode_bit) + !test_bit(__I40E_RECOVERY_MODE, pf->state)) goto clear_recovery; dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); /* rebuild the basics for the AdminQ, HMC, and initial HW switch */ ret = i40e_init_adminq(&pf->hw); if (ret) { - dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err 
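/* i40e_get_oem_version() above now composes nvm.oem_ver with
 * FIELD_PREP() instead of a bare shift-and-OR. Sketch of FIELD_PREP
 * semantics: shift a value into the field implied by its mask (the
 * kernel macro adds constant-mask checks; the layout below is
 * illustrative): */
#include <stdint.h>
#include <stdio.h>

#define FIELD_PREP(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define OEM_GEN_SNAP_MASK	0xffff0000u
#define OEM_RELEASE_MASK	0x0000ffffu

int main(void)
{
	uint32_t gen_snap = 0x0102, release = 0x0034;
	uint32_t oem_ver = FIELD_PREP(OEM_GEN_SNAP_MASK, gen_snap) |
			   FIELD_PREP(OEM_RELEASE_MASK, release);

	printf("oem_ver=0x%08x\n", oem_ver);	/* 0x01020034 */
	return 0;
}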
%s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status)); goto clear_recovery; } i40e_get_oem_version(&pf->hw); - if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && - ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) || - hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) { - /* The following delay is necessary for 4.33 firmware and older - * to recover after EMP reset. 200 ms should suffice but we - * put here 300 ms to be sure that FW is ready to operate - * after reset. - */ - mdelay(300); + if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) { + /* The following delay is necessary for firmware update. */ + mdelay(1000); } /* re-verify the eeprom if we just had an EMP reset */ @@ -10490,13 +10861,12 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) * accordingly with regard to resources initialization * and deinitialization */ - if (test_bit(__I40E_RECOVERY_MODE, pf->state) || - old_recovery_mode_bit) { + if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { if (i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities)) goto end_unlock; - if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { + if (is_recovery_mode_reported) { /* we're staying in recovery mode so we'll reinitialize * misc vector here */ @@ -10546,7 +10916,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) * unless I40E_FLAG_TC_MQPRIO was enabled or DCB * is not supported with new link speed */ - if (pf->flags & I40E_FLAG_TC_MQPRIO) { + if (i40e_is_tc_mqprio_enabled(pf)) { i40e_aq_set_dcb_parameters(hw, false, NULL); } else { if (I40E_IS_X710TL_DEVICE(hw->device_id) && @@ -10555,14 +10925,14 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) i40e_aq_set_dcb_parameters(hw, false, NULL); dev_warn(&pf->pdev->dev, "DCB is not supported for X710-T*L 2.5/5G speeds\n"); - pf->flags &= ~I40E_FLAG_DCB_CAPABLE; + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); } else { i40e_aq_set_dcb_parameters(hw, true, NULL); ret = i40e_init_pf_dcb(pf); if (ret) { dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret); - pf->flags &= ~I40E_FLAG_DCB_CAPABLE; + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); /* Continue without DCB enabled */ } } @@ -10583,9 +10953,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) I40E_AQ_EVENT_MEDIA_NA | I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); if (ret) - dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status)); /* Rebuild the VSIs and VEBs that existed before reset. 
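/* Error reporting in the hunks above switches from driver-private
 * status strings to negative errnos printed with "%pe" (via ERR_PTR())
 * plus libie_aq_str() for the firmware AdminQ status. A rough
 * userspace analogue of what "%pe" provides (strerror() names the
 * errno; the AQ status string below is illustrative): */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static void report_aq_error(int ret, const char *aq_str)
{
	/* kernel: dev_info(dev, "... err %pe aq_err %s\n",
	 *		    ERR_PTR(ret), libie_aq_str(status)); */
	printf("set phy mask fail, err %s aq_err %s\n",
	       strerror(-ret), aq_str);
}

int main(void)
{
	report_aq_error(-EPERM, "LIBIE_AQ_RC_EPERM");
	return 0;
}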
* They are still in our local switch element arrays, so only @@ -10596,35 +10965,29 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) */ if (vsi->uplink_seid != pf->mac_seid) { dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); - /* find the one VEB connected to the MAC, and find orphans */ - for (v = 0; v < I40E_MAX_VEB; v++) { - if (!pf->veb[v]) - continue; - - if (pf->veb[v]->uplink_seid == pf->mac_seid || - pf->veb[v]->uplink_seid == 0) { - ret = i40e_reconstitute_veb(pf->veb[v]); - if (!ret) - continue; + /* Rebuild VEBs */ + i40e_pf_for_each_veb(pf, v, veb) { + ret = i40e_reconstitute_veb(veb); + if (!ret) + continue; - /* If Main VEB failed, we're in deep doodoo, - * so give up rebuilding the switch and set up - * for minimal rebuild of PF VSI. - * If orphan failed, we'll report the error - * but try to keep going. - */ - if (pf->veb[v]->uplink_seid == pf->mac_seid) { - dev_info(&pf->pdev->dev, - "rebuild of switch failed: %d, will try to set up simple PF connection\n", - ret); - vsi->uplink_seid = pf->mac_seid; - break; - } else if (pf->veb[v]->uplink_seid == 0) { - dev_info(&pf->pdev->dev, - "rebuild of orphan VEB failed: %d\n", - ret); - } + /* If Main VEB failed, we're in deep doodoo, + * so give up rebuilding the switch and set up + * for minimal rebuild of PF VSI. + * If orphan failed, we'll report the error + * but try to keep going. + */ + if (veb->uplink_seid == pf->mac_seid) { + dev_info(&pf->pdev->dev, + "rebuild of switch failed: %d, will try to set up simple PF connection\n", + ret); + vsi->uplink_seid = pf->mac_seid; + break; + } else if (veb->uplink_seid == 0) { + dev_info(&pf->pdev->dev, + "rebuild of orphan VEB failed: %d\n", + ret); } } } @@ -10641,10 +11004,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) } if (vsi->mqprio_qopt.max_rate[0]) { - u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; + u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, + vsi->mqprio_qopt.max_rate[0]); u64 credits = 0; - do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); if (ret) goto end_unlock; @@ -10683,18 +11046,20 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) wr32(hw, I40E_REG_MSS, val); } - if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { + if (test_bit(I40E_HW_CAP_RESTART_AUTONEG, pf->hw.caps)) { msleep(75); ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (ret) - dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n", + ERR_PTR(ret), + libie_aq_str(pf->hw.aq.asq_last_status)); } /* reinit the misc interrupt */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { ret = i40e_setup_misc_vector(pf); + if (ret) + goto end_unlock; + } /* Add a filter to drop all Flow control frames from any VSI from being * transmitted. By doing so we stop a malicious VF from sending out @@ -10716,10 +11081,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) ret = i40e_set_promiscuous(pf, pf->cur_promisc); if (ret) dev_warn(&pf->pdev->dev, - "Failed to restore promiscuous setting: %s, err %s aq_err %s\n", + "Failed to restore promiscuous setting: %s, err %pe aq_err %s\n", pf->cur_promisc ? 
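/* The bandwidth hunk above replaces an open-coded do_div() with
 * i40e_bw_bytes_to_mbits(). A sketch of the conversion, assuming the
 * driver's divisor of 125000 bytes/s per Mbit/s (the real helper may
 * also enforce a minimum rate): */
#include <stdint.h>
#include <stdio.h>

#define BW_MBPS_DIVISOR	125000ULL	/* bytes per second in 1 Mbit/s */

static uint64_t bw_bytes_to_mbits(uint64_t bytes_per_sec)
{
	return bytes_per_sec / BW_MBPS_DIVISOR;
}

int main(void)
{
	/* 1250000000 B/s -> 10000 Mbit/s */
	printf("%llu Mbit/s\n",
	       (unsigned long long)bw_bytes_to_mbits(1250000000ULL));
	return 0;
}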
"on" : "off", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status)); i40e_reset_all_vfs(pf, true); @@ -10750,6 +11114,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) { int ret; + + if (test_bit(__I40E_IN_REMOVE, pf->state)) + return; /* Now we wait for GRST to settle out. * We don't have to delete the VEBs or VSIs from the hw switch * because the reset will make them disappear. @@ -10757,6 +11124,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit, ret = i40e_reset(pf); if (!ret) i40e_rebuild(pf, reinit, lock_acquired); + else + dev_err(&pf->pdev->dev, "%s: i40e_reset() FAILED", __func__); } /** @@ -10775,6 +11144,67 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired) } /** + * i40e_print_vf_mdd_event - print VF Tx/Rx malicious driver detect event + * @pf: board private structure + * @vf: pointer to the VF structure + * @is_tx: true - for Tx event, false - for Rx + */ +static void i40e_print_vf_mdd_event(struct i40e_pf *pf, struct i40e_vf *vf, + bool is_tx) +{ + dev_err(&pf->pdev->dev, is_tx ? + "%lld Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pm. mdd-auto-reset-vfs=%s\n" : + "%lld Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pm. mdd-auto-reset-vfs=%s\n", + is_tx ? vf->mdd_tx_events.count : vf->mdd_rx_events.count, + pf->hw.pf_id, + vf->vf_id, + vf->default_lan_addr.addr, + str_on_off(test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags))); +} + +/** + * i40e_print_vfs_mdd_events - print VFs malicious driver detect event + * @pf: pointer to the PF structure + * + * Called from i40e_handle_mdd_event to rate limit and print VFs MDD events. + */ +static void i40e_print_vfs_mdd_events(struct i40e_pf *pf) +{ + unsigned int i; + + /* check that there are pending MDD events to print */ + if (!test_and_clear_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state)) + return; + + if (!__ratelimit(&pf->mdd_message_rate_limit)) + return; + + for (i = 0; i < pf->num_alloc_vfs; i++) { + struct i40e_vf *vf = &pf->vf[i]; + bool is_printed = false; + + /* only print Rx MDD event message if there are new events */ + if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) { + vf->mdd_rx_events.last_printed = vf->mdd_rx_events.count; + i40e_print_vf_mdd_event(pf, vf, false); + is_printed = true; + } + + /* only print Tx MDD event message if there are new events */ + if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) { + vf->mdd_tx_events.last_printed = vf->mdd_tx_events.count; + i40e_print_vf_mdd_event(pf, vf, true); + is_printed = true; + } + + if (is_printed && !test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags)) + dev_info(&pf->pdev->dev, + "Use PF Control I/F to re-enable the VF #%d\n", + i); + } +} + +/** * i40e_handle_mdd_event * @pf: pointer to the PF structure * @@ -10788,20 +11218,21 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) u32 reg; int i; - if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state)) + if (!test_and_clear_bit(__I40E_MDD_EVENT_PENDING, pf->state)) { + /* Since the VF MDD event logging is rate limited, check if + * there are pending MDD events. 
+ */ + i40e_print_vfs_mdd_events(pf); return; + } /* find what triggered the MDD event */ reg = rd32(hw, I40E_GL_MDET_TX); if (reg & I40E_GL_MDET_TX_VALID_MASK) { - u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> - I40E_GL_MDET_TX_PF_NUM_SHIFT; - u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> - I40E_GL_MDET_TX_VF_NUM_SHIFT; - u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> - I40E_GL_MDET_TX_EVENT_SHIFT; - u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >> - I40E_GL_MDET_TX_QUEUE_SHIFT) - + u8 pf_num = FIELD_GET(I40E_GL_MDET_TX_PF_NUM_MASK, reg); + u16 vf_num = FIELD_GET(I40E_GL_MDET_TX_VF_NUM_MASK, reg); + u8 event = FIELD_GET(I40E_GL_MDET_TX_EVENT_MASK, reg); + u16 queue = FIELD_GET(I40E_GL_MDET_TX_QUEUE_MASK, reg) - pf->hw.func_caps.base_queue; if (netif_msg_tx_err(pf)) dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n", @@ -10811,12 +11242,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) } reg = rd32(hw, I40E_GL_MDET_RX); if (reg & I40E_GL_MDET_RX_VALID_MASK) { - u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> - I40E_GL_MDET_RX_FUNCTION_SHIFT; - u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> - I40E_GL_MDET_RX_EVENT_SHIFT; - u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >> - I40E_GL_MDET_RX_QUEUE_SHIFT) - + u8 func = FIELD_GET(I40E_GL_MDET_RX_FUNCTION_MASK, reg); + u8 event = FIELD_GET(I40E_GL_MDET_RX_EVENT_MASK, reg); + u16 queue = FIELD_GET(I40E_GL_MDET_RX_QUEUE_MASK, reg) - pf->hw.func_caps.base_queue; if (netif_msg_rx_err(pf)) dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", @@ -10840,36 +11268,48 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) /* see if one of the VFs needs its hand slapped */ for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { + bool is_mdd_on_tx = false; + bool is_mdd_on_rx = false; + vf = &(pf->vf[i]); reg = rd32(hw, I40E_VP_MDET_TX(i)); if (reg & I40E_VP_MDET_TX_VALID_MASK) { + set_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state); wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); - vf->num_mdd_events++; - dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", - i); - dev_info(&pf->pdev->dev, - "Use PF Control I/F to re-enable the VF\n"); + vf->mdd_tx_events.count++; set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); + is_mdd_on_tx = true; } reg = rd32(hw, I40E_VP_MDET_RX(i)); if (reg & I40E_VP_MDET_RX_VALID_MASK) { + set_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state); wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); - vf->num_mdd_events++; - dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", - i); - dev_info(&pf->pdev->dev, - "Use PF Control I/F to re-enable the VF\n"); + vf->mdd_rx_events.count++; set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); + is_mdd_on_rx = true; + } + + if ((is_mdd_on_tx || is_mdd_on_rx) && + test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags)) { + /* VF MDD event counters will be cleared by + * reset, so print the event prior to reset. 
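/* i40e_print_vfs_mdd_events() above logs only when a VF's event
 * counter has advanced past the value it last printed, on top of a
 * __ratelimit() guard. Minimal sketch of the count/last_printed
 * dedup: */
#include <stdio.h>

struct mdd_events {
	unsigned long long count;
	unsigned long long last_printed;
};

static void maybe_print(struct mdd_events *ev, const char *dir, int vf_id)
{
	if (ev->count == ev->last_printed)
		return;	/* nothing new since the last message */
	ev->last_printed = ev->count;
	printf("%llu %s MDD events on VF %d\n", ev->count, dir, vf_id);
}

int main(void)
{
	struct mdd_events rx = { 0, 0 };

	rx.count++;
	maybe_print(&rx, "Rx", 3);	/* prints */
	maybe_print(&rx, "Rx", 3);	/* suppressed */
	rx.count += 2;
	maybe_print(&rx, "Rx", 3);	/* prints again */
	return 0;
}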
+ */ + if (is_mdd_on_rx) + i40e_print_vf_mdd_event(pf, vf, false); + if (is_mdd_on_tx) + i40e_print_vf_mdd_event(pf, vf, true); + + i40e_vc_reset_vf(vf, true); } } - /* re-enable mdd interrupt cause */ - clear_bit(__I40E_MDD_EVENT_PENDING, pf->state); reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); i40e_flush(hw); + + i40e_print_vfs_mdd_events(pf); } /** @@ -10892,7 +11332,7 @@ static void i40e_service_task(struct work_struct *work) return; if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) { - i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]); + i40e_detect_recover_hung(pf); i40e_sync_filters_subtask(pf); i40e_reset_subtask(pf); i40e_handle_mdd_event(pf); @@ -10901,14 +11341,12 @@ static void i40e_service_task(struct work_struct *work) i40e_fdir_reinit_subtask(pf); if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) { /* Client subtask will reopen next time through. */ - i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], - true); + i40e_notify_client_of_netdev_close(pf, true); } else { i40e_client_subtask(pf); if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE, pf->state)) - i40e_notify_client_of_l2_param_changes( - pf->vsi[pf->lan_vsi]); + i40e_notify_client_of_l2_param_changes(pf); } i40e_sync_filters_subtask(pf); } else { @@ -10938,7 +11376,7 @@ static void i40e_service_task(struct work_struct *work) **/ static void i40e_service_timer(struct timer_list *t) { - struct i40e_pf *pf = from_timer(pf, t, service_timer); + struct i40e_pf *pf = timer_container_of(pf, t, service_timer); mod_timer(&pf->service_timer, round_jiffies(jiffies + pf->service_timer_period)); @@ -10962,7 +11400,7 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) if (!vsi->num_rx_desc) vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, I40E_REQ_DESCRIPTOR_MULTIPLE); - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) vsi->num_q_vectors = pf->num_lan_msix; else vsi->num_q_vectors = 1; @@ -11280,7 +11718,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ring->count = vsi->num_tx_desc; ring->size = 0; ring->dcb_tc = 0; - if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) + if (test_bit(I40E_HW_CAP_WB_ON_ITR, vsi->back->hw.caps)) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; ring->itr_setting = pf->tx_itr_default; WRITE_ONCE(vsi->tx_rings[i], ring++); @@ -11297,7 +11735,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ring->count = vsi->num_tx_desc; ring->size = 0; ring->dcb_tc = 0; - if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) + if (test_bit(I40E_HW_CAP_WB_ON_ITR, vsi->back->hw.caps)) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; set_ring_xdp(ring); ring->itr_setting = pf->tx_itr_default; @@ -11361,7 +11799,7 @@ static int i40e_init_msix(struct i40e_pf *pf) int v_actual; int iwarp_requested = 0; - if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) + if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) return -ENODEV; /* The number of vectors we'll request will be comprised of: @@ -11400,7 +11838,7 @@ static int i40e_init_msix(struct i40e_pf *pf) vectors_left -= pf->num_lan_msix; /* reserve one vector for sideband flow director */ - if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { if (vectors_left) { pf->num_fdsb_msix = 1; v_budget++; @@ -11411,7 +11849,7 @@ static int i40e_init_msix(struct i40e_pf *pf) } /* can we reserve enough for iWARP? 
*/ - if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { iwarp_requested = pf->num_iwarp_msix; if (!vectors_left) @@ -11423,7 +11861,7 @@ static int i40e_init_msix(struct i40e_pf *pf) } /* any vectors left over go for VMDq support */ - if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { + if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags)) { if (!vectors_left) { pf->num_vmdq_msix = 0; pf->num_vmdq_qps = 0; @@ -11480,7 +11918,7 @@ static int i40e_init_msix(struct i40e_pf *pf) v_actual = i40e_reserve_msix_vectors(pf, v_budget); if (v_actual < I40E_MIN_MSIX) { - pf->flags &= ~I40E_FLAG_MSIX_ENABLED; + clear_bit(I40E_FLAG_MSIX_ENA, pf->flags); kfree(pf->msix_entries); pf->msix_entries = NULL; pci_disable_msix(pf->pdev); @@ -11518,7 +11956,7 @@ static int i40e_init_msix(struct i40e_pf *pf) pf->num_lan_msix = 1; break; case 3: - if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { pf->num_lan_msix = 1; pf->num_iwarp_msix = 1; } else { @@ -11526,7 +11964,7 @@ static int i40e_init_msix(struct i40e_pf *pf) } break; default: - if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { pf->num_iwarp_msix = min_t(int, (vec / 3), iwarp_requested); pf->num_vmdq_vsis = min_t(int, (vec / 3), @@ -11535,7 +11973,7 @@ static int i40e_init_msix(struct i40e_pf *pf) pf->num_vmdq_vsis = min_t(int, (vec / 2), I40E_DEFAULT_NUM_VMDQ_VSI); } - if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { pf->num_fdsb_msix = 1; vec--; } @@ -11547,22 +11985,20 @@ static int i40e_init_msix(struct i40e_pf *pf) } } - if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && - (pf->num_fdsb_msix == 0)) { + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && pf->num_fdsb_msix == 0) { dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n"); - pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; - pf->flags |= I40E_FLAG_FD_SB_INACTIVE; + clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); } - if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && - (pf->num_vmdq_msix == 0)) { + if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags) && pf->num_vmdq_msix == 0) { dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); - pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; + clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags); } - if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && - (pf->num_iwarp_msix == 0)) { + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags) && + pf->num_iwarp_msix == 0) { dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n"); - pf->flags &= ~I40E_FLAG_IWARP_ENABLED; + clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); } i40e_debug(&pf->hw, I40E_DEBUG_INIT, "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n", @@ -11595,8 +12031,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); if (vsi->netdev) - netif_napi_add(vsi->netdev, &q_vector->napi, - i40e_napi_poll, NAPI_POLL_WEIGHT); + netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll); /* tie q_vector and vsi together */ vsi->q_vectors[v_idx] = q_vector; @@ -11617,9 +12052,9 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) int err, v_idx, num_q_vectors; /* if not MSIX, give the one vector only to the LAN VSI */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) num_q_vectors = vsi->num_q_vectors; - else if (vsi == pf->vsi[pf->lan_vsi]) + else if (vsi->type == I40E_VSI_MAIN) num_q_vectors 
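/* The MSI-X sizing above degrades gracefully: when the reservation
 * comes back short, optional consumers are turned off in priority
 * order (sideband flow director, then VMDq/iWARP) instead of failing
 * probe. A greatly simplified budgeting sketch (the driver's real
 * distribution also sizes LAN queues, VMDq VSIs and iWARP counts): */
#include <stdio.h>

struct budget {
	int lan, fdsb, iwarp, vmdq;
};

static struct budget distribute(int vectors)
{
	struct budget b = { 0 };

	b.lan = vectors > 2 ? 2 : 1;	/* LAN queues come first */
	vectors -= b.lan + 1;		/* one more reserved for misc */
	if (vectors-- > 0)
		b.fdsb = 1;		/* then sideband flow director */
	if (vectors-- > 0)
		b.iwarp = 1;
	if (vectors > 0)
		b.vmdq = vectors;	/* leftovers go to VMDq */
	return b;
}

int main(void)
{
	struct budget b = distribute(4);

	printf("lan=%d fdsb=%d iwarp=%d vmdq=%d\n",
	       b.lan, b.fdsb, b.iwarp, b.vmdq);
	if (!b.fdsb)
		printf("Sideband Flowdir disabled, not enough MSI-X vectors\n");
	return 0;
}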
= 1; else return -EINVAL; @@ -11648,38 +12083,39 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) int vectors = 0; ssize_t size; - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { vectors = i40e_init_msix(pf); if (vectors < 0) { - pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | - I40E_FLAG_IWARP_ENABLED | - I40E_FLAG_RSS_ENABLED | - I40E_FLAG_DCB_CAPABLE | - I40E_FLAG_DCB_ENABLED | - I40E_FLAG_SRIOV_ENABLED | - I40E_FLAG_FD_SB_ENABLED | - I40E_FLAG_FD_ATR_ENABLED | - I40E_FLAG_VMDQ_ENABLED); - pf->flags |= I40E_FLAG_FD_SB_INACTIVE; + clear_bit(I40E_FLAG_MSIX_ENA, pf->flags); + clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); + clear_bit(I40E_FLAG_RSS_ENA, pf->flags); + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); + clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags); + clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags); + set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); /* rework the queue expectations without MSIX */ i40e_determine_queue_usage(pf); } } - if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && - (pf->flags & I40E_FLAG_MSI_ENABLED)) { + if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && + test_bit(I40E_FLAG_MSI_ENA, pf->flags)) { dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); vectors = pci_enable_msi(pf->pdev); if (vectors < 0) { dev_info(&pf->pdev->dev, "MSI init failed - %d\n", vectors); - pf->flags &= ~I40E_FLAG_MSI_ENABLED; + clear_bit(I40E_FLAG_MSI_ENA, pf->flags); } vectors = 1; /* one MSI or Legacy vector */ } - if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) + if (!test_bit(I40E_FLAG_MSI_ENA, pf->flags) && + !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); /* set up vector assignment tracking */ @@ -11689,7 +12125,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) return -ENOMEM; pf->irq_pile->num_entries = vectors; - pf->irq_pile->search_hint = 0; /* track first vector for misc interrupts, ignore return */ (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); @@ -11707,13 +12142,15 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) */ static int i40e_restore_interrupt_scheme(struct i40e_pf *pf) { + struct i40e_vsi *vsi; int err, i; /* We cleared the MSI and MSI-X flags when disabling the old interrupt * scheme. We need to re-enabled them here in order to attempt to * re-acquire the MSI or MSI-X vectors */ - pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); + set_bit(I40E_FLAG_MSI_ENA, pf->flags); + set_bit(I40E_FLAG_MSIX_ENA, pf->flags); err = i40e_init_interrupt_scheme(pf); if (err) @@ -11722,20 +12159,19 @@ static int i40e_restore_interrupt_scheme(struct i40e_pf *pf) /* Now that we've re-acquired IRQs, we need to remap the vectors and * rings together again. 
*/ - for (i = 0; i < pf->num_alloc_vsi; i++) { - if (pf->vsi[i]) { - err = i40e_vsi_alloc_q_vectors(pf->vsi[i]); - if (err) - goto err_unwind; - i40e_vsi_map_rings_to_vectors(pf->vsi[i]); - } + i40e_pf_for_each_vsi(pf, i, vsi) { + err = i40e_vsi_alloc_q_vectors(vsi); + if (err) + goto err_unwind; + + i40e_vsi_map_rings_to_vectors(vsi); } err = i40e_setup_misc_vector(pf); if (err) goto err_unwind; - if (pf->flags & I40E_FLAG_IWARP_ENABLED) + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) i40e_client_update_msix_info(pf); return 0; @@ -11763,7 +12199,7 @@ static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf) { int err; - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { err = i40e_setup_misc_vector(pf); if (err) { @@ -11773,7 +12209,7 @@ static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf) return err; } } else { - u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED; + u32 flags = test_bit(I40E_FLAG_MSI_ENA, pf->flags) ? 0 : IRQF_SHARED; err = request_irq(pf->pdev->irq, i40e_intr, flags, pf->int_name, pf); @@ -11851,10 +12287,9 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, (struct i40e_aqc_get_set_rss_key_data *)seed); if (ret) { dev_info(&pf->pdev->dev, - "Cannot get RSS key, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + "Cannot get RSS key, err %pe aq_err %s\n", + ERR_PTR(ret), + libie_aq_str(pf->hw.aq.asq_last_status)); return ret; } } @@ -11865,10 +12300,9 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); if (ret) { dev_info(&pf->pdev->dev, - "Cannot get RSS lut, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + "Cannot get RSS lut, err %pe aq_err %s\n", + ERR_PTR(ret), + libie_aq_str(pf->hw.aq.asq_last_status)); return ret; } } @@ -11977,7 +12411,7 @@ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; - if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) + if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps)) return i40e_config_rss_aq(vsi, seed, lut, lut_size); else return i40e_config_rss_reg(vsi, seed, lut, lut_size); @@ -11996,7 +12430,7 @@ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; - if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) + if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps)) return i40e_get_rss_aq(vsi, seed, lut, lut_size); else return i40e_get_rss_reg(vsi, seed, lut, lut_size); @@ -12024,7 +12458,7 @@ void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, **/ static int i40e_pf_config_rss(struct i40e_pf *pf) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); u8 seed[I40E_HKEY_ARRAY_SIZE]; u8 *lut; struct i40e_hw *hw = &pf->hw; @@ -12035,7 +12469,7 @@ static int i40e_pf_config_rss(struct i40e_pf *pf) /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); - hena |= i40e_pf_get_default_rss_hena(pf); + hena |= i40e_pf_get_default_rss_hashcfg(pf); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); @@ -12096,10 +12530,10 @@ static int i40e_pf_config_rss(struct i40e_pf *pf) **/ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) { - struct i40e_vsi 
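/* i40e_fill_rss_lut(), referenced above, stripes queue indexes across
 * the RSS lookup table. A sketch of the round-robin fill it performs
 * (modulo striping as in the driver; sizes illustrative): */
#include <stdio.h>

static void fill_rss_lut(unsigned char *lut, unsigned int lut_size,
			 unsigned int rss_size)
{
	for (unsigned int i = 0; i < lut_size; i++)
		lut[i] = (unsigned char)(i % rss_size);
}

int main(void)
{
	unsigned char lut[16];

	fill_rss_lut(lut, sizeof(lut), 4);
	for (unsigned int i = 0; i < sizeof(lut); i++)
		printf("%u ", lut[i]);	/* 0 1 2 3 0 1 2 3 ... */
	printf("\n");
	return 0;
}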
*vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); int new_rss_size; - if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) + if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags)) return 0; queue_count = min_t(int, queue_count, num_online_cpus()); @@ -12110,6 +12544,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) vsi->req_queue_pairs = queue_count; i40e_prep_for_reset(pf); + if (test_bit(__I40E_IN_REMOVE, pf->state)) + return pf->alloc_rss_size; pf->alloc_rss_size = new_rss_size; @@ -12139,11 +12575,11 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition * @pf: board private structure **/ -i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf) +int i40e_get_partition_bw_setting(struct i40e_pf *pf) { - i40e_status status; bool min_valid, max_valid; u32 max_bw, min_bw; + int status; status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, &min_valid, &max_valid); @@ -12162,10 +12598,10 @@ i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf) * i40e_set_partition_bw_setting - Set BW settings for this PF partition * @pf: board private structure **/ -i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf) +int i40e_set_partition_bw_setting(struct i40e_pf *pf) { struct i40e_aqc_configure_partition_bw_data bw_data; - i40e_status status; + int status; memset(&bw_data, 0, sizeof(bw_data)); @@ -12181,89 +12617,6 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf) } /** - * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition - * @pf: board private structure - **/ -i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf) -{ - /* Commit temporary BW setting to permanent NVM image */ - enum i40e_admin_queue_err last_aq_status; - i40e_status ret; - u16 nvm_word; - - if (pf->hw.partition_id != 1) { - dev_info(&pf->pdev->dev, - "Commit BW only works on partition 1! This is partition %d", - pf->hw.partition_id); - ret = I40E_NOT_SUPPORTED; - goto bw_commit_out; - } - - /* Acquire NVM for read access */ - ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); - last_aq_status = pf->hw.aq.asq_last_status; - if (ret) { - dev_info(&pf->pdev->dev, - "Cannot acquire NVM for read access, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, last_aq_status)); - goto bw_commit_out; - } - - /* Read word 0x10 of NVM - SW compatibility word 1 */ - ret = i40e_aq_read_nvm(&pf->hw, - I40E_SR_NVM_CONTROL_WORD, - 0x10, sizeof(nvm_word), &nvm_word, - false, NULL); - /* Save off last admin queue command status before releasing - * the NVM - */ - last_aq_status = pf->hw.aq.asq_last_status; - i40e_release_nvm(&pf->hw); - if (ret) { - dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, last_aq_status)); - goto bw_commit_out; - } - - /* Wait a bit for NVM release to complete */ - msleep(50); - - /* Acquire NVM for write access */ - ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); - last_aq_status = pf->hw.aq.asq_last_status; - if (ret) { - dev_info(&pf->pdev->dev, - "Cannot acquire NVM for write access, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, last_aq_status)); - goto bw_commit_out; - } - /* Write it back out unchanged to initiate update NVM, - * which will force a write of the shadow (alt) RAM to - * the NVM - thus storing the bandwidth values permanently. 
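/* i40e_reconfig_rss_queues() above clamps the requested count to the
 * online CPU count before resetting into the new RSS width, and now
 * bails out early when the device is being removed. Sketch of the
 * clamp-and-apply flow (reset sequence elided; stand-in CPU count): */
#include <stdio.h>

static int num_online_cpus(void)
{
	return 8;	/* stand-in for the kernel helper */
}

static int reconfig_rss_queues(int *alloc_rss_size, int queue_count)
{
	if (queue_count > num_online_cpus())
		queue_count = num_online_cpus();
	if (queue_count != *alloc_rss_size) {
		/* driver: i40e_prep_for_reset(); return early if
		 * __I40E_IN_REMOVE; then rebuild with the new size */
		*alloc_rss_size = queue_count;
	}
	return *alloc_rss_size;
}

int main(void)
{
	int rss = 4;

	printf("rss=%d\n", reconfig_rss_queues(&rss, 16));	/* 8 */
	return 0;
}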
- */ - ret = i40e_aq_update_nvm(&pf->hw, - I40E_SR_NVM_CONTROL_WORD, - 0x10, sizeof(nvm_word), - &nvm_word, true, 0, NULL); - /* Save off last admin queue command status before releasing - * the NVM - */ - last_aq_status = pf->hw.aq.asq_last_status; - i40e_release_nvm(&pf->hw); - if (ret) - dev_info(&pf->pdev->dev, - "BW settings NOT SAVED, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, last_aq_status)); -bw_commit_out: - - return ret; -} - -/** * i40e_is_total_port_shutdown_enabled - read NVM and return value * if total port shutdown feature is enabled for this PF * @pf: board private structure @@ -12277,10 +12630,10 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf) #define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1 #define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0) #define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4 - i40e_status read_status = I40E_SUCCESS; u16 sr_emp_sr_settings_ptr = 0; u16 features_enable = 0; u16 link_behavior = 0; + int read_status = 0; bool ret = false; read_status = i40e_read_nvm_word(&pf->hw, @@ -12310,8 +12663,8 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf) err_nvm: dev_warn(&pf->pdev->dev, - "total-port-shutdown feature is off due to read nvm error: %s\n", - i40e_stat_str(&pf->hw, read_status)); + "total-port-shutdown feature is off due to read nvm error: %pe\n", + ERR_PTR(read_status)); return ret; } @@ -12330,9 +12683,9 @@ static int i40e_sw_init(struct i40e_pf *pf) u16 pow; /* Set default capability flags */ - pf->flags = I40E_FLAG_RX_CSUM_ENABLED | - I40E_FLAG_MSI_ENABLED | - I40E_FLAG_MSIX_ENABLED; + bitmap_zero(pf->flags, I40E_PF_FLAGS_NBITS); + set_bit(I40E_FLAG_MSI_ENA, pf->flags); + set_bit(I40E_FLAG_MSIX_ENA, pf->flags); /* Set default ITR */ pf->rx_itr_default = I40E_ITR_RX_DEF; @@ -12352,14 +12705,14 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->rss_size_max = min_t(int, pf->rss_size_max, pow); if (pf->hw.func_caps.rss) { - pf->flags |= I40E_FLAG_RSS_ENABLED; + set_bit(I40E_FLAG_RSS_ENA, pf->flags); pf->alloc_rss_size = min_t(int, pf->rss_size_max, num_online_cpus()); } /* MFP mode enabled */ if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { - pf->flags |= I40E_FLAG_MFP_ENABLED; + set_bit(I40E_FLAG_MFP_ENA, pf->flags); dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); if (i40e_get_partition_bw_setting(pf)) { dev_warn(&pf->pdev->dev, @@ -12376,84 +12729,31 @@ static int i40e_sw_init(struct i40e_pf *pf) if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || (pf->hw.func_caps.fd_filters_best_effort > 0)) { - pf->flags |= I40E_FLAG_FD_ATR_ENABLED; - pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; - if (pf->flags & I40E_FLAG_MFP_ENABLED && + set_bit(I40E_FLAG_FD_ATR_ENA, pf->flags); + if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) && pf->hw.num_partitions > 1) dev_info(&pf->pdev->dev, "Flow Director Sideband mode Disabled in MFP mode\n"); else - pf->flags |= I40E_FLAG_FD_SB_ENABLED; + set_bit(I40E_FLAG_FD_SB_ENA, pf->flags); pf->fdir_pf_filter_count = pf->hw.func_caps.fd_filters_guaranteed; pf->hw.fdir_shared_filter_count = pf->hw.func_caps.fd_filters_best_effort; } - if (pf->hw.mac.type == I40E_MAC_X722) { - pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE | - I40E_HW_128_QP_RSS_CAPABLE | - I40E_HW_ATR_EVICT_CAPABLE | - I40E_HW_WB_ON_ITR_CAPABLE | - I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE | - I40E_HW_NO_PCI_LINK_CHECK | - I40E_HW_USE_SET_LLDP_MIB | - I40E_HW_GENEVE_OFFLOAD_CAPABLE | - I40E_HW_PTP_L4_CAPABLE | - I40E_HW_WOL_MC_MAGIC_PKT_WAKE | - I40E_HW_OUTER_UDP_CSUM_CAPABLE); - 
-#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03 - if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) != - I40E_FDEVICT_PCTYPE_DEFAULT) { - dev_warn(&pf->pdev->dev, - "FD EVICT PCTYPES are not right, disable FD HW EVICT\n"); - pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE; - } - } else if ((pf->hw.aq.api_maj_ver > 1) || - ((pf->hw.aq.api_maj_ver == 1) && - (pf->hw.aq.api_min_ver > 4))) { - /* Supported in FW API version higher than 1.4 */ - pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE; - } - /* Enable HW ATR eviction if possible */ - if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE) - pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED; - - if ((pf->hw.mac.type == I40E_MAC_XL710) && - (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || - (pf->hw.aq.fw_maj_ver < 4))) { - pf->hw_features |= I40E_HW_RESTART_AUTONEG; - /* No DCB support for FW < v4.33 */ - pf->hw_features |= I40E_HW_NO_DCB_SUPPORT; - } - - /* Disable FW LLDP if FW < v4.3 */ - if ((pf->hw.mac.type == I40E_MAC_XL710) && - (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || - (pf->hw.aq.fw_maj_ver < 4))) - pf->hw_features |= I40E_HW_STOP_FW_LLDP; - - /* Use the FW Set LLDP MIB API if FW > v4.40 */ - if ((pf->hw.mac.type == I40E_MAC_XL710) && - (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || - (pf->hw.aq.fw_maj_ver >= 5))) - pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB; - - /* Enable PTP L4 if FW > v6.0 */ - if (pf->hw.mac.type == I40E_MAC_XL710 && - pf->hw.aq.fw_maj_ver >= 6) - pf->hw_features |= I40E_HW_PTP_L4_CAPABLE; + if (test_bit(I40E_HW_CAP_ATR_EVICT, pf->hw.caps)) + set_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags); if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) { pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; - pf->flags |= I40E_FLAG_VMDQ_ENABLED; + set_bit(I40E_FLAG_VMDQ_ENA, pf->flags); pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); } if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) { - pf->flags |= I40E_FLAG_IWARP_ENABLED; + set_bit(I40E_FLAG_IWARP_ENA, pf->flags); /* IWARP needs one extra vector for CQP just like MISC.*/ pf->num_iwarp_msix = (int)num_online_cpus() + 1; } @@ -12463,25 +12763,23 @@ static int i40e_sw_init(struct i40e_pf *pf) * if NPAR is functioning so unset this hw flag in this case. 
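
The flag work in these hunks moves pf->flags from a plain bitmask to the kernel's bitmap API (DECLARE_BITMAP() with set_bit()/clear_bit()/test_bit()), which scales past 64 flags and gives atomic updates. A minimal userspace model of that API, assuming simplified non-atomic helpers; the real versions live in <linux/bitmap.h>:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG   (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

enum { FLAG_MSI_ENA, FLAG_MSIX_ENA, FLAG_RSS_ENA, FLAGS_NBITS };

/* non-atomic models of the kernel helpers */
static void set_bit(int nr, unsigned long *map)
{
        map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static bool test_bit(int nr, const unsigned long *map)
{
        return map[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG));
}

int main(void)
{
        DECLARE_BITMAP(flags, FLAGS_NBITS) = { 0 };

        set_bit(FLAG_MSIX_ENA, flags);
        printf("rss=%d msix=%d\n",
               test_bit(FLAG_RSS_ENA, flags), test_bit(FLAG_MSIX_ENA, flags));
        return 0;
}
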
*/ if (pf->hw.mac.type == I40E_MAC_XL710 && - pf->hw.func_caps.npar_enable && - (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) - pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE; + pf->hw.func_caps.npar_enable) + clear_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, pf->hw.caps); #ifdef CONFIG_PCI_IOV if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; - pf->flags |= I40E_FLAG_SRIOV_ENABLED; + set_bit(I40E_FLAG_SRIOV_ENA, pf->flags); pf->num_req_vfs = min_t(int, pf->hw.func_caps.num_vfs, I40E_MAX_VF_COUNT); } #endif /* CONFIG_PCI_IOV */ - pf->eeprom_version = 0xDEAD; pf->lan_veb = I40E_NO_VEB; pf->lan_vsi = I40E_NO_VSI; /* By default FW has this off for performance reasons */ - pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; + clear_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags); /* set up queue assignment tracking */ size = sizeof(struct i40e_lump_tracking) @@ -12492,7 +12790,6 @@ static int i40e_sw_init(struct i40e_pf *pf) goto sw_init_done; } pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; - pf->qp_pile->search_hint = 0; pf->tx_timeout_recovery_level = 1; @@ -12501,8 +12798,8 @@ static int i40e_sw_init(struct i40e_pf *pf) /* Link down on close must be on when total port shutdown * is enabled for a given port */ - pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED | - I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED); + set_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); + set_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); dev_info(&pf->pdev->dev, "total-port-shutdown was enabled, link-down-on-close is forced on\n"); } @@ -12528,31 +12825,31 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) */ if (features & NETIF_F_NTUPLE) { /* Enable filters and mark for reset */ - if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) need_reset = true; /* enable FD_SB only if there is MSI-X vector and no cloud * filters exist */ if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) { - pf->flags |= I40E_FLAG_FD_SB_ENABLED; - pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; + set_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); } } else { /* turn off filters, mark for reset and clear SW filter list */ - if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { need_reset = true; i40e_fdir_filter_exit(pf); } - pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state); - pf->flags |= I40E_FLAG_FD_SB_INACTIVE; + set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); /* reset fd counters */ pf->fd_add_err = 0; pf->fd_atr_cnt = 0; /* if ATR was auto disabled it can be re-enabled. 
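
i40e_set_ntuple() above derives need_reset from whether the FD sideband state actually changes, and i40e_set_features() below spots toggled bits with features ^ netdev->features. A small sketch of that changed-bit test; F_NTUPLE and F_LOOPBACK are hypothetical stand-ins for the NETIF_F_* constants:

#include <stdbool.h>
#include <stdio.h>

#define F_NTUPLE   (1ULL << 0) /* hypothetical stand-ins for NETIF_F_* */
#define F_LOOPBACK (1ULL << 1)

int main(void)
{
        unsigned long long old_features = F_NTUPLE;
        unsigned long long new_features = F_LOOPBACK;
        unsigned long long changed = old_features ^ new_features;

        /* a toggled bit means the device needs reconfiguration */
        bool need_reset = changed & F_NTUPLE;

        if (changed & F_LOOPBACK)
                printf("loopback now %s\n",
                       (new_features & F_LOOPBACK) ? "on" : "off");
        printf("need_reset=%d\n", need_reset);
        return 0;
}
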
*/ if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) && (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); } @@ -12582,6 +12879,29 @@ static void i40e_clear_rss_lut(struct i40e_vsi *vsi) } /** + * i40e_set_loopback - turn on/off loopback mode on underlying PF + * @vsi: ptr to VSI + * @ena: flag to indicate the on/off setting + */ +static int i40e_set_loopback(struct i40e_vsi *vsi, bool ena) +{ + bool if_running = netif_running(vsi->netdev) && + !test_and_set_bit(__I40E_VSI_DOWN, vsi->state); + int ret; + + if (if_running) + i40e_down(vsi); + + ret = i40e_aq_set_mac_loopback(&vsi->back->hw, ena, NULL); + if (ret) + netdev_err(vsi->netdev, "Failed to toggle loopback state\n"); + if (if_running) + i40e_up(vsi); + + return ret; +} + +/** * i40e_set_features - set the netdev feature flags * @netdev: ptr to the netdev being adjusted * @features: the feature set that the stack is suggesting @@ -12606,7 +12926,8 @@ static int i40e_set_features(struct net_device *netdev, else i40e_vlan_stripping_disable(vsi); - if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) { + if (!(features & NETIF_F_HW_TC) && + (netdev->features & NETIF_F_HW_TC) && pf->num_cloud_filters) { dev_err(&pf->pdev->dev, "Offloaded tc filters active, can't turn hw_tc_offload off"); return -EINVAL; @@ -12620,6 +12941,9 @@ static int i40e_set_features(struct net_device *netdev, if (need_reset) i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); + if ((features ^ netdev->features) & NETIF_F_LOOPBACK) + return i40e_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK)); + return 0; } @@ -12630,7 +12954,7 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; u8 type, filter_index; - i40e_status ret; + int ret; type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? 
I40E_AQC_TUNNEL_TYPE_VXLAN : I40E_AQC_TUNNEL_TYPE_NGE; @@ -12638,9 +12962,8 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev, ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index, NULL); if (ret) { - netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n", - i40e_stat_str(hw, ret), - i40e_aq_str(hw, hw->aq.asq_last_status)); + netdev_info(netdev, "add UDP port failed, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status)); return -EIO; } @@ -12654,13 +12977,12 @@ static int i40e_udp_tunnel_unset_port(struct net_device *netdev, { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; - i40e_status ret; + int ret; ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL); if (ret) { - netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n", - i40e_stat_str(hw, ret), - i40e_aq_str(hw, hw->aq.asq_last_status)); + netdev_info(netdev, "delete UDP port failed, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status)); return -EIO; } @@ -12674,7 +12996,7 @@ static int i40e_get_phys_port_id(struct net_device *netdev, struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; - if (!(pf->hw_features & I40E_HW_PORT_ID_VALID)) + if (!test_bit(I40E_HW_CAP_PORT_ID_VALID, pf->hw.caps)) return -EOPNOTSUPP; ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id)); @@ -12691,19 +13013,20 @@ static int i40e_get_phys_port_id(struct net_device *netdev, * @addr: the MAC address entry being added * @vid: VLAN ID * @flags: instructions from stack about fdb operation + * @notified: whether notification was emitted * @extack: netlink extended ack, unused currently */ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, - u16 flags, + u16 flags, bool *notified, struct netlink_ext_ack *extack) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_pf *pf = np->vsi->back; int err = 0; - if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) + if (!test_bit(I40E_FLAG_SRIOV_ENA, pf->flags)) return -EOPNOTSUPP; if (vid) { @@ -12757,36 +13080,31 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - struct i40e_veb *veb = NULL; struct nlattr *attr, *br_spec; - int i, rem; + struct i40e_veb *veb; + int rem; /* Only for PF VSI for now */ - if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) + if (vsi->type != I40E_VSI_MAIN) return -EOPNOTSUPP; /* Find the HW bridge for PF VSI */ - for (i = 0; i < I40E_MAX_VEB && !veb; i++) { - if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) - veb = pf->veb[i]; - } + veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid); br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; - nla_for_each_nested(attr, br_spec, rem) { - __u16 mode; - - if (nla_type(attr) != IFLA_BRIDGE_MODE) - continue; + nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) { + __u16 mode = nla_get_u16(attr); - mode = nla_get_u16(attr); if ((mode != BRIDGE_MODE_VEPA) && (mode != BRIDGE_MODE_VEB)) return -EINVAL; /* Insert a new HW bridge */ if (!veb) { - veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, + veb = i40e_veb_setup(pf, vsi->uplink_seid, vsi->seid, vsi->tc_config.enabled_tc); if (veb) { veb->bridge_mode = mode; @@ -12801,9 +13119,9 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, veb->bridge_mode = mode; /* 
TODO: If no VFs or VMDq VSIs, disallow VEB mode */ if (mode == BRIDGE_MODE_VEB) - pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); else - pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); break; } @@ -12832,19 +13150,14 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - struct i40e_veb *veb = NULL; - int i; + struct i40e_veb *veb; /* Only for PF VSI for now */ - if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) + if (vsi->type != I40E_VSI_MAIN) return -EOPNOTSUPP; /* Find the HW bridge for the PF VSI */ - for (i = 0; i < I40E_MAX_VEB && !veb; i++) { - if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) - veb = pf->veb[i]; - } - + veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid); if (!veb) return 0; @@ -12878,12 +13191,12 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb, features &= ~NETIF_F_GSO_MASK; /* MACLEN can support at most 63 words */ - len = skb_network_header(skb) - skb->data; + len = skb_network_offset(skb); if (len & ~(63 * 2)) goto out_err; /* IPLEN and EIPLEN can support at most 127 dwords */ - len = skb_transport_header(skb) - skb_network_header(skb); + len = skb_network_header_len(skb); if (len & ~(127 * 4)) goto out_err; @@ -12901,7 +13214,7 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb, } /* No need to validate L4LEN as TCP is the only protocol with a - * a flexible value and we support all possible values supported + * flexible value and we support all possible values supported * by TCP, which is at most 15 dwords */ @@ -12919,33 +13232,46 @@ out_err: static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, struct netlink_ext_ack *extack) { - int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + int frame_size = i40e_max_vsi_frame_size(vsi, prog); struct i40e_pf *pf = vsi->back; struct bpf_prog *old_prog; bool need_reset; int i; + /* VSI shall be deleted in a moment, block loading new programs */ + if (prog && test_bit(__I40E_IN_REMOVE, pf->state)) + return -EINVAL; + /* Don't allow frames that span over multiple buffers */ - if (frame_size > vsi->rx_buf_len) { - NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP"); + if (vsi->netdev->mtu > frame_size - I40E_PACKET_HDR_PAD) { + NL_SET_ERR_MSG_MOD(extack, "MTU too large for linear frames and XDP prog does not support frags"); return -EINVAL; } /* When turning XDP on->off/off->on we reset and rebuild the rings. */ need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog); - if (need_reset) i40e_prep_for_reset(pf); old_prog = xchg(&vsi->xdp_prog, prog); if (need_reset) { - if (!prog) + if (!prog) { + xdp_features_clear_redirect_target(vsi->netdev); /* Wait until ndo_xsk_wakeup completes. */ synchronize_rcu(); + } i40e_reset_and_rebuild(pf, true, true); } + if (!i40e_enabled_xdp_vsi(vsi) && prog) { + if (i40e_realloc_rx_bi_zc(vsi, true)) + return -ENOMEM; + } else if (i40e_enabled_xdp_vsi(vsi) && !prog) { + if (i40e_realloc_rx_bi_zc(vsi, false)) + return -ENOMEM; + } + for (i = 0; i < vsi->num_queue_pairs; i++) WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); @@ -12955,11 +13281,13 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, /* Kick start the NAPI context if there is an AF_XDP socket open * on that queue id. This so that receiving will start. 
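
The i40e_features_check() hunk above encodes hardware header limits: MACLEN is carried as at most 63 two-byte words, so len & ~(63 * 2) is nonzero whenever the L2 header length is odd or exceeds 126 bytes (IPLEN gets the analogous 127-dword test). A sketch of that mask check:

#include <stdbool.h>
#include <stdio.h>

/* Model of the MACLEN check above: the hardware field counts 2-byte
 * words, at most 63 of them, so a valid L2 header length must be even
 * and no larger than 126 bytes.
 */
static bool maclen_ok(unsigned int len)
{
        return (len & ~(63u * 2)) == 0;
}

int main(void)
{
        printf("14:  %d\n", maclen_ok(14));     /* plain Ethernet: ok */
        printf("15:  %d\n", maclen_ok(15));     /* odd length: rejected */
        printf("128: %d\n", maclen_ok(128));    /* too long: rejected */
        return 0;
}
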
*/ - if (need_reset && prog) + if (need_reset && prog) { for (i = 0; i < vsi->num_queue_pairs; i++) if (vsi->xdp_rings[i]->xsk_pool) (void)i40e_xsk_wakeup(vsi->netdev, i, XDP_WAKEUP_RX); + xdp_features_set_redirect_target(vsi->netdev, true); + } return 0; } @@ -13121,7 +13449,7 @@ static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair) struct i40e_hw *hw = &pf->hw; /* All rings in a qp belong to the same qvector. */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx); else i40e_irq_dynamic_enable_icr0(pf); @@ -13146,7 +13474,7 @@ static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair) * * All rings in a qp belong to the same qvector. */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { u32 intpf = vsi->base_vector + rxr->q_vector->v_idx; wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0); @@ -13177,8 +13505,9 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair) return err; i40e_queue_pair_disable_irq(vsi, queue_pair); - err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */); i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */); + err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */); + i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); i40e_queue_pair_clean_rings(vsi, queue_pair); i40e_queue_pair_reset_stats(vsi, queue_pair); @@ -13253,7 +13582,6 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = i40e_set_mac, .ndo_change_mtu = i40e_change_mtu, - .ndo_do_ioctl = i40e_ioctl, .ndo_tx_timeout = i40e_tx_timeout, .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, @@ -13261,6 +13589,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_poll_controller = i40e_netpoll, #endif .ndo_setup_tc = __i40e_setup_tc, + .ndo_select_queue = i40e_lan_select_queue, .ndo_set_features = i40e_set_features, .ndo_set_vf_mac = i40e_ndo_set_vf_mac, .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, @@ -13280,6 +13609,8 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_xsk_wakeup = i40e_xsk_wakeup, .ndo_dfwd_add_station = i40e_fwd_add, .ndo_dfwd_del_station = i40e_fwd_del, + .ndo_hwtstamp_get = i40e_ptp_hwtstamp_get, + .ndo_hwtstamp_set = i40e_ptp_hwtstamp_set, }; /** @@ -13310,8 +13641,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) np->vsi = vsi; hw_enc_features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | + NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_SOFT_FEATURES | NETIF_F_TSO | @@ -13330,7 +13660,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_RXCSUM | 0; - if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE)) + if (!test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps)) netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic; @@ -13342,6 +13672,23 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) /* record features VLANs can make use of */ netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; +#define I40E_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | + I40E_GSO_PARTIAL_FEATURES; + + netdev->mpls_features |= NETIF_F_SG; + 
netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->mpls_features |= NETIF_F_TSO; + netdev->mpls_features |= NETIF_F_TSO6; + netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES; + /* enable macvlan offloads */ netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; @@ -13349,14 +13696,16 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; - if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) + if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags)) hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; - netdev->hw_features |= hw_features; + netdev->hw_features |= hw_features | NETIF_F_LOOPBACK; netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; + netdev->features &= ~NETIF_F_HW_TC; + if (vsi->type == I40E_VSI_MAIN) { SET_NETDEV_DEV(netdev, &pf->pdev->dev); ether_addr_copy(mac_addr, hw->mac.perm_addr); @@ -13374,15 +13723,22 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_add_mac_filter(vsi, mac_addr); spin_unlock_bh(&vsi->mac_filter_hash_lock); + + netdev->xdp_features = NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_XSK_ZEROCOPY | + NETDEV_XDP_ACT_RX_SG; + netdev->xdp_zc_max_segs = I40E_MAX_BUFFER_TXD; } else { /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to * the end, which is 4 bytes long, so force truncation of the * original name by IFNAMSIZ - 4 */ - snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d", - IFNAMSIZ - 4, - pf->vsi[pf->lan_vsi]->netdev->name); + struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf); + + snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d", IFNAMSIZ - 4, + main_vsi->netdev->name); eth_random_addr(mac_addr); spin_lock_bh(&vsi->mac_filter_hash_lock); @@ -13408,7 +13764,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) i40e_add_mac_filter(vsi, broadcast); spin_unlock_bh(&vsi->mac_filter_hash_lock); - ether_addr_copy(netdev->dev_addr, mac_addr); + eth_hw_addr_set(netdev, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); /* i40iw_net_event() reads 16 bytes from neigh->primary_key */ @@ -13514,10 +13870,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.flags = I40E_AQ_VSI_TYPE_PF; if (ret) { dev_info(&pf->pdev->dev, - "couldn't get PF vsi config, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + "couldn't get PF vsi config, err %pe aq_err %s\n", + ERR_PTR(ret), + libie_aq_str(pf->hw.aq.asq_last_status)); return -ENOENT; } vsi->info = ctxt.info; @@ -13532,7 +13887,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) * negative logic - if it's set, we need to fiddle with * the VSI to disable source pruning. 
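
The VMDq naming code above reserves room for a "v%d" suffix by truncating the base interface name with a "%.*s" precision of IFNAMSIZ - 4. A userspace sketch of the same snprintf idiom:

#include <stdio.h>

#define IFNAMSIZ 16

int main(void)
{
        char templ[IFNAMSIZ];
        char name[IFNAMSIZ];
        const char *base = "averylongnetdevname";

        /* keep at most IFNAMSIZ - 4 bytes of the base so "v", a queue
         * number and the terminator still fit
         */
        snprintf(templ, IFNAMSIZ, "%.*sv%%d", IFNAMSIZ - 4, base);
        snprintf(name, IFNAMSIZ, templ, 3);
        printf("%s -> %s\n", templ, name);      /* averylongnetv%d -> averylongnetv3 */
        return 0;
}
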
*/ - if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) { + if (test_bit(I40E_FLAG_SOURCE_PRUNING_DIS, pf->flags)) { memset(&ctxt, 0, sizeof(ctxt)); ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; @@ -13544,17 +13899,16 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, - "update vsi failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + "update vsi failed, err %d aq_err %s\n", + ret, + libie_aq_str(pf->hw.aq.asq_last_status)); ret = -ENOENT; goto err; } } /* MFP mode setup queue map and update VSI */ - if ((pf->flags & I40E_FLAG_MFP_ENABLED) && + if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) && !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ memset(&ctxt, 0, sizeof(ctxt)); ctxt.seid = pf->main_vsi_seid; @@ -13564,10 +13918,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, - "update vsi failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + "update vsi failed, err %pe aq_err %s\n", + ERR_PTR(ret), + libie_aq_str(pf->hw.aq.asq_last_status)); ret = -ENOENT; goto err; } @@ -13587,11 +13940,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) * message and continue */ dev_info(&pf->pdev->dev, - "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n", + "failed to configure TCs for main VSI tc_map 0x%08x, err %pe aq_err %s\n", enabled_tc, - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + ERR_PTR(ret), + libie_aq_str(pf->hw.aq.asq_last_status)); } } break; @@ -13602,7 +13954,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.uplink_seid = vsi->uplink_seid; ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_PF; - if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && + if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags) && (i40e_is_vsi_uplink_mode_veb(vsi))) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); @@ -13650,7 +14002,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); } - if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { + if (test_bit(I40E_FLAG_IWARP_ENA, vsi->back->flags)) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); ctxt.info.queueing_opt_flags |= @@ -13683,10 +14035,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = i40e_aq_add_vsi(hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, - "add vsi failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + "add vsi failed, err %pe aq_err %s\n", + ERR_PTR(ret), + libie_aq_str(pf->hw.aq.asq_last_status)); ret = -ENOENT; goto err; } @@ -13696,15 +14047,15 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) vsi->id = ctxt.vsi_number; } - vsi->active_filters = 0; - clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); spin_lock_bh(&vsi->mac_filter_hash_lock); + vsi->active_filters = 0; /* If macvlan filters already exist, force them to get loaded */ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { f->state = I40E_FILTER_NEW; f_count++; } spin_unlock_bh(&vsi->mac_filter_hash_lock); + clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); if (f_count) { vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; @@ -13715,9 +14066,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = i40e_vsi_get_bw_info(vsi); if 
(ret) { dev_info(&pf->pdev->dev, - "couldn't get vsi bw info, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "couldn't get vsi bw info, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status)); /* VSI is already added so not tearing that up */ ret = 0; } @@ -13736,7 +14086,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi) { struct i40e_mac_filter *f; struct hlist_node *h; - struct i40e_veb *veb = NULL; + struct i40e_veb *veb; struct i40e_pf *pf; u16 uplink_seid; int i, n, bkt; @@ -13749,13 +14099,13 @@ int i40e_vsi_release(struct i40e_vsi *vsi) vsi->seid, vsi->uplink_seid); return -ENODEV; } - if (vsi == pf->vsi[pf->lan_vsi] && - !test_bit(__I40E_DOWN, pf->state)) { + if (vsi->type == I40E_VSI_MAIN && !test_bit(__I40E_DOWN, pf->state)) { dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); return -ENODEV; } - + set_bit(__I40E_VSI_RELEASING, vsi->state); uplink_seid = vsi->uplink_seid; + if (vsi->type != I40E_VSI_SRIOV) { if (vsi->netdev_registered) { vsi->netdev_registered = false; @@ -13769,6 +14119,9 @@ int i40e_vsi_release(struct i40e_vsi *vsi) i40e_vsi_disable_irq(vsi); } + if (vsi->type == I40E_VSI_MAIN) + i40e_devlink_destroy_port(pf); + spin_lock_bh(&vsi->mac_filter_hash_lock); /* clear the sync flag on all filters */ @@ -13796,29 +14149,28 @@ int i40e_vsi_release(struct i40e_vsi *vsi) /* If this was the last thing on the VEB, except for the * controlling VSI, remove the VEB, which puts the controlling - * VSI onto the next level down in the switch. + * VSI onto the uplink port. * * Well, okay, there's one more exception here: don't remove - * the orphan VEBs yet. We'll wait for an explicit remove request + * the floating VEBs yet. We'll wait for an explicit remove request * from up the network stack. */ - for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { - if (pf->vsi[i] && - pf->vsi[i]->uplink_seid == uplink_seid && - (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { - n++; /* count the VSIs */ - } - } - for (i = 0; i < I40E_MAX_VEB; i++) { - if (!pf->veb[i]) - continue; - if (pf->veb[i]->uplink_seid == uplink_seid) - n++; /* count the VEBs */ - if (pf->veb[i]->seid == uplink_seid) - veb = pf->veb[i]; + veb = i40e_pf_get_veb_by_seid(pf, uplink_seid); + if (veb && veb->uplink_seid) { + n = 0; + + /* Count non-controlling VSIs present on the VEB */ + i40e_pf_for_each_vsi(pf, i, vsi) + if (vsi->uplink_seid == uplink_seid && + (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) + n++; + + /* If there is no VSI except the control one then release + * the VEB and put the control VSI onto VEB uplink. + */ + if (!n) + i40e_veb_release(veb); } - if (n == 0 && veb && veb->uplink_seid != 0) - i40e_veb_release(veb); return 0; } @@ -13862,7 +14214,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) /* In Legacy mode, we do not have to get any other vector since we * piggyback on the misc/ICR0 for queue interrupts. */ - if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) + if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) return ret; if (vsi->num_q_vectors) vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, @@ -13891,9 +14243,9 @@ vector_setup_out: **/ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) { + struct i40e_vsi *main_vsi; u16 alloc_queue_pairs; struct i40e_pf *pf; - u8 enabled_tc; int ret; if (!vsi) @@ -13925,10 +14277,10 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) /* Update the FW view of the VSI. Force a reset of TC and queue * layout configurations. 
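
i40e_vsi_release() above drops the parent VEB once no VSI other than the controlling one remains on its uplink. A minimal model of that scan-and-release decision, with hypothetical types:

#include <stdio.h>

/* Hypothetical model: count the children still attached to a parent and
 * release the parent only when the last non-owner is gone, as in
 * i40e_vsi_release() above.
 */
struct vsi {
        int uplink_seid;
        int is_owner;   /* the controlling VSI does not keep the VEB alive */
};

static int count_users(const struct vsi *v, int n, int veb_seid)
{
        int i, users = 0;

        for (i = 0; i < n; i++)
                if (v[i].uplink_seid == veb_seid && !v[i].is_owner)
                        users++;
        return users;
}

int main(void)
{
        struct vsi vsis[] = {
                { .uplink_seid = 160, .is_owner = 1 },
                { .uplink_seid = 160, .is_owner = 0 },
        };

        if (!count_users(vsis, 2, 160))
                printf("release VEB 160\n");
        else
                printf("VEB 160 still in use\n");
        return 0;
}
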
*/ - enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; - pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; - pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; - i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); + main_vsi = i40e_pf_get_main_vsi(pf); + main_vsi->seid = pf->main_vsi_seid; + i40e_vsi_reconfig_tc(main_vsi); + if (vsi->type == I40E_VSI_MAIN) i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr); @@ -13949,6 +14301,8 @@ err_rings: free_netdev(vsi->netdev); vsi->netdev = NULL; } + if (vsi->type == I40E_VSI_MAIN) + i40e_devlink_destroy_port(pf); i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); err_vsi: i40e_vsi_clear(vsi); @@ -13974,8 +14328,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, struct i40e_vsi *vsi = NULL; struct i40e_veb *veb = NULL; u16 alloc_queue_pairs; - int ret, i; int v_idx; + int ret; /* The requested uplink_seid must be either * - the PF's port seid @@ -13990,21 +14344,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, * * Find which uplink_seid we were given and create a new VEB if needed */ - for (i = 0; i < I40E_MAX_VEB; i++) { - if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) { - veb = pf->veb[i]; - break; - } - } - + veb = i40e_pf_get_veb_by_seid(pf, uplink_seid); if (!veb && uplink_seid != pf->mac_seid) { - - for (i = 0; i < pf->num_alloc_vsi; i++) { - if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { - vsi = pf->vsi[i]; - break; - } - } + vsi = i40e_pf_get_vsi_by_seid(pf, uplink_seid); if (!vsi) { dev_info(&pf->pdev->dev, "no such uplink_seid %d\n", uplink_seid); @@ -14012,13 +14354,13 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, } if (vsi->uplink_seid == pf->mac_seid) - veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, + veb = i40e_veb_setup(pf, pf->mac_seid, vsi->seid, vsi->tc_config.enabled_tc); else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) - veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, + veb = i40e_veb_setup(pf, vsi->uplink_seid, vsi->seid, vsi->tc_config.enabled_tc); if (veb) { - if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { + if (vsi->type != I40E_VSI_MAIN) { dev_info(&vsi->back->pdev->dev, "New VSI creation error, uplink seid of LAN VSI expected.\n"); return NULL; @@ -14027,16 +14369,13 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, * already enabled, in which case we can't force VEPA * mode. 
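
Several hunks in this range replace open-coded scans of pf->veb[] with the i40e_pf_get_veb_by_seid() lookup. A sketch of such a find-by-id helper over a sparse pointer table, with hypothetical structures:

#include <stddef.h>
#include <stdio.h>

#define MAX_VEB 16

struct veb {
        int seid;
};

/* Hypothetical lookup mirroring i40e_pf_get_veb_by_seid(): return the
 * first occupied slot whose seid matches, or NULL.
 */
static struct veb *get_veb_by_seid(struct veb *vebs[], int seid)
{
        int i;

        for (i = 0; i < MAX_VEB; i++)
                if (vebs[i] && vebs[i]->seid == seid)
                        return vebs[i];
        return NULL;
}

int main(void)
{
        struct veb a = { .seid = 288 };
        struct veb *vebs[MAX_VEB] = { [2] = &a };
        struct veb *found = get_veb_by_seid(vebs, 288);

        printf("found seid %d\n", found ? found->seid : -1);
        return 0;
}
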
*/ - if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { veb->bridge_mode = BRIDGE_MODE_VEPA; - pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); } i40e_config_bridge_mode(veb); } - for (i = 0; i < I40E_MAX_VEB && !veb; i++) { - if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) - veb = pf->veb[i]; - } + veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid); if (!veb) { dev_info(&pf->pdev->dev, "couldn't add VEB\n"); return NULL; @@ -14086,9 +14425,18 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, ret = i40e_config_netdev(vsi); if (ret) goto err_netdev; - ret = register_netdev(vsi->netdev); + ret = i40e_netif_set_realnum_tx_rx_queues(vsi); if (ret) goto err_netdev; + if (vsi->type == I40E_VSI_MAIN) { + ret = i40e_devlink_create_port(pf); + if (ret) + goto err_netdev; + SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); + } + ret = register_netdev(vsi->netdev); + if (ret) + goto err_dl_port; vsi->netdev_registered = true; netif_carrier_off(vsi->netdev); #ifdef CONFIG_I40E_DCB @@ -14116,12 +14464,16 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, break; } - if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) && - (vsi->type == I40E_VSI_VMDQ2)) { + if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) && + vsi->type == I40E_VSI_VMDQ2) { ret = i40e_vsi_config_rss(vsi); + if (ret) + goto err_config; } return vsi; +err_config: + i40e_vsi_clear_rings(vsi); err_rings: i40e_vsi_free_q_vectors(vsi); err_msix: @@ -14131,6 +14483,9 @@ err_msix: free_netdev(vsi->netdev); vsi->netdev = NULL; } +err_dl_port: + if (vsi->type == I40E_VSI_MAIN) + i40e_devlink_destroy_port(pf); err_netdev: i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); err_vsi: @@ -14159,9 +14514,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb) &bw_data, NULL); if (ret) { dev_info(&pf->pdev->dev, - "query veb bw config failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); + "query veb bw config failed, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status)); goto out; } @@ -14169,9 +14523,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb) &ets_data, NULL); if (ret) { dev_info(&pf->pdev->dev, - "query veb bw ets config failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); + "query veb bw ets config failed, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status)); goto out; } @@ -14250,29 +14603,24 @@ static void i40e_switch_branch_release(struct i40e_veb *branch) struct i40e_pf *pf = branch->pf; u16 branch_seid = branch->seid; u16 veb_idx = branch->idx; + struct i40e_vsi *vsi; + struct i40e_veb *veb; int i; /* release any VEBs on this VEB - RECURSION */ - for (i = 0; i < I40E_MAX_VEB; i++) { - if (!pf->veb[i]) - continue; - if (pf->veb[i]->uplink_seid == branch->seid) - i40e_switch_branch_release(pf->veb[i]); - } + i40e_pf_for_each_veb(pf, i, veb) + if (veb->uplink_seid == branch->seid) + i40e_switch_branch_release(veb); /* Release the VSIs on this VEB, but not the owner VSI. * * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing * the VEB itself, so don't use (*branch) after this loop. 
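
i40e_switch_branch_release() above tears a switch branch down depth-first: recurse into child VEBs first, then release the VSIs hanging off the branch. A minimal model of that recursion over a parent-indexed table, purely illustrative:

#include <stdio.h>

/* Depth-first teardown over a parent-linked table, mirroring the
 * recursion in i40e_switch_branch_release().
 */
#define N_NODES 4

static const int uplink[N_NODES] = { -1, 0, 0, 1 };     /* parent index */

static void release_branch(int node)
{
        int i;

        /* release any children first - RECURSION */
        for (i = 0; i < N_NODES; i++)
                if (uplink[i] == node)
                        release_branch(i);

        printf("release node %d\n", node);
}

int main(void)
{
        release_branch(0);      /* frees 3, 1, 2, then 0 */
        return 0;
}
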
*/ - for (i = 0; i < pf->num_alloc_vsi; i++) { - if (!pf->vsi[i]) - continue; - if (pf->vsi[i]->uplink_seid == branch_seid && - (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { - i40e_vsi_release(pf->vsi[i]); - } - } + i40e_pf_for_each_vsi(pf, i, vsi) + if (vsi->uplink_seid == branch_seid && + (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) + i40e_vsi_release(vsi); /* There's one corner case where the VEB might not have been * removed, so double check it here and remove it if needed. @@ -14310,38 +14658,35 @@ static void i40e_veb_clear(struct i40e_veb *veb) **/ void i40e_veb_release(struct i40e_veb *veb) { - struct i40e_vsi *vsi = NULL; + struct i40e_vsi *vsi, *vsi_it; struct i40e_pf *pf; int i, n = 0; pf = veb->pf; /* find the remaining VSI and check for extras */ - for (i = 0; i < pf->num_alloc_vsi; i++) { - if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { + i40e_pf_for_each_vsi(pf, i, vsi_it) + if (vsi_it->uplink_seid == veb->seid) { + if (vsi_it->flags & I40E_VSI_FLAG_VEB_OWNER) + vsi = vsi_it; n++; - vsi = pf->vsi[i]; } - } - if (n != 1) { + + /* Floating VEB has to be empty and regular one must have + * single owner VSI. + */ + if ((veb->uplink_seid && n != 1) || (!veb->uplink_seid && n != 0)) { dev_info(&pf->pdev->dev, "can't remove VEB %d with %d VSIs left\n", veb->seid, n); return; } - /* move the remaining VSI to uplink veb */ - vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; + /* For regular VEB move the owner VSI to uplink port */ if (veb->uplink_seid) { + vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; vsi->uplink_seid = veb->uplink_seid; - if (veb->uplink_seid == pf->mac_seid) - vsi->veb_idx = I40E_NO_VEB; - else - vsi->veb_idx = veb->veb_idx; - } else { - /* floating VEB */ - vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; - vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; + vsi->veb_idx = I40E_NO_VEB; } i40e_aq_delete_element(&pf->hw, veb->seid, NULL); @@ -14356,19 +14701,18 @@ void i40e_veb_release(struct i40e_veb *veb) static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) { struct i40e_pf *pf = veb->pf; - bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); + bool enable_stats = !!test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags); int ret; - ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, - veb->enabled_tc, false, + ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi ? vsi->seid : 0, + veb->enabled_tc, vsi ? 
false : true, &veb->seid, enable_stats, NULL); /* get a VEB from the hardware */ if (ret) { dev_info(&pf->pdev->dev, - "couldn't add VEB, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "couldn't add VEB, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status)); return -EPERM; } @@ -14377,24 +14721,24 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) &veb->stats_idx, NULL, NULL, NULL); if (ret) { dev_info(&pf->pdev->dev, - "couldn't get VEB statistics idx, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "couldn't get VEB statistics idx, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status)); return -EPERM; } ret = i40e_veb_get_bw_info(veb); if (ret) { dev_info(&pf->pdev->dev, - "couldn't get VEB bw info, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "couldn't get VEB bw info, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status)); i40e_aq_delete_element(&pf->hw, veb->seid, NULL); return -ENOENT; } - vsi->uplink_seid = veb->seid; - vsi->veb_idx = veb->idx; - vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; + if (vsi) { + vsi->uplink_seid = veb->seid; + vsi->veb_idx = veb->idx; + vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; + } return 0; } @@ -14402,7 +14746,6 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) /** * i40e_veb_setup - Set up a VEB * @pf: board private structure - * @flags: VEB setup flags * @uplink_seid: the switch element to link to * @vsi_seid: the initial VSI seid * @enabled_tc: Enabled TC bit-map @@ -14415,12 +14758,12 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) * Returns pointer to the successfully allocated VEB sw struct on * success, otherwise returns NULL on failure. **/ -struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, - u16 uplink_seid, u16 vsi_seid, - u8 enabled_tc) +struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 uplink_seid, + u16 vsi_seid, u8 enabled_tc) { - struct i40e_veb *veb, *uplink_veb = NULL; - int vsi_idx, veb_idx; + struct i40e_vsi *vsi = NULL; + struct i40e_veb *veb; + int veb_idx; int ret; /* if one seid is 0, the other must be 0 to create a floating relay */ @@ -14433,26 +14776,11 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, } /* make sure there is such a vsi and uplink */ - for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) - if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) - break; - if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) { - dev_info(&pf->pdev->dev, "vsi seid %d not found\n", - vsi_seid); - return NULL; - } - - if (uplink_seid && uplink_seid != pf->mac_seid) { - for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { - if (pf->veb[veb_idx] && - pf->veb[veb_idx]->seid == uplink_seid) { - uplink_veb = pf->veb[veb_idx]; - break; - } - } - if (!uplink_veb) { - dev_info(&pf->pdev->dev, - "uplink seid %d not found\n", uplink_seid); + if (vsi_seid) { + vsi = i40e_pf_get_vsi_by_seid(pf, vsi_seid); + if (!vsi) { + dev_err(&pf->pdev->dev, "vsi seid %d not found\n", + vsi_seid); return NULL; } } @@ -14462,16 +14790,15 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, if (veb_idx < 0) goto err_alloc; veb = pf->veb[veb_idx]; - veb->flags = flags; veb->uplink_seid = uplink_seid; - veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB); veb->enabled_tc = (enabled_tc ? 
enabled_tc : 0x1); /* create the VEB in the switch */ - ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); + ret = i40e_add_veb(veb, vsi); if (ret) goto err_veb; - if (vsi_idx == pf->lan_vsi) + + if (vsi && vsi->idx == pf->lan_vsi) pf->lan_veb = veb->idx; return veb; @@ -14499,6 +14826,7 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf, u16 uplink_seid = le16_to_cpu(ele->uplink_seid); u8 element_type = ele->element_type; u16 seid = le16_to_cpu(ele->seid); + struct i40e_veb *veb; if (printconfig) dev_info(&pf->pdev->dev, @@ -14513,30 +14841,30 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf, /* Main VEB? */ if (uplink_seid != pf->mac_seid) break; - if (pf->lan_veb >= I40E_MAX_VEB) { + veb = i40e_pf_get_main_veb(pf); + if (!veb) { int v; /* find existing or else empty VEB */ - for (v = 0; v < I40E_MAX_VEB; v++) { - if (pf->veb[v] && (pf->veb[v]->seid == seid)) { - pf->lan_veb = v; - break; - } - } - if (pf->lan_veb >= I40E_MAX_VEB) { + veb = i40e_pf_get_veb_by_seid(pf, seid); + if (veb) { + pf->lan_veb = veb->idx; + } else { v = i40e_veb_mem_alloc(pf); if (v < 0) break; pf->lan_veb = v; } } - if (pf->lan_veb >= I40E_MAX_VEB) + + /* Try to get again main VEB as pf->lan_veb may have changed */ + veb = i40e_pf_get_main_veb(pf); + if (!veb) break; - pf->veb[pf->lan_veb]->seid = seid; - pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; - pf->veb[pf->lan_veb]->pf = pf; - pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; + veb->seid = seid; + veb->uplink_seid = pf->mac_seid; + veb->pf = pf; break; case I40E_SWITCH_ELEMENT_TYPE_VSI: if (num_reported != 1) @@ -14545,12 +14873,11 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf, * the PF's VSI */ pf->mac_seid = uplink_seid; - pf->pf_seid = downlink_seid; pf->main_vsi_seid = seid; if (printconfig) dev_info(&pf->pdev->dev, "pf_seid=%d main_vsi_seid=%d\n", - pf->pf_seid, pf->main_vsi_seid); + downlink_seid, pf->main_vsi_seid); break; case I40E_SWITCH_ELEMENT_TYPE_PF: case I40E_SWITCH_ELEMENT_TYPE_VF: @@ -14596,10 +14923,8 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) &next_seid, NULL); if (ret) { dev_info(&pf->pdev->dev, - "get switch config failed err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + "get switch config failed err %d aq_err %s\n", + ret, libie_aq_str(pf->hw.aq.asq_last_status)); kfree(aq_buf); return -ENOENT; } @@ -14635,6 +14960,7 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) **/ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired) { + struct i40e_vsi *main_vsi; u16 flags = 0; int ret; @@ -14642,9 +14968,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui ret = i40e_fetch_switch_configuration(pf, false); if (ret) { dev_info(&pf->pdev->dev, - "couldn't fetch switch config, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + "couldn't fetch switch config, err %pe aq_err %s\n", + ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status)); return ret; } i40e_pf_reset_stats(pf); @@ -14656,7 +14981,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui */ if ((pf->hw.pf_id == 0) && - !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) { + !test_bit(I40E_FLAG_TRUE_PROMISC_ENA, pf->flags)) { flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; pf->last_sw_conf_flags = flags; } @@ -14667,34 +14992,36 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool 
lock_acqui valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0, NULL); - if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { + if (ret && pf->hw.aq.asq_last_status != LIBIE_AQ_RC_ESRCH) { dev_info(&pf->pdev->dev, - "couldn't set switch config bits, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + "couldn't set switch config bits, err %pe aq_err %s\n", + ERR_PTR(ret), + libie_aq_str(pf->hw.aq.asq_last_status)); /* not a fatal problem, just keep going */ } pf->last_sw_conf_valid_flags = valid_flags; } /* first time setup */ - if (pf->lan_vsi == I40E_NO_VSI || reinit) { - struct i40e_vsi *vsi = NULL; + main_vsi = i40e_pf_get_main_vsi(pf); + if (!main_vsi || reinit) { + struct i40e_veb *veb; u16 uplink_seid; /* Set up the PF VSI associated with the PF's main VSI * that is already in the HW switch */ - if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) - uplink_seid = pf->veb[pf->lan_veb]->seid; + veb = i40e_pf_get_main_veb(pf); + if (veb) + uplink_seid = veb->seid; else uplink_seid = pf->mac_seid; - if (pf->lan_vsi == I40E_NO_VSI) - vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); + if (!main_vsi) + main_vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, + uplink_seid, 0); else if (reinit) - vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); - if (!vsi) { + main_vsi = i40e_vsi_reinit_setup(main_vsi); + if (!main_vsi) { dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); i40e_cloud_filter_exit(pf); i40e_fdir_teardown(pf); @@ -14702,13 +15029,10 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui } } else { /* force a reset of TC and queue layout configurations */ - u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; - - pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; - pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; - i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); + main_vsi->seid = pf->main_vsi_seid; + i40e_vsi_reconfig_tc(main_vsi); } - i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); + i40e_vlan_stripping_disable(main_vsi); i40e_fdir_sb_setup(pf); @@ -14723,23 +15047,19 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui /* enable RSS in the HW, even for only one queue, as the stack can use * the hash */ - if ((pf->flags & I40E_FLAG_RSS_ENABLED)) + if (test_bit(I40E_FLAG_RSS_ENA, pf->flags)) i40e_pf_config_rss(pf); /* fill in link information and enable LSE reporting */ i40e_link_event(pf); - /* Initialize user-specific link properties */ - pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & - I40E_AQ_AN_COMPLETED) ? 
true : false); - i40e_ptp_init(pf); if (!lock_acquired) rtnl_lock(); /* repopulate tunnel port filters */ - udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev); + udp_tunnel_nic_reset_ntf(main_vsi->netdev); if (!lock_acquired) rtnl_unlock(); @@ -14765,42 +15085,42 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) queues_left = pf->hw.func_caps.num_tx_qp; if ((queues_left == 1) || - !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { + !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { /* one qp for PF, no queues for anything else */ queues_left = 0; pf->alloc_rss_size = pf->num_lan_qps = 1; /* make sure all the fancies are disabled */ - pf->flags &= ~(I40E_FLAG_RSS_ENABLED | - I40E_FLAG_IWARP_ENABLED | - I40E_FLAG_FD_SB_ENABLED | - I40E_FLAG_FD_ATR_ENABLED | - I40E_FLAG_DCB_CAPABLE | - I40E_FLAG_DCB_ENABLED | - I40E_FLAG_SRIOV_ENABLED | - I40E_FLAG_VMDQ_ENABLED); - pf->flags |= I40E_FLAG_FD_SB_INACTIVE; - } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | - I40E_FLAG_FD_SB_ENABLED | - I40E_FLAG_FD_ATR_ENABLED | - I40E_FLAG_DCB_CAPABLE))) { + clear_bit(I40E_FLAG_RSS_ENA, pf->flags); + clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags); + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); + clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags); + clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags); + set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); + } else if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags) && + !test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && + !test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) && + !test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) { /* one qp for PF */ pf->alloc_rss_size = pf->num_lan_qps = 1; queues_left -= pf->num_lan_qps; - pf->flags &= ~(I40E_FLAG_RSS_ENABLED | - I40E_FLAG_IWARP_ENABLED | - I40E_FLAG_FD_SB_ENABLED | - I40E_FLAG_FD_ATR_ENABLED | - I40E_FLAG_DCB_ENABLED | - I40E_FLAG_VMDQ_ENABLED); - pf->flags |= I40E_FLAG_FD_SB_INACTIVE; + clear_bit(I40E_FLAG_RSS_ENA, pf->flags); + clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); + clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags); + set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); } else { /* Not enough queues for all TCs */ - if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && - (queues_left < I40E_MAX_TRAFFIC_CLASS)) { - pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | - I40E_FLAG_DCB_ENABLED); + if (test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags) && + queues_left < I40E_MAX_TRAFFIC_CLASS) { + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); } @@ -14813,24 +15133,24 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) queues_left -= pf->num_lan_qps; } - if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { if (queues_left > 1) { queues_left -= 1; /* save 1 queue for FD */ } else { - pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; - pf->flags |= I40E_FLAG_FD_SB_INACTIVE; + clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); dev_info(&pf->pdev->dev, "not enough queues for Flow Director. 
Flow Director feature is disabled\n"); } } - if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && + if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) && pf->num_vf_qps && pf->num_req_vfs && queues_left) { pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left / pf->num_vf_qps)); queues_left -= (pf->num_req_vfs * pf->num_vf_qps); } - if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && + if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags) && pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, (queues_left / pf->num_vmdq_qps)); @@ -14841,7 +15161,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) dev_dbg(&pf->pdev->dev, "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n", pf->hw.func_caps.num_tx_qp, - !!(pf->flags & I40E_FLAG_FD_SB_ENABLED), + !!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags), pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs, pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left); @@ -14865,7 +15185,8 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf) settings->hash_lut_size = I40E_HASH_LUT_SIZE_128; /* Flow Director is enabled */ - if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) || + test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags)) settings->enable_fdir = true; /* Ethtype and MACVLAN filters enabled for PF */ @@ -14882,6 +15203,7 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf) #define REMAIN(__x) (INFO_STRING_LEN - (__x)) static void i40e_print_features(struct i40e_pf *pf) { + struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf); struct i40e_hw *hw = &pf->hw; char *buf; int i; @@ -14895,23 +15217,22 @@ static void i40e_print_features(struct i40e_pf *pf) i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); #endif i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d", - pf->hw.func_caps.num_vsis, - pf->vsi[pf->lan_vsi]->num_queue_pairs); - if (pf->flags & I40E_FLAG_RSS_ENABLED) + pf->hw.func_caps.num_vsis, main_vsi->num_queue_pairs); + if (test_bit(I40E_FLAG_RSS_ENA, pf->flags)) i += scnprintf(&buf[i], REMAIN(i), " RSS"); - if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) + if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags)) i += scnprintf(&buf[i], REMAIN(i), " FD_ATR"); - if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { i += scnprintf(&buf[i], REMAIN(i), " FD_SB"); i += scnprintf(&buf[i], REMAIN(i), " NTUPLE"); } - if (pf->flags & I40E_FLAG_DCB_CAPABLE) + if (test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) i += scnprintf(&buf[i], REMAIN(i), " DCB"); i += scnprintf(&buf[i], REMAIN(i), " VxLAN"); i += scnprintf(&buf[i], REMAIN(i), " Geneve"); - if (pf->flags & I40E_FLAG_PTP) + if (test_bit(I40E_FLAG_PTP_ENA, pf->flags)) i += scnprintf(&buf[i], REMAIN(i), " PTP"); - if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) + if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) i += scnprintf(&buf[i], REMAIN(i), " VEB"); else i += scnprintf(&buf[i], REMAIN(i), " VEPA"); @@ -14942,22 +15263,26 @@ static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf) * @fec_cfg: FEC option to set in flags * @flags: ptr to flags in which we set FEC option **/ -void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags) +void i40e_set_fec_in_flags(u8 fec_cfg, unsigned long *flags) { - if (fec_cfg & I40E_AQ_SET_FEC_AUTO) - *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC; + if (fec_cfg & I40E_AQ_SET_FEC_AUTO) { + set_bit(I40E_FLAG_RS_FEC, flags); + set_bit(I40E_FLAG_BASE_R_FEC, flags); + } if 
((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) { - *flags |= I40E_FLAG_RS_FEC; - *flags &= ~I40E_FLAG_BASE_R_FEC; + set_bit(I40E_FLAG_RS_FEC, flags); + clear_bit(I40E_FLAG_BASE_R_FEC, flags); } if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) || (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) { - *flags |= I40E_FLAG_BASE_R_FEC; - *flags &= ~I40E_FLAG_RS_FEC; + set_bit(I40E_FLAG_BASE_R_FEC, flags); + clear_bit(I40E_FLAG_RS_FEC, flags); + } + if (fec_cfg == 0) { + clear_bit(I40E_FLAG_RS_FEC, flags); + clear_bit(I40E_FLAG_BASE_R_FEC, flags); } - if (fec_cfg == 0) - *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC); } /** @@ -15007,21 +15332,21 @@ static bool i40e_check_recovery_mode(struct i40e_pf *pf) * * Return 0 on success, negative on failure. **/ -static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf) +static int i40e_pf_loop_reset(struct i40e_pf *pf) { /* wait max 10 seconds for PF reset to succeed */ const unsigned long time_end = jiffies + 10 * HZ; struct i40e_hw *hw = &pf->hw; - i40e_status ret; + int ret; ret = i40e_pf_reset(hw); - while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) { + while (ret != 0 && time_before(jiffies, time_end)) { usleep_range(10000, 20000); ret = i40e_pf_reset(hw); } - if (ret == I40E_SUCCESS) + if (ret == 0) pf->pfr_count++; else dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret); @@ -15059,15 +15383,15 @@ static bool i40e_check_fw_empr(struct i40e_pf *pf) * Return 0 if NIC is healthy or negative value when there are issues * with resets **/ -static i40e_status i40e_handle_resets(struct i40e_pf *pf) +static int i40e_handle_resets(struct i40e_pf *pf) { - const i40e_status pfr = i40e_pf_loop_reset(pf); + const int pfr = i40e_pf_loop_reset(pf); const bool is_empr = i40e_check_fw_empr(pf); - if (is_empr || pfr != I40E_SUCCESS) + if (is_empr || pfr != 0) dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n"); - return is_empr ? I40E_ERR_RESET_FAILED : pfr; + return is_empr ? -EIO : pfr; } /** @@ -15086,6 +15410,7 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw) int err; int v_idx; + pci_set_drvdata(pf->pdev, pf); pci_save_state(pf->pdev); /* set up periodic task facility */ @@ -15156,18 +15481,33 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw) err_switch_setup: i40e_reset_interrupt_capability(pf); - del_timer_sync(&pf->service_timer); + timer_shutdown_sync(&pf->service_timer); i40e_shutdown_adminq(hw); iounmap(hw->hw_addr); - pci_disable_pcie_error_reporting(pf->pdev); pci_release_mem_regions(pf->pdev); pci_disable_device(pf->pdev); - kfree(pf); + i40e_free_pf(pf); return err; } /** + * i40e_set_subsystem_device_id - set subsystem device id + * @hw: pointer to the hardware info + * + * Set PCI subsystem device id either from a pci_dev structure or + * a specific FW register. + **/ +static inline void i40e_set_subsystem_device_id(struct i40e_hw *hw) +{ + struct i40e_pf *pf = i40e_hw_to_pf(hw); + + hw->subsystem_device_id = pf->pdev->subsystem_device ? 
+ pf->pdev->subsystem_device : + (ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX); +} + +/** * i40e_probe - Device initialization routine * @pdev: PCI device information struct * @ent: entry in i40e_pci_tbl @@ -15183,16 +15523,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct i40e_aq_get_phy_abilities_resp abilities; #ifdef CONFIG_I40E_DCB enum i40e_get_fw_lldp_status_resp lldp_status; - i40e_status status; #endif /* CONFIG_I40E_DCB */ + struct i40e_vsi *vsi; struct i40e_pf *pf; struct i40e_hw *hw; - static u16 pfs_found; u16 wol_nvm_bits; + char nvm_ver[32]; u16 link_status; +#ifdef CONFIG_I40E_DCB + int status; +#endif /* CONFIG_I40E_DCB */ int err; u32 val; - u32 i; err = pci_enable_device_mem(pdev); if (err) @@ -15201,12 +15543,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* set up for high or low dma */ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "DMA configuration failed: 0x%x\n", err); - goto err_dma; - } + dev_err(&pdev->dev, + "DMA configuration failed: 0x%x\n", err); + goto err_dma; } /* set up pci connections */ @@ -15217,7 +15556,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_pci_reg; } - pci_enable_pcie_error_reporting(pdev); pci_set_master(pdev); /* Now that we have a PCI connection, we need to do the @@ -15225,7 +15563,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * the Admin Queue structures and then querying for the * device's current profile information. */ - pf = kzalloc(sizeof(*pf), GFP_KERNEL); + pf = i40e_alloc_pf(&pdev->dev); if (!pf) { err = -ENOMEM; goto err_pf_alloc; @@ -15235,7 +15573,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) set_bit(__I40E_DOWN, pf->state); hw = &pf->hw; - hw->back = pf; pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0), I40E_MAX_CSR_SPACE); @@ -15262,11 +15599,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->device_id = pdev->device; pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); hw->subsystem_vendor_id = pdev->subsystem_vendor; - hw->subsystem_device_id = pdev->subsystem_device; + i40e_set_subsystem_device_id(hw); hw->bus.device = PCI_SLOT(pdev->devfn); hw->bus.func = PCI_FUNC(pdev->devfn); hw->bus.bus_id = pdev->bus->number; - pf->instance = pfs_found; /* Select something other than the 802.1ad ethertype for the * switch to use internally and drop on ingress. @@ -15328,7 +15664,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; - pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", @@ -15346,7 +15681,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = i40e_init_adminq(hw); if (err) { - if (err == I40E_ERR_FIRMWARE_API_VERSION) + if (err == -EIO) dev_info(&pdev->dev, "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. 
@@ -15346,7 +15681,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	err = i40e_init_adminq(hw);
 	if (err) {
-		if (err == I40E_ERR_FIRMWARE_API_VERSION)
+		if (err == -EIO)
 			dev_info(&pdev->dev,
 				 "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
 				 hw->aq.api_maj_ver,
@@ -15360,23 +15695,25 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_pf_reset;
 	}
 	i40e_get_oem_version(hw);
+	i40e_get_pba_string(hw);
 
 	/* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
+	i40e_nvm_version_str(hw, nvm_ver, sizeof(nvm_ver));
 	dev_info(&pdev->dev,
 		 "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
 		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
-		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
-		 i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
-		 hw->subsystem_vendor_id, hw->subsystem_device_id);
-
-	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
-	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
-		dev_info(&pdev->dev,
-			 "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
+		 hw->aq.api_maj_ver, hw->aq.api_min_ver, nvm_ver,
+		 hw->vendor_id, hw->device_id, hw->subsystem_vendor_id,
+		 hw->subsystem_device_id);
+
+	if (i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR,
+				  I40E_FW_MINOR_VERSION(hw) + 1))
+		dev_dbg(&pdev->dev,
+			"The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
 			 hw->aq.api_maj_ver, hw->aq.api_min_ver,
 			 I40E_FW_API_VERSION_MAJOR, I40E_FW_MINOR_VERSION(hw));
-	else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
+	else if (i40e_is_aq_api_ver_lt(hw, 1, 4))
 		dev_info(&pdev->dev,
 			 "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
 			 hw->aq.api_maj_ver,
@@ -15423,7 +15760,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * Ignore error return codes because if it was already disabled via
 	 * hardware settings this will fail
 	 */
-	if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
+	if (test_bit(I40E_HW_CAP_STOP_FW_LLDP, pf->hw.caps)) {
 		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
 		i40e_aq_stop_lldp(hw, true, false, NULL);
 	}
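Note: the version-banner hunk above changes i40e_nvm_version_str() from returning a string to filling a caller-supplied buffer (nvm_ver). A sketch of that style with an invented helper and struct, illustrating why it is preferred:

	#include <linux/kernel.h>	/* snprintf */

	struct fw_ver {
		unsigned int major, minor;
	};

	/* writing into a caller-owned buffer keeps the helper reentrant,
	 * unlike returning a pointer to a static buffer
	 */
	static void fw_ver_str(const struct fw_ver *v, char *buf, size_t len)
	{
		snprintf(buf, len, "%u.%u", v->major, v->minor);
	}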
"FW LLDP is disabled\n" : "FW LLDP is enabled\n"); @@ -15462,7 +15800,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = i40e_init_pf_dcb(pf); if (err) { dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); - pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED); + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); /* Continue without DCB enabled */ } #endif /* CONFIG_I40E_DCB */ @@ -15498,7 +15837,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port; pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port; - pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared; pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS; pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN | @@ -15530,11 +15868,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) #ifdef CONFIG_PCI_IOV /* prep for VF support */ - if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && - (pf->flags & I40E_FLAG_MSIX_ENABLED) && + if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) && + test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && !test_bit(__I40E_BAD_EEPROM, pf->state)) { if (pci_num_vf(pdev)) - pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); } #endif err = i40e_setup_pf_switch(pf, false, false); @@ -15542,15 +15880,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); goto err_vsis; } - INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list); + + vsi = i40e_pf_get_main_vsi(pf); + INIT_LIST_HEAD(&vsi->ch_list); /* if FDIR VSI was set up, start it now */ - for (i = 0; i < pf->num_alloc_vsi; i++) { - if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { - i40e_vsi_open(pf->vsi[i]); - break; - } - } + vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); + if (vsi) + i40e_vsi_open(vsi); /* The driver only wants link up/down and module qualification * reports from firmware. Note the negative logic. @@ -15560,9 +15897,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) I40E_AQ_EVENT_MEDIA_NA | I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); if (err) - dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, err), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n", + ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status)); + + /* VF MDD event logs are rate limited to one second intervals */ + ratelimit_state_init(&pf->mdd_message_rate_limit, 1 * HZ, 1); /* Reconfigure hardware for allowing smaller MSS in the case * of TSO, so that we avoid the MDD being fired and causing @@ -15575,14 +15914,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) wr32(hw, I40E_REG_MSS, val); } - if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { + if (test_bit(I40E_HW_CAP_RESTART_AUTONEG, pf->hw.caps)) { msleep(75); err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (err) - dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, err), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n", + ERR_PTR(err), + libie_aq_str(pf->hw.aq.asq_last_status)); } /* The main driver is (mostly) up and happy. 
@@ -15595,7 +15933,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * the misc functionality and queue processing is combined in
 	 * the same vector and that gets setup at open.
 	 */
-	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+	if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
 		err = i40e_setup_misc_vector(pf);
 		if (err) {
 			dev_info(&pdev->dev,
@@ -15608,8 +15946,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 #ifdef CONFIG_PCI_IOV
 	/* prep for VF support */
-	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
-	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+	if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) &&
+	    test_bit(I40E_FLAG_MSIX_ENA, pf->flags) &&
 	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
 		/* disable link interrupts for VFs */
 		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
@@ -15629,7 +15967,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 #endif /* CONFIG_PCI_IOV */
 
-	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+	if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
 		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
 						      pf->num_iwarp_msix,
 						      I40E_IWARP_IRQ_PILE_ID);
 		if (pf->iwarp_base_vector < 0) {
 			dev_info(&pdev->dev,
 				 "failed to get tracking for %d vectors for IWARP err=%d\n",
 				 pf->num_iwarp_msix, pf->iwarp_base_vector);
-			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
+			clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
 		}
 	}
 
@@ -15651,7 +15989,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		  round_jiffies(jiffies + pf->service_timer_period));
 
 	/* add this PF to client device list and launch a client service task */
-	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+	if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
 		err = i40e_lan_add_device(pf);
 		if (err)
 			dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
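Note: the many conversions above from pf->flags bitmask arithmetic to set_bit()/clear_bit()/test_bit() imply the flags field became an unsigned long bitmap. A sketch of the pattern with invented flag names (the real enum lives in the i40e headers):

	#include <linux/bitmap.h>
	#include <linux/bitops.h>

	enum { MY_FLAG_MSIX_ENA, MY_FLAG_SRIOV_ENA, MY_NUM_FLAGS };

	static DECLARE_BITMAP(my_flags, MY_NUM_FLAGS);

	static bool sriov_ready(void)
	{
		/* set_bit()/clear_bit() are atomic per bit, unlike the old
		 * read-modify-write of |= and &= on a plain integer field
		 */
		set_bit(MY_FLAG_MSIX_ENA, my_flags);
		return test_bit(MY_FLAG_MSIX_ENA, my_flags) &&
		       test_bit(MY_FLAG_SRIOV_ENA, my_flags);
	}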
@@ -15664,7 +16002,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * and will report PCI Gen 1 x 1 by default so don't bother
 	 * checking them.
 	 */
-	if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
+	if (!test_bit(I40E_HW_CAP_NO_PCI_LINK_CHECK, pf->hw.caps)) {
 		char speed[PCI_SPEED_SIZE] = "Unknown";
 		char width[PCI_WIDTH_SIZE] = "Unknown";
 
@@ -15678,23 +16016,23 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 		switch (hw->bus.speed) {
 		case i40e_bus_speed_8000:
-			strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
+			strscpy(speed, "8.0", PCI_SPEED_SIZE); break;
 		case i40e_bus_speed_5000:
-			strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
+			strscpy(speed, "5.0", PCI_SPEED_SIZE); break;
 		case i40e_bus_speed_2500:
-			strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
+			strscpy(speed, "2.5", PCI_SPEED_SIZE); break;
 		default:
 			break;
 		}
 		switch (hw->bus.width) {
 		case i40e_bus_width_pcie_x8:
-			strlcpy(width, "8", PCI_WIDTH_SIZE); break;
+			strscpy(width, "8", PCI_WIDTH_SIZE); break;
 		case i40e_bus_width_pcie_x4:
-			strlcpy(width, "4", PCI_WIDTH_SIZE); break;
+			strscpy(width, "4", PCI_WIDTH_SIZE); break;
 		case i40e_bus_width_pcie_x2:
-			strlcpy(width, "2", PCI_WIDTH_SIZE); break;
+			strscpy(width, "2", PCI_WIDTH_SIZE); break;
 		case i40e_bus_width_pcie_x1:
-			strlcpy(width, "1", PCI_WIDTH_SIZE); break;
+			strscpy(width, "1", PCI_WIDTH_SIZE); break;
 		default:
 			break;
 		}
 
@@ -15712,28 +16050,30 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	/* get the requested speeds from the fw */
 	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
 	if (err)
-		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
-			i40e_stat_str(&pf->hw, err),
-			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %pe last_status = %s\n",
+			ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
 	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
 
 	/* set the FEC config due to the board capabilities */
-	i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
+	i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, pf->flags);
 
 	/* get the supported phy types from the fw */
 	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
 	if (err)
-		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
-			i40e_stat_str(&pf->hw, err),
-			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
-
-	/* make sure the MFS hasn't been set lower than the default */
+		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %pe last_status = %s\n",
+			ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
+
 #define MAX_FRAME_SIZE_DEFAULT 0x2600
-	val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
-	       I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
-	if (val < MAX_FRAME_SIZE_DEFAULT)
-		dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
-			 i, val);
+
+	err = i40e_aq_set_mac_config(hw, MAX_FRAME_SIZE_DEFAULT, NULL);
+	if (err)
+		dev_warn(&pdev->dev, "set mac config ret = %pe last_status = %s\n",
+			 ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
+
+	/* Make sure the MFS is set to the expected value */
+	val = rd32(hw, I40E_PRTGL_SAH);
+	FIELD_MODIFY(I40E_PRTGL_SAH_MFS_MASK, &val, MAX_FRAME_SIZE_DEFAULT);
+	wr32(hw, I40E_PRTGL_SAH, val);
 
 	/* Add a filter to drop all Flow control frames from any VSI from being
 	 * transmitted. By doing so we stop a malicious VF from sending out
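Note: the switch statements above convert strlcpy() to strscpy(). A minimal sketch of the behavioral difference (the function is invented):

	#include <linux/string.h>

	static void copy_speed(char *dst, size_t dst_sz)
	{
		/* strscpy() never reads past the source string's NUL (strlcpy
		 * could, to compute its return value), always NUL-terminates
		 * the destination, and returns -E2BIG on truncation instead
		 * of the would-be source length
		 */
		ssize_t n = strscpy(dst, "8.0", dst_sz);

		if (n < 0)
			pr_debug("speed string truncated\n");
	}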
@@ -15745,13 +16085,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 						       pf->main_vsi_seid);
 
 	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
-	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
-		pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
+	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
+		set_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps);
 	if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
-		pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
+		set_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps);
 	/* print a string summarizing features */
 	i40e_print_features(pf);
 
+	i40e_devlink_register(pf);
+
 	return 0;
 
 	/* Unwind what we've done if something failed in the setup */
@@ -15761,7 +16103,7 @@ err_vsis:
 	kfree(pf->vsi);
 err_switch_setup:
 	i40e_reset_interrupt_capability(pf);
-	del_timer_sync(&pf->service_timer);
+	timer_shutdown_sync(&pf->service_timer);
 err_mac_addr:
 err_configure_lan_hmc:
 	(void)i40e_shutdown_lan_hmc(hw);
 err_init_lan_hmc:
 err_adminq_setup:
 err_pf_reset:
 	iounmap(hw->hw_addr);
 err_ioremap:
-	kfree(pf);
+	i40e_free_pf(pf);
 err_pf_alloc:
-	pci_disable_pcie_error_reporting(pdev);
 	pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
@@ -15795,9 +16136,13 @@ static void i40e_remove(struct pci_dev *pdev)
 {
 	struct i40e_pf *pf = pci_get_drvdata(pdev);
 	struct i40e_hw *hw = &pf->hw;
-	i40e_status ret_code;
+	struct i40e_vsi *vsi;
+	struct i40e_veb *veb;
+	int ret_code;
 	int i;
 
+	i40e_devlink_unregister(pf);
+
 	i40e_dbg_pf_exit(pf);
 
 	i40e_ptp_stop(pf);
@@ -15806,19 +16151,24 @@ static void i40e_remove(struct pci_dev *pdev)
 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
 
-	while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+	/* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE
+	 * flags, once they are set, i40e_rebuild should not be called as
+	 * i40e_prep_for_reset always returns early.
+	 */
+	while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
 		usleep_range(1000, 2000);
+	set_bit(__I40E_IN_REMOVE, pf->state);
 
-	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+	if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags)) {
 		set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
 		i40e_free_vfs(pf);
-		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+		clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags);
 	}
 	/* no more scheduling of any task */
 	set_bit(__I40E_SUSPENDED, pf->state);
 	set_bit(__I40E_DOWN, pf->state);
 	if (pf->service_timer.function)
-		del_timer_sync(&pf->service_timer);
+		timer_shutdown_sync(&pf->service_timer);
 	if (pf->service_task.func)
 		cancel_work_sync(&pf->service_task);
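Note: the remove path above now spins on test_and_set_bit() rather than test_bit(), so the remover both waits out a pending reset and atomically claims the bit itself. A sketch of the claim-or-wait idiom with placeholder names:

	#include <linux/bitops.h>
	#include <linux/delay.h>

	static void claim_reset_ownership(unsigned long *state, int bit)
	{
		/* test_and_set_bit() returns the old value, so the loop exits
		 * only on the iteration where *we* flipped 0 -> 1; from then
		 * on no other path can start a reset
		 */
		while (test_and_set_bit(bit, state))
			usleep_range(1000, 2000);
	}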
@@ -15838,32 +16188,31 @@ static void i40e_remove(struct pci_dev *pdev)
 	/* Client close must be called explicitly here because the timer
 	 * has been stopped.
 	 */
-	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+	i40e_notify_client_of_netdev_close(pf, false);
 
 	i40e_fdir_teardown(pf);
 
 	/* If there is a switch structure or any orphans, remove them.
 	 * This will leave only the PF's VSI remaining.
 	 */
-	for (i = 0; i < I40E_MAX_VEB; i++) {
-		if (!pf->veb[i])
-			continue;
-
-		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
-		    pf->veb[i]->uplink_seid == 0)
-			i40e_switch_branch_release(pf->veb[i]);
-	}
+	i40e_pf_for_each_veb(pf, i, veb)
+		if (veb->uplink_seid == pf->mac_seid ||
+		    veb->uplink_seid == 0)
+			i40e_switch_branch_release(veb);
 
-	/* Now we can shutdown the PF's VSI, just before we kill
+	/* Now we can shutdown the PF's VSIs, just before we kill
 	 * adminq and hmc.
 	 */
-	if (pf->vsi[pf->lan_vsi])
-		i40e_vsi_release(pf->vsi[pf->lan_vsi]);
+	i40e_pf_for_each_vsi(pf, i, vsi) {
+		i40e_vsi_close(vsi);
+		i40e_vsi_release(vsi);
+		pf->vsi[i] = NULL;
+	}
 
 	i40e_cloud_filter_exit(pf);
 
 	/* remove attached clients */
-	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+	if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
 		ret_code = i40e_lan_del_device(pf);
 		if (ret_code)
 			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
@@ -15882,7 +16231,7 @@ static void i40e_remove(struct pci_dev *pdev)
 
 unmap:
 	/* Free MSI/legacy interrupt 0 when in recovery mode. */
 	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
-	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
+	    !test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
 		free_irq(pf->pdev->irq, pf);
 
 	/* shutdown the adminq */
@@ -15895,18 +16244,17 @@ unmap:
 	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
 	rtnl_lock();
 	i40e_clear_interrupt_scheme(pf);
-	for (i = 0; i < pf->num_alloc_vsi; i++) {
-		if (pf->vsi[i]) {
-			if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
-				i40e_vsi_clear_rings(pf->vsi[i]);
-			i40e_vsi_clear(pf->vsi[i]);
-			pf->vsi[i] = NULL;
-		}
+	i40e_pf_for_each_vsi(pf, i, vsi) {
+		if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
+			i40e_vsi_clear_rings(vsi);
+
+		i40e_vsi_clear(vsi);
+		pf->vsi[i] = NULL;
 	}
 	rtnl_unlock();
 
-	for (i = 0; i < I40E_MAX_VEB; i++) {
-		kfree(pf->veb[i]);
+	i40e_pf_for_each_veb(pf, i, veb) {
+		kfree(veb);
 		pf->veb[i] = NULL;
 	}
 
@@ -15914,14 +16262,146 @@ unmap:
 	kfree(pf->vsi);
 
 	iounmap(hw->hw_addr);
-	kfree(pf);
+	i40e_free_pf(pf);
 	pci_release_mem_regions(pdev);
-	pci_disable_pcie_error_reporting(pdev);
 	pci_disable_device(pdev);
 }
 
 /**
+ * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
+ * using the mac_address_write admin q function
+ * @pf: pointer to i40e_pf struct
+ **/
+static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
+{
+	struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
+	struct i40e_hw *hw = &pf->hw;
+	u8 mac_addr[6];
+	u16 flags = 0;
+	int ret;
+
+	/* Get current MAC address in case it's an LAA */
+	if (main_vsi && main_vsi->netdev) {
+		ether_addr_copy(mac_addr, main_vsi->netdev->dev_addr);
+	} else {
+		dev_err(&pf->pdev->dev,
+			"Failed to retrieve MAC address; using default\n");
+		ether_addr_copy(mac_addr, hw->mac.addr);
+	}
+
+	/* The FW expects the mac address write cmd to first be called with
+	 * one of these flags before calling it again with the multicast
+	 * enable flags.
+	 */
+	flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
+
+	if (hw->func_caps.flex10_enable && hw->partition_id != 1)
+		flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
+
+	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
+	if (ret) {
+		dev_err(&pf->pdev->dev,
+			"Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
+		return;
+	}
+
+	flags = I40E_AQC_MC_MAG_EN
+			| I40E_AQC_WOL_PRESERVE_ON_PFR
+			| I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
+	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
+	if (ret)
+		dev_err(&pf->pdev->dev,
+			"Failed to enable Multicast Magic Packet wake up\n");
+}
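Note: condensing the two-step firmware sequence that the comment in i40e_enable_mc_magic_wake() describes; the command flags are the driver's, the wrapper function itself is an illustrative stand-in:

	static int arm_mc_magic_wake(struct i40e_hw *hw, u8 *mac)
	{
		int ret;

		/* step 1: the FW requires an LAA write type first */
		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
						mac, NULL);
		if (ret)
			return ret;

		/* step 2: only then may the multicast-magic bits be set */
		return i40e_aq_mac_address_write(hw, I40E_AQC_MC_MAG_EN |
						 I40E_AQC_WOL_PRESERVE_ON_PFR |
						 I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG,
						 mac, NULL);
	}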
+
+/**
+ * i40e_io_suspend - suspend all IO operations
+ * @pf: pointer to i40e_pf struct
+ *
+ **/
+static int i40e_io_suspend(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+
+	set_bit(__I40E_DOWN, pf->state);
+
+	/* Ensure service task will not be running */
+	timer_delete_sync(&pf->service_timer);
+	cancel_work_sync(&pf->service_task);
+
+	/* Client close must be called explicitly here because the timer
+	 * has been stopped.
+	 */
+	i40e_notify_client_of_netdev_close(pf, false);
+
+	if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
+	    pf->wol_en)
+		i40e_enable_mc_magic_wake(pf);
+
+	/* Since we're going to destroy queues during the
+	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
+	 * whole section
+	 */
+	rtnl_lock();
+
+	i40e_prep_for_reset(pf);
+
+	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+	/* Clear the interrupt scheme and release our IRQs so that the system
+	 * can safely hibernate even when there are a large number of CPUs.
+	 * Otherwise hibernation might fail when mapping all the vectors back
+	 * to CPU0.
+	 */
+	i40e_clear_interrupt_scheme(pf);
+
+	rtnl_unlock();
+
+	return 0;
+}
+
+/**
+ * i40e_io_resume - resume IO operations
+ * @pf: pointer to i40e_pf struct
+ *
+ **/
+static int i40e_io_resume(struct i40e_pf *pf)
+{
+	struct device *dev = &pf->pdev->dev;
+	int err;
+
+	/* We need to hold the RTNL lock prior to restoring interrupt schemes,
+	 * since we're going to be restoring queues
+	 */
+	rtnl_lock();
+
+	/* We cleared the interrupt scheme when we suspended, so we need to
+	 * restore it now to resume device functionality.
+	 */
+	err = i40e_restore_interrupt_scheme(pf);
+	if (err) {
+		dev_err(dev, "Cannot restore interrupt scheme: %d\n",
+			err);
+	}
+
+	clear_bit(__I40E_DOWN, pf->state);
+	i40e_reset_and_rebuild(pf, false, true);
+
+	rtnl_unlock();
+
+	/* Clear suspended state last after everything is recovered */
+	clear_bit(__I40E_SUSPENDED, pf->state);
+
+	/* Restart the service task */
+	mod_timer(&pf->service_timer,
+		  round_jiffies(jiffies + pf->service_timer_period));
+
+	return 0;
+}
+
+/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 * @error: the type of PCI error
@@ -15945,7 +16425,7 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
 
 	/* shutdown all operations */
 	if (!test_bit(__I40E_SUSPENDED, pf->state))
-		i40e_prep_for_reset(pf);
+		i40e_io_suspend(pf);
 
 	/* Request a slot reset */
 	return PCI_ERS_RESULT_NEED_RESET;
@@ -15967,14 +16447,14 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
 	u32 reg;
 
 	dev_dbg(&pdev->dev, "%s\n", __func__);
-	if (pci_enable_device_mem(pdev)) {
+	/* enable I/O and memory of the device */
+	if (pci_enable_device(pdev)) {
 		dev_info(&pdev->dev,
 			 "Cannot re-enable PCI device after reset.\n");
 		result = PCI_ERS_RESULT_DISCONNECT;
 	} else {
 		pci_set_master(pdev);
 		pci_restore_state(pdev);
-		pci_save_state(pdev);
 		pci_wake_from_d3(pdev, false);
 
 		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
@@ -16006,7 +16486,13 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev)
 {
 	struct i40e_pf *pf = pci_get_drvdata(pdev);
 
+	if (test_bit(__I40E_IN_REMOVE, pf->state))
+		return;
+
 	i40e_reset_and_rebuild(pf, false, false);
+#ifdef CONFIG_PCI_IOV
+	i40e_restore_all_vfs_msi_state(pdev);
+#endif /* CONFIG_PCI_IOV */
 }
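Note: both new helpers above bracket interrupt-scheme teardown and restore with the RTNL lock. A sketch of the shape of that locking, with hypothetical helper names standing in for the driver's own:

	#include <linux/rtnetlink.h>

	struct my_pf;	/* placeholder for the driver's PF struct */

	static int example_resume(struct my_pf *pf)
	{
		int err;

		/* queue and IRQ reshuffling must not race netdev config */
		rtnl_lock();
		err = restore_interrupt_scheme(pf);	/* hypothetical */
		if (!err)
			rebuild(pf);			/* hypothetical */
		rtnl_unlock();

		return err;
	}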
 
 /**
@@ -16024,54 +16510,7 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
 	if (test_bit(__I40E_SUSPENDED, pf->state))
 		return;
 
-	i40e_handle_reset_warning(pf, false);
-}
-
-/**
- * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
- * using the mac_address_write admin q function
- * @pf: pointer to i40e_pf struct
- **/
-static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
-{
-	struct i40e_hw *hw = &pf->hw;
-	i40e_status ret;
-	u8 mac_addr[6];
-	u16 flags = 0;
-
-	/* Get current MAC address in case it's an LAA */
-	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
-		ether_addr_copy(mac_addr,
-				pf->vsi[pf->lan_vsi]->netdev->dev_addr);
-	} else {
-		dev_err(&pf->pdev->dev,
-			"Failed to retrieve MAC address; using default\n");
-		ether_addr_copy(mac_addr, hw->mac.addr);
-	}
-
-	/* The FW expects the mac address write cmd to first be called with
-	 * one of these flags before calling it again with the multicast
-	 * enable flags.
-	 */
-	flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
-
-	if (hw->func_caps.flex10_enable && hw->partition_id != 1)
-		flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
-
-	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
-	if (ret) {
-		dev_err(&pf->pdev->dev,
-			"Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
-		return;
-	}
-
-	flags = I40E_AQC_MC_MAG_EN
-		| I40E_AQC_WOL_PRESERVE_ON_PFR
-		| I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
-	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
-	if (ret)
-		dev_err(&pf->pdev->dev,
-			"Failed to enable Multicast Magic Packet wake up\n");
+	i40e_io_resume(pf);
 }
 
 /**
@@ -16086,7 +16525,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
 
 	set_bit(__I40E_SUSPENDED, pf->state);
 	set_bit(__I40E_DOWN, pf->state);
 
-	del_timer_sync(&pf->service_timer);
+	timer_delete_sync(&pf->service_timer);
 	cancel_work_sync(&pf->service_task);
 	i40e_cloud_filter_exit(pf);
 	i40e_fdir_teardown(pf);
@@ -16094,9 +16533,10 @@ static void i40e_shutdown(struct pci_dev *pdev)
 	/* Client close must be called explicitly here because the timer
 	 * has been stopped.
 	 */
-	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+	i40e_notify_client_of_netdev_close(pf, false);
 
-	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
+	if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
+	    pf->wol_en)
 		i40e_enable_mc_magic_wake(pf);
 
 	i40e_prep_for_reset(pf);
@@ -16108,7 +16548,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
 
 	/* Free MSI/legacy interrupt 0 when in recovery mode. */
 	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
-	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
+	    !test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
 		free_irq(pf->pdev->irq, pf);
 
 	/* Since we're going to destroy queues during the
@@ -16129,92 +16569,28 @@ static void i40e_shutdown(struct pci_dev *pdev)
 * i40e_suspend - PM callback for moving to D3
 * @dev: generic device information structure
 **/
-static int __maybe_unused i40e_suspend(struct device *dev)
+static int i40e_suspend(struct device *dev)
 {
 	struct i40e_pf *pf = dev_get_drvdata(dev);
-	struct i40e_hw *hw = &pf->hw;
 
 	/* If we're already suspended, then there is nothing to do */
 	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
 		return 0;
-
-	set_bit(__I40E_DOWN, pf->state);
-
-	/* Ensure service task will not be running */
-	del_timer_sync(&pf->service_timer);
-	cancel_work_sync(&pf->service_task);
-
-	/* Client close must be called explicitly here because the timer
-	 * has been stopped.
-	 */
-	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
-
-	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
-		i40e_enable_mc_magic_wake(pf);
-
-	/* Since we're going to destroy queues during the
-	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
-	 * whole section
-	 */
-	rtnl_lock();
-
-	i40e_prep_for_reset(pf);
-
-	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
-	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
-
-	/* Clear the interrupt scheme and release our IRQs so that the system
-	 * can safely hibernate even when there are a large number of CPUs.
-	 * Otherwise hibernation might fail when mapping all the vectors back
-	 * to CPU0.
-	 */
-	i40e_clear_interrupt_scheme(pf);
-
-	rtnl_unlock();
-
-	return 0;
+	return i40e_io_suspend(pf);
 }
 
 /**
 * i40e_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 **/
-static int __maybe_unused i40e_resume(struct device *dev)
+static int i40e_resume(struct device *dev)
 {
 	struct i40e_pf *pf = dev_get_drvdata(dev);
-	int err;
 
 	/* If we're not suspended, then there is nothing to do */
 	if (!test_bit(__I40E_SUSPENDED, pf->state))
 		return 0;
-
-	/* We need to hold the RTNL lock prior to restoring interrupt schemes,
-	 * since we're going to be restoring queues
-	 */
-	rtnl_lock();
-
-	/* We cleared the interrupt scheme when we suspended, so we need to
-	 * restore it now to resume device functionality.
-	 */
-	err = i40e_restore_interrupt_scheme(pf);
-	if (err) {
-		dev_err(dev, "Cannot restore interrupt scheme: %d\n",
-			err);
-	}
-
-	clear_bit(__I40E_DOWN, pf->state);
-	i40e_reset_and_rebuild(pf, false, true);
-
-	rtnl_unlock();
-
-	/* Clear suspended state last after everything is recovered */
-	clear_bit(__I40E_SUSPENDED, pf->state);
-
-	/* Restart the service task */
-	mod_timer(&pf->service_timer,
-		  round_jiffies(jiffies + pf->service_timer_period));
-
-	return 0;
+	return i40e_io_resume(pf);
 }
 
 static const struct pci_error_handlers i40e_err_handler = {
@@ -16225,16 +16601,14 @@ static const struct pci_error_handlers i40e_err_handler = {
 	.resume = i40e_pci_error_resume,
 };
 
-static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
 
 static struct pci_driver i40e_driver = {
 	.name = i40e_driver_name,
 	.id_table = i40e_pci_tbl,
 	.probe = i40e_probe,
 	.remove = i40e_remove,
-	.driver = {
-		.pm = &i40e_pm_ops,
-	},
+	.driver.pm = pm_sleep_ptr(&i40e_pm_ops),
 	.shutdown = i40e_shutdown,
 	.err_handler = &i40e_err_handler,
 	.sriov_configure = i40e_pci_sriov_configure,
@@ -16248,6 +16622,8 @@ static struct pci_driver i40e_driver = {
 **/
 static int __init i40e_init_module(void)
 {
+	int err;
+
 	pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
 	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
 
@@ -16258,14 +16634,21 @@ static int __init i40e_init_module(void)
 	 * since we need to be able to guarantee forward progress even under
 	 * memory pressure.
 	 */
-	i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
+	i40e_wq = alloc_workqueue("%s", WQ_PERCPU, 0, i40e_driver_name);
 	if (!i40e_wq) {
 		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
 		return -ENOMEM;
 	}
 
 	i40e_dbg_init();
-	return pci_register_driver(&i40e_driver);
+	err = pci_register_driver(&i40e_driver);
+	if (err) {
+		destroy_workqueue(i40e_wq);
+		i40e_dbg_exit();
+		return err;
+	}
+
+	return 0;
 }
 module_init(i40e_init_module);
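Note: the module-init hunk above adds unwinding when pci_register_driver() fails. A minimal sketch of the same shape with placeholder names (ex_wq, ex_driver are invented):

	#include <linux/module.h>
	#include <linux/pci.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *ex_wq;
	static struct pci_driver ex_driver;	/* hypothetical */

	static int __init ex_init(void)
	{
		int err;

		ex_wq = alloc_workqueue("%s", 0, 0, "ex");
		if (!ex_wq)
			return -ENOMEM;

		err = pci_register_driver(&ex_driver);
		if (err)
			/* undo everything taken before registration,
			 * in reverse order of acquisition
			 */
			destroy_workqueue(ex_wq);

		return err;
	}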
