Diffstat (limited to 'drivers/net/ethernet/intel/igb/igb_main.c')
-rw-r--r--    drivers/net/ethernet/intel/igb/igb_main.c | 1224
1 file changed, 778 insertions(+), 446 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 7e6435dc7e80..dbea37269d2c 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -28,7 +28,6 @@ #include <linux/tcp.h> #include <linux/sctp.h> #include <linux/if_ether.h> -#include <linux/aer.h> #include <linux/prefetch.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> @@ -107,8 +106,7 @@ static int igb_setup_all_rx_resources(struct igb_adapter *); static void igb_free_all_tx_resources(struct igb_adapter *); static void igb_free_all_rx_resources(struct igb_adapter *); static void igb_setup_mrqc(struct igb_adapter *); -static int igb_probe(struct pci_dev *, const struct pci_device_id *); -static void igb_remove(struct pci_dev *pdev); +static void igb_init_queue_configuration(struct igb_adapter *adapter); static int igb_sw_init(struct igb_adapter *); int igb_open(struct net_device *); int igb_close(struct net_device *); @@ -117,8 +115,6 @@ static void igb_configure_tx(struct igb_adapter *); static void igb_configure_rx(struct igb_adapter *); static void igb_clean_all_tx_rings(struct igb_adapter *); static void igb_clean_all_rx_rings(struct igb_adapter *); -static void igb_clean_tx_ring(struct igb_ring *); -static void igb_clean_rx_ring(struct igb_ring *); static void igb_set_rx_mode(struct net_device *); static void igb_update_phy_info(struct timer_list *); static void igb_watchdog(struct timer_list *); @@ -175,23 +171,9 @@ static void igb_nfc_filter_restore(struct igb_adapter *adapter); #ifdef CONFIG_PCI_IOV static int igb_vf_configure(struct igb_adapter *adapter, int vf); -static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs); -static int igb_disable_sriov(struct pci_dev *dev); -static int igb_pci_disable_sriov(struct pci_dev *dev); +static int igb_disable_sriov(struct pci_dev *dev, bool reinit); #endif -static int igb_suspend(struct device *); -static int igb_resume(struct device *); -static int igb_runtime_suspend(struct device *dev); -static int igb_runtime_resume(struct device *dev); -static int igb_runtime_idle(struct device *dev); -static const struct dev_pm_ops igb_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume) - SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume, - igb_runtime_idle) -}; -static void igb_shutdown(struct pci_dev *); -static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs); #ifdef CONFIG_IGB_DCA static int igb_notify_dca(struct notifier_block *, unsigned long, void *); static struct notifier_block dca_notifier = { @@ -202,7 +184,7 @@ static struct notifier_block dca_notifier = { #endif #ifdef CONFIG_PCI_IOV static unsigned int max_vfs; -module_param(max_vfs, uint, 0); +module_param(max_vfs, uint, 0444); MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function"); #endif /* CONFIG_PCI_IOV */ @@ -219,20 +201,6 @@ static const struct pci_error_handlers igb_err_handler = { static void igb_init_dmac(struct igb_adapter *adapter, u32 pba); -static struct pci_driver igb_driver = { - .name = igb_driver_name, - .id_table = igb_pci_tbl, - .probe = igb_probe, - .remove = igb_remove, -#ifdef CONFIG_PM - .driver.pm = &igb_pm_ops, -#endif - .shutdown = igb_shutdown, - .sriov_configure = igb_pci_sriov_configure, - .err_handler = &igb_err_handler -}; - -MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); MODULE_LICENSE("GPL v2"); @@ -504,12 +472,17 @@ 
rx_ring_summary: for (i = 0; i < rx_ring->count; i++) { const char *next_desc; - struct igb_rx_buffer *buffer_info; - buffer_info = &rx_ring->rx_buffer_info[i]; + dma_addr_t dma = (dma_addr_t)0; + struct igb_rx_buffer *buffer_info = NULL; rx_desc = IGB_RX_DESC(rx_ring, i); u0 = (struct my_u0 *)rx_desc; staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + if (!rx_ring->xsk_pool) { + buffer_info = &rx_ring->rx_buffer_info[i]; + dma = buffer_info->dma; + } + if (i == rx_ring->next_to_use) next_desc = " NTU"; else if (i == rx_ring->next_to_clean) @@ -529,11 +502,11 @@ rx_ring_summary: "R ", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), - (u64)buffer_info->dma, + (u64)dma, next_desc); if (netif_msg_pktdata(adapter) && - buffer_info->dma && buffer_info->page) { + buffer_info && dma && buffer_info->page) { print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, @@ -577,16 +550,15 @@ static void igb_set_i2c_data(void *data, int state) struct e1000_hw *hw = &adapter->hw; s32 i2cctl = rd32(E1000_I2CPARAMS); - if (state) - i2cctl |= E1000_I2C_DATA_OUT; - else + if (state) { + i2cctl |= E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N; + } else { + i2cctl &= ~E1000_I2C_DATA_OE_N; i2cctl &= ~E1000_I2C_DATA_OUT; + } - i2cctl &= ~E1000_I2C_DATA_OE_N; - i2cctl |= E1000_I2C_CLK_OE_N; wr32(E1000_I2CPARAMS, i2cctl); wrfl(); - } /** @@ -603,8 +575,7 @@ static void igb_set_i2c_clk(void *data, int state) s32 i2cctl = rd32(E1000_I2CPARAMS); if (state) { - i2cctl |= E1000_I2C_CLK_OUT; - i2cctl &= ~E1000_I2C_CLK_OE_N; + i2cctl |= E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N; } else { i2cctl &= ~E1000_I2C_CLK_OUT; i2cctl &= ~E1000_I2C_CLK_OE_N; @@ -649,6 +620,8 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw) return adapter->netdev; } +static struct pci_driver igb_driver; + /** * igb_init_module - Driver Registration Routine * @@ -666,6 +639,10 @@ static int __init igb_init_module(void) dca_register_notify(&dca_notifier); #endif ret = pci_register_driver(&igb_driver); +#ifdef CONFIG_IGB_DCA + if (ret) + dca_unregister_notify(&dca_notifier); +#endif return ret; } @@ -931,6 +908,7 @@ static void igb_configure_msix(struct igb_adapter *adapter) **/ static int igb_request_msix(struct igb_adapter *adapter) { + unsigned int num_q_vectors = adapter->num_q_vectors; struct net_device *netdev = adapter->netdev; int i, err = 0, vector = 0, free_vector = 0; @@ -939,7 +917,13 @@ static int igb_request_msix(struct igb_adapter *adapter) if (err) goto err_out; - for (i = 0; i < adapter->num_q_vectors; i++) { + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { struct igb_q_vector *q_vector = adapter->q_vector[i]; vector++; @@ -963,6 +947,9 @@ static int igb_request_msix(struct igb_adapter *adapter) q_vector); if (err) goto err_free; + + netif_napi_set_irq(&q_vector->napi, + adapter->msix_entries[vector].vector); } igb_configure_msix(adapter); @@ -1190,15 +1177,19 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, return -ENOMEM; ring_count = txr_count + rxr_count; - size = struct_size(q_vector, ring, ring_count); + size = kmalloc_size_roundup(struct_size(q_vector, ring, ring_count)); /* allocate q_vector and rings */ q_vector = adapter->q_vector[v_idx]; if (!q_vector) { q_vector = kzalloc(size, GFP_KERNEL); } else if (size > ksize(q_vector)) { - kfree_rcu(q_vector, rcu); - q_vector = kzalloc(size, GFP_KERNEL); + 
struct igb_q_vector *new_q_vector; + + new_q_vector = kzalloc(size, GFP_KERNEL); + if (new_q_vector) + kfree_rcu(q_vector, rcu); + q_vector = new_q_vector; } else { memset(q_vector, 0, size); } @@ -1206,8 +1197,8 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, return -ENOMEM; /* initialize NAPI */ - netif_napi_add(adapter->netdev, &q_vector->napi, - igb_poll, 64); + netif_napi_add_config(adapter->netdev, &q_vector->napi, igb_poll, + v_idx); /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; @@ -1223,7 +1214,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, /* initialize pointer to rings */ ring = q_vector->ring; - /* intialize ITR */ + /* initialize ITR */ if (rxr_count) { /* rx or rx/tx vector */ if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) @@ -1540,8 +1531,7 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter) adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; } - if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && - (vid != old_vid) && + if (old_vid != IGB_MNG_VLAN_NONE && vid != old_vid && !test_bit(old_vid, adapter->active_vlans)) { /* remove VID from filter table */ igb_vfta_set(hw, vid, pf_id, false, true); @@ -1678,14 +1668,15 @@ static bool is_any_txtime_enabled(struct igb_adapter *adapter) **/ static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) { - struct igb_ring *ring = adapter->tx_ring[queue]; struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; + struct igb_ring *ring; u32 tqavcc, tqavctrl; u16 value; WARN_ON(hw->mac.type != e1000_i210); WARN_ON(queue < 0 || queue > 1); + ring = adapter->tx_ring[queue]; /* If any of the Qav features is enabled, configure queues as SR and * with HIGH PRIO. If none is, then configure them with LOW PRIO and @@ -1939,7 +1930,7 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter) * However, when we do so, no frame from queue 2 and 3 are * transmitted. It seems the MAX_TPKT_SIZE should not be great * or _equal_ to the buffer size programmed in TXPBS. For this - * reason, we set set MAX_ TPKT_SIZE to (4kB - 1) / 64. + * reason, we set MAX_ TPKT_SIZE to (4kB - 1) / 64. 
*/ val = (4096 - 1) / 64; wr32(E1000_I210_DTXMXPKTSZ, val); @@ -2004,7 +1995,11 @@ static void igb_configure(struct igb_adapter *adapter) */ for (i = 0; i < adapter->num_rx_queues; i++) { struct igb_ring *ring = adapter->rx_ring[i]; - igb_alloc_rx_buffers(ring, igb_desc_unused(ring)); + if (ring->xsk_pool) + igb_alloc_rx_buffers_zc(ring, ring->xsk_pool, + igb_desc_unused(ring)); + else + igb_alloc_rx_buffers(ring, igb_desc_unused(ring)); } } @@ -2104,6 +2099,22 @@ static void igb_check_swap_media(struct igb_adapter *adapter) wr32(E1000_CTRL_EXT, ctrl_ext); } +void igb_set_queue_napi(struct igb_adapter *adapter, int vector, + struct napi_struct *napi) +{ + struct igb_q_vector *q_vector = adapter->q_vector[vector]; + + if (q_vector->rx.ring) + netif_queue_set_napi(adapter->netdev, + q_vector->rx.ring->queue_index, + NETDEV_QUEUE_TYPE_RX, napi); + + if (q_vector->tx.ring) + netif_queue_set_napi(adapter->netdev, + q_vector->tx.ring->queue_index, + NETDEV_QUEUE_TYPE_TX, napi); +} + /** * igb_up - Open the interface and prepare it to handle traffic * @adapter: board private structure @@ -2111,6 +2122,7 @@ static void igb_check_swap_media(struct igb_adapter *adapter) int igb_up(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; + struct napi_struct *napi; int i; /* hardware has been reset, we need to reload some things */ @@ -2118,8 +2130,11 @@ int igb_up(struct igb_adapter *adapter) clear_bit(__IGB_DOWN, &adapter->state); - for (i = 0; i < adapter->num_q_vectors; i++) - napi_enable(&(adapter->q_vector[i]->napi)); + for (i = 0; i < adapter->num_q_vectors; i++) { + napi = &adapter->q_vector[i]->napi; + napi_enable(napi); + igb_set_queue_napi(adapter, i, napi); + } if (adapter->flags & IGB_FLAG_HAS_MSIX) igb_configure_msix(adapter); @@ -2189,12 +2204,13 @@ void igb_down(struct igb_adapter *adapter) for (i = 0; i < adapter->num_q_vectors; i++) { if (adapter->q_vector[i]) { napi_synchronize(&adapter->q_vector[i]->napi); + igb_set_queue_napi(adapter, i, NULL); napi_disable(&adapter->q_vector[i]->napi); } } - del_timer_sync(&adapter->watchdog_timer); - del_timer_sync(&adapter->phy_info_timer); + timer_delete_sync(&adapter->watchdog_timer); + timer_delete_sync(&adapter->phy_info_timer); /* record the stats before reset*/ spin_lock(&adapter->stats64_lock); @@ -2247,6 +2263,30 @@ static void igb_enable_mas(struct igb_adapter *adapter) } } +#ifdef CONFIG_IGB_HWMON +/** + * igb_set_i2c_bb - Init I2C interface + * @hw: pointer to hardware structure + **/ +static void igb_set_i2c_bb(struct e1000_hw *hw) +{ + u32 ctrl_ext; + s32 i2cctl; + + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_I2C_ENA; + wr32(E1000_CTRL_EXT, ctrl_ext); + wrfl(); + + i2cctl = rd32(E1000_I2CPARAMS); + i2cctl |= E1000_I2CBB_EN + | E1000_I2C_CLK_OE_N + | E1000_I2C_DATA_OE_N; + wr32(E1000_I2CPARAMS, i2cctl); + wrfl(); +} +#endif + void igb_reset(struct igb_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; @@ -2391,7 +2431,8 @@ void igb_reset(struct igb_adapter *adapter) * interface. 
*/ if (adapter->ets) - mac->ops.init_thermal_sensor_thresh(hw); + igb_set_i2c_bb(hw); + mac->ops.init_thermal_sensor_thresh(hw); } } #endif @@ -2479,7 +2520,7 @@ static int igb_set_features(struct net_device *netdev, static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, - u16 flags, + u16 flags, bool *notified, struct netlink_ext_ack *extack) { /* guarantee we can provide a unique filter for the unicast address */ @@ -2504,7 +2545,7 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev, unsigned int network_hdr_len, mac_hdr_len; /* Make certain the headers can be described by a context descriptor */ - mac_hdr_len = skb_network_header(skb) - skb->data; + mac_hdr_len = skb_network_offset(skb); if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN)) return features & ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC | @@ -2581,15 +2622,18 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter, struct netlink_ext_ack *extack = f->common.extack; if (dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_VLAN))) { + ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) { NL_SET_ERR_MSG_MOD(extack, "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported"); return -EOPNOTSUPP; } + if (flow_rule_match_has_control_flags(rule, extack)) + return -EOPNOTSUPP; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { struct flow_match_eth_addrs match; @@ -2801,6 +2845,22 @@ static int igb_offload_txtime(struct igb_adapter *adapter, return 0; } +static int igb_tc_query_caps(struct igb_adapter *adapter, + struct tc_query_caps_base *base) +{ + switch (base->type) { + case TC_SETUP_QDISC_TAPRIO: { + struct tc_taprio_caps *caps = base->caps; + + caps->broken_mqprio = true; + + return 0; + } + default: + return -EOPNOTSUPP; + } +} + static LIST_HEAD(igb_block_cb_list); static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, @@ -2809,6 +2869,8 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, struct igb_adapter *adapter = netdev_priv(dev); switch (type) { + case TC_QUERY_CAPS: + return igb_tc_query_caps(adapter, type_data); case TC_SETUP_QDISC_CBS: return igb_offload_cbs(adapter, type_data); case TC_SETUP_BLOCK: @@ -2862,8 +2924,14 @@ static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf) bpf_prog_put(old_prog); /* bpf is just replaced, RXQ and MTU are already setup */ - if (!need_reset) + if (!need_reset) { return 0; + } else { + if (prog) + xdp_features_set_redirect_target(dev, true); + else + xdp_features_clear_redirect_target(dev); + } if (running) igb_open(dev); @@ -2873,34 +2941,20 @@ static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf) static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp) { + struct igb_adapter *adapter = netdev_priv(dev); + switch (xdp->command) { case XDP_SETUP_PROG: return igb_xdp_setup(dev, xdp); + case XDP_SETUP_XSK_POOL: + return igb_xsk_pool_setup(adapter, xdp->xsk.pool, + xdp->xsk.queue_id); default: return -EINVAL; } } -static void igb_xdp_ring_update_tail(struct igb_ring *ring) -{ - /* Force memory writes to complete before letting h/w know there - * are new descriptors to fetch. 
- */ - wmb(); - writel(ring->next_to_use, ring->tail); -} - -static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter) -{ - unsigned int r_idx = smp_processor_id(); - - if (r_idx >= adapter->num_tx_queues) - r_idx = r_idx % adapter->num_tx_queues; - - return adapter->tx_ring[r_idx]; -} - -static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp) +int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp) { struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); int cpu = smp_processor_id(); @@ -2914,14 +2968,15 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp) /* During program transitions its possible adapter->xdp_prog is assigned * but ring has not been configured yet. In this case simply abort xmit. */ - tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL; + tx_ring = igb_xdp_is_enabled(adapter) ? + igb_xdp_tx_queue_mapping(adapter) : NULL; if (unlikely(!tx_ring)) return IGB_XDP_CONSUMED; nq = txring_txq(tx_ring); __netif_tx_lock(nq, cpu); /* Avoid transmit queue timeout since we share it with the slow path */ - nq->trans_start = jiffies; + txq_trans_cond_update(nq); ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf); __netif_tx_unlock(nq); @@ -2947,15 +3002,19 @@ static int igb_xdp_xmit(struct net_device *dev, int n, /* During program transitions its possible adapter->xdp_prog is assigned * but ring has not been configured yet. In this case simply abort xmit. */ - tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL; + tx_ring = igb_xdp_is_enabled(adapter) ? + igb_xdp_tx_queue_mapping(adapter) : NULL; if (unlikely(!tx_ring)) return -ENXIO; + if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags))) + return -ENXIO; + nq = txring_txq(tx_ring); __netif_tx_lock(nq, cpu); /* Avoid transmit queue timeout since we share it with the slow path */ - nq->trans_start = jiffies; + txq_trans_cond_update(nq); for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; @@ -2967,11 +3026,11 @@ static int igb_xdp_xmit(struct net_device *dev, int n, nxmit++; } - __netif_tx_unlock(nq); - if (unlikely(flags & XDP_XMIT_FLUSH)) igb_xdp_ring_update_tail(tx_ring); + __netif_tx_unlock(nq); + return nxmit; } @@ -2983,7 +3042,7 @@ static const struct net_device_ops igb_netdev_ops = { .ndo_set_rx_mode = igb_set_rx_mode, .ndo_set_mac_address = igb_set_mac, .ndo_change_mtu = igb_change_mtu, - .ndo_do_ioctl = igb_ioctl, + .ndo_eth_ioctl = igb_ioctl, .ndo_tx_timeout = igb_tx_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, @@ -3001,6 +3060,9 @@ static const struct net_device_ops igb_netdev_ops = { .ndo_setup_tc = igb_setup_tc, .ndo_bpf = igb_xdp, .ndo_xdp_xmit = igb_xdp_xmit, + .ndo_xsk_wakeup = igb_xsk_wakeup, + .ndo_hwtstamp_get = igb_ptp_hwtstamp_get, + .ndo_hwtstamp_set = igb_ptp_hwtstamp_set, }; /** @@ -3027,7 +3089,7 @@ void igb_set_fw_version(struct igb_adapter *adapter) } fallthrough; default: - /* if option is rom valid, display its version too */ + /* if option rom is valid, display its version too */ if (fw.or_valid) { snprintf(adapter->fw_version, sizeof(adapter->fw_version), @@ -3037,14 +3099,14 @@ void igb_set_fw_version(struct igb_adapter *adapter) /* no option rom */ } else if (fw.etrack_id != 0X0000) { snprintf(adapter->fw_version, - sizeof(adapter->fw_version), - "%d.%d, 0x%08x", - fw.eep_major, fw.eep_minor, fw.etrack_id); + sizeof(adapter->fw_version), + "%d.%d, 0x%08x", + fw.eep_major, fw.eep_minor, fw.etrack_id); } 
else { - snprintf(adapter->fw_version, - sizeof(adapter->fw_version), - "%d.%d.%d", - fw.eep_major, fw.eep_minor, fw.eep_build); + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d.%d", + fw.eep_major, fw.eep_minor, fw.eep_build); } break; } @@ -3123,7 +3185,7 @@ static s32 igb_init_i2c(struct igb_adapter *adapter) adapter->i2c_algo.data = adapter; adapter->i2c_adap.algo_data = &adapter->i2c_algo; adapter->i2c_adap.dev.parent = &adapter->pdev->dev; - strlcpy(adapter->i2c_adap.name, "igb BB", + strscpy(adapter->i2c_adap.name, "igb BB", sizeof(adapter->i2c_adap.name)); status = i2c_bit_add_bus(&adapter->i2c_adap); return status; @@ -3149,8 +3211,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) s32 ret_val; static int global_quad_port_a; /* global quad port a indication */ const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; - int err, pci_using_dac; u8 part_str[E1000_PBANUM_LENGTH]; + int err; /* Catch broken hardware that put the wrong VF device ID in * the PCIe SR-IOV capability. @@ -3165,25 +3227,17 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) return err; - pci_using_dac = 0; err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); - if (!err) { - pci_using_dac = 1; - } else { - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "No usable DMA configuration, aborting\n"); - goto err_dma; - } + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; } err = pci_request_mem_regions(pdev, igb_driver_name); if (err) goto err_pci_reg; - pci_enable_pcie_error_reporting(pdev); - pci_set_master(pdev); pci_save_state(pdev); @@ -3214,7 +3268,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) igb_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; - strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); netdev->mem_start = pci_resource_start(pdev, 0); netdev->mem_end = pci_resource_end(pdev, 0); @@ -3291,8 +3345,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (hw->mac.type >= e1000_i350) netdev->hw_features |= NETIF_F_NTUPLE; - if (pci_using_dac) - netdev->features |= NETIF_F_HIGHDMA; + netdev->features |= NETIF_F_HIGHDMA; netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; netdev->mpls_features |= NETIF_F_HW_CSUM; @@ -3306,6 +3359,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->priv_flags |= IFF_SUPP_NOFCS; netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_XSK_ZEROCOPY; /* MTU range: 68 - 9216 */ netdev->min_mtu = ETH_MIN_MTU; @@ -3348,7 +3403,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(&pdev->dev, "NVM Read Error\n"); } - memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); + eth_hw_addr_set(netdev, hw->mac.addr); if (!is_valid_ether_addr(netdev->dev_addr)) { dev_err(&pdev->dev, "Invalid MAC Address\n"); @@ -3510,6 +3565,12 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->ets = true; else adapter->ets = false; + /* Only enable I2C bit banging if an external thermal + * sensor is supported. 
+ */ + if (adapter->ets) + igb_set_i2c_bb(hw); + hw->mac.ops.init_thermal_sensor_thresh(hw); if (igb_sysfs_init(adapter)) dev_err(&pdev->dev, "failed to allocate sysfs resources\n"); @@ -3609,7 +3670,7 @@ err_sw_init: kfree(adapter->shadow_vfta); igb_clear_interrupt_scheme(adapter); #ifdef CONFIG_PCI_IOV - igb_disable_sriov(pdev); + igb_disable_sriov(pdev, false); #endif pci_iounmap(pdev, adapter->io_addr); err_ioremap: @@ -3623,11 +3684,43 @@ err_dma: } #ifdef CONFIG_PCI_IOV -static int igb_disable_sriov(struct pci_dev *pdev) +static int igb_sriov_reinit(struct pci_dev *dev) +{ + struct net_device *netdev = pci_get_drvdata(dev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + rtnl_lock(); + + if (netif_running(netdev)) + igb_close(netdev); + else + igb_reset(adapter); + + igb_clear_interrupt_scheme(adapter); + + igb_init_queue_configuration(adapter); + + if (igb_init_interrupt_scheme(adapter, true)) { + rtnl_unlock(); + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + igb_open(netdev); + + rtnl_unlock(); + + return 0; +} + +static int igb_disable_sriov(struct pci_dev *pdev, bool reinit) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + unsigned long flags; /* reclaim resources allocated to VFs */ if (adapter->vf_data) { @@ -3640,12 +3733,13 @@ static int igb_disable_sriov(struct pci_dev *pdev) pci_disable_sriov(pdev); msleep(500); } - + spin_lock_irqsave(&adapter->vfs_lock, flags); kfree(adapter->vf_mac_list); adapter->vf_mac_list = NULL; kfree(adapter->vf_data); adapter->vf_data = NULL; adapter->vfs_allocated_count = 0; + spin_unlock_irqrestore(&adapter->vfs_lock, flags); wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); wrfl(); msleep(100); @@ -3655,10 +3749,10 @@ static int igb_disable_sriov(struct pci_dev *pdev) adapter->flags |= IGB_FLAG_DMAC; } - return 0; + return reinit ? igb_sriov_reinit(pdev) : 0; } -static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) +static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs, bool reinit) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); @@ -3723,12 +3817,6 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) "Unable to allocate memory for VF MAC filter list\n"); } - /* only call pci_enable_sriov() if no VFs are allocated already */ - if (!old_vfs) { - err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); - if (err) - goto err_out; - } dev_info(&pdev->dev, "%d VFs allocated\n", adapter->vfs_allocated_count); for (i = 0; i < adapter->vfs_allocated_count; i++) @@ -3736,6 +3824,20 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) /* DMA Coalescing is not supported in IOV mode. */ adapter->flags &= ~IGB_FLAG_DMAC; + + if (reinit) { + err = igb_sriov_reinit(pdev); + if (err) + goto err_out; + } + + /* only call pci_enable_sriov() if no VFs are allocated already */ + if (!old_vfs) { + err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); + if (err) + goto err_out; + } + goto out; err_out: @@ -3784,8 +3886,8 @@ static void igb_remove(struct pci_dev *pdev) * disable watchdog from being rescheduled. 
*/ set_bit(__IGB_DOWN, &adapter->state); - del_timer_sync(&adapter->watchdog_timer); - del_timer_sync(&adapter->phy_info_timer); + timer_delete_sync(&adapter->watchdog_timer); + timer_delete_sync(&adapter->phy_info_timer); cancel_work_sync(&adapter->reset_task); cancel_work_sync(&adapter->watchdog_task); @@ -3805,7 +3907,7 @@ static void igb_remove(struct pci_dev *pdev) igb_release_hw_control(adapter); #ifdef CONFIG_PCI_IOV - igb_disable_sriov(pdev); + igb_disable_sriov(pdev, false); #endif unregister_netdev(netdev); @@ -3821,8 +3923,6 @@ static void igb_remove(struct pci_dev *pdev) kfree(adapter->shadow_vfta); free_netdev(netdev); - pci_disable_pcie_error_reporting(pdev); - pci_disable_device(pdev); } @@ -3832,7 +3932,7 @@ static void igb_remove(struct pci_dev *pdev) * * This function initializes the vf specific data storage and then attempts to * allocate the VFs. The reason for ordering it this way is because it is much - * mor expensive time wise to disable SR-IOV than it is to allocate and free + * more expensive time wise to disable SR-IOV than it is to allocate and free * the memory for the VFs. **/ static void igb_probe_vfs(struct igb_adapter *adapter) @@ -3841,8 +3941,9 @@ static void igb_probe_vfs(struct igb_adapter *adapter) struct pci_dev *pdev = adapter->pdev; struct e1000_hw *hw = &adapter->hw; - /* Virtualization features not supported on i210 family. */ - if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) + /* Virtualization features not supported on i210 and 82580 family. */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211) || + (hw->mac.type == e1000_82580)) return; /* Of the below we really only want the effect of getting @@ -3853,7 +3954,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter) igb_reset_interrupt_capability(adapter); pci_sriov_set_totalvfs(pdev, 7); - igb_enable_sriov(pdev, max_vfs); + igb_enable_sriov(pdev, max_vfs, false); #endif /* CONFIG_PCI_IOV */ } @@ -3965,6 +4066,9 @@ static int igb_sw_init(struct igb_adapter *adapter) spin_lock_init(&adapter->nfc_lock); spin_lock_init(&adapter->stats64_lock); + + /* init spinlock to avoid concurrency of VF resources */ + spin_lock_init(&adapter->vfs_lock); #ifdef CONFIG_PCI_IOV switch (hw->mac.type) { case e1000_82576: @@ -4035,8 +4139,9 @@ static int igb_sw_init(struct igb_adapter *adapter) static int __igb_open(struct net_device *netdev, bool resuming) { struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + struct napi_struct *napi; int err; int i; @@ -4088,8 +4193,11 @@ static int __igb_open(struct net_device *netdev, bool resuming) /* From here on the code is the same as igb_up() */ clear_bit(__IGB_DOWN, &adapter->state); - for (i = 0; i < adapter->num_q_vectors; i++) - napi_enable(&(adapter->q_vector[i]->napi)); + for (i = 0; i < adapter->num_q_vectors; i++) { + napi = &adapter->q_vector[i]->napi; + napi_enable(napi); + igb_set_queue_napi(adapter, i, napi); + } /* Clear any pending interrupts. 
*/ rd32(E1000_TSICR); @@ -4282,6 +4390,8 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, u64 tdba = ring->dma; int reg_idx = ring->reg_idx; + WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring)); + wr32(E1000_TDLEN(reg_idx), ring->count * sizeof(union e1000_adv_tx_desc)); wr32(E1000_TDBAL(reg_idx), @@ -4336,7 +4446,18 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring) { struct igb_adapter *adapter = netdev_priv(rx_ring->netdev); struct device *dev = rx_ring->dev; - int size; + int size, res; + + /* XDP RX-queue info */ + if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, + rx_ring->queue_index, 0); + if (res < 0) { + dev_err(dev, "Failed to register xdp_rxq index %u\n", + rx_ring->queue_index); + return res; + } size = sizeof(struct igb_rx_buffer) * rx_ring->count; @@ -4359,14 +4480,10 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring) rx_ring->xdp_prog = adapter->xdp_prog; - /* XDP RX-queue info */ - if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, - rx_ring->queue_index, 0) < 0) - goto err; - return 0; err: + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); @@ -4631,12 +4748,17 @@ void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring) struct e1000_hw *hw = &adapter->hw; int reg_idx = ring->reg_idx; u32 srrctl = 0; + u32 buf_size; - srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; - if (ring_uses_large_buffer(ring)) - srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT; + if (ring->xsk_pool) + buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); + else if (ring_uses_large_buffer(ring)) + buf_size = IGB_RXBUFFER_3072; else - srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; + buf_size = IGB_RXBUFFER_2048; + + srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; + srrctl |= buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT; srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; if (hw->mac.type >= e1000_82580) srrctl |= E1000_SRRCTL_TIMESTAMP; @@ -4668,8 +4790,17 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, u32 rxdctl = 0; xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); - WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, - MEM_TYPE_PAGE_SHARED, NULL)); + WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring)); + if (ring->xsk_pool) { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, + NULL)); + xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); + } else { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, + NULL)); + } /* disable the queue */ wr32(E1000_RXDCTL(reg_idx), 0); @@ -4696,9 +4827,12 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, rxdctl |= IGB_RX_HTHRESH << 8; rxdctl |= IGB_RX_WTHRESH << 16; - /* initialize rx_buffer_info */ - memset(ring->rx_buffer_info, 0, - sizeof(struct igb_rx_buffer) * ring->count); + if (ring->xsk_pool) + memset(ring->rx_buffer_info_zc, 0, + sizeof(*ring->rx_buffer_info_zc) * ring->count); + else + memset(ring->rx_buffer_info, 0, + sizeof(*ring->rx_buffer_info) * ring->count); /* initialize Rx descriptor 0 */ rx_desc = IGB_RX_DESC(ring, 0); @@ -4712,6 +4846,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, static void igb_set_rx_buffer_len(struct igb_adapter *adapter, struct igb_ring *rx_ring) { +#if (PAGE_SIZE < 8192) + struct e1000_hw *hw = &adapter->hw; +#endif + /* set 
build_skb and buffer size flags */ clear_ring_build_skb_enabled(rx_ring); clear_ring_uses_large_buffer(rx_ring); @@ -4722,10 +4860,10 @@ static void igb_set_rx_buffer_len(struct igb_adapter *adapter, set_ring_build_skb_enabled(rx_ring); #if (PAGE_SIZE < 8192) - if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) - return; - - set_ring_uses_large_buffer(rx_ring); + if (adapter->max_frame_size > IGB_MAX_FRAME_BUILD_SKB || + IGB_2K_TOO_SMALL_WITH_PADDING || + rd32(E1000_RCTL) & E1000_RCTL_SBP) + set_ring_uses_large_buffer(rx_ring); #endif } @@ -4795,16 +4933,24 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter) * igb_clean_tx_ring - Free Tx Buffers * @tx_ring: ring to be cleaned **/ -static void igb_clean_tx_ring(struct igb_ring *tx_ring) +void igb_clean_tx_ring(struct igb_ring *tx_ring) { u16 i = tx_ring->next_to_clean; struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + u32 xsk_frames = 0; while (i != tx_ring->next_to_use) { union e1000_adv_tx_desc *eop_desc, *tx_desc; - /* Free all the Tx ring sk_buffs */ - dev_kfree_skb_any(tx_buffer->skb); + /* Free all the Tx ring sk_buffs or xdp frames */ + if (tx_buffer->type == IGB_TYPE_SKB) { + dev_kfree_skb_any(tx_buffer->skb); + } else if (tx_buffer->type == IGB_TYPE_XDP) { + xdp_return_frame(tx_buffer->xdpf); + } else if (tx_buffer->type == IGB_TYPE_XSK) { + xsk_frames++; + goto skip_for_xsk; + } /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -4835,6 +4981,9 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) DMA_TO_DEVICE); } +skip_for_xsk: + tx_buffer->next_to_watch = NULL; + /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; i++; @@ -4847,6 +4996,9 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) /* reset BQL for queue */ netdev_tx_reset_queue(txring_txq(tx_ring)); + if (tx_ring->xsk_pool && xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + /* reset next_to_use and next_to_clean */ tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; @@ -4877,8 +5029,13 @@ void igb_free_rx_resources(struct igb_ring *rx_ring) rx_ring->xdp_prog = NULL; xdp_rxq_info_unreg(&rx_ring->xdp_rxq); - vfree(rx_ring->rx_buffer_info); - rx_ring->rx_buffer_info = NULL; + if (rx_ring->xsk_pool) { + vfree(rx_ring->rx_buffer_info_zc); + rx_ring->rx_buffer_info_zc = NULL; + } else { + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + } /* if not set, then don't free */ if (!rx_ring->desc) @@ -4909,13 +5066,18 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter) * igb_clean_rx_ring - Free Rx Buffers per Queue * @rx_ring: ring to free buffers from **/ -static void igb_clean_rx_ring(struct igb_ring *rx_ring) +void igb_clean_rx_ring(struct igb_ring *rx_ring) { u16 i = rx_ring->next_to_clean; dev_kfree_skb(rx_ring->skb); rx_ring->skb = NULL; + if (rx_ring->xsk_pool) { + igb_clean_rx_ring_zc(rx_ring); + goto skip_for_xsk; + } + /* Free all the Rx ring sk_buffs */ while (i != rx_ring->next_to_alloc) { struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; @@ -4943,6 +5105,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring) i = 0; } +skip_for_xsk: rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; @@ -4977,7 +5140,7 @@ static int igb_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 
/* set the correct pool for the new PF MAC address in entry 0 */ @@ -5302,7 +5465,8 @@ static void igb_spoof_check(struct igb_adapter *adapter) */ static void igb_update_phy_info(struct timer_list *t) { - struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer); + struct igb_adapter *adapter = timer_container_of(adapter, t, + phy_info_timer); igb_get_phy_info(&adapter->hw); } @@ -5392,7 +5556,8 @@ static void igb_check_lvmmc(struct igb_adapter *adapter) **/ static void igb_watchdog(struct timer_list *t) { - struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer); + struct igb_adapter *adapter = timer_container_of(adapter, t, + watchdog_timer); /* Do the rest outside of interrupt context */ schedule_work(&adapter->watchdog_task); } @@ -5487,7 +5652,8 @@ static void igb_watchdog_task(struct work_struct *work) break; } - if (adapter->link_speed != SPEED_1000) + if (adapter->link_speed != SPEED_1000 || + !hw->phy.ops.read_reg) goto no_wait; /* wait for Remote receiver status OK */ @@ -5591,11 +5757,29 @@ no_wait: if (adapter->flags & IGB_FLAG_HAS_MSIX) { u32 eics = 0; - for (i = 0; i < adapter->num_q_vectors; i++) - eics |= adapter->q_vector[i]->eims_value; - wr32(E1000_EICS, eics); + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + struct igb_ring *rx_ring; + + if (!q_vector->rx.ring) + continue; + + rx_ring = adapter->rx_ring[q_vector->rx.ring->queue_index]; + + if (test_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) { + eics |= q_vector->eims_value; + clear_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); + } + } + if (eics) + wr32(E1000_EICS, eics); } else { - wr32(E1000_ICS, E1000_ICS_RXDMT0); + struct igb_ring *rx_ring = adapter->rx_ring[0]; + + if (test_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) { + clear_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); + wr32(E1000_ICS, E1000_ICS_RXDMT0); + } } igb_spoof_check(adapter); @@ -6238,74 +6422,108 @@ int igb_xmit_xdp_ring(struct igb_adapter *adapter, struct igb_ring *tx_ring, struct xdp_frame *xdpf) { - union e1000_adv_tx_desc *tx_desc; - u32 len, cmd_type, olinfo_status; - struct igb_tx_buffer *tx_buffer; - dma_addr_t dma; - u16 i; + struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); + u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? 
sinfo->nr_frags : 0; + u16 count, i, index = tx_ring->next_to_use; + struct igb_tx_buffer *tx_head = &tx_ring->tx_buffer_info[index]; + struct igb_tx_buffer *tx_buffer = tx_head; + union e1000_adv_tx_desc *tx_desc = IGB_TX_DESC(tx_ring, index); + u32 len = xdpf->len, cmd_type, olinfo_status; + void *data = xdpf->data; - len = xdpf->len; - - if (unlikely(!igb_desc_unused(tx_ring))) - return IGB_XDP_CONSUMED; + count = TXD_USE_COUNT(len); + for (i = 0; i < nr_frags; i++) + count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i])); - dma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE); - if (dma_mapping_error(tx_ring->dev, dma)) + if (igb_maybe_stop_tx(tx_ring, count + 3)) return IGB_XDP_CONSUMED; + i = 0; /* record the location of the first descriptor for this packet */ - tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; - tx_buffer->bytecount = len; - tx_buffer->gso_segs = 1; - tx_buffer->protocol = 0; + tx_head->bytecount = xdp_get_frame_len(xdpf); + tx_head->type = IGB_TYPE_XDP; + tx_head->gso_segs = 1; + tx_head->xdpf = xdpf; - i = tx_ring->next_to_use; - tx_desc = IGB_TX_DESC(tx_ring, i); + olinfo_status = tx_head->bytecount << E1000_ADVTXD_PAYLEN_SHIFT; + /* 82575 requires a unique index per ring */ + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + olinfo_status |= tx_ring->reg_idx << 4; + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); - dma_unmap_len_set(tx_buffer, len, len); - dma_unmap_addr_set(tx_buffer, dma, dma); - tx_buffer->type = IGB_TYPE_XDP; - tx_buffer->xdpf = xdpf; + for (;;) { + dma_addr_t dma; - tx_desc->read.buffer_addr = cpu_to_le64(dma); + dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma)) + goto unmap; - /* put descriptor type bits */ - cmd_type = E1000_ADVTXD_DTYP_DATA | - E1000_ADVTXD_DCMD_DEXT | - E1000_ADVTXD_DCMD_IFCS; - cmd_type |= len | IGB_TXD_DCMD; - tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, len); + dma_unmap_addr_set(tx_buffer, dma, dma); - olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT; - /* 82575 requires a unique index per ring */ - if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) - olinfo_status |= tx_ring->reg_idx << 4; + /* put descriptor type bits */ + cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT | + E1000_ADVTXD_DCMD_IFCS | len; - tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + tx_buffer->protocol = 0; - netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount); + if (++index == tx_ring->count) + index = 0; + if (i == nr_frags) + break; + + tx_buffer = &tx_ring->tx_buffer_info[index]; + tx_desc = IGB_TX_DESC(tx_ring, index); + tx_desc->read.olinfo_status = 0; + + data = skb_frag_address(&sinfo->frags[i]); + len = skb_frag_size(&sinfo->frags[i]); + i++; + } + tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_TXD_DCMD); + + netdev_tx_sent_queue(txring_txq(tx_ring), tx_head->bytecount); /* set the timestamp */ - tx_buffer->time_stamp = jiffies; + tx_head->time_stamp = jiffies; /* Avoid any potential race with xdp_xmit and cleanup */ smp_wmb(); /* set next_to_watch value indicating a packet is present */ - i++; - if (i == tx_ring->count) - i = 0; - - tx_buffer->next_to_watch = tx_desc; - tx_ring->next_to_use = i; + tx_head->next_to_watch = tx_desc; + tx_ring->next_to_use = index; /* Make sure there is space in the ring for the next 
send. */ igb_maybe_stop_tx(tx_ring, DESC_NEEDED); if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) - writel(i, tx_ring->tail); + writel(index, tx_ring->tail); return IGB_XDP_TX; + +unmap: + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[index]; + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + if (tx_buffer == tx_head) + break; + + if (!index) + index += tx_ring->count; + index--; + } + + return IGB_XDP_CONSUMED; } netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, @@ -6334,6 +6552,9 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_BUSY; } + if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags))) + return NETDEV_TX_BUSY; + /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; first->type = IGB_TYPE_SKB; @@ -6489,7 +6710,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) struct igb_adapter *adapter = netdev_priv(netdev); int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD; - if (adapter->xdp_prog) { + if (igb_xdp_is_enabled(adapter)) { int i; for (i = 0; i < adapter->num_rx_queues; i++) { @@ -6519,7 +6740,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); if (netif_running(netdev)) igb_up(adapter); @@ -6570,10 +6791,10 @@ void igb_update_stats(struct igb_adapter *adapter) } do { - start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + start = u64_stats_fetch_begin(&ring->rx_syncp); _bytes = ring->rx_stats.bytes; _packets = ring->rx_stats.packets; - } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); bytes += _bytes; packets += _packets; } @@ -6586,10 +6807,10 @@ void igb_update_stats(struct igb_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) { struct igb_ring *ring = adapter->tx_ring[i]; do { - start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + start = u64_stats_fetch_begin(&ring->tx_syncp); _bytes = ring->tx_stats.bytes; _packets = ring->tx_stats.packets; - } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); bytes += _bytes; packets += _packets; } @@ -6721,77 +6942,156 @@ void igb_update_stats(struct igb_adapter *adapter) } } -static void igb_tsync_interrupt(struct igb_adapter *adapter) +static void igb_perout(struct igb_adapter *adapter, int tsintr_tt) { + int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_PEROUT, tsintr_tt); + struct e1000_hw *hw = &adapter->hw; + struct timespec64 ts; + u32 tsauxc; + + if (pin < 0 || pin >= IGB_N_SDP) + return; + + spin_lock(&adapter->tmreg_lock); + + if (hw->mac.type == e1000_82580 || + hw->mac.type == e1000_i354 || + hw->mac.type == e1000_i350) { + s64 ns = timespec64_to_ns(&adapter->perout[tsintr_tt].period); + u32 systiml, systimh, level_mask, level, rem; + u64 systim, now; + + /* read systim registers in sequence */ + rd32(E1000_SYSTIMR); + systiml = rd32(E1000_SYSTIML); + systimh = rd32(E1000_SYSTIMH); + systim = (((u64)(systimh & 0xFF)) << 32) | ((u64)systiml); + now = timecounter_cyc2time(&adapter->tc, systim); + + if (pin < 2) { + level_mask = (tsintr_tt == 1) ? 0x80000 : 0x40000; + level = (rd32(E1000_CTRL) & level_mask) ? 
1 : 0; + } else { + level_mask = (tsintr_tt == 1) ? 0x80 : 0x40; + level = (rd32(E1000_CTRL_EXT) & level_mask) ? 1 : 0; + } + + div_u64_rem(now, ns, &rem); + systim = systim + (ns - rem); + + /* synchronize pin level with rising/falling edges */ + div_u64_rem(now, ns << 1, &rem); + if (rem < ns) { + /* first half of period */ + if (level == 0) { + /* output is already low, skip this period */ + systim += ns; + pr_notice("igb: periodic output on %s missed falling edge\n", + adapter->sdp_config[pin].name); + } + } else { + /* second half of period */ + if (level == 1) { + /* output is already high, skip this period */ + systim += ns; + pr_notice("igb: periodic output on %s missed rising edge\n", + adapter->sdp_config[pin].name); + } + } + + /* for this chip family tv_sec is the upper part of the binary value, + * so not seconds + */ + ts.tv_nsec = (u32)systim; + ts.tv_sec = ((u32)(systim >> 32)) & 0xFF; + } else { + ts = timespec64_add(adapter->perout[tsintr_tt].start, + adapter->perout[tsintr_tt].period); + } + + /* u32 conversion of tv_sec is safe until y2106 */ + wr32((tsintr_tt == 1) ? E1000_TRGTTIML1 : E1000_TRGTTIML0, ts.tv_nsec); + wr32((tsintr_tt == 1) ? E1000_TRGTTIMH1 : E1000_TRGTTIMH0, (u32)ts.tv_sec); + tsauxc = rd32(E1000_TSAUXC); + tsauxc |= TSAUXC_EN_TT0; + wr32(E1000_TSAUXC, tsauxc); + adapter->perout[tsintr_tt].start = ts; + + spin_unlock(&adapter->tmreg_lock); +} + +static void igb_extts(struct igb_adapter *adapter, int tsintr_tt) +{ + int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_EXTTS, tsintr_tt); + int auxstmpl = (tsintr_tt == 1) ? E1000_AUXSTMPL1 : E1000_AUXSTMPL0; + int auxstmph = (tsintr_tt == 1) ? E1000_AUXSTMPH1 : E1000_AUXSTMPH0; struct e1000_hw *hw = &adapter->hw; struct ptp_clock_event event; struct timespec64 ts; - u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR); + unsigned long flags; + + if (pin < 0 || pin >= IGB_N_SDP) + return; + + if (hw->mac.type == e1000_82580 || + hw->mac.type == e1000_i354 || + hw->mac.type == e1000_i350) { + u64 ns = rd32(auxstmpl); + + ns += ((u64)(rd32(auxstmph) & 0xFF)) << 32; + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_cyc2time(&adapter->tc, ns); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + ts = ns_to_timespec64(ns); + } else { + ts.tv_nsec = rd32(auxstmpl); + ts.tv_sec = rd32(auxstmph); + } + + event.type = PTP_CLOCK_EXTTS; + event.index = tsintr_tt; + event.timestamp = ts.tv_sec * 1000000000ULL + ts.tv_nsec; + ptp_clock_event(adapter->ptp_clock, &event); +} + +static void igb_tsync_interrupt(struct igb_adapter *adapter) +{ + const u32 mask = (TSINTR_SYS_WRAP | E1000_TSICR_TXTS | + TSINTR_TT0 | TSINTR_TT1 | + TSINTR_AUTT0 | TSINTR_AUTT1); + struct e1000_hw *hw = &adapter->hw; + u32 tsicr = rd32(E1000_TSICR); + struct ptp_clock_event event; + + if (hw->mac.type == e1000_82580) { + /* 82580 has a hardware bug that requires an explicit + * write to clear the TimeSync interrupt cause. 
+ */ + wr32(E1000_TSICR, tsicr & mask); + } if (tsicr & TSINTR_SYS_WRAP) { event.type = PTP_CLOCK_PPS; if (adapter->ptp_caps.pps) ptp_clock_event(adapter->ptp_clock, &event); - ack |= TSINTR_SYS_WRAP; } if (tsicr & E1000_TSICR_TXTS) { /* retrieve hardware timestamp */ schedule_work(&adapter->ptp_tx_work); - ack |= E1000_TSICR_TXTS; - } - - if (tsicr & TSINTR_TT0) { - spin_lock(&adapter->tmreg_lock); - ts = timespec64_add(adapter->perout[0].start, - adapter->perout[0].period); - /* u32 conversion of tv_sec is safe until y2106 */ - wr32(E1000_TRGTTIML0, ts.tv_nsec); - wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec); - tsauxc = rd32(E1000_TSAUXC); - tsauxc |= TSAUXC_EN_TT0; - wr32(E1000_TSAUXC, tsauxc); - adapter->perout[0].start = ts; - spin_unlock(&adapter->tmreg_lock); - ack |= TSINTR_TT0; - } - - if (tsicr & TSINTR_TT1) { - spin_lock(&adapter->tmreg_lock); - ts = timespec64_add(adapter->perout[1].start, - adapter->perout[1].period); - wr32(E1000_TRGTTIML1, ts.tv_nsec); - wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec); - tsauxc = rd32(E1000_TSAUXC); - tsauxc |= TSAUXC_EN_TT1; - wr32(E1000_TSAUXC, tsauxc); - adapter->perout[1].start = ts; - spin_unlock(&adapter->tmreg_lock); - ack |= TSINTR_TT1; - } - - if (tsicr & TSINTR_AUTT0) { - nsec = rd32(E1000_AUXSTMPL0); - sec = rd32(E1000_AUXSTMPH0); - event.type = PTP_CLOCK_EXTTS; - event.index = 0; - event.timestamp = sec * 1000000000ULL + nsec; - ptp_clock_event(adapter->ptp_clock, &event); - ack |= TSINTR_AUTT0; - } - - if (tsicr & TSINTR_AUTT1) { - nsec = rd32(E1000_AUXSTMPL1); - sec = rd32(E1000_AUXSTMPH1); - event.type = PTP_CLOCK_EXTTS; - event.index = 1; - event.timestamp = sec * 1000000000ULL + nsec; - ptp_clock_event(adapter->ptp_clock, &event); - ack |= TSINTR_AUTT1; - } - - /* acknowledge the interrupts */ - wr32(E1000_TSICR, ack); + } + + if (tsicr & TSINTR_TT0) + igb_perout(adapter, 0); + + if (tsicr & TSINTR_TT1) + igb_perout(adapter, 1); + + if (tsicr & TSINTR_AUTT0) + igb_extts(adapter, 0); + + if (tsicr & TSINTR_AUTT1) + igb_extts(adapter, 1); } static irqreturn_t igb_msix_other(int irq, void *data) @@ -7065,7 +7365,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) static int igb_set_vf_multicasts(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) { - int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; + int n = FIELD_GET(E1000_VT_MSGINFO_MASK, msgbuf[0]); u16 *hash_list = (u16 *)&msgbuf[1]; struct vf_data_storage *vf_data = &adapter->vf_data[vf]; int i; @@ -7325,7 +7625,7 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf, static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) { - int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; + int add = FIELD_GET(E1000_VT_MSGINFO_MASK, msgbuf[0]); int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); int ret; @@ -7381,7 +7681,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) { struct e1000_hw *hw = &adapter->hw; unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; - u32 reg, msgbuf[3]; + u32 reg, msgbuf[3] = {}; u8 *addr = (u8 *)(&msgbuf[1]); /* process all the same items cleared in a function level reset */ @@ -7626,15 +7926,28 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf, { struct pci_dev *pdev = adapter->pdev; struct vf_data_storage *vf_data = &adapter->vf_data[vf]; - struct list_head *pos; - struct vf_mac_filter *entry = NULL; + struct vf_mac_filter *entry; + bool found = false; int ret = 0; + if ((vf_data->flags & 
IGB_VF_FLAG_PF_SET_MAC) && + !vf_data->trusted) { + dev_warn(&pdev->dev, + "VF %d requested MAC filter but is administratively denied\n", + vf); + return -EINVAL; + } + if (!is_valid_ether_addr(addr)) { + dev_warn(&pdev->dev, + "VF %d attempted to set invalid MAC filter\n", + vf); + return -EINVAL; + } + switch (info) { case E1000_VF_MAC_FILTER_CLR: /* remove all unicast MAC filters related to the current VF */ - list_for_each(pos, &adapter->vf_macs.l) { - entry = list_entry(pos, struct vf_mac_filter, l); + list_for_each_entry(entry, &adapter->vf_macs.l, l) { if (entry->vf == vf) { entry->vf = -1; entry->free = true; @@ -7643,28 +7956,15 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf, } break; case E1000_VF_MAC_FILTER_ADD: - if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && - !vf_data->trusted) { - dev_warn(&pdev->dev, - "VF %d requested MAC filter but is administratively denied\n", - vf); - return -EINVAL; - } - if (!is_valid_ether_addr(addr)) { - dev_warn(&pdev->dev, - "VF %d attempted to set invalid MAC filter\n", - vf); - return -EINVAL; - } - /* try to find empty slot in the list */ - list_for_each(pos, &adapter->vf_macs.l) { - entry = list_entry(pos, struct vf_mac_filter, l); - if (entry->free) + list_for_each_entry(entry, &adapter->vf_macs.l, l) { + if (entry->free) { + found = true; break; + } } - if (entry && entry->free) { + if (found) { entry->free = false; entry->vf = vf; ether_addr_copy(entry->vf_mac, addr); @@ -7824,8 +8124,10 @@ unlock: static void igb_msg_task(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; + unsigned long flags; u32 vf; + spin_lock_irqsave(&adapter->vfs_lock, flags); for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { /* process any reset requests */ if (!igb_check_for_rst(hw, vf)) @@ -7839,6 +8141,7 @@ static void igb_msg_task(struct igb_adapter *adapter) if (!igb_check_for_ack(hw, vf)) igb_rcv_ack_from_vf(adapter, vf); } + spin_unlock_irqrestore(&adapter->vfs_lock, flags); } /** @@ -7980,6 +8283,7 @@ static int igb_poll(struct napi_struct *napi, int budget) struct igb_q_vector *q_vector = container_of(napi, struct igb_q_vector, napi); + struct xsk_buff_pool *xsk_pool; bool clean_complete = true; int work_done = 0; @@ -7991,7 +8295,12 @@ static int igb_poll(struct napi_struct *napi, int budget) clean_complete = igb_clean_tx_irq(q_vector, budget); if (q_vector->rx.ring) { - int cleaned = igb_clean_rx_irq(q_vector, budget); + int cleaned; + + xsk_pool = READ_ONCE(q_vector->rx.ring->xsk_pool); + cleaned = xsk_pool ? 
+ igb_clean_rx_irq_zc(q_vector, xsk_pool, budget) : + igb_clean_rx_irq(q_vector, budget); work_done += cleaned; if (cleaned >= budget) @@ -8008,7 +8317,7 @@ static int igb_poll(struct napi_struct *napi, int budget) if (likely(napi_complete_done(napi, work_done))) igb_ring_irq_enable(q_vector); - return min(work_done, budget - 1); + return work_done; } /** @@ -8020,13 +8329,18 @@ static int igb_poll(struct napi_struct *napi, int budget) **/ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) { - struct igb_adapter *adapter = q_vector->adapter; - struct igb_ring *tx_ring = q_vector->tx.ring; - struct igb_tx_buffer *tx_buffer; - union e1000_adv_tx_desc *tx_desc; unsigned int total_bytes = 0, total_packets = 0; + struct igb_adapter *adapter = q_vector->adapter; unsigned int budget = q_vector->tx.work_limit; + struct igb_ring *tx_ring = q_vector->tx.ring; unsigned int i = tx_ring->next_to_clean; + union e1000_adv_tx_desc *tx_desc; + struct igb_tx_buffer *tx_buffer; + struct xsk_buff_pool *xsk_pool; + int cpu = smp_processor_id(); + bool xsk_xmit_done = true; + struct netdev_queue *nq; + u32 xsk_frames = 0; if (test_bit(__IGB_DOWN, &adapter->state)) return true; @@ -8057,10 +8371,14 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) total_packets += tx_buffer->gso_segs; /* free the skb */ - if (tx_buffer->type == IGB_TYPE_SKB) + if (tx_buffer->type == IGB_TYPE_SKB) { napi_consume_skb(tx_buffer->skb, napi_budget); - else + } else if (tx_buffer->type == IGB_TYPE_XDP) { xdp_return_frame(tx_buffer->xdpf); + } else if (tx_buffer->type == IGB_TYPE_XSK) { + xsk_frames++; + goto skip_for_xsk; + } /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -8092,6 +8410,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) } } +skip_for_xsk: /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; tx_desc++; @@ -8120,6 +8439,21 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_packets += total_packets; + xsk_pool = READ_ONCE(tx_ring->xsk_pool); + if (xsk_pool) { + if (xsk_frames) + xsk_tx_completed(xsk_pool, xsk_frames); + if (xsk_uses_need_wakeup(xsk_pool)) + xsk_set_tx_need_wakeup(xsk_pool); + + nq = txring_txq(tx_ring); + __netif_tx_lock(nq, cpu); + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); + xsk_xmit_done = igb_xmit_zc(tx_ring, xsk_pool); + __netif_tx_unlock(nq); + } + if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { struct e1000_hw *hw = &adapter->hw; @@ -8182,7 +8516,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) } } - return !!budget; + return !!budget && xsk_xmit_done; } /** @@ -8349,7 +8683,7 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, net_prefetch(xdp->data_meta); /* build an skb around the page buffer */ - skb = build_skb(xdp->data_hard_start, truesize); + skb = napi_build_skb(xdp->data_hard_start, truesize); if (unlikely(!skb)) return NULL; @@ -8373,9 +8707,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, return skb; } -static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter, - struct igb_ring *rx_ring, - struct xdp_buff *xdp) +static int igb_run_xdp(struct igb_adapter *adapter, struct igb_ring *rx_ring, + struct xdp_buff *xdp) { int err, result = IGB_XDP_PASS; struct bpf_prog *xdp_prog; @@ -8404,7 +8737,7 @@ static struct sk_buff *igb_run_xdp(struct 
igb_adapter *adapter, result = IGB_XDP_REDIR; break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(adapter->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: out_failure: @@ -8415,7 +8748,7 @@ out_failure: break; } xdp_out: - return ERR_PTR(-result); + return result; } static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring, @@ -8541,10 +8874,6 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring, union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) { - /* XDP packets use error pointer so abort at this point */ - if (IS_ERR(skb)) - return true; - if (unlikely((igb_test_staterr(rx_desc, E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { struct net_device *netdev = rx_ring->netdev; @@ -8571,9 +8900,9 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring, * order to populate the hash, checksum, VLAN, timestamp, protocol, and * other fields within the skb. **/ -static void igb_process_skb_fields(struct igb_ring *rx_ring, - union e1000_adv_rx_desc *rx_desc, - struct sk_buff *skb) +void igb_process_skb_fields(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) { struct net_device *dev = rx_ring->netdev; @@ -8655,17 +8984,50 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring, rx_buffer->page = NULL; } +void igb_finalize_xdp(struct igb_adapter *adapter, unsigned int status) +{ + int cpu = smp_processor_id(); + struct netdev_queue *nq; + + if (status & IGB_XDP_REDIR) + xdp_do_flush(); + + if (status & IGB_XDP_TX) { + struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter); + + nq = txring_txq(tx_ring); + __netif_tx_lock(nq, cpu); + igb_xdp_ring_update_tail(tx_ring); + __netif_tx_unlock(nq); + } +} + +void igb_update_rx_stats(struct igb_q_vector *q_vector, unsigned int packets, + unsigned int bytes) +{ + struct igb_ring *ring = q_vector->rx.ring; + + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.packets += packets; + ring->rx_stats.bytes += bytes; + u64_stats_update_end(&ring->rx_syncp); + + q_vector->rx.total_packets += packets; + q_vector->rx.total_bytes += bytes; +} + static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) { + unsigned int total_bytes = 0, total_packets = 0; struct igb_adapter *adapter = q_vector->adapter; struct igb_ring *rx_ring = q_vector->rx.ring; - struct sk_buff *skb = rx_ring->skb; - unsigned int total_bytes = 0, total_packets = 0; u16 cleaned_count = igb_desc_unused(rx_ring); + struct sk_buff *skb = rx_ring->skb; unsigned int xdp_xmit = 0; struct xdp_buff xdp; u32 frame_sz = 0; int rx_buf_pgcnt; + int xdp_res = 0; /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ #if (PAGE_SIZE < 8192) @@ -8718,16 +9080,15 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) unsigned int offset = pkt_offset + igb_rx_offset(rx_ring); xdp_prepare_buff(&xdp, hard_start, offset, size, true); + xdp_buff_clear_frags_flag(&xdp); #if (PAGE_SIZE > 4096) /* At larger PAGE_SIZE, frame_sz depend on len size */ xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size); #endif - skb = igb_run_xdp(adapter, rx_ring, &xdp); + xdp_res = igb_run_xdp(adapter, rx_ring, &xdp); } - if (IS_ERR(skb)) { - unsigned int xdp_res = -PTR_ERR(skb); - + if (xdp_res) { if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) { xdp_xmit |= xdp_res; igb_rx_buffer_flip(rx_ring, rx_buffer, size); @@ -8746,9 +9107,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) &xdp, timestamp); /* exit if we failed to retrieve a buffer */ - if (!skb) { + if (!xdp_res 
&& !skb) { rx_ring->rx_stats.alloc_failed++; rx_buffer->pagecnt_bias++; + set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); break; } @@ -8760,7 +9122,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) continue; /* verify the packet layout is correct */ - if (igb_cleanup_headers(rx_ring, rx_desc, skb)) { + if (xdp_res || igb_cleanup_headers(rx_ring, rx_desc, skb)) { skb = NULL; continue; } @@ -8783,21 +9145,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) /* place incomplete frames back on ring for completion */ rx_ring->skb = skb; - if (xdp_xmit & IGB_XDP_REDIR) - xdp_do_flush(); - - if (xdp_xmit & IGB_XDP_TX) { - struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter); - - igb_xdp_ring_update_tail(tx_ring); - } + if (xdp_xmit) + igb_finalize_xdp(adapter, xdp_xmit); - u64_stats_update_begin(&rx_ring->rx_syncp); - rx_ring->rx_stats.packets += total_packets; - rx_ring->rx_stats.bytes += total_bytes; - u64_stats_update_end(&rx_ring->rx_syncp); - q_vector->rx.total_packets += total_packets; - q_vector->rx.total_bytes += total_bytes; + igb_update_rx_stats(q_vector, total_packets, total_bytes); if (cleaned_count) igb_alloc_rx_buffers(rx_ring, cleaned_count); @@ -8819,6 +9170,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, page = dev_alloc_pages(igb_rx_pg_order(rx_ring)); if (unlikely(!page)) { rx_ring->rx_stats.alloc_failed++; + set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); return false; } @@ -8835,6 +9187,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, __free_pages(page, igb_rx_pg_order(rx_ring)); rx_ring->rx_stats.alloc_failed++; + set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); return false; } @@ -8941,6 +9294,10 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return -EIO; break; case SIOCSMIIREG: + if (igb_write_phy_reg(&adapter->hw, data->reg_num & 0x1F, + data->val_in)) + return -EIO; + break; default: return -EOPNOTSUPP; } @@ -8960,10 +9317,6 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) case SIOCGMIIREG: case SIOCSMIIREG: return igb_mii_ioctl(netdev, ifr, cmd); - case SIOCGHWTSTAMP: - return igb_ptp_get_ts_config(netdev, ifr); - case SIOCSHWTSTAMP: - return igb_ptp_set_ts_config(netdev, ifr); default: return -EOPNOTSUPP; } @@ -9231,12 +9584,12 @@ static void igb_deliver_wake_packet(struct net_device *netdev) netif_rx(skb); } -static int __maybe_unused igb_suspend(struct device *dev) +static int igb_suspend(struct device *dev) { return __igb_shutdown(to_pci_dev(dev), NULL, 0); } -static int __maybe_unused igb_resume(struct device *dev) +static int __igb_resume(struct device *dev, bool rpm) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); @@ -9246,7 +9599,6 @@ static int __maybe_unused igb_resume(struct device *dev) pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); - pci_save_state(pdev); if (!pci_device_is_present(pdev)) return -ENODEV; @@ -9279,18 +9631,25 @@ static int __maybe_unused igb_resume(struct device *dev) wr32(E1000_WUS, ~0); - rtnl_lock(); + if (!rpm) + rtnl_lock(); if (!err && netif_running(netdev)) err = __igb_open(netdev, true); if (!err) netif_device_attach(netdev); - rtnl_unlock(); + if (!rpm) + rtnl_unlock(); return err; } -static int __maybe_unused igb_runtime_idle(struct device *dev) +static int igb_resume(struct device *dev) +{ + return __igb_resume(dev, false); +} + +static int igb_runtime_idle(struct device *dev) { 
struct net_device *netdev = dev_get_drvdata(dev); struct igb_adapter *adapter = netdev_priv(netdev); @@ -9301,14 +9660,14 @@ static int __maybe_unused igb_runtime_idle(struct device *dev) return -EBUSY; } -static int __maybe_unused igb_runtime_suspend(struct device *dev) +static int igb_runtime_suspend(struct device *dev) { return __igb_shutdown(to_pci_dev(dev), NULL, 1); } -static int __maybe_unused igb_runtime_resume(struct device *dev) +static int igb_runtime_resume(struct device *dev) { - return igb_resume(dev); + return __igb_resume(dev, true); } static void igb_shutdown(struct pci_dev *pdev) @@ -9323,71 +9682,17 @@ static void igb_shutdown(struct pci_dev *pdev) } } -#ifdef CONFIG_PCI_IOV -static int igb_sriov_reinit(struct pci_dev *dev) -{ - struct net_device *netdev = pci_get_drvdata(dev); - struct igb_adapter *adapter = netdev_priv(netdev); - struct pci_dev *pdev = adapter->pdev; - - rtnl_lock(); - - if (netif_running(netdev)) - igb_close(netdev); - else - igb_reset(adapter); - - igb_clear_interrupt_scheme(adapter); - - igb_init_queue_configuration(adapter); - - if (igb_init_interrupt_scheme(adapter, true)) { - rtnl_unlock(); - dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); - return -ENOMEM; - } - - if (netif_running(netdev)) - igb_open(netdev); - - rtnl_unlock(); - - return 0; -} - -static int igb_pci_disable_sriov(struct pci_dev *dev) -{ - int err = igb_disable_sriov(dev); - - if (!err) - err = igb_sriov_reinit(dev); - - return err; -} - -static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs) -{ - int err = igb_enable_sriov(dev, num_vfs); - - if (err) - goto out; - - err = igb_sriov_reinit(dev); - if (!err) - return num_vfs; - -out: - return err; -} - -#endif static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs) { #ifdef CONFIG_PCI_IOV - if (num_vfs == 0) - return igb_pci_disable_sriov(dev); - else - return igb_pci_enable_sriov(dev, num_vfs); + int err; + + if (num_vfs == 0) { + return igb_disable_sriov(dev, true); + } else { + err = igb_enable_sriov(dev, num_vfs, true); + return err ? err : num_vfs; + } #endif return 0; } @@ -9406,16 +9711,24 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); + if (state == pci_channel_io_normal) { + dev_warn(&pdev->dev, "Non-correctable non-fatal error reported.\n"); + return PCI_ERS_RESULT_CAN_RECOVER; + } + netif_device_detach(netdev); if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; + rtnl_lock(); if (netif_running(netdev)) igb_down(adapter); + rtnl_unlock(); + pci_disable_device(pdev); - /* Request a slot slot reset. */ + /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; } @@ -9424,7 +9737,7 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, * @pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. Implementation - * resembles the first-half of the igb_resume routine. + * resembles the first-half of the __igb_resume routine. 
**/ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) { @@ -9440,7 +9753,6 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) } else { pci_set_master(pdev); pci_restore_state(pdev); - pci_save_state(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); @@ -9464,19 +9776,28 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) * * This callback is called when the error recovery driver tells us that * its OK to resume normal operation. Implementation resembles the - * second-half of the igb_resume routine. + * second-half of the __igb_resume routine. */ static void igb_io_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); + rtnl_lock(); if (netif_running(netdev)) { + if (!test_bit(__IGB_DOWN, &adapter->state)) { + dev_dbg(&pdev->dev, "Resuming from non-fatal error, do nothing.\n"); + rtnl_unlock(); + return; + } + if (igb_up(adapter)) { dev_err(&pdev->dev, "igb_up failed after reset\n"); + rtnl_unlock(); return; } } + rtnl_unlock(); netif_device_attach(netdev); @@ -9618,8 +9939,7 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, tx_rate; bcnrc_val = E1000_RTTBCNRC_RS_ENA; - bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) & - E1000_RTTBCNRC_RF_INT_MASK); + bcnrc_val |= FIELD_PREP(E1000_RTTBCNRC_RF_INT_MASK, rf_int); bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK); } else { bcnrc_val = 0; @@ -9794,11 +10114,10 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) struct e1000_hw *hw = &adapter->hw; u32 dmac_thr; u16 hwm; + u32 reg; if (hw->mac.type > e1000_82580) { if (adapter->flags & IGB_FLAG_DMAC) { - u32 reg; - /* force threshold to 0. */ wr32(E1000_DMCTXTH, 0); @@ -9809,8 +10128,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) hwm = 64 * (pba - 6); reg = rd32(E1000_FCRTC); reg &= ~E1000_FCRTC_RTH_COAL_MASK; - reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) - & E1000_FCRTC_RTH_COAL_MASK); + reg |= FIELD_PREP(E1000_FCRTC_RTH_COAL_MASK, hwm); wr32(E1000_FCRTC, reg); /* Set the DMA Coalescing Rx threshold to PBA - 2 * max @@ -9819,8 +10137,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) dmac_thr = pba - 10; reg = rd32(E1000_DMACR); reg &= ~E1000_DMACR_DMACTHR_MASK; - reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT) - & E1000_DMACR_DMACTHR_MASK); + reg |= FIELD_PREP(E1000_DMACR_DMACTHR_MASK, dmac_thr); /* transition to L0x or L1 if available..*/ reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); @@ -9831,7 +10148,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) /* Disable BMC-to-OS Watchdog Enable */ if (hw->mac.type != e1000_i354) reg &= ~E1000_DMACR_DC_BMC2OSW_EN; - wr32(E1000_DMACR, reg); /* no lower threshold to disable @@ -9848,12 +10164,12 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) */ wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); + } - /* make low power state decision controlled - * by DMA coal - */ + if (hw->mac.type >= e1000_i210 || + (adapter->flags & IGB_FLAG_DMAC)) { reg = rd32(E1000_PCIEMISC); - reg &= ~E1000_PCIEMISC_LX_DECISION; + reg |= E1000_PCIEMISC_LX_DECISION; wr32(E1000_PCIEMISC, reg); } /* endif adapter->dmac is not disabled */ } else if (hw->mac.type == e1000_82580) { @@ -9982,4 +10298,20 @@ static void igb_nfc_filter_restore(struct igb_adapter *adapter) spin_unlock(&adapter->nfc_lock); } + +static _DEFINE_DEV_PM_OPS(igb_pm_ops, igb_suspend, 
igb_resume, + igb_runtime_suspend, igb_runtime_resume, + igb_runtime_idle); + +static struct pci_driver igb_driver = { + .name = igb_driver_name, + .id_table = igb_pci_tbl, + .probe = igb_probe, + .remove = igb_remove, + .driver.pm = pm_ptr(&igb_pm_ops), + .shutdown = igb_shutdown, + .sriov_configure = igb_pci_sriov_configure, + .err_handler = &igb_err_handler +}; + /* igb_main.c */
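
Note on the register-field conversions above: several hunks replace open-coded shift-and-mask expressions of the form (value << FOO_SHIFT) & FOO_MASK with FIELD_PREP(FOO_MASK, value) from <linux/bitfield.h>, which derives the shift position from the mask itself at compile time. The user-space sketch below illustrates the equivalence; the FIELD_PREP_SKETCH stand-in macro and the EXAMPLE_* constants are illustrative assumptions, not the kernel or igb definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's FIELD_PREP(): shift the value into
 * the contiguous bit range described by the mask, then apply the mask.
 * The real macro additionally performs compile-time constness and range
 * checks that this sketch omits.
 */
#define FIELD_PREP_SKETCH(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (uint32_t)(mask))

#define EXAMPLE_RF_INT_MASK	0x0FFFC000u	/* hypothetical field occupying bits 27:14 */
#define EXAMPLE_RF_INT_SHIFT	14

int main(void)
{
	uint32_t rf_int = 0x123;

	/* form removed by the patch: explicit shift constant plus mask */
	uint32_t old_style = (rf_int << EXAMPLE_RF_INT_SHIFT) & EXAMPLE_RF_INT_MASK;

	/* form added by the patch: the shift is implied by the mask */
	uint32_t new_style = FIELD_PREP_SKETCH(EXAMPLE_RF_INT_MASK, rf_int);

	printf("old=%#x new=%#x\n", old_style, new_style);
	return old_style == new_style ? 0 : 1;
}

Dropping the separate *_SHIFT constant removes one way for the shift and mask to drift out of sync, which is the apparent motivation for the FIELD_PREP() changes in igb_set_vf_rate_limit() and igb_init_dmac().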
