Diffstat (limited to 'drivers/net/ethernet/intel/igc/igc_main.c')
-rw-r--r--   drivers/net/ethernet/intel/igc/igc_main.c | 2290
1 file changed, 1798 insertions(+), 492 deletions(-)
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 95323095094d..7aafa60ba0c8 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -4,7 +4,6 @@ #include <linux/module.h> #include <linux/types.h> #include <linux/if_vlan.h> -#include <linux/aer.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/ip.h> @@ -12,6 +11,9 @@ #include <net/pkt_sched.h> #include <linux/bpf_trace.h> #include <net/xdp_sock_drv.h> +#include <linux/pci.h> +#include <linux/mdio.h> + #include <net/ipv6.h> #include "igc.h" @@ -30,7 +32,6 @@ static int debug = -1; -MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION(DRV_SUMMARY); MODULE_LICENSE("GPL v2"); module_param(debug, int, 0); @@ -54,6 +55,7 @@ static const struct pci_device_id igc_pci_tbl[] = { { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LMVP), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base }, @@ -118,7 +120,7 @@ void igc_reset(struct igc_adapter *adapter) igc_ptp_reset(adapter); /* Re-enable TSN offloading, where applicable. */ - igc_tsn_offload_apply(adapter); + igc_tsn_reset(adapter); igc_get_phy_info(hw); } @@ -149,6 +151,9 @@ static void igc_release_hw_control(struct igc_adapter *adapter) struct igc_hw *hw = &adapter->hw; u32 ctrl_ext; + if (!pci_device_is_present(adapter->pdev)) + return; + /* Let firmware take over control of h/w */ ctrl_ext = rd32(IGC_CTRL_EXT); wr32(IGC_CTRL_EXT, @@ -232,6 +237,8 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring) igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); } + tx_buffer->next_to_watch = NULL; + /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; i++; @@ -247,6 +254,13 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring) /* reset BQL for queue */ netdev_tx_reset_queue(txring_txq(tx_ring)); + /* Zero out the buffer ring */ + memset(tx_ring->tx_buffer_info, 0, + sizeof(*tx_ring->tx_buffer_info) * tx_ring->count); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + /* reset next_to_use and next_to_clean */ tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; @@ -260,7 +274,7 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring) */ void igc_free_tx_resources(struct igc_ring *tx_ring) { - igc_clean_tx_ring(tx_ring); + igc_disable_tx_ring(tx_ring); vfree(tx_ring->tx_buffer_info); tx_ring->tx_buffer_info = NULL; @@ -302,6 +316,33 @@ static void igc_clean_all_tx_rings(struct igc_adapter *adapter) igc_clean_tx_ring(adapter->tx_ring[i]); } +static void igc_disable_tx_ring_hw(struct igc_ring *ring) +{ + struct igc_hw *hw = &ring->q_vector->adapter->hw; + u8 idx = ring->reg_idx; + u32 txdctl; + + txdctl = rd32(IGC_TXDCTL(idx)); + txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE; + txdctl |= IGC_TXDCTL_SWFLUSH; + wr32(IGC_TXDCTL(idx), txdctl); +} + +/** + * igc_disable_all_tx_rings_hw - Disable all transmit queue operation + * @adapter: board private structure + */ +static void igc_disable_all_tx_rings_hw(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + igc_disable_tx_ring_hw(tx_ring); + } +} + /** * igc_setup_tx_resources - allocate Tx resources 
(Descriptors) * @tx_ring: tx descriptor ring (for a specific queue) to setup @@ -497,6 +538,9 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring) u8 index = rx_ring->queue_index; int size, desc_len, res; + /* XDP RX-queue info */ + if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index, rx_ring->q_vector->napi.napi_id); if (res < 0) { @@ -630,15 +674,18 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter, else buf_size = IGC_RXBUFFER_2048; - srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT; - srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT; + srrctl = rd32(IGC_SRRCTL(reg_idx)); + srrctl &= ~(IGC_SRRCTL_BSIZEPKT_MASK | IGC_SRRCTL_BSIZEHDR_MASK | + IGC_SRRCTL_DESCTYPE_MASK); + srrctl |= IGC_SRRCTL_BSIZEHDR(IGC_RX_HDR_LEN); + srrctl |= IGC_SRRCTL_BSIZEPKT(buf_size); srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; wr32(IGC_SRRCTL(reg_idx), srrctl); - rxdctl |= IGC_RX_PTHRESH; - rxdctl |= IGC_RX_HTHRESH << 8; - rxdctl |= IGC_RX_WTHRESH << 16; + rxdctl |= IGC_RXDCTL_PTHRESH; + rxdctl |= IGC_RXDCTL_HTHRESH << 8; + rxdctl |= IGC_RXDCTL_WTHRESH << 16; /* initialize rx_buffer_info */ memset(ring->rx_buffer_info, 0, @@ -691,7 +738,6 @@ static void igc_configure_tx_ring(struct igc_adapter *adapter, /* disable the queue */ wr32(IGC_TXDCTL(reg_idx), 0); wrfl(); - mdelay(10); wr32(IGC_TDLEN(reg_idx), ring->count * sizeof(union igc_adv_tx_desc)); @@ -703,11 +749,9 @@ static void igc_configure_tx_ring(struct igc_adapter *adapter, wr32(IGC_TDH(reg_idx), 0); writel(0, ring->tail); - txdctl |= IGC_TX_PTHRESH; - txdctl |= IGC_TX_HTHRESH << 8; - txdctl |= IGC_TX_WTHRESH << 16; + txdctl |= IGC_TXDCTL_PTHRESH(8) | IGC_TXDCTL_HTHRESH(1) | + IGC_TXDCTL_WTHRESH(16) | IGC_TXDCTL_QUEUE_ENABLE; - txdctl |= IGC_TXDCTL_QUEUE_ENABLE; wr32(IGC_TXDCTL(reg_idx), txdctl); } @@ -942,7 +986,7 @@ static int igc_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); /* set the correct pool for the new PF MAC address in entry 0 */ @@ -989,25 +1033,110 @@ static int igc_write_mc_addr_list(struct net_device *netdev) return netdev_mc_count(netdev); } -static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime) +static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime, + bool *first_flag, bool *insert_empty) { + struct igc_adapter *adapter = netdev_priv(ring->netdev); ktime_t cycle_time = adapter->cycle_time; ktime_t base_time = adapter->base_time; - u32 launchtime; + ktime_t now = ktime_get_clocktai(); + ktime_t baset_est, end_of_cycle; + s32 launchtime; + s64 n; + + n = div64_s64(ktime_sub_ns(now, base_time), cycle_time); + + baset_est = ktime_add_ns(base_time, cycle_time * (n)); + end_of_cycle = ktime_add_ns(baset_est, cycle_time); + + if (ktime_compare(txtime, end_of_cycle) >= 0) { + if (baset_est != ring->last_ff_cycle) { + *first_flag = true; + ring->last_ff_cycle = baset_est; - /* FIXME: when using ETF together with taprio, we may have a - * case where 'delta' is larger than the cycle_time, this may - * cause problems if we don't read the current value of - * IGC_BASET, as the value writen into the launchtime - * descriptor field may be misinterpreted. 
+ if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0) + *insert_empty = true; + } + } + + /* Introducing a window at end of cycle on which packets + * potentially not honor launchtime. Window of 5us chosen + * considering software update the tail pointer and packets + * are dma'ed to packet buffer. */ - div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime); + if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC)) + netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n", + txtime); + + ring->last_tx_cycle = end_of_cycle; + + launchtime = ktime_sub_ns(txtime, baset_est); + if (launchtime > 0) + div_s64_rem(launchtime, cycle_time, &launchtime); + else + launchtime = 0; return cpu_to_le32(launchtime); } +static int igc_init_empty_frame(struct igc_ring *ring, + struct igc_tx_buffer *buffer, + struct sk_buff *skb) +{ + unsigned int size; + dma_addr_t dma; + + size = skb_headlen(skb); + + dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + net_err_ratelimited("%s: DMA mapping error for empty frame\n", + netdev_name(ring->netdev)); + return -ENOMEM; + } + + buffer->type = IGC_TX_BUFFER_TYPE_SKB; + buffer->skb = skb; + buffer->protocol = 0; + buffer->bytecount = skb->len; + buffer->gso_segs = 1; + buffer->time_stamp = jiffies; + dma_unmap_len_set(buffer, len, skb->len); + dma_unmap_addr_set(buffer, dma, dma); + + return 0; +} + +static void igc_init_tx_empty_descriptor(struct igc_ring *ring, + struct sk_buff *skb, + struct igc_tx_buffer *first) +{ + union igc_adv_tx_desc *desc; + u32 cmd_type, olinfo_status; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + first->bytecount; + olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + + desc = IGC_TX_DESC(ring, ring->next_to_use); + desc->read.cmd_type_len = cpu_to_le32(cmd_type); + desc->read.olinfo_status = cpu_to_le32(olinfo_status); + desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma)); + + netdev_tx_sent_queue(txring_txq(ring), skb->len); + + first->next_to_watch = desc; + + ring->next_to_use++; + if (ring->next_to_use == ring->count) + ring->next_to_use = 0; +} + +#define IGC_EMPTY_FRAME_SIZE 60 + static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, - struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag, u32 vlan_macip_lens, u32 type_tucmd, u32 mss_l4len_idx) { @@ -1026,26 +1155,17 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) mss_l4len_idx |= tx_ring->reg_idx << 4; + if (first_flag) + mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST; + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); - - /* We assume there is always a valid Tx time available. Invalid times - * should have been handled by the upper layers. 
- */ - if (tx_ring->launchtime_enable) { - struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); - ktime_t txtime = first->skb->tstamp; - - skb_txtime_consumed(first->skb); - context_desc->launch_time = igc_tx_launchtime(adapter, - txtime); - } else { - context_desc->launch_time = 0; - } + context_desc->launch_time = launch_time; } -static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first) +static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; @@ -1085,7 +1205,8 @@ no_csum: vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; - igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); + igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, + vlan_macip_lens, type_tucmd, 0); } static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) @@ -1140,10 +1261,21 @@ static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO, (IGC_ADVTXD_DCMD_TSE)); - /* set timestamp bit if present */ + /* set timestamp bit if present, will select the register set + * based on the _TSTAMP(_X) bit. + */ cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP, (IGC_ADVTXD_MAC_TSTAMP)); + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_1, + (IGC_ADVTXD_TSTAMP_REG_1)); + + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_2, + (IGC_ADVTXD_TSTAMP_REG_2)); + + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_3, + (IGC_ADVTXD_TSTAMP_REG_3)); + /* insert frame checksum */ cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS); @@ -1157,14 +1289,16 @@ static void igc_tx_olinfo_status(struct igc_ring *tx_ring, u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT; /* insert L4 checksum */ - olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) * - ((IGC_TXD_POPTS_TXSM << 8) / - IGC_TX_FLAGS_CSUM); + olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_CSUM, + (IGC_TXD_POPTS_TXSM << 8)); /* insert IPv4 checksum */ - olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) * - (((IGC_TXD_POPTS_IXSM << 8)) / - IGC_TX_FLAGS_IPV4); + olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_IPV4, + (IGC_TXD_POPTS_IXSM << 8)); + + /* Use the second timer (free running, in general) for the timestamp */ + olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_TIMER_1, + IGC_TXD_PTP2_TIMER_1); tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); } @@ -1309,6 +1443,7 @@ dma_error: static int igc_tso(struct igc_ring *tx_ring, struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag, u8 *hdr_len) { u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; @@ -1395,20 +1530,78 @@ static int igc_tso(struct igc_ring *tx_ring, vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; - igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, - type_tucmd, mss_l4len_idx); + igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, + vlan_macip_lens, type_tucmd, mss_l4len_idx); return 1; } +static bool igc_request_tx_tstamp(struct igc_adapter *adapter, struct sk_buff *skb, u32 *flags) +{ + int i; + + for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) { + struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i]; + + if (tstamp->skb) + continue; + + tstamp->skb = skb_get(skb); + tstamp->start = jiffies; + *flags = tstamp->flags; + + return true; + } + + return false; +} + 
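To make the new launch-time handling above easier to follow, here is a minimal user-space sketch (not driver code) of the cycle arithmetic that igc_tx_launchtime() performs: it estimates the base of the current Qbv cycle from base_time and cycle_time, then expresses the requested txtime as a non-negative offset into that cycle. All constants below are made-up example values.

/* Stand-alone illustration of the Qbv cycle math; variable names mirror
 * igc_tx_launchtime() but this is not driver code.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t base_time  = 1000000000;  /* schedule origin (ns)        */
	int64_t cycle_time = 500000;      /* 500 us Qbv cycle            */
	int64_t now        = 1002345678;  /* current TAI time (ns)       */
	int64_t txtime     = 1002400000;  /* requested launch time (ns)  */

	/* n = number of whole cycles elapsed since base_time */
	int64_t n = (now - base_time) / cycle_time;
	int64_t baset_est = base_time + n * cycle_time;
	int64_t end_of_cycle = baset_est + cycle_time;

	/* The value written to the descriptor is relative to the estimated
	 * cycle base, wrapped into one cycle and never negative.
	 */
	int64_t launchtime = txtime - baset_est;
	if (launchtime > 0)
		launchtime %= cycle_time;
	else
		launchtime = 0;

	printf("cycle %lld: base=%lld end=%lld launchtime=%lld\n",
	       (long long)n, (long long)baset_est,
	       (long long)end_of_cycle, (long long)launchtime);
	return 0;
}

When txtime falls at or beyond end_of_cycle, the driver additionally sets first_flag and may insert an empty frame, which the sketch above deliberately leaves out.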
+static int igc_insert_empty_frame(struct igc_ring *tx_ring) +{ + struct igc_tx_buffer *empty_info; + struct sk_buff *empty_skb; + void *data; + int ret; + + empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + empty_skb = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC); + if (unlikely(!empty_skb)) { + net_err_ratelimited("%s: skb alloc error for empty frame\n", + netdev_name(tx_ring->netdev)); + return -ENOMEM; + } + + data = skb_put(empty_skb, IGC_EMPTY_FRAME_SIZE); + memset(data, 0, IGC_EMPTY_FRAME_SIZE); + + /* Prepare DMA mapping and Tx buffer information */ + ret = igc_init_empty_frame(tx_ring, empty_info, empty_skb); + if (unlikely(ret)) { + dev_kfree_skb_any(empty_skb); + return ret; + } + + /* Prepare advanced context descriptor for empty packet */ + igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0); + + /* Prepare advanced data descriptor for empty packet */ + igc_init_tx_empty_descriptor(tx_ring, empty_skb, empty_info); + + return 0; +} + static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, struct igc_ring *tx_ring) { + struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); + bool first_flag = false, insert_empty = false; u16 count = TXD_USE_COUNT(skb_headlen(skb)); __be16 protocol = vlan_get_protocol(skb); struct igc_tx_buffer *first; + __le32 launch_time = 0; u32 tx_flags = 0; unsigned short f; + ktime_t txtime; u8 hdr_len = 0; int tso = 0; @@ -1416,17 +1609,39 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, * + 2 desc gap to keep tail from touching head, * + 1 desc for context descriptor, + * + 2 desc for inserting an empty packet for launch time, * otherwise try next time */ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) count += TXD_USE_COUNT(skb_frag_size( &skb_shinfo(skb)->frags[f])); - if (igc_maybe_stop_tx(tx_ring, count + 3)) { + if (igc_maybe_stop_tx(tx_ring, count + 5)) { /* this is a hard error */ return NETDEV_TX_BUSY; } + if (!tx_ring->launchtime_enable) + goto done; + + txtime = skb->tstamp; + skb->tstamp = ktime_set(0, 0); + launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty); + + if (insert_empty) { + /* Reset the launch time if the required empty frame fails to + * be inserted. However, this packet is not dropped, so it + * "dirties" the current Qbv cycle. This ensures that the + * upcoming packet, which is scheduled in the next Qbv cycle, + * does not require an empty frame. This way, the launch time + * continues to function correctly despite the current failure + * to insert the empty frame. + */ + if (igc_insert_empty_frame(tx_ring)) + launch_time = 0; + } + +done: /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; first->type = IGC_TX_BUFFER_TYPE_SKB; @@ -1434,24 +1649,31 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, first->bytecount = skb->len; first->gso_segs = 1; - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { - struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); + if (adapter->qbv_transition || tx_ring->oper_gate_closed) + goto out_drop; - /* FIXME: add support for retrieving timestamps from - * the other timer registers before skipping the - * timestamping request. 
- */ - if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && - !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS, - &adapter->state)) { - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - tx_flags |= IGC_TX_FLAGS_TSTAMP; + if (tx_ring->max_sdu > 0 && first->bytecount > tx_ring->max_sdu) { + adapter->stats.txdrop++; + goto out_drop; + } + + if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) && + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + unsigned long flags; + u32 tstamp_flags; - adapter->ptp_tx_skb = skb_get(skb); - adapter->ptp_tx_start = jiffies; + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); + if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags; + if (skb->sk && + READ_ONCE(skb->sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC) + tx_flags |= IGC_TX_FLAGS_TSTAMP_TIMER_1; } else { adapter->tx_hwtstamp_skipped++; } + + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); } if (skb_vlan_tag_present(skb)) { @@ -1463,11 +1685,20 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, first->tx_flags = tx_flags; first->protocol = protocol; - tso = igc_tso(tx_ring, first, &hdr_len); + /* For preemptible queue, manually pad the skb so that HW includes + * padding bytes in mCRC calculation + */ + if (tx_ring->preemptible && skb->len < ETH_ZLEN) { + if (skb_padto(skb, ETH_ZLEN)) + goto out_drop; + skb_put(skb, ETH_ZLEN - skb->len); + } + + tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len); if (tso < 0) goto out_drop; else if (!tso) - igc_tx_csum(tx_ring, first); + igc_tx_csum(tx_ring, first, launch_time, first_flag); igc_tx_map(tx_ring, first, hdr_len); @@ -1548,14 +1779,36 @@ static void igc_rx_checksum(struct igc_ring *ring, le32_to_cpu(rx_desc->wb.upper.status_error)); } +/* Mapping HW RSS Type to enum pkt_hash_types */ +static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = { + [IGC_RSS_TYPE_NO_HASH] = PKT_HASH_TYPE_L2, + [IGC_RSS_TYPE_HASH_TCP_IPV4] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_IPV4] = PKT_HASH_TYPE_L3, + [IGC_RSS_TYPE_HASH_TCP_IPV6] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_IPV6_EX] = PKT_HASH_TYPE_L3, + [IGC_RSS_TYPE_HASH_IPV6] = PKT_HASH_TYPE_L3, + [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_UDP_IPV4] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_UDP_IPV6] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = PKT_HASH_TYPE_L4, + [10] = PKT_HASH_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */ + [11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask */ + [12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisons */ + [13] = PKT_HASH_TYPE_NONE, + [14] = PKT_HASH_TYPE_NONE, + [15] = PKT_HASH_TYPE_NONE, +}; + static inline void igc_rx_hash(struct igc_ring *ring, union igc_adv_rx_desc *rx_desc, struct sk_buff *skb) { - if (ring->netdev->features & NETIF_F_RXHASH) - skb_set_hash(skb, - le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), - PKT_HASH_TYPE_L3); + if (ring->netdev->features & NETIF_F_RXHASH) { + u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); + u32 rss_type = igc_rss_type(rx_desc); + + skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]); + } } static void igc_rx_vlan(struct igc_ring *rx_ring, @@ -1710,24 +1963,26 @@ static void igc_add_rx_frag(struct igc_ring *rx_ring, static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, struct igc_rx_buffer *rx_buffer, - union igc_adv_rx_desc *rx_desc, - unsigned int size) + struct xdp_buff *xdp) { - void *va = 
page_address(rx_buffer->page) + rx_buffer->page_offset; + unsigned int size = xdp->data_end - xdp->data; unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); + unsigned int metasize = xdp->data - xdp->data_meta; struct sk_buff *skb; /* prefetch first cache line of first page */ - net_prefetch(va); + net_prefetch(xdp->data_meta); /* build an skb around the page buffer */ - skb = build_skb(va - IGC_SKB_PAD, truesize); + skb = napi_build_skb(xdp->data_hard_start, truesize); if (unlikely(!skb)) return NULL; /* update pointers within the skb to store the data */ - skb_reserve(skb, IGC_SKB_PAD); + skb_reserve(skb, xdp->data - xdp->data_hard_start); __skb_put(skb, size); + if (metasize) + skb_metadata_set(skb, metasize); igc_rx_buffer_flip(rx_buffer, truesize); return skb; @@ -1735,9 +1990,10 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, struct igc_rx_buffer *rx_buffer, - struct xdp_buff *xdp, - ktime_t timestamp) + struct igc_xdp_buff *ctx) { + struct xdp_buff *xdp = &ctx->xdp; + unsigned int metasize = xdp->data - xdp->data_meta; unsigned int size = xdp->data_end - xdp->data; unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); void *va = xdp->data; @@ -1745,15 +2001,18 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, struct sk_buff *skb; /* prefetch first cache line of first page */ - net_prefetch(va); + net_prefetch(xdp->data_meta); /* allocate a skb to store the frags */ - skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, + IGC_RX_HDR_LEN + metasize); if (unlikely(!skb)) return NULL; - if (timestamp) - skb_hwtstamps(skb)->hwtstamp = timestamp; + if (ctx->rx_ts) { + skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV; + skb_hwtstamps(skb)->netdev_data = ctx->rx_ts; + } /* Determine available headroom for copy */ headlen = size; @@ -1761,7 +2020,13 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); /* align pull length to size of long to optimize memcpy performance */ - memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); + memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta, + ALIGN(headlen + metasize, sizeof(long))); + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } /* update all of the pointers */ size -= headlen; @@ -1885,10 +2150,6 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring, union igc_adv_rx_desc *rx_desc, struct sk_buff *skb) { - /* XDP packets use error pointer so abort at this point */ - if (IS_ERR(skb)) - return true; - if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) { struct net_device *netdev = rx_ring->netdev; @@ -1953,6 +2214,7 @@ static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, page = dev_alloc_pages(igc_rx_pg_order(rx_ring)); if (unlikely(!page)) { rx_ring->rx_stats.alloc_failed++; + set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); return false; } @@ -1969,6 +2231,7 @@ static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, __free_page(page); rx_ring->rx_stats.alloc_failed++; + set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); return false; } @@ -2062,6 +2325,8 @@ static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count) if (!count) return ok; + XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff); + desc = IGC_RX_DESC(ring, i); bi = &ring->rx_buffer_info[i]; i -= ring->count; @@ -2108,69 
+2373,105 @@ static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count) return ok; } -static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer, - struct xdp_frame *xdpf, - struct igc_ring *ring) -{ - dma_addr_t dma; - - dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE); - if (dma_mapping_error(ring->dev, dma)) { - netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); - return -ENOMEM; - } - - buffer->type = IGC_TX_BUFFER_TYPE_XDP; - buffer->xdpf = xdpf; - buffer->protocol = 0; - buffer->bytecount = xdpf->len; - buffer->gso_segs = 1; - buffer->time_stamp = jiffies; - dma_unmap_len_set(buffer, len, xdpf->len); - dma_unmap_addr_set(buffer, dma, dma); - return 0; -} - /* This function requires __netif_tx_lock is held by the caller. */ static int igc_xdp_init_tx_descriptor(struct igc_ring *ring, struct xdp_frame *xdpf) { - struct igc_tx_buffer *buffer; - union igc_adv_tx_desc *desc; - u32 cmd_type, olinfo_status; - int err; + struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); + u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0; + u16 count, index = ring->next_to_use; + struct igc_tx_buffer *head = &ring->tx_buffer_info[index]; + struct igc_tx_buffer *buffer = head; + union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index); + u32 olinfo_status, len = xdpf->len, cmd_type; + void *data = xdpf->data; + u16 i; - if (!igc_desc_unused(ring)) - return -EBUSY; + count = TXD_USE_COUNT(len); + for (i = 0; i < nr_frags; i++) + count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i])); - buffer = &ring->tx_buffer_info[ring->next_to_use]; - err = igc_xdp_init_tx_buffer(buffer, xdpf, ring); - if (err) - return err; + if (igc_maybe_stop_tx(ring, count + 3)) { + /* this is a hard error */ + return -EBUSY; + } - cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | - IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | - buffer->bytecount; - olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + i = 0; + head->bytecount = xdp_get_frame_len(xdpf); + head->type = IGC_TX_BUFFER_TYPE_XDP; + head->gso_segs = 1; + head->xdpf = xdpf; - desc = IGC_TX_DESC(ring, ring->next_to_use); - desc->read.cmd_type_len = cpu_to_le32(cmd_type); + olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; desc->read.olinfo_status = cpu_to_le32(olinfo_status); - desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma)); - netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount); + for (;;) { + dma_addr_t dma; - buffer->next_to_watch = desc; + dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + netdev_err_once(ring->netdev, + "Failed to map DMA for TX\n"); + goto unmap; + } - ring->next_to_use++; - if (ring->next_to_use == ring->count) - ring->next_to_use = 0; + dma_unmap_len_set(buffer, len, len); + dma_unmap_addr_set(buffer, dma, dma); + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | len; + + desc->read.cmd_type_len = cpu_to_le32(cmd_type); + desc->read.buffer_addr = cpu_to_le64(dma); + + buffer->protocol = 0; + + if (++index == ring->count) + index = 0; + + if (i == nr_frags) + break; + + buffer = &ring->tx_buffer_info[index]; + desc = IGC_TX_DESC(ring, index); + desc->read.olinfo_status = 0; + + data = skb_frag_address(&sinfo->frags[i]); + len = skb_frag_size(&sinfo->frags[i]); + i++; + } + desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD); + + netdev_tx_sent_queue(txring_txq(ring), head->bytecount); + /* set the timestamp */ + head->time_stamp = jiffies; 
+ /* set next_to_watch value indicating a packet is present */ + head->next_to_watch = desc; + ring->next_to_use = index; return 0; + +unmap: + for (;;) { + buffer = &ring->tx_buffer_info[index]; + if (dma_unmap_len(buffer, len)) + dma_unmap_page(ring->dev, + dma_unmap_addr(buffer, dma), + dma_unmap_len(buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(buffer, len, 0); + if (buffer == head) + break; + + if (!index) + index += ring->count; + index--; + } + + return -ENOMEM; } -static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter, - int cpu) +struct igc_ring *igc_get_tx_ring(struct igc_adapter *adapter, int cpu) { int index = cpu; @@ -2194,10 +2495,12 @@ static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp) if (unlikely(!xdpf)) return -EFAULT; - ring = igc_xdp_get_tx_ring(adapter, cpu); + ring = igc_get_tx_ring(adapter, cpu); nq = txring_txq(ring); __netif_tx_lock(nq, cpu); + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); res = igc_xdp_init_tx_descriptor(ring, xdpf); __netif_tx_unlock(nq); return res; @@ -2223,7 +2526,7 @@ static int __igc_xdp_run_prog(struct igc_adapter *adapter, return IGC_XDP_REDIRECT; break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(adapter->netdev, prog, act); fallthrough; case XDP_ABORTED: out_failure: @@ -2234,8 +2537,7 @@ out_failure: } } -static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, - struct xdp_buff *xdp) +static int igc_xdp_run_prog(struct igc_adapter *adapter, struct xdp_buff *xdp) { struct bpf_prog *prog; int res; @@ -2249,11 +2551,11 @@ static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, res = __igc_xdp_run_prog(adapter, prog, xdp); out: - return ERR_PTR(-res); + return res; } /* This function assumes __netif_tx_lock is held by the caller. 
*/ -static void igc_flush_tx_descriptors(struct igc_ring *ring) +void igc_flush_tx_descriptors(struct igc_ring *ring) { /* Once tail pointer is updated, hardware can fetch the descriptors * any time so we issue a write membar here to ensure all memory @@ -2270,7 +2572,7 @@ static void igc_finalize_xdp(struct igc_adapter *adapter, int status) struct igc_ring *ring; if (status & IGC_XDP_TX) { - ring = igc_xdp_get_tx_ring(adapter, cpu); + ring = igc_get_tx_ring(adapter, cpu); nq = txring_txq(ring); __netif_tx_lock(nq, cpu); @@ -2304,13 +2606,13 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) struct sk_buff *skb = rx_ring->skb; u16 cleaned_count = igc_desc_unused(rx_ring); int xdp_status = 0, rx_buffer_pgcnt; + int xdp_res = 0; while (likely(total_packets < budget)) { - union igc_adv_rx_desc *rx_desc; + struct igc_xdp_buff ctx = { .rx_ts = NULL }; struct igc_rx_buffer *rx_buffer; + union igc_adv_rx_desc *rx_desc; unsigned int size, truesize; - ktime_t timestamp = 0; - struct xdp_buff xdp; int pkt_offset = 0; void *pktbuf; @@ -2337,23 +2639,31 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) { - timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, - pktbuf); + ctx.rx_ts = pktbuf; pkt_offset = IGC_TS_HDR_LEN; size -= IGC_TS_HDR_LEN; } - if (!skb) { - xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq); - xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring), - igc_rx_offset(rx_ring) + pkt_offset, size, false); - - skb = igc_xdp_run_prog(adapter, &xdp); + if (igc_fpe_is_pmac_enabled(adapter) && + igc_fpe_handle_mpacket(adapter, rx_desc, size, pktbuf)) { + /* Advance the ring next-to-clean */ + igc_is_non_eop(rx_ring, rx_desc); + cleaned_count++; + continue; } - if (IS_ERR(skb)) { - unsigned int xdp_res = -PTR_ERR(skb); + if (!skb) { + xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq); + xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring), + igc_rx_offset(rx_ring) + pkt_offset, + size, true); + xdp_buff_clear_frags_flag(&ctx.xdp); + ctx.rx_desc = rx_desc; + + xdp_res = igc_xdp_run_prog(adapter, &ctx.xdp); + } + if (xdp_res) { switch (xdp_res) { case IGC_XDP_CONSUMED: rx_buffer->pagecnt_bias++; @@ -2370,15 +2680,15 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) } else if (skb) igc_add_rx_frag(rx_ring, rx_buffer, skb, size); else if (ring_uses_build_skb(rx_ring)) - skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size); + skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp); else - skb = igc_construct_skb(rx_ring, rx_buffer, &xdp, - timestamp); + skb = igc_construct_skb(rx_ring, rx_buffer, &ctx); /* exit if we failed to retrieve a buffer */ - if (!skb) { + if (!xdp_res && !skb) { rx_ring->rx_stats.alloc_failed++; rx_buffer->pagecnt_bias++; + set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); break; } @@ -2390,7 +2700,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) continue; /* verify the packet layout is correct */ - if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { + if (xdp_res || igc_cleanup_headers(rx_ring, rx_desc, skb)) { skb = NULL; continue; } @@ -2425,44 +2735,49 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) } static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, - struct xdp_buff *xdp) + struct igc_xdp_buff *ctx) { + struct xdp_buff *xdp = &ctx->xdp; + unsigned int totalsize = xdp->data_end 
- xdp->data_meta; unsigned int metasize = xdp->data - xdp->data_meta; - unsigned int datasize = xdp->data_end - xdp->data; - unsigned int totalsize = metasize + datasize; struct sk_buff *skb; - skb = __napi_alloc_skb(&ring->q_vector->napi, - xdp->data_end - xdp->data_hard_start, - GFP_ATOMIC | __GFP_NOWARN); + net_prefetch(xdp->data_meta); + + skb = napi_alloc_skb(&ring->q_vector->napi, totalsize); if (unlikely(!skb)) return NULL; - skb_reserve(skb, xdp->data_meta - xdp->data_hard_start); - memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize); - if (metasize) + memcpy(__skb_put(skb, totalsize), xdp->data_meta, + ALIGN(totalsize, sizeof(long))); + + if (metasize) { skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } + + if (ctx->rx_ts) { + skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV; + skb_hwtstamps(skb)->netdev_data = ctx->rx_ts; + } return skb; } static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, union igc_adv_rx_desc *desc, - struct xdp_buff *xdp, - ktime_t timestamp) + struct igc_xdp_buff *ctx) { struct igc_ring *ring = q_vector->rx.ring; struct sk_buff *skb; - skb = igc_construct_skb_zc(ring, xdp); + skb = igc_construct_skb_zc(ring, ctx); if (!skb) { ring->rx_stats.alloc_failed++; + set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &ring->flags); return; } - if (timestamp) - skb_hwtstamps(skb)->hwtstamp = timestamp; - if (igc_cleanup_headers(ring, desc, skb)) return; @@ -2470,6 +2785,15 @@ static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, napi_gro_receive(&q_vector->napi, skb); } +static struct igc_xdp_buff *xsk_buff_to_igc_ctx(struct xdp_buff *xdp) +{ + /* xdp_buff pointer used by ZC code path is alloc as xdp_buff_xsk. The + * igc_xdp_buff shares its layout with xdp_buff_xsk and private + * igc_xdp_buff fields fall into xdp_buff_xsk->cb + */ + return (struct igc_xdp_buff *)xdp; +} + static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) { struct igc_adapter *adapter = q_vector->adapter; @@ -2488,7 +2812,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) while (likely(total_packets < budget)) { union igc_adv_rx_desc *desc; struct igc_rx_buffer *bi; - ktime_t timestamp = 0; + struct igc_xdp_buff *ctx; unsigned int size; int res; @@ -2505,9 +2829,11 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) bi = &ring->rx_buffer_info[ntc]; + ctx = xsk_buff_to_igc_ctx(bi->xdp); + ctx->rx_desc = desc; + if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) { - timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, - bi->xdp->data); + ctx->rx_ts = bi->xdp->data; bi->xdp->data += IGC_TS_HDR_LEN; @@ -2516,15 +2842,17 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) */ bi->xdp->data_meta += IGC_TS_HDR_LEN; size -= IGC_TS_HDR_LEN; + } else { + ctx->rx_ts = NULL; } bi->xdp->data_end = bi->xdp->data + size; - xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool); + xsk_buff_dma_sync_for_cpu(bi->xdp); res = __igc_xdp_run_prog(adapter, prog, bi->xdp); switch (res) { case IGC_XDP_PASS: - igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp); + igc_dispatch_skb_zc(q_vector, desc, ctx); fallthrough; case IGC_XDP_CONSUMED: xsk_buff_free(bi->xdp); @@ -2580,42 +2908,193 @@ static void igc_update_tx_stats(struct igc_q_vector *q_vector, q_vector->tx.total_packets += packets; } +static void igc_xsk_request_timestamp(void *_priv) +{ + struct igc_metadata_request *meta_req = _priv; + struct igc_ring *tx_ring = meta_req->tx_ring; + struct 
igc_tx_timestamp_request *tstamp; + u32 tx_flags = IGC_TX_FLAGS_TSTAMP; + struct igc_adapter *adapter; + unsigned long lock_flags; + bool found = false; + int i; + + if (test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags)) { + adapter = netdev_priv(tx_ring->netdev); + + spin_lock_irqsave(&adapter->ptp_tx_lock, lock_flags); + + /* Search for available tstamp regs */ + for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) { + tstamp = &adapter->tx_tstamp[i]; + + /* tstamp->skb and tstamp->xsk_tx_buffer are in union. + * When tstamp->skb is equal to NULL, + * tstamp->xsk_tx_buffer is equal to NULL as well. + * This condition means that the particular tstamp reg + * is not occupied by other packet. + */ + if (!tstamp->skb) { + found = true; + break; + } + } + + /* Return if no available tstamp regs */ + if (!found) { + adapter->tx_hwtstamp_skipped++; + spin_unlock_irqrestore(&adapter->ptp_tx_lock, + lock_flags); + return; + } + + tstamp->start = jiffies; + tstamp->xsk_queue_index = tx_ring->queue_index; + tstamp->xsk_tx_buffer = meta_req->tx_buffer; + tstamp->buffer_type = IGC_TX_BUFFER_TYPE_XSK; + + /* Hold the transmit completion until timestamp is ready */ + meta_req->tx_buffer->xsk_pending_ts = true; + + /* Keep the pointer to tx_timestamp, which is located in XDP + * metadata area. It is the location to store the value of + * tx hardware timestamp. + */ + xsk_tx_metadata_to_compl(meta_req->meta, &tstamp->xsk_meta); + + /* Set timestamp bit based on the _TSTAMP(_X) bit. */ + tx_flags |= tstamp->flags; + meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, + IGC_TX_FLAGS_TSTAMP, + (IGC_ADVTXD_MAC_TSTAMP)); + meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, + IGC_TX_FLAGS_TSTAMP_1, + (IGC_ADVTXD_TSTAMP_REG_1)); + meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, + IGC_TX_FLAGS_TSTAMP_2, + (IGC_ADVTXD_TSTAMP_REG_2)); + meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, + IGC_TX_FLAGS_TSTAMP_3, + (IGC_ADVTXD_TSTAMP_REG_3)); + + spin_unlock_irqrestore(&adapter->ptp_tx_lock, lock_flags); + } +} + +static u64 igc_xsk_fill_timestamp(void *_priv) +{ + return *(u64 *)_priv; +} + +static void igc_xsk_request_launch_time(u64 launch_time, void *_priv) +{ + struct igc_metadata_request *meta_req = _priv; + struct igc_ring *tx_ring = meta_req->tx_ring; + __le32 launch_time_offset; + bool insert_empty = false; + bool first_flag = false; + u16 used_desc = 0; + + if (!tx_ring->launchtime_enable) + return; + + launch_time_offset = igc_tx_launchtime(tx_ring, + ns_to_ktime(launch_time), + &first_flag, &insert_empty); + if (insert_empty) { + /* Disregard the launch time request if the required empty frame + * fails to be inserted. + */ + if (igc_insert_empty_frame(tx_ring)) + return; + + meta_req->tx_buffer = + &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + /* Inserting an empty packet requires two descriptors: + * one data descriptor and one context descriptor. + */ + used_desc += 2; + } + + /* Use one context descriptor to specify launch time and first flag. 
*/ + igc_tx_ctxtdesc(tx_ring, launch_time_offset, first_flag, 0, 0, 0); + used_desc += 1; + + /* Update the number of used descriptors in this request */ + meta_req->used_desc += used_desc; +} + +const struct xsk_tx_metadata_ops igc_xsk_tx_metadata_ops = { + .tmo_request_timestamp = igc_xsk_request_timestamp, + .tmo_fill_timestamp = igc_xsk_fill_timestamp, + .tmo_request_launch_time = igc_xsk_request_launch_time, +}; + static void igc_xdp_xmit_zc(struct igc_ring *ring) { struct xsk_buff_pool *pool = ring->xsk_pool; struct netdev_queue *nq = txring_txq(ring); union igc_adv_tx_desc *tx_desc = NULL; int cpu = smp_processor_id(); - u16 ntu = ring->next_to_use; struct xdp_desc xdp_desc; - u16 budget; + u16 budget, ntu; if (!netif_carrier_ok(ring->netdev)) return; __netif_tx_lock(nq, cpu); + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); + + ntu = ring->next_to_use; budget = igc_desc_unused(ring); - while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { - u32 cmd_type, olinfo_status; + /* Packets with launch time require one data descriptor and one context + * descriptor. When the launch time falls into the next Qbv cycle, we + * may need to insert an empty packet, which requires two more + * descriptors. Therefore, to be safe, we always ensure we have at least + * 4 descriptors available. + */ + while (budget >= 4 && xsk_tx_peek_desc(pool, &xdp_desc)) { + struct igc_metadata_request meta_req; + struct xsk_tx_metadata *meta = NULL; struct igc_tx_buffer *bi; + u32 olinfo_status; dma_addr_t dma; - cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | - IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | - xdp_desc.len; + meta_req.cmd_type = IGC_ADVTXD_DTYP_DATA | + IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | + IGC_TXD_DCMD | xdp_desc.len; olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT; dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr); + meta = xsk_buff_get_metadata(pool, xdp_desc.addr); xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len); + bi = &ring->tx_buffer_info[ntu]; + + meta_req.tx_ring = ring; + meta_req.tx_buffer = bi; + meta_req.meta = meta; + meta_req.used_desc = 0; + xsk_tx_metadata_request(meta, &igc_xsk_tx_metadata_ops, + &meta_req); + + /* xsk_tx_metadata_request() may have updated next_to_use */ + ntu = ring->next_to_use; + + /* xsk_tx_metadata_request() may have updated Tx buffer info */ + bi = meta_req.tx_buffer; + + /* xsk_tx_metadata_request() may use a few descriptors */ + budget -= meta_req.used_desc; tx_desc = IGC_TX_DESC(ring, ntu); - tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.cmd_type_len = cpu_to_le32(meta_req.cmd_type); tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); tx_desc->read.buffer_addr = cpu_to_le64(dma); - bi = &ring->tx_buffer_info[ntu]; bi->type = IGC_TX_BUFFER_TYPE_XSK; bi->protocol = 0; bi->bytecount = xdp_desc.len; @@ -2628,9 +3107,11 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring) ntu++; if (ntu == ring->count) ntu = 0; + + ring->next_to_use = ntu; + budget--; } - ring->next_to_use = ntu; if (tx_desc) { igc_flush_tx_descriptors(ring); xsk_tx_release(pool); @@ -2678,6 +3159,18 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) break; + if (igc_fpe_is_pmac_enabled(adapter) && + igc_fpe_transmitted_smd_v(tx_desc)) + ethtool_mmsv_event_handle(&adapter->fpe.mmsv, + ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET); + + /* Hold the completions while there's a pending tx hardware + * 
timestamp request from XDP Tx metadata. + */ + if (tx_buffer->type == IGC_TX_BUFFER_TYPE_XSK && + tx_buffer->xsk_pending_ts) + break; + /* clear next_to_watch to prevent false hangs */ tx_buffer->next_to_watch = NULL; @@ -2761,7 +3254,9 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) if (tx_buffer->next_to_watch && time_after(jiffies, tx_buffer->time_stamp + (adapter->tx_timeout_factor * HZ)) && - !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) { + !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) && + (rd32(IGC_TDH(tx_ring->reg_idx)) != readl(tx_ring->tail)) && + !tx_ring->oper_gate_closed) { /* detected Tx unit hang */ netdev_err(tx_ring->netdev, "Detected Tx Unit Hang\n" @@ -3073,11 +3568,337 @@ static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype) etype); } +static int igc_flex_filter_select(struct igc_adapter *adapter, + struct igc_flex_filter *input, + u32 *fhft) +{ + struct igc_hw *hw = &adapter->hw; + u8 fhft_index; + u32 fhftsl; + + if (input->index >= MAX_FLEX_FILTER) { + netdev_err(adapter->netdev, "Wrong Flex Filter index selected!\n"); + return -EINVAL; + } + + /* Indirect table select register */ + fhftsl = rd32(IGC_FHFTSL); + fhftsl &= ~IGC_FHFTSL_FTSL_MASK; + switch (input->index) { + case 0 ... 7: + fhftsl |= 0x00; + break; + case 8 ... 15: + fhftsl |= 0x01; + break; + case 16 ... 23: + fhftsl |= 0x02; + break; + case 24 ... 31: + fhftsl |= 0x03; + break; + } + wr32(IGC_FHFTSL, fhftsl); + + /* Normalize index down to host table register */ + fhft_index = input->index % 8; + + *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) : + IGC_FHFT_EXT(fhft_index - 4); + + return 0; +} + +static int igc_write_flex_filter_ll(struct igc_adapter *adapter, + struct igc_flex_filter *input) +{ + struct igc_hw *hw = &adapter->hw; + u8 *data = input->data; + u8 *mask = input->mask; + u32 queuing; + u32 fhft; + u32 wufc; + int ret; + int i; + + /* Length has to be aligned to 8. Otherwise the filter will fail. Bail + * out early to avoid surprises later. + */ + if (input->length % 8 != 0) { + netdev_err(adapter->netdev, "The length of a flex filter has to be 8 byte aligned!\n"); + return -EINVAL; + } + + /* Select corresponding flex filter register and get base for host table. */ + ret = igc_flex_filter_select(adapter, input, &fhft); + if (ret) + return ret; + + /* When adding a filter globally disable flex filter feature. That is + * recommended within the datasheet. 
+ */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); + + /* Configure filter */ + queuing = input->length & IGC_FHFT_LENGTH_MASK; + queuing |= FIELD_PREP(IGC_FHFT_QUEUE_MASK, input->rx_queue); + queuing |= FIELD_PREP(IGC_FHFT_PRIO_MASK, input->prio); + + if (input->immediate_irq) + queuing |= IGC_FHFT_IMM_INT; + + if (input->drop) + queuing |= IGC_FHFT_DROP; + + wr32(fhft + 0xFC, queuing); + + /* Write data (128 byte) and mask (128 bit) */ + for (i = 0; i < 16; ++i) { + const size_t data_idx = i * 8; + const size_t row_idx = i * 16; + u32 dw0 = + (data[data_idx + 0] << 0) | + (data[data_idx + 1] << 8) | + (data[data_idx + 2] << 16) | + (data[data_idx + 3] << 24); + u32 dw1 = + (data[data_idx + 4] << 0) | + (data[data_idx + 5] << 8) | + (data[data_idx + 6] << 16) | + (data[data_idx + 7] << 24); + u32 tmp; + + /* Write row: dw0, dw1 and mask */ + wr32(fhft + row_idx, dw0); + wr32(fhft + row_idx + 4, dw1); + + /* mask is only valid for MASK(7, 0) */ + tmp = rd32(fhft + row_idx + 8); + tmp &= ~GENMASK(7, 0); + tmp |= mask[i]; + wr32(fhft + row_idx + 8, tmp); + } + + /* Enable filter. */ + wufc |= IGC_WUFC_FLEX_HQ; + if (input->index > 8) { + /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. */ + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); + + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc |= (IGC_WUFC_FLX0 << input->index); + } + wr32(IGC_WUFC, wufc); + + netdev_dbg(adapter->netdev, "Added flex filter %u to HW.\n", + input->index); + + return 0; +} + +static void igc_flex_filter_add_field(struct igc_flex_filter *flex, + const void *src, unsigned int offset, + size_t len, const void *mask) +{ + int i; + + /* data */ + memcpy(&flex->data[offset], src, len); + + /* mask */ + for (i = 0; i < len; ++i) { + const unsigned int idx = i + offset; + const u8 *ptr = mask; + + if (mask) { + if (ptr[i] & 0xff) + flex->mask[idx / 8] |= BIT(idx % 8); + + continue; + } + + flex->mask[idx / 8] |= BIT(idx % 8); + } +} + +static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + int i; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + for (i = 0; i < MAX_FLEX_FILTER; i++) { + if (i < 8) { + if (!(wufc & (IGC_WUFC_FLX0 << i))) + return i; + } else { + if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) + return i; + } + } + + return -ENOSPC; +} + +static bool igc_flex_filter_in_use(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + if (wufc & IGC_WUFC_FILTER_MASK) + return true; + + if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK) + return true; + + return false; +} + +static int igc_add_flex_filter(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + struct igc_nfc_filter *filter = &rule->filter; + unsigned int eth_offset, user_offset; + struct igc_flex_filter flex = { }; + int ret, index; + bool vlan; + + index = igc_find_avail_flex_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + /* Construct the flex filter: + * -> dest_mac [6] + * -> src_mac [6] + * -> tpid [2] + * -> vlan tci [2] + * -> ether type [2] + * -> user data [8] + * -> = 26 bytes => 32 length + */ + flex.index = index; + flex.length = 32; + flex.rx_queue = rule->action; + + vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; + eth_offset = vlan ? 16 : 12; + user_offset = vlan ? 
18 : 14; + + /* Add destination MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, + ETH_ALEN, NULL); + + /* Add source MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->src_addr, 6, + ETH_ALEN, NULL); + + /* Add VLAN etype */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) { + __be16 vlan_etype = cpu_to_be16(filter->vlan_etype); + + igc_flex_filter_add_field(&flex, &vlan_etype, 12, + sizeof(vlan_etype), NULL); + } + + /* Add VLAN TCI */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) + igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, + sizeof(filter->vlan_tci), NULL); + + /* Add Ether type */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + __be16 etype = cpu_to_be16(filter->etype); + + igc_flex_filter_add_field(&flex, &etype, eth_offset, + sizeof(etype), NULL); + } + + /* Add user data */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) + igc_flex_filter_add_field(&flex, &filter->user_data, + user_offset, + sizeof(filter->user_data), + filter->user_mask); + + /* Add it down to the hardware and enable it. */ + ret = igc_write_flex_filter_ll(adapter, &flex); + if (ret) + return ret; + + filter->flex_index = index; + + return 0; +} + +static void igc_del_flex_filter(struct igc_adapter *adapter, + u16 reg_index) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc; + + /* Just disable the filter. The filter table itself is kept + * intact. Another flex_filter_add() should override the "old" data + * then. + */ + if (reg_index > 8) { + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc = rd32(IGC_WUFC); + + wufc &= ~(IGC_WUFC_FLX0 << reg_index); + wr32(IGC_WUFC, wufc); + } + + if (igc_flex_filter_in_use(adapter)) + return; + + /* No filters are in use, we may disable flex filters */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); +} + +static void igc_set_default_queue_filter(struct igc_adapter *adapter, u32 queue) +{ + struct igc_hw *hw = &adapter->hw; + u32 mrqc = rd32(IGC_MRQC); + + mrqc &= ~IGC_MRQC_DEFAULT_QUEUE_MASK; + mrqc |= FIELD_PREP(IGC_MRQC_DEFAULT_QUEUE_MASK, queue); + wr32(IGC_MRQC, mrqc); +} + +static void igc_reset_default_queue_filter(struct igc_adapter *adapter) +{ + /* Reset the default queue to its default value which is Queue 0 */ + igc_set_default_queue_filter(adapter, 0); +} + static int igc_enable_nfc_rule(struct igc_adapter *adapter, - const struct igc_nfc_rule *rule) + struct igc_nfc_rule *rule) { int err; + if (rule->flex) { + return igc_add_flex_filter(adapter, rule); + } + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { err = igc_add_etype_filter(adapter, rule->filter.etype, rule->action); @@ -3100,26 +3921,32 @@ static int igc_enable_nfc_rule(struct igc_adapter *adapter, } if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { - int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> - VLAN_PRIO_SHIFT; + int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci); err = igc_add_vlan_prio_filter(adapter, prio, rule->action); if (err) return err; } + if (rule->filter.match_flags & IGC_FILTER_FLAG_DEFAULT_QUEUE) + igc_set_default_queue_filter(adapter, rule->action); + return 0; } static void igc_disable_nfc_rule(struct igc_adapter *adapter, const struct igc_nfc_rule *rule) { + if (rule->flex) { + igc_del_flex_filter(adapter, 
rule->filter.flex_index); + return; + } + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) igc_del_etype_filter(adapter, rule->filter.etype); if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { - int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> - VLAN_PRIO_SHIFT; + int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci); igc_del_vlan_prio_filter(adapter, prio); } @@ -3131,6 +3958,9 @@ static void igc_disable_nfc_rule(struct igc_adapter *adapter, if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, rule->filter.dst_addr); + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DEFAULT_QUEUE) + igc_reset_default_queue_filter(adapter); } /** @@ -3248,6 +4078,30 @@ static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr) } /** + * igc_enable_empty_addr_recv - Enable Rx of packets with all-zeroes MAC address + * @adapter: Pointer to the igc_adapter structure. + * + * Frame preemption verification requires that packets with the all-zeroes + * MAC address are allowed to be received by the driver. This function adds the + * all-zeroes destination address to the list of acceptable addresses. + * + * Return: 0 on success, negative value otherwise. + */ +int igc_enable_empty_addr_recv(struct igc_adapter *adapter) +{ + u8 empty[ETH_ALEN] = {}; + + return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, empty, -1); +} + +void igc_disable_empty_addr_recv(struct igc_adapter *adapter) +{ + u8 empty[ETH_ALEN] = {}; + + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, empty); +} + +/** * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure * @@ -4017,8 +4871,7 @@ static int igc_alloc_q_vector(struct igc_adapter *adapter, return -ENOMEM; /* initialize NAPI */ - netif_napi_add(adapter->netdev, &q_vector->napi, - igc_poll, 64); + netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll); /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; @@ -4214,6 +5067,7 @@ static int igc_sw_init(struct igc_adapter *adapter) adapter->nfc_rule_count = 0; spin_lock_init(&adapter->stats64_lock); + spin_lock_init(&adapter->qbv_tx_lock); /* Assume MSI-X interrupts, will be checked during IRQ allocation */ adapter->flags |= IGC_FLAG_HAS_MSIX; @@ -4233,6 +5087,22 @@ static int igc_sw_init(struct igc_adapter *adapter) return 0; } +static void igc_set_queue_napi(struct igc_adapter *adapter, int vector, + struct napi_struct *napi) +{ + struct igc_q_vector *q_vector = adapter->q_vector[vector]; + + if (q_vector->rx.ring) + netif_queue_set_napi(adapter->netdev, + q_vector->rx.ring->queue_index, + NETDEV_QUEUE_TYPE_RX, napi); + + if (q_vector->tx.ring) + netif_queue_set_napi(adapter->netdev, + q_vector->tx.ring->queue_index, + NETDEV_QUEUE_TYPE_TX, napi); +} + /** * igc_up - Open the interface and prepare it to handle traffic * @adapter: board private structure @@ -4240,6 +5110,7 @@ static int igc_sw_init(struct igc_adapter *adapter) void igc_up(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; + struct napi_struct *napi; int i = 0; /* hardware has been reset, we need to reload some things */ @@ -4247,8 +5118,11 @@ void igc_up(struct igc_adapter *adapter) clear_bit(__IGC_DOWN, &adapter->state); - for (i = 0; i < adapter->num_q_vectors; i++) - napi_enable(&adapter->q_vector[i]->napi); + for (i = 0; i < adapter->num_q_vectors; i++) { + napi = &adapter->q_vector[i]->napi; + napi_enable(napi); + igc_set_queue_napi(adapter, i, 
napi); + } if (adapter->msix_entries) igc_configure_msix(adapter); @@ -4306,10 +5180,10 @@ void igc_update_stats(struct igc_adapter *adapter) } do { - start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + start = u64_stats_fetch_begin(&ring->rx_syncp); _bytes = ring->rx_stats.bytes; _packets = ring->rx_stats.packets; - } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); bytes += _bytes; packets += _packets; } @@ -4323,10 +5197,10 @@ void igc_update_stats(struct igc_adapter *adapter) struct igc_ring *ring = adapter->tx_ring[i]; do { - start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + start = u64_stats_fetch_begin(&ring->tx_syncp); _bytes = ring->tx_stats.bytes; _packets = ring->tx_stats.packets; - } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); bytes += _bytes; packets += _packets; } @@ -4424,7 +5298,8 @@ void igc_update_stats(struct igc_adapter *adapter) net_stats->tx_window_errors = adapter->stats.latecol; net_stats->tx_carrier_errors = adapter->stats.tncrs; - /* Tx Dropped needs to be maintained elsewhere */ + /* Tx Dropped */ + net_stats->tx_dropped = adapter->stats.txdrop; /* Management Stats */ adapter->stats.mgptc += rd32(IGC_MGTPTC); @@ -4447,38 +5322,42 @@ void igc_down(struct igc_adapter *adapter) igc_ptp_suspend(adapter); - /* disable receives in the hardware */ - rctl = rd32(IGC_RCTL); - wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); - /* flush and sleep below */ - + if (pci_device_is_present(adapter->pdev)) { + /* disable receives in the hardware */ + rctl = rd32(IGC_RCTL); + wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); + /* flush and sleep below */ + } /* set trans_start so we don't get spurious watchdogs during reset */ netif_trans_update(netdev); netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); - /* disable transmits in the hardware */ - tctl = rd32(IGC_TCTL); - tctl &= ~IGC_TCTL_EN; - wr32(IGC_TCTL, tctl); - /* flush both disables and wait for them to finish */ - wrfl(); - usleep_range(10000, 20000); + if (pci_device_is_present(adapter->pdev)) { + /* disable transmits in the hardware */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_EN; + wr32(IGC_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + usleep_range(10000, 20000); - igc_irq_disable(adapter); + igc_irq_disable(adapter); + } adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; for (i = 0; i < adapter->num_q_vectors; i++) { if (adapter->q_vector[i]) { napi_synchronize(&adapter->q_vector[i]->napi); + igc_set_queue_napi(adapter, i, NULL); napi_disable(&adapter->q_vector[i]->napi); } } - del_timer_sync(&adapter->watchdog_timer); - del_timer_sync(&adapter->phy_info_timer); + timer_delete_sync(&adapter->watchdog_timer); + timer_delete_sync(&adapter->phy_info_timer); /* record the stats before reset*/ spin_lock(&adapter->stats64_lock); @@ -4494,8 +5373,12 @@ void igc_down(struct igc_adapter *adapter) /* clear VLAN promisc flag so VFTA will be updated if necessary */ adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; + igc_disable_all_tx_rings_hw(adapter); igc_clean_all_tx_rings(adapter); igc_clean_all_rx_rings(adapter); + + if (adapter->fpe.mmsv.pmac_enabled) + ethtool_mmsv_stop(&adapter->fpe.mmsv); } void igc_reinit_locked(struct igc_adapter *adapter) @@ -4559,7 +5442,7 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu) igc_down(adapter); netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); - netdev->mtu = new_mtu; + 
WRITE_ONCE(netdev->mtu, new_mtu); if (netif_running(netdev)) igc_up(adapter); @@ -4572,6 +5455,24 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu) } /** + * igc_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: queue number that timed out + **/ +static void igc_tx_timeout(struct net_device *netdev, + unsigned int __always_unused txqueue) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + wr32(IGC_EICS, + (adapter->eims_enable_mask & ~adapter->eims_other)); +} + +/** * igc_get_stats64 - Get System Network Statistics * @netdev: network interface device structure * @stats: rtnl_link_stats64 pointer @@ -4638,7 +5539,7 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev, unsigned int network_hdr_len, mac_hdr_len; /* Make certain the headers can be described by a context descriptor */ - mac_hdr_len = skb_network_header(skb) - skb->data; + mac_hdr_len = skb_network_offset(skb); if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) return features & ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC | @@ -4664,25 +5565,22 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev, static void igc_tsync_interrupt(struct igc_adapter *adapter) { - u32 ack, tsauxc, sec, nsec, tsicr; struct igc_hw *hw = &adapter->hw; + u32 tsauxc, sec, nsec, tsicr; struct ptp_clock_event event; struct timespec64 ts; tsicr = rd32(IGC_TSICR); - ack = 0; if (tsicr & IGC_TSICR_SYS_WRAP) { event.type = PTP_CLOCK_PPS; if (adapter->ptp_caps.pps) ptp_clock_event(adapter->ptp_clock, &event); - ack |= IGC_TSICR_SYS_WRAP; } if (tsicr & IGC_TSICR_TXTS) { /* retrieve hardware timestamp */ - schedule_work(&adapter->ptp_tx_work); - ack |= IGC_TSICR_TXTS; + igc_ptp_tx_tstamp_event(adapter); } if (tsicr & IGC_TSICR_TT0) { @@ -4696,7 +5594,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter) wr32(IGC_TSAUXC, tsauxc); adapter->perout[0].start = ts; spin_unlock(&adapter->tmreg_lock); - ack |= IGC_TSICR_TT0; } if (tsicr & IGC_TSICR_TT1) { @@ -4710,7 +5607,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter) wr32(IGC_TSAUXC, tsauxc); adapter->perout[1].start = ts; spin_unlock(&adapter->tmreg_lock); - ack |= IGC_TSICR_TT1; } if (tsicr & IGC_TSICR_AUTT0) { @@ -4720,7 +5616,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter) event.index = 0; event.timestamp = sec * NSEC_PER_SEC + nsec; ptp_clock_event(adapter->ptp_clock, &event); - ack |= IGC_TSICR_AUTT0; } if (tsicr & IGC_TSICR_AUTT1) { @@ -4730,11 +5625,7 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter) event.index = 1; event.timestamp = sec * NSEC_PER_SEC + nsec; ptp_clock_event(adapter->ptp_clock, &event); - ack |= IGC_TSICR_AUTT1; } - - /* acknowledge the interrupts */ - wr32(IGC_TSICR, ack); } /** @@ -4809,6 +5700,7 @@ static irqreturn_t igc_msix_ring(int irq, void *data) */ static int igc_request_msix(struct igc_adapter *adapter) { + unsigned int num_q_vectors = adapter->num_q_vectors; int i = 0, err = 0, vector = 0, free_vector = 0; struct net_device *netdev = adapter->netdev; @@ -4817,7 +5709,13 @@ static int igc_request_msix(struct igc_adapter *adapter) if (err) goto err_out; - for (i = 0; i < adapter->num_q_vectors; i++) { + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + 
adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { struct igc_q_vector *q_vector = adapter->q_vector[i]; vector++; @@ -4841,6 +5739,9 @@ static int igc_request_msix(struct igc_adapter *adapter) q_vector); if (err) goto err_free; + + netif_napi_set_irq(&q_vector->napi, + adapter->msix_entries[vector].vector); } igc_configure_msix(adapter); @@ -4877,7 +5778,8 @@ static void igc_clear_interrupt_scheme(struct igc_adapter *adapter) */ static void igc_update_phy_info(struct timer_list *t) { - struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer); + struct igc_adapter *adapter = timer_container_of(adapter, t, + phy_info_timer); igc_get_phy_info(&adapter->hw); } @@ -4896,20 +5798,12 @@ bool igc_has_link(struct igc_adapter *adapter) * false until the igc_check_for_link establishes link * for copper adapters ONLY */ - switch (hw->phy.media_type) { - case igc_media_type_copper: - if (!hw->mac.get_link_status) - return true; - hw->mac.ops.check_for_link(hw); - link_active = !hw->mac.get_link_status; - break; - default: - case igc_media_type_unknown: - break; - } + if (!hw->mac.get_link_status) + return true; + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; - if (hw->mac.type == igc_i225 && - hw->phy.id == I225_I_PHY_ID) { + if (hw->mac.type == igc_i225) { if (!netif_carrier_ok(adapter->netdev)) { adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { @@ -4927,7 +5821,8 @@ bool igc_has_link(struct igc_adapter *adapter) */ static void igc_watchdog(struct timer_list *t) { - struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer); + struct igc_adapter *adapter = timer_container_of(adapter, t, + watchdog_timer); /* Do the rest outside of interrupt context */ schedule_work(&adapter->watchdog_task); } @@ -4997,10 +5892,23 @@ static void igc_watchdog_task(struct work_struct *work) adapter->tx_timeout_factor = 14; break; case SPEED_100: - /* maybe add some timeout factor ? */ + case SPEED_1000: + case SPEED_2500: + adapter->tx_timeout_factor = 1; break; } + /* Once the launch time has been set on the wire, there + * is a delay before the link speed can be determined + * based on link-up activity. Write into the register + * as soon as we know the correct link speed. 
+ */ + igc_tsn_adjust_txtime_offset(adapter); + + if (adapter->fpe.mmsv.pmac_enabled) + ethtool_mmsv_link_state_handle(&adapter->fpe.mmsv, + true); + if (adapter->link_speed != SPEED_1000) goto no_wait; @@ -5036,30 +5944,17 @@ no_wait: netdev_info(netdev, "NIC Link is Down\n"); netif_carrier_off(netdev); + if (adapter->fpe.mmsv.pmac_enabled) + ethtool_mmsv_link_state_handle(&adapter->fpe.mmsv, + false); + /* link state has changed, schedule phy info update */ if (!test_bit(__IGC_DOWN, &adapter->state)) mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); - /* link is down, time to check for alternate media */ - if (adapter->flags & IGC_FLAG_MAS_ENABLE) { - if (adapter->flags & IGC_FLAG_MEDIA_RESET) { - schedule_work(&adapter->reset_task); - /* return immediately */ - return; - } - } pm_schedule_suspend(netdev->dev.parent, MSEC_PER_SEC * 5); - - /* also check for alternate media here */ - } else if (!netif_carrier_ok(netdev) && - (adapter->flags & IGC_FLAG_MAS_ENABLE)) { - if (adapter->flags & IGC_FLAG_MEDIA_RESET) { - schedule_work(&adapter->reset_task); - /* return immediately */ - return; - } } } @@ -5092,11 +5987,29 @@ no_wait: if (adapter->flags & IGC_FLAG_HAS_MSIX) { u32 eics = 0; - for (i = 0; i < adapter->num_q_vectors; i++) - eics |= adapter->q_vector[i]->eims_value; - wr32(IGC_EICS, eics); + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + struct igc_ring *rx_ring; + + if (!q_vector->rx.ring) + continue; + + rx_ring = adapter->rx_ring[q_vector->rx.ring->queue_index]; + + if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) { + eics |= q_vector->eims_value; + clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); + } + } + if (eics) + wr32(IGC_EICS, eics); } else { - wr32(IGC_ICS, IGC_ICS_RXDMT0); + struct igc_ring *rx_ring = adapter->rx_ring[0]; + + if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) { + clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); + wr32(IGC_ICS, IGC_ICS_RXDMT0); + } } igc_ptp_tx_hang(adapter); @@ -5141,6 +6054,9 @@ static irqreturn_t igc_intr_msi(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + napi_schedule(&q_vector->napi); return IRQ_HANDLED; @@ -5184,6 +6100,9 @@ static irqreturn_t igc_intr(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + napi_schedule(&q_vector->napi); return IRQ_HANDLED; @@ -5275,6 +6194,7 @@ static int __igc_open(struct net_device *netdev, bool resuming) struct igc_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = adapter->pdev; struct igc_hw *hw = &adapter->hw; + struct napi_struct *napi; int err = 0; int i = 0; @@ -5308,19 +6228,13 @@ static int __igc_open(struct net_device *netdev, bool resuming) if (err) goto err_req_irq; - /* Notify the stack of the actual queue counts. */ - err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); - if (err) - goto err_set_queues; - - err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); - if (err) - goto err_set_queues; - clear_bit(__IGC_DOWN, &adapter->state); - for (i = 0; i < adapter->num_q_vectors; i++) - napi_enable(&adapter->q_vector[i]->napi); + for (i = 0; i < adapter->num_q_vectors; i++) { + napi = &adapter->q_vector[i]->napi; + napi_enable(napi); + igc_set_queue_napi(adapter, i, napi); + } /* Clear any pending interrupts. 
*/ rd32(IGC_ICR); @@ -5337,8 +6251,6 @@ static int __igc_open(struct net_device *netdev, bool resuming) return IGC_SUCCESS; -err_set_queues: - igc_free_irq(adapter); err_req_irq: igc_release_hw_control(adapter); igc_power_down_phy_copper_base(&adapter->hw); @@ -5355,6 +6267,17 @@ err_setup_tx: int igc_open(struct net_device *netdev) { + struct igc_adapter *adapter = netdev_priv(netdev); + int err; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_queues(netdev, adapter->num_tx_queues, + adapter->num_rx_queues); + if (err) { + netdev_err(netdev, "error setting real queue count\n"); + return err; + } + return __igc_open(netdev, false); } @@ -5402,29 +6325,10 @@ int igc_close(struct net_device *netdev) return 0; } -/** - * igc_ioctl - Access the hwtstamp interface - * @netdev: network interface device structure - * @ifr: interface request data - * @cmd: ioctl command - **/ -static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -{ - switch (cmd) { - case SIOCGHWTSTAMP: - return igc_ptp_get_ts_config(netdev, ifr); - case SIOCSHWTSTAMP: - return igc_ptp_set_ts_config(netdev, ifr); - default: - return -EOPNOTSUPP; - } -} - static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, bool enable) { struct igc_ring *ring; - int i; if (queue < 0 || queue >= adapter->num_tx_queues) return -EINVAL; @@ -5432,17 +6336,6 @@ static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, ring = adapter->tx_ring[queue]; ring->launchtime_enable = enable; - if (adapter->base_time) - return 0; - - adapter->cycle_time = NSEC_PER_SEC; - - for (i = 0; i < adapter->num_tx_queues; i++) { - ring = adapter->tx_ring[i]; - ring->start_time = 0; - ring->end_time = NSEC_PER_SEC; - } - return 0; } @@ -5459,6 +6352,7 @@ static bool validate_schedule(struct igc_adapter *adapter, const struct tc_taprio_qopt_offload *qopt) { int queue_uses[IGC_MAX_TX_QUEUES] = { }; + struct igc_hw *hw = &adapter->hw; struct timespec64 now; size_t n; @@ -5471,14 +6365,17 @@ static bool validate_schedule(struct igc_adapter *adapter, * in the future, it will hold all the packets until that * time, causing a lot of TX Hangs, so to avoid that, we * reject schedules that would start in the future. + * Note: Limitation above is no longer in i226. */ - if (!is_base_time_past(qopt->base_time, &now)) + if (!is_base_time_past(qopt->base_time, &now) && + igc_is_device_id_i225(hw)) return false; for (n = 0; n < qopt->num_entries; n++) { - const struct tc_taprio_sched_entry *e; + const struct tc_taprio_sched_entry *e, *prev; int i; + prev = n ? &qopt->entries[n - 1] : NULL; e = &qopt->entries[n]; /* i225 only supports "global" frame preemption @@ -5487,13 +6384,18 @@ static bool validate_schedule(struct igc_adapter *adapter, if (e->command != TC_TAPRIO_CMD_SET_GATES) return false; - for (i = 0; i < IGC_MAX_TX_QUEUES; i++) { - if (e->gate_mask & BIT(i)) + for (i = 0; i < adapter->num_tx_queues; i++) + if (e->gate_mask & BIT(i)) { queue_uses[i]++; - if (queue_uses[i] > 1) - return false; - } + /* There are limitations: A single queue cannot + * be opened and closed multiple times per cycle + * unless the gate stays open. Check for it. 
+ */ + if (queue_uses[i] > 1 && + !(prev->gate_mask & BIT(i))) + return false; + } } return true; @@ -5515,48 +6417,189 @@ static int igc_tsn_enable_launchtime(struct igc_adapter *adapter, return igc_tsn_offload_apply(adapter); } +static int igc_qbv_clear_schedule(struct igc_adapter *adapter) +{ + unsigned long flags; + int i; + + adapter->base_time = 0; + adapter->cycle_time = NSEC_PER_SEC; + adapter->taprio_offload_enable = false; + adapter->qbv_config_change_errors = 0; + adapter->qbv_count = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + ring->start_time = 0; + ring->end_time = NSEC_PER_SEC; + ring->max_sdu = 0; + ring->preemptible = false; + } + + spin_lock_irqsave(&adapter->qbv_tx_lock, flags); + + adapter->qbv_transition = false; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + ring->oper_gate_closed = false; + ring->admin_gate_closed = false; + } + + spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); + + return 0; +} + +static int igc_tsn_clear_schedule(struct igc_adapter *adapter) +{ + igc_qbv_clear_schedule(adapter); + + return 0; +} + +static void igc_taprio_stats(struct net_device *dev, + struct tc_taprio_qopt_stats *stats) +{ + /* When Strict_End is enabled, the tx_overruns counter + * will always be zero. + */ + stats->tx_overruns = 0; +} + +static void igc_taprio_queue_stats(struct net_device *dev, + struct tc_taprio_qopt_queue_stats *queue_stats) +{ + struct tc_taprio_qopt_stats *stats = &queue_stats->stats; + + /* When Strict_End is enabled, the tx_overruns counter + * will always be zero. + */ + stats->tx_overruns = 0; +} + static int igc_save_qbv_schedule(struct igc_adapter *adapter, struct tc_taprio_qopt_offload *qopt) { + bool queue_configured[IGC_MAX_TX_QUEUES] = { }; + struct igc_hw *hw = &adapter->hw; u32 start_time = 0, end_time = 0; + struct timespec64 now; + unsigned long flags; size_t n; + int i; - if (!qopt->enable) { - adapter->base_time = 0; - return 0; - } + if (qopt->base_time < 0) + return -ERANGE; - if (adapter->base_time) + if (igc_is_device_id_i225(hw) && adapter->taprio_offload_enable) return -EALREADY; if (!validate_schedule(adapter, qopt)) return -EINVAL; + if (qopt->mqprio.preemptible_tcs && + !(adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO)) { + NL_SET_ERR_MSG_MOD(qopt->extack, + "reverse-tsn-txq-prio private flag must be enabled before setting preemptible tc"); + return -ENODEV; + } + + igc_ptp_read(adapter, &now); + + if (igc_tsn_is_taprio_activated_by_user(adapter) && + is_base_time_past(qopt->base_time, &now)) + adapter->qbv_config_change_errors++; + adapter->cycle_time = qopt->cycle_time; adapter->base_time = qopt->base_time; + adapter->taprio_offload_enable = true; - /* FIXME: be a little smarter about cases when the gate for a - * queue stays open for more than one entry. - */ for (n = 0; n < qopt->num_entries; n++) { struct tc_taprio_sched_entry *e = &qopt->entries[n]; - int i; end_time += e->interval; - for (i = 0; i < IGC_MAX_TX_QUEUES; i++) { + /* If any of the conditions below are true, we need to manually + * control the end time of the cycle. + * 1. Qbv users can specify a cycle time that is not equal + * to the total GCL intervals. Hence, recalculation is + * necessary here to exclude the time interval that + * exceeds the cycle time. + * 2. According to IEEE Std. 
802.1Q-2018 section 8.6.9.2, + * once the end of the list is reached, it will switch + * to the END_OF_CYCLE state and leave the gates in the + * same state until the next cycle is started. + */ + if (end_time > adapter->cycle_time || + n + 1 == qopt->num_entries) + end_time = adapter->cycle_time; + + for (i = 0; i < adapter->num_tx_queues; i++) { struct igc_ring *ring = adapter->tx_ring[i]; if (!(e->gate_mask & BIT(i))) continue; - ring->start_time = start_time; + /* Check whether a queue stays open for more than one + * entry. If so, keep the start and advance the end + * time. + */ + if (!queue_configured[i]) + ring->start_time = start_time; ring->end_time = end_time; + + if (ring->start_time >= adapter->cycle_time) + queue_configured[i] = false; + else + queue_configured[i] = true; } start_time += e->interval; } + spin_lock_irqsave(&adapter->qbv_tx_lock, flags); + + /* Check whether a queue gets configured. + * If not, set the start and end time to be end time. + */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (!is_base_time_past(qopt->base_time, &now)) { + ring->admin_gate_closed = false; + } else { + ring->oper_gate_closed = false; + ring->admin_gate_closed = false; + } + + if (!queue_configured[i]) { + if (!is_base_time_past(qopt->base_time, &now)) + ring->admin_gate_closed = true; + else + ring->oper_gate_closed = true; + + ring->start_time = end_time; + ring->end_time = end_time; + } + } + + spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + struct net_device *dev = adapter->netdev; + + if (qopt->max_sdu[i]) + ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len - ETH_TLEN; + else + ring->max_sdu = 0; + } + + igc_fpe_save_preempt_queue(adapter, &qopt->mqprio); + return 0; } @@ -5569,25 +6612,240 @@ static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter, if (hw->mac.type != igc_i225) return -EOPNOTSUPP; - err = igc_save_qbv_schedule(adapter, qopt); + switch (qopt->cmd) { + case TAPRIO_CMD_REPLACE: + err = igc_save_qbv_schedule(adapter, qopt); + break; + case TAPRIO_CMD_DESTROY: + err = igc_tsn_clear_schedule(adapter); + break; + case TAPRIO_CMD_STATS: + igc_taprio_stats(adapter->netdev, &qopt->stats); + return 0; + case TAPRIO_CMD_QUEUE_STATS: + igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats); + return 0; + default: + return -EOPNOTSUPP; + } + if (err) return err; return igc_tsn_offload_apply(adapter); } +static int igc_save_cbs_params(struct igc_adapter *adapter, int queue, + bool enable, int idleslope, int sendslope, + int hicredit, int locredit) +{ + bool cbs_status[IGC_MAX_SR_QUEUES] = { false }; + struct net_device *netdev = adapter->netdev; + struct igc_ring *ring; + int i; + + /* i225 has two sets of credit-based shaper logic. + * Supporting it only on the top two priority queues + */ + if (queue < 0 || queue > 1) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + + for (i = 0; i < IGC_MAX_SR_QUEUES; i++) + if (adapter->tx_ring[i]) + cbs_status[i] = adapter->tx_ring[i]->cbs_enable; + + /* CBS should be enabled on the highest priority queue first in order + * for the CBS algorithm to operate as intended. 
+ */ + if (enable) { + if (queue == 1 && !cbs_status[0]) { + netdev_err(netdev, + "Enabling CBS on queue1 before queue0\n"); + return -EINVAL; + } + } else { + if (queue == 0 && cbs_status[1]) { + netdev_err(netdev, + "Disabling CBS on queue0 before queue1\n"); + return -EINVAL; + } + } + + ring->cbs_enable = enable; + ring->idleslope = idleslope; + ring->sendslope = sendslope; + ring->hicredit = hicredit; + ring->locredit = locredit; + + return 0; +} + +static int igc_tsn_enable_cbs(struct igc_adapter *adapter, + struct tc_cbs_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable, + qopt->idleslope, qopt->sendslope, + qopt->hicredit, qopt->locredit); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_tc_query_caps(struct igc_adapter *adapter, + struct tc_query_caps_base *base) +{ + struct igc_hw *hw = &adapter->hw; + + switch (base->type) { + case TC_SETUP_QDISC_MQPRIO: { + struct tc_mqprio_caps *caps = base->caps; + + caps->validate_queue_counts = true; + + return 0; + } + case TC_SETUP_QDISC_TAPRIO: { + struct tc_taprio_caps *caps = base->caps; + + if (!(adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO)) + caps->broken_mqprio = true; + + if (hw->mac.type == igc_i225) { + caps->supports_queue_max_sdu = true; + caps->gate_mask_per_txq = true; + } + + return 0; + } + default: + return -EOPNOTSUPP; + } +} + +static void igc_save_mqprio_params(struct igc_adapter *adapter, u8 num_tc, + u16 *offset) +{ + int i; + + adapter->strict_priority_enable = true; + adapter->num_tc = num_tc; + + for (i = 0; i < num_tc; i++) + adapter->queue_per_tc[i] = offset[i]; +} + +static bool +igc_tsn_is_tc_to_queue_priority_ordered(struct tc_mqprio_qopt_offload *mqprio) +{ + int num_tc = mqprio->qopt.num_tc; + int i; + + for (i = 1; i < num_tc; i++) { + if (mqprio->qopt.offset[i - 1] > mqprio->qopt.offset[i]) + return false; + } + + return true; +} + +static int igc_tsn_enable_mqprio(struct igc_adapter *adapter, + struct tc_mqprio_qopt_offload *mqprio) +{ + struct igc_hw *hw = &adapter->hw; + int err, i; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + if (!mqprio->qopt.num_tc) { + adapter->strict_priority_enable = false; + igc_fpe_clear_preempt_queue(adapter); + netdev_reset_tc(adapter->netdev); + goto apply; + } + + /* There are as many TCs as Tx queues. */ + if (mqprio->qopt.num_tc != adapter->num_tx_queues) { + NL_SET_ERR_MSG_FMT_MOD(mqprio->extack, + "Only %d traffic classes supported", + adapter->num_tx_queues); + return -EOPNOTSUPP; + } + + /* Only one queue per TC is supported. */ + for (i = 0; i < mqprio->qopt.num_tc; i++) { + if (mqprio->qopt.count[i] != 1) { + NL_SET_ERR_MSG_MOD(mqprio->extack, + "Only one queue per TC supported"); + return -EOPNOTSUPP; + } + } + + if (!igc_tsn_is_tc_to_queue_priority_ordered(mqprio)) { + NL_SET_ERR_MSG_MOD(mqprio->extack, + "tc to queue mapping must preserve increasing priority (higher tc -> higher queue)"); + return -EOPNOTSUPP; + } + + igc_save_mqprio_params(adapter, mqprio->qopt.num_tc, + mqprio->qopt.offset); + + err = netdev_set_num_tc(adapter->netdev, adapter->num_tc); + if (err) + return err; + + for (i = 0; i < adapter->num_tc; i++) { + err = netdev_set_tc_queue(adapter->netdev, i, 1, + adapter->queue_per_tc[i]); + if (err) + return err; + } + + /* In case the card is configured with less than four queues. 
*/ + for (; i < IGC_MAX_TX_QUEUES; i++) + adapter->queue_per_tc[i] = i; + + mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS; + igc_fpe_save_preempt_queue(adapter, mqprio); + +apply: + return igc_tsn_offload_apply(adapter); +} + static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { struct igc_adapter *adapter = netdev_priv(dev); + adapter->tc_setup_type = type; + switch (type) { + case TC_QUERY_CAPS: + return igc_tc_query_caps(adapter, type_data); case TC_SETUP_QDISC_TAPRIO: return igc_tsn_enable_qbv_scheduling(adapter, type_data); case TC_SETUP_QDISC_ETF: return igc_tsn_enable_launchtime(adapter, type_data); + case TC_SETUP_QDISC_CBS: + return igc_tsn_enable_cbs(adapter, type_data); + + case TC_SETUP_QDISC_MQPRIO: + return igc_tsn_enable_mqprio(adapter, type_data); + default: return -EOPNOTSUPP; } @@ -5615,29 +6873,31 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames, int cpu = smp_processor_id(); struct netdev_queue *nq; struct igc_ring *ring; - int i, drops; + int i, nxmit; - if (unlikely(test_bit(__IGC_DOWN, &adapter->state))) + if (unlikely(!netif_carrier_ok(dev))) return -ENETDOWN; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; - ring = igc_xdp_get_tx_ring(adapter, cpu); + ring = igc_get_tx_ring(adapter, cpu); nq = txring_txq(ring); __netif_tx_lock(nq, cpu); - drops = 0; + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); + + nxmit = 0; for (i = 0; i < num_frames; i++) { int err; struct xdp_frame *xdpf = frames[i]; err = igc_xdp_init_tx_descriptor(ring, xdpf); - if (err) { - xdp_return_frame_rx_napi(xdpf); - drops++; - } + if (err) + break; + nxmit++; } if (flags & XDP_XMIT_FLUSH) @@ -5645,7 +6905,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames, __netif_tx_unlock(nq); - return num_frames - drops; + return nxmit; } static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter, @@ -5685,6 +6945,24 @@ int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) return 0; } +static ktime_t igc_get_tstamp(struct net_device *dev, + const struct skb_shared_hwtstamps *hwtstamps, + bool cycles) +{ + struct igc_adapter *adapter = netdev_priv(dev); + struct igc_inline_rx_tstamps *tstamp; + ktime_t timestamp; + + tstamp = hwtstamps->netdev_data; + + if (cycles) + timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer1); + else + timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0); + + return timestamp; +} + static const struct net_device_ops igc_netdev_ops = { .ndo_open = igc_open, .ndo_stop = igc_close, @@ -5692,62 +6970,29 @@ static const struct net_device_ops igc_netdev_ops = { .ndo_set_rx_mode = igc_set_rx_mode, .ndo_set_mac_address = igc_set_mac, .ndo_change_mtu = igc_change_mtu, + .ndo_tx_timeout = igc_tx_timeout, .ndo_get_stats64 = igc_get_stats64, .ndo_fix_features = igc_fix_features, .ndo_set_features = igc_set_features, .ndo_features_check = igc_features_check, - .ndo_do_ioctl = igc_ioctl, .ndo_setup_tc = igc_setup_tc, .ndo_bpf = igc_bpf, .ndo_xdp_xmit = igc_xdp_xmit, .ndo_xsk_wakeup = igc_xsk_wakeup, + .ndo_get_tstamp = igc_get_tstamp, + .ndo_hwtstamp_get = igc_ptp_hwtstamp_get, + .ndo_hwtstamp_set = igc_ptp_hwtstamp_set, }; -/* PCIe configuration access */ -void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) -{ - struct igc_adapter *adapter = hw->back; - - pci_read_config_word(adapter->pdev, reg, value); -} - -void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) -{ - struct igc_adapter *adapter = hw->back; - - 
pci_write_config_word(adapter->pdev, reg, *value); -} - -s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) -{ - struct igc_adapter *adapter = hw->back; - - if (!pci_is_pcie(adapter->pdev)) - return -IGC_ERR_CONFIG; - - pcie_capability_read_word(adapter->pdev, reg, value); - - return IGC_SUCCESS; -} - -s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) -{ - struct igc_adapter *adapter = hw->back; - - if (!pci_is_pcie(adapter->pdev)) - return -IGC_ERR_CONFIG; - - pcie_capability_write_word(adapter->pdev, reg, *value); - - return IGC_SUCCESS; -} - u32 igc_rd32(struct igc_hw *hw, u32 reg) { struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); u32 value = 0; + if (IGC_REMOVED(hw_addr)) + return ~value; + value = readl(&hw_addr[reg]); /* reads should not return all F's */ @@ -5764,54 +7009,85 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg) return value; } -int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx) +/* Mapping HW RSS Type to enum xdp_rss_hash_type */ +static enum xdp_rss_hash_type igc_xdp_rss_type[IGC_RSS_TYPE_MAX_TABLE] = { + [IGC_RSS_TYPE_NO_HASH] = XDP_RSS_TYPE_L2, + [IGC_RSS_TYPE_HASH_TCP_IPV4] = XDP_RSS_TYPE_L4_IPV4_TCP, + [IGC_RSS_TYPE_HASH_IPV4] = XDP_RSS_TYPE_L3_IPV4, + [IGC_RSS_TYPE_HASH_TCP_IPV6] = XDP_RSS_TYPE_L4_IPV6_TCP, + [IGC_RSS_TYPE_HASH_IPV6_EX] = XDP_RSS_TYPE_L3_IPV6_EX, + [IGC_RSS_TYPE_HASH_IPV6] = XDP_RSS_TYPE_L3_IPV6, + [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX, + [IGC_RSS_TYPE_HASH_UDP_IPV4] = XDP_RSS_TYPE_L4_IPV4_UDP, + [IGC_RSS_TYPE_HASH_UDP_IPV6] = XDP_RSS_TYPE_L4_IPV6_UDP, + [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX, + [10] = XDP_RSS_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */ + [11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask */ + [12] = XDP_RSS_TYPE_NONE, /* to handle future HW revisons */ + [13] = XDP_RSS_TYPE_NONE, + [14] = XDP_RSS_TYPE_NONE, + [15] = XDP_RSS_TYPE_NONE, +}; + +static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash, + enum xdp_rss_hash_type *rss_type) { - struct igc_mac_info *mac = &adapter->hw.mac; + const struct igc_xdp_buff *ctx = (void *)_ctx; - mac->autoneg = false; + if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)) + return -ENODATA; - /* Make sure dplx is at most 1 bit and lsb of speed is not set - * for the switch() below to work - */ - if ((spd & 1) || (dplx & ~1)) - goto err_inval; + *hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss); + *rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)]; - switch (spd + dplx) { - case SPEED_10 + DUPLEX_HALF: - mac->forced_speed_duplex = ADVERTISE_10_HALF; - break; - case SPEED_10 + DUPLEX_FULL: - mac->forced_speed_duplex = ADVERTISE_10_FULL; - break; - case SPEED_100 + DUPLEX_HALF: - mac->forced_speed_duplex = ADVERTISE_100_HALF; - break; - case SPEED_100 + DUPLEX_FULL: - mac->forced_speed_duplex = ADVERTISE_100_FULL; - break; - case SPEED_1000 + DUPLEX_FULL: - mac->autoneg = true; - adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; - break; - case SPEED_1000 + DUPLEX_HALF: /* not supported */ - goto err_inval; - case SPEED_2500 + DUPLEX_FULL: - mac->autoneg = true; - adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL; - break; - case SPEED_2500 + DUPLEX_HALF: /* not supported */ - default: - goto err_inval; + return 0; +} + +static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp) +{ + const struct igc_xdp_buff *ctx = (void *)_ctx; + struct igc_adapter *adapter = 
netdev_priv(ctx->xdp.rxq->dev); + struct igc_inline_rx_tstamps *tstamp = ctx->rx_ts; + + if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) { + *timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0); + + return 0; } - /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ - adapter->hw.phy.mdix = AUTO_ALL_MODES; + return -ENODATA; +} - return 0; +static const struct xdp_metadata_ops igc_xdp_metadata_ops = { + .xmo_rx_hash = igc_xdp_rx_hash, + .xmo_rx_timestamp = igc_xdp_rx_timestamp, +}; + +static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer) +{ + struct igc_adapter *adapter = container_of(timer, struct igc_adapter, + hrtimer); + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&adapter->qbv_tx_lock, flags); + + adapter->qbv_transition = true; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + if (tx_ring->admin_gate_closed) { + tx_ring->admin_gate_closed = false; + tx_ring->oper_gate_closed = true; + } else { + tx_ring->oper_gate_closed = false; + } + } + adapter->qbv_transition = false; + + spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); -err_inval: - netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n"); - return -EINVAL; + return HRTIMER_NORESTART; } /** @@ -5832,30 +7108,26 @@ static int igc_probe(struct pci_dev *pdev, struct net_device *netdev; struct igc_hw *hw; const struct igc_info *ei = igc_info_tbl[ent->driver_data]; - int err, pci_using_dac; + int err; err = pci_enable_device_mem(pdev); if (err) return err; - pci_using_dac = 0; err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); - if (!err) { - pci_using_dac = 1; - } else { - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "No usable DMA configuration, aborting\n"); - goto err_dma; - } + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; } err = pci_request_mem_regions(pdev, igc_driver_name); if (err) goto err_pci_reg; - pci_enable_pcie_error_reporting(pdev); + err = pci_enable_ptm(pdev, NULL); + if (err < 0) + dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); pci_set_master(pdev); @@ -5877,6 +7149,17 @@ static int igc_probe(struct pci_dev *pdev, adapter->port_num = hw->bus.func; adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Disable ASPM L1.2 on I226 devices to avoid packet loss */ + if (igc_is_device_id_i226(hw)) + pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2); + err = pci_save_state(pdev); if (err) goto err_ioremap; @@ -5891,19 +7174,14 @@ static int igc_probe(struct pci_dev *pdev, hw->hw_addr = adapter->io_addr; netdev->netdev_ops = &igc_netdev_ops; + netdev->xdp_metadata_ops = &igc_xdp_metadata_ops; + netdev->xsk_tx_metadata_ops = &igc_xsk_tx_metadata_ops; igc_ethtool_set_ops(netdev); netdev->watchdog_timeo = 5 * HZ; netdev->mem_start = pci_resource_start(pdev, 0); netdev->mem_end = pci_resource_end(pdev, 0); - /* PCI config space info */ - hw->vendor_id = pdev->vendor; - hw->device_id = pdev->device; - hw->revision_id = pdev->revision; - hw->subsystem_vendor_id = pdev->subsystem_vendor; - hw->subsystem_device_id = pdev->subsystem_device; - /* Copy the default MAC and PHY function pointers */ 
memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); @@ -5918,6 +7196,7 @@ static int igc_probe(struct pci_dev *pdev, netdev->features |= NETIF_F_TSO; netdev->features |= NETIF_F_TSO6; netdev->features |= NETIF_F_TSO_ECN; + netdev->features |= NETIF_F_RXHASH; netdev->features |= NETIF_F_RXCSUM; netdev->features |= NETIF_F_HW_CSUM; netdev->features |= NETIF_F_SCTP_CRC; @@ -5944,10 +7223,17 @@ static int igc_probe(struct pci_dev *pdev, netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; netdev->hw_features |= netdev->features; - if (pci_using_dac) - netdev->features |= NETIF_F_HIGHDMA; + netdev->features |= NETIF_F_HIGHDMA; + + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= netdev->vlan_features; + + netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_XSK_ZEROCOPY; - netdev->vlan_features |= netdev->features; + /* enable HW vlan tag insertion/stripping by default */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; /* MTU range: 68 - 9216 */ netdev->min_mtu = ETH_MIN_MTU; @@ -5972,7 +7258,7 @@ static int igc_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "NVM Read Error\n"); } - memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); + eth_hw_addr_set(netdev, hw->mac.addr); if (!is_valid_ether_addr(netdev->dev_addr)) { dev_err(&pdev->dev, "Invalid MAC Address\n"); @@ -5981,8 +7267,8 @@ static int igc_probe(struct pci_dev *pdev, } /* configure RXPBSIZE and TXPBSIZE */ - wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT); - wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); + wr32(IGC_RXPBS, IGC_RXPBSIZE_EXP_BMC_DEFAULT); + wr32(IGC_TXPBS, IGC_TXPBSIZE_DEFAULT); timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); @@ -5990,9 +7276,11 @@ static int igc_probe(struct pci_dev *pdev, INIT_WORK(&adapter->reset_task, igc_reset_task); INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); + hrtimer_setup(&adapter->hrtimer, &igc_qbv_scheduling_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + /* Initialize link properties that are user-changeable */ adapter->fc_autoneg = true; - hw->mac.autoneg = true; hw->phy.autoneg_advertised = 0xaf; hw->fc.requested_mode = igc_fc_default; @@ -6010,6 +7298,10 @@ static int igc_probe(struct pci_dev *pdev, igc_ptp_init(adapter); + igc_tsn_clear_schedule(adapter); + + igc_fpe_init(adapter); + /* reset the hardware with the new settings */ igc_reset(adapter); @@ -6018,7 +7310,7 @@ static int igc_probe(struct pci_dev *pdev, */ igc_get_hw_control(adapter); - strncpy(netdev->name, "eth%d", IFNAMSIZ); + strscpy(netdev->name, "eth%d", sizeof(netdev->name)); err = register_netdev(netdev); if (err) goto err_register; @@ -6041,10 +7333,23 @@ static int igc_probe(struct pci_dev *pdev, pm_runtime_put_noidle(&pdev->dev); + if (IS_ENABLED(CONFIG_IGC_LEDS)) { + err = igc_led_setup(adapter); + if (err) { + netdev_warn_once(netdev, + "LED init failed (%d); continuing without LED support\n", + err); + adapter->leds_available = false; + } else { + adapter->leds_available = true; + } + } + return 0; err_register: igc_release_hw_control(adapter); + igc_ptp_stop(adapter); err_eeprom: if (!igc_check_reset_block(hw)) igc_reset_phy(hw); @@ -6081,13 +7386,20 @@ static void igc_remove(struct pci_dev *pdev) igc_ptp_stop(adapter); + pci_disable_ptm(pdev); + pci_clear_master(pdev); + set_bit(__IGC_DOWN, &adapter->state); - 
del_timer_sync(&adapter->watchdog_timer); - del_timer_sync(&adapter->phy_info_timer); + timer_delete_sync(&adapter->watchdog_timer); + timer_delete_sync(&adapter->phy_info_timer); cancel_work_sync(&adapter->reset_task); cancel_work_sync(&adapter->watchdog_task); + hrtimer_cancel(&adapter->hrtimer); + + if (IS_ENABLED(CONFIG_IGC_LEDS) && adapter->leds_available) + igc_led_free(adapter); /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. @@ -6101,8 +7413,6 @@ static void igc_remove(struct pci_dev *pdev) free_netdev(netdev); - pci_disable_pcie_error_reporting(pdev); - pci_disable_device(pdev); } @@ -6175,8 +7485,7 @@ static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake, return 0; } -#ifdef CONFIG_PM -static int __maybe_unused igc_runtime_suspend(struct device *dev) +static int igc_runtime_suspend(struct device *dev) { return __igc_shutdown(to_pci_dev(dev), NULL, 1); } @@ -6211,7 +7520,7 @@ static void igc_deliver_wake_packet(struct net_device *netdev) netif_rx(skb); } -static int __maybe_unused igc_resume(struct device *dev) +static int __igc_resume(struct device *dev, bool rpm) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); @@ -6221,7 +7530,6 @@ static int __maybe_unused igc_resume(struct device *dev) pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); - pci_save_state(pdev); if (!pci_device_is_present(pdev)) return -ENODEV; @@ -6235,6 +7543,9 @@ static int __maybe_unused igc_resume(struct device *dev) pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); + if (igc_is_device_id_i226(hw)) + pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2); + if (igc_init_interrupt_scheme(adapter, true)) { netdev_err(netdev, "Unable to allocate memory for queues\n"); return -ENOMEM; @@ -6253,23 +7564,30 @@ static int __maybe_unused igc_resume(struct device *dev) wr32(IGC_WUS, ~0); - rtnl_lock(); - if (!err && netif_running(netdev)) + if (netif_running(netdev)) { + if (!rpm) + rtnl_lock(); err = __igc_open(netdev, true); - - if (!err) - netif_device_attach(netdev); - rtnl_unlock(); + if (!rpm) + rtnl_unlock(); + if (!err) + netif_device_attach(netdev); + } return err; } -static int __maybe_unused igc_runtime_resume(struct device *dev) +static int igc_resume(struct device *dev) { - return igc_resume(dev); + return __igc_resume(dev, false); } -static int __maybe_unused igc_suspend(struct device *dev) +static int igc_runtime_resume(struct device *dev) +{ + return __igc_resume(dev, true); +} + +static int igc_suspend(struct device *dev) { return __igc_shutdown(to_pci_dev(dev), NULL, 0); } @@ -6284,7 +7602,6 @@ static int __maybe_unused igc_runtime_idle(struct device *dev) return -EBUSY; } -#endif /* CONFIG_PM */ static void igc_shutdown(struct pci_dev *pdev) { @@ -6312,14 +7629,18 @@ static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev, struct net_device *netdev = pci_get_drvdata(pdev); struct igc_adapter *adapter = netdev_priv(netdev); + rtnl_lock(); netif_device_detach(netdev); - if (state == pci_channel_io_perm_failure) + if (state == pci_channel_io_perm_failure) { + rtnl_unlock(); return PCI_ERS_RESULT_DISCONNECT; + } if (netif_running(netdev)) igc_down(adapter); pci_disable_device(pdev); + rtnl_unlock(); /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; @@ -6330,7 +7651,7 @@ static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev, * @pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. 
Implementation - * resembles the first-half of the igc_resume routine. + * resembles the first-half of the __igc_resume routine. **/ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) { @@ -6345,11 +7666,13 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) } else { pci_set_master(pdev); pci_restore_state(pdev); - pci_save_state(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); + if (igc_is_device_id_i226(hw)) + pci_disable_link_state_locked(pdev, PCIE_LINK_STATE_L1_2); + /* In case of PCI error, adapter loses its HW address * so we should re-assign it here. */ @@ -6369,7 +7692,7 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) * * This callback is called when the error recovery driver tells us that * its OK to resume normal operation. Implementation resembles the - * second-half of the igc_resume routine. + * second-half of the __igc_resume routine. */ static void igc_io_resume(struct pci_dev *pdev) { @@ -6379,6 +7702,7 @@ static void igc_io_resume(struct pci_dev *pdev) rtnl_lock(); if (netif_running(netdev)) { if (igc_open(netdev)) { + rtnl_unlock(); netdev_err(netdev, "igc_open failed after reset\n"); return; } @@ -6399,22 +7723,16 @@ static const struct pci_error_handlers igc_err_handler = { .resume = igc_io_resume, }; -#ifdef CONFIG_PM -static const struct dev_pm_ops igc_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume) - SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume, - igc_runtime_idle) -}; -#endif +static _DEFINE_DEV_PM_OPS(igc_pm_ops, igc_suspend, igc_resume, + igc_runtime_suspend, igc_runtime_resume, + igc_runtime_idle); static struct pci_driver igc_driver = { .name = igc_driver_name, .id_table = igc_pci_tbl, .probe = igc_probe, .remove = igc_remove, -#ifdef CONFIG_PM - .driver.pm = &igc_pm_ops, -#endif + .driver.pm = pm_ptr(&igc_pm_ops), .shutdown = igc_shutdown, .err_handler = &igc_err_handler, }; @@ -6487,18 +7805,6 @@ void igc_enable_rx_ring(struct igc_ring *ring) igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); } -static void igc_disable_tx_ring_hw(struct igc_ring *ring) -{ - struct igc_hw *hw = &ring->q_vector->adapter->hw; - u8 idx = ring->reg_idx; - u32 txdctl; - - txdctl = rd32(IGC_TXDCTL(idx)); - txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE; - txdctl |= IGC_TXDCTL_SWFLUSH; - wr32(IGC_TXDCTL(idx), txdctl); -} - void igc_disable_tx_ring(struct igc_ring *ring) { igc_disable_tx_ring_hw(ring); |
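
Note on the taprio changes above: igc_save_qbv_schedule() now folds the gate-control list into a single open window per TX queue (start_time/end_time), keeping the earliest opening when a gate stays open across consecutive entries and clamping everything to the cycle time. The following is a minimal userspace sketch of that folding step, assuming made-up names (fold_schedule, sched_entry, queue_window) and a fixed four-queue layout for illustration; it is not driver code.

/*
 * Minimal userspace model of folding a Qbv gate-control list into one
 * open window per TX queue, in the spirit of igc_save_qbv_schedule().
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_QUEUES 4

struct sched_entry {
	uint32_t gate_mask;	/* bit i set => queue i gate open */
	uint64_t interval;	/* entry duration in nanoseconds */
};

struct queue_window {
	uint64_t start;		/* ns into the cycle when the gate opens */
	uint64_t end;		/* ns into the cycle when the gate closes */
};

static void fold_schedule(const struct sched_entry *gcl, size_t n_entries,
			  uint64_t cycle_time, struct queue_window *win)
{
	bool configured[NUM_QUEUES] = { false };
	uint64_t start = 0, end = 0;
	size_t n, i;

	/* Queues never named by the schedule stay closed the whole cycle. */
	for (i = 0; i < NUM_QUEUES; i++)
		win[i].start = win[i].end = cycle_time;

	for (n = 0; n < n_entries; n++) {
		end += gcl[n].interval;

		/*
		 * Clamp to the cycle: intervals past cycle_time are cut off,
		 * and the last entry keeps its gates in the same state until
		 * the next cycle starts.
		 */
		if (end > cycle_time || n + 1 == n_entries)
			end = cycle_time;

		for (i = 0; i < NUM_QUEUES; i++) {
			if (!(gcl[n].gate_mask & (1u << i)))
				continue;

			/*
			 * Remember only the first opening; if the gate stays
			 * open across consecutive entries, just push the end.
			 */
			if (!configured[i]) {
				win[i].start = start;
				configured[i] = true;
			}
			win[i].end = end;
		}

		start += gcl[n].interval;
	}
}

int main(void)
{
	const struct sched_entry gcl[] = {
		{ .gate_mask = 0x1, .interval = 300000 },	/* queue 0 */
		{ .gate_mask = 0x3, .interval = 300000 },	/* queues 0, 1 */
		{ .gate_mask = 0x8, .interval = 400000 },	/* queue 3 */
	};
	struct queue_window win[NUM_QUEUES];
	int i;

	fold_schedule(gcl, 3, 1000000ULL, win);

	for (i = 0; i < NUM_QUEUES; i++)
		printf("queue %d: open %llu..%llu ns\n", i,
		       (unsigned long long)win[i].start,
		       (unsigned long long)win[i].end);
	return 0;
}

Under this simplified model, queue 0 stays open across the first two entries and keeps its original start time (0..600000 ns), which is the behaviour the new queue_configured[] bookkeeping in the patch introduces; queues left out of every entry end up with a zero-length window, analogous to the gate-closed handling above.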

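
Note on the stats hunks: the readers in igc_update_stats() are switched from the u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair to the plain u64_stats_fetch_begin()/u64_stats_fetch_retry() helpers. A minimal sketch of that reader-side retry loop follows, assuming a stand-alone demo_ring_stats structure (a made-up name for illustration) rather than the driver's igc_ring.

/* Lockless 64-bit stats reader loop, kernel-style sketch. */
#include <linux/u64_stats_sync.h>

struct demo_ring_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

static void demo_read_stats(const struct demo_ring_stats *s,
			    u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		/*
		 * Snapshot the sequence count, copy the counters, and retry
		 * if a writer updated them while we were reading.
		 */
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}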