Diffstat (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c')
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  388
1 file changed, 210 insertions(+), 178 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 49e23afa05a2..d5ce20f47def 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
/******************************************************************************
Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
@@ -30,6 +30,7 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
+#include <net/xfrm.h>
#include "ixgbevf.h"
@@ -37,10 +38,8 @@ const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
-#define DRV_VERSION "4.1.0-k"
-const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
- "Copyright (c) 2009 - 2018 Intel Corporation.";
+ "Copyright (c) 2009 - 2024 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_82599_vf] = &ixgbevf_82599_vf_info,
@@ -52,6 +51,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
[board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
[board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info,
+ [board_e610_vf] = &ixgbevf_e610_vf_info,
+ [board_e610_vf_hv] = &ixgbevf_e610_vf_hv_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
@@ -72,15 +73,16 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
+ {PCI_VDEVICE_SUB(INTEL, IXGBE_DEV_ID_E610_VF, PCI_ANY_ID,
+ IXGBE_SUBDEV_ID_E610_VF_HV), board_e610_vf_hv},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_VF), board_e610_vf},
/* required last entry */
{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
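
The two E610 entries added to ixgbevf_pci_tbl above rely on PCI_VDEVICE_SUB, which extends PCI_VDEVICE with subsystem matching; a paraphrased sketch of the macro's shape (see include/linux/pci.h for the authoritative definition):

        #define PCI_VDEVICE_SUB(vend, dev, subvend, subdev) \
                .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
                .subvendor = (subvend), .subdevice = (subdev), 0, 0

Since the PCI core takes the first matching table entry, the subsystem-specific board_e610_vf_hv line has to precede the catch-all IXGBE_DEV_ID_E610_VF entry.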
@@ -248,8 +250,9 @@ static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: transmit queue hanging (unused)
**/
-static void ixgbevf_tx_timeout(struct net_device *netdev)
+static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -734,10 +737,6 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
/* verify that the packet does not have any known errors */
if (unlikely(ixgbevf_test_staterr(rx_desc,
IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
@@ -782,18 +781,13 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
-static inline bool ixgbevf_page_is_reserved(struct page *page)
-{
- return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
- /* avoid re-using remote pages */
- if (unlikely(ixgbevf_page_is_reserved(page)))
+ /* avoid re-using remote and pfmemalloc pages */
+ if (!dev_page_is_reusable(page))
return false;
#if (PAGE_SIZE < 8192)
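
The deleted ixgbevf_page_is_reserved() is replaced by the generic helper with inverted polarity; it checks the same two conditions, roughly (paraphrased from include/linux/skbuff.h):

        static inline bool dev_page_is_reusable(const struct page *page)
        {
                return likely(page_to_nid(page) == numa_mem_id() &&
                              !page_is_pfmemalloc(page));
        }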
@@ -868,10 +862,8 @@ struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data);
+
/* Note, we get here by enabling legacy-rx via:
*
* ethtool --set-priv-flags <dev> legacy-rx on
@@ -895,7 +887,8 @@ struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
/* Determine available headroom for copy */
headlen = size;
if (headlen > IXGBEVF_RX_HDR_SIZE)
- headlen = eth_get_headlen(xdp->data, IXGBEVF_RX_HDR_SIZE);
+ headlen = eth_get_headlen(skb->dev, xdp->data,
+ IXGBEVF_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, headlen), xdp->data,
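
Two tree-wide API changes meet in this hunk: eth_get_headlen() grew a net_device argument (used for flow dissection), and net_prefetch() absorbs the per-driver L1_CACHE_BYTES ifdeffery; the helper is roughly (paraphrased from include/linux/netdevice.h):

        static inline void net_prefetch(void *p)
        {
                prefetch(p);
        #if L1_CACHE_BYTES < 128
                prefetch((u8 *)p + L1_CACHE_BYTES);
        #endif
        }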
@@ -948,13 +941,10 @@ static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
* have a consumer accessing first few bytes of meta data,
* and then actual data.
*/
- prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data_meta + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(xdp->data_hard_start, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
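
napi_build_skb() has the same contract as build_skb() -- wrap an skb around an already-populated buffer -- but takes the skb head from the per-CPU NAPI cache, which is cheaper in softirq context. The call site is a drop-in swap; a minimal usage sketch under the same variables:

        skb = napi_build_skb(xdp->data_hard_start, truesize);
        if (unlikely(!skb))
                return NULL;

        /* headroom/length bookkeeping is identical to build_skb() */
        skb_reserve(skb, xdp->data - xdp->data_hard_start);
        __skb_put(skb, xdp->data_end - xdp->data);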
@@ -1055,16 +1045,15 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
return IXGBEVF_XDP_TX;
}
-static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring,
- struct xdp_buff *xdp)
+static int ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring,
+ struct xdp_buff *xdp)
{
int result = IXGBEVF_XDP_PASS;
struct ixgbevf_ring *xdp_ring;
struct bpf_prog *xdp_prog;
u32 act;
- rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
if (!xdp_prog)
@@ -1077,35 +1066,49 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
case XDP_TX:
xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
+ if (result == IXGBEVF_XDP_CONSUMED)
+ goto out_failure;
break;
default:
- bpf_warn_invalid_xdp_action(act);
- /* fallthrough */
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
+ fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- /* fallthrough -- handle aborts by dropping packet */
+ fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
result = IXGBEVF_XDP_CONSUMED;
break;
}
xdp_out:
- rcu_read_unlock();
- return ERR_PTR(-result);
+ return result;
+}
+
+static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring,
+ unsigned int size)
+{
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = ixgbevf_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+ truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ SKB_DATA_ALIGN(size);
+#endif
+ return truesize;
}
static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
struct ixgbevf_rx_buffer *rx_buffer,
unsigned int size)
{
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
+ unsigned int truesize = ixgbevf_rx_frame_truesize(rx_ring, size);
+#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
- unsigned int truesize = ring_uses_build_skb(rx_ring) ?
- SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
- SKB_DATA_ALIGN(size);
-
rx_buffer->page_offset += truesize;
#endif
}
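
Two notes on the ixgbevf_run_xdp() rework above. The rcu_read_lock()/unlock() pair could be dropped because NAPI poll already runs inside a softirq/RCU read-side section, and returning a plain verdict instead of an ERR_PTR-encoded skb is what lets the caller keep xdp_res and skb separate, removing the IS_ERR() checks later in this diff. A sketch of the resulting calling convention, using the same IXGBEVF_XDP_* codes:

        int xdp_res = ixgbevf_run_xdp(adapter, rx_ring, &xdp);

        switch (xdp_res) {
        case IXGBEVF_XDP_PASS:          /* fall through to the skb path */
                break;
        case IXGBEVF_XDP_TX:            /* buffer handed to the XDP Tx ring */
        case IXGBEVF_XDP_CONSUMED:      /* dropped; page gets recycled */
                break;
        }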
@@ -1114,14 +1117,19 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *rx_ring,
int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
struct ixgbevf_adapter *adapter = q_vector->adapter;
u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
struct sk_buff *skb = rx_ring->skb;
bool xdp_xmit = false;
struct xdp_buff xdp;
+ int xdp_res = 0;
- xdp.rxq = &rx_ring->xdp_rxq;
+	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+ frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
+#endif
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
while (likely(total_rx_packets < budget)) {
struct ixgbevf_rx_buffer *rx_buffer;
@@ -1149,18 +1157,21 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
/* retrieve a buffer from the ring */
if (!skb) {
- xdp.data = page_address(rx_buffer->page) +
- rx_buffer->page_offset;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data -
- ixgbevf_rx_offset(rx_ring);
- xdp.data_end = xdp.data + size;
-
- skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
+ unsigned int offset = ixgbevf_rx_offset(rx_ring);
+ unsigned char *hard_start;
+
+ hard_start = page_address(rx_buffer->page) +
+ rx_buffer->page_offset - offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+#if (PAGE_SIZE > 4096)
+	/* At larger PAGE_SIZE, frame_sz depends on the frame length */
+ xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
+#endif
+ xdp_res = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
}
- if (IS_ERR(skb)) {
- if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
+ if (xdp_res) {
+ if (xdp_res == IXGBEVF_XDP_TX) {
xdp_xmit = true;
ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
size);
@@ -1180,7 +1191,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
}
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
rx_buffer->pagecnt_bias++;
break;
@@ -1194,7 +1205,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
continue;
/* verify the packet layout is correct */
- if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
+ if (xdp_res || ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL;
continue;
}
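
xdp_init_buff()/xdp_prepare_buff() replace the open-coded assignments of the xdp_buff fields; their declared shapes (from include/net/xdp.h):

        void xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz,
                           struct xdp_rxq_info *rxq);
        void xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
                              int headroom, int data_len,
                              const bool meta_valid);

Passing meta_valid == true makes data_meta start at data, matching the old xdp.data_meta = xdp.data assignment.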
@@ -1422,6 +1433,9 @@ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
*/
/* what was last interrupt timeslice? */
timepassed_us = q_vector->itr >> 2;
+ if (timepassed_us == 0)
+ return;
+
bytes_perint = bytes / timepassed_us; /* bytes/usec */
switch (itr_setting) {
@@ -1971,14 +1985,15 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
return;
- set_ring_build_skb_enabled(rx_ring);
+ if (PAGE_SIZE < 8192)
+ if (max_frame > IXGBEVF_MAX_FRAME_BUILD_SKB)
+ set_ring_uses_large_buffer(rx_ring);
- if (PAGE_SIZE < 8192) {
- if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
- return;
+ /* 82599 can't rely on RXDCTL.RLPML to restrict the size of the frame */
+ if (adapter->hw.mac.type == ixgbe_mac_82599_vf && !ring_uses_large_buffer(rx_ring))
+ return;
- set_ring_uses_large_buffer(rx_ring);
- }
+ set_ring_build_skb_enabled(rx_ring);
}
/**
@@ -2030,12 +2045,16 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
spin_unlock_bh(&adapter->mbx_lock);
- /* translate error return types so error makes sense */
- if (err == IXGBE_ERR_MBX)
- return -EIO;
+ if (err) {
+ netdev_err(netdev, "VF could not set VLAN %d\n", vid);
+
+ /* translate error return types so error makes sense */
+ if (err == IXGBE_ERR_MBX)
+ return -EIO;
- if (err == IXGBE_ERR_INVALID_ARGUMENT)
- return -EACCES;
+ if (err == IXGBE_ERR_INVALID_ARGUMENT)
+ return -EACCES;
+ }
set_bit(vid, adapter->active_vlans);
@@ -2056,6 +2075,9 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
spin_unlock_bh(&adapter->mbx_lock);
+ if (err)
+ netdev_err(netdev, "Could not remove VLAN %d\n", vid);
+
clear_bit(vid, adapter->active_vlans);
return err;
@@ -2076,11 +2098,6 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
struct ixgbe_hw *hw = &adapter->hw;
int count = 0;
- if ((netdev_uc_count(netdev)) > 10) {
- pr_err("Too many unicast filters - No Space\n");
- return -ENOSPC;
- }
-
if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha;
@@ -2254,15 +2271,44 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
+/**
+ * ixgbevf_set_features - Set features supported by PF
+ * @adapter: pointer to the adapter struct
+ *
+ * Negotiate with PF supported features and then set pf_features accordingly.
+ */
+static void ixgbevf_set_features(struct ixgbevf_adapter *adapter)
+{
+ u32 *pf_features = &adapter->pf_features;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ err = hw->mac.ops.negotiate_features(hw, pf_features);
+ if (err && err != -EOPNOTSUPP)
+ netdev_dbg(adapter->netdev,
+ "PF feature negotiation failed.\n");
+
+ /* Address also pre API 1.7 cases */
+ if (hw->api_version == ixgbe_mbox_api_14)
+ *pf_features |= IXGBEVF_PF_SUP_IPSEC;
+ else if (hw->api_version == ixgbe_mbox_api_15)
+ *pf_features |= IXGBEVF_PF_SUP_ESX_MBX;
+}
+
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- int api[] = { ixgbe_mbox_api_14,
- ixgbe_mbox_api_13,
- ixgbe_mbox_api_12,
- ixgbe_mbox_api_11,
- ixgbe_mbox_api_10,
- ixgbe_mbox_api_unknown };
+ static const int api[] = {
+ ixgbe_mbox_api_17,
+ ixgbe_mbox_api_16,
+ ixgbe_mbox_api_15,
+ ixgbe_mbox_api_14,
+ ixgbe_mbox_api_13,
+ ixgbe_mbox_api_12,
+ ixgbe_mbox_api_11,
+ ixgbe_mbox_api_10,
+ ixgbe_mbox_api_unknown
+ };
int err, idx = 0;
spin_lock_bh(&adapter->mbx_lock);
@@ -2274,13 +2320,23 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
idx++;
}
+ ixgbevf_set_features(adapter);
+
+ if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX) {
+ hw->mbx.ops.init_params(hw);
+ memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
+ sizeof(struct ixgbe_mbx_operations));
+ }
+
spin_unlock_bh(&adapter->mbx_lock);
}
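
The api[] array is walked in order, so the VF settles on the newest mailbox API the PF acknowledges; the loop this table feeds (abbreviated from the surrounding function):

        spin_lock_bh(&adapter->mbx_lock);

        while (api[idx] != ixgbe_mbox_api_unknown) {
                err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
                if (!err)
                        break;
                idx++;
        }

When the negotiated features include IXGBEVF_PF_SUP_ESX_MBX, the hunk above swaps in ixgbevf_mbx_ops under the same mbx_lock, replacing the legacy ops installed at probe time.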
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
struct ixgbe_hw *hw = &adapter->hw;
+ bool state;
ixgbevf_configure_msix(adapter);
@@ -2293,6 +2349,11 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
spin_unlock_bh(&adapter->mbx_lock);
+ state = adapter->link_state;
+ hw->mac.ops.get_link_state(hw, &adapter->link_state);
+ if (state && state != adapter->link_state)
+ dev_info(&pdev->dev, "VF is administratively disabled\n");
+
smp_mb__before_atomic();
clear_bit(__IXGBEVF_DOWN, &adapter->state);
ixgbevf_napi_enable_all(adapter);
@@ -2481,7 +2542,7 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
ixgbevf_napi_disable_all(adapter);
- del_timer_sync(&adapter->service_timer);
+ timer_delete_sync(&adapter->service_timer);
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -2507,12 +2568,11 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
- WARN_ON(in_interrupt());
-
while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
msleep(1);
ixgbevf_down(adapter);
+ pci_set_master(adapter->pdev);
ixgbevf_up(adapter);
clear_bit(__IXGBEVF_RESETTING, &adapter->state);
@@ -2531,7 +2591,7 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
}
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
- ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
}
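
netdev->dev_addr is const in current kernels (the address lives in a shared lookup structure), so drivers must use the setter rather than copying into the field; for Ethernet the helper is roughly (paraphrased from include/linux/etherdevice.h):

        static inline void eth_hw_addr_set(struct net_device *dev,
                                           const u8 *addr)
        {
                __dev_addr_set(dev, addr, ETH_ALEN);
        }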
@@ -2582,7 +2642,7 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
* important, starting with the "most" number of features turned on at once,
* and ending with the smallest set of features. This way large combinations
* can be allocated if they're turned on, and smaller combinations are the
- * fallthrough conditions.
+ * fall through conditions.
*
**/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
@@ -2618,6 +2678,9 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
if (adapter->xdp_prog &&
hw->mac.max_tx_queues == rss)
rss = rss > 3 ? 2 : 1;
@@ -2625,6 +2688,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
adapter->num_rx_queues = rss;
adapter->num_tx_queues = rss;
adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0;
+ break;
default:
break;
}
@@ -2707,7 +2771,7 @@ static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
@@ -2734,7 +2798,7 @@ static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
ring->reg_idx = reg_idx;
/* assign ring to adapter */
- adapter->tx_ring[txr_idx] = ring;
+ adapter->tx_ring[txr_idx] = ring;
/* update count and index */
txr_count--;
@@ -3044,7 +3108,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
else if (is_zero_ether_addr(adapter->hw.mac.addr))
dev_info(&pdev->dev,
"MAC address not assigned by administrator.\n");
- ether_addr_copy(netdev->dev_addr, hw->mac.addr);
+ eth_hw_addr_set(netdev, hw->mac.addr);
}
if (!is_valid_ether_addr(netdev->dev_addr)) {
@@ -3062,6 +3126,8 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
+ adapter->link_state = true;
+
set_bit(__IXGBEVF_DOWN, &adapter->state);
return 0;
@@ -3140,8 +3206,8 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
**/
static void ixgbevf_service_timer(struct timer_list *t)
{
- struct ixgbevf_adapter *adapter = from_timer(adapter, t,
- service_timer);
+ struct ixgbevf_adapter *adapter = timer_container_of(adapter, t,
+ service_timer);
/* Reset the timer */
mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
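
timer_container_of() is the renamed from_timer(): a type-safe container_of() keyed on the embedded timer_list. Equivalent expansion, assuming service_timer is the member inside struct ixgbevf_adapter:

        struct ixgbevf_adapter *adapter =
                container_of(t, struct ixgbevf_adapter, service_timer);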
@@ -3294,7 +3360,7 @@ static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
ixgbevf_watchdog_update_link(adapter);
- if (adapter->link_up)
+ if (adapter->link_up && adapter->link_state)
ixgbevf_watchdog_link_is_up(adapter);
else
ixgbevf_watchdog_link_is_down(adapter);
@@ -3479,7 +3545,7 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
/* XDP RX-queue info */
if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
- rx_ring->queue_index) < 0)
+ rx_ring->queue_index, 0) < 0)
goto err;
rx_ring->xdp_prog = adapter->xdp_prog;
@@ -3805,7 +3871,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+ csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
@@ -3830,15 +3896,6 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
return 1;
}
-static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first,
struct ixgbevf_ipsec_tx_data *itd)
@@ -3854,19 +3911,16 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
switch (skb->csum_offset) {
case offsetof(struct tcphdr, check):
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
- /* fall through */
+ fallthrough;
case offsetof(struct udphdr, check):
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if (((first->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- ((first->protocol == htons(ETH_P_IPV6)) &&
- ixgbevf_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
}
- /* fall through */
+ fallthrough;
default:
skb_checksum_help(skb);
goto no_csum;
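
The driver-local IPv6 SCTP header walk is replaced by the core helper, which tests a flag recorded when checksum offload was set up instead of re-parsing headers; at the time the helper was introduced it read roughly (paraphrased from include/linux/skbuff.h):

        static inline bool skb_csum_is_sctp(struct sk_buff *skb)
        {
                return skb->csum_not_inet;
        }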
@@ -3945,7 +3999,7 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
struct sk_buff *skb = first->skb;
struct ixgbevf_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
dma_addr_t dma;
unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
@@ -4130,8 +4184,11 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
* otherwise try next time
*/
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+ count += TXD_USE_COUNT(skb_frag_size(frag));
+ }
#else
count += skb_shinfo(skb)->nr_frags;
#endif
@@ -4157,7 +4214,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
first->protocol = vlan_get_protocol(skb);
#ifdef CONFIG_IXGBEVF_IPSEC
- if (secpath_exists(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
+ if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
goto out_drop;
#endif
tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
@@ -4230,7 +4287,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
ether_addr_copy(hw->mac.addr, addr->sa_data);
ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->sa_data);
return 0;
}
@@ -4266,7 +4323,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu, new_mtu);
/* must set new MTU before calling down or up */
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
ixgbevf_reinit_locked(adapter);
@@ -4274,13 +4331,10 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
+static int ixgbevf_suspend(struct device *dev_d)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev_d);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_PM
- int retval = 0;
-#endif
rtnl_lock();
netif_device_detach(netdev);
@@ -4291,36 +4345,15 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
ixgbevf_clear_interrupt_scheme(adapter);
rtnl_unlock();
-#ifdef CONFIG_PM
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
-#endif
- if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
- pci_disable_device(pdev);
-
return 0;
}
-#ifdef CONFIG_PM
-static int ixgbevf_resume(struct pci_dev *pdev)
+static int ixgbevf_resume(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- u32 err;
-
- pci_restore_state(pdev);
- /* pci_restore_state clears dev->state_saved so call
- * pci_save_state to restore it.
- */
- pci_save_state(pdev);
-
- err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
- return err;
- }
+ int err;
adapter->hw.hw_addr = adapter->io_addr;
smp_mb__before_atomic();
@@ -4342,10 +4375,9 @@ static int ixgbevf_resume(struct pci_dev *pdev)
return err;
}
-#endif /* CONFIG_PM */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
- ixgbevf_suspend(pdev, PMSG_SUSPEND);
+ ixgbevf_suspend(&pdev->dev);
}
static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
@@ -4356,10 +4388,10 @@ static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
if (ring) {
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
bytes = ring->stats.bytes;
packets = ring->stats.packets;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
stats->tx_bytes += bytes;
stats->tx_packets += packets;
}
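
The _irq suffix disappeared when the u64_stats reader API was reworked; the fetch/retry loop above pairs with a writer side that follows this generic pattern (not a hunk from this diff):

        u64_stats_update_begin(&ring->syncp);
        ring->stats.packets++;
        ring->stats.bytes += skb->len;
        u64_stats_update_end(&ring->syncp);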
@@ -4382,10 +4414,10 @@ static void ixgbevf_get_stats(struct net_device *netdev,
for (i = 0; i < adapter->num_rx_queues; i++) {
ring = adapter->rx_ring[i];
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
bytes = ring->stats.bytes;
packets = ring->stats.packets;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
stats->rx_bytes += bytes;
stats->rx_packets += packets;
}
@@ -4412,7 +4444,7 @@ ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
@@ -4479,15 +4511,9 @@ static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
- struct ixgbevf_adapter *adapter = netdev_priv(dev);
-
switch (xdp->command) {
case XDP_SETUP_PROG:
return ixgbevf_xdp_setup(dev, xdp->prog);
- case XDP_QUERY_PROG:
- xdp->prog_id = adapter->xdp_prog ?
- adapter->xdp_prog->aux->id : 0;
- return 0;
default:
return -EINVAL;
}
@@ -4533,22 +4559,17 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgbevf_adapter *adapter = NULL;
struct ixgbe_hw *hw = NULL;
const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
- int err, pci_using_dac;
bool disable_dev = false;
+ int err;
err = pci_enable_device(pdev);
if (err)
return err;
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
- pci_using_dac = 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_regions(pdev, ixgbevf_driver_name);
@@ -4595,7 +4616,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
hw->mac.type = ii->mac;
- memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
+ memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy,
sizeof(struct ixgbe_mbx_operations));
/* setup the private structure */
@@ -4628,10 +4649,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_GSO_PARTIAL |
IXGBEVF_GSO_PARTIAL_FEATURES;
- netdev->features = netdev->hw_features;
-
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_SG |
@@ -4647,6 +4665,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_VLAN_CTAG_TX;
netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
/* MTU range: 68 - 1504 or 9710 */
netdev->min_mtu = ETH_MIN_MTU;
@@ -4655,6 +4674,9 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
(ETH_HLEN + ETH_FCS_LEN);
break;
@@ -4705,6 +4727,9 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mac_X540_vf:
dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
break;
+ case ixgbe_mac_e610_vf:
+ dev_info(&pdev->dev, "Intel(R) E610 Virtual Function\n");
+ break;
case ixgbe_mac_82599_vf:
default:
dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
@@ -4806,7 +4831,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
pci_disable_device(pdev);
rtnl_unlock();
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -4865,16 +4890,17 @@ static const struct pci_error_handlers ixgbevf_err_handler = {
.resume = ixgbevf_io_resume,
};
+static DEFINE_SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume);
+
static struct pci_driver ixgbevf_driver = {
.name = ixgbevf_driver_name,
.id_table = ixgbevf_pci_tbl,
.probe = ixgbevf_probe,
.remove = ixgbevf_remove,
-#ifdef CONFIG_PM
+
/* Power Management Hooks */
- .suspend = ixgbevf_suspend,
- .resume = ixgbevf_resume,
-#endif
+ .driver.pm = pm_sleep_ptr(&ixgbevf_pm_ops),
+
.shutdown = ixgbevf_shutdown,
.err_handler = &ixgbevf_err_handler
};
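
DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr() replaces the CONFIG_PM ifdeffery: the macro builds a dev_pm_ops covering the system-sleep callbacks, and pm_sleep_ptr() compiles the reference away when CONFIG_PM_SLEEP is off. Roughly equivalent to:

        static const struct dev_pm_ops ixgbevf_pm_ops = {
                SYSTEM_SLEEP_PM_OPS(ixgbevf_suspend, ixgbevf_resume)
        };

        /* pm_sleep_ptr(&ixgbevf_pm_ops) evaluates to &ixgbevf_pm_ops when
         * CONFIG_PM_SLEEP is set and to NULL otherwise.
         */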
@@ -4887,9 +4913,9 @@ static struct pci_driver ixgbevf_driver = {
**/
static int __init ixgbevf_init_module(void)
{
- pr_info("%s - version %s\n", ixgbevf_driver_string,
- ixgbevf_driver_version);
+ int err;
+ pr_info("%s\n", ixgbevf_driver_string);
pr_info("%s\n", ixgbevf_copyright);
ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
if (!ixgbevf_wq) {
@@ -4897,7 +4923,13 @@ static int __init ixgbevf_init_module(void)
return -ENOMEM;
}
- return pci_register_driver(&ixgbevf_driver);
+ err = pci_register_driver(&ixgbevf_driver);
+ if (err) {
+ destroy_workqueue(ixgbevf_wq);
+ return err;
+ }
+
+ return 0;
}
module_init(ixgbevf_init_module);