Diffstat (limited to 'drivers/net/ethernet/sfc/efx.c')
-rw-r--r--   drivers/net/ethernet/sfc/efx.c   2920
1 file changed, 644 insertions, 2276 deletions
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index c72968840f1a..112e55b98ed3 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1,13 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2011 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
+ * Copyright 2005-2013 Solarflare Communications Inc.
  */
 
+#include <linux/filter.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/netdevice.h>
@@ -17,88 +15,43 @@
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/in.h>
-#include <linux/crc32.h>
 #include <linux/ethtool.h>
 #include <linux/topology.h>
 #include <linux/gfp.h>
-#include <linux/aer.h>
 #include <linux/interrupt.h>
 #include "net_driver.h"
+#include <net/gre.h>
+#include <net/udp_tunnel.h>
+#include <net/netdev_queues.h>
 #include "efx.h"
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "ef100.h"
+#include "rx_common.h"
+#include "tx_common.h"
 #include "nic.h"
+#include "io.h"
 #include "selftest.h"
+#include "sriov.h"
+#include "efx_devlink.h"
-#include "mcdi.h"
+#include "mcdi_port_common.h"
+#include "mcdi_pcol.h"
 #include "workarounds.h"
 
 /**************************************************************************
  *
- * Type name strings
- *
- **************************************************************************
- */
-
-/* Loopback mode names (see LOOPBACK_MODE()) */
-const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
-const char *const efx_loopback_mode_names[] = {
- [LOOPBACK_NONE] = "NONE",
- [LOOPBACK_DATA] = "DATAPATH",
- [LOOPBACK_GMAC] = "GMAC",
- [LOOPBACK_XGMII] = "XGMII",
- [LOOPBACK_XGXS] = "XGXS",
- [LOOPBACK_XAUI] = "XAUI",
- [LOOPBACK_GMII] = "GMII",
- [LOOPBACK_SGMII] = "SGMII",
- [LOOPBACK_XGBR] = "XGBR",
- [LOOPBACK_XFI] = "XFI",
- [LOOPBACK_XAUI_FAR] = "XAUI_FAR",
- [LOOPBACK_GMII_FAR] = "GMII_FAR",
- [LOOPBACK_SGMII_FAR] = "SGMII_FAR",
- [LOOPBACK_XFI_FAR] = "XFI_FAR",
- [LOOPBACK_GPHY] = "GPHY",
- [LOOPBACK_PHYXS] = "PHYXS",
- [LOOPBACK_PCS] = "PCS",
- [LOOPBACK_PMAPMD] = "PMA/PMD",
- [LOOPBACK_XPORT] = "XPORT",
- [LOOPBACK_XGMII_WS] = "XGMII_WS",
- [LOOPBACK_XAUI_WS] = "XAUI_WS",
- [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
- [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
- [LOOPBACK_GMII_WS] = "GMII_WS",
- [LOOPBACK_XFI_WS] = "XFI_WS",
- [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
- [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
-};
-
-const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
-const char *const efx_reset_type_names[] = {
- [RESET_TYPE_INVISIBLE] = "INVISIBLE",
- [RESET_TYPE_ALL] = "ALL",
- [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
- [RESET_TYPE_WORLD] = "WORLD",
- [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
- [RESET_TYPE_DISABLE] = "DISABLE",
- [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
- [RESET_TYPE_INT_ERROR] = "INT_ERROR",
- [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
- [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
- [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
- [RESET_TYPE_TX_SKIP] = "TX_SKIP",
- [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
-};
-
-/* Reset workqueue. If any NIC has a hardware failure then a reset will be
- * queued onto this work queue. This is not a per-nic work queue, because
- * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
- */
-static struct workqueue_struct *reset_workqueue;
-
-/**************************************************************************
- *
  * Configurable values
  *
  *************************************************************************/
 
+module_param_named(interrupt_mode, efx_interrupt_mode, uint, 0444);
+MODULE_PARM_DESC(interrupt_mode,
+ "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
+
+module_param(rss_cpus, uint, 0444);
+MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
+
 /*
  * Use separate channels for TX and RX events
  *
@@ -107,26 +60,11 @@ static struct workqueue_struct *reset_workqueue;
 *
 * This is only used in MSI-X interrupt mode
 */
-static bool separate_tx_channels;
-module_param(separate_tx_channels, bool, 0444);
-MODULE_PARM_DESC(separate_tx_channels,
+bool efx_separate_tx_channels;
+module_param(efx_separate_tx_channels, bool, 0444);
+MODULE_PARM_DESC(efx_separate_tx_channels,
 "Use separate channels for TX and RX");
 
-/* This is the weight assigned to each of the (per-channel) virtual
- * NAPI devices.
- */
-static int napi_weight = 64;
-
-/* This is the time (in jiffies) between invocations of the hardware
- * monitor.
- * On Falcon-based NICs, this will:
- * - Check the on-board hardware monitor;
- * - Poll the link state and reconfigure the hardware as necessary.
- * On Siena-based NICs for power systems with EEH support, this will give EEH a
- * chance to start.
- */
-static unsigned int efx_monitor_interval = 1 * HZ;
-
 /* Initial interrupt moderation settings. They can be modified after
  * module load with ethtool.
  *
@@ -146,38 +84,10 @@ static unsigned int rx_irq_mod_usec = 60;
 */
 static unsigned int tx_irq_mod_usec = 150;
 
-/* This is the first interrupt mode to try out of:
- * 0 => MSI-X
- * 1 => MSI
- * 2 => legacy
- */
-static unsigned int interrupt_mode;
-
-/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
- * i.e. the number of CPUs among which we may distribute simultaneous
- * interrupt handling.
- *
- * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
- * The default (0) means to assign an interrupt to each core.
- */
-static unsigned int rss_cpus;
-module_param(rss_cpus, uint, 0444);
-MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
-
 static bool phy_flash_cfg;
 module_param(phy_flash_cfg, bool, 0644);
 MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
 
-static unsigned irq_adapt_low_thresh = 8000;
-module_param(irq_adapt_low_thresh, uint, 0644);
-MODULE_PARM_DESC(irq_adapt_low_thresh,
- "Threshold score for reducing IRQ moderation");
-
-static unsigned irq_adapt_high_thresh = 16000;
-module_param(irq_adapt_high_thresh, uint, 0644);
-MODULE_PARM_DESC(irq_adapt_high_thresh,
- "Threshold score for increasing IRQ moderation");
-
 static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
@@ -191,712 +101,11 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
 *
 *************************************************************************/
 
-static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq);
-static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq);
-static void efx_remove_channel(struct efx_channel *channel);
-static void efx_remove_channels(struct efx_nic *efx);
-static const struct efx_channel_type efx_default_channel_type;
 static void efx_remove_port(struct efx_nic *efx);
-static void efx_init_napi_channel(struct efx_channel *channel);
-static void efx_fini_napi(struct efx_nic *efx);
-static void efx_fini_napi_channel(struct efx_channel *channel);
-static void efx_fini_struct(struct efx_nic *efx);
-static void efx_start_all(struct efx_nic *efx);
-static void efx_stop_all(struct efx_nic *efx);
-
-#define EFX_ASSERT_RESET_SERIALISED(efx) \
- do { \
- if ((efx->state == STATE_READY) || \
- (efx->state == STATE_RECOVERY) || \
- (efx->state == STATE_DISABLED)) \
- ASSERT_RTNL(); \
- } while (0)
-
-static int efx_check_disabled(struct efx_nic *efx)
-{
- if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
- netif_err(efx, drv, efx->net_dev,
- "device is disabled due to earlier errors\n");
- return -EIO;
- }
- return 0;
-}
-
-/**************************************************************************
- *
- * Event queue processing
- *
- *************************************************************************/
-
-/* Process channel's event queue
- *
- * This function is responsible for processing the event queue of a
- * single channel. The caller must guarantee that this function will
- * never be concurrently called more than once on the same channel,
- * though different channels may be being processed concurrently.
- */
-static int efx_process_channel(struct efx_channel *channel, int budget)
-{
- int spent;
-
- if (unlikely(!channel->enabled))
- return 0;
-
- spent = efx_nic_process_eventq(channel, budget);
- if (spent && efx_channel_has_rx_queue(channel)) {
- struct efx_rx_queue *rx_queue =
- efx_channel_get_rx_queue(channel);
-
- efx_rx_flush_packet(channel);
- if (rx_queue->enabled)
- efx_fast_push_rx_descriptors(rx_queue);
- }
-
- return spent;
-}
-
-/* Mark channel as finished processing
- *
- * Note that since we will not receive further interrupts for this
- * channel before we finish processing and call the eventq_read_ack()
- * method, there is no need to use the interrupt hold-off timers.
- */
-static inline void efx_channel_processed(struct efx_channel *channel)
-{
- /* The interrupt handler for this channel may set work_pending
- * as soon as we acknowledge the events we've seen. Make sure
- * it's cleared before then. */
- channel->work_pending = false;
- smp_wmb();
-
- efx_nic_eventq_read_ack(channel);
-}
-
-/* NAPI poll handler
- *
- * NAPI guarantees serialisation of polls of the same device, which
- * provides the guarantee required by efx_process_channel().
- */
-static int efx_poll(struct napi_struct *napi, int budget)
-{
- struct efx_channel *channel =
- container_of(napi, struct efx_channel, napi_str);
- struct efx_nic *efx = channel->efx;
- int spent;
-
- netif_vdbg(efx, intr, efx->net_dev,
- "channel %d NAPI poll executing on CPU %d\n",
- channel->channel, raw_smp_processor_id());
-
- spent = efx_process_channel(channel, budget);
-
- if (spent < budget) {
- if (efx_channel_has_rx_queue(channel) &&
- efx->irq_rx_adaptive &&
- unlikely(++channel->irq_count == 1000)) {
- if (unlikely(channel->irq_mod_score <
- irq_adapt_low_thresh)) {
- if (channel->irq_moderation > 1) {
- channel->irq_moderation -= 1;
- efx->type->push_irq_moderation(channel);
- }
- } else if (unlikely(channel->irq_mod_score >
- irq_adapt_high_thresh)) {
- if (channel->irq_moderation <
- efx->irq_rx_moderation) {
- channel->irq_moderation += 1;
- efx->type->push_irq_moderation(channel);
- }
- }
- channel->irq_count = 0;
- channel->irq_mod_score = 0;
- }
-
- efx_filter_rfs_expire(channel);
-
- /* There is no race here; although napi_disable() will
- * only wait for napi_complete(), this isn't a problem
- * since efx_channel_processed() will have no effect if
- * interrupts have already been disabled.
- */
- napi_complete(napi);
- efx_channel_processed(channel);
- }
-
- return spent;
-}
-
-/* Process the eventq of the specified channel immediately on this CPU
- *
- * Disable hardware generated interrupts, wait for any existing
- * processing to finish, then directly poll (and ack) the eventq.
- * Finally reenable NAPI and interrupts.
- *
- * This is for use only during a loopback self-test. It must not
- * deliver any packets up the stack as this can result in deadlock.
- */
-void efx_process_channel_now(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
-
- BUG_ON(channel->channel >= efx->n_channels);
- BUG_ON(!channel->enabled);
- BUG_ON(!efx->loopback_selftest);
-
- /* Disable interrupts and wait for ISRs to complete */
- efx_nic_disable_interrupts(efx);
- if (efx->legacy_irq) {
- synchronize_irq(efx->legacy_irq);
- efx->legacy_irq_enabled = false;
- }
- if (channel->irq)
- synchronize_irq(channel->irq);
-
- /* Wait for any NAPI processing to complete */
- napi_disable(&channel->napi_str);
-
- /* Poll the channel */
- efx_process_channel(channel, channel->eventq_mask + 1);
-
- /* Ack the eventq. This may cause an interrupt to be generated
- * when they are reenabled */
- efx_channel_processed(channel);
-
- napi_enable(&channel->napi_str);
- if (efx->legacy_irq)
- efx->legacy_irq_enabled = true;
- efx_nic_enable_interrupts(efx);
-}
-
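The efx_poll() removed above adapts RX IRQ moderation on the fly: every 1000 interrupts it compares an event-count score against the old irq_adapt_low_thresh/irq_adapt_high_thresh module parameters and steps the moderation value down or up by one. A standalone sketch of that hysteresis follows; the thresholds are the old defaults, while the scoring and the hardware push are simplified stand-ins:

/* Standalone sketch of the adaptive IRQ moderation removed from efx_poll().
 * The "score" would really be events counted over 1000 interrupts. */
#include <stdio.h>

#define IRQ_ADAPT_LOW_THRESH  8000
#define IRQ_ADAPT_HIGH_THRESH 16000
#define IRQ_MODERATION_MAX    64   /* stand-in for efx->irq_rx_moderation */

static unsigned int moderation = 20;

static void adapt(unsigned int score)
{
    if (score < IRQ_ADAPT_LOW_THRESH) {
        if (moderation > 1)
            moderation--;        /* light load: interrupt sooner */
    } else if (score > IRQ_ADAPT_HIGH_THRESH) {
        if (moderation < IRQ_MODERATION_MAX)
            moderation++;        /* heavy load: batch more events */
    }
}

int main(void)
{
    unsigned int scores[] = { 4000, 20000, 20000, 9000, 3000 };

    for (unsigned int i = 0; i < 5; i++) {
        adapt(scores[i]);
        printf("score %5u -> moderation %u\n", scores[i], moderation);
    }
    return 0;
}

The step-by-one design trades convergence speed for stability: moderation never oscillates wildly because each 1000-interrupt window can move it at most one tick.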
-/* Create event queue
- * Event queue memory allocations are done only once. If the channel
- * is reset, the memory buffer will be reused; this guards against
- * errors during channel reset and also simplifies interrupt handling.
- */
-static int efx_probe_eventq(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
- unsigned long entries;
-
- netif_dbg(efx, probe, efx->net_dev,
- "chan %d create event queue\n", channel->channel);
-
- /* Build an event queue with room for one event per tx and rx buffer,
- * plus some extra for link state events and MCDI completions. */
- entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
- EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
- channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
-
- return efx_nic_probe_eventq(channel);
-}
-
-/* Prepare channel's event queue */
-static void efx_init_eventq(struct efx_channel *channel)
-{
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "chan %d init event queue\n", channel->channel);
-
- channel->eventq_read_ptr = 0;
-
- efx_nic_init_eventq(channel);
-}
-
-/* Enable event queue processing and NAPI */
-static void efx_start_eventq(struct efx_channel *channel)
-{
- netif_dbg(channel->efx, ifup, channel->efx->net_dev,
- "chan %d start event queue\n", channel->channel);
-
- /* The interrupt handler for this channel may set work_pending
- * as soon as we enable it. Make sure it's cleared before
- * then. Similarly, make sure it sees the enabled flag set.
- */
- channel->work_pending = false;
- channel->enabled = true;
- smp_wmb();
-
- napi_enable(&channel->napi_str);
- efx_nic_eventq_read_ack(channel);
-}
-
-/* Disable event queue processing and NAPI */
-static void efx_stop_eventq(struct efx_channel *channel)
-{
- if (!channel->enabled)
- return;
-
- napi_disable(&channel->napi_str);
- channel->enabled = false;
-}
-
-static void efx_fini_eventq(struct efx_channel *channel)
-{
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "chan %d fini event queue\n", channel->channel);
-
- efx_nic_fini_eventq(channel);
-}
-
-static void efx_remove_eventq(struct efx_channel *channel)
-{
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "chan %d remove event queue\n", channel->channel);
-
- efx_nic_remove_eventq(channel);
-}
-
-/**************************************************************************
- *
- * Channel handling
- *
- *************************************************************************/
-
-/* Allocate and initialise a channel structure. */
-static struct efx_channel *
-efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
-{
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- struct efx_tx_queue *tx_queue;
- int j;
-
- channel = kzalloc(sizeof(*channel), GFP_KERNEL);
- if (!channel)
- return NULL;
-
- channel->efx = efx;
- channel->channel = i;
- channel->type = &efx_default_channel_type;
-
- for (j = 0; j < EFX_TXQ_TYPES; j++) {
- tx_queue = &channel->tx_queue[j];
- tx_queue->efx = efx;
- tx_queue->queue = i * EFX_TXQ_TYPES + j;
- tx_queue->channel = channel;
- }
-
- rx_queue = &channel->rx_queue;
- rx_queue->efx = efx;
- setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
- (unsigned long)rx_queue);
-
- return channel;
-}
-
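The sizing rule in efx_probe_eventq() above is easy to verify by hand. Assuming the default 1024-entry TX and RX rings (the actual EFX_DEFAULT_DMAQ_SIZE may differ, and the real driver also clamps against EFX_MIN/MAX_EVQ_SIZE), a minimal worked example:

/* Worked example of the event-queue sizing removed above: one event per
 * TX and RX descriptor plus 128 spare entries, rounded up to a power of
 * two so the read pointer can wrap with a simple mask. */
#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
    unsigned long p = 1;

    while (p < n)
        p <<= 1;
    return p;
}

int main(void)
{
    unsigned long rxq_entries = 1024, txq_entries = 1024;
    unsigned long entries = roundup_pow_of_two(rxq_entries + txq_entries + 128);

    printf("entries = %lu, eventq_mask = %lu\n", entries, entries - 1);
    /* prints: entries = 4096, eventq_mask = 4095 */
    return 0;
}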
-/* Allocate and initialise a channel structure, copying parameters
- * (but not resources) from an old channel structure.
- */
-static struct efx_channel *
-efx_copy_channel(const struct efx_channel *old_channel)
-{
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- struct efx_tx_queue *tx_queue;
- int j;
-
- channel = kmalloc(sizeof(*channel), GFP_KERNEL);
- if (!channel)
- return NULL;
-
- *channel = *old_channel;
-
- channel->napi_dev = NULL;
- memset(&channel->eventq, 0, sizeof(channel->eventq));
-
- for (j = 0; j < EFX_TXQ_TYPES; j++) {
- tx_queue = &channel->tx_queue[j];
- if (tx_queue->channel)
- tx_queue->channel = channel;
- tx_queue->buffer = NULL;
- memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
- }
-
- rx_queue = &channel->rx_queue;
- rx_queue->buffer = NULL;
- memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
- setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
- (unsigned long)rx_queue);
-
- return channel;
-}
-
-static int efx_probe_channel(struct efx_channel *channel)
-{
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
- int rc;
-
- netif_dbg(channel->efx, probe, channel->efx->net_dev,
- "creating channel %d\n", channel->channel);
-
- rc = channel->type->pre_probe(channel);
- if (rc)
- goto fail;
-
- rc = efx_probe_eventq(channel);
- if (rc)
- goto fail;
-
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- rc = efx_probe_tx_queue(tx_queue);
- if (rc)
- goto fail;
- }
-
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- rc = efx_probe_rx_queue(rx_queue);
- if (rc)
- goto fail;
- }
-
- channel->n_rx_frm_trunc = 0;
-
- return 0;
-
-fail:
- efx_remove_channel(channel);
- return rc;
-}
-
-static void
-efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
-{
- struct efx_nic *efx = channel->efx;
- const char *type;
- int number;
-
- number = channel->channel;
- if (efx->tx_channel_offset == 0) {
- type = "";
- } else if (channel->channel < efx->tx_channel_offset) {
- type = "-rx";
- } else {
- type = "-tx";
- number -= efx->tx_channel_offset;
- }
- snprintf(buf, len, "%s%s-%d", efx->name, type, number);
-}
-
-static void efx_set_channel_names(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- channel->type->get_name(channel,
- efx->channel_name[channel->channel],
- sizeof(efx->channel_name[0]));
-}
-
-static int efx_probe_channels(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- int rc;
-
- /* Restart special buffer allocation */
- efx->next_buffer_table = 0;
-
- /* Probe channels in reverse, so that any 'extra' channels
- * use the start of the buffer table. This allows the traffic
- * channels to be resized without moving them or wasting the
- * entries before them.
- */
- efx_for_each_channel_rev(channel, efx) {
- rc = efx_probe_channel(channel);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "failed to create channel %d\n",
- channel->channel);
- goto fail;
- }
- }
- efx_set_channel_names(efx);
-
- return 0;
-
-fail:
- efx_remove_channels(efx);
- return rc;
-}
-
-/* Channels are shutdown and reinitialised whilst the NIC is running
- * to propagate configuration changes (mtu, checksum offload), or
- * to clear hardware error conditions
- */
-static void efx_start_datapath(struct efx_nic *efx)
-{
- bool old_rx_scatter = efx->rx_scatter;
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
- struct efx_channel *channel;
- size_t rx_buf_len;
-
- /* Calculate the rx buffer allocation parameters required to
- * support the current MTU, including padding for header
- * alignment and overruns.
- */
- efx->rx_dma_len = (efx->type->rx_buffer_hash_size +
- EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
- efx->type->rx_buffer_padding);
- rx_buf_len = (sizeof(struct efx_rx_page_state) +
- NET_IP_ALIGN + efx->rx_dma_len);
- if (rx_buf_len <= PAGE_SIZE) {
- efx->rx_scatter = false;
- efx->rx_buffer_order = 0;
- } else if (efx->type->can_rx_scatter) {
- BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
- BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
- 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
- EFX_RX_BUF_ALIGNMENT) >
- PAGE_SIZE);
- efx->rx_scatter = true;
- efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
- efx->rx_buffer_order = 0;
- } else {
- efx->rx_scatter = false;
- efx->rx_buffer_order = get_order(rx_buf_len);
- }
-
- efx_rx_config_page_split(efx);
- if (efx->rx_buffer_order)
- netif_dbg(efx, drv, efx->net_dev,
- "RX buf len=%u; page order=%u batch=%u\n",
- efx->rx_dma_len, efx->rx_buffer_order,
- efx->rx_pages_per_batch);
- else
- netif_dbg(efx, drv, efx->net_dev,
- "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
- efx->rx_dma_len, efx->rx_page_buf_step,
- efx->rx_bufs_per_page, efx->rx_pages_per_batch);
-
- /* RX filters also have scatter-enabled flags */
- if (efx->rx_scatter != old_rx_scatter)
- efx_filter_update_rx_scatter(efx);
-
- /* We must keep at least one descriptor in a TX ring empty.
- * We could avoid this when the queue size does not exactly
- * match the hardware ring size, but it's not that important.
- * Therefore we stop the queue when one more skb might fill
- * the ring completely. We wake it when half way back to
- * empty.
- */
- efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
- efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
-
- /* Initialise the channels */
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel)
- efx_init_tx_queue(tx_queue);
-
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- efx_init_rx_queue(rx_queue);
- efx_nic_generate_fill_event(rx_queue);
- }
-
- WARN_ON(channel->rx_pkt_n_frags);
- }
-
- if (netif_device_present(efx->net_dev))
- netif_tx_wake_all_queues(efx->net_dev);
-}
-
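The RX sizing logic removed above picks between three layouts: whole buffers in a single page, scatter across fixed-size fragments, or high-order pages. A worked example for a 1500-byte MTU; the hash-prefix, padding and page-state sizes are illustrative stand-ins for the NIC-specific values, not the real driver constants:

/* Worked example of the RX buffer sizing in efx_start_datapath(). */
#include <stdio.h>

#define PAGE_SIZE    4096
#define NET_IP_ALIGN 2

int main(void)
{
    unsigned int hash_prefix = 16;  /* type->rx_buffer_hash_size (assumed) */
    unsigned int padding = 16;      /* type->rx_buffer_padding (assumed) */
    unsigned int max_frame = 1536;  /* ~EFX_MAX_FRAME_LEN(1500) (assumed) */
    unsigned int page_state = 64;   /* sizeof(struct efx_rx_page_state) (assumed) */

    unsigned int rx_dma_len = hash_prefix + max_frame + padding;
    unsigned int rx_buf_len = page_state + NET_IP_ALIGN + rx_dma_len;

    printf("rx_dma_len=%u rx_buf_len=%u -> %s\n", rx_dma_len, rx_buf_len,
           rx_buf_len <= PAGE_SIZE ? "single page, no scatter"
                                   : "scatter (or high-order pages)");
    return 0;
}

At 1500-byte MTU everything fits in one 4 KiB page; only jumbo MTUs push rx_buf_len past PAGE_SIZE and trigger the scatter or get_order() paths. The same hunk also sets the TX stop/wake thresholds: stop when one more maximal skb could fill the ring, wake at half of that, giving hysteresis on queue flow control.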
-static void efx_stop_datapath(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
- struct pci_dev *dev = efx->pci_dev;
- int rc;
-
- EFX_ASSERT_RESET_SERIALISED(efx);
- BUG_ON(efx->port_enabled);
-
- /* Only perform flush if dma is enabled */
- if (dev->is_busmaster && efx->state != STATE_RECOVERY) {
- rc = efx_nic_flush_queues(efx);
-
- if (rc && EFX_WORKAROUND_7803(efx)) {
- /* Schedule a reset to recover from the flush failure. The
- * descriptor caches reference memory we're about to free,
- * but falcon_reconfigure_mac_wrapper() won't reconnect
- * the MACs because of the pending reset. */
- netif_err(efx, drv, efx->net_dev,
- "Resetting to recover from flush failure\n");
- efx_schedule_reset(efx, RESET_TYPE_ALL);
- } else if (rc) {
- netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
- } else {
- netif_dbg(efx, drv, efx->net_dev,
- "successfully flushed all queues\n");
- }
- }
-
- efx_for_each_channel(channel, efx) {
- /* RX packet processing is pipelined, so wait for the
- * NAPI handler to complete. At least event queue 0
- * might be kept active by non-data events, so don't
- * use napi_synchronize() but actually disable NAPI
- * temporarily.
- */
- if (efx_channel_has_rx_queue(channel)) {
- efx_stop_eventq(channel);
- efx_start_eventq(channel);
- }
-
- efx_for_each_channel_rx_queue(rx_queue, channel)
- efx_fini_rx_queue(rx_queue);
- efx_for_each_possible_channel_tx_queue(tx_queue, channel)
- efx_fini_tx_queue(tx_queue);
- }
-}
-
-static void efx_remove_channel(struct efx_channel *channel)
-{
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
-
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "destroy chan %d\n", channel->channel);
-
- efx_for_each_channel_rx_queue(rx_queue, channel)
- efx_remove_rx_queue(rx_queue);
- efx_for_each_possible_channel_tx_queue(tx_queue, channel)
- efx_remove_tx_queue(tx_queue);
- efx_remove_eventq(channel);
- channel->type->post_remove(channel);
-}
-
-static void efx_remove_channels(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_remove_channel(channel);
-}
-
-int
-efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
-{
- struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
- u32 old_rxq_entries, old_txq_entries;
- unsigned i, next_buffer_table = 0;
- int rc;
-
- rc = efx_check_disabled(efx);
- if (rc)
- return rc;
-
- /* Not all channels should be reallocated. We must avoid
- * reallocating their buffer table entries.
- */
- efx_for_each_channel(channel, efx) {
- struct efx_rx_queue *rx_queue;
- struct efx_tx_queue *tx_queue;
-
- if (channel->type->copy)
- continue;
- next_buffer_table = max(next_buffer_table,
- channel->eventq.index +
- channel->eventq.entries);
- efx_for_each_channel_rx_queue(rx_queue, channel)
- next_buffer_table = max(next_buffer_table,
- rx_queue->rxd.index +
- rx_queue->rxd.entries);
- efx_for_each_channel_tx_queue(tx_queue, channel)
- next_buffer_table = max(next_buffer_table,
- tx_queue->txd.index +
- tx_queue->txd.entries);
- }
-
- efx_device_detach_sync(efx);
- efx_stop_all(efx);
- efx_stop_interrupts(efx, true);
-
- /* Clone channels (where possible) */
- memset(other_channel, 0, sizeof(other_channel));
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- if (channel->type->copy)
- channel = channel->type->copy(channel);
- if (!channel) {
- rc = -ENOMEM;
- goto out;
- }
- other_channel[i] = channel;
- }
-
- /* Swap entry counts and channel pointers */
- old_rxq_entries = efx->rxq_entries;
- old_txq_entries = efx->txq_entries;
- efx->rxq_entries = rxq_entries;
- efx->txq_entries = txq_entries;
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- efx->channel[i] = other_channel[i];
- other_channel[i] = channel;
- }
-
- /* Restart buffer table allocation */
- efx->next_buffer_table = next_buffer_table;
-
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- if (!channel->type->copy)
- continue;
- rc = efx_probe_channel(channel);
- if (rc)
- goto rollback;
- efx_init_napi_channel(efx->channel[i]);
- }
-
-out:
- /* Destroy unused channel structures */
- for (i = 0; i < efx->n_channels; i++) {
- channel = other_channel[i];
- if (channel && channel->type->copy) {
- efx_fini_napi_channel(channel);
- efx_remove_channel(channel);
- kfree(channel);
- }
- }
-
- efx_start_interrupts(efx, true);
- efx_start_all(efx);
- netif_device_attach(efx->net_dev);
- return rc;
-
-rollback:
- /* Swap back */
- efx->rxq_entries = old_rxq_entries;
- efx->txq_entries = old_txq_entries;
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- efx->channel[i] = other_channel[i];
- other_channel[i] = channel;
- }
- goto out;
-}
-
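efx_realloc_channels() above uses a swap-and-rollback idiom worth noting: replacement channels are built on the side, the live and spare pointer arrays are swapped, and on a probe failure they are swapped back so that the shared "out" path always frees whichever set is unused. A minimal sketch of the idiom, with an invented failure:

/* Sketch of the swap/rollback pattern in efx_realloc_channels(). */
#include <stdio.h>

#define N 4

int main(void)
{
    int old_rings[N] = { 512, 512, 512, 512 };
    int new_rings[N] = { 1024, 1024, 1024, 1024 };
    int *live = old_rings, *spare = new_rings, *tmp;
    int probe_failed = 1;   /* pretend re-probing a channel failed */

    /* swap in the new set */
    tmp = live; live = spare; spare = tmp;

    if (probe_failed) {
        /* rollback: swap back so the unused set is the one destroyed */
        tmp = live; live = spare; spare = tmp;
    }
    printf("live ring size: %d\n", live[0]);    /* 512 after rollback */
    return 0;
}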
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
-{
- mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
-}
-
-static const struct efx_channel_type efx_default_channel_type = {
- .pre_probe = efx_channel_dummy_op_int,
- .post_remove = efx_channel_dummy_op_void,
- .get_name = efx_get_channel_name,
- .copy = efx_copy_channel,
- .keep_eventq = false,
-};
-
-int efx_channel_dummy_op_int(struct efx_channel *channel)
-{
- return 0;
-}
-
-void efx_channel_dummy_op_void(struct efx_channel *channel)
-{
-}
+static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
+static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
+ u32 flags);
 
 /**************************************************************************
 *
@@ -904,132 +113,8 @@ void efx_channel_dummy_op_void(struct efx_channel *channel)
 *
 **************************************************************************/
 
-/* This ensures that the kernel is kept informed (via
- * netif_carrier_on/off) of the link status, and also maintains the
- * link status's stop on the port's TX queue.
- */
-void efx_link_status_changed(struct efx_nic *efx)
-{
- struct efx_link_state *link_state = &efx->link_state;
-
- /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
- * that no events are triggered between unregister_netdev() and the
- * driver unloading. A more general condition is that NETDEV_CHANGE
- * can only be generated between NETDEV_UP and NETDEV_DOWN */
- if (!netif_running(efx->net_dev))
- return;
-
- if (link_state->up != netif_carrier_ok(efx->net_dev)) {
- efx->n_link_state_changes++;
-
- if (link_state->up)
- netif_carrier_on(efx->net_dev);
- else
- netif_carrier_off(efx->net_dev);
- }
-
- /* Status message for kernel log */
- if (link_state->up)
- netif_info(efx, link, efx->net_dev,
- "link up at %uMbps %s-duplex (MTU %d)%s\n",
- link_state->speed, link_state->fd ? "full" : "half",
- efx->net_dev->mtu,
- (efx->promiscuous ? " [PROMISC]" : ""));
- else
- netif_info(efx, link, efx->net_dev, "link down\n");
-}
-
-void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
-{
- efx->link_advertising = advertising;
- if (advertising) {
- if (advertising & ADVERTISED_Pause)
- efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
- else
- efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
- if (advertising & ADVERTISED_Asym_Pause)
- efx->wanted_fc ^= EFX_FC_TX;
- }
-}
-
-void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
-{
- efx->wanted_fc = wanted_fc;
- if (efx->link_advertising) {
- if (wanted_fc & EFX_FC_RX)
- efx->link_advertising |= (ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
- else
- efx->link_advertising &= ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
- if (wanted_fc & EFX_FC_TX)
- efx->link_advertising ^= ADVERTISED_Asym_Pause;
- }
-}
-
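The OR-then-XOR trick in the removed efx_link_set_wanted_fc() encodes the standard 802.3 pause advertising table. A truth-table check, using illustrative flag values rather than the real ethtool constants:

/* Truth table for the pause advertising logic removed above. */
#include <stdio.h>

#define EFX_FC_RX      0x1
#define EFX_FC_TX      0x2
#define ADV_PAUSE      0x1
#define ADV_ASYM_PAUSE 0x2

static unsigned int advertising_for(unsigned int wanted_fc)
{
    unsigned int adv = 0;

    if (wanted_fc & EFX_FC_RX)
        adv |= ADV_PAUSE | ADV_ASYM_PAUSE;
    if (wanted_fc & EFX_FC_TX)
        adv ^= ADV_ASYM_PAUSE;      /* flip the asymmetric bit */
    return adv;
}

int main(void)
{
    const char *names[] = { "none", "RX", "TX", "RX+TX" };

    for (unsigned int fc = 0; fc <= 3; fc++)
        printf("%-5s -> Pause=%u Asym_Pause=%u\n", names[fc],
               !!(advertising_for(fc) & ADV_PAUSE),
               !!(advertising_for(fc) & ADV_ASYM_PAUSE));
    return 0;
    /* none -> 0,0   RX -> 1,1   TX -> 0,1   RX+TX -> 1,0 */
}

This matches the IEEE pause encoding: symmetric pause for RX+TX, Pause|Asym for RX-only, Asym alone for TX-only.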
 static void efx_fini_port(struct efx_nic *efx);
 
-/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
- * the MAC appropriately. All other PHY configuration changes are pushed
- * through phy_op->set_settings(), and pushed asynchronously to the MAC
- * through efx_monitor().
- *
- * Callers must hold the mac_lock
- */
-int __efx_reconfigure_port(struct efx_nic *efx)
-{
- enum efx_phy_mode phy_mode;
- int rc;
-
- WARN_ON(!mutex_is_locked(&efx->mac_lock));
-
- /* Serialise the promiscuous flag with efx_set_rx_mode. */
- netif_addr_lock_bh(efx->net_dev);
- netif_addr_unlock_bh(efx->net_dev);
-
- /* Disable PHY transmit in mac level loopbacks */
- phy_mode = efx->phy_mode;
- if (LOOPBACK_INTERNAL(efx))
- efx->phy_mode |= PHY_MODE_TX_DISABLED;
- else
- efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
-
- rc = efx->type->reconfigure_port(efx);
-
- if (rc)
- efx->phy_mode = phy_mode;
-
- return rc;
-}
-
-/* Reinitialise the MAC to pick up new PHY settings, even if the port is
- * disabled. */
-int efx_reconfigure_port(struct efx_nic *efx)
-{
- int rc;
-
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- mutex_lock(&efx->mac_lock);
- rc = __efx_reconfigure_port(efx);
- mutex_unlock(&efx->mac_lock);
-
- return rc;
-}
-
-/* Asynchronous work item for changing MAC promiscuity and multicast
- * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
- * MAC directly. */
-static void efx_mac_work(struct work_struct *data)
-{
- struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
-
- mutex_lock(&efx->mac_lock);
- if (efx->port_enabled)
- efx->type->reconfigure_mac(efx);
- mutex_unlock(&efx->mac_lock);
-}
-
 static int efx_probe_port(struct efx_nic *efx)
 {
 int rc;
@@ -1045,7 +130,7 @@ static int efx_probe_port(struct efx_nic *efx)
 return rc;
 
 /* Initialise MAC address to permanent address */
- memcpy(efx->net_dev->dev_addr, efx->net_dev->perm_addr, ETH_ALEN);
+ eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr);
 
 return 0;
 }
@@ -1058,60 +143,21 @@ static int efx_init_port(struct efx_nic *efx)
 
 mutex_lock(&efx->mac_lock);
 
- rc = efx->phy_op->init(efx);
- if (rc)
- goto fail1;
-
 efx->port_initialized = true;
 
- /* Reconfigure the MAC before creating dma queues (required for
- * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
- efx->type->reconfigure_mac(efx);
-
- /* Ensure the PHY advertises the correct flow control settings */
- rc = efx->phy_op->reconfigure(efx);
- if (rc)
- goto fail2;
+ rc = efx_mcdi_port_reconfigure(efx);
+ if (rc && rc != -EPERM)
+ goto fail;
 
 mutex_unlock(&efx->mac_lock);
 return 0;
 
-fail2:
- efx->phy_op->fini(efx);
-fail1:
+fail:
 mutex_unlock(&efx->mac_lock);
 return rc;
 }
 
-static void efx_start_port(struct efx_nic *efx)
-{
- netif_dbg(efx, ifup, efx->net_dev, "start port\n");
- BUG_ON(efx->port_enabled);
-
- mutex_lock(&efx->mac_lock);
- efx->port_enabled = true;
-
- /* efx_mac_work() might have been scheduled after efx_stop_port(),
- * and then cancelled by efx_flush_all() */
- efx->type->reconfigure_mac(efx);
-
- mutex_unlock(&efx->mac_lock);
-}
-
-/* Prevent efx_mac_work() and efx_monitor() from working */
-static void efx_stop_port(struct efx_nic *efx)
-{
- netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
-
- mutex_lock(&efx->mac_lock);
- efx->port_enabled = false;
- mutex_unlock(&efx->mac_lock);
-
- /* Serialise against efx_set_multicast_list() */
- netif_addr_lock_bh(efx->net_dev);
- netif_addr_unlock_bh(efx->net_dev);
-}
-
 static void efx_fini_port(struct efx_nic *efx)
 {
 netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
@@ -1119,7 +165,6 @@ static void efx_fini_port(struct efx_nic *efx)
 if (!efx->port_initialized)
 return;
 
- efx->phy_op->fini(efx);
 efx->port_initialized = false;
 
 efx->link_state.up = false;
@@ -1139,349 +184,79 @@ static void efx_remove_port(struct efx_nic *efx)
 *
 **************************************************************************
 */
 
-/* This configures the PCI device to enable I/O and DMA. */
-static int efx_init_io(struct efx_nic *efx)
-{
- struct pci_dev *pci_dev = efx->pci_dev;
- dma_addr_t dma_mask = efx->type->max_dma_mask;
- int rc;
-
- netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
-
- rc = pci_enable_device(pci_dev);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "failed to enable PCI device\n");
- goto fail1;
- }
-
- pci_set_master(pci_dev);
+static LIST_HEAD(efx_primary_list);
+static LIST_HEAD(efx_unassociated_list);
 
- /* Set the PCI DMA mask. Try all possibilities from our
- * genuine mask down to 32 bits, because some architectures
- * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
- * masks even though they reject 46 bit masks.
- */
- while (dma_mask > 0x7fffffffUL) {
- if (dma_supported(&pci_dev->dev, dma_mask)) {
- rc = dma_set_mask(&pci_dev->dev, dma_mask);
- if (rc == 0)
- break;
- }
- dma_mask >>= 1;
- }
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "could not find a suitable DMA mask\n");
- goto fail2;
- }
- netif_dbg(efx, probe, efx->net_dev,
- "using DMA mask %llx\n", (unsigned long long) dma_mask);
- rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask);
- if (rc) {
- /* dma_set_coherent_mask() is not *allowed* to
- * fail with a mask that dma_set_mask() accepted,
- * but just in case...
- */
- netif_err(efx, probe, efx->net_dev,
- "failed to set consistent DMA mask\n");
- goto fail2;
- }
-
- efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
- rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "request for memory BAR failed\n");
- rc = -EIO;
- goto fail3;
- }
- efx->membase = ioremap_nocache(efx->membase_phys,
- efx->type->mem_map_size);
- if (!efx->membase) {
- netif_err(efx, probe, efx->net_dev,
- "could not map memory BAR at %llx+%x\n",
- (unsigned long long)efx->membase_phys,
- efx->type->mem_map_size);
- rc = -ENOMEM;
- goto fail4;
- }
- netif_dbg(efx, probe, efx->net_dev,
- "memory BAR at %llx+%x (virtual %p)\n",
- (unsigned long long)efx->membase_phys,
- efx->type->mem_map_size, efx->membase);
-
- return 0;
-
- fail4:
- pci_release_region(efx->pci_dev, EFX_MEM_BAR);
- fail3:
- efx->membase_phys = 0;
- fail2:
- pci_disable_device(efx->pci_dev);
- fail1:
- return rc;
-}
-
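The DMA mask loop removed above starts from the NIC's native mask and halves it until the platform accepts one, stopping before it drops below 32 bits. A pure-arithmetic sketch, where dma_ok() is an invented stand-in for dma_set_mask() and the 40-bit IOMMU limit is assumed for illustration:

/* Sketch of the removed DMA mask fallback walk in efx_init_io(). */
#include <stdio.h>

static int dma_ok(unsigned long long mask)
{
    /* pretend the platform only accepts masks up to 40 bits */
    return mask <= 0xffffffffffULL;
}

int main(void)
{
    unsigned long long dma_mask = 0x3fffffffffffULL;  /* 46-bit native mask */

    while (dma_mask > 0x7fffffffULL) {
        if (dma_ok(dma_mask)) {
            printf("using DMA mask %llx\n", dma_mask);  /* 40-bit mask */
            return 0;
        }
        dma_mask >>= 1;
    }
    printf("no suitable DMA mask\n");
    return 1;
}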
-static void efx_fini_io(struct efx_nic *efx)
+static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
 {
- netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
-
- if (efx->membase) {
- iounmap(efx->membase);
- efx->membase = NULL;
- }
-
- if (efx->membase_phys) {
- pci_release_region(efx->pci_dev, EFX_MEM_BAR);
- efx->membase_phys = 0;
- }
-
- pci_disable_device(efx->pci_dev);
+ return left->type == right->type &&
+ left->vpd_sn && right->vpd_sn &&
+ !strcmp(left->vpd_sn, right->vpd_sn);
 }
 
-static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
+static void efx_associate(struct efx_nic *efx)
 {
- cpumask_var_t thread_mask;
- unsigned int count;
- int cpu;
+ struct efx_nic *other, *next;
 
- if (rss_cpus) {
- count = rss_cpus;
- } else {
- if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
- netif_warn(efx, probe, efx->net_dev,
- "RSS disabled due to allocation failure\n");
- return 1;
- }
+ if (efx->primary == efx) {
+ /* Adding primary function; look for secondaries */
 
- count = 0;
- for_each_online_cpu(cpu) {
- if (!cpumask_test_cpu(cpu, thread_mask)) {
- ++count;
- cpumask_or(thread_mask, thread_mask,
- topology_thread_cpumask(cpu));
- }
- }
-
- free_cpumask_var(thread_mask);
- }
-
- /* If RSS is requested for the PF *and* VFs then we can't write RSS
- * table entries that are inaccessible to VFs
- */
- if (efx_sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
- count > efx_vf_size(efx)) {
- netif_warn(efx, probe, efx->net_dev,
- "Reducing number of RSS channels from %u to %u for "
- "VF support. Increase vf-msix-limit to use more "
- "channels on the PF.\n",
- count, efx_vf_size(efx));
- count = efx_vf_size(efx);
- }
-
- return count;
-}
-
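The removed efx_wanted_parallelism() counts one RSS channel per physical core by skipping any CPU whose hyperthread sibling has already been counted. A userspace sketch of the same accumulation, using an assumed 4-core/8-thread sibling map in place of the kernel's topology masks:

/* Sketch of the sibling-aware core counting removed above. */
#include <stdio.h>

#define NCPUS 8

int main(void)
{
    /* sibling[cpu] = bitmask of CPUs sharing that CPU's core (assumed) */
    unsigned int sibling[NCPUS] = { 0x11, 0x22, 0x44, 0x88,
                                    0x11, 0x22, 0x44, 0x88 };
    unsigned int seen = 0, count = 0;

    for (int cpu = 0; cpu < NCPUS; cpu++) {
        if (!(seen & (1u << cpu))) {
            count++;
            seen |= sibling[cpu];   /* mark every thread of this core */
        }
    }
    printf("RSS channels wanted: %u\n", count);     /* prints 4 */
    return 0;
}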
-/* Probe the number and type of interrupts we are able to obtain, and
- * the resulting numbers of channels and RX queues.
- */
-static int efx_probe_interrupts(struct efx_nic *efx)
-{
- unsigned int max_channels =
- min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
- unsigned int extra_channels = 0;
- unsigned int i, j;
- int rc;
-
- for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
- if (efx->extra_channel_type[i])
- ++extra_channels;
-
- if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
- struct msix_entry xentries[EFX_MAX_CHANNELS];
- unsigned int n_channels;
-
- n_channels = efx_wanted_parallelism(efx);
- if (separate_tx_channels)
- n_channels *= 2;
- n_channels += extra_channels;
- n_channels = min(n_channels, max_channels);
+ netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
+ list_add_tail(&efx->node, &efx_primary_list);
 
- for (i = 0; i < n_channels; i++)
- xentries[i].entry = i;
- rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
- if (rc > 0) {
- netif_err(efx, drv, efx->net_dev,
- "WARNING: Insufficient MSI-X vectors"
- " available (%d < %u).\n", rc, n_channels);
- netif_err(efx, drv, efx->net_dev,
- "WARNING: Performance may be reduced.\n");
- EFX_BUG_ON_PARANOID(rc >= n_channels);
- n_channels = rc;
- rc = pci_enable_msix(efx->pci_dev, xentries,
- n_channels);
- }
-
- if (rc == 0) {
- efx->n_channels = n_channels;
- if (n_channels > extra_channels)
- n_channels -= extra_channels;
- if (separate_tx_channels) {
- efx->n_tx_channels = max(n_channels / 2, 1U);
- efx->n_rx_channels = max(n_channels -
- efx->n_tx_channels,
- 1U);
- } else {
- efx->n_tx_channels = n_channels;
- efx->n_rx_channels = n_channels;
+ list_for_each_entry_safe(other, next, &efx_unassociated_list,
+ node) {
+ if (efx_same_controller(efx, other)) {
+ list_del(&other->node);
+ netif_dbg(other, probe, other->net_dev,
+ "moving to secondary list of %s %s\n",
+ pci_name(efx->pci_dev),
+ efx->net_dev->name);
+ list_add_tail(&other->node,
+ &efx->secondary_list);
+ other->primary = efx;
 }
- for (i = 0; i < efx->n_channels; i++)
- efx_get_channel(efx, i)->irq =
- xentries[i].vector;
- } else {
- /* Fall back to single channel MSI */
- efx->interrupt_mode = EFX_INT_MODE_MSI;
- netif_err(efx, drv, efx->net_dev,
- "could not enable MSI-X\n");
- }
- }
-
- /* Try single interrupt MSI */
- if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
- efx->n_channels = 1;
- efx->n_rx_channels = 1;
- efx->n_tx_channels = 1;
- rc = pci_enable_msi(efx->pci_dev);
- if (rc == 0) {
- efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
- } else {
- netif_err(efx, drv, efx->net_dev,
- "could not enable MSI\n");
- efx->interrupt_mode = EFX_INT_MODE_LEGACY;
- }
- }
-
- /* Assume legacy interrupts */
- if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
- efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
- efx->n_rx_channels = 1;
- efx->n_tx_channels = 1;
- efx->legacy_irq = efx->pci_dev->irq;
- }
-
- /* Assign extra channels if possible */
- j = efx->n_channels;
- for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
- if (!efx->extra_channel_type[i])
- continue;
- if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
- efx->n_channels <= extra_channels) {
- efx->extra_channel_type[i]->handle_no_channel(efx);
- } else {
- --j;
- efx_get_channel(efx, j)->type =
- efx->extra_channel_type[i];
+ }
+ } else {
+ /* Adding secondary function; look for primary */
+
+ list_for_each_entry(other, &efx_primary_list, node) {
+ if (efx_same_controller(efx, other)) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "adding to secondary list of %s %s\n",
+ pci_name(other->pci_dev),
+ other->net_dev->name);
+ list_add_tail(&efx->node,
+ &other->secondary_list);
+ efx->primary = other;
+ return;
+ }
 }
- }
-
- /* RSS might be usable on VFs even if it is disabled on the PF */
- efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ?
- efx->n_rx_channels : efx_vf_size(efx));
-
- return 0;
-}
-
-/* Enable interrupts, then probe and start the event queues */
-static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
-{
- struct efx_channel *channel;
-
- BUG_ON(efx->state == STATE_DISABLED);
-
- if (efx->eeh_disabled_legacy_irq) {
- enable_irq(efx->legacy_irq);
- efx->eeh_disabled_legacy_irq = false;
- }
- if (efx->legacy_irq)
- efx->legacy_irq_enabled = true;
- efx_nic_enable_interrupts(efx);
-
- efx_for_each_channel(channel, efx) {
- if (!channel->type->keep_eventq || !may_keep_eventq)
- efx_init_eventq(channel);
- efx_start_eventq(channel);
+ netif_dbg(efx, probe, efx->net_dev,
+ "adding to unassociated list\n");
+ list_add_tail(&efx->node, &efx_unassociated_list);
 }
-
- efx_mcdi_mode_event(efx);
-}
-
-static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
-{
- struct efx_channel *channel;
-
- if (efx->state == STATE_DISABLED)
- return;
-
- efx_mcdi_mode_poll(efx);
-
- efx_nic_disable_interrupts(efx);
- if (efx->legacy_irq) {
- synchronize_irq(efx->legacy_irq);
- efx->legacy_irq_enabled = false;
- }
-
- efx_for_each_channel(channel, efx) {
- if (channel->irq)
- synchronize_irq(channel->irq);
-
- efx_stop_eventq(channel);
- if (!channel->type->keep_eventq || !may_keep_eventq)
- efx_fini_eventq(channel);
- }
-}
-
-static void efx_remove_interrupts(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- /* Remove MSI/MSI-X interrupts */
- efx_for_each_channel(channel, efx)
- channel->irq = 0;
- pci_disable_msi(efx->pci_dev);
- pci_disable_msix(efx->pci_dev);
-
- /* Remove legacy interrupt */
- efx->legacy_irq = 0;
 }
 
-static void efx_set_channels(struct efx_nic *efx)
+static void efx_dissociate(struct efx_nic *efx)
 {
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
+ struct efx_nic *other, *next;
 
- efx->tx_channel_offset =
- separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
-
- /* We need to mark which channels really have RX and TX
- * queues, and adjust the TX queue numbers if we have separate
- * RX-only and TX-only channels.
- */
- efx_for_each_channel(channel, efx) {
- if (channel->channel < efx->n_rx_channels)
- channel->rx_queue.core_index = channel->channel;
- else
- channel->rx_queue.core_index = -1;
+ list_del(&efx->node);
+ efx->primary = NULL;
 
- efx_for_each_channel_tx_queue(tx_queue, channel)
- tx_queue->queue -= (efx->tx_channel_offset *
- EFX_TXQ_TYPES);
+ list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
+ list_del(&other->node);
+ netif_dbg(other, probe, other->net_dev,
+ "moving to unassociated list\n");
+ list_add_tail(&other->node, &efx_unassociated_list);
+ other->primary = NULL;
 }
 }
 
 static int efx_probe_nic(struct efx_nic *efx)
 {
- size_t i;
 int rc;
 
 netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
@@ -1491,31 +266,52 @@ static int efx_probe_nic(struct efx_nic *efx)
 if (rc)
 return rc;
 
- /* Determine the number of channels and queues by trying to hook
- * in MSI-X interrupts. */
- rc = efx_probe_interrupts(efx);
- if (rc)
- goto fail;
+ do {
+ if (!efx->max_channels || !efx->max_tx_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "Insufficient resources to allocate"
+ " any channels\n");
+ rc = -ENOSPC;
+ goto fail1;
+ }
 
- efx->type->dimension_resources(efx);
+ /* Determine the number of channels and queues by trying
+ * to hook in MSI-X interrupts.
+ */
+ rc = efx_probe_interrupts(efx);
+ if (rc)
+ goto fail1;
 
- if (efx->n_channels > 1)
- get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
- for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
- efx->rx_indir_table[i] =
- ethtool_rxfh_indir_default(i, efx->rss_spread);
+ rc = efx_set_channels(efx);
+ if (rc)
+ goto fail1;
+
+ /* dimension_resources can fail with EAGAIN */
+ rc = efx->type->dimension_resources(efx);
+ if (rc != 0 && rc != -EAGAIN)
+ goto fail2;
+
+ if (rc == -EAGAIN)
+ /* try again with new max_channels */
+ efx_remove_interrupts(efx);
+
+ } while (rc == -EAGAIN);
 
- efx_set_channels(efx);
- netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
- netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
+ if (efx->n_channels > 1)
+ netdev_rss_key_fill(efx->rss_context.rx_hash_key,
+ sizeof(efx->rss_context.rx_hash_key));
+ efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table);
 
 /* Initialise the interrupt moderation settings */
+ efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
 true);
 
 return 0;
 
-fail:
+fail2:
+ efx_remove_interrupts(efx);
+fail1:
 efx->type->remove(efx);
 return rc;
 }
@@ -1555,23 +351,36 @@ static int efx_probe_all(struct efx_nic *efx)
 rc = -EINVAL;
 goto fail3;
 }
- efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
+
+#ifdef CONFIG_SFC_SRIOV
+ rc = efx->type->vswitching_probe(efx);
+ if (rc) /* not fatal; the PF will still work fine */
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to setup vswitching rc=%d;"
+ " VFs may not function\n", rc);
+#endif
 
 rc = efx_probe_filters(efx);
 if (rc) {
 netif_err(efx, probe, efx->net_dev,
 "failed to create filter tables\n");
- goto fail3;
+ goto fail4;
 }
 
 rc = efx_probe_channels(efx);
 if (rc)
- goto fail4;
+ goto fail5;
+
+ efx->state = STATE_NET_DOWN;
 
 return 0;
 
- fail4:
+ fail5:
 efx_remove_filters(efx);
 fail4:
+#ifdef CONFIG_SFC_SRIOV
+ efx->type->vswitching_remove(efx);
+#endif
 fail3:
 efx_remove_port(efx);
 fail2:
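The do/while added to efx_probe_nic() above is a retry-with-shrinking-resources loop: dimension_resources() may lower max_channels and return -EAGAIN, after which the interrupt allocation is torn down and the probe retried. A minimal sketch of the shape of that loop, with an invented resource model:

/* Sketch of the -EAGAIN retry loop added in efx_probe_nic(). */
#include <stdio.h>
#include <errno.h>

static unsigned int max_channels = 16;

static int dimension_resources(unsigned int n_channels)
{
    unsigned int budget = 6;        /* pretend only 6 channels fit */

    if (n_channels > budget) {
        max_channels = budget;      /* shrink and ask caller to retry */
        return -EAGAIN;
    }
    return 0;
}

int main(void)
{
    int rc;

    do {
        unsigned int n = max_channels;  /* stands in for efx_probe_interrupts() */

        rc = dimension_resources(n);
        if (rc && rc != -EAGAIN)
            return 1;
        if (rc == -EAGAIN)
            printf("retrying with max_channels=%u\n", max_channels);
    } while (rc == -EAGAIN);

    printf("probed with %u channels\n", max_channels);
    return 0;
}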
@@ -1580,90 +389,17 @@ static int efx_probe_all(struct efx_nic *efx)
 return rc;
 }
 
-/* If the interface is supposed to be running but is not, start
- * the hardware and software data path, regular activity for the port
- * (MAC statistics, link polling, etc.) and schedule the port to be
- * reconfigured. Interrupts must already be enabled. This function
- * is safe to call multiple times, so long as the NIC is not disabled.
- * Requires the RTNL lock.
- */
-static void efx_start_all(struct efx_nic *efx)
-{
- EFX_ASSERT_RESET_SERIALISED(efx);
- BUG_ON(efx->state == STATE_DISABLED);
-
- /* Check that it is appropriate to restart the interface. All
- * of these flags are safe to read under just the rtnl lock */
- if (efx->port_enabled || !netif_running(efx->net_dev))
- return;
-
- efx_start_port(efx);
- efx_start_datapath(efx);
-
- /* Start the hardware monitor if there is one */
- if (efx->type->monitor != NULL)
- queue_delayed_work(efx->workqueue, &efx->monitor_work,
- efx_monitor_interval);
-
- /* If link state detection is normally event-driven, we have
- * to poll now because we could have missed a change
- */
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
- mutex_lock(&efx->mac_lock);
- if (efx->phy_op->poll(efx))
- efx_link_status_changed(efx);
- mutex_unlock(&efx->mac_lock);
- }
-
- efx->type->start_stats(efx);
-}
-
-/* Flush all delayed work. Should only be called when no more delayed work
- * will be scheduled. This doesn't flush pending online resets (efx_reset),
- * since we're holding the rtnl_lock at this point. */
-static void efx_flush_all(struct efx_nic *efx)
-{
- /* Make sure the hardware monitor and event self-test are stopped */
- cancel_delayed_work_sync(&efx->monitor_work);
- efx_selftest_async_cancel(efx);
- /* Stop scheduled port reconfigurations */
- cancel_work_sync(&efx->mac_work);
-}
-
-/* Quiesce the hardware and software data path, and regular activity
- * for the port without bringing the link down. Safe to call multiple
- * times with the NIC in almost any state, but interrupts should be
- * enabled. Requires the RTNL lock.
- */
-static void efx_stop_all(struct efx_nic *efx)
-{
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- /* port_enabled can be read safely under the rtnl lock */
- if (!efx->port_enabled)
- return;
-
- efx->type->stop_stats(efx);
- efx_stop_port(efx);
-
- /* Flush efx_mac_work(), refill_workqueue, monitor_work */
- efx_flush_all(efx);
-
- /* Stop the kernel transmit interface. This is only valid if
- * the device is stopped or detached; otherwise the watchdog
- * may fire immediately.
- */
- WARN_ON(netif_running(efx->net_dev) &&
- netif_device_present(efx->net_dev));
- netif_tx_disable(efx->net_dev);
-
- efx_stop_datapath(efx);
-}
-
 static void efx_remove_all(struct efx_nic *efx)
 {
+ rtnl_lock();
+ efx_xdp_setup_prog(efx, NULL);
+ rtnl_unlock();
+
 efx_remove_channels(efx);
 efx_remove_filters(efx);
+#ifdef CONFIG_SFC_SRIOV
+ efx->type->vswitching_remove(efx);
+#endif
 efx_remove_port(efx);
 efx_remove_nic(efx);
 }
@@ -1673,14 +409,13 @@
 * Interrupt moderation
 *
 **************************************************************************/
-
-static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns)
+unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
 {
 if (usecs == 0)
 return 0;
- if (usecs * 1000 < quantum_ns)
+ if (usecs * 1000 < efx->timer_quantum_ns)
 return 1; /* never round down to 0 */
- return usecs * 1000 / quantum_ns;
+ return usecs * 1000 / efx->timer_quantum_ns;
 }
 
 /* Set interrupt moderation parameters */
@@ -1689,21 +424,16 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
 bool rx_may_override_tx)
 {
 struct efx_channel *channel;
- unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max *
- efx->timer_quantum_ns,
- 1000);
- unsigned int tx_ticks;
- unsigned int rx_ticks;
+ unsigned int timer_max_us;
 
 EFX_ASSERT_RESET_SERIALISED(efx);
 
- if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max)
- return -EINVAL;
+ timer_max_us = efx->timer_max_ns / 1000;
 
- tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns);
- rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns);
+ if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
+ return -EINVAL;
 
- if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
+ if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
 !rx_may_override_tx) {
 netif_err(efx, drv, efx->net_dev, "Channels are shared. "
 "RX and TX IRQ moderation must be equal\n");
@@ -1711,12 +441,14 @@
 }
 
 efx->irq_rx_adaptive = rx_adaptive;
- efx->irq_rx_moderation = rx_ticks;
+ efx->irq_rx_moderation_us = rx_usecs;
 
 efx_for_each_channel(channel, efx) {
 if (efx_channel_has_rx_queue(channel))
- channel->irq_moderation = rx_ticks;
+ channel->irq_moderation_us = rx_usecs;
 else if (efx_channel_has_tx_queues(channel))
- channel->irq_moderation = tx_ticks;
+ channel->irq_moderation_us = tx_usecs;
+ else if (efx_channel_is_xdp_tx(channel))
+ channel->irq_moderation_us = tx_usecs;
 }
 
 return 0;
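The microseconds-to-ticks conversion shown in this hunk (old irq_mod_ticks(), new efx_usecs_to_ticks()) deliberately never rounds a non-zero setting down to zero. A worked example, using an assumed 6144 ns timer quantum since the real value comes from the NIC type:

/* Worked example of efx_usecs_to_ticks() from the hunk above. */
#include <stdio.h>

static unsigned int usecs_to_ticks(unsigned int usecs, unsigned int quantum_ns)
{
    if (usecs == 0)
        return 0;
    if (usecs * 1000 < quantum_ns)
        return 1;   /* never round a non-zero setting down to 0 */
    return usecs * 1000 / quantum_ns;
}

int main(void)
{
    unsigned int quantum_ns = 6144;
    unsigned int samples[] = { 0, 1, 60, 150 };

    for (int i = 0; i < 4; i++)
        printf("%3u us -> %u ticks\n", samples[i],
               usecs_to_ticks(samples[i], quantum_ns));
    return 0;   /* 0 -> 0, 1 -> 1, 60 -> 9, 150 -> 24 */
}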
@@ -1725,154 +457,33 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
 void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
 unsigned int *rx_usecs, bool *rx_adaptive)
 {
- /* We must round up when converting ticks to microseconds
- * because we round down when converting the other way.
- */
 *rx_adaptive = efx->irq_rx_adaptive;
- *rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation *
- efx->timer_quantum_ns,
- 1000);
+ *rx_usecs = efx->irq_rx_moderation_us;
 
 /* If channels are shared between RX and TX, so is IRQ
 * moderation. Otherwise, IRQ moderation is the same for all
 * TX channels and is not adaptive.
 */
- if (efx->tx_channel_offset == 0)
+ if (efx->tx_channel_offset == 0) {
 *tx_usecs = *rx_usecs;
- else
- *tx_usecs = DIV_ROUND_UP(
- efx->channel[efx->tx_channel_offset]->irq_moderation *
- efx->timer_quantum_ns,
- 1000);
-}
+ } else {
+ struct efx_channel *tx_channel;
 
+ tx_channel = efx->channel[efx->tx_channel_offset];
+ *tx_usecs = tx_channel->irq_moderation_us;
+ }
 }
 
-/**************************************************************************
- *
- * Hardware monitor
- *
- **************************************************************************/
-
-/* Run periodically off the general workqueue */
-static void efx_monitor(struct work_struct *data)
-{
- struct efx_nic *efx = container_of(data, struct efx_nic,
- monitor_work.work);
-
- netif_vdbg(efx, timer, efx->net_dev,
- "hardware monitor executing on CPU %d\n",
- raw_smp_processor_id());
- BUG_ON(efx->type->monitor == NULL);
-
- /* If the mac_lock is already held then it is likely a port
- * reconfiguration is already in place, which will likely do
- * most of the work of monitor() anyway. */
- if (mutex_trylock(&efx->mac_lock)) {
- if (efx->port_enabled)
- efx->type->monitor(efx);
- mutex_unlock(&efx->mac_lock);
- }
-
- queue_delayed_work(efx->workqueue, &efx->monitor_work,
- efx_monitor_interval);
-}
-
-/**************************************************************************
- *
- * ioctls
- *
- *************************************************************************/
-
-/* Net device ioctl
- * Context: process, rtnl_lock() held.
- */
-static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct mii_ioctl_data *data = if_mii(ifr);
-
- if (cmd == SIOCSHWTSTAMP)
- return efx_ptp_ioctl(efx, ifr, cmd);
-
- /* Convert phy_id from older PRTAD/DEVAD format */
- if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
- (data->phy_id & 0xfc00) == 0x0400)
- data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
-
- return mdio_mii_ioctl(&efx->mdio, data, cmd);
-}
-
-/**************************************************************************
- *
- * NAPI interface
- *
- **************************************************************************/
-
-static void efx_init_napi_channel(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
-
- channel->napi_dev = efx->net_dev;
- netif_napi_add(channel->napi_dev, &channel->napi_str,
- efx_poll, napi_weight);
-}
-
-static void efx_init_napi(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_init_napi_channel(channel);
-}
-
-static void efx_fini_napi_channel(struct efx_channel *channel)
-{
- if (channel->napi_dev)
- netif_napi_del(&channel->napi_str);
- channel->napi_dev = NULL;
-}
-
-static void efx_fini_napi(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_fini_napi_channel(channel);
-}
-
-/**************************************************************************
- *
- * Kernel netpoll interface
- *
- *************************************************************************/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-/* Although in the common case interrupts will be disabled, this is not
- * guaranteed. However, all our work happens inside the NAPI callback,
- * so no locking is required.
- */
-static void efx_netpoll(struct net_device *net_dev)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_schedule_channel(channel);
-}
-
-#endif
-
-/**************************************************************************
- *
 * Kernel net device interface
 *
 *************************************************************************/
 
 /* Context: process, rtnl_lock() held. */
-static int efx_net_open(struct net_device *net_dev)
+int efx_net_open(struct net_device *net_dev)
 {
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
 int rc;
 
 netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
@@ -1891,7 +502,11 @@ static int efx_net_open(struct net_device *net_dev)
 efx_link_status_changed(efx);
 
 efx_start_all(efx);
- efx_selftest_async_start(efx);
+ if (efx->state == STATE_DISABLED || efx->reset_pending)
+ netif_device_detach(efx->net_dev);
+ else
+ efx->state = STATE_NET_UP;
+
 return 0;
 }
 
@@ -1899,9 +514,9 @@ static int efx_net_open(struct net_device *net_dev)
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
-static int efx_net_stop(struct net_device *net_dev)
+int efx_net_stop(struct net_device *net_dev)
 {
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
 
 netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
 raw_smp_processor_id());
@@ -1912,151 +527,41 @@ static int efx_net_stop(struct net_device *net_dev)
 return 0;
 }
 
-/* Context: process, dev_base_lock or RTNL held, non-blocking. */
-static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
- struct rtnl_link_stats64 *stats)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_mac_stats *mac_stats = &efx->mac_stats;
-
- spin_lock_bh(&efx->stats_lock);
-
- efx->type->update_stats(efx);
-
- stats->rx_packets = mac_stats->rx_packets;
- stats->tx_packets = mac_stats->tx_packets;
- stats->rx_bytes = mac_stats->rx_bytes;
- stats->tx_bytes = mac_stats->tx_bytes;
- stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
- stats->multicast = mac_stats->rx_multicast;
- stats->collisions = mac_stats->tx_collision;
- stats->rx_length_errors = (mac_stats->rx_gtjumbo +
- mac_stats->rx_length_error);
- stats->rx_crc_errors = mac_stats->rx_bad;
- stats->rx_frame_errors = mac_stats->rx_align_error;
- stats->rx_fifo_errors = mac_stats->rx_overflow;
- stats->rx_missed_errors = mac_stats->rx_missed;
- stats->tx_window_errors = mac_stats->tx_late_collision;
-
- stats->rx_errors = (stats->rx_length_errors +
- stats->rx_crc_errors +
- stats->rx_frame_errors +
- mac_stats->rx_symbol_error);
- stats->tx_errors = (stats->tx_window_errors +
- mac_stats->tx_bad);
-
- spin_unlock_bh(&efx->stats_lock);
-
- return stats;
-}
-
-/* Context: netif_tx_lock held, BHs disabled. */
-static void efx_watchdog(struct net_device *net_dev)
+static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
 {
- struct efx_nic *efx = netdev_priv(net_dev);
-
- netif_err(efx, tx_err, efx->net_dev,
- "TX stuck with port_enabled=%d: resetting channels\n",
- efx->port_enabled);
-
- efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
-}
-
*/ -static int efx_change_mtu(struct net_device *net_dev, int new_mtu) -{ - struct efx_nic *efx = netdev_priv(net_dev); - int rc; - - rc = efx_check_disabled(efx); - if (rc) - return rc; - if (new_mtu > EFX_MAX_MTU) - return -EINVAL; - - netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); - - efx_device_detach_sync(efx); - efx_stop_all(efx); - - mutex_lock(&efx->mac_lock); - net_dev->mtu = new_mtu; - efx->type->reconfigure_mac(efx); - mutex_unlock(&efx->mac_lock); + struct efx_nic *efx = efx_netdev_priv(net_dev); - efx_start_all(efx); - netif_device_attach(efx->net_dev); - return 0; + if (efx->type->vlan_rx_add_vid) + return efx->type->vlan_rx_add_vid(efx, proto, vid); + else + return -EOPNOTSUPP; } -static int efx_set_mac_address(struct net_device *net_dev, void *data) +static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid) { - struct efx_nic *efx = netdev_priv(net_dev); - struct sockaddr *addr = data; - char *new_addr = addr->sa_data; + struct efx_nic *efx = efx_netdev_priv(net_dev); - if (!is_valid_ether_addr(new_addr)) { - netif_err(efx, drv, efx->net_dev, - "invalid ethernet MAC address requested: %pM\n", - new_addr); - return -EADDRNOTAVAIL; - } - - memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len); - efx_sriov_mac_address_changed(efx); - - /* Reconfigure the MAC */ - mutex_lock(&efx->mac_lock); - efx->type->reconfigure_mac(efx); - mutex_unlock(&efx->mac_lock); - - return 0; + if (efx->type->vlan_rx_kill_vid) + return efx->type->vlan_rx_kill_vid(efx, proto, vid); + else + return -EOPNOTSUPP; } -/* Context: netif_addr_lock held, BHs disabled. */ -static void efx_set_rx_mode(struct net_device *net_dev) +static int efx_hwtstamp_set(struct net_device *net_dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { - struct efx_nic *efx = netdev_priv(net_dev); - struct netdev_hw_addr *ha; - union efx_multicast_hash *mc_hash = &efx->multicast_hash; - u32 crc; - int bit; - - efx->promiscuous = !!(net_dev->flags & IFF_PROMISC); - - /* Build multicast hash table */ - if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) { - memset(mc_hash, 0xff, sizeof(*mc_hash)); - } else { - memset(mc_hash, 0x00, sizeof(*mc_hash)); - netdev_for_each_mc_addr(ha, net_dev) { - crc = ether_crc_le(ETH_ALEN, ha->addr); - bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); - __set_bit_le(bit, mc_hash); - } - - /* Broadcast packets go through the multicast hash filter. - * ether_crc_le() of the broadcast address is 0xbe2612ff - * so we always add bit 0xff to the mask. 
- */ - __set_bit_le(0xff, mc_hash); - } + struct efx_nic *efx = efx_netdev_priv(net_dev); - if (efx->port_enabled) - queue_work(efx->workqueue, &efx->mac_work); - /* Otherwise efx_start_port() will do this */ + return efx_ptp_set_ts_config(efx, config, extack); } -static int efx_set_features(struct net_device *net_dev, netdev_features_t data) +static int efx_hwtstamp_get(struct net_device *net_dev, + struct kernel_hwtstamp_config *config) { - struct efx_nic *efx = netdev_priv(net_dev); - - /* If disabling RX n-tuple filtering, clear existing filters */ - if (net_dev->features & ~data & NETIF_F_NTUPLE) - efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); + struct efx_nic *efx = efx_netdev_priv(net_dev); - return 0; + return efx_ptp_get_ts_config(efx, config); } static const struct net_device_ops efx_netdev_ops = { @@ -2066,26 +571,188 @@ static const struct net_device_ops efx_netdev_ops = { .ndo_tx_timeout = efx_watchdog, .ndo_start_xmit = efx_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = efx_ioctl, .ndo_change_mtu = efx_change_mtu, .ndo_set_mac_address = efx_set_mac_address, .ndo_set_rx_mode = efx_set_rx_mode, .ndo_set_features = efx_set_features, + .ndo_features_check = efx_features_check, + .ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid, + .ndo_hwtstamp_set = efx_hwtstamp_set, + .ndo_hwtstamp_get = efx_hwtstamp_get, #ifdef CONFIG_SFC_SRIOV .ndo_set_vf_mac = efx_sriov_set_vf_mac, .ndo_set_vf_vlan = efx_sriov_set_vf_vlan, .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk, .ndo_get_vf_config = efx_sriov_get_vf_config, + .ndo_set_vf_link_state = efx_sriov_set_vf_link_state, #endif -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = efx_netpoll, -#endif - .ndo_setup_tc = efx_setup_tc, + .ndo_get_phys_port_id = efx_get_phys_port_id, + .ndo_get_phys_port_name = efx_get_phys_port_name, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = efx_filter_rfs, #endif + .ndo_xdp_xmit = efx_xdp_xmit, + .ndo_bpf = efx_xdp }; +static void efx_get_queue_stats_rx(struct net_device *net_dev, int idx, + struct netdev_queue_stats_rx *stats) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + struct efx_rx_queue *rx_queue; + struct efx_channel *channel; + + channel = efx_get_channel(efx, idx); + rx_queue = efx_channel_get_rx_queue(channel); + /* Count only packets since last time datapath was started */ + stats->packets = rx_queue->rx_packets - rx_queue->old_rx_packets; + stats->bytes = rx_queue->rx_bytes - rx_queue->old_rx_bytes; + stats->hw_drops = efx_get_queue_stat_rx_hw_drops(channel) - + channel->old_n_rx_hw_drops; + stats->hw_drop_overruns = channel->n_rx_nodesc_trunc - + channel->old_n_rx_hw_drop_overruns; +} + +static void efx_get_queue_stats_tx(struct net_device *net_dev, int idx, + struct netdev_queue_stats_tx *stats) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + struct efx_tx_queue *tx_queue; + struct efx_channel *channel; + + channel = efx_get_tx_channel(efx, idx); + stats->packets = 0; + stats->bytes = 0; + stats->hw_gso_packets = 0; + stats->hw_gso_wire_packets = 0; + efx_for_each_channel_tx_queue(tx_queue, channel) { + stats->packets += tx_queue->complete_packets - + tx_queue->old_complete_packets; + stats->bytes += tx_queue->complete_bytes - + tx_queue->old_complete_bytes; + /* Note that, unlike stats->packets and stats->bytes, + * these count TXes enqueued, rather than completed, + * which may not be what users expect. 
+ */ + stats->hw_gso_packets += tx_queue->tso_bursts - + tx_queue->old_tso_bursts; + stats->hw_gso_wire_packets += tx_queue->tso_packets - + tx_queue->old_tso_packets; + } +} + +static void efx_get_base_stats(struct net_device *net_dev, + struct netdev_queue_stats_rx *rx, + struct netdev_queue_stats_tx *tx) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + struct efx_channel *channel; + + rx->packets = 0; + rx->bytes = 0; + rx->hw_drops = 0; + rx->hw_drop_overruns = 0; + tx->packets = 0; + tx->bytes = 0; + tx->hw_gso_packets = 0; + tx->hw_gso_wire_packets = 0; + + /* Count all packets on non-core queues, and packets before last + * datapath start on core queues. + */ + efx_for_each_channel(channel, efx) { + rx_queue = efx_channel_get_rx_queue(channel); + if (channel->channel >= net_dev->real_num_rx_queues) { + rx->packets += rx_queue->rx_packets; + rx->bytes += rx_queue->rx_bytes; + rx->hw_drops += efx_get_queue_stat_rx_hw_drops(channel); + rx->hw_drop_overruns += channel->n_rx_nodesc_trunc; + } else { + rx->packets += rx_queue->old_rx_packets; + rx->bytes += rx_queue->old_rx_bytes; + rx->hw_drops += channel->old_n_rx_hw_drops; + rx->hw_drop_overruns += channel->old_n_rx_hw_drop_overruns; + } + efx_for_each_channel_tx_queue(tx_queue, channel) { + if (channel->channel < efx->tx_channel_offset || + channel->channel >= efx->tx_channel_offset + + net_dev->real_num_tx_queues) { + tx->packets += tx_queue->complete_packets; + tx->bytes += tx_queue->complete_bytes; + tx->hw_gso_packets += tx_queue->tso_bursts; + tx->hw_gso_wire_packets += tx_queue->tso_packets; + } else { + tx->packets += tx_queue->old_complete_packets; + tx->bytes += tx_queue->old_complete_bytes; + tx->hw_gso_packets += tx_queue->old_tso_bursts; + tx->hw_gso_wire_packets += tx_queue->old_tso_packets; + } + /* Include XDP TX in device-wide stats */ + tx->packets += tx_queue->complete_xdp_packets; + tx->bytes += tx_queue->complete_xdp_bytes; + } + } +} + +static const struct netdev_stat_ops efx_stat_ops = { + .get_queue_stats_rx = efx_get_queue_stats_rx, + .get_queue_stats_tx = efx_get_queue_stats_tx, + .get_base_stats = efx_get_base_stats, +}; + +static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog) +{ + struct bpf_prog *old_prog; + + if (efx->xdp_rxq_info_failed) { + netif_err(efx, drv, efx->net_dev, + "Unable to bind XDP program due to previous failure of rxq_info\n"); + return -EINVAL; + } + + if (prog && efx->net_dev->mtu > efx_xdp_max_mtu(efx)) { + netif_err(efx, drv, efx->net_dev, + "Unable to configure XDP with MTU of %d (max: %d)\n", + efx->net_dev->mtu, efx_xdp_max_mtu(efx)); + return -EINVAL; + } + + old_prog = rtnl_dereference(efx->xdp_prog); + rcu_assign_pointer(efx->xdp_prog, prog); + /* Release the reference that was originally passed by the caller. */ + if (old_prog) + bpf_prog_put(old_prog); + + return 0; +} + +/* Context: process, rtnl_lock() held. 
*/ +static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + struct efx_nic *efx = efx_netdev_priv(dev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return efx_xdp_setup_prog(efx, xdp->prog); + default: + return -EINVAL; + } +} + +static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs, + u32 flags) +{ + struct efx_nic *efx = efx_netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH); +} + static void efx_update_name(struct efx_nic *efx) { strcpy(efx->name, efx->net_dev->name); @@ -2098,9 +765,9 @@ static int efx_netdev_event(struct notifier_block *this, { struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); - if (net_dev->netdev_ops == &efx_netdev_ops && + if ((net_dev->netdev_ops == &efx_netdev_ops) && event == NETDEV_CHANGENAME) - efx_update_name(netdev_priv(net_dev)); + efx_update_name(efx_netdev_priv(net_dev)); return NOTIFY_DONE; } @@ -2109,13 +776,13 @@ static struct notifier_block efx_netdev_notifier = { .notifier_call = efx_netdev_event, }; -static ssize_t -show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t phy_type_show(struct device *dev, + struct device_attribute *attr, char *buf) { - struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_nic *efx = dev_get_drvdata(dev); return sprintf(buf, "%d\n", efx->phy_type); } -static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL); +static DEVICE_ATTR_RO(phy_type); static int efx_register_netdev(struct efx_nic *efx) { @@ -2126,8 +793,13 @@ static int efx_register_netdev(struct efx_nic *efx) net_dev->watchdog_timeo = 5 * HZ; net_dev->irq = efx->pci_dev->irq; net_dev->netdev_ops = &efx_netdev_ops; - SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); - net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; + net_dev->stat_ops = &efx_stat_ops; + if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) + net_dev->priv_flags |= IFF_UNICAST_FLT; + net_dev->ethtool_ops = &efx_ethtool_ops; + netif_set_tso_max_segs(net_dev, EFX_TSO_MAX_SEGS); + net_dev->min_mtu = EFX_MIN_MTU; + net_dev->max_mtu = EFX_MAX_MTU; rtnl_lock(); @@ -2135,11 +807,8 @@ static int efx_register_netdev(struct efx_nic *efx) * already requested. If so, the NIC is probably hosed so we * abort. */ - efx->state = STATE_READY; - smp_mb(); /* ensure we change state before checking reset_pending */ if (efx->reset_pending) { - netif_err(efx, probe, efx->net_dev, - "aborting probe due to scheduled reset\n"); + pci_err(efx->pci_dev, "aborting probe due to scheduled reset\n"); rc = -EIO; goto fail_locked; } @@ -2162,6 +831,10 @@ static int efx_register_netdev(struct efx_nic *efx) efx_init_tx_queue_core_txq(tx_queue); } + efx_associate(efx); + + efx->state = STATE_NET_DOWN; + rtnl_unlock(); rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); @@ -2171,10 +844,13 @@ static int efx_register_netdev(struct efx_nic *efx) goto fail_registered; } + efx_init_mcdi_logging(efx); + return 0; fail_registered: rtnl_lock(); + efx_dissociate(efx); unregister_netdevice(net_dev); fail_locked: efx->state = STATE_UNINIT; @@ -2185,259 +861,18 @@ fail_locked: static void efx_unregister_netdev(struct efx_nic *efx) { - struct efx_channel *channel; - struct efx_tx_queue *tx_queue; - if (!efx->net_dev) return; - BUG_ON(netdev_priv(efx->net_dev) != efx); - - /* Free up any skbs still remaining. This has to happen before - * we try to unregister the netdev as running their destructors - * may be needed to get the device ref. count to 0. 
*/ - efx_for_each_channel(channel, efx) { - efx_for_each_channel_tx_queue(tx_queue, channel) - efx_release_tx_buffers(tx_queue); - } - - strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); - device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); - - rtnl_lock(); - unregister_netdevice(efx->net_dev); - efx->state = STATE_UNINIT; - rtnl_unlock(); -} - -/************************************************************************** - * - * Device reset and suspend - * - **************************************************************************/ - -/* Tears down the entire software state and most of the hardware state - * before reset. */ -void efx_reset_down(struct efx_nic *efx, enum reset_type method) -{ - EFX_ASSERT_RESET_SERIALISED(efx); - - efx_stop_all(efx); - efx_stop_interrupts(efx, false); - - mutex_lock(&efx->mac_lock); - if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) - efx->phy_op->fini(efx); - efx->type->fini(efx); -} - -/* This function will always ensure that the locks acquired in - * efx_reset_down() are released. A failure return code indicates - * that we were unable to reinitialise the hardware, and the - * driver should be disabled. If ok is false, then the rx and tx - * engines are not restarted, pending a RESET_DISABLE. */ -int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) -{ - int rc; - - EFX_ASSERT_RESET_SERIALISED(efx); - - rc = efx->type->init(efx); - if (rc) { - netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); - goto fail; - } - - if (!ok) - goto fail; - - if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) { - rc = efx->phy_op->init(efx); - if (rc) - goto fail; - if (efx->phy_op->reconfigure(efx)) - netif_err(efx, drv, efx->net_dev, - "could not restore PHY settings\n"); - } - - efx->type->reconfigure_mac(efx); - - efx_start_interrupts(efx, false); - efx_restore_filters(efx); - efx_sriov_reset(efx); - - mutex_unlock(&efx->mac_lock); - - efx_start_all(efx); - - return 0; - -fail: - efx->port_initialized = false; - - mutex_unlock(&efx->mac_lock); - - return rc; -} - -/* Reset the NIC using the specified method. Note that the reset may - * fail, in which case the card will be left in an unusable state. - * - * Caller must hold the rtnl_lock. - */ -int efx_reset(struct efx_nic *efx, enum reset_type method) -{ - int rc, rc2; - bool disabled; - - netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", - RESET_TYPE(method)); - - efx_device_detach_sync(efx); - efx_reset_down(efx, method); - - rc = efx->type->reset(efx, method); - if (rc) { - netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n"); - goto out; - } - - /* Clear flags for the scopes we covered. We assume the NIC and - * driver are now quiescent so that there is no race here. - */ - efx->reset_pending &= -(1 << (method + 1)); - - /* Reinitialise bus-mastering, which may have been turned off before - * the reset was scheduled. This is still appropriate, even in the - * RESET_TYPE_DISABLE since this driver generally assumes the hardware - * can respond to requests. 
*/ - pci_set_master(efx->pci_dev); - -out: - /* Leave device stopped if necessary */ - disabled = rc || - method == RESET_TYPE_DISABLE || - method == RESET_TYPE_RECOVER_OR_DISABLE; - rc2 = efx_reset_up(efx, method, !disabled); - if (rc2) { - disabled = true; - if (!rc) - rc = rc2; - } - - if (disabled) { - dev_close(efx->net_dev); - netif_err(efx, drv, efx->net_dev, "has been disabled\n"); - efx->state = STATE_DISABLED; - } else { - netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); - netif_device_attach(efx->net_dev); - } - return rc; -} - -/* Try recovery mechanisms. - * For now only EEH is supported. - * Returns 0 if the recovery mechanisms are unsuccessful. - * Returns a non-zero value otherwise. - */ -int efx_try_recovery(struct efx_nic *efx) -{ -#ifdef CONFIG_EEH - /* A PCI error can occur and not be seen by EEH because nothing - * happens on the PCI bus. In this case the driver may fail and - * schedule a 'recover or reset', leading to this recovery handler. - * Manually call the eeh failure check function. - */ - struct eeh_dev *eehdev = - of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev)); - - if (eeh_dev_check_failure(eehdev)) { - /* The EEH mechanisms will handle the error and reset the - * device if necessary. - */ - return 1; - } -#endif - return 0; -} - -/* The worker thread exists so that code that cannot sleep can - * schedule a reset for later. - */ -static void efx_reset_work(struct work_struct *data) -{ - struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); - unsigned long pending; - enum reset_type method; - - pending = ACCESS_ONCE(efx->reset_pending); - method = fls(pending) - 1; - - if ((method == RESET_TYPE_RECOVER_OR_DISABLE || - method == RESET_TYPE_RECOVER_OR_ALL) && - efx_try_recovery(efx)) - return; - - if (!pending) + if (WARN_ON(efx_netdev_priv(efx->net_dev) != efx)) return; - rtnl_lock(); - - /* We checked the state in efx_schedule_reset() but it may - * have changed by now. Now that we have the RTNL lock, - * it cannot change again. - */ - if (efx->state == STATE_READY) - (void)efx_reset(efx, method); - - rtnl_unlock(); -} - -void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) -{ - enum reset_type method; - - if (efx->state == STATE_RECOVERY) { - netif_dbg(efx, drv, efx->net_dev, - "recovering: skip scheduling %s reset\n", - RESET_TYPE(type)); - return; - } - - switch (type) { - case RESET_TYPE_INVISIBLE: - case RESET_TYPE_ALL: - case RESET_TYPE_RECOVER_OR_ALL: - case RESET_TYPE_WORLD: - case RESET_TYPE_DISABLE: - case RESET_TYPE_RECOVER_OR_DISABLE: - method = type; - netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", - RESET_TYPE(method)); - break; - default: - method = efx->type->map_reset_reason(type); - netif_dbg(efx, drv, efx->net_dev, - "scheduling %s reset for %s\n", - RESET_TYPE(method), RESET_TYPE(type)); - break; + if (efx_dev_registered(efx)) { + strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); + efx_fini_mcdi_logging(efx); + device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); + unregister_netdev(efx->net_dev); } - - set_bit(method, &efx->reset_pending); - smp_mb(); /* ensure we change reset_pending before checking state */ - - /* If we're not READY then just leave the flags set as the cue - * to abort probing or reschedule the reset later. - */ - if (ACCESS_ONCE(efx->state) != STATE_READY) - return; - - /* efx_process_channel() will no longer read events once a - * reset is scheduled. So switch back to poll'd MCDI completions. 
*/ - efx_mcdi_mode_poll(efx); - - queue_work(reset_workqueue, &efx->reset_work); } /************************************************************************** @@ -2447,119 +882,45 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) **************************************************************************/ /* PCI device ID table */ -static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = { - {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, - PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0), - .driver_data = (unsigned long) &falcon_a1_nic_type}, - {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, - PCI_DEVICE_ID_SOLARFLARE_SFC4000B), - .driver_data = (unsigned long) &falcon_b0_nic_type}, - {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */ - .driver_data = (unsigned long) &siena_a0_nic_type}, - {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */ - .driver_data = (unsigned long) &siena_a0_nic_type}, +static const struct pci_device_id efx_pci_table[] = { + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903), /* SFC9120 VF */ + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923), /* SFC9140 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923), /* SFC9140 VF */ + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03), /* SFC9220 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03), /* SFC9220 VF */ + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03), /* SFC9250 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03), /* SFC9250 VF */ + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0c03), /* X4 PF (FF/LL) */ + .driver_data = (unsigned long)&efx_x4_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x2c03), /* X4 PF (FF only) */ + .driver_data = (unsigned long)&efx_x4_nic_type}, {0} /* end of list */ }; /************************************************************************** * - * Dummy PHY/MAC operations - * - * Can be used for some unimplemented operations - * Needed so all function pointers are valid and do not have to be tested - * before use - * - **************************************************************************/ -int efx_port_dummy_op_int(struct efx_nic *efx) -{ - return 0; -} -void efx_port_dummy_op_void(struct efx_nic *efx) {} - -static bool efx_port_dummy_op_poll(struct efx_nic *efx) -{ - return false; -} - -static const struct efx_phy_operations efx_dummy_phy_operations = { - .init = efx_port_dummy_op_int, - .reconfigure = efx_port_dummy_op_int, - .poll = efx_port_dummy_op_poll, - .fini = efx_port_dummy_op_void, -}; - -/************************************************************************** - * * Data housekeeping * **************************************************************************/ -/* This zeroes out and then fills in the invariants in a struct - * efx_nic (including all sub-structures). 
- */ -static int efx_init_struct(struct efx_nic *efx, - struct pci_dev *pci_dev, struct net_device *net_dev) -{ - int i; - - /* Initialise common structures */ - spin_lock_init(&efx->biu_lock); -#ifdef CONFIG_SFC_MTD - INIT_LIST_HEAD(&efx->mtd_list); -#endif - INIT_WORK(&efx->reset_work, efx_reset_work); - INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); - INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work); - efx->pci_dev = pci_dev; - efx->msg_enable = debug; - efx->state = STATE_UNINIT; - strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); - - efx->net_dev = net_dev; - spin_lock_init(&efx->stats_lock); - mutex_init(&efx->mac_lock); - efx->phy_op = &efx_dummy_phy_operations; - efx->mdio.dev = net_dev; - INIT_WORK(&efx->mac_work, efx_mac_work); - init_waitqueue_head(&efx->flush_wq); - - for (i = 0; i < EFX_MAX_CHANNELS; i++) { - efx->channel[i] = efx_alloc_channel(efx, i, NULL); - if (!efx->channel[i]) - goto fail; - } - - EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); - - /* Higher numbered interrupt modes are less capable! */ - efx->interrupt_mode = max(efx->type->max_interrupt_mode, - interrupt_mode); - - /* Would be good to use the net_dev name, but we're too early */ - snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s", - pci_name(pci_dev)); - efx->workqueue = create_singlethread_workqueue(efx->workqueue_name); - if (!efx->workqueue) - goto fail; - - return 0; - -fail: - efx_fini_struct(efx); - return -ENOMEM; -} - -static void efx_fini_struct(struct efx_nic *efx) +void efx_update_sw_stats(struct efx_nic *efx, u64 *stats) { - int i; - - for (i = 0; i < EFX_MAX_CHANNELS; i++) - kfree(efx->channel[i]); + u64 n_rx_nodesc_trunc = 0; + struct efx_channel *channel; - if (efx->workqueue) { - destroy_workqueue(efx->workqueue); - efx->workqueue = NULL; - } + efx_for_each_channel(channel, efx) + n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc; + stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc; + stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops); } /************************************************************************** @@ -2576,10 +937,11 @@ static void efx_pci_remove_main(struct efx_nic *efx) /* Flush reset_work. It can no longer be scheduled since we * are not READY. */ - BUG_ON(efx->state == STATE_READY); - cancel_work_sync(&efx->reset_work); + WARN_ON(efx_net_active(efx->state)); + efx_flush_reset_workqueue(efx); - efx_stop_interrupts(efx, false); + efx_disable_interrupts(efx); + efx_clear_interrupt_affinity(efx); efx_nic_fini_interrupt(efx); efx_fini_port(efx); efx->type->fini(efx); @@ -2588,10 +950,12 @@ static void efx_pci_remove_main(struct efx_nic *efx) } /* Final NIC shutdown - * This is called only at module unload (or hotplug removal). + * This is called only at module unload (or hotplug removal). A PF can call + * this on its VFs to ensure they are unbound first. 
*/ static void efx_pci_remove(struct pci_dev *pci_dev) { + struct efx_probe_data *probe_data; struct efx_nic *efx; efx = pci_get_drvdata(pci_dev); @@ -2600,11 +964,16 @@ static void efx_pci_remove(struct pci_dev *pci_dev) /* Mark the NIC as fini, then stop the interface */ rtnl_lock(); + efx_dissociate(efx); dev_close(efx->net_dev); - efx_stop_interrupts(efx, false); + efx_disable_interrupts(efx); + efx->state = STATE_UNINIT; rtnl_unlock(); - efx_sriov_fini(efx); + if (efx->type->sriov_fini) + efx->type->sriov_fini(efx); + + efx_fini_devlink_lock(efx); efx_unregister_netdev(efx); efx_mtd_remove(efx); @@ -2612,63 +981,47 @@ static void efx_pci_remove(struct pci_dev *pci_dev) efx_pci_remove_main(efx); efx_fini_io(efx); - netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); + pci_dbg(efx->pci_dev, "shutdown successful\n"); + efx_fini_devlink_and_unlock(efx); efx_fini_struct(efx); - pci_set_drvdata(pci_dev, NULL); free_netdev(efx->net_dev); - - pci_disable_pcie_error_reporting(pci_dev); + probe_data = container_of(efx, struct efx_probe_data, efx); + kfree(probe_data); }; /* NIC VPD information * Called during probe to display the part number of the - * installed NIC. VPD is potentially very large but this should - * always appear within the first 512 bytes. + * installed NIC. */ -#define SFC_VPD_LEN 512 -static void efx_print_product_vpd(struct efx_nic *efx) +static void efx_probe_vpd_strings(struct efx_nic *efx) { struct pci_dev *dev = efx->pci_dev; - char vpd_data[SFC_VPD_LEN]; - ssize_t vpd_size; - int i, j; - - /* Get the vpd data from the device */ - vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data); - if (vpd_size <= 0) { - netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n"); - return; - } + unsigned int vpd_size, kw_len; + u8 *vpd_data; + int start; - /* Get the Read only section */ - i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); - if (i < 0) { - netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n"); + vpd_data = pci_vpd_alloc(dev, &vpd_size); + if (IS_ERR(vpd_data)) { + pci_warn(dev, "Unable to read VPD\n"); return; } - j = pci_vpd_lrdt_size(&vpd_data[i]); - i += PCI_VPD_LRDT_TAG_SIZE; - if (i + j > vpd_size) - j = vpd_size - i; - - /* Get the Part number */ - i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN"); - if (i < 0) { - netif_err(efx, drv, efx->net_dev, "Part number not found\n"); - return; - } + start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, + PCI_VPD_RO_KEYWORD_PARTNO, &kw_len); + if (start < 0) + pci_err(dev, "Part number not found or incomplete\n"); + else + pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start); - j = pci_vpd_info_field_size(&vpd_data[i]); - i += PCI_VPD_INFO_FLD_HDR_SIZE; - if (i + j > vpd_size) { - netif_err(efx, drv, efx->net_dev, "Incomplete part number\n"); - return; - } + start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, + PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len); + if (start < 0) + pci_err(dev, "Serial number not found or incomplete\n"); + else + efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL); - netif_info(efx, drv, efx->net_dev, - "Part Number : %.*s\n", j, &vpd_data[i]); + kfree(vpd_data); } @@ -2686,10 +1039,11 @@ static int efx_pci_probe_main(struct efx_nic *efx) efx_init_napi(efx); + down_write(&efx->filter_sem); rc = efx->type->init(efx); + up_write(&efx->filter_sem); if (rc) { - netif_err(efx, probe, efx->net_dev, - "failed to initialise NIC\n"); + pci_err(efx->pci_dev, "failed to initialise NIC\n"); goto fail3; } @@ -2703,10 +1057,17 @@ static int 
efx_pci_probe_main(struct efx_nic *efx) rc = efx_nic_init_interrupt(efx); if (rc) goto fail5; - efx_start_interrupts(efx, false); + + efx_set_interrupt_affinity(efx); + rc = efx_enable_interrupts(efx); + if (rc) + goto fail6; return 0; + fail6: + efx_clear_interrupt_affinity(efx); + efx_nic_fini_interrupt(efx); fail5: efx_fini_port(efx); fail4: @@ -2718,6 +1079,64 @@ static int efx_pci_probe_main(struct efx_nic *efx) return rc; } +static int efx_pci_probe_post_io(struct efx_nic *efx) +{ + struct net_device *net_dev = efx->net_dev; + int rc = efx_pci_probe_main(efx); + + if (rc) + return rc; + + if (efx->type->sriov_init) { + rc = efx->type->sriov_init(efx); + if (rc) + pci_err(efx->pci_dev, "SR-IOV can't be enabled rc %d\n", + rc); + } + + /* Determine netdevice features */ + net_dev->features |= efx->type->offload_features; + + /* Add TSO features */ + if (efx->type->tso_versions && efx->type->tso_versions(efx)) + net_dev->features |= NETIF_F_TSO | NETIF_F_TSO6; + + /* Mask for features that also apply to VLAN devices */ + net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG | + NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | + NETIF_F_RXCSUM); + + /* Determine user configurable features */ + net_dev->hw_features |= net_dev->features & ~efx->fixed_features; + + /* Disable receiving frames with bad FCS, by default. */ + net_dev->features &= ~NETIF_F_RXALL; + + /* Disable VLAN filtering by default. It may be enforced if + * the feature is fixed (i.e. VLAN filters are required to + * receive VLAN tagged packets due to vPort restrictions). + */ + net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + net_dev->features |= efx->fixed_features; + + net_dev->xdp_features = NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_NDO_XMIT; + + /* devlink creation, registration and lock */ + rc = efx_probe_devlink_and_lock(efx); + if (rc) + pci_err(efx->pci_dev, "devlink registration failed"); + + rc = efx_register_netdev(efx); + efx_probe_devlink_unlock(efx); + if (!rc) + return 0; + + efx_pci_remove_main(efx); + return rc; +} + /* NIC initialisation * * This is called at module load (or hotplug insertion, @@ -2730,101 +1149,130 @@ static int efx_pci_probe_main(struct efx_nic *efx) static int efx_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *entry) { + struct efx_probe_data *probe_data, **probe_ptr; struct net_device *net_dev; struct efx_nic *efx; int rc; - /* Allocate and initialise a struct net_device and struct efx_nic */ - net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES, - EFX_MAX_RX_QUEUES); - if (!net_dev) + /* Allocate probe data and struct efx_nic */ + probe_data = kzalloc(sizeof(*probe_data), GFP_KERNEL); + if (!probe_data) return -ENOMEM; - efx = netdev_priv(net_dev); + probe_data->pci_dev = pci_dev; + efx = &probe_data->efx; + + /* Allocate and initialise a struct net_device */ + net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES); + if (!net_dev) { + rc = -ENOMEM; + goto fail0; + } + probe_ptr = netdev_priv(net_dev); + *probe_ptr = probe_data; + efx->net_dev = net_dev; efx->type = (const struct efx_nic_type *) entry->driver_data; - net_dev->features |= (efx->type->offload_features | NETIF_F_SG | - NETIF_F_HIGHDMA | NETIF_F_TSO | - NETIF_F_RXCSUM); - if (efx->type->offload_features & NETIF_F_V6_CSUM) - net_dev->features |= NETIF_F_TSO6; - /* Mask for features that also apply to VLAN devices */ - net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | - NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | - NETIF_F_RXCSUM); - /* All offloads can 
be toggled */ - net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA; + efx->fixed_features |= NETIF_F_HIGHDMA; + pci_set_drvdata(pci_dev, efx); SET_NETDEV_DEV(net_dev, &pci_dev->dev); - rc = efx_init_struct(efx, pci_dev, net_dev); + rc = efx_init_struct(efx, pci_dev); if (rc) goto fail1; - netif_info(efx, probe, efx->net_dev, - "Solarflare NIC detected\n"); + pci_info(pci_dev, "Solarflare NIC detected\n"); - efx_print_product_vpd(efx); + if (!efx->type->is_vf) + efx_probe_vpd_strings(efx); /* Set up basic I/O (BAR mappings etc) */ - rc = efx_init_io(efx); + rc = efx_init_io(efx, efx->type->mem_bar(efx), efx->type->max_dma_mask, + efx->type->mem_map_size(efx)); if (rc) goto fail2; - rc = efx_pci_probe_main(efx); + rc = efx_pci_probe_post_io(efx); + if (rc) { + /* On failure, retry once immediately. + * If we aborted probe due to a scheduled reset, dismiss it. + */ + efx->reset_pending = 0; + rc = efx_pci_probe_post_io(efx); + if (rc) { + /* On another failure, retry once more + * after a 50-305ms delay. + */ + unsigned char r; + + get_random_bytes(&r, 1); + msleep((unsigned int)r + 50); + efx->reset_pending = 0; + rc = efx_pci_probe_post_io(efx); + } + } if (rc) goto fail3; - rc = efx_register_netdev(efx); - if (rc) - goto fail4; - - rc = efx_sriov_init(efx); - if (rc) - netif_err(efx, probe, efx->net_dev, - "SR-IOV can't be enabled rc %d\n", rc); - netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); /* Try to create MTDs, but allow this to fail */ rtnl_lock(); rc = efx_mtd_probe(efx); rtnl_unlock(); - if (rc) + if (rc && rc != -EPERM) netif_warn(efx, probe, efx->net_dev, "failed to create MTDs (%d)\n", rc); - rc = pci_enable_pcie_error_reporting(pci_dev); - if (rc && rc != -EINVAL) - netif_warn(efx, probe, efx->net_dev, - "pci_enable_pcie_error_reporting failed (%d)\n", rc); + if (efx->type->udp_tnl_push_ports) + efx->type->udp_tnl_push_ports(efx); return 0; - fail4: - efx_pci_remove_main(efx); fail3: efx_fini_io(efx); fail2: efx_fini_struct(efx); fail1: - pci_set_drvdata(pci_dev, NULL); WARN_ON(rc > 0); netif_dbg(efx, drv, efx->net_dev, "initialisation failed. 
rc=%d\n", rc); free_netdev(net_dev); + fail0: + kfree(probe_data); return rc; } +/* efx_pci_sriov_configure returns the actual number of Virtual Functions + * enabled on success + */ +#ifdef CONFIG_SFC_SRIOV +static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + int rc; + struct efx_nic *efx = pci_get_drvdata(dev); + + if (efx->type->sriov_configure) { + rc = efx->type->sriov_configure(efx, num_vfs); + if (rc) + return rc; + else + return num_vfs; + } else + return -EOPNOTSUPP; +} +#endif + static int efx_pm_freeze(struct device *dev) { - struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_nic *efx = dev_get_drvdata(dev); rtnl_lock(); - if (efx->state != STATE_DISABLED) { - efx->state = STATE_UNINIT; - + if (efx_net_active(efx->state)) { efx_device_detach_sync(efx); efx_stop_all(efx); - efx_stop_interrupts(efx, false); + efx_disable_interrupts(efx); + + efx->state = efx_freeze(efx->state); } rtnl_unlock(); @@ -2832,24 +1280,38 @@ static int efx_pm_freeze(struct device *dev) return 0; } +static void efx_pci_shutdown(struct pci_dev *pci_dev) +{ + struct efx_nic *efx = pci_get_drvdata(pci_dev); + + if (!efx) + return; + + efx_pm_freeze(&pci_dev->dev); + pci_disable_device(pci_dev); +} + static int efx_pm_thaw(struct device *dev) { - struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + int rc; + struct efx_nic *efx = dev_get_drvdata(dev); rtnl_lock(); - if (efx->state != STATE_DISABLED) { - efx_start_interrupts(efx, false); + if (efx_frozen(efx->state)) { + rc = efx_enable_interrupts(efx); + if (rc) + goto fail; mutex_lock(&efx->mac_lock); - efx->phy_op->reconfigure(efx); + efx_mcdi_port_reconfigure(efx); mutex_unlock(&efx->mac_lock); efx_start_all(efx); - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); - efx->state = STATE_READY; + efx->state = efx_thaw(efx->state); efx->type->resume_wol(efx); } @@ -2857,9 +1319,14 @@ static int efx_pm_thaw(struct device *dev) rtnl_unlock(); /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ - queue_work(reset_workqueue, &efx->reset_work); + efx_queue_reset_work(efx); return 0; + +fail: + rtnl_unlock(); + + return rc; } static int efx_pm_poweroff(struct device *dev) @@ -2893,11 +1360,13 @@ static int efx_pm_resume(struct device *dev) rc = efx->type->reset(efx, RESET_TYPE_ALL); if (rc) return rc; + down_write(&efx->filter_sem); rc = efx->type->init(efx); + up_write(&efx->filter_sem); if (rc) return rc; - efx_pm_thaw(dev); - return 0; + rc = efx_pm_thaw(dev); + return rc; } static int efx_pm_suspend(struct device *dev) @@ -2920,112 +1389,17 @@ static const struct dev_pm_ops efx_pm_ops = { .restore = efx_pm_resume, }; -/* A PCI error affecting this device was detected. - * At this point MMIO and DMA may be disabled. - * Stop the software path and request a slot reset. - */ -static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev, - enum pci_channel_state state) -{ - pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; - struct efx_nic *efx = pci_get_drvdata(pdev); - - if (state == pci_channel_io_perm_failure) - return PCI_ERS_RESULT_DISCONNECT; - - rtnl_lock(); - - if (efx->state != STATE_DISABLED) { - efx->state = STATE_RECOVERY; - efx->reset_pending = 0; - - efx_device_detach_sync(efx); - - efx_stop_all(efx); - efx_stop_interrupts(efx, false); - - status = PCI_ERS_RESULT_NEED_RESET; - } else { - /* If the interface is disabled we don't want to do anything - * with it. 
- */ - status = PCI_ERS_RESULT_RECOVERED; - } - - rtnl_unlock(); - - pci_disable_device(pdev); - - return status; -} - -/* Fake a successfull reset, which will be performed later in efx_io_resume. */ -static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev) -{ - struct efx_nic *efx = pci_get_drvdata(pdev); - pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; - int rc; - - if (pci_enable_device(pdev)) { - netif_err(efx, hw, efx->net_dev, - "Cannot re-enable PCI device after reset.\n"); - status = PCI_ERS_RESULT_DISCONNECT; - } - - rc = pci_cleanup_aer_uncorrect_error_status(pdev); - if (rc) { - netif_err(efx, hw, efx->net_dev, - "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc); - /* Non-fatal error. Continue. */ - } - - return status; -} - -/* Perform the actual reset and resume I/O operations. */ -static void efx_io_resume(struct pci_dev *pdev) -{ - struct efx_nic *efx = pci_get_drvdata(pdev); - int rc; - - rtnl_lock(); - - if (efx->state == STATE_DISABLED) - goto out; - - rc = efx_reset(efx, RESET_TYPE_ALL); - if (rc) { - netif_err(efx, hw, efx->net_dev, - "efx_reset failed after PCI error (%d)\n", rc); - } else { - efx->state = STATE_READY; - netif_dbg(efx, hw, efx->net_dev, - "Done resetting and resuming IO after PCI error.\n"); - } - -out: - rtnl_unlock(); -} - -/* For simplicity and reliability, we always require a slot reset and try to - * reset the hardware when a pci error affecting the device is detected. - * We leave both the link_reset and mmio_enabled callback unimplemented: - * with our request for slot reset the mmio_enabled callback will never be - * called, and the link_reset callback is not used by AER or EEH mechanisms. - */ -static struct pci_error_handlers efx_err_handlers = { - .error_detected = efx_io_error_detected, - .slot_reset = efx_io_slot_reset, - .resume = efx_io_resume, -}; - static struct pci_driver efx_pci_driver = { .name = KBUILD_MODNAME, .id_table = efx_pci_table, .probe = efx_pci_probe, .remove = efx_pci_remove, .driver.pm = &efx_pm_ops, + .shutdown = efx_pci_shutdown, .err_handler = &efx_err_handlers, +#ifdef CONFIG_SFC_SRIOV + .sriov_configure = efx_pci_sriov_configure, +#endif }; /************************************************************************** @@ -3034,41 +1408,35 @@ static struct pci_driver efx_pci_driver = { * *************************************************************************/ -module_param(interrupt_mode, uint, 0444); -MODULE_PARM_DESC(interrupt_mode, - "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); - static int __init efx_init_module(void) { int rc; - printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n"); + printk(KERN_INFO "Solarflare NET driver\n"); rc = register_netdevice_notifier(&efx_netdev_notifier); if (rc) goto err_notifier; - rc = efx_init_sriov(); + rc = efx_create_reset_workqueue(); if (rc) - goto err_sriov; - - reset_workqueue = create_singlethread_workqueue("sfc_reset"); - if (!reset_workqueue) { - rc = -ENOMEM; goto err_reset; - } rc = pci_register_driver(&efx_pci_driver); if (rc < 0) goto err_pci; + rc = pci_register_driver(&ef100_pci_driver); + if (rc < 0) + goto err_pci_ef100; + return 0; + err_pci_ef100: + pci_unregister_driver(&efx_pci_driver); err_pci: - destroy_workqueue(reset_workqueue); + efx_destroy_reset_workqueue(); err_reset: - efx_fini_sriov(); - err_sriov: unregister_netdevice_notifier(&efx_netdev_notifier); err_notifier: return rc; @@ -3078,9 +1446,9 @@ static void __exit efx_exit_module(void) { printk(KERN_INFO "Solarflare NET driver unloading\n"); + 
pci_unregister_driver(&ef100_pci_driver);
 pci_unregister_driver(&efx_pci_driver);
- destroy_workqueue(reset_workqueue);
- efx_fini_sriov();
+ efx_destroy_reset_workqueue();
 unregister_netdevice_notifier(&efx_netdev_notifier);
}

@@ -3090,6 +1458,6 @@ module_exit(efx_exit_module);

MODULE_AUTHOR("Solarflare Communications and "
 "Michael Brown <mbrown@fensystems.co.uk>");
-MODULE_DESCRIPTION("Solarflare Communications network driver");
+MODULE_DESCRIPTION("Solarflare network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);
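
Editor's note on the netdev_queue_stats callbacks added above: the driver reports per-queue counters as deltas against an old_* snapshot taken when the datapath was last started, while efx_get_base_stats() folds everything older (and any queue outside the real_num_*_queues range) into the base totals, so the per-queue deltas plus the base add up to device-wide counts. A reduced illustration of that snapshot scheme, with hypothetical struct and field names that are not part of this driver:

    #include <linux/types.h>

    struct my_rx_queue {
    	u64 rx_packets;		/* running total, bumped on each completion */
    	u64 old_rx_packets;	/* snapshot taken at last datapath start */
    };

    /* On datapath (re)start: everything seen so far becomes "base" traffic */
    static void my_rxq_snapshot(struct my_rx_queue *q)
    {
    	q->old_rx_packets = q->rx_packets;
    }

    /* Per-queue query: report only traffic since the last start */
    static u64 my_rxq_packets(const struct my_rx_queue *q)
    {
    	return q->rx_packets - q->old_rx_packets;
    }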
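
Editor's note on the XDP attach path: efx_xdp_setup_prog() follows the usual ndo_bpf pattern of validating preconditions (here the MTU against the XDP buffer size, plus an earlier rxq_info failure) and then publishing the new program with RCU before dropping the reference to the old one. A minimal sketch of that pattern, assuming the core's rtnl locking for ndo_bpf; struct my_priv and my_xdp_max_mtu() are illustrative placeholders, not symbols from this driver:

    #include <linux/bpf.h>
    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>

    struct my_priv {
    	struct bpf_prog __rcu *xdp_prog;
    };

    /* Placeholder bound; a real driver derives this from its RX buffer layout */
    static unsigned int my_xdp_max_mtu(struct net_device *dev)
    {
    	return 1500;
    }

    static int my_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
    {
    	struct my_priv *priv = netdev_priv(dev);
    	struct bpf_prog *old_prog;

    	/* Refuse programs that could see frames larger than the RX buffer */
    	if (prog && dev->mtu > my_xdp_max_mtu(dev))
    		return -EINVAL;

    	/* The core invokes ndo_bpf with the rtnl lock held */
    	old_prog = rtnl_dereference(priv->xdp_prog);
    	rcu_assign_pointer(priv->xdp_prog, prog);
    	if (old_prog)
    		bpf_prog_put(old_prog);	/* drop the ref passed in by the core */

    	return 0;
    }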
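
Editor's note on the probe retry added to efx_pci_probe(): the "50-305ms delay" in the comment follows directly from the arithmetic, since get_random_bytes() fills r with a single byte (0..255) and msleep() then sleeps that many milliseconds plus a 50 ms floor. The back-off in isolation:

    #include <linux/delay.h>
    #include <linux/random.h>

    /* Sleep for a randomised 50..305 ms, as in the second probe retry above */
    static void my_probe_backoff(void)
    {
    	unsigned char r;

    	get_random_bytes(&r, 1);
    	msleep((unsigned int)r + 50);
    }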
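
Editor's note on efx_pci_sriov_configure(): a pci_driver .sriov_configure callback runs when userspace writes the device's sriov_numvfs sysfs attribute, and must return the number of VFs actually enabled on success or a negative errno; this driver delegates to the per-NIC-type hook and reports -EOPNOTSUPP when none exists. A generic sketch of that callback contract, using the core pci_enable_sriov()/pci_disable_sriov() helpers rather than this driver's type hooks:

    #include <linux/pci.h>

    static int my_sriov_configure(struct pci_dev *dev, int num_vfs)
    {
    	int rc;

    	if (num_vfs) {
    		rc = pci_enable_sriov(dev, num_vfs);
    		if (rc)
    			return rc;
    	} else {
    		pci_disable_sriov(dev);	/* num_vfs == 0 means disable */
    	}

    	/* Success: report how many VFs are now enabled */
    	return num_vfs;
    }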