Diffstat (limited to 'drivers/net/ethernet/sfc/efx.c')
-rw-r--r--  drivers/net/ethernet/sfc/efx.c  738
1 files changed, 258 insertions, 480 deletions
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 256807c28ff7..112e55b98ed3 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -5,6 +5,7 @@
* Copyright 2005-2013 Solarflare Communications Inc.
*/
+#include <linux/filter.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
@@ -17,53 +18,40 @@
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
-#include <linux/aer.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include <net/gre.h>
#include <net/udp_tunnel.h>
+#include <net/netdev_queues.h>
#include "efx.h"
#include "efx_common.h"
#include "efx_channels.h"
+#include "ef100.h"
#include "rx_common.h"
#include "tx_common.h"
#include "nic.h"
#include "io.h"
#include "selftest.h"
#include "sriov.h"
+#include "efx_devlink.h"
-#include "mcdi.h"
+#include "mcdi_port_common.h"
#include "mcdi_pcol.h"
#include "workarounds.h"
/**************************************************************************
*
- * Type name strings
- *
- **************************************************************************
- */
-
-/* UDP tunnel type names */
-static const char *const efx_udp_tunnel_type_names[] = {
- [TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan",
- [TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE] = "geneve",
-};
-
-void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen)
-{
- if (type < ARRAY_SIZE(efx_udp_tunnel_type_names) &&
- efx_udp_tunnel_type_names[type] != NULL)
- snprintf(buf, buflen, "%s", efx_udp_tunnel_type_names[type]);
- else
- snprintf(buf, buflen, "type %d", type);
-}
-
-/**************************************************************************
- *
* Configurable values
*
*************************************************************************/
+module_param_named(interrupt_mode, efx_interrupt_mode, uint, 0444);
+MODULE_PARM_DESC(interrupt_mode,
+ "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
+
+module_param(rss_cpus, uint, 0444);
+MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
+
/*
* Use separate channels for TX and RX events
*
@@ -119,44 +107,12 @@ static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
u32 flags);
-#define EFX_ASSERT_RESET_SERIALISED(efx) \
- do { \
- if ((efx->state == STATE_READY) || \
- (efx->state == STATE_RECOVERY) || \
- (efx->state == STATE_DISABLED)) \
- ASSERT_RTNL(); \
- } while (0)
-
/**************************************************************************
*
* Port handling
*
**************************************************************************/
-/* Equivalent to efx_link_set_advertising with all-zeroes, except does not
- * force the Autoneg bit on.
- */
-void efx_link_clear_advertising(struct efx_nic *efx)
-{
- bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
- efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
-}
-
-void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
-{
- efx->wanted_fc = wanted_fc;
- if (efx->link_advertising[0]) {
- if (wanted_fc & EFX_FC_RX)
- efx->link_advertising[0] |= (ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
- else
- efx->link_advertising[0] &= ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
- if (wanted_fc & EFX_FC_TX)
- efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
- }
-}
-
static void efx_fini_port(struct efx_nic *efx);
static int efx_probe_port(struct efx_nic *efx)
@@ -174,7 +130,7 @@ static int efx_probe_port(struct efx_nic *efx)
return rc;
/* Initialise MAC address to permanent address */
- ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);
+ eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr);
return 0;
}
@@ -187,27 +143,17 @@ static int efx_init_port(struct efx_nic *efx)
mutex_lock(&efx->mac_lock);
- rc = efx->phy_op->init(efx);
- if (rc)
- goto fail1;
-
efx->port_initialized = true;
- /* Reconfigure the MAC before creating dma queues (required for
- * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
- efx_mac_reconfigure(efx);
-
/* Ensure the PHY advertises the correct flow control settings */
- rc = efx->phy_op->reconfigure(efx);
+ rc = efx_mcdi_port_reconfigure(efx);
if (rc && rc != -EPERM)
- goto fail2;
+ goto fail;
mutex_unlock(&efx->mac_lock);
return 0;
-fail2:
- efx->phy_op->fini(efx);
-fail1:
+fail:
mutex_unlock(&efx->mac_lock);
return rc;
}
@@ -219,7 +165,6 @@ static void efx_fini_port(struct efx_nic *efx)
if (!efx->port_initialized)
return;
- efx->phy_op->fini(efx);
efx->port_initialized = false;
efx->link_state.up = false;
@@ -355,10 +300,7 @@ static int efx_probe_nic(struct efx_nic *efx)
if (efx->n_channels > 1)
netdev_rss_key_fill(efx->rss_context.rx_hash_key,
sizeof(efx->rss_context.rx_hash_key));
- efx_set_default_rx_indir_table(efx, &efx->rss_context);
-
- netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
- netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
+ efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table);
/* Initialise the interrupt moderation settings */
efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
@@ -409,7 +351,6 @@ static int efx_probe_all(struct efx_nic *efx)
rc = -EINVAL;
goto fail3;
}
- efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
#ifdef CONFIG_SFC_SRIOV
rc = efx->type->vswitching_probe(efx);
@@ -430,6 +371,8 @@ static int efx_probe_all(struct efx_nic *efx)
if (rc)
goto fail5;
+ efx->state = STATE_NET_DOWN;
+
return 0;
fail5:
@@ -475,14 +418,6 @@ unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
return usecs * 1000 / efx->timer_quantum_ns;
}
-unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks)
-{
- /* We must round up when converting ticks to microseconds
- * because we round down when converting the other way.
- */
- return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
-}
-
/* Set interrupt moderation parameters */
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
@@ -541,33 +476,6 @@ void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
/**************************************************************************
*
- * ioctls
- *
- *************************************************************************/
-
-/* Net device ioctl
- * Context: process, rtnl_lock() held.
- */
-static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct mii_ioctl_data *data = if_mii(ifr);
-
- if (cmd == SIOCSHWTSTAMP)
- return efx_ptp_set_ts_config(efx, ifr);
- if (cmd == SIOCGHWTSTAMP)
- return efx_ptp_get_ts_config(efx, ifr);
-
- /* Convert phy_id from older PRTAD/DEVAD format */
- if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
- (data->phy_id & 0xfc00) == 0x0400)
- data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
-
- return mdio_mii_ioctl(&efx->mdio, data, cmd);
-}
-
-/**************************************************************************
- *
* Kernel net device interface
*
*************************************************************************/
@@ -575,7 +483,7 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
/* Context: process, rtnl_lock() held. */
int efx_net_open(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
@@ -596,7 +504,9 @@ int efx_net_open(struct net_device *net_dev)
efx_start_all(efx);
if (efx->state == STATE_DISABLED || efx->reset_pending)
netif_device_detach(efx->net_dev);
- efx_selftest_async_start(efx);
+ else
+ efx->state = STATE_NET_UP;
+
return 0;
}
@@ -606,7 +516,7 @@ int efx_net_open(struct net_device *net_dev)
*/
int efx_net_stop(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
raw_smp_processor_id());
@@ -617,112 +527,9 @@ int efx_net_stop(struct net_device *net_dev)
return 0;
}
-/* Context: netif_tx_lock held, BHs disabled. */
-static void efx_watchdog(struct net_device *net_dev, unsigned int txqueue)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- netif_err(efx, tx_err, efx->net_dev,
- "TX stuck with port_enabled=%d: resetting channels\n",
- efx->port_enabled);
-
- efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
-}
-
-static int efx_set_mac_address(struct net_device *net_dev, void *data)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct sockaddr *addr = data;
- u8 *new_addr = addr->sa_data;
- u8 old_addr[6];
- int rc;
-
- if (!is_valid_ether_addr(new_addr)) {
- netif_err(efx, drv, efx->net_dev,
- "invalid ethernet MAC address requested: %pM\n",
- new_addr);
- return -EADDRNOTAVAIL;
- }
-
- /* save old address */
- ether_addr_copy(old_addr, net_dev->dev_addr);
- ether_addr_copy(net_dev->dev_addr, new_addr);
- if (efx->type->set_mac_address) {
- rc = efx->type->set_mac_address(efx);
- if (rc) {
- ether_addr_copy(net_dev->dev_addr, old_addr);
- return rc;
- }
- }
-
- /* Reconfigure the MAC */
- mutex_lock(&efx->mac_lock);
- efx_mac_reconfigure(efx);
- mutex_unlock(&efx->mac_lock);
-
- return 0;
-}
-
-/* Context: netif_addr_lock held, BHs disabled. */
-static void efx_set_rx_mode(struct net_device *net_dev)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- if (efx->port_enabled)
- queue_work(efx->workqueue, &efx->mac_work);
- /* Otherwise efx_start_port() will do this */
-}
-
-static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- int rc;
-
- /* If disabling RX n-tuple filtering, clear existing filters */
- if (net_dev->features & ~data & NETIF_F_NTUPLE) {
- rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
- if (rc)
- return rc;
- }
-
- /* If Rx VLAN filter is changed, update filters via mac_reconfigure.
- * If rx-fcs is changed, mac_reconfigure updates that too.
- */
- if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_RXFCS)) {
- /* efx_set_rx_mode() will schedule MAC work to update filters
- * when a new features are finally set in net_dev.
- */
- efx_set_rx_mode(net_dev);
- }
-
- return 0;
-}
-
-static int efx_get_phys_port_id(struct net_device *net_dev,
- struct netdev_phys_item_id *ppid)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- if (efx->type->get_phys_port_id)
- return efx->type->get_phys_port_id(efx, ppid);
- else
- return -EOPNOTSUPP;
-}
-
-static int efx_get_phys_port_name(struct net_device *net_dev,
- char *name, size_t len)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- if (snprintf(name, len, "p%u", efx->port_num) >= len)
- return -EINVAL;
- return 0;
-}
-
static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->vlan_rx_add_vid)
return efx->type->vlan_rx_add_vid(efx, proto, vid);
@@ -732,7 +539,7 @@ static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid
static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->vlan_rx_kill_vid)
return efx->type->vlan_rx_kill_vid(efx, proto, vid);
@@ -740,50 +547,21 @@ static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vi
return -EOPNOTSUPP;
}
-static int efx_udp_tunnel_type_map(enum udp_parsable_tunnel_type in)
+static int efx_hwtstamp_set(struct net_device *net_dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- switch (in) {
- case UDP_TUNNEL_TYPE_VXLAN:
- return TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN;
- case UDP_TUNNEL_TYPE_GENEVE:
- return TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE;
- default:
- return -1;
- }
-}
-
-static void efx_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
-{
- struct efx_nic *efx = netdev_priv(dev);
- struct efx_udp_tunnel tnl;
- int efx_tunnel_type;
-
- efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
- if (efx_tunnel_type < 0)
- return;
-
- tnl.type = (u16)efx_tunnel_type;
- tnl.port = ti->port;
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
- if (efx->type->udp_tnl_add_port)
- (void)efx->type->udp_tnl_add_port(efx, tnl);
+ return efx_ptp_set_ts_config(efx, config, extack);
}
-static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
+static int efx_hwtstamp_get(struct net_device *net_dev,
+ struct kernel_hwtstamp_config *config)
{
- struct efx_nic *efx = netdev_priv(dev);
- struct efx_udp_tunnel tnl;
- int efx_tunnel_type;
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
- efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
- if (efx_tunnel_type < 0)
- return;
-
- tnl.type = (u16)efx_tunnel_type;
- tnl.port = ti->port;
-
- if (efx->type->udp_tnl_del_port)
- (void)efx->type->udp_tnl_del_port(efx, tnl);
+ return efx_ptp_get_ts_config(efx, config);
}
static const struct net_device_ops efx_netdev_ops = {
@@ -793,13 +571,15 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_tx_timeout = efx_watchdog,
.ndo_start_xmit = efx_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = efx_ioctl,
.ndo_change_mtu = efx_change_mtu,
.ndo_set_mac_address = efx_set_mac_address,
.ndo_set_rx_mode = efx_set_rx_mode,
.ndo_set_features = efx_set_features,
+ .ndo_features_check = efx_features_check,
.ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid,
+ .ndo_hwtstamp_set = efx_hwtstamp_set,
+ .ndo_hwtstamp_get = efx_hwtstamp_get,
#ifdef CONFIG_SFC_SRIOV
.ndo_set_vf_mac = efx_sriov_set_vf_mac,
.ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
@@ -809,16 +589,120 @@ static const struct net_device_ops efx_netdev_ops = {
#endif
.ndo_get_phys_port_id = efx_get_phys_port_id,
.ndo_get_phys_port_name = efx_get_phys_port_name,
- .ndo_setup_tc = efx_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs,
#endif
- .ndo_udp_tunnel_add = efx_udp_tunnel_add,
- .ndo_udp_tunnel_del = efx_udp_tunnel_del,
.ndo_xdp_xmit = efx_xdp_xmit,
.ndo_bpf = efx_xdp
};
+static void efx_get_queue_stats_rx(struct net_device *net_dev, int idx,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
+
+ channel = efx_get_channel(efx, idx);
+ rx_queue = efx_channel_get_rx_queue(channel);
+ /* Count only packets since last time datapath was started */
+ stats->packets = rx_queue->rx_packets - rx_queue->old_rx_packets;
+ stats->bytes = rx_queue->rx_bytes - rx_queue->old_rx_bytes;
+ stats->hw_drops = efx_get_queue_stat_rx_hw_drops(channel) -
+ channel->old_n_rx_hw_drops;
+ stats->hw_drop_overruns = channel->n_rx_nodesc_trunc -
+ channel->old_n_rx_hw_drop_overruns;
+}
+
+static void efx_get_queue_stats_tx(struct net_device *net_dev, int idx,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ struct efx_tx_queue *tx_queue;
+ struct efx_channel *channel;
+
+ channel = efx_get_tx_channel(efx, idx);
+ stats->packets = 0;
+ stats->bytes = 0;
+ stats->hw_gso_packets = 0;
+ stats->hw_gso_wire_packets = 0;
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ stats->packets += tx_queue->complete_packets -
+ tx_queue->old_complete_packets;
+ stats->bytes += tx_queue->complete_bytes -
+ tx_queue->old_complete_bytes;
+ /* Note that, unlike stats->packets and stats->bytes,
+ * these count TXes enqueued, rather than completed,
+ * which may not be what users expect.
+ */
+ stats->hw_gso_packets += tx_queue->tso_bursts -
+ tx_queue->old_tso_bursts;
+ stats->hw_gso_wire_packets += tx_queue->tso_packets -
+ tx_queue->old_tso_packets;
+ }
+}
+
+static void efx_get_base_stats(struct net_device *net_dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
+
+ rx->packets = 0;
+ rx->bytes = 0;
+ rx->hw_drops = 0;
+ rx->hw_drop_overruns = 0;
+ tx->packets = 0;
+ tx->bytes = 0;
+ tx->hw_gso_packets = 0;
+ tx->hw_gso_wire_packets = 0;
+
+ /* Count all packets on non-core queues, and packets before last
+ * datapath start on core queues.
+ */
+ efx_for_each_channel(channel, efx) {
+ rx_queue = efx_channel_get_rx_queue(channel);
+ if (channel->channel >= net_dev->real_num_rx_queues) {
+ rx->packets += rx_queue->rx_packets;
+ rx->bytes += rx_queue->rx_bytes;
+ rx->hw_drops += efx_get_queue_stat_rx_hw_drops(channel);
+ rx->hw_drop_overruns += channel->n_rx_nodesc_trunc;
+ } else {
+ rx->packets += rx_queue->old_rx_packets;
+ rx->bytes += rx_queue->old_rx_bytes;
+ rx->hw_drops += channel->old_n_rx_hw_drops;
+ rx->hw_drop_overruns += channel->old_n_rx_hw_drop_overruns;
+ }
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ if (channel->channel < efx->tx_channel_offset ||
+ channel->channel >= efx->tx_channel_offset +
+ net_dev->real_num_tx_queues) {
+ tx->packets += tx_queue->complete_packets;
+ tx->bytes += tx_queue->complete_bytes;
+ tx->hw_gso_packets += tx_queue->tso_bursts;
+ tx->hw_gso_wire_packets += tx_queue->tso_packets;
+ } else {
+ tx->packets += tx_queue->old_complete_packets;
+ tx->bytes += tx_queue->old_complete_bytes;
+ tx->hw_gso_packets += tx_queue->old_tso_bursts;
+ tx->hw_gso_wire_packets += tx_queue->old_tso_packets;
+ }
+ /* Include XDP TX in device-wide stats */
+ tx->packets += tx_queue->complete_xdp_packets;
+ tx->bytes += tx_queue->complete_xdp_bytes;
+ }
+ }
+}
+
+static const struct netdev_stat_ops efx_stat_ops = {
+ .get_queue_stats_rx = efx_get_queue_stats_rx,
+ .get_queue_stats_tx = efx_get_queue_stats_tx,
+ .get_base_stats = efx_get_base_stats,
+};
+
static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
{
struct bpf_prog *old_prog;
@@ -848,16 +732,11 @@ static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
/* Context: process, rtnl_lock() held. */
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
- struct efx_nic *efx = netdev_priv(dev);
- struct bpf_prog *xdp_prog;
+ struct efx_nic *efx = efx_netdev_priv(dev);
switch (xdp->command) {
case XDP_SETUP_PROG:
return efx_xdp_setup_prog(efx, xdp->prog);
- case XDP_QUERY_PROG:
- xdp_prog = rtnl_dereference(efx->xdp_prog);
- xdp->prog_id = xdp_prog ? xdp_prog->aux->id : 0;
- return 0;
default:
return -EINVAL;
}
@@ -866,7 +745,7 @@ static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
u32 flags)
{
- struct efx_nic *efx = netdev_priv(dev);
+ struct efx_nic *efx = efx_netdev_priv(dev);
if (!netif_running(dev))
return -EINVAL;
@@ -888,7 +767,7 @@ static int efx_netdev_event(struct notifier_block *this,
if ((net_dev->netdev_ops == &efx_netdev_ops) &&
event == NETDEV_CHANGENAME)
- efx_update_name(netdev_priv(net_dev));
+ efx_update_name(efx_netdev_priv(net_dev));
return NOTIFY_DONE;
}
@@ -897,13 +776,13 @@ static struct notifier_block efx_netdev_notifier = {
.notifier_call = efx_netdev_event,
};
-static ssize_t
-show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t phy_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct efx_nic *efx = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", efx->phy_type);
}
-static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
+static DEVICE_ATTR_RO(phy_type);
static int efx_register_netdev(struct efx_nic *efx)
{
@@ -914,10 +793,11 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->watchdog_timeo = 5 * HZ;
net_dev->irq = efx->pci_dev->irq;
net_dev->netdev_ops = &efx_netdev_ops;
+ net_dev->stat_ops = &efx_stat_ops;
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
net_dev->priv_flags |= IFF_UNICAST_FLT;
net_dev->ethtool_ops = &efx_ethtool_ops;
- net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
+ netif_set_tso_max_segs(net_dev, EFX_TSO_MAX_SEGS);
net_dev->min_mtu = EFX_MIN_MTU;
net_dev->max_mtu = EFX_MAX_MTU;
@@ -927,11 +807,8 @@ static int efx_register_netdev(struct efx_nic *efx)
* already requested. If so, the NIC is probably hosed so we
* abort.
*/
- efx->state = STATE_READY;
- smp_mb(); /* ensure we change state before checking reset_pending */
if (efx->reset_pending) {
- netif_err(efx, probe, efx->net_dev,
- "aborting probe due to scheduled reset\n");
+ pci_err(efx->pci_dev, "aborting probe due to scheduled reset\n");
rc = -EIO;
goto fail_locked;
}
@@ -956,6 +833,8 @@ static int efx_register_netdev(struct efx_nic *efx)
efx_associate(efx);
+ efx->state = STATE_NET_DOWN;
+
rtnl_unlock();
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -985,10 +864,11 @@ static void efx_unregister_netdev(struct efx_nic *efx)
if (!efx->net_dev)
return;
- BUG_ON(netdev_priv(efx->net_dev) != efx);
+ if (WARN_ON(efx_netdev_priv(efx->net_dev) != efx))
+ return;
if (efx_dev_registered(efx)) {
- strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+ strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
efx_fini_mcdi_logging(efx);
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
unregister_netdev(efx->net_dev);
@@ -1003,10 +883,6 @@ static void efx_unregister_netdev(struct efx_nic *efx)
/* PCI device ID table */
static const struct pci_device_id efx_pci_table[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */
- .driver_data = (unsigned long) &siena_a0_nic_type},
- {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */
- .driver_data = (unsigned long) &siena_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903), /* SFC9120 VF */
@@ -1023,6 +899,10 @@ static const struct pci_device_id efx_pci_table[] = {
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03), /* SFC9250 VF */
.driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0c03), /* X4 PF (FF/LL) */
+ .driver_data = (unsigned long)&efx_x4_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x2c03), /* X4 PF (FF only) */
+ .driver_data = (unsigned long)&efx_x4_nic_type},
{0} /* end of list */
};
@@ -1057,7 +937,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
/* Flush reset_work. It can no longer be scheduled since we
* are not READY.
*/
- BUG_ON(efx->state == STATE_READY);
+ WARN_ON(efx_net_active(efx->state));
efx_flush_reset_workqueue(efx);
efx_disable_interrupts(efx);
@@ -1075,6 +955,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
*/
static void efx_pci_remove(struct pci_dev *pci_dev)
{
+ struct efx_probe_data *probe_data;
struct efx_nic *efx;
efx = pci_get_drvdata(pci_dev);
@@ -1092,91 +973,55 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
if (efx->type->sriov_fini)
efx->type->sriov_fini(efx);
+ efx_fini_devlink_lock(efx);
efx_unregister_netdev(efx);
efx_mtd_remove(efx);
efx_pci_remove_main(efx);
- efx_fini_io(efx, efx->type->mem_bar(efx));
- netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
+ efx_fini_io(efx);
+ pci_dbg(efx->pci_dev, "shutdown successful\n");
+ efx_fini_devlink_and_unlock(efx);
efx_fini_struct(efx);
free_netdev(efx->net_dev);
-
- pci_disable_pcie_error_reporting(pci_dev);
+ probe_data = container_of(efx, struct efx_probe_data, efx);
+ kfree(probe_data);
};
/* NIC VPD information
* Called during probe to display the part number of the
- * installed NIC. VPD is potentially very large but this should
- * always appear within the first 512 bytes.
+ * installed NIC.
*/
-#define SFC_VPD_LEN 512
static void efx_probe_vpd_strings(struct efx_nic *efx)
{
struct pci_dev *dev = efx->pci_dev;
- char vpd_data[SFC_VPD_LEN];
- ssize_t vpd_size;
- int ro_start, ro_size, i, j;
-
- /* Get the vpd data from the device */
- vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
- if (vpd_size <= 0) {
- netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
- return;
- }
+ unsigned int vpd_size, kw_len;
+ u8 *vpd_data;
+ int start;
- /* Get the Read only section */
- ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (ro_start < 0) {
- netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
+ vpd_data = pci_vpd_alloc(dev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(dev, "Unable to read VPD\n");
return;
}
- ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
- j = ro_size;
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- if (i + j > vpd_size)
- j = vpd_size - i;
-
- /* Get the Part number */
- i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
- if (i < 0) {
- netif_err(efx, drv, efx->net_dev, "Part number not found\n");
- return;
- }
-
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (i + j > vpd_size) {
- netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
- return;
- }
-
- netif_info(efx, drv, efx->net_dev,
- "Part Number : %.*s\n", j, &vpd_data[i]);
-
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- j = ro_size;
- i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
- if (i < 0) {
- netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
- return;
- }
-
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (i + j > vpd_size) {
- netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
- return;
- }
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+ if (start < 0)
+ pci_err(dev, "Part number not found or incomplete\n");
+ else
+ pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start);
- efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
- if (!efx->vpd_sn)
- return;
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
+ if (start < 0)
+ pci_err(dev, "Serial number not found or incomplete\n");
+ else
+ efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
- snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
+ kfree(vpd_data);
}
@@ -1198,8 +1043,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
rc = efx->type->init(efx);
up_write(&efx->filter_sem);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "failed to initialise NIC\n");
+ pci_err(efx->pci_dev, "failed to initialise NIC\n");
goto fail3;
}
@@ -1246,23 +1090,23 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
if (efx->type->sriov_init) {
rc = efx->type->sriov_init(efx);
if (rc)
- netif_err(efx, probe, efx->net_dev,
- "SR-IOV can't be enabled rc %d\n", rc);
+ pci_err(efx->pci_dev, "SR-IOV can't be enabled rc %d\n",
+ rc);
}
/* Determine netdevice features */
- net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
- NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
- if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
- net_dev->features |= NETIF_F_TSO6;
- /* Check whether device supports TSO */
- if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
- net_dev->features &= ~NETIF_F_ALL_TSO;
+ net_dev->features |= efx->type->offload_features;
+
+ /* Add TSO features */
+ if (efx->type->tso_versions && efx->type->tso_versions(efx))
+ net_dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+
/* Mask for features that also apply to VLAN devices */
net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
NETIF_F_RXCSUM);
+ /* Determine user configurable features */
net_dev->hw_features |= net_dev->features & ~efx->fixed_features;
/* Disable receiving frames with bad FCS, by default. */
@@ -1275,7 +1119,17 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
net_dev->features |= efx->fixed_features;
+ net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
+ /* devlink creation, registration and lock */
+ rc = efx_probe_devlink_and_lock(efx);
+ if (rc)
+ pci_err(efx->pci_dev, "devlink registration failed");
+
rc = efx_register_netdev(efx);
+ efx_probe_devlink_unlock(efx);
if (!rc)
return 0;
@@ -1295,27 +1149,37 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
static int efx_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *entry)
{
+ struct efx_probe_data *probe_data, **probe_ptr;
struct net_device *net_dev;
struct efx_nic *efx;
int rc;
- /* Allocate and initialise a struct net_device and struct efx_nic */
- net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
- EFX_MAX_RX_QUEUES);
- if (!net_dev)
+ /* Allocate probe data and struct efx_nic */
+ probe_data = kzalloc(sizeof(*probe_data), GFP_KERNEL);
+ if (!probe_data)
return -ENOMEM;
- efx = netdev_priv(net_dev);
+ probe_data->pci_dev = pci_dev;
+ efx = &probe_data->efx;
+
+ /* Allocate and initialise a struct net_device */
+ net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
+ if (!net_dev) {
+ rc = -ENOMEM;
+ goto fail0;
+ }
+ probe_ptr = netdev_priv(net_dev);
+ *probe_ptr = probe_data;
+ efx->net_dev = net_dev;
efx->type = (const struct efx_nic_type *) entry->driver_data;
efx->fixed_features |= NETIF_F_HIGHDMA;
pci_set_drvdata(pci_dev, efx);
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
- rc = efx_init_struct(efx, pci_dev, net_dev);
+ rc = efx_init_struct(efx, pci_dev);
if (rc)
goto fail1;
- netif_info(efx, probe, efx->net_dev,
- "Solarflare NIC detected\n");
+ pci_info(pci_dev, "Solarflare NIC detected\n");
if (!efx->type->is_vf)
efx_probe_vpd_strings(efx);
@@ -1358,21 +1222,21 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
netif_warn(efx, probe, efx->net_dev,
"failed to create MTDs (%d)\n", rc);
- (void)pci_enable_pcie_error_reporting(pci_dev);
-
if (efx->type->udp_tnl_push_ports)
efx->type->udp_tnl_push_ports(efx);
return 0;
fail3:
- efx_fini_io(efx, efx->type->mem_bar(efx));
+ efx_fini_io(efx);
fail2:
efx_fini_struct(efx);
fail1:
WARN_ON(rc > 0);
netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
free_netdev(net_dev);
+ fail0:
+ kfree(probe_data);
return rc;
}
@@ -1402,13 +1266,13 @@ static int efx_pm_freeze(struct device *dev)
rtnl_lock();
- if (efx->state != STATE_DISABLED) {
- efx->state = STATE_UNINIT;
-
+ if (efx_net_active(efx->state)) {
efx_device_detach_sync(efx);
efx_stop_all(efx);
efx_disable_interrupts(efx);
+
+ efx->state = efx_freeze(efx->state);
}
rtnl_unlock();
@@ -1416,6 +1280,17 @@ static int efx_pm_freeze(struct device *dev)
return 0;
}
+static void efx_pci_shutdown(struct pci_dev *pci_dev)
+{
+ struct efx_nic *efx = pci_get_drvdata(pci_dev);
+
+ if (!efx)
+ return;
+
+ efx_pm_freeze(&pci_dev->dev);
+ pci_disable_device(pci_dev);
+}
+
static int efx_pm_thaw(struct device *dev)
{
int rc;
@@ -1423,20 +1298,20 @@ static int efx_pm_thaw(struct device *dev)
rtnl_lock();
- if (efx->state != STATE_DISABLED) {
+ if (efx_frozen(efx->state)) {
rc = efx_enable_interrupts(efx);
if (rc)
goto fail;
mutex_lock(&efx->mac_lock);
- efx->phy_op->reconfigure(efx);
+ efx_mcdi_port_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
efx_device_attach_if_not_resetting(efx);
- efx->state = STATE_READY;
+ efx->state = efx_thaw(efx->state);
efx->type->resume_wol(efx);
}
@@ -1514,103 +1389,13 @@ static const struct dev_pm_ops efx_pm_ops = {
.restore = efx_pm_resume,
};
-/* A PCI error affecting this device was detected.
- * At this point MMIO and DMA may be disabled.
- * Stop the software path and request a slot reset.
- */
-static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
- enum pci_channel_state state)
-{
- pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
- struct efx_nic *efx = pci_get_drvdata(pdev);
-
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- rtnl_lock();
-
- if (efx->state != STATE_DISABLED) {
- efx->state = STATE_RECOVERY;
- efx->reset_pending = 0;
-
- efx_device_detach_sync(efx);
-
- efx_stop_all(efx);
- efx_disable_interrupts(efx);
-
- status = PCI_ERS_RESULT_NEED_RESET;
- } else {
- /* If the interface is disabled we don't want to do anything
- * with it.
- */
- status = PCI_ERS_RESULT_RECOVERED;
- }
-
- rtnl_unlock();
-
- pci_disable_device(pdev);
-
- return status;
-}
-
-/* Fake a successful reset, which will be performed later in efx_io_resume. */
-static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
-{
- struct efx_nic *efx = pci_get_drvdata(pdev);
- pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
-
- if (pci_enable_device(pdev)) {
- netif_err(efx, hw, efx->net_dev,
- "Cannot re-enable PCI device after reset.\n");
- status = PCI_ERS_RESULT_DISCONNECT;
- }
-
- return status;
-}
-
-/* Perform the actual reset and resume I/O operations. */
-static void efx_io_resume(struct pci_dev *pdev)
-{
- struct efx_nic *efx = pci_get_drvdata(pdev);
- int rc;
-
- rtnl_lock();
-
- if (efx->state == STATE_DISABLED)
- goto out;
-
- rc = efx_reset(efx, RESET_TYPE_ALL);
- if (rc) {
- netif_err(efx, hw, efx->net_dev,
- "efx_reset failed after PCI error (%d)\n", rc);
- } else {
- efx->state = STATE_READY;
- netif_dbg(efx, hw, efx->net_dev,
- "Done resetting and resuming IO after PCI error.\n");
- }
-
-out:
- rtnl_unlock();
-}
-
-/* For simplicity and reliability, we always require a slot reset and try to
- * reset the hardware when a pci error affecting the device is detected.
- * We leave both the link_reset and mmio_enabled callback unimplemented:
- * with our request for slot reset the mmio_enabled callback will never be
- * called, and the link_reset callback is not used by AER or EEH mechanisms.
- */
-static const struct pci_error_handlers efx_err_handlers = {
- .error_detected = efx_io_error_detected,
- .slot_reset = efx_io_slot_reset,
- .resume = efx_io_resume,
-};
-
static struct pci_driver efx_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = efx_pci_table,
.probe = efx_pci_probe,
.remove = efx_pci_remove,
.driver.pm = &efx_pm_ops,
+ .shutdown = efx_pci_shutdown,
.err_handler = &efx_err_handlers,
#ifdef CONFIG_SFC_SRIOV
.sriov_configure = efx_pci_sriov_configure,
@@ -1627,18 +1412,12 @@ static int __init efx_init_module(void)
{
int rc;
- printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
+ printk(KERN_INFO "Solarflare NET driver\n");
rc = register_netdevice_notifier(&efx_netdev_notifier);
if (rc)
goto err_notifier;
-#ifdef CONFIG_SFC_SRIOV
- rc = efx_init_sriov();
- if (rc)
- goto err_sriov;
-#endif
-
rc = efx_create_reset_workqueue();
if (rc)
goto err_reset;
@@ -1647,15 +1426,17 @@ static int __init efx_init_module(void)
if (rc < 0)
goto err_pci;
+ rc = pci_register_driver(&ef100_pci_driver);
+ if (rc < 0)
+ goto err_pci_ef100;
+
return 0;
+ err_pci_ef100:
+ pci_unregister_driver(&efx_pci_driver);
err_pci:
efx_destroy_reset_workqueue();
err_reset:
-#ifdef CONFIG_SFC_SRIOV
- efx_fini_sriov();
- err_sriov:
-#endif
unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
return rc;
@@ -1665,11 +1446,9 @@ static void __exit efx_exit_module(void)
{
printk(KERN_INFO "Solarflare NET driver unloading\n");
+ pci_unregister_driver(&ef100_pci_driver);
pci_unregister_driver(&efx_pci_driver);
efx_destroy_reset_workqueue();
-#ifdef CONFIG_SFC_SRIOV
- efx_fini_sriov();
-#endif
unregister_netdevice_notifier(&efx_netdev_notifier);
}
@@ -1682,4 +1461,3 @@ MODULE_AUTHOR("Solarflare Communications and "
MODULE_DESCRIPTION("Solarflare network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);
-MODULE_VERSION(EFX_DRIVER_VERSION);