Diffstat (limited to 'drivers/net/ethernet/sfc/falcon/efx.c')
-rw-r--r--	drivers/net/ethernet/sfc/falcon/efx.c | 43
1 file changed, 8 insertions(+), 35 deletions(-)
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 60c595ef7589..6ea41f6c9ef5 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -17,7 +17,6 @@
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
-#include <linux/aer.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include "efx.h"
@@ -112,11 +111,6 @@ module_param(ef4_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(ef4_separate_tx_channels,
"Use separate channels for TX and RX");
-/* This is the weight assigned to each of the (per-channel) virtual
- * NAPI devices.
- */
-static int napi_weight = 64;
-
/* This is the time (in jiffies) between invocations of the hardware
* monitor.
* On Falcon-based NICs, this will:
@@ -1400,9 +1394,8 @@ static int ef4_probe_interrupts(struct ef4_nic *efx)
if (n_channels > extra_channels)
n_channels -= extra_channels;
if (ef4_separate_tx_channels) {
- efx->n_tx_channels = min(max(n_channels / 2,
- 1U),
- efx->max_tx_channels);
+ efx->n_tx_channels = clamp(n_channels / 2, 1U,
+ efx->max_tx_channels);
efx->n_rx_channels = max(n_channels -
efx->n_tx_channels,
1U);
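
The new code leans on clamp(val, lo, hi) being equivalent to the old min(max(val, lo), hi) whenever lo <= hi, which should hold here since max_tx_channels is expected to be at least 1. A minimal userspace sketch of that equivalence (illustrative helpers only, not the kernel's type-checked macros):

/* Sketch of the clamp()/min()/max() relationship used in the hunk above. */
#include <assert.h>

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

static unsigned int clamp_u(unsigned int v, unsigned int lo, unsigned int hi)
{
	return min_u(max_u(v, lo), hi);
}

int main(void)
{
	unsigned int max_tx_channels = 4;	/* hypothetical limit */

	assert(clamp_u(0 / 2, 1, max_tx_channels) == 1);	/* clamped up to 1 */
	assert(clamp_u(16 / 2, 1, max_tx_channels) == 4);	/* clamped down to the limit */
	assert(clamp_u(6 / 2, 1, max_tx_channels) == 3);	/* in range, unchanged */
	return 0;
}
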
@@ -1892,14 +1885,6 @@ unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs)
return usecs * 1000 / efx->timer_quantum_ns;
}
-unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks)
-{
- /* We must round up when converting ticks to microseconds
- * because we round down when converting the other way.
- */
- return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
-}
-
/* Set interrupt moderation parameters */
int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
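
The helper removed above is the inverse of ef4_usecs_to_ticks(), and its comment records the rounding contract: microseconds convert to ticks rounding down, so ticks convert back rounding up. A standalone sketch of that pairing (plain C with a made-up timer quantum, not the driver code):

/* Round-down / round-up pairing from the removed ef4_ticks_to_usecs() comment. */
#include <assert.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int usecs_to_ticks(unsigned int usecs, unsigned int quantum_ns)
{
	return usecs * 1000 / quantum_ns;		/* rounds down */
}

static unsigned int ticks_to_usecs(unsigned int ticks, unsigned int quantum_ns)
{
	return DIV_ROUND_UP(ticks * quantum_ns, 1000);	/* rounds up */
}

int main(void)
{
	unsigned int quantum_ns = 4968;	/* hypothetical quantum, nanoseconds per tick */
	unsigned int usecs, ticks;

	for (usecs = 1; usecs < 1000; usecs++) {
		ticks = usecs_to_ticks(usecs, quantum_ns);
		/* A round trip never reports more time than was requested... */
		assert(ticks_to_usecs(ticks, quantum_ns) <= usecs);
		/* ...and converting the result back never drops a tick. */
		assert(usecs_to_ticks(ticks_to_usecs(ticks, quantum_ns), quantum_ns) >= ticks);
	}
	return 0;
}
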
@@ -2017,8 +2002,7 @@ static void ef4_init_napi_channel(struct ef4_channel *channel)
struct ef4_nic *efx = channel->efx;
channel->napi_dev = efx->net_dev;
- netif_napi_add(channel->napi_dev, &channel->napi_str,
- ef4_poll, napi_weight);
+ netif_napi_add(channel->napi_dev, &channel->napi_str, ef4_poll);
}
static void ef4_init_napi(struct ef4_nic *efx)
@@ -2092,7 +2076,7 @@ int ef4_net_stop(struct net_device *net_dev)
return 0;
}
-/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+/* Context: process, rcu_read_lock or RTNL held, non-blocking. */
static void ef4_net_stats(struct net_device *net_dev,
struct rtnl_link_stats64 *stats)
{
@@ -2132,7 +2116,7 @@ static int ef4_change_mtu(struct net_device *net_dev, int new_mtu)
ef4_stop_all(efx);
mutex_lock(&efx->mac_lock);
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
ef4_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
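
The MTU store now goes through WRITE_ONCE(), presumably because net_dev->mtu is read locklessly elsewhere in the stack; the annotation asks the compiler for a single plain store it will not merge or split. A rough userspace approximation of the idea (the real macros live in include/asm-generic/rwonce.h and do more type work):

/* Rough WRITE_ONCE()/READ_ONCE() stand-ins: force a single access
 * through a volatile-qualified pointer. */
#include <stdio.h>

#define WRITE_ONCE_INT(x, val)	(*(volatile int *)&(x) = (val))
#define READ_ONCE_INT(x)	(*(volatile int *)&(x))

static int mtu = 1500;

int main(void)
{
	WRITE_ONCE_INT(mtu, 9000);			/* writer, e.g. an MTU change */
	printf("mtu = %d\n", READ_ONCE_INT(mtu));	/* lockless reader */
	return 0;
}
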
@@ -2267,7 +2251,7 @@ static int ef4_register_netdev(struct ef4_nic *efx)
net_dev->irq = efx->pci_dev->irq;
net_dev->netdev_ops = &ef4_netdev_ops;
net_dev->ethtool_ops = &ef4_ethtool_ops;
- netif_set_gso_max_segs(net_dev, EF4_TSO_MAX_SEGS);
+ netif_set_tso_max_segs(net_dev, EF4_TSO_MAX_SEGS);
net_dev->min_mtu = EF4_MIN_MTU;
net_dev->max_mtu = EF4_MAX_MTU;
@@ -2335,7 +2319,7 @@ static void ef4_unregister_netdev(struct ef4_nic *efx)
BUG_ON(netdev_priv(efx->net_dev) != efx);
if (ef4_dev_registered(efx)) {
- strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+ strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
unregister_netdev(efx->net_dev);
}
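
The strscpy() conversions above change the calling convention slightly: strscpy() reports truncation with -E2BIG instead of returning the full source length, and it does not need to scan the entire source string the way strlcpy() does. A userspace stand-in sketching those semantics (illustrative only; the helper name and error constant below are made up):

/* strscpy()-like semantics: copy at most size-1 bytes, always
 * NUL-terminate, return bytes copied or a negative error on truncation. */
#include <stdio.h>
#include <string.h>

#define ERR_TOO_BIG	(-7)	/* stand-in for the kernel's -E2BIG */

static long strscpy_like(char *dst, const char *src, size_t size)
{
	size_t i;

	if (size == 0)
		return ERR_TOO_BIG;
	for (i = 0; i < size - 1 && src[i] != '\0'; i++)
		dst[i] = src[i];
	dst[i] = '\0';
	return src[i] == '\0' ? (long)i : ERR_TOO_BIG;
}

int main(void)
{
	char name[8];

	if (strscpy_like(name, "0000:01:00.0", sizeof(name)) < 0)
		printf("truncated to \"%s\"\n", name);	/* a full pci_name() rarely fits in 8 bytes */
	return 0;
}
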
@@ -2646,7 +2630,7 @@ static int ef4_init_struct(struct ef4_nic *efx,
efx->pci_dev = pci_dev;
efx->msg_enable = debug;
efx->state = STATE_UNINIT;
- strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
+ strscpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->net_dev = net_dev;
efx->rx_prefix_size = efx->type->rx_prefix_size;
@@ -2771,8 +2755,6 @@ static void ef4_pci_remove(struct pci_dev *pci_dev)
ef4_fini_struct(efx);
free_netdev(efx->net_dev);
-
- pci_disable_pcie_error_reporting(pci_dev);
};
/* NIC VPD information
@@ -2933,12 +2915,6 @@ static int ef4_pci_probe(struct pci_dev *pci_dev,
netif_warn(efx, probe, efx->net_dev,
"failed to create MTDs (%d)\n", rc);
- rc = pci_enable_pcie_error_reporting(pci_dev);
- if (rc && rc != -EINVAL)
- netif_notice(efx, probe, efx->net_dev,
- "PCIE error reporting unavailable (%d).\n",
- rc);
-
return 0;
fail4:
@@ -3151,9 +3127,6 @@ out:
/* For simplicity and reliability, we always require a slot reset and try to
* reset the hardware when a pci error affecting the device is detected.
- * We leave both the link_reset and mmio_enabled callback unimplemented:
- * with our request for slot reset the mmio_enabled callback will never be
- * called, and the link_reset callback is not used by AER or EEH mechanisms.
*/
static const struct pci_error_handlers ef4_err_handlers = {
.error_detected = ef4_io_error_detected,