author		Linus Torvalds <torvalds@linux-foundation.org>	2021-02-25 12:06:25 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-02-25 12:06:25 -0800
commit		5ad3dbab569ac39e88fae31690401895c37368b6 (patch)
tree		43301ebd31b499939a2c7c955468afada309ea33 /drivers/net
parent		268f77b5250998b871fa54a2a9703871fb44544e (diff)
parent		6cf739131a15e4177e58a1b4f2bede9d5da78552 (diff)
Merge tag 'net-5.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from Jakub Kicinski:
 "Rather small batch this time.

  Current release - regressions:

   - bcm63xx_enet: fix sporadic kernel panic due to queue length
     mis-accounting

  Current release - new code bugs:

   - bcm4908_enet: fix RX path possible mem leak

   - bcm4908_enet: fix NAPI poll returned value

   - stmmac: fix missing spin_lock_init in visconti_eth_dwmac_probe()

   - sched: cls_flower: validate ct_state for invalid and reply flags

  Previous releases - regressions:

   - net: introduce CAN specific pointer in the struct net_device to
     prevent mis-interpreting memory

   - phy: micrel: set soft_reset callback to genphy_soft_reset for
     KSZ8081

   - psample: fix netlink skb length with tunnel info

  Previous releases - always broken:

   - icmp: pass zeroed opts from icmp{,v6}_ndo_send before sending

   - wireguard: device: do not generate ICMP for non-IP packets

   - mptcp: provide subflow aware release function to avoid a mem leak

   - hsr: add support for EntryForgetTime

   - r8169: fix jumbo packet handling on RTL8168e

   - octeontx2-af: fix an off by one in rvu_dbg_qsize_write()

   - i40e: fix flow for IPv6 next header (extension header)

   - phy: icplus: call phy_restore_page() when phy_select_page() fails

   - dpaa_eth: fix the access method for the dpaa_napi_portal"

* tag 'net-5.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (55 commits)
  r8169: fix jumbo packet handling on RTL8168e
  net: phy: micrel: set soft_reset callback to genphy_soft_reset for KSZ8081
  net: psample: Fix netlink skb length with tunnel info
  net: broadcom: bcm4908_enet: fix NAPI poll returned value
  net: broadcom: bcm4908_enet: fix RX path possible mem leak
  net: hsr: add support for EntryForgetTime
  net: dsa: sja1105: Remove unneeded cast in sja1105_crc32()
  ibmvnic: fix a race between open and reset
  net: stmmac: Fix missing spin_lock_init in visconti_eth_dwmac_probe()
  net: introduce CAN specific pointer in the struct net_device
  net: usb: qmi_wwan: support ZTE P685M modem
  wireguard: kconfig: use arm chacha even with no neon
  wireguard: queueing: get rid of per-peer ring buffers
  wireguard: device: do not generate ICMP for non-IP packets
  wireguard: peer: put frequently used members above cache lines
  wireguard: selftests: test multiple parallel streams
  wireguard: socket: remove bogus __be32 annotation
  wireguard: avoid double unlikely() notation when using IS_ERR()
  net: qrtr: Fix memory leak in qrtr_tun_open
  vxlan: move debug check after netdev unregister
  ...
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig | 2
-rw-r--r--  drivers/net/can/dev/dev.c | 4
-rw-r--r--  drivers/net/can/slcan.c | 4
-rw-r--r--  drivers/net/can/vcan.c | 2
-rw-r--r--  drivers/net/can/vxcan.c | 6
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 39
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h | 8
-rw-r--r--  drivers/net/dsa/b53/b53_regs.h | 1
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 18
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_static_config.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/ag71xx.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bcm4908_enet.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 63
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 64
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_nl.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 34
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 35
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 25
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 1
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 30
-rw-r--r--  drivers/net/gtp.c | 1
-rw-r--r--  drivers/net/phy/icplus.c | 9
-rw-r--r--  drivers/net/phy/micrel.c | 1
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/usb/r8152.c | 67
-rw-r--r--  drivers/net/vxlan.c | 11
-rw-r--r--  drivers/net/wireguard/device.c | 21
-rw-r--r--  drivers/net/wireguard/device.h | 15
-rw-r--r--  drivers/net/wireguard/peer.c | 28
-rw-r--r--  drivers/net/wireguard/peer.h | 8
-rw-r--r--  drivers/net/wireguard/queueing.c | 86
-rw-r--r--  drivers/net/wireguard/queueing.h | 45
-rw-r--r--  drivers/net/wireguard/receive.c | 16
-rw-r--r--  drivers/net/wireguard/send.c | 31
-rw-r--r--  drivers/net/wireguard/socket.c | 8
45 files changed, 489 insertions(+), 263 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 45d12b0e9a2f..b09bed554f26 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -88,7 +88,7 @@ config WIREGUARD
select CRYPTO_CURVE25519_X86 if X86 && 64BIT
select ARM_CRYPTO if ARM
select ARM64_CRYPTO if ARM64
- select CRYPTO_CHACHA20_NEON if (ARM || ARM64) && KERNEL_MODE_NEON
+ select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON)
select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
select CRYPTO_POLY1305_ARM if ARM
select CRYPTO_BLAKE2S_ARM if ARM
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index d9281ae853f8..311d8564d611 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -239,6 +239,7 @@ void can_setup(struct net_device *dev)
struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
unsigned int txqs, unsigned int rxqs)
{
+ struct can_ml_priv *can_ml;
struct net_device *dev;
struct can_priv *priv;
int size;
@@ -270,7 +271,8 @@ struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
priv = netdev_priv(dev);
priv->dev = dev;
- dev->ml_priv = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
+ can_ml = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
+ can_set_ml_priv(dev, can_ml);
if (echo_skb_max) {
priv->echo_skb_max = echo_skb_max;
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index a1bd1be09548..30c8d53c9745 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -516,6 +516,7 @@ static struct slcan *slc_alloc(void)
int i;
char name[IFNAMSIZ];
struct net_device *dev = NULL;
+ struct can_ml_priv *can_ml;
struct slcan *sl;
int size;
@@ -538,7 +539,8 @@ static struct slcan *slc_alloc(void)
dev->base_addr = i;
sl = netdev_priv(dev);
- dev->ml_priv = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN);
+ can_ml = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN);
+ can_set_ml_priv(dev, can_ml);
/* Initialize channel control data */
sl->magic = SLCAN_MAGIC;
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 39ca14b0585d..067705e2850b 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -153,7 +153,7 @@ static void vcan_setup(struct net_device *dev)
dev->addr_len = 0;
dev->tx_queue_len = 0;
dev->flags = IFF_NOARP;
- dev->ml_priv = netdev_priv(dev);
+ can_set_ml_priv(dev, netdev_priv(dev));
/* set flags according to driver capabilities */
if (echo)
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index f9a524c5f6d6..8861a7d875e7 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -141,6 +141,8 @@ static const struct net_device_ops vxcan_netdev_ops = {
static void vxcan_setup(struct net_device *dev)
{
+ struct can_ml_priv *can_ml;
+
dev->type = ARPHRD_CAN;
dev->mtu = CANFD_MTU;
dev->hard_header_len = 0;
@@ -149,7 +151,9 @@ static void vxcan_setup(struct net_device *dev)
dev->flags = (IFF_NOARP|IFF_ECHO);
dev->netdev_ops = &vxcan_netdev_ops;
dev->needs_free_netdev = true;
- dev->ml_priv = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
+
+ can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
+ can_set_ml_priv(dev, can_ml);
}
/* forward declaration for rtnl_create_link() */
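
[editor's note] The four CAN hunks above all replace raw writes to dev->ml_priv with a typed helper. A minimal sketch of the accessor pattern this series introduces follows; the helper bodies are assumed from the commit message ("net: introduce CAN specific pointer in the struct net_device") and are not part of this diff:

    /* Sketch only: assumed shape of the typed ml_priv accessors. */
    static inline void can_set_ml_priv(struct net_device *dev,
    				   struct can_ml_priv *ml_priv)
    {
    	/* Tag the pointer as CAN-owned so non-CAN code cannot
    	 * mis-interpret it.
    	 */
    	netdev_set_ml_priv(dev, ml_priv, ML_PRIV_CAN);
    }

    static inline struct can_ml_priv *can_get_ml_priv(struct net_device *dev)
    {
    	/* Returns NULL unless ml_priv was stored as ML_PRIV_CAN. */
    	return netdev_get_ml_priv(dev, ML_PRIV_CAN);
    }
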
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index ae86ded1e2a1..a162499bcafc 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -543,6 +543,19 @@ static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
}
+static void b53_port_set_learning(struct b53_device *dev, int port,
+ bool learning)
+{
+ u16 reg;
+
+ b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, &reg);
+ if (learning)
+ reg &= ~BIT(port);
+ else
+ reg |= BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg);
+}
+
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct b53_device *dev = ds->priv;
@@ -557,6 +570,7 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
b53_port_set_ucast_flood(dev, port, true);
b53_port_set_mcast_flood(dev, port, true);
+ b53_port_set_learning(dev, port, false);
if (dev->ops->irq_enable)
ret = dev->ops->irq_enable(dev, port);
@@ -691,6 +705,7 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port)
b53_port_set_ucast_flood(dev, port, true);
b53_port_set_mcast_flood(dev, port, true);
+ b53_port_set_learning(dev, port, false);
}
static void b53_enable_mib(struct b53_device *dev)
@@ -1953,19 +1968,20 @@ void b53_br_fast_age(struct dsa_switch *ds, int port)
}
EXPORT_SYMBOL(b53_br_fast_age);
-static int b53_br_flags_pre(struct dsa_switch *ds, int port,
- struct switchdev_brport_flags flags,
- struct netlink_ext_ack *extack)
+int b53_br_flags_pre(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
{
- if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD))
+ if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_LEARNING))
return -EINVAL;
return 0;
}
+EXPORT_SYMBOL(b53_br_flags_pre);
-static int b53_br_flags(struct dsa_switch *ds, int port,
- struct switchdev_brport_flags flags,
- struct netlink_ext_ack *extack)
+int b53_br_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
{
if (flags.mask & BR_FLOOD)
b53_port_set_ucast_flood(ds->priv, port,
@@ -1973,17 +1989,22 @@ static int b53_br_flags(struct dsa_switch *ds, int port,
if (flags.mask & BR_MCAST_FLOOD)
b53_port_set_mcast_flood(ds->priv, port,
!!(flags.val & BR_MCAST_FLOOD));
+ if (flags.mask & BR_LEARNING)
+ b53_port_set_learning(ds->priv, port,
+ !!(flags.val & BR_LEARNING));
return 0;
}
+EXPORT_SYMBOL(b53_br_flags);
-static int b53_set_mrouter(struct dsa_switch *ds, int port, bool mrouter,
- struct netlink_ext_ack *extack)
+int b53_set_mrouter(struct dsa_switch *ds, int port, bool mrouter,
+ struct netlink_ext_ack *extack)
{
b53_port_set_mcast_flood(ds->priv, port, mrouter);
return 0;
}
+EXPORT_SYMBOL(b53_set_mrouter);
static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
{
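
[editor's note] The learning toggle above is exported together with the pre/commit pair of bridge-flag callbacks so bcm_sf2 can reuse them. A minimal sketch of the calling convention assumed for that pair (the core plumbing is not shown in this diff): validate first, write hardware second, and act only on bits set in flags.mask:

    /* Sketch only: assumed two-pass use of the exported callbacks. */
    static int example_offload_brport_flags(struct dsa_switch *ds, int port,
    					struct switchdev_brport_flags flags,
    					struct netlink_ext_ack *extack)
    {
    	int err;

    	/* Pass 1: reject unsupported bits before touching hardware. */
    	err = b53_br_flags_pre(ds, port, flags, extack);
    	if (err)
    		return err;

    	/* Pass 2: apply only the bits the caller marked in .mask. */
    	return b53_br_flags(ds, port, flags, extack);
    }
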
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index faf983fbca82..8419bb7f4505 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -326,6 +326,14 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge);
void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge);
void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
void b53_br_fast_age(struct dsa_switch *ds, int port);
+int b53_br_flags_pre(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack);
+int b53_br_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack);
+int b53_set_mrouter(struct dsa_switch *ds, int port, bool mrouter,
+ struct netlink_ext_ack *extack);
int b53_setup_devlink_resources(struct dsa_switch *ds);
void b53_port_event(struct dsa_switch *ds, int port);
void b53_phylink_validate(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index c90985c294a2..b2c539a42154 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -115,6 +115,7 @@
#define B53_UC_FLOOD_MASK 0x32
#define B53_MC_FLOOD_MASK 0x34
#define B53_IPMC_FLOOD_MASK 0x36
+#define B53_DIS_LEARNING 0x3c
/*
* Override Ports 0-7 State on devices with xMII interfaces (8 bit)
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 1857aa9aa84a..5ee8103b8e9c 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -223,23 +223,10 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
reg &= ~P_TXQ_PSM_VDD(port);
core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
- /* Enable learning */
- reg = core_readl(priv, CORE_DIS_LEARN);
- reg &= ~BIT(port);
- core_writel(priv, reg, CORE_DIS_LEARN);
-
/* Enable Broadcom tags for that port if requested */
- if (priv->brcm_tag_mask & BIT(port)) {
+ if (priv->brcm_tag_mask & BIT(port))
b53_brcm_hdr_setup(ds, port);
- /* Disable learning on ASP port */
- if (port == 7) {
- reg = core_readl(priv, CORE_DIS_LEARN);
- reg |= BIT(port);
- core_writel(priv, reg, CORE_DIS_LEARN);
- }
- }
-
/* Configure Traffic Class to QoS mapping, allow each priority to map
* to a different queue number
*/
@@ -1117,7 +1104,10 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.set_mac_eee = b53_set_mac_eee,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
+ .port_pre_bridge_flags = b53_br_flags_pre,
+ .port_bridge_flags = b53_br_flags,
.port_stp_state_set = b53_br_set_stp_state,
+ .port_set_mrouter = b53_set_mrouter,
.port_fast_age = b53_br_fast_age,
.port_vlan_filtering = b53_vlan_filtering,
.port_vlan_add = b53_vlan_add,
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index 139b7b4fbd0d..a8efb7fac395 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -85,7 +85,7 @@ u32 sja1105_crc32(const void *buf, size_t len)
/* seed */
crc = ~0;
for (i = 0; i < len; i += 4) {
- sja1105_unpack((void *)buf + i, &word, 31, 0, 4);
+ sja1105_unpack(buf + i, &word, 31, 0, 4);
crc = crc32_le(crc, (u8 *)&word, 4);
}
return ~crc;
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index dd5c8a9038bb..a60ce9030581 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -223,8 +223,6 @@
#define AG71XX_REG_RX_SM 0x01b0
#define AG71XX_REG_TX_SM 0x01b4
-#define ETH_SWITCH_HEADER_LEN 2
-
#define AG71XX_DEFAULT_MSG_ENABLE \
(NETIF_MSG_DRV \
| NETIF_MSG_PROBE \
@@ -933,7 +931,7 @@ static void ag71xx_hw_setup(struct ag71xx *ag)
static unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
- return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
+ return ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}
static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 9be33dc98072..0b70e9e0ddad 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -570,6 +570,7 @@ static int bcm4908_enet_poll(struct napi_struct *napi, int weight)
if (len < ETH_ZLEN ||
(ctl & (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) != (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) {
+ kfree_skb(slot.skb);
enet->netdev->stats.rx_dropped++;
break;
}
@@ -582,6 +583,8 @@ static int bcm4908_enet_poll(struct napi_struct *napi, int weight)
enet->netdev->stats.rx_packets++;
enet->netdev->stats.rx_bytes += len;
+
+ handled++;
}
if (handled < weight) {
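
[editor's note] Both bcm4908_enet fixes above follow from the NAPI contract: every consumed frame must be counted (and a dropped frame's skb must still be freed), and the returned count decides whether polling continues. A minimal sketch of that contract, with the RX helpers left hypothetical:

    /* Sketch only: the NAPI poll contract the two fixes restore. */
    static int example_poll(struct napi_struct *napi, int budget)
    {
    	int handled = 0;

    	while (handled < budget) {
    		struct sk_buff *skb = example_rx_one(napi); /* hypothetical */

    		if (!skb)
    			break;
    		if (example_rx_bad(skb)) {	/* hypothetical */
    			kfree_skb(skb);		/* never leak dropped frames */
    			break;
    		}
    		napi_gro_receive(napi, skb);
    		handled++;			/* count every consumed frame */
    	}

    	/* Returning < budget (plus napi_complete_done()) re-arms the
    	 * IRQ; returning budget keeps the softirq polling.
    	 */
    	if (handled < budget && napi_complete_done(napi, handled))
    		example_rx_irq_enable(napi);	/* hypothetical */

    	return handled;
    }
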
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index fd8767213165..977f097fc7bf 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1192,7 +1192,6 @@ static int bcm_enet_stop(struct net_device *dev)
kdev = &priv->pdev->dev;
netif_stop_queue(dev);
- netdev_reset_queue(dev);
napi_disable(&priv->napi);
if (priv->has_phy)
phy_stop(dev->phydev);
@@ -1231,6 +1230,9 @@ static int bcm_enet_stop(struct net_device *dev)
if (priv->has_phy)
phy_disconnect(dev->phydev);
+ /* reset BQL after forced tx reclaim to prevent kernel panic */
+ netdev_reset_queue(dev);
+
return 0;
}
@@ -2343,7 +2345,6 @@ static int bcm_enetsw_stop(struct net_device *dev)
del_timer_sync(&priv->swphy_poll);
netif_stop_queue(dev);
- netdev_reset_queue(dev);
napi_disable(&priv->napi);
del_timer_sync(&priv->rx_timeout);
@@ -2371,6 +2372,9 @@ static int bcm_enetsw_stop(struct net_device *dev)
free_irq(priv->irq_tx, dev);
free_irq(priv->irq_rx, dev);
+ /* reset BQL after forced tx reclaim to prevent kernel panic */
+ netdev_reset_queue(dev);
+
return 0;
}
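
[editor's note] The bcm63xx_enet change is purely an ordering fix for Byte Queue Limits: netdev_sent_queue() and netdev_completed_queue() must stay balanced, so the counters may only be zeroed once nothing is left in flight. A minimal sketch of the stop-path ordering this patch establishes (the reclaim helper is hypothetical):

    /* Sketch only: BQL-safe device stop ordering. */
    static void example_stop(struct net_device *dev)
    {
    	netif_stop_queue(dev);		/* no further netdev_sent_queue() */

    	example_force_reclaim_tx(dev);	/* hypothetical: completes all
    					 * in-flight descriptors, calling
    					 * netdev_completed_queue() as it goes
    					 */

    	netdev_reset_queue(dev);	/* safe now: resetting earlier lets
    					 * the later completions underflow
    					 * the BQL state
    					 */
    }
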
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index ccfe52a50a66..720dc99bd1fc 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2670,7 +2670,6 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
u32 hash;
u64 ns;
- np = container_of(&portal, struct dpaa_napi_portal, p);
dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
fd_status = be32_to_cpu(fd->status);
fd_format = qm_fd_get_format(fd);
@@ -2685,6 +2684,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
percpu_priv = this_cpu_ptr(priv->percpu_priv);
percpu_stats = &percpu_priv->stats;
+ np = &percpu_priv->np;
if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi)))
return qman_cb_dqrr_stop;
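
[editor's note] The dpaa_eth fix replaces a container_of() applied to the address of a local parameter. That construct compiles when the member is itself a pointer, but it computes an address relative to the stack slot, not to any containing structure. A minimal illustration of the bug class, with the types reduced to a hypothetical stand-in:

    /* Sketch only: why container_of(&local, ...) is bogus when the
     * member is itself a pointer (the dpaa_napi_portal case).
     */
    struct example_outer {
    	void *member;
    };

    static void example(struct example_outer *outer, void *arg)
    {
    	void *local = arg;
    	struct example_outer *ok, *bad;

    	/* RIGHT: &outer->member points into the structure. */
    	ok = container_of(&outer->member, struct example_outer, member);

    	/* Compiles (the types match) but is WRONG: &local is a stack
    	 * address, so the computed "containing" structure is garbage.
    	 * The dpaa fix avoids the pattern and reaches the NAPI portal
    	 * through per-CPU private data instead.
    	 */
    	bad = container_of(&local, struct example_outer, member);
    }
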
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 1c0e4beb56e7..118a4bd3f877 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1172,12 +1172,25 @@ static int ibmvnic_open(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int rc;
- /* If device failover is pending, just set device state and return.
- * Device operation will be handled by reset routine.
+ ASSERT_RTNL();
+
+ /* If device failover is pending or we are about to reset, just set
+ * device state and return. Device operation will be handled by reset
+ * routine.
+ *
+ * It should be safe to overwrite the adapter->state here. Since
+ * we hold the rtnl, either the reset has not actually started or
+ * the rtnl got dropped during the set_link_state() in do_reset().
+ * In the former case, no one else is changing the state (again we
+ * have the rtnl) and in the latter case, do_reset() will detect and
+ * honor our setting below.
*/
- if (adapter->failover_pending) {
+ if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
+ netdev_dbg(netdev, "[S:%d FOP:%d] Resetting, deferring open\n",
+ adapter->state, adapter->failover_pending);
adapter->state = VNIC_OPEN;
- return 0;
+ rc = 0;
+ goto out;
}
if (adapter->state != VNIC_CLOSED) {
@@ -1196,10 +1209,12 @@ static int ibmvnic_open(struct net_device *netdev)
rc = __ibmvnic_open(netdev);
out:
- /* If open fails due to a pending failover, set device state and
- * return. Device operation will be handled by reset routine.
+ /* If open failed and there is a pending failover or in-progress reset,
+ * set device state and return. Device operation will be handled by
+ * reset routine. See also comments above regarding rtnl.
*/
- if (rc && adapter->failover_pending) {
+ if (rc &&
+ (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
adapter->state = VNIC_OPEN;
rc = 0;
}
@@ -1928,6 +1943,14 @@ static int do_reset(struct ibmvnic_adapter *adapter,
if (rwi->reset_reason == VNIC_RESET_FAILOVER)
adapter->failover_pending = false;
+ /* read the state and check (again) after getting rtnl */
+ reset_state = adapter->state;
+
+ if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
+ rc = -EBUSY;
+ goto out;
+ }
+
netif_carrier_off(netdev);
old_num_rx_queues = adapter->req_rx_queues;
@@ -1958,11 +1981,27 @@ static int do_reset(struct ibmvnic_adapter *adapter,
if (rc)
goto out;
+ if (adapter->state == VNIC_OPEN) {
+ /* When we dropped rtnl, ibmvnic_open() got
+ * it and noticed that we are resetting and
+ * set the adapter state to OPEN. Update our
+ * new "target" state, and resume the reset
+ * from VNIC_CLOSING state.
+ */
+ netdev_dbg(netdev,
+ "Open changed state from %d, updating.\n",
+ reset_state);
+ reset_state = VNIC_OPEN;
+ adapter->state = VNIC_CLOSING;
+ }
+
if (adapter->state != VNIC_CLOSING) {
+ /* If someone else changed the adapter state
+ * when we dropped the rtnl, fail the reset
+ */
rc = -1;
goto out;
}
-
adapter->state = VNIC_CLOSED;
}
}
@@ -2106,6 +2145,14 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
rwi->reset_reason);
+ /* read the state and check (again) after getting rtnl */
+ reset_state = adapter->state;
+
+ if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
+ rc = -EBUSY;
+ goto out;
+ }
+
netif_carrier_off(netdev);
adapter->reset_reason = rwi->reset_reason;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index a8a2b5f683a2..c70dec65a572 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -5083,7 +5083,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
enum i40e_admin_queue_err adq_err;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- bool is_reset_needed;
+ u32 reset_needed = 0;
i40e_status status;
u32 i, j;
@@ -5128,9 +5128,11 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
flags_complete:
changed_flags = orig_flags ^ new_flags;
- is_reset_needed = !!(changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
- I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED |
- I40E_FLAG_DISABLE_FW_LLDP));
+ if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP)
+ reset_needed = I40E_PF_RESET_AND_REBUILD_FLAG;
+ if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
+ I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED))
+ reset_needed = BIT(__I40E_PF_RESET_REQUESTED);
/* Before we finalize any flag changes, we need to perform some
* checks to ensure that the changes are supported and safe.
@@ -5252,7 +5254,7 @@ flags_complete:
case I40E_AQ_RC_EEXIST:
dev_warn(&pf->pdev->dev,
"FW LLDP agent is already running\n");
- is_reset_needed = false;
+ reset_needed = 0;
break;
case I40E_AQ_RC_EPERM:
dev_warn(&pf->pdev->dev,
@@ -5281,8 +5283,8 @@ flags_complete:
/* Issue reset to cause things to take effect, as additional bits
* are added we will need to create a mask of bits requiring reset
*/
- if (is_reset_needed)
- i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
+ if (reset_needed)
+ i40e_do_reset(pf, reset_needed, true);
return 0;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 8bb8eb65add9..353deae139f9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2616,7 +2616,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
return;
if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
return;
- if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
+ if (test_bit(__I40E_VF_DISABLE, pf->state)) {
set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
return;
}
@@ -2634,7 +2634,6 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
}
}
}
- clear_bit(__I40E_VF_DISABLE, pf->state);
}
/**
@@ -5937,7 +5936,7 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
ch->seid = ctxt.seid;
ch->vsi_number = ctxt.vsi_number;
- ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
+ ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx);
/* copy just the sections touched not the entire info
* since not all sections are valid as returned by
@@ -7977,8 +7976,8 @@ static inline void
i40e_set_cld_element(struct i40e_cloud_filter *filter,
struct i40e_aqc_cloud_filters_element_data *cld)
{
- int i, j;
u32 ipa;
+ int i;
memset(cld, 0, sizeof(*cld));
ether_addr_copy(cld->outer_mac, filter->dst_mac);
@@ -7989,14 +7988,14 @@ i40e_set_cld_element(struct i40e_cloud_filter *filter,
if (filter->n_proto == ETH_P_IPV6) {
#define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
- for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
- i++, j += 2) {
+ for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) {
ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
- ipa = cpu_to_le32(ipa);
- memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
+
+ *(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa);
}
} else {
ipa = be32_to_cpu(filter->dst_ipv4);
+
memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
}
@@ -8044,6 +8043,8 @@ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
if (filter->flags >= ARRAY_SIZE(flag_table))
return I40E_ERR_CONFIG;
+ memset(&cld_filter, 0, sizeof(cld_filter));
+
/* copy element needed to add cloud filter from filter */
i40e_set_cld_element(filter, &cld_filter);
@@ -8107,10 +8108,13 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
return -EOPNOTSUPP;
/* adding filter using src_port/src_ip is not supported at this stage */
- if (filter->src_port || filter->src_ipv4 ||
+ if (filter->src_port ||
+ (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
!ipv6_addr_any(&filter->ip.v6.src_ip6))
return -EOPNOTSUPP;
+ memset(&cld_filter, 0, sizeof(cld_filter));
+
/* copy element needed to add cloud filter from filter */
i40e_set_cld_element(filter, &cld_filter.element);
@@ -8134,7 +8138,7 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
}
- } else if (filter->dst_ipv4 ||
+ } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
!ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
cld_filter.element.flags =
cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
@@ -8928,11 +8932,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
dev_dbg(&pf->pdev->dev, "PFR requested\n");
i40e_handle_reset_warning(pf, lock_acquired);
- dev_info(&pf->pdev->dev,
- pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
- "FW LLDP is disabled\n" :
- "FW LLDP is enabled\n");
-
} else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
/* Request a PF Reset
*
@@ -8940,6 +8939,10 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
*/
i40e_prep_for_reset(pf);
i40e_reset_and_rebuild(pf, true, lock_acquired);
+ dev_info(&pf->pdev->dev,
+ pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
+ "FW LLDP is disabled\n" :
+ "FW LLDP is enabled\n");
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
int v;
@@ -10462,7 +10465,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
- u8 set_fc_aq_fail = 0;
i40e_status ret;
u32 val;
int v;
@@ -10605,13 +10607,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
- /* make sure our flow control settings are restored */
- ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
- if (ret)
- dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
- i40e_stat_str(&pf->hw, ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
-
/* Rebuild the VSIs and VEBs that existed before reset.
* They are still in our local switch element arrays, so only
* need to rebuild the switch model in the HW.
@@ -12191,6 +12186,8 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
struct i40e_aqc_configure_partition_bw_data bw_data;
i40e_status status;
+ memset(&bw_data, 0, sizeof(bw_data));
+
/* Set the valid bit for this PF */
bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
@@ -15198,7 +15195,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err;
u32 val;
u32 i;
- u8 set_fc_aq_fail;
err = pci_enable_device_mem(pdev);
if (err)
@@ -15537,24 +15533,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
- /* Make sure flow control is set according to current settings */
- err = i40e_set_fc(hw, &set_fc_aq_fail, true);
- if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
- dev_dbg(&pf->pdev->dev,
- "Set fc with err %s aq_err %s on get_phy_cap\n",
- i40e_stat_str(hw, err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
- if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
- dev_dbg(&pf->pdev->dev,
- "Set fc with err %s aq_err %s on set_phy_config\n",
- i40e_stat_str(hw, err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
- if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
- dev_dbg(&pf->pdev->dev,
- "Set fc with err %s aq_err %s on get_link_info\n",
- i40e_stat_str(hw, err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
-
/* if FDIR VSI was set up, start it now */
for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
@@ -15611,6 +15589,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
dev_info(&pdev->dev,
"setup of misc vector failed: %d\n", err);
+ i40e_cloud_filter_exit(pf);
+ i40e_fdir_teardown(pf);
goto err_vsis;
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index f6f1af94cca0..627794b31e33 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1948,7 +1948,7 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
skb_record_rx_queue(skb, rx_ring->queue_index);
if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
- u16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
+ __le16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(vlan_tag));
@@ -3223,13 +3223,16 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
l4_proto = ip.v4->protocol;
} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
+ int ret;
+
tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
exthdr = ip.hdr + sizeof(*ip.v6);
l4_proto = ip.v6->nexthdr;
- if (l4.hdr != exthdr)
- ipv6_skip_exthdr(skb, exthdr - skb->data,
- &l4_proto, &frag_off);
+ ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ if (ret < 0)
+ return -1;
}
/* define outer transport */
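
[editor's note] The i40e_txrx change stops ignoring the result of ipv6_skip_exthdr(), which returns the offset of the transport header or a negative value when the extension-header chain cannot be parsed. A minimal sketch of the checked pattern:

    /* Sketch only: checked extension-header walk before csum offload. */
    static int example_find_l4(struct sk_buff *skb, u8 *l4_proto)
    {
    	__be16 frag_off;
    	int offset;

    	*l4_proto = ipv6_hdr(skb)->nexthdr;
    	offset = ipv6_skip_exthdr(skb, skb_network_offset(skb) +
    				  sizeof(struct ipv6hdr),
    				  l4_proto, &frag_off);
    	if (offset < 0)
    		return -1;	/* malformed chain: skip HW checksum
    				 * rather than program a bogus descriptor
    				 */
    	return offset;		/* byte offset of the L4 header */
    }
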
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 4f11f7bf75d1..fc32c5019b0f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -453,7 +453,7 @@ static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
struct i40e_tx_desc *tx_desc;
tx_desc = I40E_TX_DESC(xdp_ring, ntu);
- tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
+ tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
}
/**
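
[editor's note] The i40e_xsk one-liner is an endianness fix: cmd_type_offset_bsz is a __le64 descriptor field, so host-order constants must be converted before being OR-ed in, or the bit lands in the wrong byte on big-endian hosts (and sparse flags the mixed types). A minimal sketch of the rule:

    /* Sketch only: OR-ing flags into a little-endian descriptor field. */
    __le64 qw1 = tx_desc->cmd_type_offset_bsz;

    /* correct: both operands are little-endian */
    qw1 |= cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);

    /* wrong: a host-order value OR-ed into __le64 storage; it only
     * happens to work on little-endian CPUs
     */
    /* qw1 |= I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT; */

    tx_desc->cmd_type_offset_bsz = qw1;
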
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index dae8280ce17c..357706444dd5 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -454,9 +454,7 @@ struct ice_pf {
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
u8 stat_prev_loaded:1; /* has previous stats been loaded */
-#ifdef CONFIG_DCB
u16 dcbx_cap;
-#endif /* CONFIG_DCB */
u32 tx_timeout_count;
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index fcfefad00d1c..468a63f7eff9 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -134,7 +134,7 @@ ice_dcbnl_getnumtcs(struct net_device *dev, int __always_unused tcid, u8 *num)
if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
return -EINVAL;
- *num = IEEE_8021QAZ_MAX_TCS;
+ *num = pf->hw.func_caps.common_cap.maxtc;
return 0;
}
@@ -159,6 +159,10 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_qos_cfg *qos_cfg;
+ /* if FW LLDP agent is running, DCBNL not allowed to change mode */
+ if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
+ return ICE_DCB_NO_HW_CHG;
+
/* No support for LLD_MANAGED modes or CEE+IEEE */
if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 5636c9b23896..2dcfa685b763 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -8,6 +8,7 @@
#include "ice_fltr.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
+#include <net/dcbnl.h>
struct ice_stats {
char stat_string[ETH_GSTRING_LEN];
@@ -1238,6 +1239,9 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
status = ice_init_pf_dcb(pf, true);
if (status)
dev_warn(dev, "Fail to init DCB\n");
+
+ pf->dcbx_cap &= ~DCB_CAP_DCBX_LLD_MANAGED;
+ pf->dcbx_cap |= DCB_CAP_DCBX_HOST;
} else {
enum ice_status status;
bool dcbx_agent_status;
@@ -1280,6 +1284,9 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
if (status)
dev_dbg(dev, "Fail to enable MIB change events\n");
+ pf->dcbx_cap &= ~DCB_CAP_DCBX_HOST;
+ pf->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
+
ice_nway_reset(netdev);
}
}
@@ -3322,6 +3329,18 @@ ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
}
/**
+ * ice_get_valid_rss_size - return valid number of RSS queues
+ * @hw: pointer to the HW structure
+ * @new_size: requested RSS queues
+ */
+static int ice_get_valid_rss_size(struct ice_hw *hw, int new_size)
+{
+ struct ice_hw_common_caps *caps = &hw->func_caps.common_cap;
+
+ return min_t(int, new_size, BIT(caps->rss_table_entry_width));
+}
+
+/**
* ice_vsi_set_dflt_rss_lut - set default RSS LUT with requested RSS size
* @vsi: VSI to reconfigure RSS LUT on
* @req_rss_size: requested range of queue numbers for hashing
@@ -3348,14 +3367,10 @@ static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
return -ENOMEM;
/* set RSS LUT parameters */
- if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
vsi->rss_size = 1;
- } else {
- struct ice_hw_common_caps *caps = &hw->func_caps.common_cap;
-
- vsi->rss_size = min_t(int, req_rss_size,
- BIT(caps->rss_table_entry_width));
- }
+ else
+ vsi->rss_size = ice_get_valid_rss_size(hw, req_rss_size);
/* create/set RSS LUT */
ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
@@ -3434,9 +3449,12 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
ice_vsi_recfg_qs(vsi, new_rx, new_tx);
- if (new_rx && !netif_is_rxfh_configured(dev))
+ if (!netif_is_rxfh_configured(dev))
return ice_vsi_set_dflt_rss_lut(vsi, new_rx);
+ /* Update rss_size due to change in Rx queues */
+ vsi->rss_size = ice_get_valid_rss_size(&pf->hw, new_rx);
+
return 0;
}
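
[editor's note] ice_get_valid_rss_size() clamps a requested queue count to what the RSS lookup table can address. A worked example under an assumed capability value: with rss_table_entry_width = 9, BIT(9) = 512 LUT entries, so a request for 1024 RSS queues is reduced to 512 while a request for 64 passes through unchanged:

    /* Sketch only: the clamp with a hypothetical 9-bit entry width. */
    int a = min_t(int, 1024, BIT(9));	/* -> 512, clamped   */
    int b = min_t(int, 64, BIT(9));	/* -> 64, unchanged  */
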
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index bf5fd812ea0e..1f38a8d0c525 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -1919,6 +1919,29 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
}
/**
+ * ice_vc_get_max_frame_size - get max frame size allowed for VF
+ * @vf: VF used to determine max frame size
+ *
+ * Max frame size is determined based on the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware whether
+ * it's in a port VLAN so the PF needs to account for this in max frame size
+ * checks and sending the max frame size to the VF.
+ */
+static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+ struct ice_port_info *pi = vsi->port_info;
+ u16 max_frame_size;
+
+ max_frame_size = pi->phy.link_info.max_frame_size;
+
+ if (vf->port_vlan_info)
+ max_frame_size -= VLAN_HLEN;
+
+ return max_frame_size;
+}
+
+/**
* ice_vc_get_vf_res_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2000,6 +2023,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->max_vectors = pf->num_msix_per_vf;
vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+ vfres->max_mtu = ice_vc_get_max_frame_size(vf);
vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
@@ -2420,7 +2444,7 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
}
if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
- bool set_dflt_vsi = !!(info->flags & FLAG_VF_UNICAST_PROMISC);
+ bool set_dflt_vsi = alluni || allmulti;
if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
/* only attempt to set the default forwarding VSI if
@@ -2998,6 +3022,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
/* copy Rx queue info from VF into VSI */
if (qpi->rxq.ring_len > 0) {
+ u16 max_frame_size = ice_vc_get_max_frame_size(vf);
+
num_rxq++;
vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
vsi->rx_rings[i]->count = qpi->rxq.ring_len;
@@ -3010,7 +3036,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
vsi->rx_buf_len = qpi->rxq.databuffer_size;
vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
- if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
+ if (qpi->rxq.max_pkt_size > max_frame_size ||
qpi->rxq.max_pkt_size < 64) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -3018,6 +3044,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
vsi->max_frame = qpi->rxq.max_pkt_size;
+ /* add space for the port VLAN since the VF driver is not
+ * expected to account for it in the MTU calculation
+ */
+ if (vf->port_vlan_info)
+ vsi->max_frame += VLAN_HLEN;
}
/* VF can request to configure less than allocated queues or default
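
[editor's note] The ice virtchnl changes make the PF account for the 4-byte port VLAN tag that the VF cannot see. A worked example with an assumed port limit: if the port reports max_frame_size = 9728 and a port VLAN is configured, the VF is advertised 9728 - VLAN_HLEN = 9724; when the VF then configures max_pkt_size = 9724, the PF adds the tag back before programming the VSI:

    /* Sketch only: port VLAN accounting with a hypothetical 9728-byte
     * port limit (VLAN_HLEN is 4).
     */
    u16 port_max = 9728;			/* assumed link max_frame_size */
    u16 vf_max = port_max - VLAN_HLEN;	/* 9724 advertised to the VF   */
    u16 programmed = vf_max + VLAN_HLEN;	/* 9728 written to vsi->max_frame */
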
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 0507369bb54d..1767c60056c5 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4699,9 +4699,10 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port)
}
}
-static bool mvpp22_rss_is_supported(void)
+static bool mvpp22_rss_is_supported(struct mvpp2_port *port)
{
- return queue_mode == MVPP2_QDIST_MULTI_MODE;
+ return (queue_mode == MVPP2_QDIST_MULTI_MODE) &&
+ !(port->flags & MVPP2_F_LOOPBACK);
}
static int mvpp2_open(struct net_device *dev)
@@ -5513,7 +5514,7 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
struct mvpp2_port *port = netdev_priv(dev);
int ret = 0, i, loc = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
switch (info->cmd) {
@@ -5548,7 +5549,7 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
struct mvpp2_port *port = netdev_priv(dev);
int ret = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
switch (info->cmd) {
@@ -5569,7 +5570,9 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
- return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0;
}
static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
@@ -5578,7 +5581,7 @@ static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
struct mvpp2_port *port = netdev_priv(dev);
int ret = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
if (indir)
@@ -5596,7 +5599,7 @@ static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
struct mvpp2_port *port = netdev_priv(dev);
int ret = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
@@ -5617,7 +5620,7 @@ static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
struct mvpp2_port *port = netdev_priv(dev);
int ret = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
if (rss_context >= MVPP22_N_RSS_TABLES)
return -EINVAL;
@@ -5639,7 +5642,7 @@ static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
struct mvpp2_port *port = netdev_priv(dev);
int ret;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
@@ -5956,7 +5959,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
mvpp2_cls_oversize_rxq_set(port);
mvpp2_cls_port_config(port);
- if (mvpp22_rss_is_supported())
+ if (mvpp22_rss_is_supported(port))
mvpp22_port_rss_init(port);
/* Provide an initial Rx packet size */
@@ -6861,7 +6864,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_FILTER;
- if (mvpp22_rss_is_supported()) {
+ if (mvpp22_rss_is_supported(port)) {
dev->hw_features |= NETIF_F_RXHASH;
dev->features |= NETIF_F_NTUPLE;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 094124b695dc..aa2ca8780b9c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -473,7 +473,7 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
u16 pcifunc;
int ret, lf;
- cmd_buf = memdup_user(buffer, count);
+ cmd_buf = memdup_user(buffer, count + 1);
if (IS_ERR(cmd_buf))
return -ENOMEM;
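
[editor's note] The rvu_debugfs fix is an off-by-one in buffer sizing: presumably (per the commit title, the terminating write is outside this hunk) the function NUL-terminates the command at cmd_buf[count], which lands one byte past a count-sized allocation; copying count + 1 bytes keeps that write in bounds. A minimal sketch of the pitfall:

    /* Sketch only: sizing for a terminating NUL after memdup_user(). */
    char *buf = memdup_user(ubuf, count + 1);	/* +1: room for the NUL;
    						 * the extra user byte is
    						 * overwritten below
    						 */
    if (IS_ERR(buf))
    	return PTR_ERR(buf);
    buf[count] = '\0';	/* in bounds only because of the +1 above */
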
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index ebe1406c6e64..dbec8e187a68 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4806,12 +4806,11 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
if (!is_valid_ether_addr(dev->dev_addr)) {
struct sockaddr sa = { AF_UNSPEC };
- netdev_warn(dev,
- "Invalid MAC address, defaulting to random\n");
+ dev_warn(&hw->pdev->dev, "Invalid MAC address, defaulting to random\n");
eth_hw_addr_random(dev);
memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
if (sky2_set_mac_address(dev, &sa))
- netdev_warn(dev, "Failed to set MAC address.\n");
+ dev_warn(&hw->pdev->dev, "Failed to set MAC address.\n");
}
return dev;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 394f43add85c..a99e71bc7b3c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -4986,6 +4986,7 @@ static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule
if (!fs_rule->mirr_mbox) {
mlx4_err(dev, "rule mirroring mailbox is null\n");
+ mlx4_free_cmd_mailbox(dev, mailbox);
return -EINVAL;
}
memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 0a20dae32184..f704da3f214c 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -2285,14 +2285,14 @@ static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
{
- RTL_W8(tp, MaxTxPacketSize, 0x3f);
+ RTL_W8(tp, MaxTxPacketSize, 0x24);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01);
}
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
{
- RTL_W8(tp, MaxTxPacketSize, 0x0c);
+ RTL_W8(tp, MaxTxPacketSize, 0x3f);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index b7a0c57dfbfb..d23be45a64e5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -218,6 +218,7 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev)
goto remove_config;
}
+ spin_lock_init(&dwmac->lock);
dwmac->reg = stmmac_res.addr;
plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = visconti_eth_fix_mac_speed;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 56985542e202..44bb133c3000 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -316,6 +316,32 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
if (!priv->dma_cap.av)
return -EOPNOTSUPP;
+ /* Port Transmit Rate and Speed Divider */
+ switch (priv->speed) {
+ case SPEED_10000:
+ ptr = 32;
+ speed_div = 10000000;
+ break;
+ case SPEED_5000:
+ ptr = 32;
+ speed_div = 5000000;
+ break;
+ case SPEED_2500:
+ ptr = 8;
+ speed_div = 2500000;
+ break;
+ case SPEED_1000:
+ ptr = 8;
+ speed_div = 1000000;
+ break;
+ case SPEED_100:
+ ptr = 4;
+ speed_div = 100000;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
@@ -332,10 +358,6 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
}
- /* Port Transmit Rate and Speed Divider */
- ptr = (priv->speed == SPEED_100) ? 4 : 8;
- speed_div = (priv->speed == SPEED_100) ? 100000 : 1000000;
-
/* Final adjustments for HW */
value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
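
[editor's note] With the speed table above, the final computation scales the configured idleslope (in kbit/s) by the Port Transmit Rate and the speed divider. A worked example at an assumed 1 Gb/s link with qopt->idleslope = 100000 (100 Mbit/s reserved): ptr = 8 and speed_div = 1000000, so value = 100000 * 1024 * 8 / 1000000 = 819 after truncation:

    /* Sketch only: idle slope at SPEED_1000, idleslope = 100000 kbit/s. */
    s64 value = div_s64(100000 * 1024ll * 8, 1000000);	/* 819.2 -> 819 */
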
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 9a70f05baf6e..39c00f050fbd 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -543,7 +543,6 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
mtu < ntohs(iph->tot_len)) {
netdev_dbg(dev, "packet too big, fragmentation needed\n");
- memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
goto err_rt;
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 3e431737c1ba..a00a667454a9 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -239,7 +239,7 @@ static int ip101a_g_config_intr_pin(struct phy_device *phydev)
oldpage = phy_select_page(phydev, IP101G_DEFAULT_PAGE);
if (oldpage < 0)
- return oldpage;
+ goto out;
/* configure the RXER/INTR_32 pin of the 32-pin IP101GR if needed: */
switch (priv->sel_intr32) {
@@ -314,7 +314,7 @@ static int ip101a_g_read_status(struct phy_device *phydev)
oldpage = phy_select_page(phydev, IP101G_DEFAULT_PAGE);
if (oldpage < 0)
- return oldpage;
+ goto out;
ret = __phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
if (ret < 0)
@@ -349,7 +349,8 @@ out:
static int ip101a_g_config_mdix(struct phy_device *phydev)
{
u16 ctrl = 0, ctrl2 = 0;
- int oldpage, ret;
+ int oldpage;
+ int ret = 0;
switch (phydev->mdix_ctrl) {
case ETH_TP_MDI:
@@ -367,7 +368,7 @@ static int ip101a_g_config_mdix(struct phy_device *phydev)
oldpage = phy_select_page(phydev, IP101G_DEFAULT_PAGE);
if (oldpage < 0)
- return oldpage;
+ goto out;
ret = __phy_modify(phydev, IP10XX_SPEC_CTRL_STATUS,
IP101A_G_AUTO_MDIX_DIS, ctrl);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 7ec6f70d6a82..a14a00328fa3 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1303,6 +1303,7 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz8081_type,
.probe = kszphy_probe,
.config_init = ksz8081_config_init,
+ .soft_reset = genphy_soft_reset,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
.get_sset_count = kszphy_get_sset_count,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 6c3d8c2abd38..17a050521b86 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1318,6 +1318,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1255, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1256, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1270, 5)}, /* ZTE MF667 */
+ {QMI_FIXED_INTF(0x19d2, 0x1275, 3)}, /* ZTE P685M */
{QMI_FIXED_INTF(0x19d2, 0x1401, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */
{QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 2d7cc63bef89..b246817f3405 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -2632,21 +2632,24 @@ static inline u8 rtl8152_get_speed(struct r8152 *tp)
return ocp_read_byte(tp, MCU_TYPE_PLA, PLA_PHYSTATUS);
}
-static void rtl_set_eee_plus(struct r8152 *tp)
+static void rtl_eee_plus_en(struct r8152 *tp, bool enable)
{
u32 ocp_data;
- u8 speed;
- speed = rtl8152_get_speed(tp);
- if (speed & _10bps) {
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
+ if (enable)
ocp_data |= EEEP_CR_EEEP_TX;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
- } else {
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
+ else
ocp_data &= ~EEEP_CR_EEEP_TX;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
- }
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
+}
+
+static void rtl_set_eee_plus(struct r8152 *tp)
+{
+ if (rtl8152_get_speed(tp) & _10bps)
+ rtl_eee_plus_en(tp, true);
+ else
+ rtl_eee_plus_en(tp, false);
}
static void rxdy_gated_en(struct r8152 *tp, bool enable)
@@ -3150,10 +3153,22 @@ static void r8153b_ups_flags(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ups_flags);
}
-static void r8153b_green_en(struct r8152 *tp, bool enable)
+static void rtl_green_en(struct r8152 *tp, bool enable)
{
u16 data;
+ data = sram_read(tp, SRAM_GREEN_CFG);
+ if (enable)
+ data |= GREEN_ETH_EN;
+ else
+ data &= ~GREEN_ETH_EN;
+ sram_write(tp, SRAM_GREEN_CFG, data);
+
+ tp->ups_info.green = enable;
+}
+
+static void r8153b_green_en(struct r8152 *tp, bool enable)
+{
if (enable) {
sram_write(tp, 0x8045, 0); /* 10M abiq&ldvbias */
sram_write(tp, 0x804d, 0x1222); /* 100M short abiq&ldvbias */
@@ -3164,11 +3179,7 @@ static void r8153b_green_en(struct r8152 *tp, bool enable)
sram_write(tp, 0x805d, 0x2444); /* 1000M short abiq&ldvbias */
}
- data = sram_read(tp, SRAM_GREEN_CFG);
- data |= GREEN_ETH_EN;
- sram_write(tp, SRAM_GREEN_CFG, data);
-
- tp->ups_info.green = enable;
+ rtl_green_en(tp, true);
}
static u16 r8153_phy_status(struct r8152 *tp, u16 desired)
@@ -3360,7 +3371,7 @@ static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable)
r8153b_ups_en(tp, false);
r8153_queue_wake(tp, false);
rtl_runtime_suspend_enable(tp, false);
- if (tp->udev->speed != USB_SPEED_HIGH)
+ if (tp->udev->speed >= USB_SPEED_SUPER)
r8153b_u1u2en(tp, true);
}
}
@@ -5056,7 +5067,7 @@ static void rtl8153b_up(struct r8152 *tp)
r8153_aldps_en(tp, true);
- if (tp->udev->speed != USB_SPEED_HIGH)
+ if (tp->udev->speed >= USB_SPEED_SUPER)
r8153b_u1u2en(tp, true);
}
@@ -5572,8 +5583,9 @@ static void r8153b_init(struct r8152 *tp)
ocp_data |= POLL_LINK_CHG;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
- if (tp->udev->speed != USB_SPEED_HIGH)
+ if (tp->udev->speed >= USB_SPEED_SUPER)
r8153b_u1u2en(tp, true);
+
usb_enable_lpm(tp->udev);
/* MAC clock speed down */
@@ -5756,6 +5768,9 @@ static int rtl8152_runtime_suspend(struct r8152 *tp)
struct net_device *netdev = tp->netdev;
int ret = 0;
+ if (!tp->rtl_ops.autosuspend_en)
+ return -EBUSY;
+
set_bit(SELECTIVE_SUSPEND, &tp->flags);
smp_mb__after_atomic();
@@ -6155,6 +6170,11 @@ rtl_ethtool_get_eee(struct net_device *net, struct ethtool_eee *edata)
struct r8152 *tp = netdev_priv(net);
int ret;
+ if (!tp->rtl_ops.eee_get) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
ret = usb_autopm_get_interface(tp->intf);
if (ret < 0)
goto out;
@@ -6177,6 +6197,11 @@ rtl_ethtool_set_eee(struct net_device *net, struct ethtool_eee *edata)
struct r8152 *tp = netdev_priv(net);
int ret;
+ if (!tp->rtl_ops.eee_set) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
ret = usb_autopm_get_interface(tp->intf);
if (ret < 0)
goto out;
@@ -6576,7 +6601,7 @@ static int rtl_ops_init(struct r8152 *tp)
default:
ret = -ENODEV;
- netif_err(tp, probe, tp->netdev, "Unknown Device\n");
+ dev_err(&tp->intf->dev, "Unknown Device\n");
break;
}
@@ -6833,7 +6858,7 @@ static int rtl8152_probe(struct usb_interface *intf,
ret = register_netdev(netdev);
if (ret != 0) {
- netif_err(tp, probe, netdev, "couldn't register the device\n");
+ dev_err(&intf->dev, "couldn't register the device\n");
goto out1;
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 3929e437382b..666dd201c3d5 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -4721,7 +4721,6 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan, *next;
struct net_device *dev, *aux;
- unsigned int h;
for_each_netdev_safe(net, dev, aux)
if (dev->rtnl_link_ops == &vxlan_link_ops)
@@ -4735,14 +4734,13 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
unregister_netdevice_queue(vxlan->dev, head);
}
- for (h = 0; h < PORT_HASH_SIZE; ++h)
- WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
}
static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
{
struct net *net;
LIST_HEAD(list);
+ unsigned int h;
rtnl_lock();
list_for_each_entry(net, net_list, exit_list) {
@@ -4755,6 +4753,13 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
unregister_netdevice_many(&list);
rtnl_unlock();
+
+ list_for_each_entry(net, net_list, exit_list) {
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+
+ for (h = 0; h < PORT_HASH_SIZE; ++h)
+ WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
+ }
}
static struct pernet_operations vxlan_net_ops = {
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index a3ed49cd95c3..551ddaaaf540 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -138,7 +138,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
else if (skb->protocol == htons(ETH_P_IPV6))
net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n",
dev->name, &ipv6_hdr(skb)->daddr);
- goto err;
+ goto err_icmp;
}
family = READ_ONCE(peer->endpoint.addr.sa_family);
@@ -157,7 +157,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
struct sk_buff *segs = skb_gso_segment(skb, 0);
- if (unlikely(IS_ERR(segs))) {
+ if (IS_ERR(segs)) {
ret = PTR_ERR(segs);
goto err_peer;
}
@@ -201,12 +201,13 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
err_peer:
wg_peer_put(peer);
-err:
- ++dev->stats.tx_errors;
+err_icmp:
if (skb->protocol == htons(ETH_P_IP))
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
else if (skb->protocol == htons(ETH_P_IPV6))
icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
+err:
+ ++dev->stats.tx_errors;
kfree_skb(skb);
return ret;
}
@@ -234,8 +235,8 @@ static void wg_destruct(struct net_device *dev)
destroy_workqueue(wg->handshake_receive_wq);
destroy_workqueue(wg->handshake_send_wq);
destroy_workqueue(wg->packet_crypt_wq);
- wg_packet_queue_free(&wg->decrypt_queue, true);
- wg_packet_queue_free(&wg->encrypt_queue, true);
+ wg_packet_queue_free(&wg->decrypt_queue);
+ wg_packet_queue_free(&wg->encrypt_queue);
rcu_barrier(); /* Wait for all the peers to be actually freed. */
wg_ratelimiter_uninit();
memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
@@ -337,12 +338,12 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
goto err_destroy_handshake_send;
ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
- true, MAX_QUEUED_PACKETS);
+ MAX_QUEUED_PACKETS);
if (ret < 0)
goto err_destroy_packet_crypt;
ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker,
- true, MAX_QUEUED_PACKETS);
+ MAX_QUEUED_PACKETS);
if (ret < 0)
goto err_free_encrypt_queue;
@@ -367,9 +368,9 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
err_uninit_ratelimiter:
wg_ratelimiter_uninit();
err_free_decrypt_queue:
- wg_packet_queue_free(&wg->decrypt_queue, true);
+ wg_packet_queue_free(&wg->decrypt_queue);
err_free_encrypt_queue:
- wg_packet_queue_free(&wg->encrypt_queue, true);
+ wg_packet_queue_free(&wg->encrypt_queue);
err_destroy_packet_crypt:
destroy_workqueue(wg->packet_crypt_wq);
err_destroy_handshake_send:
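
[Two independent things happen in the device.c hunks: the error labels in wg_xmit() are split so that only traffic known to be IPv4/IPv6 reaches the ICMP-generating path, and the queue init/free calls lose their multicore flag now that per-peer rings are gone (see queueing.c below). For the first part, the key is a caller not shown in these hunks: an early protocol check can bail to the plain err label and bypass err_icmp entirely. A sketch of that assumed check elsewhere in wg_xmit() (helper name assumed from the same series):

	if (unlikely(!wg_check_packet_protocol(skb))) {
		ret = -EPROTONOSUPPORT;
		net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name);
		goto err;	/* skips err_icmp: no ICMP reply for non-IP */
	}
]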
diff --git a/drivers/net/wireguard/device.h b/drivers/net/wireguard/device.h
index 4d0144e16947..854bc3d97150 100644
--- a/drivers/net/wireguard/device.h
+++ b/drivers/net/wireguard/device.h
@@ -27,13 +27,14 @@ struct multicore_worker {
struct crypt_queue {
struct ptr_ring ring;
- union {
- struct {
- struct multicore_worker __percpu *worker;
- int last_cpu;
- };
- struct work_struct work;
- };
+ struct multicore_worker __percpu *worker;
+ int last_cpu;
+};
+
+struct prev_queue {
+ struct sk_buff *head, *tail, *peeked;
+ struct { struct sk_buff *next, *prev; } empty; // Match first 2 members of struct sk_buff.
+ atomic_t count;
};
struct wg_device {
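
[struct prev_queue is an intrusive replacement for the old per-peer ptr_ring: queued skbs are linked through their own prev pointer (aliased as NEXT() in queueing.c below), and empty is a permanently embedded stub node that the code casts to a struct sk_buff. That cast is only legal because empty mirrors sk_buff's leading next/prev members, which the BUILD_BUG_ON in queueing.c enforces. A portable analog of that layout check, with stand-in types:

	#include <assert.h>
	#include <stddef.h>

	struct node { struct node *next, *prev; };

	struct queue {
		struct node *head, *tail, *peeked;
		struct { struct node *next, *prev; } empty;	/* stub, never allocated */
	};

	/* the stub must line up with a real node's first two members */
	static_assert(offsetof(struct node, next) ==
		      offsetof(struct queue, empty.next) - offsetof(struct queue, empty),
		      "stub/node layout mismatch");
	static_assert(offsetof(struct node, prev) ==
		      offsetof(struct queue, empty.prev) - offsetof(struct queue, empty),
		      "stub/node layout mismatch");
]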
diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c
index b3b6370e6b95..cd5cb0292cb6 100644
--- a/drivers/net/wireguard/peer.c
+++ b/drivers/net/wireguard/peer.c
@@ -32,27 +32,22 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
peer = kzalloc(sizeof(*peer), GFP_KERNEL);
if (unlikely(!peer))
return ERR_PTR(ret);
- peer->device = wg;
+ if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
+ goto err;
+ peer->device = wg;
wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
public_key, preshared_key, peer);
- if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
- goto err_1;
- if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false,
- MAX_QUEUED_PACKETS))
- goto err_2;
- if (wg_packet_queue_init(&peer->rx_queue, NULL, false,
- MAX_QUEUED_PACKETS))
- goto err_3;
-
peer->internal_id = atomic64_inc_return(&peer_counter);
peer->serial_work_cpu = nr_cpumask_bits;
wg_cookie_init(&peer->latest_cookie);
wg_timers_init(peer);
wg_cookie_checker_precompute_peer_keys(peer);
spin_lock_init(&peer->keypairs.keypair_update_lock);
- INIT_WORK(&peer->transmit_handshake_work,
- wg_packet_handshake_send_worker);
+ INIT_WORK(&peer->transmit_handshake_work, wg_packet_handshake_send_worker);
+ INIT_WORK(&peer->transmit_packet_work, wg_packet_tx_worker);
+ wg_prev_queue_init(&peer->tx_queue);
+ wg_prev_queue_init(&peer->rx_queue);
rwlock_init(&peer->endpoint_lock);
kref_init(&peer->refcount);
skb_queue_head_init(&peer->staged_packet_queue);
@@ -68,11 +63,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id);
return peer;
-err_3:
- wg_packet_queue_free(&peer->tx_queue, false);
-err_2:
- dst_cache_destroy(&peer->endpoint_cache);
-err_1:
+err:
kfree(peer);
return ERR_PTR(ret);
}
@@ -197,8 +188,7 @@ static void rcu_release(struct rcu_head *rcu)
struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu);
dst_cache_destroy(&peer->endpoint_cache);
- wg_packet_queue_free(&peer->rx_queue, false);
- wg_packet_queue_free(&peer->tx_queue, false);
+ WARN_ON(wg_prev_queue_peek(&peer->tx_queue) || wg_prev_queue_peek(&peer->rx_queue));
/* The final zeroing takes care of clearing any remaining handshake key
* material and other potentially sensitive information.
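
[With tx_queue/rx_queue no longer owning ring buffers, wg_peer_create() has exactly one fallible step after the allocation (dst_cache_init()), so the three-stage err_1/err_2/err_3 unwind collapses to a single label. The general pattern being collapsed, with hypothetical resources:

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);
	if (resource_a_init(obj))	/* hypothetical */
		goto err_free;
	if (resource_b_init(obj))	/* hypothetical */
		goto err_destroy_a;
	return obj;

	err_destroy_a:
		resource_a_destroy(obj);	/* unwind in reverse acquisition order */
	err_free:
		kfree(obj);
		return ERR_PTR(-ENOMEM);

Note the matching teardown change: rcu_release() no longer frees queues, it merely asserts via wg_prev_queue_peek() that both per-peer queues drained before the peer memory is freed.]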
diff --git a/drivers/net/wireguard/peer.h b/drivers/net/wireguard/peer.h
index 23af40922997..8d53b687a1d1 100644
--- a/drivers/net/wireguard/peer.h
+++ b/drivers/net/wireguard/peer.h
@@ -36,16 +36,17 @@ struct endpoint {
struct wg_peer {
struct wg_device *device;
- struct crypt_queue tx_queue, rx_queue;
+ struct prev_queue tx_queue, rx_queue;
struct sk_buff_head staged_packet_queue;
int serial_work_cpu;
+ bool is_dead;
struct noise_keypairs keypairs;
struct endpoint endpoint;
struct dst_cache endpoint_cache;
rwlock_t endpoint_lock;
struct noise_handshake handshake;
atomic64_t last_sent_handshake;
- struct work_struct transmit_handshake_work, clear_peer_work;
+ struct work_struct transmit_handshake_work, clear_peer_work, transmit_packet_work;
struct cookie latest_cookie;
struct hlist_node pubkey_hash;
u64 rx_bytes, tx_bytes;
@@ -61,9 +62,8 @@ struct wg_peer {
struct rcu_head rcu;
struct list_head peer_list;
struct list_head allowedips_list;
- u64 internal_id;
struct napi_struct napi;
- bool is_dead;
+ u64 internal_id;
};
struct wg_peer *wg_peer_create(struct wg_device *wg,
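
[The peer.h reshuffle is a data-layout tweak rather than a functional one: is_dead is read on every packet (see the READ_ONCE(peer->is_dead) checks in receive.c and send.c below), so it moves up beside the queues it gates, while internal_id, used mostly for logging, sinks below napi. The guiding idea, sketched with a toy struct assuming 64-byte cache lines:

	struct toy_peer {
		/* leading cache lines: touched per packet */
		void *device;
		struct prev_queue tx_queue, rx_queue;
		bool is_dead;
		/* ... */
		/* trailing cache lines: setup, teardown, logging */
		u64 internal_id;
	};
]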
diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c
index 71b8e80b58e1..48e7b982a307 100644
--- a/drivers/net/wireguard/queueing.c
+++ b/drivers/net/wireguard/queueing.c
@@ -9,8 +9,7 @@ struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
{
int cpu;
- struct multicore_worker __percpu *worker =
- alloc_percpu(struct multicore_worker);
+ struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);
if (!worker)
return NULL;
@@ -23,7 +22,7 @@ wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
}
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
- bool multicore, unsigned int len)
+ unsigned int len)
{
int ret;
@@ -31,25 +30,78 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
if (ret)
return ret;
- if (function) {
- if (multicore) {
- queue->worker = wg_packet_percpu_multicore_worker_alloc(
- function, queue);
- if (!queue->worker) {
- ptr_ring_cleanup(&queue->ring, NULL);
- return -ENOMEM;
- }
- } else {
- INIT_WORK(&queue->work, function);
- }
+ queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
+ if (!queue->worker) {
+ ptr_ring_cleanup(&queue->ring, NULL);
+ return -ENOMEM;
}
return 0;
}
-void wg_packet_queue_free(struct crypt_queue *queue, bool multicore)
+void wg_packet_queue_free(struct crypt_queue *queue)
{
- if (multicore)
- free_percpu(queue->worker);
+ free_percpu(queue->worker);
WARN_ON(!__ptr_ring_empty(&queue->ring));
ptr_ring_cleanup(&queue->ring, NULL);
}
+
+#define NEXT(skb) ((skb)->prev)
+#define STUB(queue) ((struct sk_buff *)&queue->empty)
+
+void wg_prev_queue_init(struct prev_queue *queue)
+{
+ NEXT(STUB(queue)) = NULL;
+ queue->head = queue->tail = STUB(queue);
+ queue->peeked = NULL;
+ atomic_set(&queue->count, 0);
+ BUILD_BUG_ON(
+ offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
+ offsetof(struct prev_queue, empty) ||
+ offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
+ offsetof(struct prev_queue, empty));
+}
+
+static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
+{
+ WRITE_ONCE(NEXT(skb), NULL);
+ WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
+}
+
+bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
+{
+ if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
+ return false;
+ __wg_prev_queue_enqueue(queue, skb);
+ return true;
+}
+
+struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
+{
+ struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));
+
+ if (tail == STUB(queue)) {
+ if (!next)
+ return NULL;
+ queue->tail = next;
+ tail = next;
+ next = smp_load_acquire(&NEXT(next));
+ }
+ if (next) {
+ queue->tail = next;
+ atomic_dec(&queue->count);
+ return tail;
+ }
+ if (tail != READ_ONCE(queue->head))
+ return NULL;
+ __wg_prev_queue_enqueue(queue, STUB(queue));
+ next = smp_load_acquire(&NEXT(tail));
+ if (next) {
+ queue->tail = next;
+ atomic_dec(&queue->count);
+ return tail;
+ }
+ return NULL;
+}
+
+#undef NEXT
+#undef STUB
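
[The new wg_prev_queue_* functions are an instance of Dmitry Vyukov's intrusive MPSC queue: producers publish to head with a release exchange, the single consumer walks from tail, and the embedded stub node is re-enqueued whenever the consumer is about to detach the last real element (otherwise head would still point at it). The kernel-specific twists are reusing skb->prev as the link and bounding occupancy with the atomic count. A self-contained userspace C11 rendition of the same algorithm, with a plain struct node standing in for sk_buff:

	#include <stdatomic.h>
	#include <stddef.h>

	struct node { _Atomic(struct node *) next; };

	struct mpsc_queue {
		_Atomic(struct node *) head;	/* producers push here */
		struct node *tail;		/* consumer-private */
		struct node stub;		/* plays the role of prev_queue's `empty` */
	};

	static void mpsc_init(struct mpsc_queue *q)
	{
		atomic_store_explicit(&q->stub.next, NULL, memory_order_relaxed);
		atomic_store_explicit(&q->head, &q->stub, memory_order_relaxed);
		q->tail = &q->stub;
	}

	/* Multi-producer: callable from any thread. */
	static void mpsc_enqueue(struct mpsc_queue *q, struct node *n)
	{
		struct node *prev;

		atomic_store_explicit(&n->next, NULL, memory_order_relaxed);
		prev = atomic_exchange_explicit(&q->head, n, memory_order_release);
		/* between the exchange and this store the chain is briefly
		 * split; the consumer observes that as "empty for now" */
		atomic_store_explicit(&prev->next, n, memory_order_release);
	}

	/* Single consumer only. NULL means empty or a producer is mid-link. */
	static struct node *mpsc_dequeue(struct mpsc_queue *q)
	{
		struct node *tail = q->tail;
		struct node *next = atomic_load_explicit(&tail->next, memory_order_acquire);

		if (tail == &q->stub) {		/* hop over the stub */
			if (!next)
				return NULL;
			q->tail = next;
			tail = next;
			next = atomic_load_explicit(&next->next, memory_order_acquire);
		}
		if (next) {
			q->tail = next;
			return tail;
		}
		/* tail looks like the last element: confirm no producer raced
		 * in, then push the stub behind it so it can be detached */
		if (tail != atomic_load_explicit(&q->head, memory_order_relaxed))
			return NULL;
		mpsc_enqueue(q, &q->stub);
		next = atomic_load_explicit(&tail->next, memory_order_acquire);
		if (next) {
			q->tail = next;
			return tail;
		}
		return NULL;
	}

The kernel version additionally gates enqueue on atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS), which is why wg_prev_queue_enqueue() can fail while the sketch above cannot.]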
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index dfb674e03076..4ef2944a68bc 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -17,12 +17,13 @@ struct wg_device;
struct wg_peer;
struct multicore_worker;
struct crypt_queue;
+struct prev_queue;
struct sk_buff;
/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
- bool multicore, unsigned int len);
-void wg_packet_queue_free(struct crypt_queue *queue, bool multicore);
+ unsigned int len);
+void wg_packet_queue_free(struct crypt_queue *queue);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);
@@ -135,8 +136,31 @@ static inline int wg_cpumask_next_online(int *next)
return cpu;
}
+void wg_prev_queue_init(struct prev_queue *queue);
+
+/* Multi producer */
+bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb);
+
+/* Single consumer */
+struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue);
+
+/* Single consumer */
+static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue)
+{
+ if (queue->peeked)
+ return queue->peeked;
+ queue->peeked = wg_prev_queue_dequeue(queue);
+ return queue->peeked;
+}
+
+/* Single consumer */
+static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
+{
+ queue->peeked = NULL;
+}
+
static inline int wg_queue_enqueue_per_device_and_peer(
- struct crypt_queue *device_queue, struct crypt_queue *peer_queue,
+ struct crypt_queue *device_queue, struct prev_queue *peer_queue,
struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
{
int cpu;
@@ -145,8 +169,9 @@ static inline int wg_queue_enqueue_per_device_and_peer(
/* We first queue this up for the peer ingestion, but the consumer
* will wait for the state to change to CRYPTED or DEAD before.
*/
- if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb)))
+ if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb)))
return -ENOSPC;
+
/* Then we queue it up in the device queue, which consumes the
* packet as soon as it can.
*/
@@ -157,9 +182,7 @@ static inline int wg_queue_enqueue_per_device_and_peer(
return 0;
}
-static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
- struct sk_buff *skb,
- enum packet_state state)
+static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state)
{
/* We take a reference, because as soon as we call atomic_set, the
* peer can be freed from below us.
@@ -167,14 +190,12 @@ static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));
atomic_set_release(&PACKET_CB(skb)->state, state);
- queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu,
- peer->internal_id),
- peer->device->packet_crypt_wq, &queue->work);
+ queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id),
+ peer->device->packet_crypt_wq, &peer->transmit_packet_work);
wg_peer_put(peer);
}
-static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb,
- enum packet_state state)
+static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state)
{
/* We take a reference, because as soon as we call atomic_set, the
* peer can be freed from below us.
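
[wg_prev_queue_peek()/wg_prev_queue_drop_peeked() split dequeue into a look-then-commit pair, which is what keeps delivery in order: crypto completes out of order on the per-device ring, but the consumer only advances the per-peer queue while its head packet has left PACKET_STATE_UNCRYPTED. Continuing the userspace sketch from the queueing.c section above, the peek layer is just a one-element cache in front of dequeue:

	struct peeking_queue {
		struct mpsc_queue q;
		struct node *peeked;	/* consumer-private, like prev_queue->peeked */
	};

	static struct node *peek(struct peeking_queue *pq)
	{
		if (!pq->peeked)
			pq->peeked = mpsc_dequeue(&pq->q);
		return pq->peeked;
	}

	static void drop_peeked(struct peeking_queue *pq)
	{
		pq->peeked = NULL;	/* commits the earlier dequeue */
	}
]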
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index 2c9551ea6dc7..7dc84bcca261 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -444,7 +444,6 @@ packet_processed:
int wg_packet_rx_poll(struct napi_struct *napi, int budget)
{
struct wg_peer *peer = container_of(napi, struct wg_peer, napi);
- struct crypt_queue *queue = &peer->rx_queue;
struct noise_keypair *keypair;
struct endpoint endpoint;
enum packet_state state;
@@ -455,11 +454,10 @@ int wg_packet_rx_poll(struct napi_struct *napi, int budget)
if (unlikely(budget <= 0))
return 0;
- while ((skb = __ptr_ring_peek(&queue->ring)) != NULL &&
+ while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL &&
(state = atomic_read_acquire(&PACKET_CB(skb)->state)) !=
PACKET_STATE_UNCRYPTED) {
- __ptr_ring_discard_one(&queue->ring);
- peer = PACKET_PEER(skb);
+ wg_prev_queue_drop_peeked(&peer->rx_queue);
keypair = PACKET_CB(skb)->keypair;
free = true;
@@ -508,7 +506,7 @@ void wg_packet_decrypt_worker(struct work_struct *work)
enum packet_state state =
likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ?
PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
- wg_queue_enqueue_per_peer_napi(skb, state);
+ wg_queue_enqueue_per_peer_rx(skb, state);
if (need_resched())
cond_resched();
}
@@ -531,12 +529,10 @@ static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb)
if (unlikely(READ_ONCE(peer->is_dead)))
goto err;
- ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue,
- &peer->rx_queue, skb,
- wg->packet_crypt_wq,
- &wg->decrypt_queue.last_cpu);
+ ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb,
+ wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu);
if (unlikely(ret == -EPIPE))
- wg_queue_enqueue_per_peer_napi(skb, PACKET_STATE_DEAD);
+ wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD);
if (likely(!ret || ret == -EPIPE)) {
rcu_read_unlock_bh();
return;
diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
index f74b9341ab0f..5368f7c35b4b 100644
--- a/drivers/net/wireguard/send.c
+++ b/drivers/net/wireguard/send.c
@@ -239,8 +239,7 @@ void wg_packet_send_keepalive(struct wg_peer *peer)
wg_packet_send_staged_packets(peer);
}
-static void wg_packet_create_data_done(struct sk_buff *first,
- struct wg_peer *peer)
+static void wg_packet_create_data_done(struct wg_peer *peer, struct sk_buff *first)
{
struct sk_buff *skb, *next;
bool is_keepalive, data_sent = false;
@@ -262,22 +261,19 @@ static void wg_packet_create_data_done(struct sk_buff *first,
void wg_packet_tx_worker(struct work_struct *work)
{
- struct crypt_queue *queue = container_of(work, struct crypt_queue,
- work);
+ struct wg_peer *peer = container_of(work, struct wg_peer, transmit_packet_work);
struct noise_keypair *keypair;
enum packet_state state;
struct sk_buff *first;
- struct wg_peer *peer;
- while ((first = __ptr_ring_peek(&queue->ring)) != NULL &&
+ while ((first = wg_prev_queue_peek(&peer->tx_queue)) != NULL &&
(state = atomic_read_acquire(&PACKET_CB(first)->state)) !=
PACKET_STATE_UNCRYPTED) {
- __ptr_ring_discard_one(&queue->ring);
- peer = PACKET_PEER(first);
+ wg_prev_queue_drop_peeked(&peer->tx_queue);
keypair = PACKET_CB(first)->keypair;
if (likely(state == PACKET_STATE_CRYPTED))
- wg_packet_create_data_done(first, peer);
+ wg_packet_create_data_done(peer, first);
else
kfree_skb_list(first);
@@ -306,16 +302,14 @@ void wg_packet_encrypt_worker(struct work_struct *work)
break;
}
}
- wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first,
- state);
+ wg_queue_enqueue_per_peer_tx(first, state);
if (need_resched())
cond_resched();
}
}
-static void wg_packet_create_data(struct sk_buff *first)
+static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first)
{
- struct wg_peer *peer = PACKET_PEER(first);
struct wg_device *wg = peer->device;
int ret = -EINVAL;
@@ -323,13 +317,10 @@ static void wg_packet_create_data(struct sk_buff *first)
if (unlikely(READ_ONCE(peer->is_dead)))
goto err;
- ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue,
- &peer->tx_queue, first,
- wg->packet_crypt_wq,
- &wg->encrypt_queue.last_cpu);
+ ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first,
+ wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu);
if (unlikely(ret == -EPIPE))
- wg_queue_enqueue_per_peer(&peer->tx_queue, first,
- PACKET_STATE_DEAD);
+ wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD);
err:
rcu_read_unlock_bh();
if (likely(!ret || ret == -EPIPE))
@@ -393,7 +384,7 @@ void wg_packet_send_staged_packets(struct wg_peer *peer)
packets.prev->next = NULL;
wg_peer_get(keypair->entry.peer);
PACKET_CB(packets.next)->keypair = keypair;
- wg_packet_create_data(packets.next);
+ wg_packet_create_data(peer, packets.next);
return;
out_invalid:
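
[Since the TX work item moved from the shared crypt_queue into the peer itself (transmit_packet_work in peer.h above), the worker recovers its peer with container_of() instead of fishing PACKET_PEER() out of the first queued skb, and wg_packet_create_data() takes the peer its caller already holds. The recovery idiom, sketched with a stand-in struct:

	static void example_tx_worker(struct work_struct *work)
	{
		struct example_peer *peer =
			container_of(work, struct example_peer, transmit_packet_work);

		/* peer is valid here because the queueing side took a
		 * reference before queue_work_on() (see queueing.h above) */
	}
]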
diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
index 410b318e57fb..d9ad850daa79 100644
--- a/drivers/net/wireguard/socket.c
+++ b/drivers/net/wireguard/socket.c
@@ -53,7 +53,7 @@ static int send4(struct wg_device *wg, struct sk_buff *skb,
if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0,
fl.saddr, RT_SCOPE_HOST))) {
endpoint->src4.s_addr = 0;
- *(__force __be32 *)&endpoint->src_if4 = 0;
+ endpoint->src_if4 = 0;
fl.saddr = 0;
if (cache)
dst_cache_reset(cache);
@@ -63,7 +63,7 @@ static int send4(struct wg_device *wg, struct sk_buff *skb,
PTR_ERR(rt) == -EINVAL) || (!IS_ERR(rt) &&
rt->dst.dev->ifindex != endpoint->src_if4)))) {
endpoint->src4.s_addr = 0;
- *(__force __be32 *)&endpoint->src_if4 = 0;
+ endpoint->src_if4 = 0;
fl.saddr = 0;
if (cache)
dst_cache_reset(cache);
@@ -71,7 +71,7 @@ static int send4(struct wg_device *wg, struct sk_buff *skb,
ip_rt_put(rt);
rt = ip_route_output_flow(sock_net(sock), &fl, sock);
}
- if (unlikely(IS_ERR(rt))) {
+ if (IS_ERR(rt)) {
ret = PTR_ERR(rt);
net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
wg->dev->name, &endpoint->addr, ret);
@@ -138,7 +138,7 @@ static int send6(struct wg_device *wg, struct sk_buff *skb,
}
dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sock), sock, &fl,
NULL);
- if (unlikely(IS_ERR(dst))) {
+ if (IS_ERR(dst)) {
ret = PTR_ERR(dst);
net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
wg->dev->name, &endpoint->addr, ret);
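
[The socket.c hunks fold in two small cleanups. First, endpoint->src_if4 is a host-order interface index, not a big-endian address, so zeroing it through a (__force __be32 *) cast was a leftover sparse misannotation; the plain assignment is both correct and quieter under sparse. Second, unlikely(IS_ERR(...)) hints the branch twice, because the hint is already baked into IS_ERR(). From include/linux/err.h, elided to the relevant lines:

	#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

	static inline bool __must_check IS_ERR(__force const void *ptr)
	{
		return IS_ERR_VALUE((unsigned long)ptr);
	}
]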