author     Linus Torvalds <torvalds@linux-foundation.org>  2017-09-01 12:49:03 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-09-01 12:49:03 -0700
commit     8cf9f2a29ff1265a392e5b2461c69a9e53b4539f
tree       1787dc13f3708c1f836f532a2956cd3a02fa6a41
parent     b8a78bb4d103b3ea069c4831081cb1ba17062a4b
parent     e8a732d1bc3ac313e22249c13a153c3fe54aa577
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix handling of pinned BPF map nodes in hash of maps, from Daniel Borkmann.

 2) IPsec ESP error paths leak memory, from Steffen Klassert.

 3) We need an RCU grace period before freeing fib6_node objects, from Wei Wang.

 4) Must check skb_put_padto() return value in HSR driver, from Florian Fainelli.

 5) Fix oops on PHY probe failure in ftgmac100 driver, from Andrew Jeffery.

 6) Fix infinite loop in UDP queue when using SO_PEEK_OFF, from Eric Dumazet.

 7) Use after free when tcf_chain_destroy() is called multiple times, from Jiri Pirko.

 8) Fix KSZ DSA tag layer multiple free of SKBs, from Florian Fainelli.

 9) Fix leak of uninitialized memory in sctp_get_sctp_info(), inet_diag_msg_sctpladdrs_fill() and inet_diag_msg_sctpaddrs_fill(), from Stefano Brivio.

10) L2TP tunnel refcount fixes from Guillaume Nault.

11) Don't leak UDP secpath in udp_set_dev_scratch(), from Yossi Kauperman.

12) Revert a PHY layer change wrt. handling of the PHY_HALTED state in phy_stop_machine(); it causes regressions for multiple people. From Florian Fainelli.

13) When packets are sent out of br0 we have to clear the offload_fwd_mark value.

14) Several NULL pointer deref fixes in packet schedulers when their ->init() routine fails. From Nikolay Aleksandrov.

15) Aquantia devices cannot checksum offload correctly when the packet is <= 60 bytes. From Pavel Belous.

16) Fix vnet header access past end of buffer in AF_PACKET, from Benjamin Poirier.

17) Double free in probe error paths of nfp driver, from Dan Carpenter.

18) QoS capability not checked properly in DCB init paths of mlx5 driver, from Huy Nguyen.

19) Fix conflicts between firmware load failure and health_care timer in mlx5, also from Huy Nguyen.

20) Fix dangling page pointer when DMA mapping errors occur in mlx5, from Eran Ben Elisha.

21) ->ndo_setup_tc() in bnxt_en driver doesn't count rings properly, from Michael Chan.

22) Missing MSIX vector free in bnxt_en, also from Michael Chan.

23) Refcount leak in xfrm layer when using sk_policy, from Lorenzo Colitti.

24) Fix copy of uninitialized data in qlge driver, from Arnd Bergmann.

25) bpf_setsockopt() erroneously always returns -EINVAL even on success. Fix from Yuchung Cheng.

26) tipc_rcv() needs to linearize the SKB before parsing the inner headers, from Parthasarathy Bhuvaragan.

27) Fix deadlock between link status updates and link removal in netvsc driver, from Stephen Hemminger.

28) Missed locking of page fragment handling in ESP output, from Steffen Klassert.

29) Fix refcnt leak in eBPF congestion control code, from Sabrina Dubroca.

30) sxgbe_probe_config_dt() doesn't check devm_kzalloc()'s return value, from Christophe Jaillet.

31) Fix missing ipv6 rx_dst_cookie update when rx_dst is updated during early demux, from Paolo Abeni.

32) Several info leaks in xfrm_user layer, from Mathias Krause.

33) Fix out-of-bounds read in cxgb4 driver, from Stefano Brivio.

34) Properly propagate the obsolete state of a route upwards in ipv6 so that upper holders like xfrm can see it. From Xin Long.
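An aside on item 4: skb_put_padto() frees the skb and returns a negative errno on failure, so a transmit path must stop touching the buffer afterwards. Below is a minimal sketch of that contract, not the HSR patch itself; the demo_xmit() handler is hypothetical, while skb_put_padto(), ETH_ZLEN and NETDEV_TX_OK are real kernel symbols.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical ndo_start_xmit handler illustrating the return check. */
static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* Pad short frames to the 60-byte Ethernet minimum. On failure
         * the helper has already freed the skb, so report it handled
         * without touching the buffer again.
         */
        if (skb_put_padto(skb, ETH_ZLEN))
                return NETDEV_TX_OK;

        /* ... hand the now-padded skb to the hardware queue ... */
        return NETDEV_TX_OK;
}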
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (118 commits)
  udp: fix secpath leak
  bridge: switchdev: Clear forward mark when transmitting packet
  mlxsw: spectrum: Forbid linking to devices that have uppers
  wl1251: add a missing spin_lock_init()
  Revert "net: phy: Correctly process PHY_HALTED in phy_stop_machine()"
  net: dsa: bcm_sf2: Fix number of CFP entries for BCM7278
  kcm: do not attach PF_KCM sockets to avoid deadlock
  sch_tbf: fix two null pointer dereferences on init failure
  sch_sfq: fix null pointer dereference on init failure
  sch_netem: avoid null pointer deref on init failure
  sch_fq_codel: avoid double free on init failure
  sch_cbq: fix null pointer dereferences on init failure
  sch_hfsc: fix null pointer deref and double free on init failure
  sch_hhf: fix null pointer dereference on init failure
  sch_multiq: fix double free on init failure
  sch_htb: fix crash on init failure
  net/mlx5e: Fix CQ moderation mode not set properly
  net/mlx5e: Fix inline header size for small packets
  net/mlx5: E-Switch, Unload the representors in the correct order
  net/mlx5e: Properly resolve TC offloaded ipv6 vxlan tunnel source address
  ...
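The sch_* entries above all share one failure mode: the qdisc core invokes ->destroy() even when ->init() returned an error, so teardown must tolerate state that was never set up. The following is a minimal sketch of that pattern with simplified hook signatures (the real ->init() also takes netlink attributes); the demo_* names are hypothetical, while qdisc_priv(), kcalloc() and kfree() are real kernel APIs.

#include <linux/slab.h>
#include <net/sch_generic.h>

struct demo_sched_data {
        u32 *table;             /* allocated in ->init(), may stay NULL */
};

/* Simplified ->init(): allocation can fail before any state exists. */
static int demo_init(struct Qdisc *sch)
{
        struct demo_sched_data *q = qdisc_priv(sch);

        q->table = kcalloc(16, sizeof(u32), GFP_KERNEL);
        if (!q->table)
                return -ENOMEM; /* ->destroy() still runs after this */
        return 0;
}

static void demo_destroy(struct Qdisc *sch)
{
        struct demo_sched_data *q = qdisc_priv(sch);

        kfree(q->table);        /* kfree(NULL) is a safe no-op */
}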
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 4
-rw-r--r--  drivers/net/dsa/bcm_sf2.h | 1
-rw-r--r--  drivers/net/dsa/bcm_sf2_cfp.c | 8
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 27
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 92
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_utils.h | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 11
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 10
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 41
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 6
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/srq.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 15
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 139
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 60
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.c | 16
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 14
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 26
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_dbg.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 5
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 8
-rw-r--r--  drivers/net/ethernet/ti/cpsw-common.c | 2
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 7
-rw-r--r--  drivers/net/macsec.c | 1
-rw-r--r--  drivers/net/phy/phy.c | 3
-rw-r--r--  drivers/net/phy/phy_device.c | 6
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 7
-rw-r--r--  drivers/net/virtio_net.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 9
-rw-r--r--  drivers/net/wireless/ti/wl1251/main.c | 1
-rw-r--r--  include/linux/mlx5/driver.h | 4
-rw-r--r--  include/linux/netdevice.h | 2
-rw-r--r--  include/linux/skbuff.h | 41
-rw-r--r--  include/net/ip6_fib.h | 32
-rw-r--r--  include/net/sch_generic.h | 7
-rw-r--r--  include/net/tcp.h | 4
-rw-r--r--  include/net/udp.h | 2
-rw-r--r--  kernel/bpf/hashtab.c | 30
-rw-r--r--  net/bridge/br_device.c | 3
-rw-r--r--  net/bridge/br_switchdev.c | 2
-rw-r--r--  net/core/datagram.c | 2
-rw-r--r--  net/core/dev.c | 4
-rw-r--r--  net/core/filter.c | 8
-rw-r--r--  net/core/skbuff.c | 13
-rw-r--r--  net/dsa/dsa2.c | 2
-rw-r--r--  net/dsa/tag_ksz.c | 12
-rw-r--r--  net/dsa/tag_trailer.c | 2
-rw-r--r--  net/hsr/hsr_device.c | 3
-rw-r--r--  net/ipv4/esp4.c | 20
-rw-r--r--  net/ipv4/esp4_offload.c | 2
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 10
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 9
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 4
-rw-r--r--  net/ipv4/tcp.c | 2
-rw-r--r--  net/ipv4/tcp_cong.c | 19
-rw-r--r--  net/ipv4/udp.c | 6
-rw-r--r--  net/ipv6/addrconf.c | 2
-rw-r--r--  net/ipv6/esp6.c | 16
-rw-r--r--  net/ipv6/esp6_offload.c | 2
-rw-r--r--  net/ipv6/ip6_fib.c | 35
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 1
-rw-r--r--  net/ipv6/output_core.c | 6
-rw-r--r--  net/ipv6/route.c | 20
-rw-r--r--  net/ipv6/udp.c | 11
-rw-r--r--  net/kcm/kcmsock.c | 4
-rw-r--r--  net/l2tp/l2tp_core.c | 72
-rw-r--r--  net/l2tp/l2tp_core.h | 13
-rw-r--r--  net/l2tp/l2tp_netlink.c | 66
-rw-r--r--  net/netfilter/nf_nat_core.c | 2
-rw-r--r--  net/netfilter/nft_compat.c | 4
-rw-r--r--  net/netfilter/nft_limit.c | 25
-rw-r--r--  net/packet/af_packet.c | 12
-rw-r--r--  net/sched/cls_api.c | 16
-rw-r--r--  net/sched/sch_api.c | 6
-rw-r--r--  net/sched/sch_cbq.c | 10
-rw-r--r--  net/sched/sch_fq_codel.c | 4
-rw-r--r--  net/sched/sch_generic.c | 2
-rw-r--r--  net/sched/sch_hfsc.c | 10
-rw-r--r--  net/sched/sch_hhf.c | 3
-rw-r--r--  net/sched/sch_htb.c | 5
-rw-r--r--  net/sched/sch_multiq.c | 7
-rw-r--r--  net/sched/sch_netem.c | 4
-rw-r--r--  net/sched/sch_sfq.c | 6
-rw-r--r--  net/sched/sch_tbf.c | 5
-rw-r--r--  net/sctp/sctp_diag.c | 7
-rw-r--r--  net/sctp/socket.c | 3
-rw-r--r--  net/tipc/bearer.c | 26
-rw-r--r--  net/tipc/bearer.h | 2
-rw-r--r--  net/tipc/msg.c | 7
-rw-r--r--  net/tipc/node.c | 4
-rw-r--r--  net/tipc/socket.c | 6
-rw-r--r--  net/tipc/subscr.c | 21
-rw-r--r--  net/xfrm/xfrm_policy.c | 7
-rw-r--r--  net/xfrm/xfrm_state.c | 8
-rw-r--r--  net/xfrm/xfrm_user.c | 6
119 files changed, 816 insertions(+), 559 deletions(-)
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 648f91b58d1e..9b6ce7c3f6c3 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1048,6 +1048,7 @@ struct bcm_sf2_of_data {
u32 type;
const u16 *reg_offsets;
unsigned int core_reg_align;
+ unsigned int num_cfp_rules;
};
/* Register offsets for the SWITCH_REG_* block */
@@ -1071,6 +1072,7 @@ static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
.type = BCM7445_DEVICE_ID,
.core_reg_align = 0,
.reg_offsets = bcm_sf2_7445_reg_offsets,
+ .num_cfp_rules = 256,
};
static const u16 bcm_sf2_7278_reg_offsets[] = {
@@ -1093,6 +1095,7 @@ static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
.type = BCM7278_DEVICE_ID,
.core_reg_align = 1,
.reg_offsets = bcm_sf2_7278_reg_offsets,
+ .num_cfp_rules = 128,
};
static const struct of_device_id bcm_sf2_of_match[] = {
@@ -1149,6 +1152,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
priv->type = data->type;
priv->reg_offsets = data->reg_offsets;
priv->core_reg_align = data->core_reg_align;
+ priv->num_cfp_rules = data->num_cfp_rules;
/* Auto-detection using standard registers will not work, so
* provide an indication of what kind of device we are for
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 7d3030e04f11..7f9125eef3df 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -72,6 +72,7 @@ struct bcm_sf2_priv {
u32 type;
const u16 *reg_offsets;
unsigned int core_reg_align;
+ unsigned int num_cfp_rules;
/* spinlock protecting access to the indirect registers */
spinlock_t indir_lock;
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 2fb32d67065f..8a1da7e67707 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -98,7 +98,7 @@ static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
{
u32 reg;
- WARN_ON(addr >= CFP_NUM_RULES);
+ WARN_ON(addr >= priv->num_cfp_rules);
reg = core_readl(priv, CORE_CFP_ACC);
reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
@@ -109,7 +109,7 @@ static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
/* Entry #0 is reserved */
- return CFP_NUM_RULES - 1;
+ return priv->num_cfp_rules - 1;
}
static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
@@ -523,7 +523,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
if (!(reg & OP_STR_DONE))
break;
- } while (index < CFP_NUM_RULES);
+ } while (index < priv->num_cfp_rules);
/* Put the TCAM size here */
nfc->data = bcm_sf2_cfp_rule_size(priv);
@@ -544,7 +544,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
case ETHTOOL_GRXCLSRLCNT:
/* Subtract the default, unusable rule */
nfc->rule_cnt = bitmap_weight(priv->cfp.used,
- CFP_NUM_RULES) - 1;
+ priv->num_cfp_rules) - 1;
/* We support specifying rule locations */
nfc->data |= RX_CLS_LOC_SPECIAL;
break;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 1d307f2def2d..6e253d913fe2 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1661,21 +1661,21 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
return 0;
}
-static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
+static void xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
{
int ret;
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
- return 0;
+ return;
if (!IS_ENABLED(CONFIG_MDIO_XGENE))
- return 0;
+ return;
ret = xgene_enet_phy_connect(pdata->ndev);
if (!ret)
pdata->mdio_driver = true;
- return 0;
+ return;
}
static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
@@ -1779,10 +1779,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
if (ret)
return ret;
- ret = xgene_enet_check_phy_handle(pdata);
- if (ret)
- return ret;
-
xgene_enet_gpiod_get(pdata);
pdata->clk = devm_clk_get(&pdev->dev, NULL);
@@ -2097,9 +2093,11 @@ static int xgene_enet_probe(struct platform_device *pdev)
goto err;
}
+ xgene_enet_check_phy_handle(pdata);
+
ret = xgene_enet_init_hw(pdata);
if (ret)
- goto err;
+ goto err2;
link_state = pdata->mac_ops->link_state;
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
@@ -2117,29 +2115,30 @@ static int xgene_enet_probe(struct platform_device *pdev)
spin_lock_init(&pdata->stats_lock);
ret = xgene_extd_stats_init(pdata);
if (ret)
- goto err2;
+ goto err1;
xgene_enet_napi_add(pdata);
ret = register_netdev(ndev);
if (ret) {
netdev_err(ndev, "Failed to register netdev\n");
- goto err2;
+ goto err1;
}
return 0;
-err2:
+err1:
/*
* If necessary, free_netdev() will call netif_napi_del() and undo
* the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
*/
+ xgene_enet_delete_desc_rings(pdata);
+
+err2:
if (pdata->mdio_driver)
xgene_enet_phy_disconnect(pdata);
else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
xgene_enet_mdio_remove(pdata);
-err1:
- xgene_enet_delete_desc_rings(pdata);
err:
free_netdev(ndev);
return ret;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index fce0fd3f23ff..bf9b3f020e10 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -105,8 +105,7 @@ struct aq_hw_ops {
int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
- int (*hw_get_link_status)(struct aq_hw_s *self,
- struct aq_hw_link_status_s *link_status);
+ int (*hw_get_link_status)(struct aq_hw_s *self);
int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 9ee1c5016784..6ac9e2602d6d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -103,6 +103,8 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
else
cfg->vecs = 1U;
+ cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
+
cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func);
if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
@@ -123,33 +125,30 @@ static void aq_nic_service_timer_cb(unsigned long param)
struct net_device *ndev = aq_nic_get_ndev(self);
int err = 0;
unsigned int i = 0U;
- struct aq_hw_link_status_s link_status;
struct aq_ring_stats_rx_s stats_rx;
struct aq_ring_stats_tx_s stats_tx;
if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
goto err_exit;
- err = self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status);
+ err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
if (err < 0)
goto err_exit;
- self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
- self->aq_nic_cfg.is_interrupt_moderation);
-
- if (memcmp(&link_status, &self->link_status, sizeof(link_status))) {
- if (link_status.mbps) {
- aq_utils_obj_set(&self->header.flags,
- AQ_NIC_FLAG_STARTED);
- aq_utils_obj_clear(&self->header.flags,
- AQ_NIC_LINK_DOWN);
- netif_carrier_on(self->ndev);
- } else {
- netif_carrier_off(self->ndev);
- aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
- }
+ self->link_status = self->aq_hw->aq_link_status;
- self->link_status = link_status;
+ self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
+ self->aq_nic_cfg.is_interrupt_moderation);
+
+ if (self->link_status.mbps) {
+ aq_utils_obj_set(&self->header.flags,
+ AQ_NIC_FLAG_STARTED);
+ aq_utils_obj_clear(&self->header.flags,
+ AQ_NIC_LINK_DOWN);
+ netif_carrier_on(self->ndev);
+ } else {
+ netif_carrier_off(self->ndev);
+ aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
}
memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
@@ -597,14 +596,11 @@ exit:
}
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
-__releases(&ring->lock)
-__acquires(&ring->lock)
{
struct aq_ring_s *ring = NULL;
unsigned int frags = 0U;
unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
unsigned int tc = 0U;
- unsigned int trys = AQ_CFG_LOCK_TRYS;
int err = NETDEV_TX_OK;
bool is_nic_in_bad_state;
@@ -628,36 +624,21 @@ __acquires(&ring->lock)
goto err_exit;
}
- do {
- if (spin_trylock(&ring->header.lock)) {
- frags = aq_nic_map_skb(self, skb, ring);
-
- if (likely(frags)) {
- err = self->aq_hw_ops.hw_ring_tx_xmit(
- self->aq_hw,
- ring, frags);
- if (err >= 0) {
- if (aq_ring_avail_dx(ring) <
- AQ_CFG_SKB_FRAGS_MAX + 1)
- aq_nic_ndev_queue_stop(
- self,
- ring->idx);
-
- ++ring->stats.tx.packets;
- ring->stats.tx.bytes += skb->len;
- }
- } else {
- err = NETDEV_TX_BUSY;
- }
+ frags = aq_nic_map_skb(self, skb, ring);
- spin_unlock(&ring->header.lock);
- break;
- }
- } while (--trys);
+ if (likely(frags)) {
+ err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
+ ring,
+ frags);
+ if (err >= 0) {
+ if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
+ aq_nic_ndev_queue_stop(self, ring->idx);
- if (!trys) {
+ ++ring->stats.tx.packets;
+ ring->stats.tx.bytes += skb->len;
+ }
+ } else {
err = NETDEV_TX_BUSY;
- goto err_exit;
}
err_exit:
@@ -688,11 +669,26 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
netdev_for_each_mc_addr(ha, ndev) {
ether_addr_copy(self->mc_list.ar[i++], ha->addr);
++self->mc_list.count;
+
+ if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX)
+ break;
}
- return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
+ if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) {
+ /* Number of filters is too big: atlantic does not support this.
+ * Force all multi filter to support this.
+ * With this we disable all UC filters and setup "all pass"
+ * multicast mask
+ */
+ self->packet_filter |= IFF_ALLMULTI;
+ self->aq_hw->aq_nic_cfg->mc_list_count = 0;
+ return self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
+ self->packet_filter);
+ } else {
+ return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
self->mc_list.ar,
self->mc_list.count);
+ }
}
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 9a0817938eca..ec5579fb8268 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -101,7 +101,6 @@ int aq_ring_init(struct aq_ring_s *self)
self->hw_head = 0;
self->sw_head = 0;
self->sw_tail = 0;
- spin_lock_init(&self->header.lock);
return 0;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
index f6012b34abe6..e12bcdfb874a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
@@ -17,7 +17,6 @@
#define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_)
struct aq_obj_s {
- spinlock_t lock; /* spinlock for nic/rings processing */
atomic_t flags;
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index ad5b4d4dac7f..fee446af748f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -34,8 +34,6 @@ struct aq_vec_s {
#define AQ_VEC_RX_ID 1
static int aq_vec_poll(struct napi_struct *napi, int budget)
-__releases(&self->lock)
-__acquires(&self->lock)
{
struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
struct aq_ring_s *ring = NULL;
@@ -47,7 +45,7 @@ __acquires(&self->lock)
if (!self) {
err = -EINVAL;
- } else if (spin_trylock(&self->header.lock)) {
+ } else {
for (i = 0U, ring = self->ring[0];
self->tx_rings > i; ++i, ring = self->ring[i]) {
if (self->aq_hw_ops->hw_ring_tx_head_update) {
@@ -105,11 +103,8 @@ __acquires(&self->lock)
self->aq_hw_ops->hw_irq_enable(self->aq_hw,
1U << self->aq_ring_param.vec_idx);
}
-
-err_exit:
- spin_unlock(&self->header.lock);
}
-
+err_exit:
return work_done;
}
@@ -185,8 +180,6 @@ int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
self->aq_hw_ops = aq_hw_ops;
self->aq_hw = aq_hw;
- spin_lock_init(&self->header.lock);
-
for (i = 0U, ring = self->ring[0];
self->tx_rings > i; ++i, ring = self->ring[i]) {
err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index faeb4935ef3e..c5a02df7a48b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -629,6 +629,12 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1;
else if (0x0U == (pkt_type & 0x1CU))
buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1;
+
+ /* Checksum offload workaround for small packets */
+ if (rxd_wb->pkt_len <= 60) {
+ buff->is_ip_cso = 0U;
+ buff->is_cso_err = 0U;
+ }
}
is_err &= ~0x18U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 1bceb7358e5c..21784cc39dab 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -645,6 +645,12 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
else if (0x0U == (pkt_type & 0x1CU))
buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;
+
+ /* Checksum offload workaround for small packets */
+ if (rxd_wb->pkt_len <= 60) {
+ buff->is_ip_cso = 0U;
+ buff->is_cso_err = 0U;
+ }
}
is_err &= ~0x18U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 8d6d8f5804da..4f5ec9a0fbfb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -141,6 +141,12 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected,
aq_hw_read_reg(self, 0x18U));
+
+ if (err < 0)
+ pr_err("%s: Bad FW version detected: expected=%x, actual=%x\n",
+ AQ_CFG_DRV_NAME,
+ aq_hw_caps->fw_ver_expected,
+ aq_hw_read_reg(self, 0x18U));
return err;
}
@@ -313,11 +319,11 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
err_exit:;
}
-int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
- struct aq_hw_link_status_s *link_status)
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
{
u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT;
+ struct aq_hw_link_status_s *link_status = &self->aq_link_status;
if (!link_speed_mask) {
link_status->mbps = 0U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index a66aee51ab5b..e0360a6b2202 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -180,8 +180,7 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
enum hal_atl_utils_fw_state_e state);
-int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
- struct aq_hw_link_status_s *link_status);
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self);
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
struct aq_hw_caps_s *aq_hw_caps,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index dc3052751bc1..c28fa5a8734c 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -597,7 +597,7 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
- dev_kfree_skb_any(cb->skb);
+ dev_consume_skb_any(cb->skb);
cb->skb = NULL;
dma_unmap_addr_set(cb, dma_addr, 0);
}
@@ -1346,6 +1346,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
if (!ring->cbs) {
+ dma_free_coherent(kdev, sizeof(struct dma_desc),
+ ring->desc_cpu, ring->desc_dma);
netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index e7c8539cbddf..f20b3d2a4c23 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4647,7 +4647,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->port_id = le16_to_cpu(resp->port_id);
bp->dev->dev_port = pf->port_id;
memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
- memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
@@ -4687,16 +4686,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
- mutex_unlock(&bp->hwrm_cmd_lock);
-
- if (is_valid_ether_addr(vf->mac_addr)) {
- /* overwrite netdev dev_adr with admin VF MAC */
- memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
- } else {
- eth_hw_addr_random(bp->dev);
- rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
- }
- return rc;
#endif
}
@@ -7152,6 +7141,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
netdev_reset_tc(dev);
}
+ bp->tx_nr_rings += bp->tx_nr_rings_xdp;
bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
bp->tx_nr_rings + bp->rx_nr_rings;
bp->num_stat_ctxs = bp->cp_nr_rings;
@@ -7661,6 +7651,28 @@ void bnxt_restore_pf_fw_resources(struct bnxt *bp)
bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
}
+static int bnxt_init_mac_addr(struct bnxt *bp)
+{
+ int rc = 0;
+
+ if (BNXT_PF(bp)) {
+ memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+ } else {
+#ifdef CONFIG_BNXT_SRIOV
+ struct bnxt_vf_info *vf = &bp->vf;
+
+ if (is_valid_ether_addr(vf->mac_addr)) {
+ /* overwrite netdev dev_adr with admin VF MAC */
+ memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+ } else {
+ eth_hw_addr_random(bp->dev);
+ rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
+ }
+#endif
+ }
+ return rc;
+}
+
static void bnxt_parse_log_pcie_link(struct bnxt *bp)
{
enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -7789,7 +7801,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = -1;
goto init_err_pci_clean;
}
-
+ rc = bnxt_init_mac_addr(bp);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to initialize mac address.\n");
+ rc = -EADDRNOTAVAIL;
+ goto init_err_pci_clean;
+ }
rc = bnxt_hwrm_queue_qportcfg(bp);
if (rc) {
netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 77da75a55c02..997e10e8b863 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -84,6 +84,8 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1);
+ if (ulp->msix_requested)
+ edev->en_ops->bnxt_free_msix(edev, ulp_id);
}
if (ulp->max_async_event_id)
bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index a981c4ee9d72..fea3f9a5fb2d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1360,7 +1360,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
if (skb) {
pkts_compl++;
bytes_compl += GENET_CB(skb)->bytes_sent;
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
}
txbds_processed++;
@@ -1875,7 +1875,7 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
cb = ring->cbs + i;
skb = bcmgenet_rx_refill(priv, cb);
if (skb)
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
if (!cb->skb)
return -ENOMEM;
}
@@ -1894,7 +1894,7 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
if (skb)
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 82bf7aac6cdb..0293b41171a5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -369,12 +369,12 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
list_del(&entry.list);
spin_unlock(&adap->mbox_lock);
ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
- t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
+ t4_record_mbox(adap, cmd, size, access, ret);
return ret;
}
/* Copy in the new mailbox command and send it on its way ... */
- t4_record_mbox(adap, cmd, MBOX_LEN, access, 0);
+ t4_record_mbox(adap, cmd, size, access, 0);
for (i = 0; i < size; i += 8)
t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
@@ -426,7 +426,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
}
ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
- t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
+ t4_record_mbox(adap, cmd, size, access, ret);
dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
*(const u8 *)cmd, mbox);
t4_report_fw_error(adap);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 34dae51effd4..59da7ac3c108 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1863,7 +1863,6 @@ err_setup_mdio:
err_ioremap:
release_resource(priv->res);
err_req_mem:
- netif_napi_del(&priv->napi);
free_netdev(netdev);
err_alloc_etherdev:
return err;
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 6e67d22fd0d5..1c7da16ad0ff 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
goto no_mem;
}
+ pdev->dev.of_node = node;
+ pdev->dev.parent = priv->dev;
set_dma_ops(&pdev->dev, get_dma_ops(priv->dev));
ret = platform_device_add_data(pdev, &data, sizeof(data));
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 48d21c1e09f2..4d598ca8503a 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -6504,7 +6504,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
struct resource *res;
const char *dt_mac_addr;
const char *mac_from;
- char hw_mac_addr[ETH_ALEN];
+ char hw_mac_addr[ETH_ALEN] = {0};
u32 id;
int features;
int phy_mode;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 0039b4725405..2f26fb34d741 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -263,6 +263,7 @@ struct mlx5e_dcbx {
/* The only setting that cannot be read from FW */
u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
+ u8 cap;
};
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 2eb54d36e16e..c1d384fca4dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -288,13 +288,8 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_dcbx *dcbx = &priv->dcbx;
- u8 mode = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_VER_CEE;
-
- if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
- mode |= DCB_CAP_DCBX_HOST;
- return mode;
+ return priv->dcbx.cap;
}
static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
@@ -312,6 +307,7 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
/* set dcbx to fw controlled */
if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
+ dcbx->cap &= ~DCB_CAP_DCBX_HOST;
return 0;
}
@@ -324,6 +320,8 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
return 1;
+ dcbx->cap = mode;
+
return 0;
}
@@ -628,9 +626,9 @@ static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
*cap = false;
break;
case DCB_CAP_ATTR_DCBX:
- *cap = (DCB_CAP_DCBX_LLD_MANAGED |
- DCB_CAP_DCBX_VER_CEE |
- DCB_CAP_DCBX_STATIC);
+ *cap = priv->dcbx.cap |
+ DCB_CAP_DCBX_VER_CEE |
+ DCB_CAP_DCBX_VER_IEEE;
break;
default:
*cap = 0;
@@ -754,8 +752,16 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
{
struct mlx5e_dcbx *dcbx = &priv->dcbx;
+ if (!MLX5_CAP_GEN(priv->mdev, qos))
+ return;
+
if (MLX5_CAP_GEN(priv->mdev, dcbx))
mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);
+ priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE |
+ DCB_CAP_DCBX_VER_IEEE;
+ if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
+ priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
+
mlx5e_ets_init(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 917fade5f5d5..f5594014715b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -641,8 +641,10 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
new_channels.params = priv->channels.params;
new_channels.params.num_channels = count;
- mlx5e_build_default_indir_rqt(priv->mdev, new_channels.params.indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, count);
+ if (!netif_is_rxfh_configured(priv->netdev))
+ mlx5e_build_default_indir_rqt(priv->mdev,
+ new_channels.params.indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, count);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 57f31fa478ce..6ad7f07e7861 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1969,6 +1969,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
}
mlx5e_build_common_cq_param(priv, param);
+ param->cq_period_mode = params->rx_cq_period_mode;
}
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 325b2c8c1c6d..7344433259fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -222,13 +222,13 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
if (unlikely(!page))
return -ENOMEM;
- dma_info->page = page;
dma_info->addr = dma_map_page(rq->pdev, page, 0,
RQ_PAGE_SIZE(rq), rq->buff.map_dir);
if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
put_page(page);
return -ENOMEM;
}
+ dma_info->page = page;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 3c536f560dd2..7f282e8f4e7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1443,12 +1443,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int ret;
- dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
- ret = dst->error;
- if (ret) {
- dst_release(dst);
+ ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
+ fl6);
+ if (ret < 0)
return ret;
- }
*out_ttl = ip6_dst_hoplimit(dst);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index aaa0f4ebba9a..31353e5c3c78 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -128,10 +128,10 @@ static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
return mlx5e_skb_l2_header_offset(skb);
}
-static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
- struct sk_buff *skb)
+static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
+ struct sk_buff *skb)
{
- int hlen;
+ u16 hlen;
switch (mode) {
case MLX5_INLINE_MODE_NONE:
@@ -140,19 +140,22 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
hlen = eth_get_headlen(skb->data, skb_headlen(skb));
if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
hlen += VLAN_HLEN;
- return hlen;
+ break;
case MLX5_INLINE_MODE_IP:
/* When transport header is set to zero, it means no transport
* header. When transport header is set to 0xff's, it means
* transport header wasn't set.
*/
- if (skb_transport_offset(skb))
- return mlx5e_skb_l3_header_offset(skb);
+ if (skb_transport_offset(skb)) {
+ hlen = mlx5e_skb_l3_header_offset(skb);
+ break;
+ }
/* fall through */
case MLX5_INLINE_MODE_L2:
default:
- return mlx5e_skb_l2_header_offset(skb);
+ hlen = mlx5e_skb_l2_header_offset(skb);
}
+ return min_t(u16, hlen, skb->len);
}
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 95b64025ce36..5bc0593bd76e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -815,7 +815,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
struct mlx5_eswitch_rep *rep;
int vport;
- for (vport = 0; vport < nvports; vport++) {
+ for (vport = nvports - 1; vport >= 0; vport--) {
rep = &esw->offloads.vport_reps[vport];
if (!rep->valid)
continue;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c065132b956d..16885827367b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1186,7 +1186,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
}
}
- clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
out:
mutex_unlock(&dev->intf_state_mutex);
@@ -1261,7 +1260,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_drain_health_recovery(dev);
mutex_lock(&dev->intf_state_mutex);
- if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
+ if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
__func__);
if (cleanup)
@@ -1270,7 +1269,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
}
clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
- set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
if (mlx5_device_registered(dev))
mlx5_detach_device(dev);
@@ -1565,8 +1563,6 @@ static void shutdown(struct pci_dev *pdev)
int err;
dev_info(&pdev->dev, "Shutdown was called\n");
- /* Notify mlx5 clients that the kernel is being shut down */
- set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
err = mlx5_try_fast_unload(dev);
if (err)
mlx5_unload_one(dev, priv, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index f774de6f5fcb..520f6382dfde 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -201,13 +201,13 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev,
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq)
{
- /* arm_srq structs missing using identical xrc ones */
- u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
- u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
+ u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
+ u32 srq_out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
- MLX5_SET(arm_xrc_srq_in, srq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
- MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
- MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm);
+ MLX5_SET(arm_rq_in, srq_in, opcode, MLX5_CMD_OP_ARM_RQ);
+ MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
+ MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn);
+ MLX5_SET(arm_rq_in, srq_in, lwm, lwm);
return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
srq_out, sizeof(srq_out));
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 60bf8f27cc00..c6a3e61b53bd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4139,6 +4139,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
return -EINVAL;
if (!info->linking)
break;
+ if (netdev_has_any_upper_dev(upper_dev))
+ return -EINVAL;
if (netif_is_lag_master(upper_dev) &&
!mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
info->upper_info))
@@ -4258,6 +4260,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
upper_dev = info->upper_dev;
if (!netif_is_bridge_master(upper_dev))
return -EINVAL;
+ if (!info->linking)
+ break;
+ if (netdev_has_any_upper_dev(upper_dev))
+ return -EINVAL;
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 5eb1606765c5..d39ffbfcc436 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -705,6 +705,7 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool is_port_mc_router)
{
struct mlxsw_sp_bridge_port *bridge_port;
+ int err;
if (switchdev_trans_ph_prepare(trans))
return 0;
@@ -715,11 +716,17 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
if (!bridge_port->bridge_device->multicast_enabled)
- return 0;
+ goto out;
- return mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
- MLXSW_SP_FLOOD_TYPE_MC,
- is_port_mc_router);
+ err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
+ MLXSW_SP_FLOOD_TYPE_MC,
+ is_port_mc_router);
+ if (err)
+ return err;
+
+out:
+ bridge_port->mrouter = is_port_mc_router;
+ return 0;
}
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 0e08404480ef..d25b5038c3a2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -42,33 +42,29 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
struct tc_cls_flower_offload *flow, u8 key_type,
bool mask_version)
{
+ struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
struct flow_dissector_key_vlan *flow_vlan;
u16 tmp_tci;
+ memset(frame, 0, sizeof(struct nfp_flower_meta_two));
/* Populate the metadata frame. */
frame->nfp_flow_key_layer = key_type;
frame->mask_id = ~0;
- if (mask_version) {
- frame->tci = cpu_to_be16(~0);
- return;
- }
-
- flow_vlan = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- flow->key);
-
- /* Populate the tci field. */
- if (!flow_vlan->vlan_id) {
- tmp_tci = 0;
- } else {
- tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
- flow_vlan->vlan_priority) |
- FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
- flow_vlan->vlan_id) |
- NFP_FLOWER_MASK_VLAN_CFI;
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+ flow_vlan = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ target);
+ /* Populate the tci field. */
+ if (flow_vlan->vlan_id) {
+ tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ flow_vlan->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ flow_vlan->vlan_id) |
+ NFP_FLOWER_MASK_VLAN_CFI;
+ frame->tci = cpu_to_be16(tmp_tci);
+ }
}
- frame->tci = cpu_to_be16(tmp_tci);
}
static void
@@ -99,17 +95,18 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_eth_addrs *flow_mac;
-
- flow_mac = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- target);
+ struct flow_dissector_key_eth_addrs *addr;
memset(frame, 0, sizeof(struct nfp_flower_mac_mpls));
- /* Populate mac frame. */
- ether_addr_copy(frame->mac_dst, &flow_mac->dst[0]);
- ether_addr_copy(frame->mac_src, &flow_mac->src[0]);
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ addr = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ target);
+ /* Populate mac frame. */
+ ether_addr_copy(frame->mac_dst, &addr->dst[0]);
+ ether_addr_copy(frame->mac_src, &addr->src[0]);
+ }
if (mask_version)
frame->mpls_lse = cpu_to_be32(~0);
@@ -121,14 +118,17 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame,
bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ports *flow_tp;
+ struct flow_dissector_key_ports *tp;
- flow_tp = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- target);
+ memset(frame, 0, sizeof(struct nfp_flower_tp_ports));
- frame->port_src = flow_tp->src;
- frame->port_dst = flow_tp->dst;
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+ tp = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ target);
+ frame->port_src = tp->src;
+ frame->port_dst = tp->dst;
+ }
}
static void
@@ -137,25 +137,27 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ipv4_addrs *flow_ipv4;
- struct flow_dissector_key_basic *flow_basic;
-
- flow_ipv4 = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- target);
-
- flow_basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- target);
+ struct flow_dissector_key_ipv4_addrs *addr;
+ struct flow_dissector_key_basic *basic;
- /* Populate IPv4 frame. */
- frame->reserved = 0;
- frame->ipv4_src = flow_ipv4->src;
- frame->ipv4_dst = flow_ipv4->dst;
- frame->proto = flow_basic->ip_proto;
/* Wildcard TOS/TTL for now. */
- frame->tos = 0;
- frame->ttl = 0;
+ memset(frame, 0, sizeof(struct nfp_flower_ipv4));
+
+ if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ addr = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ target);
+ frame->ipv4_src = addr->src;
+ frame->ipv4_dst = addr->dst;
+ }
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ target);
+ frame->proto = basic->ip_proto;
+ }
}
static void
@@ -164,26 +166,27 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ipv6_addrs *flow_ipv6;
- struct flow_dissector_key_basic *flow_basic;
-
- flow_ipv6 = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- target);
+ struct flow_dissector_key_ipv6_addrs *addr;
+ struct flow_dissector_key_basic *basic;
- flow_basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- target);
-
- /* Populate IPv6 frame. */
- frame->reserved = 0;
- frame->ipv6_src = flow_ipv6->src;
- frame->ipv6_dst = flow_ipv6->dst;
- frame->proto = flow_basic->ip_proto;
/* Wildcard LABEL/TOS/TTL for now. */
- frame->ipv6_flow_label_exthdr = 0;
- frame->tos = 0;
- frame->ttl = 0;
+ memset(frame, 0, sizeof(struct nfp_flower_ipv6));
+
+ if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ addr = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ target);
+ frame->ipv6_src = addr->src;
+ frame->ipv6_dst = addr->dst;
+ }
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ target);
+ frame->proto = basic->ip_proto;
+ }
}
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 4ad10bd5e139..74a96d6bb05c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -105,43 +105,62 @@ static int
nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
struct tc_cls_flower_offload *flow)
{
- struct flow_dissector_key_control *mask_enc_ctl;
- struct flow_dissector_key_basic *mask_basic;
- struct flow_dissector_key_basic *key_basic;
+ struct flow_dissector_key_basic *mask_basic = NULL;
+ struct flow_dissector_key_basic *key_basic = NULL;
+ struct flow_dissector_key_ip *mask_ip = NULL;
u32 key_layer_two;
u8 key_layer;
int key_size;
- mask_enc_ctl = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_CONTROL,
- flow->mask);
+ if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_dissector_key_control *mask_enc_ctl =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_CONTROL,
+ flow->mask);
+ /* We are expecting a tunnel. For now we ignore offloading. */
+ if (mask_enc_ctl->addr_type)
+ return -EOPNOTSUPP;
+ }
- mask_basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- flow->mask);
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ mask_basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ flow->mask);
+
+ key_basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ flow->key);
+ }
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP))
+ mask_ip = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ flow->mask);
- key_basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- flow->key);
key_layer_two = 0;
key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
key_size = sizeof(struct nfp_flower_meta_one) +
sizeof(struct nfp_flower_in_port) +
sizeof(struct nfp_flower_mac_mpls);
- /* We are expecting a tunnel. For now we ignore offloading. */
- if (mask_enc_ctl->addr_type)
- return -EOPNOTSUPP;
-
- if (mask_basic->n_proto) {
+ if (mask_basic && mask_basic->n_proto) {
/* Ethernet type is present in the key. */
switch (key_basic->n_proto) {
case cpu_to_be16(ETH_P_IP):
+ if (mask_ip && mask_ip->tos)
+ return -EOPNOTSUPP;
+ if (mask_ip && mask_ip->ttl)
+ return -EOPNOTSUPP;
key_layer |= NFP_FLOWER_LAYER_IPV4;
key_size += sizeof(struct nfp_flower_ipv4);
break;
case cpu_to_be16(ETH_P_IPV6):
+ if (mask_ip && mask_ip->tos)
+ return -EOPNOTSUPP;
+ if (mask_ip && mask_ip->ttl)
+ return -EOPNOTSUPP;
key_layer |= NFP_FLOWER_LAYER_IPV6;
key_size += sizeof(struct nfp_flower_ipv6);
break;
@@ -152,6 +171,11 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
case cpu_to_be16(ETH_P_ARP):
return -EOPNOTSUPP;
+ /* Currently we do not offload MPLS. */
+ case cpu_to_be16(ETH_P_MPLS_UC):
+ case cpu_to_be16(ETH_P_MPLS_MC):
+ return -EOPNOTSUPP;
+
/* Will be included in layer 2. */
case cpu_to_be16(ETH_P_8021Q):
break;
@@ -166,7 +190,7 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
}
}
- if (mask_basic->ip_proto) {
+ if (mask_basic && mask_basic->ip_proto) {
/* Ethernet type is present in the key. */
switch (key_basic->ip_proto) {
case IPPROTO_TCP:
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index d67969d3e484..3f199db2002e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -98,21 +98,20 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
struct nfp_pf *pf = pci_get_drvdata(pdev);
int err;
- mutex_lock(&pf->lock);
-
if (num_vfs > pf->limit_vfs) {
nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n",
pf->limit_vfs);
- err = -EINVAL;
- goto err_unlock;
+ return -EINVAL;
}
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
dev_warn(&pdev->dev, "Failed to enable PCI SR-IOV: %d\n", err);
- goto err_unlock;
+ return err;
}
+ mutex_lock(&pf->lock);
+
err = nfp_app_sriov_enable(pf->app, num_vfs);
if (err) {
dev_warn(&pdev->dev,
@@ -129,9 +128,8 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
return num_vfs;
err_sriov_disable:
- pci_disable_sriov(pdev);
-err_unlock:
mutex_unlock(&pf->lock);
+ pci_disable_sriov(pdev);
return err;
#endif
return 0;
@@ -158,10 +156,10 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
pf->num_vfs = 0;
+ mutex_unlock(&pf->lock);
+
pci_disable_sriov(pdev);
dev_dbg(&pdev->dev, "Removed VFs.\n");
-
- mutex_unlock(&pf->lock);
#endif
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 9f77ce038a4a..66a09e490cf5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -895,6 +895,8 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
netdev_tx_sent_queue(nd_q, txbuf->real_len);
+ skb_tx_timestamp(skb);
+
tx_ring->wr_p += nr_frags + 1;
if (nfp_net_tx_ring_should_stop(tx_ring))
nfp_net_tx_ring_stop(nd_q, tx_ring);
@@ -903,8 +905,6 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
if (!skb->xmit_more || netif_xmit_stopped(nd_q))
nfp_net_tx_xmit_more_flush(tx_ring);
- skb_tx_timestamp(skb);
-
return NETDEV_TX_OK;
err_unmap:
@@ -1751,6 +1751,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
continue;
}
+ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+ nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
if (likely(!meta.portid)) {
netdev = dp->netdev;
} else {
@@ -1759,16 +1763,12 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nn = netdev_priv(dp->netdev);
netdev = nfp_app_repr_get(nn->app, meta.portid);
if (unlikely(!netdev)) {
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+ nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb);
continue;
}
nfp_repr_inc_rx_stats(netdev, pkt_len);
}
- nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
-
- nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
-
skb_reserve(skb, pkt_off);
skb_put(skb, pkt_len);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 5797dbf2b507..34b985384d26 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -456,13 +456,9 @@ static int nfp_net_pf_app_start(struct nfp_pf *pf)
{
int err;
- err = nfp_net_pf_app_start_ctrl(pf);
- if (err)
- return err;
-
err = nfp_app_start(pf->app, pf->ctrl_vnic);
if (err)
- goto err_ctrl_stop;
+ return err;
if (pf->num_vfs) {
err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
@@ -474,8 +470,6 @@ static int nfp_net_pf_app_start(struct nfp_pf *pf)
err_app_stop:
nfp_app_stop(pf->app);
-err_ctrl_stop:
- nfp_net_pf_app_stop_ctrl(pf);
return err;
}
@@ -484,7 +478,6 @@ static void nfp_net_pf_app_stop(struct nfp_pf *pf)
if (pf->num_vfs)
nfp_app_sriov_disable(pf->app);
nfp_app_stop(pf->app);
- nfp_net_pf_app_stop_ctrl(pf);
}
static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
@@ -559,7 +552,7 @@ err_unmap_ctrl:
static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
{
- nfp_net_pf_app_stop(pf);
+ nfp_net_pf_app_stop_ctrl(pf);
/* stop app first, to avoid double free of ctrl vNIC's ddir */
nfp_net_debugfs_dir_clean(&pf->ddir);
@@ -690,6 +683,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
{
struct nfp_net_fw_version fw_ver;
u8 __iomem *ctrl_bar, *qc_bar;
+ struct nfp_net *nn;
int stride;
int err;
@@ -766,7 +760,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_free_vnics;
- err = nfp_net_pf_app_start(pf);
+ err = nfp_net_pf_app_start_ctrl(pf);
if (err)
goto err_free_irqs;
@@ -774,12 +768,20 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_stop_app;
+ err = nfp_net_pf_app_start(pf);
+ if (err)
+ goto err_clean_vnics;
+
mutex_unlock(&pf->lock);
return 0;
+err_clean_vnics:
+ list_for_each_entry(nn, &pf->vnics, vnic_list)
+ if (nfp_net_is_data_vnic(nn))
+ nfp_net_pf_clean_vnic(pf, nn);
err_stop_app:
- nfp_net_pf_app_stop(pf);
+ nfp_net_pf_app_stop_ctrl(pf);
err_free_irqs:
nfp_net_pf_free_irqs(pf);
err_free_vnics:
@@ -803,6 +805,8 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
if (list_empty(&pf->vnics))
goto out;
+ nfp_net_pf_app_stop(pf);
+
list_for_each_entry(nn, &pf->vnics, vnic_list)
if (nfp_net_is_data_vnic(nn))
nfp_net_pf_clean_vnic(pf, nn);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 28ea0af89aef..e3223f2fe2ff 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -724,7 +724,7 @@ static void ql_build_coredump_seg_header(
seg_hdr->cookie = MPI_COREDUMP_COOKIE;
seg_hdr->segNum = seg_number;
seg_hdr->segSize = seg_size;
- memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+ strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
}
/*
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index bd07a15d3b7c..e03fcf914690 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6863,8 +6863,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
tp->TxDescArray + entry);
if (skb) {
- tp->dev->stats.tx_dropped++;
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
tx_skb->skb = NULL;
}
}
@@ -7319,7 +7318,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
tp->tx_stats.packets++;
tp->tx_stats.bytes += tx_skb->skb->len;
u64_stats_update_end(&tp->tx_stats.syncp);
- dev_kfree_skb_any(tx_skb->skb);
+ dev_consume_skb_any(tx_skb->skb);
tx_skb->skb = NULL;
}
dirty_tx++;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index 73427e29df2a..fbd00cb0cb7d 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -47,6 +47,8 @@ static int sxgbe_probe_config_dt(struct platform_device *pdev,
plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
sizeof(*plat->mdio_bus_data),
GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return -ENOMEM;
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL);
if (!dma_cfg)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 17d4bbaeb65c..6e359572b9f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -269,7 +269,10 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
ctrl |= val << reg_shift;
- if (dwmac->f2h_ptp_ref_clk) {
+ if (dwmac->f2h_ptp_ref_clk ||
+ phymode == PHY_INTERFACE_MODE_MII ||
+ phymode == PHY_INTERFACE_MODE_GMII ||
+ phymode == PHY_INTERFACE_MODE_SGMII) {
ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
&module);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index fffd6d5fc907..39c2122a4f26 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -979,14 +979,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
}
static const struct of_device_id sun8i_dwmac_match[] = {
- { .compatible = "allwinner,sun8i-h3-emac",
- .data = &emac_variant_h3 },
- { .compatible = "allwinner,sun8i-v3s-emac",
- .data = &emac_variant_v3s },
- { .compatible = "allwinner,sun8i-a83t-emac",
- .data = &emac_variant_a83t },
- { .compatible = "allwinner,sun50i-a64-emac",
- .data = &emac_variant_a64 },
{ }
};
MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index 56ba411421f0..38d1cc557c11 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -96,7 +96,7 @@ int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
if (of_machine_is_compatible("ti,dra7"))
return davinci_emac_3517_get_macid(dev, 0x514, slave, mac_addr);
- dev_err(dev, "incompatible machine/device type for reading mac address\n");
+ dev_info(dev, "incompatible machine/device type for reading mac address\n");
return -ENOENT;
}
EXPORT_SYMBOL_GPL(ti_cm_get_macid);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 0d78727f1a14..d91cbc6c3ca4 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1269,7 +1269,12 @@ static void netvsc_link_change(struct work_struct *w)
bool notify = false, reschedule = false;
unsigned long flags, next_reconfig, delay;
- rtnl_lock();
+ /* if changes are happening, come back later */
+ if (!rtnl_trylock()) {
+ schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
+ return;
+ }
+
net_device = rtnl_dereference(ndev_ctx->nvdev);
if (!net_device)
goto out_unlock;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 5e1ab1160856..98e4deaa3a6a 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3521,6 +3521,7 @@ module_init(macsec_init);
module_exit(macsec_exit);
MODULE_ALIAS_RTNL_LINK("macsec");
+MODULE_ALIAS_GENL_FAMILY("macsec");
MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 5068c582d502..d0626bf5c540 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -749,9 +749,6 @@ void phy_stop_machine(struct phy_device *phydev)
if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
-
- /* Now we can run the state machine synchronously */
- phy_state_machine(&phydev->state_queue.work);
}
/**
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 1790f7fec125..2f742ae5b92e 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -864,15 +864,17 @@ EXPORT_SYMBOL(phy_attached_info);
#define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)"
void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
{
+ const char *drv_name = phydev->drv ? phydev->drv->name : "unbound";
+
if (!fmt) {
dev_info(&phydev->mdio.dev, ATTACHED_FMT "\n",
- phydev->drv->name, phydev_name(phydev),
+ drv_name, phydev_name(phydev),
phydev->irq);
} else {
va_list ap;
dev_info(&phydev->mdio.dev, ATTACHED_FMT,
- phydev->drv->name, phydev_name(phydev),
+ drv_name, phydev_name(phydev),
phydev->irq);
va_start(ap, fmt);
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 8f572b9f3625..9c80e80c5493 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1758,6 +1758,13 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long)&wwan_noarp_info,
},
+ /* u-blox TOBY-L4 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1546, 0x1010,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+ },
+
/* Generic CDC-NCM devices */
{ USB_INTERFACE_INFO(USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 98f17b05c68b..b06169ea60dc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1058,7 +1058,7 @@ static void free_old_xmit_skbs(struct send_queue *sq)
bytes += skb->len;
packets++;
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
}
/* Avoid overhead when no packets have been processed
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index fa315d84e98e..a1ea9ef97ed9 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -787,6 +787,8 @@ int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);
+void iwl_pcie_rx_allocator_work(struct work_struct *data);
+
/* common functions that are used by gen2 transport */
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 351c4423125a..942736d3fa75 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -597,7 +597,7 @@ static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
rxq->free_count += RX_CLAIM_REQ_ALLOC;
}
-static void iwl_pcie_rx_allocator_work(struct work_struct *data)
+void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
struct iwl_rb_allocator *rba_p =
container_of(data, struct iwl_rb_allocator, rx_alloc);
@@ -900,10 +900,6 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
return err;
}
def_rxq = trans_pcie->rxq;
- if (!rba->alloc_wq)
- rba->alloc_wq = alloc_workqueue("rb_allocator",
- WQ_HIGHPRI | WQ_UNBOUND, 1);
- INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
spin_lock(&rba->lock);
atomic_set(&rba->req_pending, 0);
@@ -1017,10 +1013,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
}
cancel_work_sync(&rba->rx_alloc);
- if (rba->alloc_wq) {
- destroy_workqueue(rba->alloc_wq);
- rba->alloc_wq = NULL;
- }
iwl_pcie_free_rbs_pool(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index f95eec52508e..3927bbf04f72 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1786,6 +1786,11 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_tx_free(trans);
iwl_pcie_rx_free(trans);
+ if (trans_pcie->rba.alloc_wq) {
+ destroy_workqueue(trans_pcie->rba.alloc_wq);
+ trans_pcie->rba.alloc_wq = NULL;
+ }
+
if (trans_pcie->msix_enabled) {
for (i = 0; i < trans_pcie->alloc_vecs; i++) {
irq_set_affinity_hint(
@@ -3169,6 +3174,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->inta_mask = CSR_INI_SET_MASK;
}
+ trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
+ WQ_HIGHPRI | WQ_UNBOUND, 1);
+ INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
+
#ifdef CONFIG_IWLWIFI_PCIE_RTPM
trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
#else
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 08f0477f78d9..9915d83a4a30 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1571,6 +1571,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
wl->state = WL1251_STATE_OFF;
mutex_init(&wl->mutex);
+ spin_lock_init(&wl->wl_lock);
wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE;
wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index df6ce59a1f95..205d82d4c468 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -673,9 +673,7 @@ enum mlx5_device_state {
};
enum mlx5_interface_state {
- MLX5_INTERFACE_STATE_DOWN = BIT(0),
- MLX5_INTERFACE_STATE_UP = BIT(1),
- MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
+ MLX5_INTERFACE_STATE_UP = BIT(0),
};
enum mlx5_pci_status {
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 779b23595596..c99ba7914c0a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3866,6 +3866,8 @@ int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
struct net_device *upper_dev);
+bool netdev_has_any_upper_dev(struct net_device *dev);
+
void *netdev_lower_get_next_private(struct net_device *dev,
struct list_head **iter);
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index dbe29b6c9bd6..d67a8182e5eb 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -973,7 +973,23 @@ int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg
int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
-int skb_pad(struct sk_buff *skb, int pad);
+int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
+
+/**
+ * skb_pad - zero pad the tail of an skb
+ * @skb: buffer to pad
+ * @pad: space to pad
+ *
+ * Ensure that a buffer is followed by a padding area that is zero
+ * filled. Used by network drivers which may DMA or transfer data
+ * beyond the buffer end onto the wire.
+ *
+ * May return error in out-of-memory cases. The skb is freed on error.
+ */
+static inline int skb_pad(struct sk_buff *skb, int pad)
+{
+ return __skb_pad(skb, pad, true);
+}
#define dev_kfree_skb(a) consume_skb(a)
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
@@ -2825,25 +2841,42 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
* skb_put_padto - increase size and pad an skbuff up to a minimal size
* @skb: buffer to pad
* @len: minimal length
+ * @free_on_error: free buffer on error
*
* Pads up a buffer to ensure the trailing bytes exist and are
* blanked. If the buffer already contains sufficient data it
* is untouched. Otherwise it is extended. Returns zero on
- * success. The skb is freed on error.
+ * success. The skb is freed on error if @free_on_error is true.
*/
-static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
+static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
+ bool free_on_error)
{
unsigned int size = skb->len;
if (unlikely(size < len)) {
len -= size;
- if (skb_pad(skb, len))
+ if (__skb_pad(skb, len, free_on_error))
return -ENOMEM;
__skb_put(skb, len);
}
return 0;
}
+/**
+ * skb_put_padto - increase size and pad an skbuff up to a minimal size
+ * @skb: buffer to pad
+ * @len: minimal length
+ *
+ * Pads up a buffer to ensure the trailing bytes exist and are
+ * blanked. If the buffer already contains sufficient data it
+ * is untouched. Otherwise it is extended. Returns zero on
+ * success. The skb is freed on error.
+ */
+static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
+{
+ return __skb_put_padto(skb, len, true);
+}
+
static inline int skb_add_data(struct sk_buff *skb,
struct iov_iter *from, int copy)
{
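A minimal caller sketch for the new free_on_error contract documented above (example_xmit is hypothetical, not part of this series); it mirrors the tag_ksz.c hunk further down, where the caller keeps ownership of the skb when padding fails so exactly one free happens:

	/* Sketch only: pad to the Ethernet minimum without giving up ownership */
	static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* free_on_error=false: on -ENOMEM the skb is NOT freed for us */
		if (__skb_put_padto(skb, ETH_ZLEN, false)) {
			dev->stats.tx_dropped++;
			dev_kfree_skb_any(skb);	/* the single, explicit free */
			return NETDEV_TX_OK;
		}
		/* ... queue the padded skb to the hardware here ... */
		return NETDEV_TX_OK;
	}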
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 1a88008cc6f5..af509f801084 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -70,6 +70,7 @@ struct fib6_node {
__u16 fn_flags;
int fn_sernum;
struct rt6_info *rr_ptr;
+ struct rcu_head rcu;
};
#ifndef CONFIG_IPV6_SUBTREES
@@ -104,7 +105,7 @@ struct rt6_info {
* the same cache line.
*/
struct fib6_table *rt6i_table;
- struct fib6_node *rt6i_node;
+ struct fib6_node __rcu *rt6i_node;
struct in6_addr rt6i_gateway;
@@ -167,13 +168,40 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
rt0->rt6i_flags |= RTF_EXPIRES;
}
+/* Function to safely get fn->sernum for the passed-in rt
+ * and store the result in the passed-in cookie.
+ * Returns true if the cookie was read safely,
+ * false if not.
+ */
+static inline bool rt6_get_cookie_safe(const struct rt6_info *rt,
+ u32 *cookie)
+{
+ struct fib6_node *fn;
+ bool status = false;
+
+ rcu_read_lock();
+ fn = rcu_dereference(rt->rt6i_node);
+
+ if (fn) {
+ *cookie = fn->fn_sernum;
+ status = true;
+ }
+
+ rcu_read_unlock();
+ return status;
+}
+
static inline u32 rt6_get_cookie(const struct rt6_info *rt)
{
+ u32 cookie = 0;
+
if (rt->rt6i_flags & RTF_PCPU ||
(unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
rt = (struct rt6_info *)(rt->dst.from);
- return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+ rt6_get_cookie_safe(rt, &cookie);
+
+ return cookie;
}
static inline void ip6_rt_put(struct rt6_info *rt)
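A hedged caller sketch for rt6_get_cookie_safe() (example_rt_still_valid is hypothetical): the helper replaces direct rt->rt6i_node->fn_sernum reads now that rt6i_node is RCU-managed, the same pattern the route.c hunks below apply in rt6_check():

	/* Sketch: a cached route stays valid only while the cookie captured
	 * at lookup time still matches the fib6 node's sernum.
	 */
	static bool example_rt_still_valid(const struct rt6_info *rt, u32 saved_cookie)
	{
		u32 cookie;

		/* false here means rt6i_node is already NULL: treat as stale */
		if (!rt6_get_cookie_safe(rt, &cookie))
			return false;
		return cookie == saved_cookie;
	}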
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 67f815e5d525..c1109cdbbfa6 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -101,6 +101,13 @@ struct Qdisc {
spinlock_t busylock ____cacheline_aligned_in_smp;
};
+static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
+{
+ if (qdisc->flags & TCQ_F_BUILTIN)
+ return;
+ refcount_inc(&qdisc->refcnt);
+}
+
static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
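A short sketch of the grafting pattern qdisc_refcount_inc() exists for (example_attach is hypothetical, RTNL locking elided): builtin qdiscs such as noop_qdisc are static and must never have their refcount moved, which is exactly what the sch_api.c hunks below switch over to:

	static void example_attach(struct net_device *dev, struct Qdisc *new)
	{
		if (!new)
			new = &noop_qdisc;
		/* no-op when TCQ_F_BUILTIN is set, a real grab otherwise */
		qdisc_refcount_inc(new);
		dev->qdisc = new;
	}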
diff --git a/include/net/tcp.h b/include/net/tcp.h
index ada65e767b28..f642a39f9eee 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1004,9 +1004,7 @@ void tcp_get_default_congestion_control(char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
-int tcp_set_congestion_control(struct sock *sk, const char *name, bool load);
-void tcp_reinit_congestion_control(struct sock *sk,
- const struct tcp_congestion_ops *ca);
+int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
diff --git a/include/net/udp.h b/include/net/udp.h
index 586de4b811b5..626c2d8a70c5 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -260,7 +260,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
}
void udp_v4_early_demux(struct sk_buff *skb);
-void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
+bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_get_port(struct sock *sk, unsigned short snum,
int (*saddr_cmp)(const struct sock *,
const struct sock *));
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 4fb463172aa8..d11c8181f4c5 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -652,12 +652,27 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
}
}
+static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
+{
+ return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
+ BITS_PER_LONG == 64;
+}
+
+static u32 htab_size_value(const struct bpf_htab *htab, bool percpu)
+{
+ u32 size = htab->map.value_size;
+
+ if (percpu || fd_htab_map_needs_adjust(htab))
+ size = round_up(size, 8);
+ return size;
+}
+
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
void *value, u32 key_size, u32 hash,
bool percpu, bool onallcpus,
struct htab_elem *old_elem)
{
- u32 size = htab->map.value_size;
+ u32 size = htab_size_value(htab, percpu);
bool prealloc = htab_is_prealloc(htab);
struct htab_elem *l_new, **pl_new;
void __percpu *pptr;
@@ -696,9 +711,6 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
memcpy(l_new->key, key, key_size);
if (percpu) {
- /* round up value_size to 8 bytes */
- size = round_up(size, 8);
-
if (prealloc) {
pptr = htab_elem_get_ptr(l_new, key_size);
} else {
@@ -1209,17 +1221,9 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr)
{
- struct bpf_map *map;
-
if (attr->value_size != sizeof(u32))
return ERR_PTR(-EINVAL);
-
- /* pointer is stored internally */
- attr->value_size = sizeof(void *);
- map = htab_map_alloc(attr);
- attr->value_size = sizeof(u32);
-
- return map;
+ return htab_map_alloc(attr);
}
static void fd_htab_map_free(struct bpf_map *map)
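A worked illustration of htab_size_value() (values assumed, 64-bit kernel): a hash-of-maps is created with value_size == sizeof(u32), but each element actually stores a struct bpf_map pointer, so the element size is now rounded up in one place instead of temporarily rewriting attr->value_size as the removed fd_htab_map_alloc() trick did:

	/* Illustration only, BITS_PER_LONG == 64 assumed */
	u32 declared = sizeof(u32);              /* 4: what userspace passed in */
	u32 stored   = round_up(declared, 8);    /* 8: fits a struct bpf_map *  */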
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 861ae2a165f4..5a7be3bddfa9 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -53,6 +53,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
brstats->tx_bytes += skb->len;
u64_stats_update_end(&brstats->syncp);
+#ifdef CONFIG_NET_SWITCHDEV
+ skb->offload_fwd_mark = 0;
+#endif
BR_INPUT_SKB_CB(skb)->brdev = dev;
skb_reset_mac_header(skb);
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index 181a44d0f1da..f6b1c7de059d 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -115,7 +115,7 @@ br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac,
void
br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
{
- if (!fdb->added_by_user)
+ if (!fdb->added_by_user || !fdb->dst)
return;
switch (type) {
diff --git a/net/core/datagram.c b/net/core/datagram.c
index a21ca8dee5ea..8c2f4489ff8f 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -362,7 +362,7 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
if (flags & MSG_PEEK) {
err = -ENOENT;
spin_lock_bh(&sk_queue->lock);
- if (skb == skb_peek(sk_queue)) {
+ if (skb->next) {
__skb_unlink(skb, sk_queue);
refcount_dec(&skb->users);
if (destructor)
diff --git a/net/core/dev.c b/net/core/dev.c
index ce15a06d5558..86b4b0a79e7a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5289,6 +5289,7 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
* Ideally, a new ndo_busy_poll_stop() could avoid another round.
*/
rc = napi->poll(napi, BUSY_POLL_BUDGET);
+ trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
netpoll_poll_unlock(have_poll_lock);
if (rc == BUSY_POLL_BUDGET)
__napi_schedule(napi);
@@ -5667,12 +5668,13 @@ EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
* Find out if a device is linked to an upper device and return true in case
* it is. The caller must hold the RTNL lock.
*/
-static bool netdev_has_any_upper_dev(struct net_device *dev)
+bool netdev_has_any_upper_dev(struct net_device *dev)
{
ASSERT_RTNL();
return !list_empty(&dev->adj_list.upper);
}
+EXPORT_SYMBOL(netdev_has_any_upper_dev);
/**
* netdev_master_upper_dev_get - Get master upper device
diff --git a/net/core/filter.c b/net/core/filter.c
index 6280a602604c..169974998c76 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2836,15 +2836,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
sk->sk_prot->setsockopt == tcp_setsockopt) {
if (optname == TCP_CONGESTION) {
char name[TCP_CA_NAME_MAX];
+ bool reinit = bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN;
strncpy(name, optval, min_t(long, optlen,
TCP_CA_NAME_MAX-1));
name[TCP_CA_NAME_MAX-1] = 0;
- ret = tcp_set_congestion_control(sk, name, false);
- if (!ret && bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN)
- /* replacing an existing ca */
- tcp_reinit_congestion_control(sk,
- inet_csk(sk)->icsk_ca_ops);
+ ret = tcp_set_congestion_control(sk, name, false, reinit);
} else {
struct tcp_sock *tp = tcp_sk(sk);
@@ -2872,7 +2869,6 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
ret = -EINVAL;
}
}
- ret = -EINVAL;
#endif
} else {
ret = -EINVAL;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f990eb8b30a9..e07556606284 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1363,18 +1363,20 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
EXPORT_SYMBOL(skb_copy_expand);
/**
- * skb_pad - zero pad the tail of an skb
+ * __skb_pad - zero pad the tail of an skb
* @skb: buffer to pad
* @pad: space to pad
+ * @free_on_error: free buffer on error
*
* Ensure that a buffer is followed by a padding area that is zero
* filled. Used by network drivers which may DMA or transfer data
* beyond the buffer end onto the wire.
*
- * May return error in out of memory cases. The skb is freed on error.
+ * May return error in out-of-memory cases. The skb is freed on error
+ * if @free_on_error is true.
*/
-int skb_pad(struct sk_buff *skb, int pad)
+int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
{
int err;
int ntail;
@@ -1403,10 +1405,11 @@ int skb_pad(struct sk_buff *skb, int pad)
return 0;
free_skb:
- kfree_skb(skb);
+ if (free_on_error)
+ kfree_skb(skb);
return err;
}
-EXPORT_SYMBOL(skb_pad);
+EXPORT_SYMBOL(__skb_pad);
/**
* pskb_put - add data to the tail of a potentially fragmented buffer
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index c442051d5a55..20bc9c56fca0 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -577,7 +577,7 @@ static int dsa_dst_parse(struct dsa_switch_tree *dst)
return err;
}
- if (!dst->cpu_dp->netdev) {
+ if (!dst->cpu_dp) {
pr_warn("Tree has no master device\n");
return -EINVAL;
}
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index de66ca8e6201..fcd90f79458e 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -42,7 +42,8 @@ static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev)
padlen = (skb->len >= ETH_ZLEN) ? 0 : ETH_ZLEN - skb->len;
if (skb_tailroom(skb) >= padlen + KSZ_INGRESS_TAG_LEN) {
- if (skb_put_padto(skb, skb->len + padlen))
+ /* Let dsa_slave_xmit() free skb */
+ if (__skb_put_padto(skb, skb->len + padlen, false))
return NULL;
nskb = skb;
@@ -60,12 +61,13 @@ static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev)
skb_transport_header(skb) - skb->head);
skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
- if (skb_put_padto(nskb, nskb->len + padlen)) {
- kfree_skb(nskb);
+ /* Let skb_put_padto() free nskb, and let dsa_slave_xmit() free
+ * skb
+ */
+ if (skb_put_padto(nskb, nskb->len + padlen))
return NULL;
- }
- kfree_skb(skb);
+ consume_skb(skb);
}
tag = skb_put(nskb, KSZ_INGRESS_TAG_LEN);
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index b09e56214005..9c7b1d74a5c6 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -40,7 +40,7 @@ static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
skb_set_network_header(nskb, skb_network_header(skb) - skb->head);
skb_set_transport_header(nskb, skb_transport_header(skb) - skb->head);
skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
- kfree_skb(skb);
+ consume_skb(skb);
if (padlen) {
skb_put_zero(nskb, padlen);
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 4e7bdb213cd0..172d8309f89e 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -314,7 +314,8 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
ether_addr_copy(hsr_sp->MacAddressA, master->dev->dev_addr);
- skb_put_padto(skb, ETH_ZLEN + HSR_HLEN);
+ if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
+ return;
hsr_forward_skb(skb, master);
return;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 0cbee0a666ff..df68963dc90a 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -258,7 +258,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
esp_output_udp_encap(x, skb, esp);
if (!skb_cloned(skb)) {
- if (tailen <= skb_availroom(skb)) {
+ if (tailen <= skb_tailroom(skb)) {
nfrags = 1;
trailer = skb;
tail = skb_tail_pointer(trailer);
@@ -292,8 +292,6 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
kunmap_atomic(vaddr);
- spin_unlock_bh(&x->lock);
-
nfrags = skb_shinfo(skb)->nr_frags;
__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
@@ -301,6 +299,9 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
skb_shinfo(skb)->nr_frags = ++nfrags;
pfrag->offset = pfrag->offset + allocsize;
+
+ spin_unlock_bh(&x->lock);
+
nfrags++;
skb->len += tailen;
@@ -381,7 +382,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
(unsigned char *)esph - skb->data,
assoclen + ivlen + esp->clen + alen);
if (unlikely(err < 0))
- goto error;
+ goto error_free;
if (!esp->inplace) {
int allocsize;
@@ -392,7 +393,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
spin_lock_bh(&x->lock);
if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
spin_unlock_bh(&x->lock);
- goto error;
+ goto error_free;
}
skb_shinfo(skb)->nr_frags = 1;
@@ -409,7 +410,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
(unsigned char *)esph - skb->data,
assoclen + ivlen + esp->clen + alen);
if (unlikely(err < 0))
- goto error;
+ goto error_free;
}
if ((x->props.flags & XFRM_STATE_ESN))
@@ -442,8 +443,9 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
if (sg != dsg)
esp_ssg_unref(x, tmp);
- kfree(tmp);
+error_free:
+ kfree(tmp);
error:
return err;
}
@@ -695,8 +697,10 @@ skip_cow:
sg_init_table(sg, nfrags);
err = skb_to_sgvec(skb, sg, 0, skb->len);
- if (unlikely(err < 0))
+ if (unlikely(err < 0)) {
+ kfree(tmp);
goto out;
+ }
skb->ip_summed = CHECKSUM_NONE;
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index e0666016a764..50112324fa5c 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -257,7 +257,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
err = esp_output_tail(x, skb, &esp);
- if (err < 0)
+ if (err)
return err;
secpath_reset(skb);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 0bc3c3d73e61..9e9d9afd18f7 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -268,14 +268,14 @@ unsigned int arpt_do_table(struct sk_buff *skb,
acpar.targinfo = t->data;
verdict = t->u.kernel.target->target(skb, &acpar);
- /* Target might have changed stuff. */
- arp = arp_hdr(skb);
-
- if (verdict == XT_CONTINUE)
+ if (verdict == XT_CONTINUE) {
+ /* Target might have changed stuff. */
+ arp = arp_hdr(skb);
e = arpt_next_entry(e);
- else
+ } else {
/* Verdict */
break;
+ }
} while (!acpar.hotdrop);
xt_write_recseq_end(addend);
local_bh_enable();
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 2a55a40211cb..622ed2887cd5 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -352,13 +352,14 @@ ipt_do_table(struct sk_buff *skb,
acpar.targinfo = t->data;
verdict = t->u.kernel.target->target(skb, &acpar);
- /* Target might have changed stuff. */
- ip = ip_hdr(skb);
- if (verdict == XT_CONTINUE)
+ if (verdict == XT_CONTINUE) {
+ /* Target might have changed stuff. */
+ ip = ip_hdr(skb);
e = ipt_next_entry(e);
- else
+ } else {
/* Verdict */
break;
+ }
} while (!acpar.hotdrop);
xt_write_recseq_end(addend);
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 7d72decb80f9..efaa04dcc80e 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -117,7 +117,8 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
* functions are also incrementing the refcount on their own,
* so it's safe to remove the entry even if it's in use. */
#ifdef CONFIG_PROC_FS
- proc_remove(c->pde);
+ if (cn->procdir)
+ proc_remove(c->pde);
#endif
return;
}
@@ -815,6 +816,7 @@ static void clusterip_net_exit(struct net *net)
#ifdef CONFIG_PROC_FS
struct clusterip_net *cn = net_generic(net, clusterip_net_id);
proc_remove(cn->procdir);
+ cn->procdir = NULL;
#endif
nf_unregister_net_hook(net, &cip_arp_ops);
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 71ce33decd97..a3e91b552edc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2481,7 +2481,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
name[val] = 0;
lock_sock(sk);
- err = tcp_set_congestion_control(sk, name, true);
+ err = tcp_set_congestion_control(sk, name, true, true);
release_sock(sk);
return err;
}
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index fde983f6376b..421ea1b918da 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -189,8 +189,8 @@ void tcp_init_congestion_control(struct sock *sk)
INET_ECN_dontxmit(sk);
}
-void tcp_reinit_congestion_control(struct sock *sk,
- const struct tcp_congestion_ops *ca)
+static void tcp_reinit_congestion_control(struct sock *sk,
+ const struct tcp_congestion_ops *ca)
{
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -338,7 +338,7 @@ out:
* tcp_reinit_congestion_control (if the current congestion control was
* already initialized).
*/
-int tcp_set_congestion_control(struct sock *sk, const char *name, bool load)
+int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit)
{
struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_congestion_ops *ca;
@@ -360,9 +360,18 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load)
if (!ca) {
err = -ENOENT;
} else if (!load) {
- icsk->icsk_ca_ops = ca;
- if (!try_module_get(ca->owner))
+ const struct tcp_congestion_ops *old_ca = icsk->icsk_ca_ops;
+
+ if (try_module_get(ca->owner)) {
+ if (reinit) {
+ tcp_reinit_congestion_control(sk, ca);
+ } else {
+ icsk->icsk_ca_ops = ca;
+ module_put(old_ca->owner);
+ }
+ } else {
err = -EBUSY;
+ }
} else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))) {
err = -EPERM;
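A hedged sketch of the two caller semantics after the reinit flag was folded in (simplified fragments, error handling and locking elided; "cubic" is an arbitrary example name):

	/* setsockopt(TCP_CONGESTION): may load a module, always reinitializes */
	err = tcp_set_congestion_control(sk, "cubic", true, true);

	/* bpf_setsockopt(): never loads a module; reinitializes only when the
	 * socket is past BPF_SOCK_OPS_NEEDS_ECN, i.e. an existing ca is replaced
	 */
	err = tcp_set_congestion_control(sk, "cubic", false,
					 bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN);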
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index cd1d044a7fa5..62344804baae 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1176,7 +1176,7 @@ static void udp_set_dev_scratch(struct sk_buff *skb)
scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
scratch->is_linear = !skb_is_nonlinear(skb);
#endif
- if (likely(!skb->_skb_refdst))
+ if (likely(!skb->_skb_refdst && !skb_sec_path(skb)))
scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
}
@@ -1929,14 +1929,16 @@ drop:
/* For TCP sockets, sk_rx_dst is protected by socket lock
* For UDP, we use xchg() to guard against concurrent changes.
*/
-void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
struct dst_entry *old;
if (dst_hold_safe(dst)) {
old = xchg(&sk->sk_rx_dst, dst);
dst_release(old);
+ return old != dst;
}
+ return false;
}
EXPORT_SYMBOL(udp_sk_rx_dst_set);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3c46e9513a31..936e9ab4dda5 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5556,7 +5556,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
* our DAD process, so we don't need
* to do it again
*/
- if (!(ifp->rt->rt6i_node))
+ if (!rcu_access_pointer(ifp->rt->rt6i_node))
ip6_ins_rt(ifp->rt);
if (ifp->idev->cnf.forwarding)
addrconf_join_anycast(ifp);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 9ed35473dcb5..ab64f367d11c 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -226,7 +226,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
int tailen = esp->tailen;
if (!skb_cloned(skb)) {
- if (tailen <= skb_availroom(skb)) {
+ if (tailen <= skb_tailroom(skb)) {
nfrags = 1;
trailer = skb;
tail = skb_tail_pointer(trailer);
@@ -260,8 +260,6 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
kunmap_atomic(vaddr);
- spin_unlock_bh(&x->lock);
-
nfrags = skb_shinfo(skb)->nr_frags;
__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
@@ -269,6 +267,9 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
skb_shinfo(skb)->nr_frags = ++nfrags;
pfrag->offset = pfrag->offset + allocsize;
+
+ spin_unlock_bh(&x->lock);
+
nfrags++;
skb->len += tailen;
@@ -345,7 +346,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
(unsigned char *)esph - skb->data,
assoclen + ivlen + esp->clen + alen);
if (unlikely(err < 0))
- goto error;
+ goto error_free;
if (!esp->inplace) {
int allocsize;
@@ -356,7 +357,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
spin_lock_bh(&x->lock);
if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
spin_unlock_bh(&x->lock);
- goto error;
+ goto error_free;
}
skb_shinfo(skb)->nr_frags = 1;
@@ -373,7 +374,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
(unsigned char *)esph - skb->data,
assoclen + ivlen + esp->clen + alen);
if (unlikely(err < 0))
- goto error;
+ goto error_free;
}
if ((x->props.flags & XFRM_STATE_ESN))
@@ -406,8 +407,9 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
if (sg != dsg)
esp_ssg_unref(x, tmp);
- kfree(tmp);
+error_free:
+ kfree(tmp);
error:
return err;
}
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index f02f131f6435..1cf437f75b0b 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -286,7 +286,7 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
err = esp6_output_tail(x, skb, &esp);
- if (err < 0)
+ if (err)
return err;
secpath_reset(skb);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5cc0ea038198..e1c85bb4eac0 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -148,11 +148,23 @@ static struct fib6_node *node_alloc(void)
return fn;
}
-static void node_free(struct fib6_node *fn)
+static void node_free_immediate(struct fib6_node *fn)
+{
+ kmem_cache_free(fib6_node_kmem, fn);
+}
+
+static void node_free_rcu(struct rcu_head *head)
{
+ struct fib6_node *fn = container_of(head, struct fib6_node, rcu);
+
kmem_cache_free(fib6_node_kmem, fn);
}
+static void node_free(struct fib6_node *fn)
+{
+ call_rcu(&fn->rcu, node_free_rcu);
+}
+
static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
{
int cpu;
@@ -601,9 +613,9 @@ insert_above:
if (!in || !ln) {
if (in)
- node_free(in);
+ node_free_immediate(in);
if (ln)
- node_free(ln);
+ node_free_immediate(ln);
return ERR_PTR(-ENOMEM);
}
@@ -877,7 +889,7 @@ add:
rt->dst.rt6_next = iter;
*ins = rt;
- rt->rt6i_node = fn;
+ rcu_assign_pointer(rt->rt6i_node, fn);
atomic_inc(&rt->rt6i_ref);
if (!info->skip_notify)
inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
@@ -903,7 +915,7 @@ add:
return err;
*ins = rt;
- rt->rt6i_node = fn;
+ rcu_assign_pointer(rt->rt6i_node, fn);
rt->dst.rt6_next = iter->dst.rt6_next;
atomic_inc(&rt->rt6i_ref);
if (!info->skip_notify)
@@ -1038,7 +1050,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
root, and then (in failure) stale node
in main tree.
*/
- node_free(sfn);
+ node_free_immediate(sfn);
err = PTR_ERR(sn);
goto failure;
}
@@ -1468,8 +1480,9 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
int fib6_del(struct rt6_info *rt, struct nl_info *info)
{
+ struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node,
+ lockdep_is_held(&rt->rt6i_table->tb6_lock));
struct net *net = info->nl_net;
- struct fib6_node *fn = rt->rt6i_node;
struct rt6_info **rtp;
#if RT6_DEBUG >= 2
@@ -1658,7 +1671,9 @@ static int fib6_clean_node(struct fib6_walker *w)
if (res) {
#if RT6_DEBUG >= 2
pr_debug("%s: del failed: rt=%p@%p err=%d\n",
- __func__, rt, rt->rt6i_node, res);
+ __func__, rt,
+ rcu_access_pointer(rt->rt6i_node),
+ res);
#endif
continue;
}
@@ -1780,8 +1795,10 @@ static int fib6_age(struct rt6_info *rt, void *arg)
}
gc_args->more++;
} else if (rt->rt6i_flags & RTF_CACHE) {
+ if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout))
+ rt->dst.obsolete = DST_OBSOLETE_KILL;
if (atomic_read(&rt->dst.__refcnt) == 1 &&
- time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
+ rt->dst.obsolete == DST_OBSOLETE_KILL) {
RT6_TRACE("aging clone %p\n", rt);
return -1;
} else if (rt->rt6i_flags & RTF_GATEWAY) {
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 02d795fe3d7f..a5e466d4e093 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -242,7 +242,6 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
pktopt = xchg(&np->pktoptions, NULL);
kfree_skb(pktopt);
- sk->sk_destruct = inet_sock_destruct;
/*
* ... and add it to the refcnt debug socks count
* in the new family. -acme
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index abb2c307fbe8..a338bbc33cf3 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -86,7 +86,6 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
while (offset <= packet_len) {
struct ipv6_opt_hdr *exthdr;
- unsigned int len;
switch (**nexthdr) {
@@ -112,10 +111,9 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
offset);
- len = ipv6_optlen(exthdr);
- if (len + offset >= IPV6_MAXPLEN)
+ offset += ipv6_optlen(exthdr);
+ if (offset > IPV6_MAXPLEN)
return -EINVAL;
- offset += len;
*nexthdr = &exthdr->nexthdr;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 94d6a13d47f0..2d0e7798c793 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -440,7 +440,8 @@ static bool rt6_check_expired(const struct rt6_info *rt)
if (time_after(jiffies, rt->dst.expires))
return true;
} else if (rt->dst.from) {
- return rt6_check_expired((struct rt6_info *) rt->dst.from);
+ return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
+ rt6_check_expired((struct rt6_info *)rt->dst.from);
}
return false;
}
@@ -1289,7 +1290,9 @@ static void rt6_dst_from_metrics_check(struct rt6_info *rt)
static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
{
- if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
+ u32 rt_cookie = 0;
+
+ if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie)
return NULL;
if (rt6_check_expired(rt))
@@ -1357,8 +1360,14 @@ static void ip6_link_failure(struct sk_buff *skb)
if (rt->rt6i_flags & RTF_CACHE) {
if (dst_hold_safe(&rt->dst))
ip6_del_rt(rt);
- } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
- rt->rt6i_node->fn_sernum = -1;
+ } else {
+ struct fib6_node *fn;
+
+ rcu_read_lock();
+ fn = rcu_dereference(rt->rt6i_node);
+ if (fn && (rt->rt6i_flags & RTF_DEFAULT))
+ fn->fn_sernum = -1;
+ rcu_read_unlock();
}
}
}
@@ -1375,7 +1384,8 @@ static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
{
return !(rt->rt6i_flags & RTF_CACHE) &&
- (rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
+ (rt->rt6i_flags & RTF_PCPU ||
+ rcu_access_pointer(rt->rt6i_node));
}
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 20039c8501eb..d6886228e1d0 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -768,6 +768,15 @@ start_lookup:
return 0;
}
+static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+{
+ if (udp_sk_rx_dst_set(sk, dst)) {
+ const struct rt6_info *rt = (const struct rt6_info *)dst;
+
+ inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
+ }
+}
+
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
@@ -817,7 +826,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int ret;
if (unlikely(sk->sk_rx_dst != dst))
- udp_sk_rx_dst_set(sk, dst);
+ udp6_sk_rx_dst_set(sk, dst);
ret = udpv6_queue_rcv_skb(sk, skb);
sock_put(sk);
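A hypothetical consumer sketch (example_check_cached is not in this series) showing why udp6_sk_rx_dst_set() must refresh rx_dst_cookie whenever the dst is actually swapped: the cached dst is only trusted for as long as dst_check() accepts the stored cookie:

	static struct dst_entry *example_check_cached(struct sock *sk)
	{
		struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

		/* a stale cookie makes dst_check() reject the cached route */
		if (dst)
			dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
		return dst;
	}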
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index da49191f7ad0..4abf6287d7e1 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1383,6 +1383,10 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
if (!csk)
return -EINVAL;
+ /* We must prevent loops or risk deadlock! */
+ if (csk->sk_family == PF_KCM)
+ return -EOPNOTSUPP;
+
psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
if (!psock)
return -ENOMEM;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index b0c2d4ae781d..90165a6874bc 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -113,7 +113,6 @@ struct l2tp_net {
spinlock_t l2tp_session_hlist_lock;
};
-static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
{
@@ -127,39 +126,6 @@ static inline struct l2tp_net *l2tp_pernet(const struct net *net)
return net_generic(net, l2tp_net_id);
}
-/* Tunnel reference counts. Incremented per session that is added to
- * the tunnel.
- */
-static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
-{
- refcount_inc(&tunnel->ref_count);
-}
-
-static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
-{
- if (refcount_dec_and_test(&tunnel->ref_count))
- l2tp_tunnel_free(tunnel);
-}
-#ifdef L2TP_REFCNT_DEBUG
-#define l2tp_tunnel_inc_refcount(_t) \
-do { \
- pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", \
- __func__, __LINE__, (_t)->name, \
- refcount_read(&_t->ref_count)); \
- l2tp_tunnel_inc_refcount_1(_t); \
-} while (0)
-#define l2tp_tunnel_dec_refcount(_t) \
-do { \
- pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", \
- __func__, __LINE__, (_t)->name, \
- refcount_read(&_t->ref_count)); \
- l2tp_tunnel_dec_refcount_1(_t); \
-} while (0)
-#else
-#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
-#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
-#endif
-
/* Session hash global list for L2TPv3.
* The session_id SHOULD be random according to RFC3931, but several
* L2TP implementations use incrementing session_ids. So we do a real
@@ -229,6 +195,27 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
}
+/* Lookup a tunnel. A new reference is held on the returned tunnel. */
+struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
+{
+ const struct l2tp_net *pn = l2tp_pernet(net);
+ struct l2tp_tunnel *tunnel;
+
+ rcu_read_lock_bh();
+ list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+ if (tunnel->tunnel_id == tunnel_id) {
+ l2tp_tunnel_inc_refcount(tunnel);
+ rcu_read_unlock_bh();
+
+ return tunnel;
+ }
+ }
+ rcu_read_unlock_bh();
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
+
/* Lookup a session. A new reference is held on the returned session.
* Optionally calls session->ref() too if do_ref is true.
*/
@@ -1348,17 +1335,6 @@ static void l2tp_udp_encap_destroy(struct sock *sk)
}
}
-/* Really kill the tunnel.
- * Come here only when all sessions have been cleared from the tunnel.
- */
-static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
-{
- BUG_ON(refcount_read(&tunnel->ref_count) != 0);
- BUG_ON(tunnel->sock != NULL);
- l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
- kfree_rcu(tunnel, rcu);
-}
-
/* Workqueue tunnel deletion function */
static void l2tp_tunnel_del_work(struct work_struct *work)
{
@@ -1844,6 +1820,8 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
l2tp_session_set_header_len(session, tunnel->version);
+ refcount_set(&session->ref_count, 1);
+
err = l2tp_session_add_to_tunnel(tunnel, session);
if (err) {
kfree(session);
@@ -1851,10 +1829,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
return ERR_PTR(err);
}
- /* Bump the reference count. The session context is deleted
- * only when this drops to zero.
- */
- refcount_set(&session->ref_count, 1);
l2tp_tunnel_inc_refcount(tunnel);
/* Ensure tunnel socket isn't deleted */
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index cdb6e3327f74..9101297f27ad 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -231,6 +231,8 @@ out:
return tunnel;
}
+struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
+
struct l2tp_session *l2tp_session_get(const struct net *net,
struct l2tp_tunnel *tunnel,
u32 session_id, bool do_ref);
@@ -269,6 +271,17 @@ int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+static inline void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
+{
+ refcount_inc(&tunnel->ref_count);
+}
+
+static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
+{
+ if (refcount_dec_and_test(&tunnel->ref_count))
+ kfree_rcu(tunnel, rcu);
+}
+
/* Session reference counts. Incremented when code obtains a reference
* to a session.
*/
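A minimal caller sketch for the new tunnel lookup (example_use_tunnel is hypothetical): every successful l2tp_tunnel_get() is paired with l2tp_tunnel_dec_refcount(), the discipline the netlink handlers below now follow:

	static int example_use_tunnel(struct net *net, u32 tunnel_id)
	{
		struct l2tp_tunnel *tunnel = l2tp_tunnel_get(net, tunnel_id);

		if (!tunnel)
			return -ENODEV;
		/* ... operate on the tunnel; it cannot be freed under us ... */
		l2tp_tunnel_dec_refcount(tunnel);
		return 0;
	}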
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 12cfcd0ca807..57427d430f10 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -65,10 +65,12 @@ static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info,
(info->attrs[L2TP_ATTR_CONN_ID])) {
tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
- tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel)
+ tunnel = l2tp_tunnel_get(net, tunnel_id);
+ if (tunnel) {
session = l2tp_session_get(net, tunnel, session_id,
do_ref);
+ l2tp_tunnel_dec_refcount(tunnel);
+ }
}
return session;
@@ -271,8 +273,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info
}
tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
- tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel == NULL) {
+ tunnel = l2tp_tunnel_get(net, tunnel_id);
+ if (!tunnel) {
ret = -ENODEV;
goto out;
}
@@ -282,6 +284,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info
(void) l2tp_tunnel_delete(tunnel);
+ l2tp_tunnel_dec_refcount(tunnel);
+
out:
return ret;
}
@@ -299,8 +303,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info
}
tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
- tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel == NULL) {
+ tunnel = l2tp_tunnel_get(net, tunnel_id);
+ if (!tunnel) {
ret = -ENODEV;
goto out;
}
@@ -311,6 +315,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info
ret = l2tp_tunnel_notify(&l2tp_nl_family, info,
tunnel, L2TP_CMD_TUNNEL_MODIFY);
+ l2tp_tunnel_dec_refcount(tunnel);
+
out:
return ret;
}
@@ -438,34 +444,37 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[L2TP_ATTR_CONN_ID]) {
ret = -EINVAL;
- goto out;
+ goto err;
}
tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
- tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel == NULL) {
- ret = -ENODEV;
- goto out;
- }
-
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
- goto out;
+ goto err;
+ }
+
+ tunnel = l2tp_tunnel_get(net, tunnel_id);
+ if (!tunnel) {
+ ret = -ENODEV;
+ goto err_nlmsg;
}
ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
NLM_F_ACK, tunnel, L2TP_CMD_TUNNEL_GET);
if (ret < 0)
- goto err_out;
+ goto err_nlmsg_tunnel;
+
+ l2tp_tunnel_dec_refcount(tunnel);
return genlmsg_unicast(net, msg, info->snd_portid);
-err_out:
+err_nlmsg_tunnel:
+ l2tp_tunnel_dec_refcount(tunnel);
+err_nlmsg:
nlmsg_free(msg);
-
-out:
+err:
return ret;
}
@@ -509,8 +518,9 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
ret = -EINVAL;
goto out;
}
+
tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
- tunnel = l2tp_tunnel_find(net, tunnel_id);
+ tunnel = l2tp_tunnel_get(net, tunnel_id);
if (!tunnel) {
ret = -ENODEV;
goto out;
@@ -518,24 +528,24 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
if (!info->attrs[L2TP_ATTR_SESSION_ID]) {
ret = -EINVAL;
- goto out;
+ goto out_tunnel;
}
session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) {
ret = -EINVAL;
- goto out;
+ goto out_tunnel;
}
peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]);
if (!info->attrs[L2TP_ATTR_PW_TYPE]) {
ret = -EINVAL;
- goto out;
+ goto out_tunnel;
}
cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]);
if (cfg.pw_type >= __L2TP_PWTYPE_MAX) {
ret = -EINVAL;
- goto out;
+ goto out_tunnel;
}
if (tunnel->version > 2) {
@@ -557,7 +567,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]);
if (len > 8) {
ret = -EINVAL;
- goto out;
+ goto out_tunnel;
}
cfg.cookie_len = len;
memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len);
@@ -566,7 +576,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]);
if (len > 8) {
ret = -EINVAL;
- goto out;
+ goto out_tunnel;
}
cfg.peer_cookie_len = len;
memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len);
@@ -609,7 +619,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) ||
(l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) {
ret = -EPROTONOSUPPORT;
- goto out;
+ goto out_tunnel;
}
/* Check that pseudowire-specific params are present */
@@ -619,7 +629,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
case L2TP_PWTYPE_ETH_VLAN:
if (!info->attrs[L2TP_ATTR_VLAN_ID]) {
ret = -EINVAL;
- goto out;
+ goto out_tunnel;
}
break;
case L2TP_PWTYPE_ETH:
@@ -647,6 +657,8 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
}
}
+out_tunnel:
+ l2tp_tunnel_dec_refcount(tunnel);
out:
return ret;
}
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index eb541786ccb7..b1d3740ae36a 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -441,7 +441,7 @@ nf_nat_setup_info(struct nf_conn *ct,
else
ct->status |= IPS_DST_NAT;
- if (nfct_help(ct))
+ if (nfct_help(ct) && !nfct_seqadj(ct))
if (!nfct_seqadj_ext_add(ct))
return NF_DROP;
}
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index f5a7cb68694e..b89f4f65b2a0 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -305,7 +305,7 @@ static int nft_target_validate(const struct nft_ctx *ctx,
const struct nf_hook_ops *ops = &basechain->ops[0];
hook_mask = 1 << ops->hooknum;
- if (!(hook_mask & target->hooks))
+ if (target->hooks && !(hook_mask & target->hooks))
return -EINVAL;
ret = nft_compat_chain_validate_dependency(target->table,
@@ -484,7 +484,7 @@ static int nft_match_validate(const struct nft_ctx *ctx,
const struct nf_hook_ops *ops = &basechain->ops[0];
hook_mask = 1 << ops->hooknum;
- if (!(hook_mask & match->hooks))
+ if (match->hooks && !(hook_mask & match->hooks))
return -EINVAL;
ret = nft_compat_chain_validate_dependency(match->table,
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
index 18dd57a52651..14538b1d4d11 100644
--- a/net/netfilter/nft_limit.c
+++ b/net/netfilter/nft_limit.c
@@ -65,19 +65,23 @@ static int nft_limit_init(struct nft_limit *limit,
limit->nsecs = unit * NSEC_PER_SEC;
if (limit->rate == 0 || limit->nsecs < unit)
return -EOVERFLOW;
- limit->tokens = limit->tokens_max = limit->nsecs;
-
- if (tb[NFTA_LIMIT_BURST]) {
- u64 rate;
+ if (tb[NFTA_LIMIT_BURST])
limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
+ else
+ limit->burst = 0;
+
+ if (limit->rate + limit->burst < limit->rate)
+ return -EOVERFLOW;
- rate = limit->rate + limit->burst;
- if (rate < limit->rate)
- return -EOVERFLOW;
+ /* The token bucket size limits the number of tokens that can be
+ * accumulated. tokens_max specifies the bucket size.
+ * tokens_max = unit * (rate + burst) / rate.
+ */
+ limit->tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
+ limit->rate);
+ limit->tokens_max = limit->tokens;
- limit->rate = rate;
- }
if (tb[NFTA_LIMIT_FLAGS]) {
u32 flags = ntohl(nla_get_be32(tb[NFTA_LIMIT_FLAGS]));
@@ -95,9 +99,8 @@ static int nft_limit_dump(struct sk_buff *skb, const struct nft_limit *limit,
{
u32 flags = limit->invert ? NFT_LIMIT_F_INV : 0;
u64 secs = div_u64(limit->nsecs, NSEC_PER_SEC);
- u64 rate = limit->rate - limit->burst;
- if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(rate),
+ if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(limit->rate),
NFTA_LIMIT_PAD) ||
nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs),
NFTA_LIMIT_PAD) ||
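A worked example of the new bucket sizing (all values assumed): with rate = 10 packets/s, burst = 5 and unit = 1 s, so nsecs = NSEC_PER_SEC:

	/*
	 *   tokens_max = nsecs * (rate + burst) / rate
	 *              = 1000000000 * (10 + 5) / 10
	 *              = 1500000000  (1.5 seconds' worth of tokens)
	 *
	 * rate is now stored un-inflated, so the dump path emits limit->rate
	 * directly instead of subtracting burst back out.
	 */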
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 008a45ca3112..1c61af9af67d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2191,6 +2191,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
struct timespec ts;
__u32 ts_status;
bool is_drop_n_account = false;
+ bool do_vnet = false;
/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
* We may add members to them until current aligned size without forcing
@@ -2241,8 +2242,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
netoff = TPACKET_ALIGN(po->tp_hdrlen +
(maclen < 16 ? 16 : maclen)) +
po->tp_reserve;
- if (po->has_vnet_hdr)
+ if (po->has_vnet_hdr) {
netoff += sizeof(struct virtio_net_hdr);
+ do_vnet = true;
+ }
macoff = netoff - maclen;
}
if (po->tp_version <= TPACKET_V2) {
@@ -2259,8 +2262,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
skb_set_owner_r(copy_skb, sk);
}
snaplen = po->rx_ring.frame_size - macoff;
- if ((int)snaplen < 0)
+ if ((int)snaplen < 0) {
snaplen = 0;
+ do_vnet = false;
+ }
}
} else if (unlikely(macoff + snaplen >
GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
@@ -2273,6 +2278,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
if (unlikely((int)snaplen < 0)) {
snaplen = 0;
macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
+ do_vnet = false;
}
}
spin_lock(&sk->sk_receive_queue.lock);
@@ -2298,7 +2304,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
}
spin_unlock(&sk->sk_receive_queue.lock);
- if (po->has_vnet_hdr) {
+ if (do_vnet) {
if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
sizeof(struct virtio_net_hdr),
vio_le(), true)) {
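
The do_vnet flag above is set once when header room is reserved and cleared on every path that clamps snaplen to zero, so the virtio_net header is only written when it still has valid data behind it. A much simplified standalone model:

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		bool has_vnet_hdr = true;
		bool do_vnet = false;
		int frame_size = 64, macoff = 80;	/* contrived: frame too small */
		int snaplen;

		if (has_vnet_hdr)
			do_vnet = true;

		snaplen = frame_size - macoff;
		if (snaplen < 0) {
			snaplen = 0;
			do_vnet = false;	/* would read past the buffer */
		}

		printf("snaplen=%d write_vnet=%d\n", snaplen, do_vnet);
		return 0;
	}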
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 9fd44c221347..6c5ea84d2682 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -215,9 +215,15 @@ static void tcf_chain_flush(struct tcf_chain *chain)
static void tcf_chain_destroy(struct tcf_chain *chain)
{
- list_del(&chain->list);
- tcf_chain_flush(chain);
- kfree(chain);
+ /* May already have been removed from the list by a previous call. */
+ if (!list_empty(&chain->list))
+ list_del_init(&chain->list);
+
+ /* A reference might still be held if we get here from
+ * tcf_block_put(). Wait for the user to drop the reference before
+ * freeing.
+ */
+ if (!chain->refcnt)
+ kfree(chain);
}
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
@@ -288,8 +294,10 @@ void tcf_block_put(struct tcf_block *block)
if (!block)
return;
- list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+ list_for_each_entry_safe(chain, tmp, &block->chain_list, list) {
+ tcf_chain_flush(chain);
tcf_chain_destroy(chain);
+ }
kfree(block);
}
EXPORT_SYMBOL(tcf_block_put);
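
The rework above splits teardown in two: destroy unlinks idempotently and frees only when no reference remains, while the last put frees an already-unlinked chain. A simplified standalone model of that interaction (not the kernel code):

	#include <stdio.h>
	#include <stdlib.h>

	struct chain { int refcnt; int linked; };

	static void chain_destroy(struct chain *c)
	{
		if (c->linked)
			c->linked = 0;		/* idempotent unlink */
		if (!c->refcnt)
			free(c);		/* no outstanding users */
	}

	static void chain_put(struct chain *c)
	{
		if (--c->refcnt == 0 && !c->linked)
			free(c);		/* last user frees the orphan */
	}

	int main(void)
	{
		struct chain *c = calloc(1, sizeof(*c));

		c->linked = 1;
		c->refcnt = 1;		/* a filter still holds the chain */
		chain_destroy(c);	/* block teardown: unlink, don't free */
		chain_put(c);		/* user drops its ref: now freed */
		printf("done\n");
		return 0;
	}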
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index a3fa144b8648..4fb5a3222d0d 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -836,7 +836,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
old = dev_graft_qdisc(dev_queue, new);
if (new && i > 0)
- refcount_inc(&new->refcnt);
+ qdisc_refcount_inc(new);
if (!ingress)
qdisc_destroy(old);
@@ -847,7 +847,7 @@ skip:
notify_and_destroy(net, skb, n, classid,
dev->qdisc, new);
if (new && !new->ops->attach)
- refcount_inc(&new->refcnt);
+ qdisc_refcount_inc(new);
dev->qdisc = new ? : &noop_qdisc;
if (new && new->ops->attach)
@@ -1256,7 +1256,7 @@ replay:
if (q == p ||
(p && check_loop(q, p, 0)))
return -ELOOP;
- refcount_inc(&q->refcnt);
+ qdisc_refcount_inc(q);
goto graft;
} else {
if (!q)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 780db43300b1..156c8a33c677 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1139,6 +1139,13 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
struct tc_ratespec *r;
int err;
+ qdisc_watchdog_init(&q->watchdog, sch);
+ hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+ q->delay_timer.function = cbq_undelay;
+
+ if (!opt)
+ return -EINVAL;
+
err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
if (err < 0)
return err;
@@ -1177,9 +1184,6 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
q->link.avpkt = q->link.allot/2;
q->link.minidle = -0x7FFFFFFF;
- qdisc_watchdog_init(&q->watchdog, sch);
- hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
- q->delay_timer.function = cbq_undelay;
q->toplevel = TC_CBQ_MAXLEVEL;
q->now = psched_get_time();
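
cbq_init above, like the other scheduler fixes below (fq_codel, hfsc, hhf, htb, multiq, netem, sfq, tbf), moves timer and work-struct initialization ahead of the first early return, because the qdisc core calls ->destroy() on a qdisc whose ->init() failed. A standalone sketch of the rule, with hypothetical names:

	#include <stdbool.h>
	#include <stdio.h>

	struct sched { bool timer_ready; };

	static int sched_init(struct sched *q, const void *opt)
	{
		q->timer_ready = true;	/* always runs, even on bad options */
		if (!opt)
			return -1;	/* -EINVAL in the real code */
		return 0;
	}

	static void sched_destroy(struct sched *q)
	{
		/* safe: the timer field is initialized on every init() path */
		if (q->timer_ready)
			printf("cancel timer\n");
	}

	int main(void)
	{
		struct sched q = { false };

		if (sched_init(&q, NULL) < 0)
			sched_destroy(&q);	/* what the qdisc core does */
		return 0;
	}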
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 337f2d6d81e4..2c0c05f2cc34 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -491,10 +491,8 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
if (!q->flows)
return -ENOMEM;
q->backlogs = kvzalloc(q->flows_cnt * sizeof(u32), GFP_KERNEL);
- if (!q->backlogs) {
- kvfree(q->flows);
+ if (!q->backlogs)
return -ENOMEM;
- }
for (i = 0; i < q->flows_cnt; i++) {
struct fq_codel_flow *flow = q->flows + i;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 57ba406f1437..4ba6da5fb254 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -785,7 +785,7 @@ static void attach_default_qdiscs(struct net_device *dev)
dev->priv_flags & IFF_NO_QUEUE) {
netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
dev->qdisc = txq->qdisc_sleeping;
- refcount_inc(&dev->qdisc->refcnt);
+ qdisc_refcount_inc(dev->qdisc);
} else {
qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
if (qdisc) {
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index fd15200f8627..11ab8dace901 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1418,6 +1418,8 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
struct tc_hfsc_qopt *qopt;
int err;
+ qdisc_watchdog_init(&q->watchdog, sch);
+
if (opt == NULL || nla_len(opt) < sizeof(*qopt))
return -EINVAL;
qopt = nla_data(opt);
@@ -1430,7 +1432,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
err = tcf_block_get(&q->root.block, &q->root.filter_list);
if (err)
- goto err_tcf;
+ return err;
q->root.cl_common.classid = sch->handle;
q->root.refcnt = 1;
@@ -1448,13 +1450,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
qdisc_class_hash_grow(sch, &q->clhash);
- qdisc_watchdog_init(&q->watchdog, sch);
-
return 0;
-
-err_tcf:
- qdisc_class_hash_destroy(&q->clhash);
- return err;
}
static int
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 51d3ba682af9..73a53c08091b 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -477,6 +477,9 @@ static void hhf_destroy(struct Qdisc *sch)
kvfree(q->hhf_valid_bits[i]);
}
+ if (!q->hh_flows)
+ return;
+
for (i = 0; i < HH_FLOWS_CNT; i++) {
struct hh_flow_state *flow, *next;
struct list_head *head = &q->hh_flows[i];
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 5d65ec5207e9..5bf5177b2bd3 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1017,6 +1017,9 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
int err;
int i;
+ qdisc_watchdog_init(&q->watchdog, sch);
+ INIT_WORK(&q->work, htb_work_func);
+
if (!opt)
return -EINVAL;
@@ -1041,8 +1044,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
for (i = 0; i < TC_HTB_NUMPRIO; i++)
INIT_LIST_HEAD(q->drops + i);
- qdisc_watchdog_init(&q->watchdog, sch);
- INIT_WORK(&q->work, htb_work_func);
qdisc_skb_head_init(&q->direct_queue);
if (tb[TCA_HTB_DIRECT_QLEN])
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index f143b7bbaa0d..9c454f5d6c38 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -257,12 +257,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
for (i = 0; i < q->max_bands; i++)
q->queues[i] = &noop_qdisc;
- err = multiq_tune(sch, opt);
-
- if (err)
- kfree(q->queues);
-
- return err;
+ return multiq_tune(sch, opt);
}
static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 1b3dd6190e93..14d1724e0dc4 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -933,11 +933,11 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)
struct netem_sched_data *q = qdisc_priv(sch);
int ret;
+ qdisc_watchdog_init(&q->watchdog, sch);
+
if (!opt)
return -EINVAL;
- qdisc_watchdog_init(&q->watchdog, sch);
-
q->loss_model = CLG_RANDOM;
ret = netem_change(sch, opt);
if (ret)
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 82469ef9655e..fc69fc5956e9 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -716,13 +716,13 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
int i;
int err;
+ setup_deferrable_timer(&q->perturb_timer, sfq_perturbation,
+ (unsigned long)sch);
+
err = tcf_block_get(&q->block, &q->filter_list);
if (err)
return err;
- setup_deferrable_timer(&q->perturb_timer, sfq_perturbation,
- (unsigned long)sch);
-
for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
q->dep[i].next = i + SFQ_MAX_FLOWS;
q->dep[i].prev = i + SFQ_MAX_FLOWS;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index b2e4b6ad241a..493270f0d5b0 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -425,12 +425,13 @@ static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
struct tbf_sched_data *q = qdisc_priv(sch);
+ qdisc_watchdog_init(&q->watchdog, sch);
+ q->qdisc = &noop_qdisc;
+
if (opt == NULL)
return -EINVAL;
q->t_c = ktime_get_ns();
- qdisc_watchdog_init(&q->watchdog, sch);
- q->qdisc = &noop_qdisc;
return tbf_change(sch, opt);
}
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index 9a647214a91e..e99518e79b52 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -70,7 +70,8 @@ static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
info = nla_data(attr);
list_for_each_entry_rcu(laddr, address_list, list) {
- memcpy(info, &laddr->a, addrlen);
+ memcpy(info, &laddr->a, sizeof(laddr->a));
+ memset(info + sizeof(laddr->a), 0, addrlen - sizeof(laddr->a));
info += addrlen;
}
@@ -93,7 +94,9 @@ static int inet_diag_msg_sctpaddrs_fill(struct sk_buff *skb,
info = nla_data(attr);
list_for_each_entry(from, &asoc->peer.transport_addr_list,
transports) {
- memcpy(info, &from->ipaddr, addrlen);
+ memcpy(info, &from->ipaddr, sizeof(from->ipaddr));
+ memset(info + sizeof(from->ipaddr), 0,
+ addrlen - sizeof(from->ipaddr));
info += addrlen;
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 1db478e34520..8d760863bc41 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4538,8 +4538,7 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
info->sctpi_ictrlchunks = asoc->stats.ictrlchunks;
prim = asoc->peer.primary_path;
- memcpy(&info->sctpi_p_address, &prim->ipaddr,
- sizeof(struct sockaddr_storage));
+ memcpy(&info->sctpi_p_address, &prim->ipaddr, sizeof(prim->ipaddr));
info->sctpi_p_state = prim->state;
info->sctpi_p_cwnd = prim->cwnd;
info->sctpi_p_srtt = prim->srtt;
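
All three sctp hunks bound the copy to the size of the real address object; the diag fills additionally zero the remainder of each fixed-size slot instead of copying adjacent kernel memory into it. A standalone sketch with hypothetical sizes:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char slot[32];				/* addrlen-sized slot */
		struct { char data[20]; } addr;		/* smaller real address */

		memset(&addr, 0xab, sizeof(addr));
		memcpy(slot, &addr, sizeof(addr));	/* copy what exists */
		memset(slot + sizeof(addr), 0,
		       sizeof(slot) - sizeof(addr));	/* zero, don't leak */

		printf("tail byte: %d\n", slot[31]);	/* 0, not stale memory */
		return 0;
	}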
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 767e0537dde5..89cd061c4468 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -65,6 +65,8 @@ static struct tipc_bearer *bearer_get(struct net *net, int bearer_id)
}
static void bearer_disable(struct net *net, struct tipc_bearer *b);
+static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev);
/**
* tipc_media_find - locates specified media object by name
@@ -428,6 +430,10 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
/* Associate TIPC bearer with L2 bearer */
rcu_assign_pointer(b->media_ptr, dev);
+ b->pt.dev = dev;
+ b->pt.type = htons(ETH_P_TIPC);
+ b->pt.func = tipc_l2_rcv_msg;
+ dev_add_pack(&b->pt);
memset(&b->bcast_addr, 0, sizeof(b->bcast_addr));
memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len);
b->bcast_addr.media_id = b->media->type_id;
@@ -447,6 +453,7 @@ void tipc_disable_l2_media(struct tipc_bearer *b)
struct net_device *dev;
dev = (struct net_device *)rtnl_dereference(b->media_ptr);
+ dev_remove_pack(&b->pt);
RCU_INIT_POINTER(dev->tipc_ptr, NULL);
synchronize_net();
dev_put(dev);
@@ -594,11 +601,12 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
struct tipc_bearer *b;
rcu_read_lock();
- b = rcu_dereference_rtnl(dev->tipc_ptr);
+ b = rcu_dereference_rtnl(dev->tipc_ptr) ?:
+ rcu_dereference_rtnl(orig_dev->tipc_ptr);
if (likely(b && test_bit(0, &b->up) &&
(skb->pkt_type <= PACKET_MULTICAST))) {
skb->next = NULL;
- tipc_rcv(dev_net(dev), skb, b);
+ tipc_rcv(dev_net(b->pt.dev), skb, b);
rcu_read_unlock();
return NET_RX_SUCCESS;
}
@@ -659,11 +667,6 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
return NOTIFY_OK;
}
-static struct packet_type tipc_packet_type __read_mostly = {
- .type = htons(ETH_P_TIPC),
- .func = tipc_l2_rcv_msg,
-};
-
static struct notifier_block notifier = {
.notifier_call = tipc_l2_device_event,
.priority = 0,
@@ -671,19 +674,12 @@ static struct notifier_block notifier = {
int tipc_bearer_setup(void)
{
- int err;
-
- err = register_netdevice_notifier(&notifier);
- if (err)
- return err;
- dev_add_pack(&tipc_packet_type);
- return 0;
+ return register_netdevice_notifier(&notifier);
}
void tipc_bearer_cleanup(void)
{
unregister_netdevice_notifier(&notifier);
- dev_remove_pack(&tipc_packet_type);
}
void tipc_bearer_stop(struct net *net)
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 635c9086e19a..e07a55a80c18 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -131,6 +131,7 @@ struct tipc_media {
* @name: bearer name (format = media:interface)
* @media: ptr to media structure associated with bearer
* @bcast_addr: media address used in broadcasting
+ * @pt: packet type for bearer
* @rcu: rcu struct for tipc_bearer
* @priority: default link priority for bearer
* @window: default window size for bearer
@@ -151,6 +152,7 @@ struct tipc_bearer {
char name[TIPC_MAX_BEARER_NAME];
struct tipc_media *media;
struct tipc_media_addr bcast_addr;
+ struct packet_type pt;
struct rcu_head rcu;
u32 priority;
u32 window;
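
The tipc change above replaces the single global ETH_P_TIPC handler with one packet_type per enabled bearer, bound to the bearer's device via pt.dev, so receive lookup and delivery agree on the device even for stacked interfaces. A much simplified model of per-device registration (hypothetical types, not the kernel API):

	#include <stdio.h>

	struct device { const char *name; };
	struct packet_type { struct device *dev; int proto; };
	struct bearer { struct packet_type pt; };

	static void bearer_enable(struct bearer *b, struct device *dev)
	{
		b->pt.dev = dev;	/* bind the handler to this device */
		b->pt.proto = 0x88ca;	/* ETH_P_TIPC */
		/* dev_add_pack(&b->pt) in the real code */
	}

	int main(void)
	{
		struct device eth0 = { "eth0" };
		struct bearer b;

		bearer_enable(&b, &eth0);
		printf("bearer bound to %s\n", b.pt.dev->name);
		return 0;
	}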
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index dcd90e6fa7c3..6ef379f004ac 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -479,13 +479,14 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
struct sk_buff *_skb = *skb;
- struct tipc_msg *hdr = buf_msg(_skb);
+ struct tipc_msg *hdr;
struct tipc_msg ohdr;
- int dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE);
+ int dlen;
if (skb_linearize(_skb))
goto exit;
hdr = buf_msg(_skb);
+ dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE);
if (msg_dest_droppable(hdr))
goto exit;
if (msg_errcode(hdr))
@@ -511,6 +512,8 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
goto exit;
+ /* reassign after skb header modifications */
+ hdr = buf_msg(_skb);
/* Now reverse the concerned fields */
msg_set_errcode(hdr, err);
msg_set_non_seq(hdr, 0);
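
tipc_msg_reverse now re-derives hdr after skb_linearize() and pskb_expand_head(), both of which may reallocate the buffer the old pointer referenced. A standalone model of the stale-pointer hazard, using realloc() as a stand-in:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		char *buf = malloc(16);
		char *hdr;

		if (!buf)
			return 1;
		strcpy(buf, "hdr");
		hdr = buf;			/* pointer into the buffer */

		buf = realloc(buf, 4096);	/* may move, like skb_linearize */
		if (!buf)
			return 1;
		/* hdr may now dangle; re-derive it, as tipc_msg_reverse does */
		hdr = buf;

		printf("%s\n", hdr);
		free(buf);
		return 0;
	}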
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 9b4dcb6a16b5..7dd22330a6b4 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1126,8 +1126,8 @@ int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
strncpy(linkname, tipc_link_name(link), len);
err = 0;
}
-exit:
tipc_node_read_unlock(node);
+exit:
tipc_node_put(node);
return err;
}
@@ -1557,6 +1557,8 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
/* Check/update node state before receiving */
if (unlikely(skb)) {
+ if (unlikely(skb_linearize(skb)))
+ goto discard;
tipc_node_write_lock(n);
if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
if (le->link) {
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 101e3597338f..d50edd6e0019 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2255,8 +2255,8 @@ void tipc_sk_reinit(struct net *net)
do {
tsk = ERR_PTR(rhashtable_walk_start(&iter));
- if (tsk)
- continue;
+ if (IS_ERR(tsk))
+ goto walk_stop;
while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
spin_lock_bh(&tsk->sk.sk_lock.slock);
@@ -2265,7 +2265,7 @@ void tipc_sk_reinit(struct net *net)
msg_set_orignode(msg, tn->own_addr);
spin_unlock_bh(&tsk->sk.sk_lock.slock);
}
-
+walk_stop:
rhashtable_walk_stop(&iter);
} while (tsk == ERR_PTR(-EAGAIN));
}
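
rhashtable_walk_start() reports failure as an errno wrapped in ERR_PTR(), and the old "continue" looped back without ever calling rhashtable_walk_stop(); the fix branches to walk_stop so start and stop stay balanced. A plain-C sketch of the control flow:

	#include <stdio.h>

	#define ERR_VAL ((void *)-1L)
	#define IS_ERR(p) ((p) == ERR_VAL)

	int main(void)
	{
		void *it = ERR_VAL;	/* walk_start failed */

		if (IS_ERR(it))
			goto walk_stop;	/* skip the body, not the cleanup */
		printf("iterate\n");
	walk_stop:
		printf("walk_stop\n");	/* always balanced with walk_start */
		return 0;
	}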
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 0bf91cd3733c..be3d9e3183dc 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -52,7 +52,6 @@ struct tipc_subscriber {
struct list_head subscrp_list;
};
-static void tipc_subscrp_delete(struct tipc_subscription *sub);
static void tipc_subscrb_put(struct tipc_subscriber *subscriber);
/**
@@ -197,15 +196,19 @@ static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
{
struct list_head *subscription_list = &subscriber->subscrp_list;
struct tipc_subscription *sub, *temp;
+ u32 timeout;
spin_lock_bh(&subscriber->lock);
list_for_each_entry_safe(sub, temp, subscription_list, subscrp_list) {
if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
continue;
- tipc_nametbl_unsubscribe(sub);
- list_del(&sub->subscrp_list);
- tipc_subscrp_delete(sub);
+ timeout = htohl(sub->evt.s.timeout, sub->swap);
+ if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer)) {
+ tipc_nametbl_unsubscribe(sub);
+ list_del(&sub->subscrp_list);
+ tipc_subscrp_put(sub);
+ }
if (s)
break;
@@ -236,18 +239,12 @@ static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
tipc_subscrb_put(subscriber);
}
-static void tipc_subscrp_delete(struct tipc_subscription *sub)
-{
- u32 timeout = htohl(sub->evt.s.timeout, sub->swap);
-
- if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer))
- tipc_subscrp_put(sub);
-}
-
static void tipc_subscrp_cancel(struct tipc_subscr *s,
struct tipc_subscriber *subscriber)
{
+ tipc_subscrb_get(subscriber);
tipc_subscrb_subscrp_delete(subscriber, s);
+ tipc_subscrb_put(subscriber);
}
static struct tipc_subscription *tipc_subscrp_create(struct net *net,
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index ff61d8557929..69b16ee327d9 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2226,7 +2226,6 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
goto no_transform;
}
- dst_hold(&xdst->u.dst);
route = xdst->route;
}
}
@@ -3308,9 +3307,15 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
struct xfrm_state *x_new[XFRM_MAX_DEPTH];
struct xfrm_migrate *mp;
+ /* Stage 0 - sanity checks */
if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
goto out;
+ if (dir >= XFRM_POLICY_MAX) {
+ err = -EINVAL;
+ goto out;
+ }
+
/* Stage 1 - find policy */
if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
err = -ENOENT;
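
dir comes from userspace and indexes fixed-size per-direction tables (XFRM_POLICY_MAX entries), hence the new range check before any lookup. A standalone sketch of validating an index before array access, with a hypothetical table:

	#include <stdio.h>

	#define POLICY_MAX 3

	static int policy_find(unsigned int dir)
	{
		static const int table[POLICY_MAX] = { 10, 20, 30 };

		if (dir >= POLICY_MAX)
			return -1;	/* -EINVAL before touching the array */
		return table[dir];
	}

	int main(void)
	{
		printf("%d %d\n", policy_find(1), policy_find(7)); /* 20 -1 */
		return 0;
	}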
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 6c0956d10db6..a792effdb0b5 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1620,6 +1620,7 @@ int
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
unsigned short family, struct net *net)
{
+ int i;
int err = 0;
struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
if (!afinfo)
@@ -1628,6 +1629,9 @@ xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
spin_lock_bh(&net->xfrm.xfrm_state_lock); /*FIXME*/
if (afinfo->tmpl_sort)
err = afinfo->tmpl_sort(dst, src, n);
+ else
+ for (i = 0; i < n; i++)
+ dst[i] = src[i];
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
rcu_read_unlock();
return err;
@@ -1638,6 +1642,7 @@ int
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
unsigned short family)
{
+ int i;
int err = 0;
struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
struct net *net = xs_net(*src);
@@ -1648,6 +1653,9 @@ xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
spin_lock_bh(&net->xfrm.xfrm_state_lock);
if (afinfo->state_sort)
err = afinfo->state_sort(dst, src, n);
+ else
+ for (i = 0; i < n; i++)
+ dst[i] = src[i];
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
rcu_read_unlock();
return err;
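
Both sort helpers above now fall back to an identity copy when the address family provides no sort callback, so dst is never handed back uninitialized. A standalone sketch of that fallback:

	#include <stdio.h>

	static void sort_or_copy(int *dst, const int *src, int n,
				 void (*sort)(int *, const int *, int))
	{
		int i;

		if (sort) {
			sort(dst, src, n);
		} else {
			for (i = 0; i < n; i++)	/* identity fallback */
				dst[i] = src[i];
		}
	}

	int main(void)
	{
		int src[3] = { 3, 1, 2 }, dst[3];

		sort_or_copy(dst, src, 3, NULL);
		printf("%d %d %d\n", dst[0], dst[1], dst[2]);	/* 3 1 2 */
		return 0;
	}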
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 2be4c6af008a..9391ced05259 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -796,7 +796,7 @@ static int copy_user_offload(struct xfrm_state_offload *xso, struct sk_buff *skb
return -EMSGSIZE;
xuo = nla_data(attr);
-
+ memset(xuo, 0, sizeof(*xuo));
xuo->ifindex = xso->dev->ifindex;
xuo->flags = xso->flags;
@@ -1869,6 +1869,7 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
return -EMSGSIZE;
id = nlmsg_data(nlh);
+ memset(&id->sa_id, 0, sizeof(id->sa_id));
memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
id->sa_id.spi = x->id.spi;
id->sa_id.family = x->props.family;
@@ -2578,6 +2579,8 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct
ue = nlmsg_data(nlh);
copy_to_user_state(x, &ue->state);
ue->hard = (c->data.hard != 0) ? 1 : 0;
+ /* clear the padding bytes */
+ memset(&ue->hard + 1, 0, sizeof(*ue) - offsetofend(typeof(*ue), hard));
err = xfrm_mark_put(skb, &x->mark);
if (err)
@@ -2715,6 +2718,7 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
struct nlattr *attr;
id = nlmsg_data(nlh);
+ memset(id, 0, sizeof(*id));
memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
id->spi = x->id.spi;
id->family = x->props.family;
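
The memset() additions in xfrm_user all follow one rule: a structure that reaches userspace through netlink is zeroed in full before its members are assigned, so padding holes and skipped fields cannot leak kernel memory. A standalone illustration:

	#include <stdio.h>
	#include <string.h>

	struct sa_id {
		char daddr[16];
		unsigned int spi;
		unsigned char family;	/* trailing padding bytes follow */
	};

	int main(void)
	{
		struct sa_id id;

		memset(&id, 0, sizeof(id));	/* clear holes before filling */
		id.spi = 0x1234;
		id.family = 2;			/* AF_INET */

		/* every byte is now defined before a copy to userspace */
		printf("spi=%x family=%u\n", id.spi, id.family);
		return 0;
	}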