From 68d676a089625daed9d61df56ac63b5cd7189efe Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 22 Jun 2018 11:56:56 -0700 Subject: nfp: bpf: don't stop offload if replace failed Stopping offload completely if replace of program failed dates back to days of transparent offload. Back then we wanted to silently fall back to the in-driver processing. Today we mark programs for offload when they are loaded into the kernel, so the transparent offload is no longer a reality. Flags check in the driver will only allow replace of a driver program with another driver program or an offload program with another offload program. When driver program is replaced stopping offload is a no-op, because driver program isn't offloaded. When replacing offloaded program if the offload fails the entire operation will fail all the way back to user space and we should continue using the old program. IOW when replacing a driver program stopping offload is unnecessary and when replacing offloaded program - it's a bug, old program should continue to run. In practice this bug would mean that if offload operation was to fail (either due to FW communication error, kernel OOM or new program being offloaded but for a different netdev) driver would continue reporting that previous XDP program is offloaded but in fact no program will be loaded in hardware. The failure is fairly unlikely (found by inspection, when working on the code) but it's unpleasant. Backport note: even though the bug was introduced in commit cafa92ac2553 ("nfp: bpf: add support for XDP_FLAGS_HW_MODE"), this fix depends on commit 441a33031fe5 ("net: xdp: don't allow device-bound programs in driver mode"), so this fix is sufficient only in v4.15 or newer. Kernels v4.13.x and v4.14.x do need to stop offload if it was transparent/opportunistic, i.e. if XDP_FLAGS_HW_MODE was not set on running program. Fixes: cafa92ac2553 ("nfp: bpf: add support for XDP_FLAGS_HW_MODE") Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index fcdfb8e7fdea..7184af94aa53 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -81,10 +81,10 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn, ret = nfp_net_bpf_offload(nn, prog, running, extack); /* Stop offload if replace not possible */ - if (ret && prog) - nfp_bpf_xdp_offload(app, nn, NULL, extack); + if (ret) + return ret; - nn->dp.bpf_offload_xdp = prog && !ret; + nn->dp.bpf_offload_xdp = !!prog; return ret; } -- cgit From 0ee1f4734967af8321ecebaf9c74221ace34f2d5 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 25 Jun 2018 09:26:27 +0200 Subject: r8152: napi hangup fix after disconnect When unplugging an r8152 adapter while the interface is UP, the NIC becomes unusable. usb->disconnect (aka rtl8152_disconnect) deletes napi. Then, rtl8152_disconnect calls unregister_netdev and that invokes netdev->ndo_stop (aka rtl8152_close). rtl8152_close tries to napi_disable, but the napi is already deleted by disconnect above. So the first while loop in napi_disable never finishes. This results in complete deadlock of the network layer as there is rtnl_mutex held by unregister_netdev. 
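Before the fix itself, a minimal sketch of the pattern it applies may help: the ndo_stop handler checks a driver-private "device unplugged" flag before touching NAPI, because by the time close runs the disconnect path may already have deleted the NAPI instance. The flag and struct names are taken from the diff below; the rest of the function is elided and assumed.

/* Sketch only: guard NAPI teardown in ndo_stop against a device that
 * usb->disconnect has already torn down. RTL8152_UNPLUG is set by the
 * disconnect path before it deletes the NAPI instance.
 */
static int rtl8152_close_sketch(struct net_device *netdev)
{
	struct r8152 *tp = netdev_priv(netdev);

	if (!test_bit(RTL8152_UNPLUG, &tp->flags))
		napi_disable(&tp->napi);	/* safe: napi still exists */

	/* usb_kill_urb(), cancel_delayed_work_sync(), netif_stop_queue()
	 * remain safe to call either way, as noted below.
	 */
	return 0;
}

The actual fix follows.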
So avoid the call to napi_disable in rtl8152_close when the device is already gone. The other calls to usb_kill_urb, cancel_delayed_work_sync, netif_stop_queue etc. seem to be fine. The urb and netdev is not destroyed yet. Signed-off-by: Jiri Slaby Cc: linux-usb@vger.kernel.org Cc: netdev@vger.kernel.org Signed-off-by: David S. Miller --- drivers/net/usb/r8152.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 86f7196f9d91..2a58607a6aea 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -3962,7 +3962,8 @@ static int rtl8152_close(struct net_device *netdev) #ifdef CONFIG_PM_SLEEP unregister_pm_notifier(&tp->pm_notifier); #endif - napi_disable(&tp->napi); + if (!test_bit(RTL8152_UNPLUG, &tp->flags)) + napi_disable(&tp->napi); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); -- cgit From 2259b7a64d71f27311a19fd7a5bed47413d75985 Mon Sep 17 00:00:00 2001 From: Dave Stevenson Date: Mon, 25 Jun 2018 15:07:12 +0100 Subject: net: lan78xx: Allow for VLAN headers in timeout calcs The frame abort timeout being set by lan78xx_set_rx_max_frame_length didn't account for any VLAN headers, resulting in very low throughput if used with tagged VLANs. Use VLAN_ETH_HLEN instead of ETH_HLEN to correct for this. Signed-off-by: Dave Stevenson Signed-off-by: David S. Miller --- drivers/net/usb/lan78xx.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 8dff87ec6d99..1a1d3cc8e308 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -2298,7 +2298,7 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu) if ((ll_mtu % dev->maxpacket) == 0) return -EDOM; - ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN); + ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN); netdev->mtu = new_mtu; @@ -2587,7 +2587,8 @@ static int lan78xx_reset(struct lan78xx_net *dev) buf |= FCT_TX_CTL_EN_; ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf); - ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN); + ret = lan78xx_set_rx_max_frame_length(dev, + dev->net->mtu + VLAN_ETH_HLEN); ret = lan78xx_read_reg(dev, MAC_RX, &buf); buf |= MAC_RX_RXEN_; -- cgit From 4a27327b156e1e543c839a9358ba885564729ae7 Mon Sep 17 00:00:00 2001 From: Dave Stevenson Date: Mon, 25 Jun 2018 15:07:13 +0100 Subject: net: lan78xx: Add support for VLAN filtering. HW_VLAN_CTAG_FILTER was partially implemented, but not advertised to Linux. Complete the implementation of this. Signed-off-by: Dave Stevenson Signed-off-by: David S. 
Miller --- drivers/net/usb/lan78xx.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 1a1d3cc8e308..bd41a36924db 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -2363,7 +2363,7 @@ static int lan78xx_set_features(struct net_device *netdev, pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_); } - if (features & NETIF_F_HW_VLAN_CTAG_RX) + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_; else pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_; @@ -2976,6 +2976,9 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) if (DEFAULT_TSO_CSUM_ENABLE) dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG; + if (DEFAULT_VLAN_FILTER_ENABLE) + dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + dev->net->hw_features = dev->net->features; ret = lan78xx_setup_irq_domain(dev); -- cgit From ec21ecf0aad27956dc64475e5acd78f3575df462 Mon Sep 17 00:00:00 2001 From: Dave Stevenson Date: Mon, 25 Jun 2018 15:07:14 +0100 Subject: net: lan78xx: Add support for VLAN tag stripping. The chip supports stripping the VLAN tag and reporting it in metadata. Complete the support for this. Signed-off-by: Dave Stevenson Signed-off-by: David S. Miller --- drivers/net/usb/lan78xx.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index bd41a36924db..28da296f8cf7 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -64,6 +64,7 @@ #define DEFAULT_RX_CSUM_ENABLE (true) #define DEFAULT_TSO_CSUM_ENABLE (true) #define DEFAULT_VLAN_FILTER_ENABLE (true) +#define DEFAULT_VLAN_RX_OFFLOAD (true) #define TX_OVERHEAD (8) #define RXW_PADDING 2 @@ -2363,6 +2364,11 @@ static int lan78xx_set_features(struct net_device *netdev, pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_); } + if (features & NETIF_F_HW_VLAN_CTAG_RX) + pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_; + else + pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_; + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_; else @@ -2976,6 +2982,9 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) if (DEFAULT_TSO_CSUM_ENABLE) dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG; + if (DEFAULT_VLAN_RX_OFFLOAD) + dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX; + if (DEFAULT_VLAN_FILTER_ENABLE) dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER; @@ -3052,6 +3061,16 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev, } } +static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev, + struct sk_buff *skb, + u32 rx_cmd_a, u32 rx_cmd_b) +{ + if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) && + (rx_cmd_a & RX_CMD_A_FVTG_)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + (rx_cmd_b & 0xffff)); +} + static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb) { int status; @@ -3116,6 +3135,8 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb) if (skb->len == size) { lan78xx_rx_csum_offload(dev, skb, rx_cmd_a, rx_cmd_b); + lan78xx_rx_vlan_offload(dev, skb, + rx_cmd_a, rx_cmd_b); skb_trim(skb, skb->len - 4); /* remove fcs */ skb->truesize = size + sizeof(struct sk_buff); @@ -3134,6 +3155,7 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb) skb_set_tail_pointer(skb2, size); lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b); + 
lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b); skb_trim(skb2, skb2->len - 4); /* remove fcs */ skb2->truesize = size + sizeof(struct sk_buff); -- cgit From 9343ac87f2a4e09bf6e27b5f31e72e9e3a82abff Mon Sep 17 00:00:00 2001 From: Dave Stevenson Date: Mon, 25 Jun 2018 15:07:15 +0100 Subject: net: lan78xx: Use s/w csum check on VLANs without tag stripping Observations of VLANs dropping packets due to invalid checksums when not offloading VLAN tag receive. With VLAN tag stripping enabled no issue is observed. Drop back to s/w checksums if VLAN offload is disabled. Signed-off-by: Dave Stevenson Signed-off-by: David S. Miller --- drivers/net/usb/lan78xx.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 28da296f8cf7..2e4130746c40 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -3052,8 +3052,13 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev, struct sk_buff *skb, u32 rx_cmd_a, u32 rx_cmd_b) { + /* HW Checksum offload appears to be flawed if used when not stripping + * VLAN headers. Drop back to S/W checksums under these conditions. + */ if (!(dev->net->features & NETIF_F_RXCSUM) || - unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) { + unlikely(rx_cmd_a & RX_CMD_A_ICSM_) || + ((rx_cmd_a & RX_CMD_A_FVTG_) && + !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) { skb->ip_summed = CHECKSUM_NONE; } else { skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_)); -- cgit From 8ffd569aaa818f2624ca821d9a246342fa8b8c50 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Mon, 4 Jun 2018 19:46:53 +0300 Subject: net/mlx5e: Don't attempt to dereference the ppriv struct if not being eswitch manager The check for cpu hit statistics was not returning immediate false for any non vport rep netdev and hence we crashed (say on mlx5 probed VFs) if user-space tool was calling into any possible netdev in the system. Fix that by doing a proper check before dereferencing. Fixes: 1d447a39142e ('net/mlx5e: Extendable vport representor netdev private data') Signed-off-by: Or Gerlitz Reported-by: Eli Cohen Reviewed-by: Eli Cohen Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 57987f6546e8..378ad74518ec 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -837,8 +837,12 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv) { struct mlx5e_rep_priv *rpriv = priv->ppriv; - struct mlx5_eswitch_rep *rep = rpriv->rep; + struct mlx5_eswitch_rep *rep; + if (!MLX5_CAP_GEN(priv->mdev, eswitch_flow_table)) + return false; + + rep = rpriv->rep; if (rep && rep->vport != FDB_UPLINK_VPORT) return true; -- cgit From 0efc8562491b7d36f6bbc4fbc8f3348cb6641e9c Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 31 May 2018 11:16:18 +0300 Subject: net/mlx5: E-Switch, Avoid setup attempt if not being e-switch manager In smartnic env, the host (PF) driver might not be an e-switch manager, hence the FW will err on driver attempts to deal with setting/unsetting the eswitch and as a result the overall setup of sriov will fail. Fix that by avoiding the operation if e-switch management is not allowed for this driver instance. 
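Read as a whole, the patched SR-IOV enable path looks like the condensed sketch below (error handling trimmed, names as used in the diff further down): the e-switch FDB setup is skipped when this instance is not the e-switch manager, while the per-VF HCA enablement still runs.

/* Condensed sketch of the SR-IOV enable flow after the fix */
static int enable_sriov_sketch(struct mlx5_core_dev *dev, int num_vfs)
{
	int err = 0, vf;

	if (!MLX5_ESWITCH_MANAGER(dev))
		goto enable_vfs_hca;	/* not esw manager: no FDB setup */

	err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs,
					SRIOV_LEGACY);
	if (err)
		return err;

enable_vfs_hca:
	for (vf = 0; vf < num_vfs; vf++) {
		err = mlx5_core_enable_hca(dev, vf + 1);
		if (err)
			break;		/* error handling trimmed */
	}
	return err;
}
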
While here, move to use the correct name for the esw manager capability name. Fixes: 81848731ff40 ('net/mlx5: E-Switch, Add SR-IOV (FDB) support') Signed-off-by: Or Gerlitz Reported-by: Guy Kushnir Reviewed-by: Eli Cohen Tested-by: Eli Cohen Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 3 ++- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 5 +++-- drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 7 ++++++- 5 files changed, 13 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 378ad74518ec..60236f73373c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -839,7 +839,7 @@ static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv) struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep; - if (!MLX5_CAP_GEN(priv->mdev, eswitch_flow_table)) + if (!MLX5_ESWITCH_MANAGER(priv->mdev)) return false; rep = rpriv->rep; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index f63dfbcd29fe..103fd6a0cc65 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1604,7 +1604,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) if (!ESW_ALLOWED(esw)) return 0; - if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) || + if (!MLX5_ESWITCH_MANAGER(esw->dev) || !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 49a75d31185e..f1a86cea86a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -32,6 +32,7 @@ #include #include +#include #include "mlx5_core.h" #include "fs_core.h" @@ -2652,7 +2653,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) goto err; } - if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { + if (MLX5_ESWITCH_MANAGER(dev)) { if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) { err = init_fdb_root_ns(steering); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index afd9f4fa22f4..41ad24f0de2c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -32,6 +32,7 @@ #include #include +#include #include #include "mlx5_core.h" #include "../../mlxfw/mlxfw.h" @@ -159,13 +160,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) } if (MLX5_CAP_GEN(dev, vport_group_manager) && - MLX5_CAP_GEN(dev, eswitch_flow_table)) { + MLX5_ESWITCH_MANAGER(dev)) { err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE); if (err) return err; } - if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { + if (MLX5_ESWITCH_MANAGER(dev)) { err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index 2a8b529ce6dd..a0674962f02c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -88,6 +88,9 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, 
int num_vfs) return -EBUSY; } + if (!MLX5_ESWITCH_MANAGER(dev)) + goto enable_vfs_hca; + err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY); if (err) { mlx5_core_warn(dev, @@ -95,6 +98,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) return err; } +enable_vfs_hca: for (vf = 0; vf < num_vfs; vf++) { err = mlx5_core_enable_hca(dev, vf + 1); if (err) { @@ -140,7 +144,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev) } out: - mlx5_eswitch_disable_sriov(dev->priv.eswitch); + if (MLX5_ESWITCH_MANAGER(dev)) + mlx5_eswitch_disable_sriov(dev->priv.eswitch); if (mlx5_wait_for_vf_pages(dev)) mlx5_core_warn(dev, "timeout reclaiming VFs pages\n"); -- cgit From 733d3e5497070d05971352ca5087bac83c197c3d Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 31 May 2018 11:32:56 +0300 Subject: net/mlx5e: Avoid dealing with vport representors if not being e-switch manager In smartnic env, the host (PF) driver might not be an e-switch manager, hence the switchdev mode representors are running on the embedded cpu (EC) and not at the host. As such, we should avoid dealing with vport representors if not being esw manager. While here, make sure to disallow eswitch switchdev related setups through devlink if we are not esw managers. Fixes: cb67b832921c ('net/mlx5e: Introduce SRIOV VF representors') Signed-off-by: Or Gerlitz Reviewed-by: Eli Cohen Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 12 ++++++------ drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 56c1b6f5593e..dae4156a710d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2846,7 +2846,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) mlx5e_activate_channels(&priv->channels); netif_tx_start_all_queues(priv->netdev); - if (MLX5_VPORT_MANAGER(priv->mdev)) + if (MLX5_ESWITCH_MANAGER(priv->mdev)) mlx5e_add_sqs_fwd_rules(priv); mlx5e_wait_channels_min_rx_wqes(&priv->channels); @@ -2857,7 +2857,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) { mlx5e_redirect_rqts_to_drop(priv); - if (MLX5_VPORT_MANAGER(priv->mdev)) + if (MLX5_ESWITCH_MANAGER(priv->mdev)) mlx5e_remove_sqs_fwd_rules(priv); /* FIXME: This is a W/A only for tx timeout watch dog false alarm when @@ -4597,7 +4597,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) mlx5e_set_netdev_dev_addr(netdev); #if IS_ENABLED(CONFIG_MLX5_ESWITCH) - if (MLX5_VPORT_MANAGER(mdev)) + if (MLX5_ESWITCH_MANAGER(mdev)) netdev->switchdev_ops = &mlx5e_switchdev_ops; #endif @@ -4753,7 +4753,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) mlx5e_enable_async_events(priv); - if (MLX5_VPORT_MANAGER(priv->mdev)) + if (MLX5_ESWITCH_MANAGER(priv->mdev)) mlx5e_register_vport_reps(priv); if (netdev->reg_state != NETREG_REGISTERED) @@ -4788,7 +4788,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) queue_work(priv->wq, &priv->set_rx_mode_work); - if (MLX5_VPORT_MANAGER(priv->mdev)) + if (MLX5_ESWITCH_MANAGER(priv->mdev)) mlx5e_unregister_vport_reps(priv); mlx5e_disable_async_events(priv); @@ -4972,7 +4972,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) return NULL; #ifdef CONFIG_MLX5_ESWITCH - if 
(MLX5_VPORT_MANAGER(mdev)) { + if (MLX5_ESWITCH_MANAGER(mdev)) { rpriv = mlx5e_alloc_nic_rep_priv(mdev); if (!rpriv) { mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 60236f73373c..2b8040a3cdbd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -823,7 +823,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep; - if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) + if (!MLX5_ESWITCH_MANAGER(priv->mdev)) return false; rep = rpriv->rep; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index cecd201f0b73..91f1209886ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1079,8 +1079,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink) if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) return -EOPNOTSUPP; - if (!MLX5_CAP_GEN(dev, vport_group_manager)) - return -EOPNOTSUPP; + if(!MLX5_ESWITCH_MANAGER(dev)) + return -EPERM; if (dev->priv.eswitch->mode == SRIOV_NONE) return -EOPNOTSUPP; -- cgit From a8d70a054a718b63058b3d3ac58b6181815e4289 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Thu, 31 May 2018 12:24:48 +0300 Subject: net/mlx5: E-Switch, Disallow vlan/spoofcheck setup if not being esw manager In smartnic env, if the host (PF) driver is not an e-switch manager, we are not allowed to apply eswitch ports setups such as vlan (VST), spoof-checks, min/max rate or state. Make sure we are eswitch manager when coming to issue these callbacks and err otherwise. Also fix the definition of ESW_ALLOWED to rely on eswitch_manager capability and on the vport_group_manger. Operations on the VF nic vport context, such as setting a mac or reading the vport counters are allowed to the PF in this scheme. The modify nic vport guid code was modified to omit checking the nic_vport_node_guid_modify eswitch capability. The reason for doing so is that modifying node guid requires vport group manager capability, and there's no need to check further capabilities. 1. set_vf_vlan - disallowed 2. set_vf_spoofchk - disallowed 3. set_vf_mac - allowed 4. get_vf_config - allowed 5. set_vf_trust - disallowed 6. set_vf_rate - disallowed 7. get_vf_stat - allowed 8. 
set_vf_link_state - disallowed Fixes: f942380c1239 ('net/mlx5: E-Switch, Vport ingress/egress ACLs rules for spoofchk') Signed-off-by: Eli Cohen Reviewed-by: Or Gerlitz Tested-by: Or Gerlitz --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 12 +++++------- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 2 -- 2 files changed, 5 insertions(+), 9 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 103fd6a0cc65..b79d74860a30 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1594,17 +1594,15 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) } /* Public E-Switch API */ -#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev)) +#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev)) + int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { int err; int i, enabled_events; - if (!ESW_ALLOWED(esw)) - return 0; - - if (!MLX5_ESWITCH_MANAGER(esw->dev) || + if (!ESW_ALLOWED(esw) || !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n"); return -EOPNOTSUPP; @@ -1806,7 +1804,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, u64 node_guid; int err = 0; - if (!ESW_ALLOWED(esw)) + if (!MLX5_CAP_GEN(esw->dev, vport_group_manager)) return -EPERM; if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac)) return -EINVAL; @@ -1883,7 +1881,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, { struct mlx5_vport *evport; - if (!ESW_ALLOWED(esw)) + if (!MLX5_CAP_GEN(esw->dev, vport_group_manager)) return -EPERM; if (!LEGAL_VPORT(esw, vport)) return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 719cecb182c6..7eecd5b07bb1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -549,8 +549,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, return -EINVAL; if (!MLX5_CAP_GEN(mdev, vport_group_manager)) return -EACCES; - if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify)) - return -EOPNOTSUPP; in = kvzalloc(inlen, GFP_KERNEL); if (!in) -- cgit From f811980444ec59ad62f9e041adbb576a821132c7 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Wed, 13 Jun 2018 10:27:34 +0300 Subject: net/mlx5: Fix required capability for manipulating MPFS Manipulating of the MPFS requires eswitch manager capabilities. 
Fixes: eeb66cdb6826 ('net/mlx5: Separate between E-Switch and MPFS') Signed-off-by: Eli Cohen Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c index 7cb67122e8b5..98359559c77e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c @@ -33,6 +33,7 @@ #include #include #include +#include #include "mlx5_core.h" #include "lib/mpfs.h" @@ -98,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev) int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); struct mlx5_mpfs *mpfs; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return 0; mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL); @@ -122,7 +123,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) { struct mlx5_mpfs *mpfs = dev->priv.mpfs; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return; WARN_ON(!hlist_empty(mpfs->hash)); @@ -137,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) u32 index; int err; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return 0; mutex_lock(&mpfs->lock); @@ -179,7 +180,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) int err = 0; u32 index; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return 0; mutex_lock(&mpfs->lock); -- cgit From d14fcb8d877caf1b8d6bd65d444bf62b21f2070c Mon Sep 17 00:00:00 2001 From: Shay Agroskin Date: Tue, 22 May 2018 14:14:02 +0300 Subject: net/mlx5: Fix wrong size allocation for QoS ETC TC regitster The driver allocates wrong size (due to wrong struct name) when issuing a query/set request to NIC's register. Fixes: d8880795dabf ("net/mlx5e: Implement DCBNL IEEE max rate") Signed-off-by: Shay Agroskin Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/port.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index fa9d0760dd36..31a9cbd85689 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -701,7 +701,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc); static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in, int inlen) { - u32 out[MLX5_ST_SZ_DW(qtct_reg)]; + u32 out[MLX5_ST_SZ_DW(qetc_reg)]; if (!MLX5_CAP_GEN(mdev, ets)) return -EOPNOTSUPP; @@ -713,7 +713,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in, static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out, int outlen) { - u32 in[MLX5_ST_SZ_DW(qtct_reg)]; + u32 in[MLX5_ST_SZ_DW(qetc_reg)]; if (!MLX5_CAP_GEN(mdev, ets)) return -EOPNOTSUPP; -- cgit From 603b7bcff824740500ddfa001d7a7168b0b38542 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Fri, 25 May 2018 20:25:59 +0300 Subject: net/mlx5: Fix incorrect raw command length parsing The NULL character was not set correctly for the string containing the command length, this caused failures reading the output of the command due to a random length. The fix is to initialize the output length string. 
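The underlying problem is a plain C buffer-parsing pitfall and is easy to reproduce outside the driver. The user-space sketch below (illustrative only, with made-up buffer contents) shows how bytes left over in a fixed-size buffer after a short copy make sscanf() return a bogus length unless the unused bytes are cleared first, which is what the {0} initializer in the fix guarantees.

#include <stdio.h>
#include <string.h>

int main(void)
{
	char stale[8] = "9999999";	/* pretend old stack contents */
	char buggy[8];
	char fixed[8] = {0};		/* the fix: whole buffer cleared */
	int outlen;

	memcpy(buggy, stale, sizeof(buggy));	/* simulate a reused stack slot */

	/* a short "copy_from_user": the user wrote only the two bytes "12" */
	memcpy(buggy, "12", 2);
	memcpy(fixed, "12", 2);

	sscanf(buggy, "%d", &outlen);
	printf("stale buffer parsed as %d\n", outlen);	/* prints 1299999 */

	sscanf(fixed, "%d", &outlen);
	printf("zeroed buffer parsed as %d\n", outlen);	/* prints 12 */
	return 0;
}
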
Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters") Signed-off-by: Alex Vesker Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 487388aed98f..2db5fe571594 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1276,7 +1276,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf, { struct mlx5_core_dev *dev = filp->private_data; struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; - char outlen_str[8]; + char outlen_str[8] = {0}; int outlen; void *ptr; int err; @@ -1291,8 +1291,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf, if (copy_from_user(outlen_str, buf, count)) return -EFAULT; - outlen_str[7] = 0; - err = sscanf(outlen_str, "%d", &outlen); if (err < 0) return err; -- cgit From d412c31dae053bf30a1bc15582a9990df297a660 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Tue, 12 Jun 2018 16:14:31 +0300 Subject: net/mlx5: Fix command interface race in polling mode The command interface can work in two modes: Events and Polling. In the general case, each time we invoke a command, a work is queued to handle it. When working in events, the interrupt handler completes the command execution. On the other hand, when working in polling mode, the work itself completes it. Due to a bug in the work handler, a command could have been completed by the interrupt handler, while the work handler hasn't finished yet, causing the it to complete once again if the command interface mode was changed from Events to polling after the interrupt handler was called. mlx5_unload_one() mlx5_stop_eqs() // Destroy the EQ before cmd EQ ...cmd_work_handler() write_doorbell() --> EVENT_TYPE_CMD mlx5_cmd_comp_handler() // First free free_ent(cmd, ent->idx) complete(&ent->done) <-- mlx5_stop_eqs //cmd was complete // move to polling before destroying the last cmd EQ mlx5_cmd_use_polling() cmd->mode = POLL; --> cmd_work_handler (continues) if (cmd->mode == POLL) mlx5_cmd_comp_handler() // Double free The solution is to store the cmd->mode before writing the doorbell. Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters") Signed-off-by: Alex Vesker Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 2db5fe571594..384c1fa49081 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -807,6 +807,7 @@ static void cmd_work_handler(struct work_struct *work) unsigned long flags; bool poll_cmd = ent->polling; int alloc_ret; + int cmd_mode; sem = ent->page_queue ? 
&cmd->pages_sem : &cmd->sem; down(sem); @@ -853,6 +854,7 @@ static void cmd_work_handler(struct work_struct *work) set_signature(ent, !cmd->checksum_disabled); dump_command(dev, ent, 1); ent->ts1 = ktime_get_ns(); + cmd_mode = cmd->mode; if (ent->callback) schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); @@ -877,7 +879,7 @@ static void cmd_work_handler(struct work_struct *work) iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); mmiowb(); /* if not in polling don't use ent after this point */ - if (cmd->mode == CMD_MODE_POLLING || poll_cmd) { + if (cmd_mode == CMD_MODE_POLLING || poll_cmd) { poll_timeout(ent); /* make sure we read the descriptor after ownership is SW */ rmb(); -- cgit From a64119415ff248efa61301783bc26551df5dabf6 Mon Sep 17 00:00:00 2001 From: Pieter Jansen van Vuuren Date: Mon, 25 Jun 2018 20:36:27 -0700 Subject: nfp: flower: fix mpls ether type detection Previously it was not possible to distinguish between mpls ether types and other ether types. This leads to incorrect classification of offloaded filters that match on mpls ether type. For example the following two filters overlap: # tc filter add dev eth0 parent ffff: \ protocol 0x8847 flower \ action mirred egress redirect dev eth1 # tc filter add dev eth0 parent ffff: \ protocol 0x0800 flower \ action mirred egress redirect dev eth2 The driver now correctly includes the mac_mpls layer where HW stores mpls fields, when it detects an mpls ether type. It also sets the MPLS_Q bit to indicate that the filter should match mpls packets. Fixes: bb055c198d9b ("nfp: add mpls match offloading support") Signed-off-by: Pieter Jansen van Vuuren Reviewed-by: Simon Horman Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/match.c | 14 ++++++++++++++ drivers/net/ethernet/netronome/nfp/flower/offload.c | 8 ++++++++ 2 files changed, 22 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 91935405f586..84f7a5dbea9d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -123,6 +123,20 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame, NFP_FLOWER_MASK_MPLS_Q; frame->mpls_lse = cpu_to_be32(t_mpls); + } else if (dissector_uses_key(flow->dissector, + FLOW_DISSECTOR_KEY_BASIC)) { + /* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q + * bit, which indicates an mpls ether type but without any + * mpls fields. + */ + struct flow_dissector_key_basic *key_basic; + + key_basic = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_BASIC, + flow->key); + if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) || + key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) + frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q); } } diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index c42e64f32333..477f584f6d28 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -264,6 +264,14 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, case cpu_to_be16(ETH_P_ARP): return -EOPNOTSUPP; + case cpu_to_be16(ETH_P_MPLS_UC): + case cpu_to_be16(ETH_P_MPLS_MC): + if (!(key_layer & NFP_FLOWER_LAYER_MAC)) { + key_layer |= NFP_FLOWER_LAYER_MAC; + key_size += sizeof(struct nfp_flower_mac_mpls); + } + break; + /* Will be included in layer 2. 
*/ case cpu_to_be16(ETH_P_8021Q): break; -- cgit From 951a8ee6def39e25d0e60b9394e5a249ba8b2390 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Mon, 25 Jun 2018 20:36:28 -0700 Subject: nfp: reject binding to shared blocks TC shared blocks allow multiple qdiscs to be grouped together and filters shared between them. Currently the chains of filters attached to a block are only flushed when the block is removed. If a qdisc is removed from a block but the block still exists, flow del messages are not passed to the callback registered for that qdisc. For the NFP, this presents the possibility of rules still existing in hw when they should be removed. Prevent binding to shared blocks until the kernel can send per qdisc del messages when block unbinds occur. tcf_block_shared() was not used outside of the core until now, so also add an empty implementation for builds with CONFIG_NET_CLS=n. Fixes: 4861738775d7 ("net: sched: introduce shared filter blocks infrastructure") Signed-off-by: John Hurley Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/main.c | 3 +++ drivers/net/ethernet/netronome/nfp/flower/offload.c | 3 +++ 2 files changed, 6 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index fcdfb8e7fdea..6b15e3b11956 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -202,6 +202,9 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev, if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; + if (tcf_block_shared(f->block)) + return -EOPNOTSUPP; + switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 477f584f6d28..525057bee0ed 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -631,6 +631,9 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev, if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; + if (tcf_block_shared(f->block)) + return -EOPNOTSUPP; + switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, -- cgit From 2d2595719a97c876f35b1e60e5768e58753b268c Mon Sep 17 00:00:00 2001 From: Chengguang Xu Date: Tue, 26 Jun 2018 09:16:31 +0800 Subject: nfp: cast sizeof() to int when comparing with error code sizeof() will return unsigned value so in the error check negative error code will be always larger than sizeof(). Fixes: a0d8e02c35ff ("nfp: add support for reading nffw info") Signed-off-by: Chengguang Xu Acked-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c index cd34097b79f1..37a6d7822a38 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c @@ -232,7 +232,7 @@ struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp) err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res), nfp_resource_address(state->res), fwinf, sizeof(*fwinf)); - if (err < sizeof(*fwinf)) + if (err < (int)sizeof(*fwinf)) goto err_release; if (!nffw_res_flg_init_get(fwinf)) -- cgit From 82be2ab159a3a0ae4024b946a31f12b221f6c8ff Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 27 Jun 2018 17:47:37 +0800 Subject: hinic: reset irq affinity before freeing irq Following warning is seen when rmmod hinic. This is because affinity value is not reset before calling free_irq(). This patch fixes it. [ 55.181232] WARNING: CPU: 38 PID: 19589 at kernel/irq/manage.c:1608 __free_irq+0x2aa/0x2c0 Fixes: 352f58b0d9f2 ("net-next/hinic: Set Rxq irq to specific cpu for NUMA") Signed-off-by: Wei Yongjun Signed-off-by: David S. Miller --- drivers/net/ethernet/huawei/hinic/hinic_rx.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index e2e5cdc7119c..4c0f7eda1166 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -439,6 +439,7 @@ static void rx_free_irq(struct hinic_rxq *rxq) { struct hinic_rq *rq = rxq->rq; + irq_set_affinity_hint(rq->irq, NULL); free_irq(rq->irq, rxq); rx_del_napi(rxq); } -- cgit From fec9d3b1dc4c481f20f5d2f5aef3ad1cb7504186 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Tue, 26 Jun 2018 10:44:01 +0200 Subject: net: macb: initialize bp->queues[0].bp for at91rm9200 The macb driver currently crashes on at91rm9200 with the following trace: Unable to handle kernel NULL pointer dereference at virtual address 00000014 [...] [] (macb_rx_desc) from [] (at91ether_open+0x2e8/0x3f8) [] (at91ether_open) from [] (__dev_open+0x120/0x13c) [] (__dev_open) from [] (__dev_change_flags+0x17c/0x1a8) [] (__dev_change_flags) from [] (dev_change_flags+0x18/0x4c) [] (dev_change_flags) from [] (ip_auto_config+0x220/0x10b0) [] (ip_auto_config) from [] (do_one_initcall+0x78/0x18c) [] (do_one_initcall) from [] (kernel_init_freeable+0x184/0x1c4) [] (kernel_init_freeable) from [] (kernel_init+0x8/0xe8) [] (kernel_init) from [] (ret_from_fork+0x14/0x34) Solve that by initializing bp->queues[0].bp in at91ether_init (as is done in macb_init). Fixes: ae1f2a56d273 ("net: macb: Added support for many RX queues") Signed-off-by: Alexandre Belloni Acked-by: Nicolas Ferre Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cadence/macb_main.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 3e93df5d4e3b..96cc03a6d942 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -3726,6 +3726,8 @@ static int at91ether_init(struct platform_device *pdev) int err; u32 reg; + bp->queues[0].bp = bp; + dev->netdev_ops = &at91ether_netdev_ops; dev->ethtool_ops = &macb_ethtool_ops; -- cgit From ad088ec480768850db019a5cc543685e868a513d Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 26 Jun 2018 17:39:48 +0200 Subject: ixgbe: split XDP_TX tail and XDP_REDIRECT map flushing The driver was combining the XDP_TX tail flush and XDP_REDIRECT map flushing (xdp_do_flush_map). This is suboptimal, these two flush operations should be kept separate. Fixes: 11393cc9b9be ("xdp: Add batching support to redirect map") Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 3e87dbbc9024..62e57b05a0ae 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2186,9 +2186,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, return skb; } -#define IXGBE_XDP_PASS 0 -#define IXGBE_XDP_CONSUMED 1 -#define IXGBE_XDP_TX 2 +#define IXGBE_XDP_PASS 0 +#define IXGBE_XDP_CONSUMED BIT(0) +#define IXGBE_XDP_TX BIT(1) +#define IXGBE_XDP_REDIR BIT(2) static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, struct xdp_frame *xdpf); @@ -2225,7 +2226,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, case XDP_REDIRECT: err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); if (!err) - result = IXGBE_XDP_TX; + result = IXGBE_XDP_REDIR; else result = IXGBE_XDP_CONSUMED; break; @@ -2285,7 +2286,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, unsigned int mss = 0; #endif /* IXGBE_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring); - bool xdp_xmit = false; + unsigned int xdp_xmit = 0; struct xdp_buff xdp; xdp.rxq = &rx_ring->xdp_rxq; @@ -2328,8 +2329,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, } if (IS_ERR(skb)) { - if (PTR_ERR(skb) == -IXGBE_XDP_TX) { - xdp_xmit = true; + unsigned int xdp_res = -PTR_ERR(skb); + + if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) { + xdp_xmit |= xdp_res; ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size); } else { rx_buffer->pagecnt_bias++; @@ -2401,7 +2404,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, total_rx_packets++; } - if (xdp_xmit) { + if (xdp_xmit & IXGBE_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_xmit & IXGBE_XDP_TX) { struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; /* Force memory writes to complete before letting h/w @@ -2409,8 +2415,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, */ wmb(); writel(ring->next_to_use, ring->tail); - - xdp_do_flush_map(); } u64_stats_update_begin(&rx_ring->syncp); -- cgit From 2e6893123830d04edc366e0ce59d46e622e140c1 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 26 Jun 2018 17:39:53 +0200 Subject: i40e: split XDP_TX tail and XDP_REDIRECT map flushing MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The driver was combining the XDP_TX tail flush and XDP_REDIRECT map flushing (xdp_do_flush_map). This is suboptimal, these two flush operations should be kept separate. It looks like the mistake was copy-pasted from ixgbe. Fixes: d9314c474d4f ("i40e: add support for XDP_REDIRECT") Signed-off-by: Jesper Dangaard Brouer Acked-by: Björn Töpel Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index ed6dbcfd4e96..b151ae316546 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2199,9 +2199,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, return true; } -#define I40E_XDP_PASS 0 -#define I40E_XDP_CONSUMED 1 -#define I40E_XDP_TX 2 +#define I40E_XDP_PASS 0 +#define I40E_XDP_CONSUMED BIT(0) +#define I40E_XDP_TX BIT(1) +#define I40E_XDP_REDIR BIT(2) static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, struct i40e_ring *xdp_ring); @@ -2248,7 +2249,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring, break; case XDP_REDIRECT: err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED; + result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED; break; default: bpf_warn_invalid_xdp_action(act); @@ -2311,7 +2312,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) unsigned int total_rx_bytes = 0, total_rx_packets = 0; struct sk_buff *skb = rx_ring->skb; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - bool failure = false, xdp_xmit = false; + unsigned int xdp_xmit = 0; + bool failure = false; struct xdp_buff xdp; xdp.rxq = &rx_ring->xdp_rxq; @@ -2372,8 +2374,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) } if (IS_ERR(skb)) { - if (PTR_ERR(skb) == -I40E_XDP_TX) { - xdp_xmit = true; + unsigned int xdp_res = -PTR_ERR(skb); + + if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) { + xdp_xmit |= xdp_res; i40e_rx_buffer_flip(rx_ring, rx_buffer, size); } else { rx_buffer->pagecnt_bias++; @@ -2427,12 +2431,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) total_rx_packets++; } - if (xdp_xmit) { + if (xdp_xmit & I40E_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_xmit & I40E_XDP_TX) { struct i40e_ring *xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; i40e_xdp_ring_update_tail(xdp_ring); - xdp_do_flush_map(); } rx_ring->skb = skb; -- cgit From 2471c75efed32529698c26da499954f0253cb401 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 26 Jun 2018 17:39:58 +0200 Subject: virtio_net: split XDP_TX kick and XDP_REDIRECT map flushing The driver was combining XDP_TX virtqueue_kick and XDP_REDIRECT map flushing (xdp_do_flush_map). This is suboptimal, these two flush operations should be kept separate. The suboptimal behavior was introduced in commit 9267c430c6b6 ("virtio-net: add missing virtqueue kick when flushing packets"). Fixes: 9267c430c6b6 ("virtio-net: add missing virtqueue kick when flushing packets") Signed-off-by: Jesper Dangaard Brouer Acked-by: Jason Wang Signed-off-by: David S. 
Miller --- drivers/net/virtio_net.c | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index b6c9a2af3732..53085c63277b 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -53,6 +53,10 @@ module_param(napi_tx, bool, 0644); /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */ #define VIRTIO_XDP_HEADROOM 256 +/* Separating two types of XDP xmit */ +#define VIRTIO_XDP_TX BIT(0) +#define VIRTIO_XDP_REDIR BIT(1) + /* RX packet size EWMA. The average packet size is used to determine the packet * buffer size when refilling RX rings. As the entire RX ring may be refilled * at once, the weight is chosen so that the EWMA will be insensitive to short- @@ -582,7 +586,7 @@ static struct sk_buff *receive_small(struct net_device *dev, struct receive_queue *rq, void *buf, void *ctx, unsigned int len, - bool *xdp_xmit) + unsigned int *xdp_xmit) { struct sk_buff *skb; struct bpf_prog *xdp_prog; @@ -654,14 +658,14 @@ static struct sk_buff *receive_small(struct net_device *dev, trace_xdp_exception(vi->dev, xdp_prog, act); goto err_xdp; } - *xdp_xmit = true; + *xdp_xmit |= VIRTIO_XDP_TX; rcu_read_unlock(); goto xdp_xmit; case XDP_REDIRECT: err = xdp_do_redirect(dev, &xdp, xdp_prog); if (err) goto err_xdp; - *xdp_xmit = true; + *xdp_xmit |= VIRTIO_XDP_REDIR; rcu_read_unlock(); goto xdp_xmit; default: @@ -723,7 +727,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, void *buf, void *ctx, unsigned int len, - bool *xdp_xmit) + unsigned int *xdp_xmit) { struct virtio_net_hdr_mrg_rxbuf *hdr = buf; u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); @@ -818,7 +822,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, put_page(xdp_page); goto err_xdp; } - *xdp_xmit = true; + *xdp_xmit |= VIRTIO_XDP_TX; if (unlikely(xdp_page != page)) put_page(page); rcu_read_unlock(); @@ -830,7 +834,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, put_page(xdp_page); goto err_xdp; } - *xdp_xmit = true; + *xdp_xmit |= VIRTIO_XDP_REDIR; if (unlikely(xdp_page != page)) put_page(page); rcu_read_unlock(); @@ -939,7 +943,8 @@ xdp_xmit: } static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, - void *buf, unsigned int len, void **ctx, bool *xdp_xmit) + void *buf, unsigned int len, void **ctx, + unsigned int *xdp_xmit) { struct net_device *dev = vi->dev; struct sk_buff *skb; @@ -1232,7 +1237,8 @@ static void refill_work(struct work_struct *work) } } -static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit) +static int virtnet_receive(struct receive_queue *rq, int budget, + unsigned int *xdp_xmit) { struct virtnet_info *vi = rq->vq->vdev->priv; unsigned int len, received = 0, bytes = 0; @@ -1321,7 +1327,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget) struct virtnet_info *vi = rq->vq->vdev->priv; struct send_queue *sq; unsigned int received, qp; - bool xdp_xmit = false; + unsigned int xdp_xmit = 0; virtnet_poll_cleantx(rq); @@ -1331,12 +1337,14 @@ static int virtnet_poll(struct napi_struct *napi, int budget) if (received < budget) virtqueue_napi_complete(napi, rq->vq, received); - if (xdp_xmit) { + if (xdp_xmit & VIRTIO_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_xmit & VIRTIO_XDP_TX) { qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); sq = &vi->sq[qp]; virtqueue_kick(sq->vq); - xdp_do_flush_map(); } return received; -- cgit From 
4205c88eaf17b5f3ee30032d68df55cd5d9077a1 Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Wed, 27 Jun 2018 15:03:20 +0100 Subject: net: stmmac: Set DMA buffer size in HW This is clearly a bug. We need to set the DMA buffer size in the HW otherwise corruption can occur when receiving packets. This is probably not occuring because of small MTU values and because HW has a default value internally (which currently is bigger than default buffer size). Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Vitor Soares Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 12 ++++++++++++ drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h | 2 ++ drivers/net/ethernet/stmicro/stmmac/hwif.h | 3 +++ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2 ++ 4 files changed, 19 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index d37f17ca62fe..65bc3556bd8f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -407,6 +407,16 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan) } } +static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); + + value &= ~DMA_RBSZ_MASK; + value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK; + + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); +} + const struct stmmac_dma_ops dwmac4_dma_ops = { .reset = dwmac4_dma_reset, .init = dwmac4_dma_init, @@ -431,6 +441,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = { .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, .enable_tso = dwmac4_enable_tso, + .set_bfsize = dwmac4_set_bfsize, }; const struct stmmac_dma_ops dwmac410_dma_ops = { @@ -457,4 +468,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = { .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, .enable_tso = dwmac4_enable_tso, + .set_bfsize = dwmac4_set_bfsize, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h index c63c1fe3f26b..22a4a6dbb1a4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h @@ -120,6 +120,8 @@ /* DMA Rx Channel X Control register defines */ #define DMA_CONTROL_SR BIT(0) +#define DMA_RBSZ_MASK GENMASK(14, 1) +#define DMA_RBSZ_SHIFT 1 /* Interrupt status per channel */ #define DMA_CHAN_STATUS_REB GENMASK(21, 19) diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index e44e7b26ce82..fe8b536b13f8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -183,6 +183,7 @@ struct stmmac_dma_ops { void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); + void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan); }; #define stmmac_reset(__priv, __args...) \ @@ -235,6 +236,8 @@ struct stmmac_dma_ops { stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args) #define stmmac_enable_tso(__priv, __args...) \ stmmac_do_void_callback(__priv, dma, enable_tso, __args) +#define stmmac_set_dma_bfsize(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, dma, set_bfsize, __args) struct mac_device_info; struct net_device; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index cba46b62a1cd..60f59abab009 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1804,6 +1804,8 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, qmode); + stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz, + chan); } for (chan = 0; chan < tx_channels_count; chan++) { -- cgit From a11e1d432b51f63ba698d044441284a661f01144 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 28 Jun 2018 09:43:44 -0700 Subject: Revert changes to convert to ->poll_mask() and aio IOCB_CMD_POLL The poll() changes were not well thought out, and completely unexplained. They also caused a huge performance regression, because "->poll()" was no longer a trivial file operation that just called down to the underlying file operations, but instead did at least two indirect calls. Indirect calls are sadly slow now with the Spectre mitigation, but the performance problem could at least be largely mitigated by changing the "->get_poll_head()" operation to just have a per-file-descriptor pointer to the poll head instead. That gets rid of one of the new indirections. But that doesn't fix the new complexity that is completely unwarranted for the regular case. The (undocumented) reason for the poll() changes was some alleged AIO poll race fixing, but we don't make the common case slower and more complex for some uncommon special case, so this all really needs way more explanations and most likely a fundamental redesign. [ This revert is a revert of about 30 different commits, not reverted individually because that would just be unnecessarily messy - Linus ] Cc: Al Viro Cc: Christoph Hellwig Signed-off-by: Linus Torvalds --- drivers/net/ppp/pppoe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index de51e8f70f44..ce61231e96ea 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -1107,7 +1107,7 @@ static const struct proto_ops pppoe_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = pppoe_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = sock_no_setsockopt, -- cgit From 5037c6280606396ab4d3065b066d4b574df020dc Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 28 Jun 2018 12:31:25 +0300 Subject: cnic: tidy up a size calculation Static checkers complain that id_tbl->table points to longs and 4 bytes is smaller than sizeof(long). But the since other side is dividing by 32 instead of sizeof(long), that means the current code works fine. Anyway, it's more conventional to use the BITS_TO_LONGS() macro when we're allocating a bitmap. Signed-off-by: Dan Carpenter Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/cnic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 30273a7717e2..4fd829b5e65d 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id, id_tbl->max = size; id_tbl->next = next; spin_lock_init(&id_tbl->lock); - id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL); + id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL); if (!id_tbl->table) return -ENOMEM; -- cgit From 484c016d9392786ce5c74017c206c706f29f823d Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Thu, 28 Jun 2018 04:52:15 -0700 Subject: bnx2x: Fix receiving tx-timeout in error or recovery state. The driver performs an internal reload when it receives a tx-timeout event from the OS. The internal reload might fail in some scenarios, e.g., fatal HW issues. In such cases the OS still sees the link, which results in undesirable behavior such as repeated tx-timeouts. The patch addresses this issue by indicating link-down to the OS when a tx-timeout is detected, and keeping the link down until the internal reload is successful. Please consider applying it to the 'net' branch. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 1 + drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 6 ++++++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 6 ++++++ 3 files changed, 13 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index d847e1b9c37b..be1506169076 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1533,6 +1533,7 @@ struct bnx2x { struct link_vars link_vars; u32 link_cnt; struct bnx2x_link_report_data last_reported_link; + bool force_link_down; struct mdio_if_info mdio; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 8cd73ff5debc..af7b5a4d8ba0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1261,6 +1261,11 @@ void __bnx2x_link_report(struct bnx2x *bp) { struct bnx2x_link_report_data cur_data; + if (bp->force_link_down) { + bp->link_vars.link_up = 0; + return; + } + /* reread mf_cfg */ if (IS_PF(bp) && !CHIP_IS_E1(bp)) bnx2x_read_mf_cfg(bp); @@ -2817,6 +2822,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) bp->pending_max = 0; } + bp->force_link_down = false; if (bp->port.pmf) { rc = bnx2x_initial_phy_init(bp, load_mode); if (rc) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 5b1ed240bf18..57348f2b49a3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -10279,6 +10279,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) bp->sp_rtnl_state = 0; smp_mb(); + /* Immediately indicate link as down */ + bp->link_vars.link_up = 0; + bp->force_link_down = true; + netif_carrier_off(bp->dev); + BNX2X_ERR("Indicating link is down due to Tx-timeout\n"); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); /* When ret value shows failure of allocation failure, * the nic is 
rebooted again. If open still fails, a error -- cgit From b95f6fbc8e15803a596ca5e5e21008fba29694c6 Mon Sep 17 00:00:00 2001 From: Madalin Bucur Date: Thu, 28 Jun 2018 15:26:50 +0300 Subject: fsl/fman: fix parser reporting bad checksum on short frames The FMan hardware parser needs to be configured to remove the short frame padding from the checksum calculation, otherwise short UDP and TCP frames are likely to be marked as having a bad checksum. Signed-off-by: Madalin Bucur Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fman/fman_port.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index ce6e24c74978..ecbf6187e13a 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -324,6 +324,10 @@ struct fman_port_qmi_regs { #define HWP_HXS_PHE_REPORT 0x00000800 #define HWP_HXS_PCAC_PSTAT 0x00000100 #define HWP_HXS_PCAC_PSTOP 0x00000001 +#define HWP_HXS_TCP_OFFSET 0xA +#define HWP_HXS_UDP_OFFSET 0xB +#define HWP_HXS_SH_PAD_REM 0x80000000 + struct fman_port_hwp_regs { struct { u32 ssa; /* Soft Sequence Attachment */ @@ -728,6 +732,10 @@ static void init_hwp(struct fman_port *port) iowrite32be(0xffffffff, &regs->pmda[i].lcv); } + /* Short packet padding removal from checksum calculation */ + iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_TCP_OFFSET].ssa); + iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_UDP_OFFSET].ssa); + start_port_hwp(port); } -- cgit From 595e802e53f24642a145cf7f3e4ac9afab4c21ec Mon Sep 17 00:00:00 2001 From: Madalin Bucur Date: Thu, 28 Jun 2018 15:26:51 +0300 Subject: dpaa_eth: DPAA SGT needs to be 256B The DPAA HW requires that at least 256 bytes from the start of the first scatter-gather table entry are allocated and accessible. The hardware reads the maximum size the table can have in one access, thus requiring the allocation and mapping to be done for the maximum size of 256B even if the table has fewer entries. Signed-off-by: Madalin Bucur Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 5f4e1ffa7b95..ab02057ac730 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -125,6 +125,9 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms"); /* Default alignment for start of data in an Rx FD */ #define DPAA_FD_DATA_ALIGNMENT 16 +/* The DPAA requires 256 bytes reserved and mapped for the SGT */ +#define DPAA_SGT_SIZE 256 + /* Values for the L3R field of the FM Parse Results */ /* L3 Type field: First IP Present IPv4 */ @@ -1617,8 +1620,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { nr_frags = skb_shinfo(skb)->nr_frags; - dma_unmap_single(dev, addr, qm_fd_get_offset(fd) + - sizeof(struct qm_sg_entry) * (1 + nr_frags), + dma_unmap_single(dev, addr, + qm_fd_get_offset(fd) + DPAA_SGT_SIZE, dma_dir); /* The sgt buffer has been allocated with netdev_alloc_frag(), @@ -1903,8 +1906,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, void *sgt_buf; /* get a page frag to store the SGTable */ - sz = SKB_DATA_ALIGN(priv->tx_headroom + - sizeof(struct qm_sg_entry) * (1 + nr_frags)); + sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE); sgt_buf = netdev_alloc_frag(sz); if (unlikely(!sgt_buf)) { netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n", @@ -1972,9 +1974,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, skbh = (struct sk_buff **)buffer_start; *skbh = skb; - addr = dma_map_single(dev, buffer_start, priv->tx_headroom + - sizeof(struct qm_sg_entry) * (1 + nr_frags), - dma_dir); + addr = dma_map_single(dev, buffer_start, + priv->tx_headroom + DPAA_SGT_SIZE, dma_dir); if (unlikely(dma_mapping_error(dev, addr))) { dev_err(dev, "DMA mapping failed"); err = -EINVAL; -- cgit From 713b4a33524721767703956654f4d6713c556f2b Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Fri, 29 Jun 2018 10:28:31 -0500 Subject: net: phy: DP83TC811: Fix disabling interrupts Fix a bug where INT_STAT1 was written twice and INT_STAT2 was ignored when disabling interrupts. Fixes: b753a9faaf9a ("net: phy: DP83TC811: Introduce support for the DP83TC811 phy") Reviewed-by: Andrew Lunn Signed-off-by: Dan Murphy Signed-off-by: David S. Miller --- drivers/net/phy/dp83tc811.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c index 081d99aa3985..49ac678eb2dc 100644 --- a/drivers/net/phy/dp83tc811.c +++ b/drivers/net/phy/dp83tc811.c @@ -222,7 +222,7 @@ static int dp83811_config_intr(struct phy_device *phydev) if (err < 0) return err; - err = phy_write(phydev, MII_DP83811_INT_STAT1, 0); + err = phy_write(phydev, MII_DP83811_INT_STAT2, 0); } return err; -- cgit From cafb39600e7a73263122a0e2db052d691686378f Mon Sep 17 00:00:00 2001 From: Bert Kenward Date: Fri, 29 Jun 2018 16:29:28 +0100 Subject: sfc: correctly initialise filter rwsem for farch Fixes: fc7a6c287ff3 ("sfc: use a semaphore to lock farch filters too") Suggested-by: Joseph Korty Signed-off-by: Bert Kenward Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/farch.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 8edf20967c82..e045a5d6b938 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -2794,6 +2794,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx) if (!state) return -ENOMEM; efx->filter_state = state; + init_rwsem(&state->lock); table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; table->id = EFX_FARCH_FILTER_TABLE_RX_IP; -- cgit From bc800e8b39bad60ccdb83be828da63af71ab87b3 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Fri, 29 Jun 2018 17:51:26 +0200 Subject: alx: take rtnl before calling __alx_open from resume The __alx_open function can be called from ndo_open, which is called under RTNL, or from alx_resume, which isn't. Since commit d768319cd427, we're calling the netif_set_real_num_{tx,rx}_queues functions, which need to be called under RTNL. This is similar to commit 0c2cc02e571a ("igb: Move the calls to set the Tx and Rx queues into igb_open"). Fixes: d768319cd427 ("alx: enable multiple tx queues") Signed-off-by: Sabrina Dubroca Signed-off-by: David S. Miller --- drivers/net/ethernet/atheros/alx/main.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 567ee54504bc..5e5022fa1d04 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1897,13 +1897,19 @@ static int alx_resume(struct device *dev) struct pci_dev *pdev = to_pci_dev(dev); struct alx_priv *alx = pci_get_drvdata(pdev); struct alx_hw *hw = &alx->hw; + int err; alx_reset_phy(hw); if (!netif_running(alx->dev)) return 0; netif_device_attach(alx->dev); - return __alx_open(alx, true); + + rtnl_lock(); + err = __alx_open(alx, true); + rtnl_unlock(); + + return err; } static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); -- cgit From 3ffe64f1a641b80a82d9ef4efa7a05ce69049871 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Fri, 29 Jun 2018 14:07:16 -0700 Subject: hv_netvsc: split sub-channel setup into async and sync When doing device hotplug the sub channel must be async to avoid deadlock issues because device is discovered in softirq context. When doing changes to MTU and number of channels, the setup must be synchronous to avoid races such as when MTU and device settings are done in a single ip command. Reported-by: Thomas Walker Fixes: 8195b1396ec8 ("hv_netvsc: fix deadlock on hotplug") Fixes: 732e49850c5e ("netvsc: fix race on sub channel creation") Signed-off-by: Stephen Hemminger Signed-off-by: Haiyang Zhang Signed-off-by: David S. 
Miller --- drivers/net/hyperv/hyperv_net.h | 2 +- drivers/net/hyperv/netvsc.c | 37 +++++++++++++++++++++++- drivers/net/hyperv/netvsc_drv.c | 17 ++++++++++- drivers/net/hyperv/rndis_filter.c | 61 ++++++++------------------------------- 4 files changed, 65 insertions(+), 52 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 1a924b867b07..4b6e308199d2 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -210,7 +210,7 @@ int netvsc_recv_callback(struct net_device *net, void netvsc_channel_cb(void *context); int netvsc_poll(struct napi_struct *napi, int budget); -void rndis_set_subchannel(struct work_struct *w); +int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev); int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 5d5bd513847f..8e9d0ee1572b 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -65,6 +65,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf) VM_PKT_DATA_INBAND, 0); } +/* Worker to setup sub channels on initial setup + * Initial hotplug event occurs in softirq context + * and can't wait for channels. + */ +static void netvsc_subchan_work(struct work_struct *w) +{ + struct netvsc_device *nvdev = + container_of(w, struct netvsc_device, subchan_work); + struct rndis_device *rdev; + int i, ret; + + /* Avoid deadlock with device removal already under RTNL */ + if (!rtnl_trylock()) { + schedule_work(w); + return; + } + + rdev = nvdev->extension; + if (rdev) { + ret = rndis_set_subchannel(rdev->ndev, nvdev); + if (ret == 0) { + netif_device_attach(rdev->ndev); + } else { + /* fallback to only primary channel */ + for (i = 1; i < nvdev->num_chn; i++) + netif_napi_del(&nvdev->chan_table[i].napi); + + nvdev->max_chn = 1; + nvdev->num_chn = 1; + } + } + + rtnl_unlock(); +} + static struct netvsc_device *alloc_net_device(void) { struct netvsc_device *net_device; @@ -81,7 +116,7 @@ static struct netvsc_device *alloc_net_device(void) init_completion(&net_device->channel_init_wait); init_waitqueue_head(&net_device->subchan_open); - INIT_WORK(&net_device->subchan_work, rndis_set_subchannel); + INIT_WORK(&net_device->subchan_work, netvsc_subchan_work); return net_device; } diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index fe2256bf1d13..dd1d6e115145 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -905,8 +905,20 @@ static int netvsc_attach(struct net_device *ndev, if (IS_ERR(nvdev)) return PTR_ERR(nvdev); - /* Note: enable and attach happen when sub-channels setup */ + if (nvdev->num_chn > 1) { + ret = rndis_set_subchannel(ndev, nvdev); + + /* if unavailable, just proceed with one queue */ + if (ret) { + nvdev->max_chn = 1; + nvdev->num_chn = 1; + } + } + + /* In any case device is now ready */ + netif_device_attach(ndev); + /* Note: enable and attach happen when sub-channels setup */ netif_carrier_off(ndev); if (netif_running(ndev)) { @@ -2089,6 +2101,9 @@ static int netvsc_probe(struct hv_device *dev, memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); + if (nvdev->num_chn > 1) + schedule_work(&nvdev->subchan_work); + /* hw_features computed in rndis_netdev_set_hwcaps() */ net->features = net->hw_features | NETIF_F_HIGHDMA | NETIF_F_SG | diff --git 
a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 5428bb261102..9b4e3c3787e5 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1062,29 +1062,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) * This breaks overlap of processing the host message for the * new primary channel with the initialization of sub-channels. */ -void rndis_set_subchannel(struct work_struct *w) +int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev) { - struct netvsc_device *nvdev - = container_of(w, struct netvsc_device, subchan_work); struct nvsp_message *init_packet = &nvdev->channel_init_pkt; - struct net_device_context *ndev_ctx; - struct rndis_device *rdev; - struct net_device *ndev; - struct hv_device *hv_dev; + struct net_device_context *ndev_ctx = netdev_priv(ndev); + struct hv_device *hv_dev = ndev_ctx->device_ctx; + struct rndis_device *rdev = nvdev->extension; int i, ret; - if (!rtnl_trylock()) { - schedule_work(w); - return; - } - - rdev = nvdev->extension; - if (!rdev) - goto unlock; /* device was removed */ - - ndev = rdev->ndev; - ndev_ctx = netdev_priv(ndev); - hv_dev = ndev_ctx->device_ctx; + ASSERT_RTNL(); memset(init_packet, 0, sizeof(struct nvsp_message)); init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL; @@ -1100,13 +1086,13 @@ void rndis_set_subchannel(struct work_struct *w) VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) { netdev_err(ndev, "sub channel allocate send failed: %d\n", ret); - goto failed; + return ret; } wait_for_completion(&nvdev->channel_init_wait); if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { netdev_err(ndev, "sub channel request failed\n"); - goto failed; + return -EIO; } nvdev->num_chn = 1 + @@ -1125,21 +1111,7 @@ void rndis_set_subchannel(struct work_struct *w) for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) ndev_ctx->tx_table[i] = i % nvdev->num_chn; - netif_device_attach(ndev); - rtnl_unlock(); - return; - -failed: - /* fallback to only primary channel */ - for (i = 1; i < nvdev->num_chn; i++) - netif_napi_del(&nvdev->chan_table[i].napi); - - nvdev->max_chn = 1; - nvdev->num_chn = 1; - - netif_device_attach(ndev); -unlock: - rtnl_unlock(); + return 0; } static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, @@ -1360,21 +1332,12 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, netif_napi_add(net, &net_device->chan_table[i].napi, netvsc_poll, NAPI_POLL_WEIGHT); - if (net_device->num_chn > 1) - schedule_work(&net_device->subchan_work); + return net_device; out: - /* if unavailable, just proceed with one queue */ - if (ret) { - net_device->max_chn = 1; - net_device->num_chn = 1; - } - - /* No sub channels, device is ready */ - if (net_device->num_chn == 1) - netif_device_attach(net); - - return net_device; + /* setting up multiple channels failed */ + net_device->max_chn = 1; + net_device->num_chn = 1; err_dev_remv: rndis_filter_device_remove(dev, net_device); -- cgit From 603d4cf8fe095b1ee78f423d514427be507fb513 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Sat, 30 Jun 2018 17:38:55 +0200 Subject: net: fix use-after-free in GRO with ESP Since the addition of GRO for ESP, gro_receive can consume the skb and return -EINPROGRESS. In that case, the lower layer GRO handler cannot touch the skb anymore. 
Commit 5f114163f2f5 ("net: Add a skb_gro_flush_final helper.") converted some of the gro_receive handlers that can lead to ESP's gro_receive so that they wouldn't access the skb when -EINPROGRESS is returned, but missed other spots, mainly in tunneling protocols. This patch finishes the conversion to using skb_gro_flush_final(), and adds a new helper, skb_gro_flush_final_remcsum(), used in VXLAN and GUE. Fixes: 5f114163f2f5 ("net: Add a skb_gro_flush_final helper.") Signed-off-by: Sabrina Dubroca Reviewed-by: Stefano Brivio Signed-off-by: David S. Miller --- drivers/net/geneve.c | 2 +- drivers/net/vxlan.c | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 750eaa53bf0c..ada33c2d9ac2 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -476,7 +476,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk, out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final(skb, pp, flush); return pp; } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index aee0e60471f1..f6bb1d54d4bd 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -623,9 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk, flush = 0; out: - skb_gro_remcsum_cleanup(skb, &grc); - skb->remcsum_offload = 0; - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final_remcsum(skb, pp, flush, &grc); return pp; } -- cgit From 5dc2d3996a8b221c20dd0900bdad45031a572530 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Sun, 1 Jul 2018 16:21:21 +0800 Subject: ipvlan: call dev_change_flags when ipvlan mode is reset After we change the ipvlan mode from l3 to l2, or vice versa, we only reset IFF_NOARP flag, but don't flush the ARP table cache, which will cause eth->h_dest to be equal to eth->h_source in ipvlan_xmit_mode_l2(). Then the message will not come out of host. Here is the reproducer on local host: ip link set eth1 up ip addr add 192.168.1.1/24 dev eth1 ip link add link eth1 ipvlan1 type ipvlan mode l3 ip netns add net1 ip link set ipvlan1 netns net1 ip netns exec net1 ip link set ipvlan1 up ip netns exec net1 ip addr add 192.168.2.1/24 dev ipvlan1 ip route add 192.168.2.0/24 via 192.168.1.2 ping 192.168.2.2 -c 2 ip netns exec net1 ip link set ipvlan1 type ipvlan mode l2 ping 192.168.2.2 -c 2 Add the same configuration on remote host. After we set the mode to l2, we could find that the src/dst MAC addresses are the same on eth1: 21:26:06.648565 00:b7:13:ad:d3:05 > 00:b7:13:ad:d3:05, ethertype IPv4 (0x0800), length 98: (tos 0x0, ttl 64, id 58356, offset 0, flags [DF], proto ICMP (1), length 84) 192.168.2.1 > 192.168.2.2: ICMP echo request, id 22686, seq 1, length 64 Fix this by calling dev_change_flags(), which will call netdevice notifier with flag change info. v2: a) As pointed out by Wang Cong, check return value for dev_change_flags() when change dev flags. b) As suggested by Stefano and Sabrina, move flags setting before l3mdev_ops. So we don't need to redo ipvlan_{, un}register_nf_hook() again in err path. Reported-by: Jianlin Shi Reviewed-by: Stefano Brivio Reviewed-by: Sabrina Dubroca Fixes: 2ad7bf3638411 ("ipvlan: Initial check-in of the IPVLAN driver.") Signed-off-by: Hangbin Liu Signed-off-by: David S. 
Miller --- drivers/net/ipvlan/ipvlan_main.c | 36 ++++++++++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 23c1d6600241..4a949569ec4c 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -75,10 +75,23 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval) { struct ipvl_dev *ipvlan; struct net_device *mdev = port->dev; - int err = 0; + unsigned int flags; + int err; ASSERT_RTNL(); if (port->mode != nval) { + list_for_each_entry(ipvlan, &port->ipvlans, pnode) { + flags = ipvlan->dev->flags; + if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) { + err = dev_change_flags(ipvlan->dev, + flags | IFF_NOARP); + } else { + err = dev_change_flags(ipvlan->dev, + flags & ~IFF_NOARP); + } + if (unlikely(err)) + goto fail; + } if (nval == IPVLAN_MODE_L3S) { /* New mode is L3S */ err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); @@ -86,21 +99,28 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval) mdev->l3mdev_ops = &ipvl_l3mdev_ops; mdev->priv_flags |= IFF_L3MDEV_MASTER; } else - return err; + goto fail; } else if (port->mode == IPVLAN_MODE_L3S) { /* Old mode was L3S */ mdev->priv_flags &= ~IFF_L3MDEV_MASTER; ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); mdev->l3mdev_ops = NULL; } - list_for_each_entry(ipvlan, &port->ipvlans, pnode) { - if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) - ipvlan->dev->flags |= IFF_NOARP; - else - ipvlan->dev->flags &= ~IFF_NOARP; - } port->mode = nval; } + return 0; + +fail: + /* Undo the flags changes that have been done so far. */ + list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) { + flags = ipvlan->dev->flags; + if (port->mode == IPVLAN_MODE_L3 || + port->mode == IPVLAN_MODE_L3S) + dev_change_flags(ipvlan->dev, flags | IFF_NOARP); + else + dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP); + } + return err; } -- cgit From bb7858ba1102f82470a917e041fd23e6385c31be Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Sun, 1 Jul 2018 20:03:05 -0700 Subject: qed: Limit msix vectors in kdump kernel to the minimum required count. Memory size is limited in the kdump kernel environment. Allocation of more msix-vectors (or queues) consumes a few tens of MBs of memory, which might lead to kdump kernel failure. This patch limits the number of MSI-X vectors in the kdump kernel to the minimum required value (i.e., 2 per engine). Fixes: fe56b9e6a ("qed: Add module with basic common support") Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Michal Kalderon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 5c10fd7210c3..0cbc74d6ca8b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -789,6 +789,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, /* We want a minimum of one slowpath and one fastpath vector per hwfn */ cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; + if (is_kdump_kernel()) { + DP_INFO(cdev, + "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n", + cdev->int_params.in.min_msix_cnt); + cdev->int_params.in.num_vectors = + cdev->int_params.in.min_msix_cnt; + } + rc = qed_set_int_mode(cdev, false); if (rc) { DP_ERR(cdev, "qed_slowpath_setup_int ERR\n"); -- cgit From 538f8d00ba8bb417c4d9e76c61dee59d812d8287 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Sun, 1 Jul 2018 20:03:06 -0700 Subject: qed: Fix setting of incorrect eswitch mode. By default, the driver incorrectly sets the eswitch mode to VEB (virtual Ethernet bridging). The VEB eswitch mode should be set only when SR-IOV is enabled, and it should be NONE by default. The patch incorporates this change. Fixes: 0fefbfbaa ("qed*: Management firmware - notifications and defaults") Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Michal Kalderon Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 19 +++++++++++++++++-- 2 files changed, 18 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 329781cda77f..e5249b4741d0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1804,7 +1804,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) DP_INFO(p_hwfn, "Failed to update driver state\n"); rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt, - QED_OV_ESWITCH_VEB); + QED_OV_ESWITCH_NONE); if (rc) DP_INFO(p_hwfn, "Failed to update eswitch mode\n"); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index f01bf52bc381..fd59cf45f4be 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -4513,6 +4513,8 @@ static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn, static int qed_sriov_enable(struct qed_dev *cdev, int num) { struct qed_iov_vf_init_params params; + struct qed_hwfn *hwfn; + struct qed_ptt *ptt; int i, j, rc; if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { @@ -4525,8 +4527,8 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num) /* Initialize HW for VF access */ for_each_hwfn(cdev, j) { - struct qed_hwfn *hwfn = &cdev->hwfns[j]; - struct qed_ptt *ptt = qed_ptt_acquire(hwfn); + hwfn = &cdev->hwfns[j]; + ptt = qed_ptt_acquire(hwfn); /* Make sure not to use more than 16 queues per VF */ params.num_queues = min_t(int, @@ -4562,6 +4564,19 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num) goto err; } + hwfn = QED_LEADING_HWFN(cdev); + ptt = qed_ptt_acquire(hwfn); + if (!ptt) { + DP_ERR(hwfn, "Failed to acquire ptt\n"); + rc = -EBUSY; + goto err; + } + + rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB); + if (rc) + DP_INFO(cdev, "Failed to update eswitch mode\n"); + 
qed_ptt_release(hwfn, ptt); + return num; err: -- cgit From cc9b27cdf7bd3c86df73439758ac1564bc8f5bbe Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Sun, 1 Jul 2018 20:03:07 -0700 Subject: qed: Fix use of incorrect size in memcpy call. Use the correct size value while copying chassis/port id values. Fixes: 6ad8c632e ("qed: Add support for query/config dcbx.") Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Michal Kalderon Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index f0b01385d5cb..e0680ce91328 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -709,9 +709,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn, p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE]; memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id, - ARRAY_SIZE(p_local->local_chassis_id)); + sizeof(p_local->local_chassis_id)); memcpy(params->lldp_local.local_port_id, p_local->local_port_id, - ARRAY_SIZE(p_local->local_port_id)); + sizeof(p_local->local_port_id)); } static void @@ -723,9 +723,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn, p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE]; memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id, - ARRAY_SIZE(p_remote->peer_chassis_id)); + sizeof(p_remote->peer_chassis_id)); memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id, - ARRAY_SIZE(p_remote->peer_port_id)); + sizeof(p_remote->peer_port_id)); } static int -- cgit From 82a4e71b1565dea8387f54503e806cf374e779ec Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Sun, 1 Jul 2018 20:03:08 -0700 Subject: qede: Advertise software timestamp caps when PHC is not available. When the ptp clock is not available for a PF (e.g., higher PFs in NPAR mode), the get-tsinfo() callback should return the software timestamp capabilities instead of returning an error. Fixes: 4c55215c ("qede: Add driver support for PTP") Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Michal Kalderon Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede_ptp.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 02adb513f475..013ff567283c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -337,8 +337,14 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info) { struct qede_ptp *ptp = edev->ptp; - if (!ptp) - return -EIO; + if (!ptp) { + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + + return 0; + } info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | -- cgit
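
For reference, the effect of the qede change above can be checked from user space through the ETHTOOL_GET_TS_INFO ioctl. The following is only an illustrative sketch, not part of any patch in this series; the interface name "eth0" is an assumption and error handling is minimal.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	/* Query the timestamping capabilities of an interface (assumed "eth0"). */
	struct ethtool_ts_info tsi;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&tsi, 0, sizeof(tsi));
	tsi.cmd = ETHTOOL_GET_TS_INFO;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&tsi;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GET_TS_INFO");
		close(fd);
		return 1;
	}

	/* With the fix above, a PF without a PHC reports phc_index == -1 and
	 * the SOF_TIMESTAMPING_*_SOFTWARE bits instead of failing with -EIO.
	 */
	printf("so_timestamping=0x%x phc_index=%d\n",
	       tsi.so_timestamping, tsi.phc_index);
	close(fd);
	return 0;
}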