Diffstat (limited to 'drivers/net'): 152 files changed, 3952 insertions, 2324 deletions
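A pattern repeated throughout the hunks below is the conversion of the legacy u32 phydev->supported / phydev->advertising bitmasks (SUPPORTED_* / ADVERTISED_* flags) into ethtool link-mode bitmaps manipulated with the linkmode helpers. As a rough guide to reading those hunks, here is a minimal sketch of the new style, mirroring the b44, bcmgenet and gianfar changes; it is illustrative only (the helper name example_limit_phy is hypothetical) and is not part of the patch itself.

#include <linux/linkmode.h>
#include <linux/phy.h>

/* Illustrative sketch, not part of the diff: restrict a PHY to the
 * modes a MAC supports using linkmode bitmaps instead of u32 masks.
 */
static void example_limit_phy(struct phy_device *phydev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* Build the MAC-supported set bit by bit ... */
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);

	/* ... keep only what both MAC and PHY can do, then advertise it */
	linkmode_and(phydev->supported, phydev->supported, mask);
	linkmode_copy(phydev->advertising, phydev->supported);
}

Where a driver still keeps a legacy u32 capability word (for example mac_dev->if_support in the dpaa hunk), ethtool_convert_legacy_u32_to_link_mode() is used to lift it into a bitmap before the linkmode_and().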
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index ed6828821fbd..80af658e530d 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -207,7 +207,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev, return PTR_ERR(peer_net); peer = rtnl_create_link(peer_net, ifname, name_assign_type, - &vxcan_link_ops, tbp); + &vxcan_link_ops, tbp, extack); if (IS_ERR(peer)) { put_net(peer_net); return PTR_ERR(peer); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 2eb68769562c..aa4a1f5206f1 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -710,6 +710,10 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds) return ret; } + ret = bcm_sf2_cfp_resume(ds); + if (ret) + return ret; + if (priv->hw_params.num_gphy == 1) bcm_sf2_gphy_enable_set(ds, true); @@ -1061,6 +1065,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) spin_lock_init(&priv->indir_lock); mutex_init(&priv->stats_mutex); mutex_init(&priv->cfp.lock); + INIT_LIST_HEAD(&priv->cfp.rules_list); /* CFP rule #0 cannot be used for specific classifications, flag it as * permanently used @@ -1090,12 +1095,16 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) return ret; } + bcm_sf2_gphy_enable_set(priv->dev->ds, true); + ret = bcm_sf2_mdio_register(ds); if (ret) { pr_err("failed to register MDIO bus\n"); return ret; } + bcm_sf2_gphy_enable_set(priv->dev->ds, false); + ret = bcm_sf2_cfp_rst(priv); if (ret) { pr_err("failed to reset CFP\n"); @@ -1166,6 +1175,7 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev) priv->wol_ports_mask = 0; dsa_unregister_switch(priv->dev->ds); + bcm_sf2_cfp_exit(priv->dev->ds); /* Disable all ports and interrupts */ bcm_sf2_sw_suspend(priv->dev->ds); bcm_sf2_mdio_unregister(priv); diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index cc31e986e6e3..faaef320ec48 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -56,6 +56,7 @@ struct bcm_sf2_cfp_priv { DECLARE_BITMAP(used, CFP_NUM_RULES); DECLARE_BITMAP(unique, CFP_NUM_RULES); unsigned int rules_cnt; + struct list_head rules_list; }; struct bcm_sf2_priv { @@ -213,5 +214,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port, int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port, struct ethtool_rxnfc *nfc); int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv); +void bcm_sf2_cfp_exit(struct dsa_switch *ds); +int bcm_sf2_cfp_resume(struct dsa_switch *ds); #endif /* __BCM_SF2_H */ diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index 47c5f272a084..e14663ab6dbc 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -20,6 +20,12 @@ #include "bcm_sf2.h" #include "bcm_sf2_regs.h" +struct cfp_rule { + int port; + struct ethtool_rx_flow_spec fs; + struct list_head next; +}; + struct cfp_udf_slice_layout { u8 slices[UDFS_PER_SLICE]; u32 mask_value; @@ -515,6 +521,61 @@ static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv, core_writel(priv, reg, offset); } +static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv, + int port, u32 location) +{ + struct cfp_rule *rule = NULL; + + list_for_each_entry(rule, &priv->cfp.rules_list, next) { + if (rule->port == port && rule->fs.location == location) + break; + } + + return rule; +} + +static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port, + struct ethtool_rx_flow_spec *fs) +{ + struct cfp_rule *rule = NULL; + size_t fs_size = 0; + int ret = 1; + + if (list_empty(&priv->cfp.rules_list)) + 
return ret; + + list_for_each_entry(rule, &priv->cfp.rules_list, next) { + ret = 1; + if (rule->port != port) + continue; + + if (rule->fs.flow_type != fs->flow_type || + rule->fs.ring_cookie != fs->ring_cookie || + rule->fs.m_ext.data[0] != fs->m_ext.data[0]) + continue; + + switch (fs->flow_type & ~FLOW_EXT) { + case TCP_V6_FLOW: + case UDP_V6_FLOW: + fs_size = sizeof(struct ethtool_tcpip6_spec); + break; + case TCP_V4_FLOW: + case UDP_V4_FLOW: + fs_size = sizeof(struct ethtool_tcpip4_spec); + break; + default: + continue; + } + + ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size); + ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size); + if (ret == 0) + break; + } + + return ret; +} + static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, unsigned int port_num, unsigned int queue_num, @@ -728,27 +789,14 @@ out_err: return ret; } -static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, - struct ethtool_rx_flow_spec *fs) +static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port, + struct ethtool_rx_flow_spec *fs) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); s8 cpu_port = ds->ports[port].cpu_dp->index; __u64 ring_cookie = fs->ring_cookie; unsigned int queue_num, port_num; - int ret = -EINVAL; - - /* Check for unsupported extensions */ - if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype || - fs->m_ext.data[1])) - return -EINVAL; - - if (fs->location != RX_CLS_LOC_ANY && - test_bit(fs->location, priv->cfp.used)) - return -EBUSY; - - if (fs->location != RX_CLS_LOC_ANY && - fs->location > bcm_sf2_cfp_rule_size(priv)) - return -EINVAL; + int ret; /* This rule is a Wake-on-LAN filter and we must specifically * target the CPU port in order for it to be working. @@ -787,12 +835,54 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, queue_num, fs); break; default: + ret = -EINVAL; break; } return ret; } +static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, + struct ethtool_rx_flow_spec *fs) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + struct cfp_rule *rule = NULL; + int ret = -EINVAL; + + /* Check for unsupported extensions */ + if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype || + fs->m_ext.data[1])) + return -EINVAL; + + if (fs->location != RX_CLS_LOC_ANY && + test_bit(fs->location, priv->cfp.used)) + return -EBUSY; + + if (fs->location != RX_CLS_LOC_ANY && + fs->location > bcm_sf2_cfp_rule_size(priv)) + return -EINVAL; + + ret = bcm_sf2_cfp_rule_cmp(priv, port, fs); + if (ret == 0) + return -EEXIST; + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + ret = bcm_sf2_cfp_rule_insert(ds, port, fs); + if (ret) { + kfree(rule); + return ret; + } + + rule->port = port; + memcpy(&rule->fs, fs, sizeof(*fs)); + list_add_tail(&rule->next, &priv->cfp.rules_list); + + return ret; +} + static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port, u32 loc, u32 *next_loc) { @@ -830,19 +920,12 @@ static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port, return 0; } -static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, - u32 loc) +static int bcm_sf2_cfp_rule_remove(struct bcm_sf2_priv *priv, int port, + u32 loc) { u32 next_loc = 0; int ret; - /* Refuse deleting unused rules, and those that are not unique since - * that could leave IPv6 rules with one of the chained rule in the - * table. 
- */ - if (!test_bit(loc, priv->cfp.unique) || loc == 0) - return -EINVAL; - ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc); if (ret) return ret; @@ -854,318 +937,54 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, return ret; } -static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow) +static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc) { - unsigned int i; - - for (i = 0; i < sizeof(flow->m_u); i++) - flow->m_u.hdata[i] ^= 0xff; - - flow->m_ext.vlan_etype ^= cpu_to_be16(~0); - flow->m_ext.vlan_tci ^= cpu_to_be16(~0); - flow->m_ext.data[0] ^= cpu_to_be32(~0); - flow->m_ext.data[1] ^= cpu_to_be32(~0); -} - -static int bcm_sf2_cfp_unslice_ipv4(struct bcm_sf2_priv *priv, - struct ethtool_tcpip4_spec *v4_spec, - bool mask) -{ - u32 reg, offset, ipv4; - u16 src_dst_port; - - if (mask) - offset = CORE_CFP_MASK_PORT(3); - else - offset = CORE_CFP_DATA_PORT(3); - - reg = core_readl(priv, offset); - /* src port [15:8] */ - src_dst_port = reg << 8; - - if (mask) - offset = CORE_CFP_MASK_PORT(2); - else - offset = CORE_CFP_DATA_PORT(2); - - reg = core_readl(priv, offset); - /* src port [7:0] */ - src_dst_port |= (reg >> 24); - - v4_spec->pdst = cpu_to_be16(src_dst_port); - v4_spec->psrc = cpu_to_be16((u16)(reg >> 8)); - - /* IPv4 dst [15:8] */ - ipv4 = (reg & 0xff) << 8; - - if (mask) - offset = CORE_CFP_MASK_PORT(1); - else - offset = CORE_CFP_DATA_PORT(1); - - reg = core_readl(priv, offset); - /* IPv4 dst [31:16] */ - ipv4 |= ((reg >> 8) & 0xffff) << 16; - /* IPv4 dst [7:0] */ - ipv4 |= (reg >> 24) & 0xff; - v4_spec->ip4dst = cpu_to_be32(ipv4); - - /* IPv4 src [15:8] */ - ipv4 = (reg & 0xff) << 8; - - if (mask) - offset = CORE_CFP_MASK_PORT(0); - else - offset = CORE_CFP_DATA_PORT(0); - reg = core_readl(priv, offset); + struct cfp_rule *rule; + int ret; - /* Once the TCAM is programmed, the mask reflects the slice number - * being matched, don't bother checking it when reading back the - * mask spec + /* Refuse deleting unused rules, and those that are not unique since + * that could leave IPv6 rules with one of the chained rule in the + * table. 
*/ - if (!mask && !(reg & SLICE_VALID)) + if (!test_bit(loc, priv->cfp.unique) || loc == 0) return -EINVAL; - /* IPv4 src [7:0] */ - ipv4 |= (reg >> 24) & 0xff; - /* IPv4 src [31:16] */ - ipv4 |= ((reg >> 8) & 0xffff) << 16; - v4_spec->ip4src = cpu_to_be32(ipv4); - - return 0; -} - -static int bcm_sf2_cfp_ipv4_rule_get(struct bcm_sf2_priv *priv, int port, - struct ethtool_rx_flow_spec *fs) -{ - struct ethtool_tcpip4_spec *v4_spec = NULL, *v4_m_spec = NULL; - u32 reg; - int ret; - - reg = core_readl(priv, CORE_CFP_DATA_PORT(6)); - - switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) { - case IPPROTO_TCP: - fs->flow_type = TCP_V4_FLOW; - v4_spec = &fs->h_u.tcp_ip4_spec; - v4_m_spec = &fs->m_u.tcp_ip4_spec; - break; - case IPPROTO_UDP: - fs->flow_type = UDP_V4_FLOW; - v4_spec = &fs->h_u.udp_ip4_spec; - v4_m_spec = &fs->m_u.udp_ip4_spec; - break; - default: + rule = bcm_sf2_cfp_rule_find(priv, port, loc); + if (!rule) return -EINVAL; - } - - fs->m_ext.data[0] = cpu_to_be32((reg >> IP_FRAG_SHIFT) & 1); - v4_spec->tos = (reg >> IPTOS_SHIFT) & IPTOS_MASK; - - ret = bcm_sf2_cfp_unslice_ipv4(priv, v4_spec, false); - if (ret) - return ret; - - return bcm_sf2_cfp_unslice_ipv4(priv, v4_m_spec, true); -} - -static int bcm_sf2_cfp_unslice_ipv6(struct bcm_sf2_priv *priv, - __be32 *ip6_addr, __be16 *port, - bool mask) -{ - u32 reg, tmp, offset; - - /* C-Tag [31:24] - * UDF_n_B8 [23:8] (port) - * UDF_n_B7 (upper) [7:0] (addr[15:8]) - */ - if (mask) - offset = CORE_CFP_MASK_PORT(4); - else - offset = CORE_CFP_DATA_PORT(4); - reg = core_readl(priv, offset); - *port = cpu_to_be32(reg) >> 8; - tmp = (u32)(reg & 0xff) << 8; - - /* UDF_n_B7 (lower) [31:24] (addr[7:0]) - * UDF_n_B6 [23:8] (addr[31:16]) - * UDF_n_B5 (upper) [7:0] (addr[47:40]) - */ - if (mask) - offset = CORE_CFP_MASK_PORT(3); - else - offset = CORE_CFP_DATA_PORT(3); - reg = core_readl(priv, offset); - tmp |= (reg >> 24) & 0xff; - tmp |= (u32)((reg >> 8) << 16); - ip6_addr[3] = cpu_to_be32(tmp); - tmp = (u32)(reg & 0xff) << 8; - - /* UDF_n_B5 (lower) [31:24] (addr[39:32]) - * UDF_n_B4 [23:8] (addr[63:48]) - * UDF_n_B3 (upper) [7:0] (addr[79:72]) - */ - if (mask) - offset = CORE_CFP_MASK_PORT(2); - else - offset = CORE_CFP_DATA_PORT(2); - reg = core_readl(priv, offset); - tmp |= (reg >> 24) & 0xff; - tmp |= (u32)((reg >> 8) << 16); - ip6_addr[2] = cpu_to_be32(tmp); - tmp = (u32)(reg & 0xff) << 8; - /* UDF_n_B3 (lower) [31:24] (addr[71:64]) - * UDF_n_B2 [23:8] (addr[95:80]) - * UDF_n_B1 (upper) [7:0] (addr[111:104]) - */ - if (mask) - offset = CORE_CFP_MASK_PORT(1); - else - offset = CORE_CFP_DATA_PORT(1); - reg = core_readl(priv, offset); - tmp |= (reg >> 24) & 0xff; - tmp |= (u32)((reg >> 8) << 16); - ip6_addr[1] = cpu_to_be32(tmp); - tmp = (u32)(reg & 0xff) << 8; + ret = bcm_sf2_cfp_rule_remove(priv, port, loc); - /* UDF_n_B1 (lower) [31:24] (addr[103:96]) - * UDF_n_B0 [23:8] (addr[127:112]) - * Reserved [7:4] - * Slice ID [3:2] - * Slice valid [1:0] - */ - if (mask) - offset = CORE_CFP_MASK_PORT(0); - else - offset = CORE_CFP_DATA_PORT(0); - reg = core_readl(priv, offset); - tmp |= (reg >> 24) & 0xff; - tmp |= (u32)((reg >> 8) << 16); - ip6_addr[0] = cpu_to_be32(tmp); + list_del(&rule->next); + kfree(rule); - if (!mask && !(reg & SLICE_VALID)) - return -EINVAL; - - return 0; + return ret; } -static int bcm_sf2_cfp_ipv6_rule_get(struct bcm_sf2_priv *priv, int port, - struct ethtool_rx_flow_spec *fs, - u32 next_loc) +static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow) { - struct ethtool_tcpip6_spec *v6_spec = NULL, *v6_m_spec = NULL; - 
u32 reg; - int ret; - - /* UDPv6 and TCPv6 both use ethtool_tcpip6_spec so we are fine - * assuming tcp_ip6_spec here being an union. - */ - v6_spec = &fs->h_u.tcp_ip6_spec; - v6_m_spec = &fs->m_u.tcp_ip6_spec; - - /* Read the second half first */ - ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6dst, &v6_spec->pdst, - false); - if (ret) - return ret; - - ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6dst, - &v6_m_spec->pdst, true); - if (ret) - return ret; - - /* Read last to avoid next entry clobbering the results during search - * operations. We would not have the port enabled for this rule, so - * don't bother checking it. - */ - (void)core_readl(priv, CORE_CFP_DATA_PORT(7)); - - /* The slice number is valid, so read the rule we are chained from now - * which is our first half. - */ - bcm_sf2_cfp_rule_addr_set(priv, next_loc); - ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL); - if (ret) - return ret; - - reg = core_readl(priv, CORE_CFP_DATA_PORT(6)); - - switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) { - case IPPROTO_TCP: - fs->flow_type = TCP_V6_FLOW; - break; - case IPPROTO_UDP: - fs->flow_type = UDP_V6_FLOW; - break; - default: - return -EINVAL; - } + unsigned int i; - ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6src, &v6_spec->psrc, - false); - if (ret) - return ret; + for (i = 0; i < sizeof(flow->m_u); i++) + flow->m_u.hdata[i] ^= 0xff; - return bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6src, - &v6_m_spec->psrc, true); + flow->m_ext.vlan_etype ^= cpu_to_be16(~0); + flow->m_ext.vlan_tci ^= cpu_to_be16(~0); + flow->m_ext.data[0] ^= cpu_to_be32(~0); + flow->m_ext.data[1] ^= cpu_to_be32(~0); } static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port, struct ethtool_rxnfc *nfc) { - u32 reg, ipv4_or_chain_id; - unsigned int queue_num; - int ret; - - bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location); - - ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM); - if (ret) - return ret; - - reg = core_readl(priv, CORE_ACT_POL_DATA0); - - ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL); - if (ret) - return ret; - - /* Extract the destination port */ - nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) & - DST_MAP_IB_MASK) - 1; - - /* There is no Port 6, so we compensate for that here */ - if (nfc->fs.ring_cookie >= 6) - nfc->fs.ring_cookie++; - nfc->fs.ring_cookie *= SF2_NUM_EGRESS_QUEUES; - - /* Extract the destination queue */ - queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK; - nfc->fs.ring_cookie += queue_num; - - /* Extract the L3_FRAMING or CHAIN_ID */ - reg = core_readl(priv, CORE_CFP_DATA_PORT(6)); + struct cfp_rule *rule; - /* With IPv6 rules this would contain a non-zero chain ID since - * we reserve entry 0 and it cannot be used. So if we read 0 here - * this means an IPv4 rule. 
- */ - ipv4_or_chain_id = (reg >> L3_FRAMING_SHIFT) & 0xff; - if (ipv4_or_chain_id == 0) - ret = bcm_sf2_cfp_ipv4_rule_get(priv, port, &nfc->fs); - else - ret = bcm_sf2_cfp_ipv6_rule_get(priv, port, &nfc->fs, - ipv4_or_chain_id); - if (ret) - return ret; - - /* Read last to avoid next entry clobbering the results during search - * operations - */ - reg = core_readl(priv, CORE_CFP_DATA_PORT(7)); - if (!(reg & 1 << port)) + rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location); + if (!rule) return -EINVAL; + memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs)); + bcm_sf2_invert_masks(&nfc->fs); /* Put the TCAM size here */ @@ -1302,3 +1121,51 @@ int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv) return 0; } + +void bcm_sf2_cfp_exit(struct dsa_switch *ds) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + struct cfp_rule *rule, *n; + + if (list_empty(&priv->cfp.rules_list)) + return; + + list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next) + bcm_sf2_cfp_rule_del(priv, rule->port, rule->fs.location); +} + +int bcm_sf2_cfp_resume(struct dsa_switch *ds) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + struct cfp_rule *rule; + int ret = 0; + u32 reg; + + if (list_empty(&priv->cfp.rules_list)) + return ret; + + reg = core_readl(priv, CORE_CFP_CTL_REG); + reg &= ~CFP_EN_MAP_MASK; + core_writel(priv, reg, CORE_CFP_CTL_REG); + + ret = bcm_sf2_cfp_rst(priv); + if (ret) + return ret; + + list_for_each_entry(rule, &priv->cfp.rules_list, next) { + ret = bcm_sf2_cfp_rule_remove(priv, rule->port, + rule->fs.location); + if (ret) { + dev_err(ds->dev, "failed to remove rule\n"); + return ret; + } + + ret = bcm_sf2_cfp_rule_insert(ds, rule->port, &rule->fs); + if (ret) { + dev_err(ds->dev, "failed to restore rule\n"); + return ret; + } + } + + return ret; +} diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index a5de9bffe5be..74547f43b938 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -658,7 +658,8 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port, if (phydev->asym_pause) rmt_adv |= LPA_PAUSE_ASYM; - lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising); + lcl_adv = linkmode_adv_to_lcl_adv_t( + phydev->advertising); flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); if (flowctrl & FLOW_CTRL_TX) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index e05d4eddc935..fc0f508879d4 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3234,6 +3234,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_set_cmode = mv88e6390_port_set_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3276,6 +3277,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_set_cmode = mv88e6390x_port_set_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3291,8 +3293,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = 
mv88e6390x_serdes_power, - .serdes_irq_setup = mv88e6390_serdes_irq_setup, - .serdes_irq_free = mv88e6390_serdes_irq_free, + .serdes_irq_setup = mv88e6390x_serdes_irq_setup, + .serdes_irq_free = mv88e6390x_serdes_irq_free, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6390x_phylink_validate, }; @@ -3318,6 +3320,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_set_cmode = mv88e6390_port_set_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3405,11 +3408,11 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, .port_pause_limit = mv88e6390_port_pause_limit, - .port_set_cmode = mv88e6390x_port_set_cmode, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_set_cmode = mv88e6390_port_set_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3710,11 +3713,11 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .port_set_jumbo_size = mv88e6165_port_set_jumbo_size, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_limit = mv88e6390_port_pause_limit, - .port_set_cmode = mv88e6390x_port_set_cmode, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_set_cmode = mv88e6390_port_set_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3757,11 +3760,11 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .port_set_jumbo_size = mv88e6165_port_set_jumbo_size, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_limit = mv88e6390_port_pause_limit, - .port_set_cmode = mv88e6390x_port_set_cmode, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_set_cmode = mv88e6390x_port_set_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3777,8 +3780,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390x_serdes_power, - .serdes_irq_setup = mv88e6390_serdes_irq_setup, - .serdes_irq_free = mv88e6390_serdes_irq_free, + .serdes_irq_setup = mv88e6390x_serdes_irq_setup, + .serdes_irq_free = mv88e6390x_serdes_irq_free, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index cd7db60a508b..ebd26b6a93e6 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ 
b/drivers/net/dsa/mv88e6xxx/port.c @@ -368,12 +368,15 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, u16 reg; int err; - if (mode == PHY_INTERFACE_MODE_NA) - return 0; - if (port != 9 && port != 10) return -EOPNOTSUPP; + /* Default to a slow mode, so freeing up SERDES interfaces for + * other ports which might use them for SFPs. + */ + if (mode == PHY_INTERFACE_MODE_NA) + mode = PHY_INTERFACE_MODE_1000BASEX; + switch (mode) { case PHY_INTERFACE_MODE_1000BASEX: cmode = MV88E6XXX_PORT_STS_CMODE_1000BASE_X; @@ -437,6 +440,21 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, return 0; } +int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port, + phy_interface_t mode) +{ + switch (mode) { + case PHY_INTERFACE_MODE_XGMII: + case PHY_INTERFACE_MODE_XAUI: + case PHY_INTERFACE_MODE_RXAUI: + return -EINVAL; + default: + break; + } + + return mv88e6390x_port_set_cmode(chip, port, mode); +} + int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode) { int err; diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index 36904c9bf955..0d81866d0e4a 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -310,6 +310,8 @@ int mv88e6097_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in, u8 out); int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in, u8 out); +int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port, + phy_interface_t mode); int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode); int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode); diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index bb69650ff772..2caa8c8b4b55 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -619,15 +619,11 @@ out: return ret; } -int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port) +int mv88e6390x_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port) { int lane; int err; - /* Only support ports 9 and 10 at the moment */ - if (port < 9) - return 0; - lane = mv88e6390x_serdes_get_lane(chip, port); if (lane == -ENODEV) @@ -663,11 +659,19 @@ int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port) return mv88e6390_serdes_irq_enable(chip, port, lane); } -void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port) +int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port) +{ + if (port < 9) + return 0; + + return mv88e6390_serdes_irq_setup(chip, port); +} + +void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port) { int lane = mv88e6390x_serdes_get_lane(chip, port); - if (port < 9) + if (lane == -ENODEV) return; if (lane < 0) @@ -685,6 +689,14 @@ void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port) chip->ports[port].serdes_irq = 0; } +void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port) +{ + if (port < 9) + return; + + mv88e6390x_serdes_irq_free(chip, port); +} + int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) { u8 cmode = chip->ports[port].cmode; diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h index 7870c5a9ef12..573dce8b1eb4 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h @@ -77,6 +77,8 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); int mv88e6390x_serdes_power(struct 
mv88e6xxx_chip *chip, int port, bool on); int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port); void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port); +int mv88e6390x_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port); +void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port); int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port); int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip, int port, uint8_t *data); diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 7c9348a26cbb..91fc64c1145e 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1283,7 +1283,7 @@ static int greth_mdio_probe(struct net_device *dev) else phy_set_max_speed(phy, SPEED_100); - phy->advertising = phy->supported; + linkmode_copy(phy->advertising, phy->supported); greth->link = 0; greth->speed = 0; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 151bdb629e8a..128cd648ba99 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -857,6 +857,7 @@ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata) static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; struct xgbe_phy_data *phy_data = pdata->phy_data; unsigned int phy_id = phy_data->phydev->phy_id; @@ -878,9 +879,15 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) phy_write(phy_data->phydev, 0x04, 0x0d01); phy_write(phy_data->phydev, 0x00, 0x9140); - phy_data->phydev->supported = PHY_10BT_FEATURES | - PHY_100BT_FEATURES | - PHY_1000BT_FEATURES; + linkmode_set_bit_array(phy_10_100_features_array, + ARRAY_SIZE(phy_10_100_features_array), + supported); + linkmode_set_bit_array(phy_gbit_features_array, + ARRAY_SIZE(phy_gbit_features_array), + supported); + + linkmode_copy(phy_data->phydev->supported, supported); + phy_support_asym_pause(phy_data->phydev); netif_dbg(pdata, drv, pdata->netdev, @@ -891,6 +898,7 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; struct xgbe_phy_data *phy_data = pdata->phy_data; struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom; unsigned int phy_id = phy_data->phydev->phy_id; @@ -951,9 +959,13 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata) reg = phy_read(phy_data->phydev, 0x00); phy_write(phy_data->phydev, 0x00, reg & ~0x00800); - phy_data->phydev->supported = (PHY_10BT_FEATURES | - PHY_100BT_FEATURES | - PHY_1000BT_FEATURES); + linkmode_set_bit_array(phy_10_100_features_array, + ARRAY_SIZE(phy_10_100_features_array), + supported); + linkmode_set_bit_array(phy_gbit_features_array, + ARRAY_SIZE(phy_gbit_features_array), + supported); + linkmode_copy(phy_data->phydev->supported, supported); phy_support_asym_pause(phy_data->phydev); netif_dbg(pdata, drv, pdata->netdev, @@ -976,7 +988,6 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata) struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data = pdata->phy_data; struct phy_device *phydev; - u32 advertising; int ret; /* If we already have a PHY, just return */ @@ -1036,9 +1047,8 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata) xgbe_phy_external_phy_quirks(pdata); - 
ethtool_convert_link_mode_to_legacy_u32(&advertising, - lks->link_modes.advertising); - phydev->advertising &= advertising; + linkmode_and(phydev->advertising, phydev->advertising, + lks->link_modes.advertising); phy_start_aneg(phy_data->phydev); @@ -1497,7 +1507,7 @@ static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata) if (!phy_data->phydev) return; - lcl_adv = ethtool_adv_to_lcl_adv_t(phy_data->phydev->advertising); + lcl_adv = linkmode_adv_to_lcl_adv_t(phy_data->phydev->advertising); if (phy_data->phydev->pause) { XGBE_SET_LP_ADV(lks, Pause); @@ -1815,7 +1825,6 @@ static int xgbe_phy_an_config(struct xgbe_prv_data *pdata) { struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data = pdata->phy_data; - u32 advertising; int ret; ret = xgbe_phy_find_phy_device(pdata); @@ -1825,12 +1834,10 @@ static int xgbe_phy_an_config(struct xgbe_prv_data *pdata) if (!phy_data->phydev) return 0; - ethtool_convert_link_mode_to_legacy_u32(&advertising, - lks->link_modes.advertising); - phy_data->phydev->autoneg = pdata->phy.autoneg; - phy_data->phydev->advertising = phy_data->phydev->supported & - advertising; + linkmode_and(phy_data->phydev->advertising, + phy_data->phydev->supported, + lks->link_modes.advertising); if (pdata->phy.autoneg != AUTONEG_ENABLE) { phy_data->phydev->speed = pdata->phy.speed; diff --git a/drivers/net/ethernet/apm/xgene-v2/mdio.c b/drivers/net/ethernet/apm/xgene-v2/mdio.c index f5fe3bb2e59d..53529cd85162 100644 --- a/drivers/net/ethernet/apm/xgene-v2/mdio.c +++ b/drivers/net/ethernet/apm/xgene-v2/mdio.c @@ -109,6 +109,7 @@ void xge_mdio_remove(struct net_device *ndev) int xge_mdio_config(struct net_device *ndev) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct xge_pdata *pdata = netdev_priv(ndev); struct device *dev = &pdata->pdev->dev; struct mii_bus *mdio_bus; @@ -148,16 +149,17 @@ int xge_mdio_config(struct net_device *ndev) goto err; } - phydev->supported &= ~(SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Half | - SUPPORTED_AUI | - SUPPORTED_MII | - SUPPORTED_FIBRE | - SUPPORTED_BNC); - phydev->advertising = phydev->supported; + linkmode_set_bit_array(phy_10_100_features_array, + ARRAY_SIZE(phy_10_100_features_array), + mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_AUI_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_BNC_BIT, mask); + + linkmode_andnot(phydev->supported, phydev->supported, mask); + linkmode_copy(phydev->advertising, phydev->supported); pdata->phy_speed = SPEED_UNKNOWN; return 0; diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index bd277b0dc615..4406325fdd9f 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -432,7 +432,8 @@ static int arc_emac_open(struct net_device *ndev) phy_dev->autoneg = AUTONEG_ENABLE; phy_dev->speed = 0; phy_dev->duplex = 0; - phy_dev->advertising &= phy_dev->supported; + linkmode_and(phy_dev->advertising, phy_dev->advertising, + phy_dev->supported); priv->last_rx_bd = 0; diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index e445ab724827..f44808959ff3 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -2248,6 +2248,7 @@ static void b44_adjust_link(struct net_device *dev) 
static int b44_register_phy_one(struct b44 *bp) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct mii_bus *mii_bus; struct ssb_device *sdev = bp->sdev; struct phy_device *phydev; @@ -2303,11 +2304,12 @@ static int b44_register_phy_one(struct b44 *bp) } /* mask with MAC supported features */ - phydev->supported &= (SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_MII); - phydev->advertising = phydev->supported; + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask); + linkmode_and(phydev->supported, phydev->supported, mask); + linkmode_copy(phydev->advertising, phydev->supported); bp->old_link = 0; bp->phy_addr = phydev->mdio.addr; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 0e2d99c737e3..4574275ef445 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1068,6 +1068,7 @@ static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable) static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv) { + unsigned int index; u32 reg; /* Disable RXCHK, active filters and Broadcom tag matching */ @@ -1076,6 +1077,15 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv) RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN); rxchk_writel(priv, reg, RXCHK_CONTROL); + /* Make sure we restore correct CID index in case HW lost + * its context during deep idle state + */ + for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) { + rxchk_writel(priv, priv->filters_loc[index] << + RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index)); + rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index)); + } + /* Clear the MagicPacket detection logic */ mpd_enable_set(priv, false); @@ -2189,6 +2199,7 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv, rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index)); rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index)); + priv->filters_loc[index] = nfc->fs.location; set_bit(index, priv->filters); return 0; @@ -2208,6 +2219,7 @@ static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv, * be taken care of during suspend time by bcm_sysport_suspend_to_wol */ clear_bit(index, priv->filters); + priv->filters_loc[index] = 0; return 0; } @@ -2312,7 +2324,7 @@ static int bcm_sysport_map_queues(struct notifier_block *nb, struct bcm_sysport_priv *priv; struct net_device *slave_dev; unsigned int num_tx_queues; - unsigned int q, start, port; + unsigned int q, qp, port; struct net_device *dev; priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier); @@ -2351,20 +2363,61 @@ static int bcm_sysport_map_queues(struct notifier_block *nb, priv->per_port_num_tx_queues = num_tx_queues; - start = find_first_zero_bit(&priv->queue_bitmap, dev->num_tx_queues); - for (q = 0; q < num_tx_queues; q++) { - ring = &priv->tx_rings[q + start]; + for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues; + q++) { + ring = &priv->tx_rings[q]; + + if (ring->inspect) + continue; /* Just remember the mapping actual programming done * during bcm_sysport_init_tx_ring */ - ring->switch_queue = q; + ring->switch_queue = qp; ring->switch_port = port; ring->inspect = true; priv->ring_map[q + port * num_tx_queues] = ring; + qp++; + } + + return 0; +} + +static int bcm_sysport_unmap_queues(struct notifier_block 
*nb, + struct dsa_notifier_register_info *info) +{ + struct bcm_sysport_tx_ring *ring; + struct bcm_sysport_priv *priv; + struct net_device *slave_dev; + unsigned int num_tx_queues; + struct net_device *dev; + unsigned int q, port; + + priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier); + if (priv->netdev != info->master) + return 0; + + dev = info->master; + + if (dev->netdev_ops != &bcm_sysport_netdev_ops) + return 0; + + port = info->port_number; + slave_dev = info->info.dev; + + num_tx_queues = slave_dev->real_num_tx_queues; + + for (q = 0; q < dev->num_tx_queues; q++) { + ring = &priv->tx_rings[q]; - /* Set all queues as being used now */ - set_bit(q + start, &priv->queue_bitmap); + if (ring->switch_port != port) + continue; + + if (!ring->inspect) + continue; + + ring->inspect = false; + priv->ring_map[q + port * num_tx_queues] = NULL; } return 0; @@ -2373,14 +2426,18 @@ static int bcm_sysport_map_queues(struct notifier_block *nb, static int bcm_sysport_dsa_notifier(struct notifier_block *nb, unsigned long event, void *ptr) { - struct dsa_notifier_register_info *info; - - if (event != DSA_PORT_REGISTER) - return NOTIFY_DONE; + int ret = NOTIFY_DONE; - info = ptr; + switch (event) { + case DSA_PORT_REGISTER: + ret = bcm_sysport_map_queues(nb, ptr); + break; + case DSA_PORT_UNREGISTER: + ret = bcm_sysport_unmap_queues(nb, ptr); + break; + } - return notifier_from_errno(bcm_sysport_map_queues(nb, info)); + return notifier_from_errno(ret); } #define REV_FMT "v%2x.%02x" diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index a7a230884a87..0887e6356649 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -786,6 +786,7 @@ struct bcm_sysport_priv { /* Ethtool */ u32 msg_enable; DECLARE_BITMAP(filters, RXCHK_BRCM_TAG_MAX); + u32 filters_loc[RXCHK_BRCM_TAG_MAX]; struct bcm_sysport_stats64 stats64; @@ -795,7 +796,6 @@ struct bcm_sysport_priv { /* map information between switch port queues and local queues */ struct notifier_block dsa_notifier; unsigned int per_port_num_tx_queues; - unsigned long queue_bitmap; struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8]; }; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index d83233ae4a15..510dfc1c236b 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -5731,7 +5731,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event, if (realdev) { dev = cnic_from_netdev(realdev); if (dev) { - vid |= VLAN_TAG_PRESENT; + vid |= VLAN_CFI_MASK; /* make non-zero */ cnic_rcv_netevent(dev->cnic_priv, event, vid); cnic_put(dev); } diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 2d6f090bf644..bf88749505a9 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1169,7 +1169,7 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv, break; } - return 0; + return ret; } static void bcmgenet_power_up(struct bcmgenet_priv *priv, diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index a6cbaca37e94..aceb9b7b55bd 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -226,7 +226,8 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) * capabilities, use that knowledge to also configure the * Reverse 
MII interface correctly. */ - if (dev->phydev->supported & PHY_1000BT_FEATURES) + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + dev->phydev->supported)) port_ctrl = PORT_MODE_EXT_RVMII_50; else port_ctrl = PORT_MODE_EXT_RVMII_25; @@ -317,7 +318,7 @@ int bcmgenet_mii_probe(struct net_device *dev) return ret; } - phydev->advertising = phydev->supported; + linkmode_copy(phydev->advertising, phydev->supported); /* The internal PHY has its link interrupts routed to the * Ethernet MAC ISRs. On GENETv5 there is a hardware issue diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 89295306f161..79b881d9cdb0 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -2157,7 +2157,8 @@ static void tg3_phy_start(struct tg3 *tp) phydev->speed = tp->link_config.speed; phydev->duplex = tp->link_config.duplex; phydev->autoneg = tp->link_config.autoneg; - phydev->advertising = tp->link_config.advertising; + ethtool_convert_legacy_u32_to_link_mode( + phydev->advertising, tp->link_config.advertising); } phy_start(phydev); @@ -4057,8 +4058,9 @@ static int tg3_power_down_prepare(struct tg3 *tp) do_low_power = false; if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, }; struct phy_device *phydev; - u32 phyid, advertising; + u32 phyid; phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); @@ -4067,25 +4069,33 @@ static int tg3_power_down_prepare(struct tg3 *tp) tp->link_config.speed = phydev->speed; tp->link_config.duplex = phydev->duplex; tp->link_config.autoneg = phydev->autoneg; - tp->link_config.advertising = phydev->advertising; - - advertising = ADVERTISED_TP | - ADVERTISED_Pause | - ADVERTISED_Autoneg | - ADVERTISED_10baseT_Half; + ethtool_convert_link_mode_to_legacy_u32( + &tp->link_config.advertising, + phydev->advertising); + + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising); + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + advertising); + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + advertising); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + advertising); if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { - if (tg3_flag(tp, WOL_SPEED_100MB)) - advertising |= - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_10baseT_Full; - else - advertising |= ADVERTISED_10baseT_Full; + if (tg3_flag(tp, WOL_SPEED_100MB)) { + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + advertising); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + advertising); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + advertising); + } else { + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + advertising); + } } - phydev->advertising = advertising; - + linkmode_copy(phydev->advertising, advertising); phy_start_aneg(phydev); phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; @@ -6135,10 +6145,16 @@ static int tg3_setup_phy(struct tg3 *tp, bool force_reset) } /* tp->lock must be held */ -static u64 tg3_refclk_read(struct tg3 *tp) +static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts) { - u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB); - return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32; + u64 stamp; + + ptp_read_system_prets(sts); + stamp = tr32(TG3_EAV_REF_CLCK_LSB); + ptp_read_system_postts(sts); + stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32; + + return stamp; } /* tp->lock must be held */ @@ -6229,13 +6245,14 @@ static int tg3_ptp_adjtime(struct 
ptp_clock_info *ptp, s64 delta) return 0; } -static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, + struct ptp_system_timestamp *sts) { u64 ns; struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); tg3_full_lock(tp, 0); - ns = tg3_refclk_read(tp); + ns = tg3_refclk_read(tp, sts); ns += tp->ptp_adjust; tg3_full_unlock(tp); @@ -6330,7 +6347,7 @@ static const struct ptp_clock_info tg3_ptp_caps = { .pps = 0, .adjfreq = tg3_ptp_adjfreq, .adjtime = tg3_ptp_adjtime, - .gettime64 = tg3_ptp_gettime, + .gettimex64 = tg3_ptp_gettimex, .settime64 = tg3_ptp_settime, .enable = tg3_ptp_enable, }; diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 4b3aecf98f2a..5359c1021f42 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -1080,8 +1080,11 @@ static int octeon_mgmt_open(struct net_device *netdev) /* Set the mode of the interface, RGMII/MII. */ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) { union cvmx_agl_prtx_ctl agl_prtx_ctl; - int rgmii_mode = (netdev->phydev->supported & - (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0; + int rgmii_mode = + (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + netdev->phydev->supported) | + linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + netdev->phydev->supported)) != 0; agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 05a46926016a..956e708c777d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2295,6 +2295,8 @@ static int cxgb_up(struct adapter *adap) static void cxgb_down(struct adapter *adapter) { + struct hash_mac_addr *entry, *tmp; + cancel_work_sync(&adapter->tid_release_task); cancel_work_sync(&adapter->db_full_task); cancel_work_sync(&adapter->db_drop_task); @@ -2303,6 +2305,12 @@ static void cxgb_down(struct adapter *adapter) t4_sge_stop(adapter); t4_free_sge_resources(adapter); + + list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) { + list_del(&entry->list); + kfree(entry); + } + adapter->flags &= ~FULL_INIT_DONE; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 60df66f4d21c..bf7325f6d553 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -217,6 +217,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */ CH_PCI_ID_TABLE_FENTRY(0x6088), /* Custom T62100-CR */ CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */ + CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */ CH_PCI_DEVICE_ID_TABLE_DEFINE_END; #endif /* __T4_PCI_ID_TBL_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index ff84791a0ff8..8ec503c88c06 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -722,6 +722,10 @@ static int adapter_up(struct adapter *adapter) if (adapter->flags & USING_MSIX) name_msix_vecs(adapter); + + /* Initialize hash mac addr list*/ + INIT_LIST_HEAD(&adapter->mac_hlist); + adapter->flags |= FULL_INIT_DONE; } @@ -747,8 
+751,6 @@ static int adapter_up(struct adapter *adapter) enable_rx(adapter); t4vf_sge_start(adapter); - /* Initialize hash mac addr list*/ - INIT_LIST_HEAD(&adapter->mac_hlist); return 0; } @@ -3287,6 +3289,7 @@ err_disable_device: static void cxgb4vf_pci_remove(struct pci_dev *pdev) { struct adapter *adapter = pci_get_drvdata(pdev); + struct hash_mac_addr *entry, *tmp; /* * Tear down driver state associated with device. @@ -3337,6 +3340,11 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev) if (!is_t4(adapter->params.chip)) iounmap(adapter->bar2); kfree(adapter->mbox_log); + list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, + list) { + list_del(&entry->list); + kfree(entry); + } kfree(adapter); } diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index c5ad7a4f4d83..80b2bd3747ce 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1049,30 +1049,35 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, struct be_wrb_params *wrb_params) { + bool insert_vlan = false; u16 vlan_tag = 0; skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) return skb; - if (skb_vlan_tag_present(skb)) + if (skb_vlan_tag_present(skb)) { vlan_tag = be_get_tx_vlan_tag(adapter, skb); + insert_vlan = true; + } if (qnq_async_evt_rcvd(adapter) && adapter->pvid) { - if (!vlan_tag) + if (!insert_vlan) { vlan_tag = adapter->pvid; + insert_vlan = true; + } /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to * skip VLAN insertion */ BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1); } - if (vlan_tag) { + if (insert_vlan) { skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q), vlan_tag); if (unlikely(!skb)) return skb; - skb->vlan_tci = 0; + __vlan_hwaccel_clear_tag(skb); } /* Insert the outer VLAN, if any */ diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 6e0f47f2c8a3..9510c9d78858 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2475,6 +2475,7 @@ static void dpaa_adjust_link(struct net_device *net_dev) static int dpaa_phy_init(struct net_device *net_dev) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct mac_device *mac_dev; struct phy_device *phy_dev; struct dpaa_priv *priv; @@ -2491,7 +2492,9 @@ static int dpaa_phy_init(struct net_device *net_dev) } /* Remove any features not supported by the controller */ - phy_dev->supported &= mac_dev->if_support; + ethtool_convert_legacy_u32_to_link_mode(mask, mac_dev->if_support); + linkmode_and(phy_dev->supported, phy_dev->supported, mask); + phy_support_asym_pause(phy_dev); mac_dev->phy_dev = phy_dev; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 88f7acce38dc..bdfb13b71998 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -1434,8 +1434,11 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPCON, &dpcon); if (err) { - dev_info(dev, "Not enough DPCONs, will go on as-is\n"); - return NULL; + if (err == -ENXIO) + err = -EPROBE_DEFER; + else + dev_info(dev, "Not enough DPCONs, will go on as-is\n"); + return ERR_PTR(err); } err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); @@ -1493,8 +1496,10 @@ alloc_channel(struct dpaa2_eth_priv 
*priv) return NULL; channel->dpcon = setup_dpcon(priv); - if (!channel->dpcon) + if (IS_ERR_OR_NULL(channel->dpcon)) { + err = PTR_ERR(channel->dpcon); goto err_setup; + } err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle, &attr); @@ -1513,7 +1518,7 @@ err_get_attr: free_dpcon(priv, channel->dpcon); err_setup: kfree(channel); - return NULL; + return ERR_PTR(err); } static void free_channel(struct dpaa2_eth_priv *priv, @@ -1547,10 +1552,11 @@ static int setup_dpio(struct dpaa2_eth_priv *priv) for_each_online_cpu(i) { /* Try to allocate a channel */ channel = alloc_channel(priv); - if (!channel) { - dev_info(dev, - "No affine channel for cpu %d and above\n", i); - err = -ENODEV; + if (IS_ERR_OR_NULL(channel)) { + err = PTR_ERR(channel); + if (err != -EPROBE_DEFER) + dev_info(dev, + "No affine channel for cpu %d and above\n", i); goto err_alloc_ch; } @@ -1608,9 +1614,12 @@ err_set_cdan: err_service_reg: free_channel(priv, channel); err_alloc_ch: + if (err == -EPROBE_DEFER) + return err; + if (cpumask_empty(&priv->dpio_cpumask)) { dev_err(dev, "No cpu with an affine DPIO/DPCON\n"); - return err; + return -ENODEV; } dev_info(dev, "Cores %*pbl available for processing ingress traffic\n", @@ -1732,7 +1741,10 @@ static int setup_dpbp(struct dpaa2_eth_priv *priv) err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, &dpbp_dev); if (err) { - dev_err(dev, "DPBP device allocation failed\n"); + if (err == -ENXIO) + err = -EPROBE_DEFER; + else + dev_err(dev, "DPBP device allocation failed\n"); return err; } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c index 84b942b1eccc..9b150db3b510 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c @@ -140,7 +140,10 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev) err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io); if (err) { - dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); + if (err == -ENXIO) + err = -EPROBE_DEFER; + else + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); goto err_exit; } diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index d79e4e009d63..71f4205f14e7 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -393,7 +393,7 @@ void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, */ /* get local capabilities */ - lcl_adv = ethtool_adv_to_lcl_adv_t(phy_dev->advertising); + lcl_adv = linkmode_adv_to_lcl_adv_t(phy_dev->advertising); /* get link partner capabilities */ rmt_adv = 0; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 3c8da1a18ba0..0e102c764b13 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1784,14 +1784,20 @@ static phy_interface_t gfar_get_interface(struct net_device *dev) */ static int init_phy(struct net_device *dev) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct gfar_private *priv = netdev_priv(dev); - uint gigabit_support = - priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? 
- GFAR_SUPPORTED_GBIT : 0; phy_interface_t interface; struct phy_device *phydev; struct ethtool_eee edata; + linkmode_set_bit_array(phy_10_100_features_array, + ARRAY_SIZE(phy_10_100_features_array), + mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask); + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask); + priv->oldlink = 0; priv->oldspeed = 0; priv->oldduplex = -1; @@ -1809,8 +1815,8 @@ static int init_phy(struct net_device *dev) gfar_configure_serdes(dev); /* Remove any features not supported by the controller */ - phydev->supported &= (GFAR_SUPPORTED | gigabit_support); - phydev->advertising = phydev->supported; + linkmode_and(phydev->supported, phydev->supported, mask); + linkmode_copy(phydev->advertising, phydev->supported); /* Add support for flow control */ phy_support_asym_pause(phydev); @@ -3656,7 +3662,7 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) if (phydev->asym_pause) rmt_adv |= LPA_PAUSE_ASYM; - lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising); + lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising); flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); if (flowctrl & FLOW_CTRL_TX) val |= MACCFG1_TX_FLOW; diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 0d76e15cd6dd..241325c35cb4 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1134,11 +1134,9 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule, prio = vlan_tci_prio(rule); prio_mask = vlan_tci_priom(rule); - if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) { - vlan |= RQFPR_CFI; - vlan_mask |= RQFPR_CFI; - } else if (cfi != VLAN_TAG_PRESENT && - cfi_mask == VLAN_TAG_PRESENT) { + if (cfi_mask) { + if (cfi) + vlan |= RQFPR_CFI; vlan_mask |= RQFPR_CFI; } } diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 32e02700feaa..2e978cb8b28c 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -1742,12 +1742,7 @@ static int init_phy(struct net_device *dev) if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) uec_configure_serdes(dev); - phy_set_max_speed(phydev, SPEED_100); - - if (priv->max_speed == SPEED_1000) - phydev->supported |= ADVERTISED_1000baseT_Full; - - phydev->advertising = phydev->supported; + phy_set_max_speed(phydev, priv->max_speed); priv->phydev = phydev; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 28e907831b0e..c62378c07e70 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1163,6 +1163,7 @@ static void hns_nic_adjust_link(struct net_device *ndev) */ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; struct phy_device *phy_dev = h->phy_dev; int ret; @@ -1180,8 +1181,9 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) if (unlikely(ret)) return -ENODEV; - phy_dev->supported &= h->if_support; - phy_dev->advertising = phy_dev->supported; + ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support); + linkmode_and(phy_dev->supported, phy_dev->supported, supported); + linkmode_copy(phy_dev->advertising, phy_dev->supported); if (h->phy_if == 
PHY_INTERFACE_MODE_XGMII) phy_dev->autoneg = false; diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 038326cfda93..3bb313cdbc9f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -85,6 +85,12 @@ struct hclge_mbx_pf_to_vf_cmd { u16 msg[8]; }; +struct hclge_vf_rst_cmd { + u8 dest_vfid; + u8 vf_rst; + u8 rsv[22]; +}; + /* used by VF to store the received Async responses from PF */ struct hclgevf_mbx_arq_ring { #define HCLGE_MBX_MAX_ARQ_MSG_SIZE 8 diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 055b40606dbc..f69d39f17bdd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -124,7 +124,10 @@ enum hnae3_reset_notify_type { enum hnae3_reset_type { HNAE3_VF_RESET, + HNAE3_VF_FUNC_RESET, + HNAE3_VF_PF_FUNC_RESET, HNAE3_VF_FULL_RESET, + HNAE3_FLR_RESET, HNAE3_FUNC_RESET, HNAE3_CORE_RESET, HNAE3_GLOBAL_RESET, @@ -132,6 +135,11 @@ enum hnae3_reset_type { HNAE3_NONE_RESET, }; +enum hnae3_flr_state { + HNAE3_FLR_DOWN, + HNAE3_FLR_DONE, +}; + struct hnae3_vector_info { u8 __iomem *io_addr; int vector; @@ -162,6 +170,7 @@ struct hnae3_client_ops { int (*setup_tc)(struct hnae3_handle *handle, u8 tc); int (*reset_notify)(struct hnae3_handle *handle, enum hnae3_reset_notify_type type); + enum hnae3_reset_type (*process_hw_error)(struct hnae3_handle *handle); }; #define HNAE3_CLIENT_NAME_LENGTH 16 @@ -296,7 +305,8 @@ struct hnae3_ae_dev { struct hnae3_ae_ops { int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev); void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev); - + void (*flr_prepare)(struct hnae3_ae_dev *ae_dev); + void (*flr_done)(struct hnae3_ae_dev *ae_dev); int (*init_client_instance)(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev); void (*uninit_client_instance)(struct hnae3_client *client, @@ -403,6 +413,8 @@ struct hnae3_ae_ops { u16 vlan, u8 qos, __be16 proto); int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable); void (*reset_event)(struct pci_dev *pdev, struct hnae3_handle *handle); + void (*set_default_reset_request)(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type); void (*get_channels)(struct hnae3_handle *handle, struct ethtool_channels *ch); void (*get_tqps_and_rss_info)(struct hnae3_handle *h, @@ -430,6 +442,9 @@ struct hnae3_ae_ops { int (*restore_fd_rules)(struct hnae3_handle *handle); void (*enable_fd)(struct hnae3_handle *handle, bool enable); pci_ers_result_t (*process_hw_error)(struct hnae3_ae_dev *ae_dev); + bool (*get_hw_reset_stat)(struct hnae3_handle *handle); + bool (*ae_dev_resetting)(struct hnae3_handle *handle); + unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle); }; struct hnae3_dcb_ops { @@ -488,6 +503,14 @@ struct hnae3_roce_private_info { void __iomem *roce_io_base; int base_vector; int num_vectors; + + /* The below attributes defined for RoCE client, hnae3 gives + * initial values to them, and RoCE client can modify and use + * them. 
+ */ + unsigned long reset_state; + unsigned long instance_state; + unsigned long state; }; struct hnae3_unic_private_info { @@ -520,9 +543,6 @@ struct hnae3_handle { struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */ u64 flags; /* Indicate the capabilities for this handle*/ - unsigned long last_reset_time; - enum hnae3_reset_type reset_level; - union { struct net_device *netdev; /* first member */ struct hnae3_knic_private_info kinfo; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c index ea5f8a84070d..b6fabbbdfd5b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c @@ -9,6 +9,9 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets) { struct hnae3_handle *h = hns3_get_handle(ndev); + if (hns3_nic_resetting(ndev)) + return -EBUSY; + if (h->kinfo.dcb_ops->ieee_getets) return h->kinfo.dcb_ops->ieee_getets(h, ets); @@ -20,6 +23,9 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets) { struct hnae3_handle *h = hns3_get_handle(ndev); + if (hns3_nic_resetting(ndev)) + return -EBUSY; + if (h->kinfo.dcb_ops->ieee_setets) return h->kinfo.dcb_ops->ieee_setets(h, ets); @@ -31,6 +37,9 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc) { struct hnae3_handle *h = hns3_get_handle(ndev); + if (hns3_nic_resetting(ndev)) + return -EBUSY; + if (h->kinfo.dcb_ops->ieee_getpfc) return h->kinfo.dcb_ops->ieee_getpfc(h, pfc); @@ -42,6 +51,9 @@ int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc) { struct hnae3_handle *h = hns3_get_handle(ndev); + if (hns3_nic_resetting(ndev)) + return -EBUSY; + if (h->kinfo.dcb_ops->ieee_setpfc) return h->kinfo.dcb_ops->ieee_setpfc(h, pfc); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 20fcf0d1c2ce..8d07ec668d5d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -312,6 +312,24 @@ static u16 hns3_get_max_available_channels(struct hnae3_handle *h) return min_t(u16, rss_size, max_rss_size); } +static void hns3_tqp_enable(struct hnae3_queue *tqp) +{ + u32 rcb_reg; + + rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); + rcb_reg |= BIT(HNS3_RING_EN_B); + hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); +} + +static void hns3_tqp_disable(struct hnae3_queue *tqp) +{ + u32 rcb_reg; + + rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); + rcb_reg &= ~BIT(HNS3_RING_EN_B); + hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); +} + static int hns3_nic_net_up(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); @@ -334,6 +352,10 @@ static int hns3_nic_net_up(struct net_device *netdev) for (i = 0; i < priv->vector_num; i++) hns3_vector_enable(&priv->tqp_vector[i]); + /* enable rcb */ + for (j = 0; j < h->kinfo.num_tqps; j++) + hns3_tqp_enable(h->kinfo.tqp[j]); + /* start the ae_dev */ ret = h->ae_algo->ops->start ? 
h->ae_algo->ops->start(h) : 0; if (ret) @@ -344,6 +366,9 @@ static int hns3_nic_net_up(struct net_device *netdev) return 0; out_start_err: + while (j--) + hns3_tqp_disable(h->kinfo.tqp[j]); + for (j = i - 1; j >= 0; j--) hns3_vector_disable(&priv->tqp_vector[j]); @@ -354,11 +379,13 @@ out_start_err: static int hns3_nic_net_open(struct net_device *netdev) { - struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_knic_private_info *kinfo; int i, ret; + if (hns3_nic_resetting(netdev)) + return -EBUSY; + netif_carrier_off(netdev); ret = hns3_nic_set_real_num_queue(netdev); @@ -378,23 +405,24 @@ static int hns3_nic_net_open(struct net_device *netdev) kinfo->prio_tc[i]); } - priv->ae_handle->last_reset_time = jiffies; return 0; } static void hns3_nic_net_down(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = hns3_get_handle(netdev); const struct hnae3_ae_ops *ops; int i; - if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) - return; - /* disable vectors */ for (i = 0; i < priv->vector_num; i++) hns3_vector_disable(&priv->tqp_vector[i]); + /* disable rcb */ + for (i = 0; i < h->kinfo.num_tqps; i++) + hns3_tqp_disable(h->kinfo.tqp[i]); + /* stop ae_dev */ ops = priv->ae_handle->ae_algo->ops; if (ops->stop) @@ -408,6 +436,11 @@ static void hns3_nic_net_down(struct net_device *netdev) static int hns3_nic_net_stop(struct net_device *netdev) { + struct hns3_nic_priv *priv = netdev_priv(netdev); + + if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) + return 0; + netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); @@ -1615,10 +1648,9 @@ static void hns3_nic_net_timeout(struct net_device *ndev) priv->tx_timeout_count++; - if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo))) - return; - - /* request the reset */ + /* request the reset, and let the hclge to determine + * which reset level should be done + */ if (h->ae_algo->ops->reset_event) h->ae_algo->ops->reset_event(h->pdev, h); } @@ -1819,9 +1851,29 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_DISCONNECT; } +static void hns3_reset_prepare(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + dev_info(&pdev->dev, "hns3 flr prepare\n"); + if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare) + ae_dev->ops->flr_prepare(ae_dev); +} + +static void hns3_reset_done(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + dev_info(&pdev->dev, "hns3 flr done\n"); + if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done) + ae_dev->ops->flr_done(ae_dev); +} + static const struct pci_error_handlers hns3_err_handler = { .error_detected = hns3_error_detected, .slot_reset = hns3_slot_reset, + .reset_prepare = hns3_reset_prepare, + .reset_done = hns3_reset_done, }; static struct pci_driver hns3_driver = { @@ -2669,6 +2721,7 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) static int hns3_nic_common_poll(struct napi_struct *napi, int budget) { + struct hns3_nic_priv *priv = netdev_priv(napi->dev); struct hns3_enet_ring *ring; int rx_pkt_total = 0; @@ -2677,6 +2730,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) bool clean_complete = true; int rx_budget; + if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { + napi_complete(napi); + return 0; + } + /* Since the actual Tx work is minimal, we can give the Tx a larger * budget and be more aggressive about 
cleaning up the Tx descriptors. */ @@ -2701,9 +2759,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) if (!clean_complete) return budget; - napi_complete(napi); - hns3_update_new_int_gl(tqp_vector); - hns3_mask_vector_irq(tqp_vector, 1); + if (likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) && + napi_complete(napi)) { + hns3_update_new_int_gl(tqp_vector); + hns3_mask_vector_irq(tqp_vector, 1); + } return rx_pkt_total; } @@ -3337,7 +3397,6 @@ static int hns3_client_init(struct hnae3_handle *handle) priv->dev = &pdev->dev; priv->netdev = netdev; priv->ae_handle = handle; - priv->ae_handle->last_reset_time = jiffies; priv->tx_timeout_count = 0; handle->kinfo.netdev = netdev; @@ -3357,11 +3416,6 @@ static int hns3_client_init(struct hnae3_handle *handle) /* Carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); - if (handle->flags & HNAE3_SUPPORT_VF) - handle->reset_level = HNAE3_VF_RESET; - else - handle->reset_level = HNAE3_FUNC_RESET; - ret = hns3_get_ring_config(priv); if (ret) { ret = -ENOMEM; @@ -3397,6 +3451,8 @@ static int hns3_client_init(struct hnae3_handle *handle) /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */ netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + set_bit(HNS3_NIC_STATE_INITED, &priv->state); + return ret; out_reg_netdev_fail: @@ -3423,6 +3479,11 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) if (netdev->reg_state != NETREG_UNINITIALIZED) unregister_netdev(netdev); + if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { + netdev_warn(netdev, "already uninitialized\n"); + goto out_netdev_free; + } + hns3_del_all_fd_rules(netdev, true); hns3_force_clear_all_rx_ring(handle); @@ -3443,6 +3504,7 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) priv->ring_data = NULL; +out_netdev_free: free_netdev(netdev); } @@ -3708,8 +3770,22 @@ static void hns3_restore_coal(struct hns3_nic_priv *priv) static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) { + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct net_device *ndev = kinfo->netdev; + struct hns3_nic_priv *priv = netdev_priv(ndev); + + if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) + return 0; + + /* it is cumbersome for hardware to pick-and-choose entries for deletion + * from table space. 
Hence, for function reset software intervention is + * required to delete the entries + */ + if (hns3_dev_ongoing_func_reset(ae_dev)) { + hns3_remove_hw_addr(ndev); + hns3_del_all_fd_rules(ndev, false); + } if (!netif_running(ndev)) return 0; @@ -3720,6 +3796,7 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); int ret = 0; if (netif_running(kinfo->netdev)) { @@ -3729,9 +3806,10 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) "hns net up fail, ret=%d!\n", ret); return ret; } - handle->last_reset_time = jiffies; } + clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state); + return ret; } @@ -3771,28 +3849,44 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) /* Carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); + ret = hns3_nic_alloc_vector_data(priv); + if (ret) + return ret; + hns3_restore_coal(priv); ret = hns3_nic_init_vector_data(priv); if (ret) - return ret; + goto err_dealloc_vector; ret = hns3_init_all_ring(priv); - if (ret) { - hns3_nic_uninit_vector_data(priv); - priv->ring_data = NULL; - } + if (ret) + goto err_uninit_vector; + + set_bit(HNS3_NIC_STATE_INITED, &priv->state); + + return ret; + +err_uninit_vector: + hns3_nic_uninit_vector_data(priv); + priv->ring_data = NULL; +err_dealloc_vector: + hns3_nic_dealloc_vector_data(priv); return ret; } static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) { - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); struct net_device *netdev = handle->kinfo.netdev; struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) { + netdev_warn(netdev, "already uninitialized\n"); + return 0; + } + hns3_force_clear_all_rx_ring(handle); ret = hns3_nic_uninit_vector_data(priv); @@ -3803,18 +3897,15 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) hns3_store_coal(priv); + ret = hns3_nic_dealloc_vector_data(priv); + if (ret) + netdev_err(netdev, "dealloc vector error\n"); + ret = hns3_uninit_all_ring(priv); if (ret) netdev_err(netdev, "uninit ring error\n"); - /* it is cumbersome for hardware to pick-and-choose entries for deletion - * from table space. 
Hence, for function reset software intervention is - * required to delete the entries - */ - if (hns3_dev_ongoing_func_reset(ae_dev)) { - hns3_remove_hw_addr(netdev); - hns3_del_all_fd_rules(netdev, false); - } + clear_bit(HNS3_NIC_STATE_INITED, &priv->state); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index d3636d088aa3..10ff18af3cc7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -15,7 +15,7 @@ extern const char hns3_driver_version[]; enum hns3_nic_state { HNS3_NIC_STATE_TESTING, HNS3_NIC_STATE_RESETTING, - HNS3_NIC_STATE_REINITING, + HNS3_NIC_STATE_INITED, HNS3_NIC_STATE_DOWN, HNS3_NIC_STATE_DISABLED, HNS3_NIC_STATE_REMOVING, @@ -47,7 +47,7 @@ enum hns3_nic_state { #define HNS3_RING_PREFETCH_EN_REG 0x0007C #define HNS3_RING_CFG_VF_NUM_REG 0x00080 #define HNS3_RING_ASID_REG 0x0008C -#define HNS3_RING_RX_VM_REG 0x00090 +#define HNS3_RING_EN_REG 0x00090 #define HNS3_RING_T0_BE_RST 0x00094 #define HNS3_RING_COULD_BE_RST 0x00098 #define HNS3_RING_WRR_WEIGHT_REG 0x0009c @@ -194,6 +194,8 @@ enum hns3_nic_state { #define HNS3_VECTOR_RL_OFFSET 0x900 #define HNS3_VECTOR_RL_EN_B 6 +#define HNS3_RING_EN_B 0 + enum hns3_pkt_l3t_type { HNS3_L3T_NONE, HNS3_L3T_IPV6, @@ -577,6 +579,11 @@ static inline int is_ring_empty(struct hns3_enet_ring *ring) return ring->next_to_use == ring->next_to_clean; } +static inline u32 hns3_read_reg(void __iomem *base, u32 reg) +{ + return readl(base + reg); +} + static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) { u8 __iomem *reg_addr = READ_ONCE(base); @@ -586,7 +593,21 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev) { - return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET)); + return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET || + ae_dev->reset_type == HNAE3_FLR_RESET || + ae_dev->reset_type == HNAE3_VF_FUNC_RESET || + ae_dev->reset_type == HNAE3_VF_FULL_RESET || + ae_dev->reset_type == HNAE3_VF_PF_FUNC_RESET)); +} + +#define hns3_read_dev(a, reg) \ + hns3_read_reg((a)->io_base, (reg)) + +static inline bool hns3_nic_resetting(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + + return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state); } #define hns3_write_dev(a, reg, value) \ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index a4762c2b8ba1..4563638367ac 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -291,6 +291,11 @@ static void hns3_self_test(struct net_device *ndev, int test_index = 0; u32 i; + if (hns3_nic_resetting(ndev)) { + netdev_err(ndev, "dev resetting!"); + return; + } + /* Only do offline selftest, or pass by default */ if (eth_test->flags != ETH_TEST_FL_OFFLINE) return; @@ -530,6 +535,11 @@ static void hns3_get_ringparam(struct net_device *netdev, struct hnae3_handle *h = priv->ae_handle; int queue_num = h->kinfo.num_tqps; + if (hns3_nic_resetting(netdev)) { + netdev_err(netdev, "dev resetting!"); + return; + } + param->tx_max_pending = HNS3_RING_MAX_PENDING; param->rx_max_pending = HNS3_RING_MAX_PENDING; @@ -760,6 +770,9 @@ static int hns3_set_ringparam(struct net_device *ndev, u32 old_desc_num, new_desc_num; int ret; + if (hns3_nic_resetting(ndev)) + return -EBUSY; + if 
(param->rx_mini_pending || param->rx_jumbo_pending) return -EINVAL; @@ -872,6 +885,9 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue, struct hnae3_handle *h = priv->ae_handle; u16 queue_num = h->kinfo.num_tqps; + if (hns3_nic_resetting(netdev)) + return -EBUSY; + if (queue >= queue_num) { netdev_err(netdev, "Invalid queue value %d! Queue max id=%d\n", @@ -1033,6 +1049,9 @@ static int hns3_set_coalesce(struct net_device *netdev, int ret; int i; + if (hns3_nic_resetting(netdev)) + return -EBUSY; + ret = hns3_check_coalesce_para(netdev, cmd); if (ret) return ret; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 690f62ed87dc..8af0cef5609b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -350,11 +350,20 @@ int hclge_cmd_init(struct hclge_dev *hdev) hdev->hw.cmq.crq.next_to_use = 0; hclge_cmd_init_regs(&hdev->hw); - clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); spin_unlock_bh(&hdev->hw.cmq.crq.lock); spin_unlock_bh(&hdev->hw.cmq.csq.lock); + clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + + /* Check if there is new reset pending, because the higher level + * reset may happen when lower level reset is being processed. + */ + if ((hclge_is_reset_pending(hdev))) { + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + return -EBUSY; + } + ret = hclge_cmd_query_firmware_version(&hdev->hw, &version); if (ret) { dev_err(&hdev->pdev->dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 123c37e653f3..6da9e22d82d0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -818,7 +818,6 @@ static void hclge_process_igu_egu_error(struct hclge_dev *hdev, static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd, enum hclge_err_int_type int_type) { - enum hnae3_reset_type reset_level = HNAE3_NONE_RESET; struct device *dev = &hdev->pdev->dev; const struct hclge_hw_error *hw_err_lst1, *hw_err_lst2, *hw_err_lst3; struct hclge_desc desc[2]; @@ -848,23 +847,17 @@ static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd, } err_sts = le32_to_cpu(desc[0].data[2]); - if (err_sts) { + if (err_sts) hclge_log_error(dev, hw_err_lst1, err_sts); - reset_level = HNAE3_FUNC_RESET; - } err_sts = le32_to_cpu(desc[0].data[3]); - if (err_sts) { + if (err_sts) hclge_log_error(dev, hw_err_lst2, err_sts); - reset_level = HNAE3_FUNC_RESET; - } if (cmd == HCLGE_PPP_CMD0_INT_CMD) { err_sts = (le32_to_cpu(desc[0].data[4]) >> 8) & 0x3; - if (err_sts) { + if (err_sts) hclge_log_error(dev, hw_err_lst3, err_sts); - reset_level = HNAE3_FUNC_RESET; - } } /* clear PPP INT */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index ffdd96020860..43bfc730a62d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2144,7 +2144,16 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) */ /* check for vector0 reset event sources */ + if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { + dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); + set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); + 
return HCLGE_VECTOR0_EVENT_RST; + } + if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { + dev_info(&hdev->pdev->dev, "global reset interrupt\n"); set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); @@ -2152,18 +2161,13 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) } if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { + dev_info(&hdev->pdev->dev, "core reset interrupt\n"); set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); return HCLGE_VECTOR0_EVENT_RST; } - if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { - set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); - *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); - return HCLGE_VECTOR0_EVENT_RST; - } - /* check for vector0 mailbox(=CMDQ RX) event source */ if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); @@ -2308,21 +2312,56 @@ static int hclge_notify_client(struct hclge_dev *hdev, int ret; ret = client->ops->reset_notify(handle, type); - if (ret) + if (ret) { + dev_err(&hdev->pdev->dev, + "notify nic client failed %d(%d)\n", type, ret); return ret; + } } return 0; } +static int hclge_notify_roce_client(struct hclge_dev *hdev, + enum hnae3_reset_notify_type type) +{ + struct hnae3_client *client = hdev->roce_client; + int ret = 0; + u16 i; + + if (!client) + return 0; + + if (!client->ops->reset_notify) + return -EOPNOTSUPP; + + for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { + struct hnae3_handle *handle = &hdev->vport[i].roce; + + ret = client->ops->reset_notify(handle, type); + if (ret) { + dev_err(&hdev->pdev->dev, + "notify roce client failed %d(%d)", + type, ret); + return ret; + } + } + + return ret; +} + static int hclge_reset_wait(struct hclge_dev *hdev) { #define HCLGE_RESET_WATI_MS 100 -#define HCLGE_RESET_WAIT_CNT 5 +#define HCLGE_RESET_WAIT_CNT 200 u32 val, reg, reg_bit; u32 cnt = 0; switch (hdev->reset_type) { + case HNAE3_IMP_RESET: + reg = HCLGE_GLOBAL_RESET_REG; + reg_bit = HCLGE_IMP_RESET_BIT; + break; case HNAE3_GLOBAL_RESET: reg = HCLGE_GLOBAL_RESET_REG; reg_bit = HCLGE_GLOBAL_RESET_BIT; @@ -2335,6 +2374,8 @@ static int hclge_reset_wait(struct hclge_dev *hdev) reg = HCLGE_FUN_RST_ING; reg_bit = HCLGE_FUN_RST_ING_B; break; + case HNAE3_FLR_RESET: + break; default: dev_err(&hdev->pdev->dev, "Wait for unsupported reset type: %d\n", @@ -2342,6 +2383,20 @@ static int hclge_reset_wait(struct hclge_dev *hdev) return -EINVAL; } + if (hdev->reset_type == HNAE3_FLR_RESET) { + while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) && + cnt++ < HCLGE_RESET_WAIT_CNT) + msleep(HCLGE_RESET_WATI_MS); + + if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { + dev_err(&hdev->pdev->dev, + "flr wait timeout: %d\n", cnt); + return -EBUSY; + } + + return 0; + } + val = hclge_read_dev(&hdev->hw, reg); while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { msleep(HCLGE_RESET_WATI_MS); @@ -2358,6 +2413,55 @@ static int hclge_reset_wait(struct hclge_dev *hdev) return 0; } +static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) +{ + struct hclge_vf_rst_cmd *req; + struct hclge_desc desc; + + req = (struct hclge_vf_rst_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false); + req->dest_vfid = func_id; + + if (reset) + req->vf_rst = 0x1; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +int 
hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) +{ + int i; + + for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) { + struct hclge_vport *vport = &hdev->vport[i]; + int ret; + + /* Send cmd to set/clear VF's FUNC_RST_ING */ + ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); + if (ret) { + dev_err(&hdev->pdev->dev, + "set vf(%d) rst failded %d!\n", + vport->vport_id, ret); + return ret; + } + + if (!reset) + continue; + + /* Inform VF to process the reset. + * hclge_inform_reset_assert_to_vf may fail if VF + * driver is not loaded. + */ + ret = hclge_inform_reset_assert_to_vf(vport); + if (ret) + dev_warn(&hdev->pdev->dev, + "inform reset to vf(%d) failded %d!\n", + vport->vport_id, ret); + } + + return 0; +} + int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) { struct hclge_desc desc; @@ -2396,11 +2500,16 @@ static void hclge_do_reset(struct hclge_dev *hdev) break; case HNAE3_FUNC_RESET: dev_info(&pdev->dev, "PF Reset requested\n"); - hclge_func_reset_cmd(hdev, 0); /* schedule again to check later */ set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); hclge_reset_task_schedule(hdev); break; + case HNAE3_FLR_RESET: + dev_info(&pdev->dev, "FLR requested\n"); + /* schedule again to check later */ + set_bit(HNAE3_FLR_RESET, &hdev->reset_pending); + hclge_reset_task_schedule(hdev); + break; default: dev_warn(&pdev->dev, "Unsupported reset type: %d\n", hdev->reset_type); @@ -2414,20 +2523,28 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; /* return the highest priority reset level amongst all */ - if (test_bit(HNAE3_GLOBAL_RESET, addr)) + if (test_bit(HNAE3_IMP_RESET, addr)) { + rst_level = HNAE3_IMP_RESET; + clear_bit(HNAE3_IMP_RESET, addr); + clear_bit(HNAE3_GLOBAL_RESET, addr); + clear_bit(HNAE3_CORE_RESET, addr); + clear_bit(HNAE3_FUNC_RESET, addr); + } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) { rst_level = HNAE3_GLOBAL_RESET; - else if (test_bit(HNAE3_CORE_RESET, addr)) + clear_bit(HNAE3_GLOBAL_RESET, addr); + clear_bit(HNAE3_CORE_RESET, addr); + clear_bit(HNAE3_FUNC_RESET, addr); + } else if (test_bit(HNAE3_CORE_RESET, addr)) { rst_level = HNAE3_CORE_RESET; - else if (test_bit(HNAE3_IMP_RESET, addr)) - rst_level = HNAE3_IMP_RESET; - else if (test_bit(HNAE3_FUNC_RESET, addr)) + clear_bit(HNAE3_CORE_RESET, addr); + clear_bit(HNAE3_FUNC_RESET, addr); + } else if (test_bit(HNAE3_FUNC_RESET, addr)) { rst_level = HNAE3_FUNC_RESET; - - /* now, clear all other resets */ - clear_bit(HNAE3_GLOBAL_RESET, addr); - clear_bit(HNAE3_CORE_RESET, addr); - clear_bit(HNAE3_IMP_RESET, addr); - clear_bit(HNAE3_FUNC_RESET, addr); + clear_bit(HNAE3_FUNC_RESET, addr); + } else if (test_bit(HNAE3_FLR_RESET, addr)) { + rst_level = HNAE3_FLR_RESET; + clear_bit(HNAE3_FLR_RESET, addr); + } return rst_level; } @@ -2457,39 +2574,206 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev) hclge_enable_vector(&hdev->misc_vector, true); } +static int hclge_reset_prepare_down(struct hclge_dev *hdev) +{ + int ret = 0; + + switch (hdev->reset_type) { + case HNAE3_FUNC_RESET: + /* fall through */ + case HNAE3_FLR_RESET: + ret = hclge_set_all_vf_rst(hdev, true); + break; + default: + break; + } + + return ret; +} + +static int hclge_reset_prepare_wait(struct hclge_dev *hdev) +{ + u32 reg_val; + int ret = 0; + + switch (hdev->reset_type) { + case HNAE3_FUNC_RESET: + /* There is no mechanism for PF to know if VF has stopped IO + * for now, just wait 100 ms for VF to stop IO + */ + msleep(100); + ret 
= hclge_func_reset_cmd(hdev, 0); + if (ret) { + dev_err(&hdev->pdev->dev, + "asserting function reset fail %d!\n", ret); + return ret; + } + + /* After performaning pf reset, it is not necessary to do the + * mailbox handling or send any command to firmware, because + * any mailbox handling or command to firmware is only valid + * after hclge_cmd_init is called. + */ + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + break; + case HNAE3_FLR_RESET: + /* There is no mechanism for PF to know if VF has stopped IO + * for now, just wait 100 ms for VF to stop IO + */ + msleep(100); + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); + break; + case HNAE3_IMP_RESET: + reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, + BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val); + break; + default: + break; + } + + dev_info(&hdev->pdev->dev, "prepare wait ok\n"); + + return ret; +} + +static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout) +{ +#define MAX_RESET_FAIL_CNT 5 +#define RESET_UPGRADE_DELAY_SEC 10 + + if (hdev->reset_pending) { + dev_info(&hdev->pdev->dev, "Reset pending %lu\n", + hdev->reset_pending); + return true; + } else if ((hdev->reset_type != HNAE3_IMP_RESET) && + (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) & + BIT(HCLGE_IMP_RESET_BIT))) { + dev_info(&hdev->pdev->dev, + "reset failed because IMP Reset is pending\n"); + hclge_clear_reset_cause(hdev); + return false; + } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) { + hdev->reset_fail_cnt++; + if (is_timeout) { + set_bit(hdev->reset_type, &hdev->reset_pending); + dev_info(&hdev->pdev->dev, + "re-schedule to wait for hw reset done\n"); + return true; + } + + dev_info(&hdev->pdev->dev, "Upgrade reset level\n"); + hclge_clear_reset_cause(hdev); + mod_timer(&hdev->reset_timer, + jiffies + RESET_UPGRADE_DELAY_SEC * HZ); + + return false; + } + + hclge_clear_reset_cause(hdev); + dev_err(&hdev->pdev->dev, "Reset fail!\n"); + return false; +} + +static int hclge_reset_prepare_up(struct hclge_dev *hdev) +{ + int ret = 0; + + switch (hdev->reset_type) { + case HNAE3_FUNC_RESET: + /* fall through */ + case HNAE3_FLR_RESET: + ret = hclge_set_all_vf_rst(hdev, false); + break; + default: + break; + } + + return ret; +} + static void hclge_reset(struct hclge_dev *hdev) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - struct hnae3_handle *handle; + bool is_timeout = false; + int ret; /* Initialize ae_dev reset status as well, in case enet layer wants to * know if device is undergoing reset */ ae_dev->reset_type = hdev->reset_type; + hdev->reset_count++; + hdev->last_reset_time = jiffies; /* perform reset of the stack & ae device for a client */ - handle = &hdev->vport[0].nic; + ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + goto err_reset; + + ret = hclge_reset_prepare_down(hdev); + if (ret) + goto err_reset; + rtnl_lock(); - hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + goto err_reset_lock; + rtnl_unlock(); - if (!hclge_reset_wait(hdev)) { - rtnl_lock(); - hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); - hclge_reset_ae_dev(hdev->ae_dev); - hclge_notify_client(hdev, HNAE3_INIT_CLIENT); + ret = hclge_reset_prepare_wait(hdev); + if (ret) + goto err_reset; - hclge_clear_reset_cause(hdev); - } else { - rtnl_lock(); - /* schedule again to check pending resets later */ - set_bit(hdev->reset_type, &hdev->reset_pending); - 
hclge_reset_task_schedule(hdev); + if (hclge_reset_wait(hdev)) { + is_timeout = true; + goto err_reset; } - hclge_notify_client(hdev, HNAE3_UP_CLIENT); - handle->last_reset_time = jiffies; + ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); + if (ret) + goto err_reset; + + rtnl_lock(); + ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); + if (ret) + goto err_reset_lock; + + ret = hclge_reset_ae_dev(hdev->ae_dev); + if (ret) + goto err_reset_lock; + + ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); + if (ret) + goto err_reset_lock; + + hclge_clear_reset_cause(hdev); + + ret = hclge_reset_prepare_up(hdev); + if (ret) + goto err_reset_lock; + + ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); + if (ret) + goto err_reset_lock; + rtnl_unlock(); - ae_dev->reset_type = HNAE3_NONE_RESET; + + ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT); + if (ret) + goto err_reset; + + ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT); + if (ret) + goto err_reset; + + return; + +err_reset_lock: + rtnl_unlock(); +err_reset: + if (hclge_reset_err_handle(hdev, is_timeout)) + hclge_reset_task_schedule(hdev); } static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) @@ -2515,20 +2799,42 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) if (!handle) handle = &hdev->vport[0].nic; - if (time_before(jiffies, (handle->last_reset_time + 3 * HZ))) + if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ))) return; - else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ))) - handle->reset_level = HNAE3_FUNC_RESET; + else if (hdev->default_reset_request) + hdev->reset_level = + hclge_get_reset_level(hdev, + &hdev->default_reset_request); + else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) + hdev->reset_level = HNAE3_FUNC_RESET; dev_info(&hdev->pdev->dev, "received reset event , reset type is %d", - handle->reset_level); + hdev->reset_level); /* request reset & schedule reset task */ - set_bit(handle->reset_level, &hdev->reset_request); + set_bit(hdev->reset_level, &hdev->reset_request); hclge_reset_task_schedule(hdev); - if (handle->reset_level < HNAE3_GLOBAL_RESET) - handle->reset_level++; + if (hdev->reset_level < HNAE3_GLOBAL_RESET) + hdev->reset_level++; +} + +static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type) +{ + struct hclge_dev *hdev = ae_dev->priv; + + set_bit(rst_type, &hdev->default_reset_request); +} + +static void hclge_reset_timer(struct timer_list *t) +{ + struct hclge_dev *hdev = from_timer(hdev, t, reset_timer); + + dev_info(&hdev->pdev->dev, + "triggering global reset in reset timer\n"); + set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request); + hclge_reset_event(hdev->pdev, NULL); } static void hclge_reset_subtask(struct hclge_dev *hdev) @@ -2542,6 +2848,7 @@ static void hclge_reset_subtask(struct hclge_dev *hdev) * b. else, we can come back later to check this status so re-sched * now. */ + hdev->last_reset_time = jiffies; hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending); if (hdev->reset_type != HNAE3_NONE_RESET) hclge_reset(hdev); @@ -4336,8 +4643,12 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle) struct hlist_node *node; int ret; + /* Return ok here, because reset error handling will check this + * return value. If error is returned here, the reset process will + * fail. 
+ */ if (!hnae3_dev_fd_supported(hdev)) - return -EOPNOTSUPP; + return 0; hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); @@ -4592,6 +4903,31 @@ static int hclge_get_all_rules(struct hnae3_handle *handle, return 0; } +static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || + hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); +} + +static bool hclge_ae_dev_resetting(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); +} + +static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hdev->reset_count; +} + static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -4805,10 +5141,6 @@ static int hclge_ae_start(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - int i; - - for (i = 0; i < vport->alloc_tqps; i++) - hclge_tqp_enable(hdev, i, 0, true); /* mac enable */ hclge_cfg_mac_mode(hdev, true); @@ -4828,7 +5160,6 @@ static void hclge_ae_stop(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - int i; set_bit(HCLGE_STATE_DOWN, &hdev->state); @@ -4836,14 +5167,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle) cancel_work_sync(&hdev->service_task); clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); - if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { + /* If it is not PF reset, the firmware will disable the MAC, + * so it only need to stop phy here. 
+ */ + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && + hdev->reset_type != HNAE3_FUNC_RESET) { hclge_mac_stop_phy(hdev); return; } - for (i = 0; i < vport->alloc_tqps; i++) - hclge_tqp_enable(hdev, i, 0, false); - /* Mac disable */ hclge_cfg_mac_mode(hdev, false); @@ -6250,7 +6582,7 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev) if (!phydev->link || !phydev->autoneg) return 0; - local_advertising = ethtool_adv_to_lcl_adv_t(phydev->advertising); + local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising); if (phydev->pause) remote_advertising = LPA_PAUSE_CAP; @@ -6612,6 +6944,8 @@ static void hclge_state_uninit(struct hclge_dev *hdev) if (hdev->service_timer.function) del_timer_sync(&hdev->service_timer); + if (hdev->reset_timer.function) + del_timer_sync(&hdev->reset_timer); if (hdev->service_task.func) cancel_work_sync(&hdev->service_task); if (hdev->rst_service_task.func) @@ -6620,6 +6954,34 @@ static void hclge_state_uninit(struct hclge_dev *hdev) cancel_work_sync(&hdev->mbx_service_task); } +static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev) +{ +#define HCLGE_FLR_WAIT_MS 100 +#define HCLGE_FLR_WAIT_CNT 50 + struct hclge_dev *hdev = ae_dev->priv; + int cnt = 0; + + clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state); + clear_bit(HNAE3_FLR_DONE, &hdev->flr_state); + set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request); + hclge_reset_event(hdev->pdev, NULL); + + while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) && + cnt++ < HCLGE_FLR_WAIT_CNT) + msleep(HCLGE_FLR_WAIT_MS); + + if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state)) + dev_err(&hdev->pdev->dev, + "flr wait down timeout: %d\n", cnt); +} + +static void hclge_flr_done(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + + set_bit(HNAE3_FLR_DONE, &hdev->flr_state); +} + static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) { struct pci_dev *pdev = ae_dev->pdev; @@ -6635,6 +6997,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hdev->pdev = pdev; hdev->ae_dev = ae_dev; hdev->reset_type = HNAE3_NONE_RESET; + hdev->reset_level = HNAE3_FUNC_RESET; ae_dev->priv = hdev; ret = hclge_pci_init(hdev); @@ -6769,6 +7132,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_dcb_ops_set(hdev); timer_setup(&hdev->service_timer, hclge_service_timer, 0); + timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); INIT_WORK(&hdev->service_task, hclge_service_task); INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); @@ -6779,6 +7143,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_enable_vector(&hdev->misc_vector, true); hclge_state_init(hdev); + hdev->last_reset_time = jiffies; pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); return 0; @@ -7275,6 +7640,8 @@ static void hclge_get_link_mode(struct hnae3_handle *handle, static const struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, + .flr_prepare = hclge_flr_prepare, + .flr_done = hclge_flr_done, .init_client_instance = hclge_init_client_instance, .uninit_client_instance = hclge_uninit_client_instance, .map_ring_to_vector = hclge_map_ring_to_vector, @@ -7321,6 +7688,7 @@ static const struct hnae3_ae_ops hclge_ops = { .set_vf_vlan_filter = hclge_set_vf_vlan_filter, .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, .reset_event = hclge_reset_event, + .set_default_reset_request = hclge_set_def_reset_request, .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, 
.set_channels = hclge_set_channels, .get_channels = hclge_get_channels, @@ -7337,6 +7705,9 @@ static const struct hnae3_ae_ops hclge_ops = { .restore_fd_rules = hclge_restore_fd_entries, .enable_fd = hclge_enable_fd, .process_hw_error = hclge_process_ras_hw_error, + .get_hw_reset_stat = hclge_get_hw_reset_stat, + .ae_dev_resetting = hclge_ae_dev_resetting, + .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 0d9215404269..ca90b662e33b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -97,11 +97,13 @@ enum HLCGE_PORT_TYPE { #define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0) /* Reset related Registers */ +#define HCLGE_PF_OTHER_INT_REG 0x20600 #define HCLGE_MISC_RESET_STS_REG 0x20700 #define HCLGE_MISC_VECTOR_INT_STS 0x20800 #define HCLGE_GLOBAL_RESET_REG 0x20A00 #define HCLGE_GLOBAL_RESET_BIT 0 #define HCLGE_CORE_RESET_BIT 1 +#define HCLGE_IMP_RESET_BIT 2 #define HCLGE_FUN_RST_ING 0x20C00 #define HCLGE_FUN_RST_ING_B 0 @@ -115,6 +117,8 @@ enum HLCGE_PORT_TYPE { /* CMDQ register bits for RX event(=MBX event) */ #define HCLGE_VECTOR0_RX_CMDQ_INT_B 1 +#define HCLGE_VECTOR0_IMP_RESET_INT_B 1 + #define HCLGE_MAC_DEFAULT_FRAME \ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN) #define HCLGE_MAC_MIN_FRAME 64 @@ -593,10 +597,16 @@ struct hclge_dev { struct hclge_misc_vector misc_vector; struct hclge_hw_stats hw_stats; unsigned long state; + unsigned long flr_state; + unsigned long last_reset_time; enum hnae3_reset_type reset_type; + enum hnae3_reset_type reset_level; + unsigned long default_reset_request; unsigned long reset_request; /* reset has been requested */ unsigned long reset_pending; /* client rst is pending to be served */ + unsigned long reset_count; /* the number of reset has been done */ + u32 reset_fail_cnt; u32 fw_version; u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */ u16 num_tqps; /* Num task queue pairs of this PF */ @@ -644,6 +654,7 @@ struct hclge_dev { unsigned long service_timer_period; unsigned long service_timer_previous; struct timer_list service_timer; + struct timer_list reset_timer; struct work_struct service_task; struct work_struct rst_service_task; struct work_struct mbx_service_task; @@ -768,6 +779,12 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue) return tqp->index; } +static inline bool hclge_is_reset_pending(struct hclge_dev *hdev) +{ + return !!hdev->reset_pending; +} + +int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport); int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex); int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, u16 vlan_id, bool is_kill); @@ -777,6 +794,7 @@ int hclge_buffer_alloc(struct hclge_dev *hdev); int hclge_rss_init_hw(struct hclge_dev *hdev); void hclge_rss_indir_init_cfg(struct hclge_dev *hdev); +int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport); void hclge_mbx_handler(struct hclge_dev *hdev); int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id); void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index f890022938d9..4c7e7bd3c847 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -79,15 +79,26 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, return status; } -static int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) +int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) { + struct hclge_dev *hdev = vport->back; + enum hnae3_reset_type reset_type; u8 msg_data[2]; u8 dest_vfid; dest_vfid = (u8)vport->vport_id; + if (hdev->reset_type == HNAE3_FUNC_RESET) + reset_type = HNAE3_VF_PF_FUNC_RESET; + else if (hdev->reset_type == HNAE3_FLR_RESET) + reset_type = HNAE3_VF_FULL_RESET; + else + return -EINVAL; + + memcpy(&msg_data[0], &reset_type, sizeof(u16)); + /* send this requested info to VF */ - return hclge_send_mbx_msg(vport, msg_data, sizeof(u8), + return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), HCLGE_MBX_ASSERTING_RESET, dest_vfid); } @@ -363,24 +374,10 @@ static void hclge_reset_vf(struct hclge_vport *vport, int ret; dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!", - mbx_req->mbx_src_vfid); - - /* Acknowledge VF that PF is now about to assert the reset for the VF. - * On receiving this message VF will get into pending state and will - * start polling for the hardware reset completion status. - */ - ret = hclge_inform_reset_assert_to_vf(vport); - if (ret) { - dev_err(&hdev->pdev->dev, - "PF fail(%d) to inform VF(%d)of reset, reset failed!\n", - ret, vport->vport_id); - return; - } + vport->vport_id); - dev_warn(&hdev->pdev->dev, "PF is now resetting VF %d.\n", - mbx_req->mbx_src_vfid); - /* reset this virtual function */ - hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid); + ret = hclge_func_reset_cmd(hdev, vport->vport_id); + hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0); } static bool hclge_cmd_crq_empty(struct hclge_hw *hw) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 03018638f701..741cb3b9519d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -195,12 +195,13 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev) { struct net_device *netdev = hdev->vport[0].nic.netdev; struct phy_device *phydev = hdev->hw.mac.phydev; + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; int ret; if (!phydev) return 0; - phydev->supported &= ~SUPPORTED_FIBRE; + linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported); ret = phy_connect_direct(netdev, phydev, hclge_mac_adjust_link, @@ -210,7 +211,15 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev) return ret; } - phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES; + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask); + linkmode_set_bit_array(phy_10_100_features_array, + ARRAY_SIZE(phy_10_100_features_array), + mask); + linkmode_set_bit_array(phy_gbit_features_array, + ARRAY_SIZE(phy_gbit_features_array), + mask); + linkmode_and(phydev->supported, phydev->supported, mask); phy_support_asym_pause(phydev); return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index 0d3b445f6799..d5765c8cf3a3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -72,6 +72,45 @@ static bool hclgevf_is_special_opcode(u16 opcode) return false; } +static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring) 
+{ + struct hclgevf_dev *hdev = ring->dev; + struct hclgevf_hw *hw = &hdev->hw; + u32 reg_val; + + if (ring->flag == HCLGEVF_TYPE_CSQ) { + reg_val = (u32)ring->desc_dma_addr; + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val); + reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val); + + reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); + reg_val |= HCLGEVF_NIC_CMQ_ENABLE; + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val); + + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); + } else { + reg_val = (u32)ring->desc_dma_addr; + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val); + reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val); + + reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); + reg_val |= HCLGEVF_NIC_CMQ_ENABLE; + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val); + + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); + } +} + +static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw) +{ + hclgevf_cmd_config_regs(&hw->cmq.csq); + hclgevf_cmd_config_regs(&hw->cmq.crq); +} + static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring) { int size = ring->desc_num * sizeof(struct hclgevf_desc); @@ -96,61 +135,23 @@ static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring) } } -static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev, - struct hclgevf_cmq_ring *ring) +static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type) { struct hclgevf_hw *hw = &hdev->hw; - int ring_type = ring->flag; - u32 reg_val; + struct hclgevf_cmq_ring *ring = + (ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq; int ret; - ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM; - spin_lock_init(&ring->lock); - ring->next_to_clean = 0; - ring->next_to_use = 0; ring->dev = hdev; + ring->flag = ring_type; /* allocate CSQ/CRQ descriptor */ ret = hclgevf_alloc_cmd_desc(ring); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret, (ring_type == HCLGEVF_TYPE_CSQ) ? 
"CSQ" : "CRQ"); - return ret; - } - /* initialize the hardware registers with csq/crq dma-address, - * descriptor number, head & tail pointers - */ - switch (ring_type) { - case HCLGEVF_TYPE_CSQ: - reg_val = (u32)ring->desc_dma_addr; - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val); - reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val); - - reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); - reg_val |= HCLGEVF_NIC_CMQ_ENABLE; - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val); - - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); - return 0; - case HCLGEVF_TYPE_CRQ: - reg_val = (u32)ring->desc_dma_addr; - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val); - reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val); - - reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); - reg_val |= HCLGEVF_NIC_CMQ_ENABLE; - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val); - - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); - return 0; - default: - return -EINVAL; - } + return ret; } void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc, @@ -188,7 +189,8 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num) spin_lock_bh(&hw->cmq.csq.lock); - if (num > hclgevf_ring_space(&hw->cmq.csq)) { + if (num > hclgevf_ring_space(&hw->cmq.csq) || + test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) { spin_unlock_bh(&hw->cmq.csq.lock); return -EBUSY; } @@ -282,55 +284,83 @@ static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw, return status; } -int hclgevf_cmd_init(struct hclgevf_dev *hdev) +int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev) { - u32 version; int ret; - /* setup Tx write back timeout */ + /* Setup the lock for command queue */ + spin_lock_init(&hdev->hw.cmq.csq.lock); + spin_lock_init(&hdev->hw.cmq.crq.lock); + hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT; + hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM; + hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM; - /* setup queue CSQ/CRQ rings */ - hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ; - ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq); + ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ); if (ret) { dev_err(&hdev->pdev->dev, - "failed(%d) to initialize CSQ ring\n", ret); + "CSQ ring setup error %d\n", ret); return ret; } - hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ; - ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq); + ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ); if (ret) { dev_err(&hdev->pdev->dev, - "failed(%d) to initialize CRQ ring\n", ret); + "CRQ ring setup error %d\n", ret); goto err_csq; } + return 0; +err_csq: + hclgevf_free_cmd_desc(&hdev->hw.cmq.csq); + return ret; +} + +int hclgevf_cmd_init(struct hclgevf_dev *hdev) +{ + u32 version; + int ret; + + spin_lock_bh(&hdev->hw.cmq.csq.lock); + spin_lock_bh(&hdev->hw.cmq.crq.lock); + /* initialize the pointers of async rx queue of mailbox */ hdev->arq.hdev = hdev; hdev->arq.head = 0; hdev->arq.tail = 0; hdev->arq.count = 0; + hdev->hw.cmq.csq.next_to_clean = 0; + hdev->hw.cmq.csq.next_to_use = 0; + hdev->hw.cmq.crq.next_to_clean = 0; + hdev->hw.cmq.crq.next_to_use = 0; + + hclgevf_cmd_init_regs(&hdev->hw); + + spin_unlock_bh(&hdev->hw.cmq.crq.lock); + spin_unlock_bh(&hdev->hw.cmq.csq.lock); + + 
clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + + /* Check if there is new reset pending, because the higher level + * reset may happen when lower level reset is being processed. + */ + if (hclgevf_is_reset_pending(hdev)) { + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + return -EBUSY; + } /* get firmware version */ ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version); if (ret) { dev_err(&hdev->pdev->dev, "failed(%d) to query firmware version\n", ret); - goto err_crq; + return ret; } hdev->fw_version = version; dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version); return 0; -err_crq: - hclgevf_free_cmd_desc(&hdev->hw.cmq.crq); -err_csq: - hclgevf_free_cmd_desc(&hdev->hw.cmq.csq); - - return ret; } void hclgevf_cmd_uninit(struct hclgevf_dev *hdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index bc294b0c8b62..090541de0c7d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -256,6 +256,7 @@ static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg) int hclgevf_cmd_init(struct hclgevf_dev *hdev); void hclgevf_cmd_uninit(struct hclgevf_dev *hdev); +int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev); int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num); void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 085edb945389..6b4d1477055f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2,6 +2,7 @@ // Copyright (c) 2016-2017 Hisilicon Limited. #include <linux/etherdevice.h> +#include <linux/iopoll.h> #include <net/rtnetlink.h> #include "hclgevf_cmd.h" #include "hclgevf_main.h" @@ -10,8 +11,7 @@ #define HCLGEVF_NAME "hclgevf" -static int hclgevf_init_hdev(struct hclgevf_dev *hdev); -static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev); +static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); static struct hnae3_ae_algo ae_algovf; static const struct pci_device_id ae_algovf_pci_tbl[] = { @@ -209,12 +209,6 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) struct hclgevf_tqp *tqp; int i; - /* if this is on going reset then we need to re-allocate the TPQs - * since we cannot assume we would get same number of TPQs back from PF - */ - if (hclgevf_dev_ongoing_reset(hdev)) - devm_kfree(&hdev->pdev->dev, hdev->htqp); - hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, sizeof(struct hclgevf_tqp), GFP_KERNEL); if (!hdev->htqp) @@ -258,12 +252,6 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev) new_tqps = kinfo->rss_size * kinfo->num_tc; kinfo->num_tqps = min(new_tqps, hdev->num_tqps); - /* if this is on going reset then we need to re-allocate the hnae queues - * as well since number of TPQs from PF might have changed. 
- */ - if (hclgevf_dev_ongoing_reset(hdev)) - devm_kfree(&hdev->pdev->dev, kinfo->tqp); - kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, sizeof(struct hnae3_queue *), GFP_KERNEL); if (!kinfo->tqp) @@ -868,6 +856,9 @@ static int hclgevf_unmap_ring_from_vector( struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); int ret, vector_id; + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) + return 0; + vector_id = hclgevf_get_vector_index(hdev, vector); if (vector_id < 0) { dev_err(&handle->pdev->dev, @@ -956,13 +947,6 @@ static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, return status; } -static int hclgevf_get_queue_id(struct hnae3_queue *queue) -{ - struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q); - - return tqp->index; -} - static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; @@ -1102,33 +1086,74 @@ static int hclgevf_notify_client(struct hclgevf_dev *hdev, { struct hnae3_client *client = hdev->nic_client; struct hnae3_handle *handle = &hdev->nic; + int ret; if (!client->ops->reset_notify) return -EOPNOTSUPP; - return client->ops->reset_notify(handle, type); + ret = client->ops->reset_notify(handle, type); + if (ret) + dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", + type, ret); + + return ret; +} + +static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + + set_bit(HNAE3_FLR_DONE, &hdev->flr_state); +} + +static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev, + unsigned long delay_us, + unsigned long wait_cnt) +{ + unsigned long cnt = 0; + + while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) && + cnt++ < wait_cnt) + usleep_range(delay_us, delay_us * 2); + + if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { + dev_err(&hdev->pdev->dev, + "flr wait timeout\n"); + return -ETIMEDOUT; + } + + return 0; } static int hclgevf_reset_wait(struct hclgevf_dev *hdev) { -#define HCLGEVF_RESET_WAIT_MS 500 -#define HCLGEVF_RESET_WAIT_CNT 20 - u32 val, cnt = 0; +#define HCLGEVF_RESET_WAIT_US 20000 +#define HCLGEVF_RESET_WAIT_CNT 2000 +#define HCLGEVF_RESET_WAIT_TIMEOUT_US \ + (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) + + u32 val; + int ret; /* wait to check the hardware reset completion status */ - val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); - while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) && - (cnt < HCLGEVF_RESET_WAIT_CNT)) { - msleep(HCLGEVF_RESET_WAIT_MS); - val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); - cnt++; - } + val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); + dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val); + + if (hdev->reset_type == HNAE3_FLR_RESET) + return hclgevf_flr_poll_timeout(hdev, + HCLGEVF_RESET_WAIT_US, + HCLGEVF_RESET_WAIT_CNT); + + ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val, + !(val & HCLGEVF_RST_ING_BITS), + HCLGEVF_RESET_WAIT_US, + HCLGEVF_RESET_WAIT_TIMEOUT_US); /* hardware completion status should be available by this time */ - if (cnt >= HCLGEVF_RESET_WAIT_CNT) { - dev_warn(&hdev->pdev->dev, - "could'nt get reset done status from h/w, timeout!\n"); - return -EBUSY; + if (ret) { + dev_err(&hdev->pdev->dev, + "could'nt get reset done status from h/w, timeout!\n"); + return ret; } /* we will wait a bit more to let reset of the stack to complete. 
This @@ -1145,10 +1170,12 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev) int ret; /* uninitialize the nic client */ - hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); + ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; /* re-initialize the hclge device */ - ret = hclgevf_init_hdev(hdev); + ret = hclgevf_reset_hdev(hdev); if (ret) { dev_err(&hdev->pdev->dev, "hclge device re-init failed, VF is disabled!\n"); @@ -1156,22 +1183,60 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev) } /* bring up the nic client again */ - hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); + ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); + if (ret) + return ret; return 0; } +static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) +{ + int ret = 0; + + switch (hdev->reset_type) { + case HNAE3_VF_FUNC_RESET: + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, + 0, true, NULL, sizeof(u8)); + break; + case HNAE3_FLR_RESET: + set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); + break; + default: + break; + } + + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + + dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n", + hdev->reset_type, ret); + + return ret; +} + static int hclgevf_reset(struct hclgevf_dev *hdev) { + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); int ret; + /* Initialize ae_dev reset status as well, in case enet layer wants to + * know if device is undergoing reset + */ + ae_dev->reset_type = hdev->reset_type; + hdev->reset_count++; rtnl_lock(); /* bring down the nic to stop any ongoing TX/RX */ - hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + goto err_reset_lock; rtnl_unlock(); + ret = hclgevf_reset_prepare_wait(hdev); + if (ret) + goto err_reset; + /* check if VF could successfully fetch the hardware reset completion * status from the hardware */ @@ -1181,58 +1246,118 @@ static int hclgevf_reset(struct hclgevf_dev *hdev) dev_err(&hdev->pdev->dev, "VF failed(=%d) to fetch H/W reset completion status\n", ret); - - dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n"); - rtnl_lock(); - hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); - - rtnl_unlock(); - return ret; + goto err_reset; } rtnl_lock(); /* now, re-initialize the nic client and ae device*/ ret = hclgevf_reset_stack(hdev); - if (ret) + if (ret) { dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); + goto err_reset_lock; + } /* bring up the nic to enable TX/RX again */ - hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); + ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); + if (ret) + goto err_reset_lock; rtnl_unlock(); return ret; -} +err_reset_lock: + rtnl_unlock(); +err_reset: + /* When VF reset failed, only the higher level reset asserted by PF + * can restore it, so re-initialize the command queue to receive + * this higher reset event. 
+ */ + hclgevf_cmd_init(hdev); + dev_err(&hdev->pdev->dev, "failed to reset VF\n"); -static int hclgevf_do_reset(struct hclgevf_dev *hdev) -{ - int status; - u8 respmsg; + return ret; +} - status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, - 0, false, &respmsg, sizeof(u8)); - if (status) - dev_err(&hdev->pdev->dev, - "VF reset request to PF failed(=%d)\n", status); +static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, + unsigned long *addr) +{ + enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; + + /* return the highest priority reset level amongst all */ + if (test_bit(HNAE3_VF_RESET, addr)) { + rst_level = HNAE3_VF_RESET; + clear_bit(HNAE3_VF_RESET, addr); + clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); + clear_bit(HNAE3_VF_FUNC_RESET, addr); + } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { + rst_level = HNAE3_VF_FULL_RESET; + clear_bit(HNAE3_VF_FULL_RESET, addr); + clear_bit(HNAE3_VF_FUNC_RESET, addr); + } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { + rst_level = HNAE3_VF_PF_FUNC_RESET; + clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); + clear_bit(HNAE3_VF_FUNC_RESET, addr); + } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { + rst_level = HNAE3_VF_FUNC_RESET; + clear_bit(HNAE3_VF_FUNC_RESET, addr); + } else if (test_bit(HNAE3_FLR_RESET, addr)) { + rst_level = HNAE3_FLR_RESET; + clear_bit(HNAE3_FLR_RESET, addr); + } - return status; + return rst_level; } static void hclgevf_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + struct hclgevf_dev *hdev = ae_dev->priv; dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); - handle->reset_level = HNAE3_VF_RESET; + if (hdev->default_reset_request) + hdev->reset_level = + hclgevf_get_reset_level(hdev, + &hdev->default_reset_request); + else + hdev->reset_level = HNAE3_VF_FUNC_RESET; /* reset of this VF requested */ set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); hclgevf_reset_task_schedule(hdev); - handle->last_reset_time = jiffies; + hdev->last_reset_time = jiffies; +} + +static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + + set_bit(rst_type, &hdev->default_reset_request); +} + +static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) +{ +#define HCLGEVF_FLR_WAIT_MS 100 +#define HCLGEVF_FLR_WAIT_CNT 50 + struct hclgevf_dev *hdev = ae_dev->priv; + int cnt = 0; + + clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state); + clear_bit(HNAE3_FLR_DONE, &hdev->flr_state); + set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request); + hclgevf_reset_event(hdev->pdev, NULL); + + while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) && + cnt++ < HCLGEVF_FLR_WAIT_CNT) + msleep(HCLGEVF_FLR_WAIT_MS); + + if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state)) + dev_err(&hdev->pdev->dev, + "flr wait down timeout: %d\n", cnt); } static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) @@ -1321,9 +1446,15 @@ static void hclgevf_reset_service_task(struct work_struct *work) */ hdev->reset_attempts = 0; - ret = hclgevf_reset(hdev); - if (ret) - dev_err(&hdev->pdev->dev, "VF stack reset failed.\n"); + hdev->last_reset_time = jiffies; + while ((hdev->reset_type = + hclgevf_get_reset_level(hdev, &hdev->reset_pending)) + != HNAE3_NONE_RESET) { + ret = hclgevf_reset(hdev); + if (ret) + dev_err(&hdev->pdev->dev, + "VF stack reset failed %d.\n", ret); + } } else if 
(test_and_clear_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state)) { /* we could be here when either of below happens: @@ -1352,19 +1483,17 @@ static void hclgevf_reset_service_task(struct work_struct *work) */ if (hdev->reset_attempts > 3) { /* prepare for full reset of stack + pcie interface */ - hdev->nic.reset_level = HNAE3_VF_FULL_RESET; + set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); /* "defer" schedule the reset task again */ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); } else { hdev->reset_attempts++; - /* request PF for resetting this VF via mailbox */ - ret = hclgevf_do_reset(hdev); - if (ret) - dev_warn(&hdev->pdev->dev, - "VF rst fail, stack will call\n"); + set_bit(hdev->reset_level, &hdev->reset_pending); + set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); } + hclgevf_reset_task_schedule(hdev); } clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); @@ -1407,24 +1536,37 @@ static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); } -static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval) +static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, + u32 *clearval) { - u32 cmdq_src_reg; + u32 cmdq_src_reg, rst_ing_reg; /* fetch the events from their corresponding regs */ cmdq_src_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG); + if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) { + rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); + dev_info(&hdev->pdev->dev, + "receive reset interrupt 0x%x!\n", rst_ing_reg); + set_bit(HNAE3_VF_RESET, &hdev->reset_pending); + set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B); + *clearval = cmdq_src_reg; + return HCLGEVF_VECTOR0_EVENT_RST; + } + /* check for vector0 mailbox(=CMDQ RX) event source */ if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); *clearval = cmdq_src_reg; - return true; + return HCLGEVF_VECTOR0_EVENT_MBX; } dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n"); - return false; + return HCLGEVF_VECTOR0_EVENT_OTHER; } static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) @@ -1434,19 +1576,28 @@ static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) { + enum hclgevf_evt_cause event_cause; struct hclgevf_dev *hdev = data; u32 clearval; hclgevf_enable_vector(&hdev->misc_vector, false); - if (!hclgevf_check_event_cause(hdev, &clearval)) - goto skip_sched; - - hclgevf_mbx_handler(hdev); + event_cause = hclgevf_check_evt_cause(hdev, &clearval); - hclgevf_clear_event_cause(hdev, clearval); + switch (event_cause) { + case HCLGEVF_VECTOR0_EVENT_RST: + hclgevf_reset_task_schedule(hdev); + break; + case HCLGEVF_VECTOR0_EVENT_MBX: + hclgevf_mbx_handler(hdev); + break; + default: + break; + } -skip_sched: - hclgevf_enable_vector(&hdev->misc_vector, true); + if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { + hclgevf_clear_event_cause(hdev, clearval); + hclgevf_enable_vector(&hdev->misc_vector, true); + } return IRQ_HANDLED; } @@ -1566,21 +1717,7 @@ static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) static int hclgevf_ae_start(struct hnae3_handle *handle) { - struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - int 
i, queue_id; - - for (i = 0; i < kinfo->num_tqps; i++) { - /* ring enable */ - queue_id = hclgevf_get_queue_id(kinfo->tqp[i]); - if (queue_id < 0) { - dev_warn(&hdev->pdev->dev, - "Get invalid queue id, ignore it\n"); - continue; - } - - hclgevf_tqp_enable(hdev, queue_id, 0, true); - } /* reset tqp stats */ hclgevf_reset_tqp_stats(handle); @@ -1595,24 +1732,10 @@ static int hclgevf_ae_start(struct hnae3_handle *handle) static void hclgevf_ae_stop(struct hnae3_handle *handle) { - struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - int i, queue_id; set_bit(HCLGEVF_STATE_DOWN, &hdev->state); - for (i = 0; i < kinfo->num_tqps; i++) { - /* Ring disable */ - queue_id = hclgevf_get_queue_id(kinfo->tqp[i]); - if (queue_id < 0) { - dev_warn(&hdev->pdev->dev, - "Get invalid queue id, ignore it\n"); - continue; - } - - hclgevf_tqp_enable(hdev, queue_id, 0, false); - } - /* reset tqp stats */ hclgevf_reset_tqp_stats(handle); del_timer_sync(&hdev->service_timer); @@ -1623,10 +1746,6 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle) static void hclgevf_state_init(struct hclgevf_dev *hdev) { - /* if this is on going reset then skip this initialization */ - if (hclgevf_dev_ongoing_reset(hdev)) - return; - /* setup tasks for the MBX */ INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task); clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); @@ -1668,10 +1787,6 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev) int vectors; int i; - /* if this is on going reset then skip this initialization */ - if (hclgevf_dev_ongoing_reset(hdev)) - return 0; - if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) vectors = pci_alloc_irq_vectors(pdev, hdev->roce_base_msix_offset + 1, @@ -1710,6 +1825,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev) hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, sizeof(int), GFP_KERNEL); if (!hdev->vector_irq) { + devm_kfree(&pdev->dev, hdev->vector_status); pci_free_irq_vectors(pdev); return -ENOMEM; } @@ -1721,6 +1837,8 @@ static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; + devm_kfree(&pdev->dev, hdev->vector_status); + devm_kfree(&pdev->dev, hdev->vector_irq); pci_free_irq_vectors(pdev); } @@ -1728,10 +1846,6 @@ static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) { int ret = 0; - /* if this is on going reset then skip this initialization */ - if (hclgevf_dev_ongoing_reset(hdev)) - return 0; - hclgevf_get_misc_vector(hdev); ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, @@ -1861,14 +1975,6 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev) struct hclgevf_hw *hw; int ret; - /* check if we need to skip initialization of pci. This will happen if - * device is undergoing VF reset. Otherwise, we would need to - * re-initialize pci interface again i.e. when device is not going - * through *any* reset or actually undergoing full reset. 
- */ - if (hclgevf_dev_ongoing_reset(hdev)) - return 0; - ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "failed to enable PCI device\n"); @@ -1957,23 +2063,94 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) return 0; } -static int hclgevf_init_hdev(struct hclgevf_dev *hdev) +static int hclgevf_pci_reset(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int ret = 0; + + if (hdev->reset_type == HNAE3_VF_FULL_RESET && + test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { + hclgevf_misc_irq_uninit(hdev); + hclgevf_uninit_msi(hdev); + clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); + } + + if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { + pci_set_master(pdev); + ret = hclgevf_init_msi(hdev); + if (ret) { + dev_err(&pdev->dev, + "failed(%d) to init MSI/MSI-X\n", ret); + return ret; + } + + ret = hclgevf_misc_irq_init(hdev); + if (ret) { + hclgevf_uninit_msi(hdev); + dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", + ret); + return ret; + } + + set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); + } + + return ret; +} + +static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; int ret; - /* check if device is on-going full reset(i.e. pcie as well) */ - if (hclgevf_dev_ongoing_full_reset(hdev)) { - dev_warn(&pdev->dev, "device is going full reset\n"); - hclgevf_uninit_hdev(hdev); + ret = hclgevf_pci_reset(hdev); + if (ret) { + dev_err(&pdev->dev, "pci reset failed %d\n", ret); + return ret; + } + + ret = hclgevf_cmd_init(hdev); + if (ret) { + dev_err(&pdev->dev, "cmd failed %d\n", ret); + return ret; + } + + ret = hclgevf_rss_init_hw(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to initialize RSS\n", ret); + return ret; } + ret = hclgevf_init_vlan_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to initialize VLAN config\n", ret); + return ret; + } + + dev_info(&hdev->pdev->dev, "Reset done\n"); + + return 0; +} + +static int hclgevf_init_hdev(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int ret; + ret = hclgevf_pci_init(hdev); if (ret) { dev_err(&pdev->dev, "PCI initialization failed\n"); return ret; } + ret = hclgevf_cmd_queue_init(hdev); + if (ret) { + dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret); + goto err_cmd_queue_init; + } + ret = hclgevf_cmd_init(hdev); if (ret) goto err_cmd_init; @@ -1983,16 +2160,17 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) if (ret) { dev_err(&hdev->pdev->dev, "Query vf status error, ret = %d.\n", ret); - goto err_query_vf; + goto err_cmd_init; } ret = hclgevf_init_msi(hdev); if (ret) { dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); - goto err_query_vf; + goto err_cmd_init; } hclgevf_state_init(hdev); + hdev->reset_level = HNAE3_VF_FUNC_RESET; ret = hclgevf_misc_irq_init(hdev); if (ret) { @@ -2001,6 +2179,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_misc_irq_init; } + set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); + ret = hclgevf_configure(hdev); if (ret) { dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); @@ -2034,6 +2214,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } + hdev->last_reset_time = jiffies; pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME); return 0; @@ -2043,20 +2224,25 @@ err_config: err_misc_irq_init: hclgevf_state_uninit(hdev); hclgevf_uninit_msi(hdev); -err_query_vf: - hclgevf_cmd_uninit(hdev); err_cmd_init: + hclgevf_cmd_uninit(hdev); +err_cmd_queue_init: 
hclgevf_pci_uninit(hdev); + clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); return ret; } static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) { hclgevf_state_uninit(hdev); - hclgevf_misc_irq_uninit(hdev); + + if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { + hclgevf_misc_irq_uninit(hdev); + hclgevf_uninit_msi(hdev); + hclgevf_pci_uninit(hdev); + } + hclgevf_cmd_uninit(hdev); - hclgevf_uninit_msi(hdev); - hclgevf_pci_uninit(hdev); } static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) @@ -2159,9 +2345,32 @@ static void hclgevf_get_media_type(struct hnae3_handle *handle, *media_type = hdev->hw.mac.media_type; } +static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); +} + +static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); +} + +static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hdev->reset_count; +} + static const struct hnae3_ae_ops hclgevf_ops = { .init_ae_dev = hclgevf_init_ae_dev, .uninit_ae_dev = hclgevf_uninit_ae_dev, + .flr_prepare = hclgevf_flr_prepare, + .flr_done = hclgevf_flr_done, .init_client_instance = hclgevf_init_client_instance, .uninit_client_instance = hclgevf_uninit_client_instance, .start = hclgevf_ae_start, @@ -2193,11 +2402,15 @@ static const struct hnae3_ae_ops hclgevf_ops = { .set_vlan_filter = hclgevf_set_vlan_filter, .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, .reset_event = hclgevf_reset_event, + .set_default_reset_request = hclgevf_set_def_reset_request, .get_channels = hclgevf_get_channels, .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, .get_status = hclgevf_get_status, .get_ksettings_an_result = hclgevf_get_ksettings_an_result, .get_media_type = hclgevf_get_media_type, + .get_hw_reset_stat = hclgevf_get_hw_reset_stat, + .ae_dev_resetting = hclgevf_ae_dev_resetting, + .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, }; static struct hnae3_ae_algo ae_algovf = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index aed241e8ffab..4c5ea7edcbb1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -31,11 +31,19 @@ #define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100 /* CMDQ register bits for RX event(=MBX event) */ #define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1 +/* RST register bits for RESET event */ +#define HCLGEVF_VECTOR0_RST_INT_B 2 #define HCLGEVF_TQP_RESET_TRY_TIMES 10 /* Reset related Registers */ -#define HCLGEVF_FUN_RST_ING 0x20C00 -#define HCLGEVF_FUN_RST_ING_B 0 +#define HCLGEVF_RST_ING 0x20C00 +#define HCLGEVF_FUN_RST_ING_BIT BIT(0) +#define HCLGEVF_GLOBAL_RST_ING_BIT BIT(5) +#define HCLGEVF_CORE_RST_ING_BIT BIT(6) +#define HCLGEVF_IMP_RST_ING_BIT BIT(7) +#define HCLGEVF_RST_ING_BITS \ + (HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \ + HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT) #define HCLGEVF_RSS_IND_TBL_SIZE 512 #define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff @@ -54,17 +62,25 @@ #define HCLGEVF_S_IP_BIT BIT(3) #define HCLGEVF_V_TAG_BIT BIT(4) +enum hclgevf_evt_cause { + HCLGEVF_VECTOR0_EVENT_RST, + HCLGEVF_VECTOR0_EVENT_MBX, + HCLGEVF_VECTOR0_EVENT_OTHER, +}; + /* states of hclgevf device & 
tasks */ enum hclgevf_states { /* device states */ HCLGEVF_STATE_DOWN, HCLGEVF_STATE_DISABLED, + HCLGEVF_STATE_IRQ_INITED, /* task states */ HCLGEVF_STATE_SERVICE_SCHED, HCLGEVF_STATE_RST_SERVICE_SCHED, HCLGEVF_STATE_RST_HANDLING, HCLGEVF_STATE_MBX_SERVICE_SCHED, HCLGEVF_STATE_MBX_HANDLING, + HCLGEVF_STATE_CMD_DISABLE, }; #define HCLGEVF_MPF_ENBALE 1 @@ -145,10 +161,17 @@ struct hclgevf_dev { struct hclgevf_misc_vector misc_vector; struct hclgevf_rss_cfg rss_cfg; unsigned long state; + unsigned long flr_state; + unsigned long default_reset_request; + unsigned long last_reset_time; + enum hnae3_reset_type reset_level; + unsigned long reset_pending; + enum hnae3_reset_type reset_type; #define HCLGEVF_RESET_REQUESTED 0 #define HCLGEVF_RESET_PENDING 1 unsigned long reset_state; /* requested, pending */ + unsigned long reset_count; /* the number of reset has been done */ u32 reset_attempts; u32 fw_version; @@ -192,18 +215,9 @@ struct hclgevf_dev { u32 flag; }; -static inline bool hclgevf_dev_ongoing_reset(struct hclgevf_dev *hdev) -{ - return (hdev && - (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) && - (hdev->nic.reset_level == HNAE3_VF_RESET)); -} - -static inline bool hclgevf_dev_ongoing_full_reset(struct hclgevf_dev *hdev) +static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev) { - return (hdev && - (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) && - (hdev->nic.reset_level == HNAE3_VF_FULL_RESET)); + return !!hdev->reset_pending; } int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index e9d5a4f96304..ef9c8e6eca28 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -40,6 +40,9 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, } while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) { + if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) + return -EIO; + udelay(HCLGEVF_SLEEP_USCOEND); i++; } @@ -148,6 +151,11 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) crq = &hdev->hw.cmq.crq; while (!hclgevf_cmd_crq_empty(&hdev->hw)) { + if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) { + dev_info(&hdev->pdev->dev, "vf crq need init\n"); + return; + } + desc = &crq->desc[crq->next_to_use]; req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data; @@ -233,6 +241,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) { + enum hnae3_reset_type reset_type; u16 link_status; u16 *msg_q; u8 duplex; @@ -248,6 +257,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) /* process all the async queue messages */ while (tail != hdev->arq.head) { + if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) { + dev_info(&hdev->pdev->dev, + "vf crq need init in async\n"); + return; + } + msg_q = hdev->arq.msg_q[hdev->arq.head]; switch (msg_q[0]) { @@ -267,7 +282,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) * has been completely reset. After this stack should * eventually be re-initialized. 
*/ - hdev->nic.reset_level = HNAE3_VF_RESET; + reset_type = le16_to_cpu(msg_q[1]); + set_bit(reset_type, &hdev->reset_pending); set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); hclgevf_reset_task_schedule(hdev); diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 760b2ad8e295..209255495bc9 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2455,7 +2455,8 @@ static void emac_adjust_link(struct net_device *ndev) dev->phy.duplex = phy->duplex; dev->phy.pause = phy->pause; dev->phy.asym_pause = phy->asym_pause; - dev->phy.advertising = phy->advertising; + ethtool_convert_link_mode_to_legacy_u32(&dev->phy.advertising, + phy->advertising); } static int emac_mii_bus_read(struct mii_bus *bus, int addr, int regnum) @@ -2490,7 +2491,8 @@ static int emac_mdio_phy_start_aneg(struct mii_phy *phy, phy_dev->autoneg = phy->autoneg; phy_dev->speed = phy->speed; phy_dev->duplex = phy->duplex; - phy_dev->advertising = phy->advertising; + ethtool_convert_legacy_u32_to_link_mode(phy_dev->advertising, + phy->advertising); return phy_start_aneg(phy_dev); } @@ -2624,7 +2626,8 @@ static int emac_dt_phy_connect(struct emac_instance *dev, dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask; dev->phy.def->name = dev->phy_dev->drv->name; dev->phy.def->ops = &emac_dt_mdio_phy_ops; - dev->phy.features = dev->phy_dev->supported; + ethtool_convert_link_mode_to_legacy_u32(&dev->phy.features, + dev->phy_dev->supported); dev->phy.address = dev->phy_dev->mdio.addr; dev->phy.mode = dev->phy_dev->interface; return 0; diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index c760dc72c520..be13227f1697 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -505,6 +505,9 @@ extern const struct e1000_info e1000_es2_info; void e1000e_ptp_init(struct e1000_adapter *adapter); void e1000e_ptp_remove(struct e1000_adapter *adapter); +u64 e1000e_read_systim(struct e1000_adapter *adapter, + struct ptp_system_timestamp *sts); + static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) { return hw->phy.ops.reset(hw); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 16a73bd9f4cb..59bd587d809d 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -4319,13 +4319,16 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter) /** * e1000e_sanitize_systim - sanitize raw cycle counter reads * @hw: pointer to the HW structure - * @systim: time value read, sanitized and returned + * @systim: PHC time value read, sanitized and returned + * @sts: structure to hold system time before and after reading SYSTIML, + * may be NULL * * Errata for 82574/82583 possible bad bits read from SYSTIMH/L: * check to see that the time is incrementing at a reasonable * rate and is a multiple of incvalue. 
**/ -static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim) +static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim, + struct ptp_system_timestamp *sts) { u64 time_delta, rem, temp; u64 systim_next; @@ -4335,7 +4338,9 @@ static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim) incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK; for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) { /* latch SYSTIMH on read of SYSTIML */ + ptp_read_system_prets(sts); systim_next = (u64)er32(SYSTIML); + ptp_read_system_postts(sts); systim_next |= (u64)er32(SYSTIMH) << 32; time_delta = systim_next - systim; @@ -4353,15 +4358,16 @@ static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim) } /** - * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) - * @cc: cyclecounter structure + * e1000e_read_systim - read SYSTIM register + * @adapter: board private structure + * @sts: structure which will contain system time before and after reading + * SYSTIML, may be NULL **/ -static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc) +u64 e1000e_read_systim(struct e1000_adapter *adapter, + struct ptp_system_timestamp *sts) { - struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, - cc); struct e1000_hw *hw = &adapter->hw; - u32 systimel, systimeh; + u32 systimel, systimel_2, systimeh; u64 systim; /* SYSTIMH latching upon SYSTIML read does not work well. * This means that if SYSTIML overflows after we read it but before @@ -4369,11 +4375,15 @@ static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc) * will experience a huge non linear increment in the systime value * to fix that we test for overflow and if true, we re-read systime. */ + ptp_read_system_prets(sts); systimel = er32(SYSTIML); + ptp_read_system_postts(sts); systimeh = er32(SYSTIMH); /* Is systimel is so large that overflow is possible? 
*/ if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) { - u32 systimel_2 = er32(SYSTIML); + ptp_read_system_prets(sts); + systimel_2 = er32(SYSTIML); + ptp_read_system_postts(sts); if (systimel > systimel_2) { /* There was an overflow, read again SYSTIMH, and use * systimel_2 @@ -4386,12 +4396,24 @@ static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc) systim |= (u64)systimeh << 32; if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW) - systim = e1000e_sanitize_systim(hw, systim); + systim = e1000e_sanitize_systim(hw, systim, sts); return systim; } /** + * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) + * @cc: cyclecounter structure + **/ +static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc) +{ + struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, + cc); + + return e1000e_read_systim(adapter, NULL); +} + +/** * e1000_sw_init - Initialize general software structures (struct e1000_adapter) * @adapter: board private structure to initialize * diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 37c76945ad9b..1a4c65d9feb4 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -161,22 +161,30 @@ static int e1000e_phc_getcrosststamp(struct ptp_clock_info *ptp, #endif/*CONFIG_E1000E_HWTS*/ /** - * e1000e_phc_gettime - Reads the current time from the hardware clock + * e1000e_phc_gettimex - Reads the current time from the hardware clock and + * system clock * @ptp: ptp clock structure - * @ts: timespec structure to hold the current time value + * @ts: timespec structure to hold the current PHC time + * @sts: structure to hold the current system time * * Read the timecounter and return the correct value in ns after converting * it into a struct timespec. 
**/ -static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +static int e1000e_phc_gettimex(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) { struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, ptp_clock_info); unsigned long flags; - u64 ns; + u64 cycles, ns; spin_lock_irqsave(&adapter->systim_lock, flags); - ns = timecounter_read(&adapter->tc); + + /* NOTE: Non-monotonic SYSTIM readings may be returned */ + cycles = e1000e_read_systim(adapter, sts); + ns = timecounter_cyc2time(&adapter->tc, cycles); + spin_unlock_irqrestore(&adapter->systim_lock, flags); *ts = ns_to_timespec64(ns); @@ -232,9 +240,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work) systim_overflow_work.work); struct e1000_hw *hw = &adapter->hw; struct timespec64 ts; + u64 ns; - adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts); + /* Update the timecounter */ + ns = timecounter_read(&adapter->tc); + ts = ns_to_timespec64(ns); e_dbg("SYSTIM overflow check at %lld.%09lu\n", (long long) ts.tv_sec, ts.tv_nsec); @@ -251,7 +262,7 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = { .pps = 0, .adjfreq = e1000e_phc_adjfreq, .adjtime = e1000e_phc_adjtime, - .gettime64 = e1000e_phc_gettime, + .gettimex64 = e1000e_phc_gettimex, .settime64 = e1000e_phc_settime, .enable = e1000e_phc_enable, }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 9f8464f80783..9c1211ad2c6b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2377,7 +2377,8 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return -EOPNOTSUPP; /* only magic packet is supported */ - if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)) + if (wol->wolopts && (wol->wolopts != WAKE_MAGIC) + | (wol->wolopts != WAKE_FILTER)) return -EOPNOTSUPP; /* is this a new value? */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index aef3c89ee79c..1384a5a006a4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3473,6 +3473,8 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size, td_tag); + skb_tx_timestamp(skb); + /* Force memory writes to complete before letting h/w know there * are new descriptors to fetch. * @@ -3652,8 +3654,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (tsyn) tx_flags |= I40E_TX_FLAGS_TSYN; - skb_tx_timestamp(skb); - /* always enable CRC insertion offload */ td_cmd |= I40E_TX_DESC_CMD_ICRC; diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index fb9bfad96daf..3b1dc77ae368 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -2343,6 +2343,8 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size, td_tag); + skb_tx_timestamp(skb); + /* Force memory writes to complete before letting h/w know there * are new descriptors to fetch. 
* @@ -2461,8 +2463,6 @@ static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb, if (tso < 0) goto out_drop; - skb_tx_timestamp(skb); - /* always enable CRC insertion offload */ td_cmd |= IAVF_TX_DESC_CMD_ICRC; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 5acf3b743876..c57671068245 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2113,7 +2113,7 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct igb_adapter *adapter = netdev_priv(netdev); - if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)) + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER)) return -EOPNOTSUPP; if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 5df88ad8ac81..4584ebc9e8fe 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6019,6 +6019,8 @@ static int igb_tx_map(struct igb_ring *tx_ring, /* set the timestamp */ first->time_stamp = jiffies; + skb_tx_timestamp(skb); + /* Force memory writes to complete before letting h/w know there * are new descriptors to fetch. (Only applicable for weak-ordered * memory model archs, such as IA-64). @@ -6147,8 +6149,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, else if (!tso) igb_tx_csum(tx_ring, first); - skb_tx_timestamp(skb); - if (igb_tx_map(tx_ring, first, hdr_len)) goto cleanup_tx_tstamp; diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 2b95dc9c7a6a..fd3071f55bd3 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -277,17 +277,53 @@ static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta) return 0; } -static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp, - struct timespec64 *ts) +static int igb_ptp_gettimex_82576(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) { struct igb_adapter *igb = container_of(ptp, struct igb_adapter, ptp_caps); + struct e1000_hw *hw = &igb->hw; unsigned long flags; + u32 lo, hi; u64 ns; spin_lock_irqsave(&igb->tmreg_lock, flags); - ns = timecounter_read(&igb->tc); + ptp_read_system_prets(sts); + lo = rd32(E1000_SYSTIML); + ptp_read_system_postts(sts); + hi = rd32(E1000_SYSTIMH); + + ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int igb_ptp_gettimex_82580(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + unsigned long flags; + u32 lo, hi; + u64 ns; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + ptp_read_system_prets(sts); + rd32(E1000_SYSTIMR); + ptp_read_system_postts(sts); + lo = rd32(E1000_SYSTIML); + hi = rd32(E1000_SYSTIMH); + + ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo); spin_unlock_irqrestore(&igb->tmreg_lock, flags); @@ -296,16 +332,22 @@ static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp, return 0; } -static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp, - struct timespec64 *ts) +static int igb_ptp_gettimex_i210(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) { struct igb_adapter *igb = 
container_of(ptp, struct igb_adapter, ptp_caps); + struct e1000_hw *hw = &igb->hw; unsigned long flags; spin_lock_irqsave(&igb->tmreg_lock, flags); - igb_ptp_read_i210(igb, ts); + ptp_read_system_prets(sts); + rd32(E1000_SYSTIMR); + ptp_read_system_postts(sts); + ts->tv_nsec = rd32(E1000_SYSTIML); + ts->tv_sec = rd32(E1000_SYSTIMH); spin_unlock_irqrestore(&igb->tmreg_lock, flags); @@ -658,9 +700,12 @@ static void igb_ptp_overflow_check(struct work_struct *work) struct igb_adapter *igb = container_of(work, struct igb_adapter, ptp_overflow_work.work); struct timespec64 ts; + u64 ns; - igb->ptp_caps.gettime64(&igb->ptp_caps, &ts); + /* Update the timecounter */ + ns = timecounter_read(&igb->tc); + ts = ns_to_timespec64(ns); pr_debug("igb overflow check at %lld.%09lu\n", (long long) ts.tv_sec, ts.tv_nsec); @@ -1126,7 +1171,7 @@ void igb_ptp_init(struct igb_adapter *adapter) adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576; adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; - adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576; + adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82576; adapter->ptp_caps.settime64 = igb_ptp_settime_82576; adapter->ptp_caps.enable = igb_ptp_feature_enable; adapter->cc.read = igb_ptp_read_82576; @@ -1145,7 +1190,7 @@ void igb_ptp_init(struct igb_adapter *adapter) adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580; adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; - adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576; + adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82580; adapter->ptp_caps.settime64 = igb_ptp_settime_82576; adapter->ptp_caps.enable = igb_ptp_feature_enable; adapter->cc.read = igb_ptp_read_82580; @@ -1173,7 +1218,7 @@ void igb_ptp_init(struct igb_adapter *adapter) adapter->ptp_caps.pin_config = adapter->sdp_config; adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580; adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210; - adapter->ptp_caps.gettime64 = igb_ptp_gettime_i210; + adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_i210; adapter->ptp_caps.settime64 = igb_ptp_settime_i210; adapter->ptp_caps.enable = igb_ptp_feature_enable_i210; adapter->ptp_caps.verify = igb_ptp_verify_pin; diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c index 163e5838f7c2..a3cd7ac48d4b 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.c +++ b/drivers/net/ethernet/intel/igbvf/mbx.c @@ -241,7 +241,7 @@ static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) s32 err; u16 i; - WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock)); + lockdep_assert_held(&hw->mbx_lock); /* lock the mailbox to prevent pf/vf race condition */ err = e1000_obtain_mbx_lock_vf(hw); @@ -279,7 +279,7 @@ static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) s32 err; u16 i; - WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock)); + lockdep_assert_held(&hw->mbx_lock); /* lock the mailbox to prevent pf/vf race condition */ err = e1000_obtain_mbx_lock_vf(hw); diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index cdf18a5d9e08..3b00b109b34a 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -5,23 +5,14 @@ #define _IGC_H_ #include <linux/kobject.h> - #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> - #include <linux/ethtool.h> - #include <linux/sctp.h> #define IGC_ERR(args...) 
pr_err("igc: " args) -#define PFX "igc: " - -#include <linux/timecounter.h> -#include <linux/net_tstamp.h> -#include <linux/ptp_clock_kernel.h> - #include "igc_hw.h" /* main */ diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index 832da609d9a7..df40af759542 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -237,7 +237,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw) { struct igc_phy_info *phy = &hw->phy; s32 ret_val = 0; - u32 ctrl_ext; if (hw->phy.media_type != igc_media_type_copper) { phy->type = igc_phy_none; @@ -247,8 +246,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw) phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500; phy->reset_delay_us = 100; - ctrl_ext = rd32(IGC_CTRL_EXT); - /* set lan id */ hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >> IGC_STATUS_FUNC_SHIFT; @@ -287,8 +284,6 @@ out: static s32 igc_get_invariants_base(struct igc_hw *hw) { struct igc_mac_info *mac = &hw->mac; - u32 link_mode = 0; - u32 ctrl_ext = 0; s32 ret_val = 0; switch (hw->device_id) { @@ -302,9 +297,6 @@ static s32 igc_get_invariants_base(struct igc_hw *hw) hw->phy.media_type = igc_media_type_copper; - ctrl_ext = rd32(IGC_CTRL_EXT); - link_mode = ctrl_ext & IGC_CTRL_EXT_LINK_MODE_MASK; - /* mac initialization and operations */ ret_val = igc_init_mac_params_base(hw); if (ret_val) diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 9d85707e8a81..d002055c0623 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -865,6 +865,8 @@ static int igc_tx_map(struct igc_ring *tx_ring, /* set the timestamp */ first->time_stamp = jiffies; + skb_tx_timestamp(skb); + /* Force memory writes to complete before letting h/w know there * are new descriptors to fetch. (Only applicable for weak-ordered * memory model archs, such as IA-64). 
@@ -959,8 +961,6 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, first->bytecount = skb->len; first->gso_segs = 1; - skb_tx_timestamp(skb); - /* record initial flags and protocol */ first->tx_flags = tx_flags; first->protocol = protocol; @@ -1108,7 +1108,7 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, /* update pointers within the skb to store the data */ skb_reserve(skb, IGC_SKB_PAD); - __skb_put(skb, size); + __skb_put(skb, size); /* update buffer offset */ #if (PAGE_SIZE < 8192) @@ -1160,9 +1160,9 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, (va + headlen) - page_address(rx_buffer->page), size, truesize); #if (PAGE_SIZE < 8192) - rx_buffer->page_offset ^= truesize; + rx_buffer->page_offset ^= truesize; #else - rx_buffer->page_offset += truesize; + rx_buffer->page_offset += truesize; #endif } else { rx_buffer->pagecnt_bias++; @@ -1668,8 +1668,8 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) tx_buffer->next_to_watch, jiffies, tx_buffer->next_to_watch->wb.status); - netif_stop_subqueue(tx_ring->netdev, - tx_ring->queue_index); + netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); /* we are about to reset, no point in enabling stuff */ return true; @@ -1700,20 +1700,6 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) } /** - * igc_ioctl - I/O control method - * @netdev: network interface device structure - * @ifreq: frequency - * @cmd: command - */ -static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -{ - switch (cmd) { - default: - return -EOPNOTSUPP; - } -} - -/** * igc_up - Open the interface and prepare it to handle traffic * @adapter: board private structure */ @@ -3358,7 +3344,7 @@ static int __igc_open(struct net_device *netdev, bool resuming) goto err_req_irq; /* Notify the stack of the actual queue counts. 
*/ - netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); if (err) goto err_set_queues; @@ -3445,7 +3431,6 @@ static const struct net_device_ops igc_netdev_ops = { .ndo_set_mac_address = igc_set_mac, .ndo_change_mtu = igc_change_mtu, .ndo_get_stats = igc_get_stats, - .ndo_do_ioctl = igc_ioctl, }; /* PCIe configuration access */ @@ -3532,19 +3517,16 @@ static int igc_probe(struct pci_dev *pdev, struct net_device *netdev; struct igc_hw *hw; const struct igc_info *ei = igc_info_tbl[ent->driver_data]; - int err, pci_using_dac; + int err; err = pci_enable_device_mem(pdev); if (err) return err; - pci_using_dac = 0; err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); if (!err) { err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (!err) - pci_using_dac = 1; } else { err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 732b1e6ecc43..acba067cc15a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2206,7 +2206,8 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE | + WAKE_FILTER)) return -EOPNOTSUPP; if (ixgbe_wol_exclusion(adapter, wol)) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index fd1b0546fd67..4d77f42e035c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -4,6 +4,7 @@ #include "ixgbe.h" #include <net/xfrm.h> #include <crypto/aead.h> +#include <linux/if_bridge.h> #define IXGBE_IPSEC_KEY_BITS 160 static const char aes_gcm_name[] = "rfc4106(gcm(aes))"; @@ -693,7 +694,8 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) } else { struct tx_sa tsa; - if (adapter->num_vfs) + if (adapter->num_vfs && + adapter->bridge_mode != BRIDGE_MODE_VEPA) return -EOPNOTSUPP; /* find the first unused index */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 113b38e0defb..cfb83687c3d8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8269,6 +8269,8 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring, /* set the timestamp */ first->time_stamp = jiffies; + skb_tx_timestamp(skb); + /* * Force memory writes to complete before letting h/w know there * are new descriptors to fetch. 
(Only applicable for weak-ordered @@ -8646,8 +8648,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, } } - skb_tx_timestamp(skb); - #ifdef CONFIG_PCI_IOV /* * Use the l2switch_enable flag - would be false if the DMA @@ -10517,7 +10517,8 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring) ixgbe_configure_rx_ring(adapter, rx_ring); clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state); - clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state); + if (xdp_ring) + clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state); } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index b3e0d8bb5cbd..d81a50dc9535 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -443,22 +443,52 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) } /** - * ixgbe_ptp_gettime + * ixgbe_ptp_gettimex * @ptp: the ptp clock structure - * @ts: timespec structure to hold the current time value + * @ts: timespec to hold the PHC timestamp + * @sts: structure to hold the system time before and after reading the PHC * * read the timecounter and return the correct value on ns, * after converting it into a struct timespec. */ -static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +static int ixgbe_ptp_gettimex(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); + struct ixgbe_hw *hw = &adapter->hw; unsigned long flags; - u64 ns; + u64 ns, stamp; spin_lock_irqsave(&adapter->tmreg_lock, flags); - ns = timecounter_read(&adapter->hw_tc); + + switch (adapter->hw.mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + /* Upper 32 bits represent billions of cycles, lower 32 bits + * represent cycles. However, we use timespec64_to_ns for the + * correct math even though the units haven't been corrected + * yet. 
+ */ + ptp_read_system_prets(sts); + IXGBE_READ_REG(hw, IXGBE_SYSTIMR); + ptp_read_system_postts(sts); + ts->tv_nsec = IXGBE_READ_REG(hw, IXGBE_SYSTIML); + ts->tv_sec = IXGBE_READ_REG(hw, IXGBE_SYSTIMH); + stamp = timespec64_to_ns(ts); + break; + default: + ptp_read_system_prets(sts); + stamp = IXGBE_READ_REG(hw, IXGBE_SYSTIML); + ptp_read_system_postts(sts); + stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; + break; + } + + ns = timecounter_cyc2time(&adapter->hw_tc, stamp); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); *ts = ns_to_timespec64(ns); @@ -567,10 +597,14 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter) { bool timeout = time_is_before_jiffies(adapter->last_overflow_check + IXGBE_OVERFLOW_PERIOD); - struct timespec64 ts; + unsigned long flags; if (timeout) { - ixgbe_ptp_gettime(&adapter->ptp_caps, &ts); + /* Update the timecounter */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_read(&adapter->hw_tc); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + adapter->last_overflow_check = jiffies; } } @@ -1216,7 +1250,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.pps = 1; adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; - adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; + adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex; adapter->ptp_caps.settime64 = ixgbe_ptp_settime; adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_x540; @@ -1233,7 +1267,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; - adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; + adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex; adapter->ptp_caps.settime64 = ixgbe_ptp_settime; adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; break; @@ -1249,7 +1283,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; - adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; + adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex; adapter->ptp_caps.settime64 = ixgbe_ptp_settime; adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; adapter->ptp_setup_sdp = NULL; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 5e47ede7e832..196b890467b2 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -4016,6 +4016,8 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, /* set the timestamp */ first->time_stamp = jiffies; + skb_tx_timestamp(skb); + /* Force memory writes to complete before letting h/w know there * are new descriptors to fetch. (Only applicable for weak-ordered * memory model archs, such as IA-64). diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 1e9bcbdc6a90..2f427271a793 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1499,23 +1499,16 @@ mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp, struct ethtool_link_ksettings *cmd) { struct net_device *dev = mp->dev; - u32 supported, advertising; phy_ethtool_ksettings_get(dev->phydev, cmd); /* * The MAC does not support 1000baseT_Half. 
*/ - ethtool_convert_link_mode_to_legacy_u32(&supported, - cmd->link_modes.supported); - ethtool_convert_link_mode_to_legacy_u32(&advertising, - cmd->link_modes.advertising); - supported &= ~SUPPORTED_1000baseT_Half; - advertising &= ~ADVERTISED_1000baseT_Half; - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + cmd->link_modes.supported); + linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + cmd->link_modes.advertising); return 0; } @@ -3031,10 +3024,12 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) phy->autoneg = AUTONEG_ENABLE; phy->speed = 0; phy->duplex = 0; - phy->advertising = phy->supported | ADVERTISED_Autoneg; + linkmode_copy(phy->advertising, phy->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phy->advertising); } else { phy->autoneg = AUTONEG_DISABLE; - phy->advertising = 0; + linkmode_zero(phy->advertising); phy->speed = speed; phy->duplex = duplex; } diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 697d9b374f5e..c7cd0081058e 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -2485,13 +2485,11 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2, skb->ip_summed = re->skb->ip_summed; skb->csum = re->skb->csum; skb_copy_hash(skb, re->skb); - skb->vlan_proto = re->skb->vlan_proto; - skb->vlan_tci = re->skb->vlan_tci; + __vlan_hwaccel_copy_tag(skb, re->skb); pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr, length, PCI_DMA_FROMDEVICE); - re->skb->vlan_proto = 0; - re->skb->vlan_tci = 0; + __vlan_hwaccel_clear_tag(re->skb); skb_clear_hash(re->skb); re->skb->ip_summed = CHECKSUM_NONE; skb_put(skb, length); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 7dbfdac4067a..399f565dd85a 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -243,7 +243,7 @@ static void mtk_phy_link_adjust(struct net_device *dev) if (dev->phydev->asym_pause) rmt_adv |= LPA_PAUSE_ASYM; - lcl_adv = ethtool_adv_to_lcl_adv_t(dev->phydev->advertising); + lcl_adv = linkmode_adv_to_lcl_adv_t(dev->phydev->advertising); flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); if (flowctrl & FLOW_CTRL_TX) @@ -353,8 +353,9 @@ static int mtk_phy_connect(struct net_device *dev) phy_set_max_speed(dev->phydev, SPEED_1000); phy_support_asym_pause(dev->phydev); - dev->phydev->advertising = dev->phydev->supported | - ADVERTISED_Autoneg; + linkmode_copy(dev->phydev->advertising, dev->phydev->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + dev->phydev->advertising); phy_start_aneg(dev->phydev); of_node_put(np); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index db00bf1c23f5..fd09ba98c0a6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -875,7 +875,7 @@ csum_none: skb->data_len = length; napi_gro_frags(&cq->napi); } else { - skb->vlan_tci = 0; + __vlan_hwaccel_clear_tag(skb); skb_clear_hash(skb); } next: diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index 4afb10375397..190e8b56a41f 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -57,6 
+57,7 @@ endif ifeq ($(CONFIG_NFP_APP_ABM_NIC),y) nfp-objs += \ abm/ctrl.o \ + abm/qdisc.o \ abm/main.o endif diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index c0830c0c2c3f..3d15de0ae271 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -7,9 +7,6 @@ #include <linux/netdevice.h> #include <linux/rcupdate.h> #include <linux/slab.h> -#include <net/pkt_cls.h> -#include <net/pkt_sched.h> -#include <net/red.h> #include "../nfpcore/nfp.h" #include "../nfpcore/nfp_cpp.h" @@ -28,269 +25,6 @@ static u32 nfp_abm_portid(enum nfp_repr_type rtype, unsigned int id) } static int -__nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink, - u32 handle, unsigned int qs, u32 init_val) -{ - struct nfp_port *port = nfp_port_from_netdev(netdev); - int ret; - - ret = nfp_abm_ctrl_set_all_q_lvls(alink, init_val); - memset(alink->qdiscs, 0, sizeof(*alink->qdiscs) * alink->num_qdiscs); - - alink->parent = handle; - alink->num_qdiscs = qs; - port->tc_offload_cnt = qs; - - return ret; -} - -static void -nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink, - u32 handle, unsigned int qs) -{ - __nfp_abm_reset_root(netdev, alink, handle, qs, ~0); -} - -static int -nfp_abm_red_find(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) -{ - unsigned int i = TC_H_MIN(opt->parent) - 1; - - if (opt->parent == TC_H_ROOT) - i = 0; - else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) - i = TC_H_MIN(opt->parent) - 1; - else - return -EOPNOTSUPP; - - if (i >= alink->num_qdiscs || opt->handle != alink->qdiscs[i].handle) - return -EOPNOTSUPP; - - return i; -} - -static void -nfp_abm_red_destroy(struct net_device *netdev, struct nfp_abm_link *alink, - u32 handle) -{ - unsigned int i; - - for (i = 0; i < alink->num_qdiscs; i++) - if (handle == alink->qdiscs[i].handle) - break; - if (i == alink->num_qdiscs) - return; - - if (alink->parent == TC_H_ROOT) { - nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0); - } else { - nfp_abm_ctrl_set_q_lvl(alink, i, ~0); - memset(&alink->qdiscs[i], 0, sizeof(*alink->qdiscs)); - } -} - -static int -nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink, - struct tc_red_qopt_offload *opt) -{ - bool existing; - int i, err; - - i = nfp_abm_red_find(alink, opt); - existing = i >= 0; - - if (opt->set.min != opt->set.max || !opt->set.is_ecn) { - nfp_warn(alink->abm->app->cpp, - "RED offload failed - unsupported parameters\n"); - err = -EINVAL; - goto err_destroy; - } - - if (existing) { - if (alink->parent == TC_H_ROOT) - err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min); - else - err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min); - if (err) - goto err_destroy; - return 0; - } - - if (opt->parent == TC_H_ROOT) { - i = 0; - err = __nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 1, - opt->set.min); - } else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) { - i = TC_H_MIN(opt->parent) - 1; - err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min); - } else { - return -EINVAL; - } - /* Set the handle to try full clean up, in case IO failed */ - alink->qdiscs[i].handle = opt->handle; - if (err) - goto err_destroy; - - if (opt->parent == TC_H_ROOT) - err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[i].stats); - else - err = nfp_abm_ctrl_read_q_stats(alink, i, - &alink->qdiscs[i].stats); - if (err) - goto err_destroy; - - if (opt->parent == TC_H_ROOT) - err = nfp_abm_ctrl_read_xstats(alink, - 
&alink->qdiscs[i].xstats); - else - err = nfp_abm_ctrl_read_q_xstats(alink, i, - &alink->qdiscs[i].xstats); - if (err) - goto err_destroy; - - alink->qdiscs[i].stats.backlog_pkts = 0; - alink->qdiscs[i].stats.backlog_bytes = 0; - - return 0; -err_destroy: - /* If the qdisc keeps on living, but we can't offload undo changes */ - if (existing) { - opt->set.qstats->qlen -= alink->qdiscs[i].stats.backlog_pkts; - opt->set.qstats->backlog -= - alink->qdiscs[i].stats.backlog_bytes; - } - nfp_abm_red_destroy(netdev, alink, opt->handle); - - return err; -} - -static void -nfp_abm_update_stats(struct nfp_alink_stats *new, struct nfp_alink_stats *old, - struct tc_qopt_offload_stats *stats) -{ - _bstats_update(stats->bstats, new->tx_bytes - old->tx_bytes, - new->tx_pkts - old->tx_pkts); - stats->qstats->qlen += new->backlog_pkts - old->backlog_pkts; - stats->qstats->backlog += new->backlog_bytes - old->backlog_bytes; - stats->qstats->overlimits += new->overlimits - old->overlimits; - stats->qstats->drops += new->drops - old->drops; -} - -static int -nfp_abm_red_stats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) -{ - struct nfp_alink_stats *prev_stats; - struct nfp_alink_stats stats; - int i, err; - - i = nfp_abm_red_find(alink, opt); - if (i < 0) - return i; - prev_stats = &alink->qdiscs[i].stats; - - if (alink->parent == TC_H_ROOT) - err = nfp_abm_ctrl_read_stats(alink, &stats); - else - err = nfp_abm_ctrl_read_q_stats(alink, i, &stats); - if (err) - return err; - - nfp_abm_update_stats(&stats, prev_stats, &opt->stats); - - *prev_stats = stats; - - return 0; -} - -static int -nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) -{ - struct nfp_alink_xstats *prev_xstats; - struct nfp_alink_xstats xstats; - int i, err; - - i = nfp_abm_red_find(alink, opt); - if (i < 0) - return i; - prev_xstats = &alink->qdiscs[i].xstats; - - if (alink->parent == TC_H_ROOT) - err = nfp_abm_ctrl_read_xstats(alink, &xstats); - else - err = nfp_abm_ctrl_read_q_xstats(alink, i, &xstats); - if (err) - return err; - - opt->xstats->forced_mark += xstats.ecn_marked - prev_xstats->ecn_marked; - opt->xstats->pdrop += xstats.pdrop - prev_xstats->pdrop; - - *prev_xstats = xstats; - - return 0; -} - -static int -nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink, - struct tc_red_qopt_offload *opt) -{ - switch (opt->command) { - case TC_RED_REPLACE: - return nfp_abm_red_replace(netdev, alink, opt); - case TC_RED_DESTROY: - nfp_abm_red_destroy(netdev, alink, opt->handle); - return 0; - case TC_RED_STATS: - return nfp_abm_red_stats(alink, opt); - case TC_RED_XSTATS: - return nfp_abm_red_xstats(alink, opt); - default: - return -EOPNOTSUPP; - } -} - -static int -nfp_abm_mq_stats(struct nfp_abm_link *alink, struct tc_mq_qopt_offload *opt) -{ - struct nfp_alink_stats stats; - unsigned int i; - int err; - - for (i = 0; i < alink->num_qdiscs; i++) { - if (alink->qdiscs[i].handle == TC_H_UNSPEC) - continue; - - err = nfp_abm_ctrl_read_q_stats(alink, i, &stats); - if (err) - return err; - - nfp_abm_update_stats(&stats, &alink->qdiscs[i].stats, - &opt->stats); - } - - return 0; -} - -static int -nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink, - struct tc_mq_qopt_offload *opt) -{ - switch (opt->command) { - case TC_MQ_CREATE: - nfp_abm_reset_root(netdev, alink, opt->handle, - alink->total_queues); - return 0; - case TC_MQ_DESTROY: - if (opt->handle == alink->parent) - nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0); - return 0; - case TC_MQ_STATS: - 
return nfp_abm_mq_stats(alink, opt); - default: - return -EOPNOTSUPP; - } -} - -static int nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev, enum tc_setup_type type, void *type_data) { diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h index f907b7d98917..3774c063e419 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.h +++ b/drivers/net/ethernet/netronome/nfp/abm/main.h @@ -4,7 +4,11 @@ #ifndef __NFP_ABM_H__ #define __NFP_ABM_H__ 1 +#include <linux/bits.h> #include <net/devlink.h> +#include <net/pkt_cls.h> + +#define NFP_ABM_LVL_INFINITY S32_MAX struct nfp_app; struct nfp_net; @@ -91,6 +95,11 @@ struct nfp_abm_link { struct nfp_red_qdisc *qdiscs; }; +int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_red_qopt_offload *opt); +int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_mq_qopt_offload *opt); + void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink); int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm); int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val); diff --git a/drivers/net/ethernet/netronome/nfp/abm/qdisc.c b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c new file mode 100644 index 000000000000..bb05f9ee0401 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c @@ -0,0 +1,301 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2018 Netronome Systems, Inc. */ + +#include <net/pkt_cls.h> +#include <net/pkt_sched.h> +#include <net/red.h> + +#include "../nfpcore/nfp_cpp.h" +#include "../nfp_app.h" +#include "../nfp_port.h" +#include "main.h" + +static int +__nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink, + u32 handle, unsigned int qs, u32 init_val) +{ + struct nfp_port *port = nfp_port_from_netdev(netdev); + int ret; + + ret = nfp_abm_ctrl_set_all_q_lvls(alink, init_val); + memset(alink->qdiscs, 0, sizeof(*alink->qdiscs) * alink->num_qdiscs); + + alink->parent = handle; + alink->num_qdiscs = qs; + port->tc_offload_cnt = qs; + + return ret; +} + +static void +nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink, + u32 handle, unsigned int qs) +{ + __nfp_abm_reset_root(netdev, alink, handle, qs, NFP_ABM_LVL_INFINITY); +} + +static int +nfp_abm_red_find(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) +{ + unsigned int i = TC_H_MIN(opt->parent) - 1; + + if (opt->parent == TC_H_ROOT) + i = 0; + else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) + i = TC_H_MIN(opt->parent) - 1; + else + return -EOPNOTSUPP; + + if (i >= alink->num_qdiscs || opt->handle != alink->qdiscs[i].handle) + return -EOPNOTSUPP; + + return i; +} + +static void +nfp_abm_red_destroy(struct net_device *netdev, struct nfp_abm_link *alink, + u32 handle) +{ + unsigned int i; + + for (i = 0; i < alink->num_qdiscs; i++) + if (handle == alink->qdiscs[i].handle) + break; + if (i == alink->num_qdiscs) + return; + + if (alink->parent == TC_H_ROOT) { + nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0); + } else { + nfp_abm_ctrl_set_q_lvl(alink, i, NFP_ABM_LVL_INFINITY); + memset(&alink->qdiscs[i], 0, sizeof(*alink->qdiscs)); + } +} + +static bool +nfp_abm_red_check_params(struct nfp_abm_link *alink, + struct tc_red_qopt_offload *opt) +{ + struct nfp_cpp *cpp = alink->abm->app->cpp; + + if (!opt->set.is_ecn) { + nfp_warn(cpp, "RED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x)\n", + opt->parent, opt->handle); + return 
false; + } + if (opt->set.is_harddrop) { + nfp_warn(cpp, "RED offload failed - harddrop is not supported (p:%08x h:%08x)\n", + opt->parent, opt->handle); + return false; + } + if (opt->set.min != opt->set.max) { + nfp_warn(cpp, "RED offload failed - unsupported min/max parameters (p:%08x h:%08x)\n", + opt->parent, opt->handle); + return false; + } + if (opt->set.min > NFP_ABM_LVL_INFINITY) { + nfp_warn(cpp, "RED offload failed - threshold too large %d > %d (p:%08x h:%08x)\n", + opt->set.min, NFP_ABM_LVL_INFINITY, opt->parent, + opt->handle); + return false; + } + + return true; +} + +static int +nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_red_qopt_offload *opt) +{ + bool existing; + int i, err; + + i = nfp_abm_red_find(alink, opt); + existing = i >= 0; + + if (!nfp_abm_red_check_params(alink, opt)) { + err = -EINVAL; + goto err_destroy; + } + + if (existing) { + if (alink->parent == TC_H_ROOT) + err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min); + else + err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min); + if (err) + goto err_destroy; + return 0; + } + + if (opt->parent == TC_H_ROOT) { + i = 0; + err = __nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 1, + opt->set.min); + } else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) { + i = TC_H_MIN(opt->parent) - 1; + err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min); + } else { + return -EINVAL; + } + /* Set the handle to try full clean up, in case IO failed */ + alink->qdiscs[i].handle = opt->handle; + if (err) + goto err_destroy; + + if (opt->parent == TC_H_ROOT) + err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[i].stats); + else + err = nfp_abm_ctrl_read_q_stats(alink, i, + &alink->qdiscs[i].stats); + if (err) + goto err_destroy; + + if (opt->parent == TC_H_ROOT) + err = nfp_abm_ctrl_read_xstats(alink, + &alink->qdiscs[i].xstats); + else + err = nfp_abm_ctrl_read_q_xstats(alink, i, + &alink->qdiscs[i].xstats); + if (err) + goto err_destroy; + + alink->qdiscs[i].stats.backlog_pkts = 0; + alink->qdiscs[i].stats.backlog_bytes = 0; + + return 0; +err_destroy: + /* If the qdisc keeps on living, but we can't offload undo changes */ + if (existing) { + opt->set.qstats->qlen -= alink->qdiscs[i].stats.backlog_pkts; + opt->set.qstats->backlog -= + alink->qdiscs[i].stats.backlog_bytes; + } + nfp_abm_red_destroy(netdev, alink, opt->handle); + + return err; +} + +static void +nfp_abm_update_stats(struct nfp_alink_stats *new, struct nfp_alink_stats *old, + struct tc_qopt_offload_stats *stats) +{ + _bstats_update(stats->bstats, new->tx_bytes - old->tx_bytes, + new->tx_pkts - old->tx_pkts); + stats->qstats->qlen += new->backlog_pkts - old->backlog_pkts; + stats->qstats->backlog += new->backlog_bytes - old->backlog_bytes; + stats->qstats->overlimits += new->overlimits - old->overlimits; + stats->qstats->drops += new->drops - old->drops; +} + +static int +nfp_abm_red_stats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) +{ + struct nfp_alink_stats *prev_stats; + struct nfp_alink_stats stats; + int i, err; + + i = nfp_abm_red_find(alink, opt); + if (i < 0) + return i; + prev_stats = &alink->qdiscs[i].stats; + + if (alink->parent == TC_H_ROOT) + err = nfp_abm_ctrl_read_stats(alink, &stats); + else + err = nfp_abm_ctrl_read_q_stats(alink, i, &stats); + if (err) + return err; + + nfp_abm_update_stats(&stats, prev_stats, &opt->stats); + + *prev_stats = stats; + + return 0; +} + +static int +nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) +{ + struct 
nfp_alink_xstats *prev_xstats; + struct nfp_alink_xstats xstats; + int i, err; + + i = nfp_abm_red_find(alink, opt); + if (i < 0) + return i; + prev_xstats = &alink->qdiscs[i].xstats; + + if (alink->parent == TC_H_ROOT) + err = nfp_abm_ctrl_read_xstats(alink, &xstats); + else + err = nfp_abm_ctrl_read_q_xstats(alink, i, &xstats); + if (err) + return err; + + opt->xstats->forced_mark += xstats.ecn_marked - prev_xstats->ecn_marked; + opt->xstats->pdrop += xstats.pdrop - prev_xstats->pdrop; + + *prev_xstats = xstats; + + return 0; +} + +int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_red_qopt_offload *opt) +{ + switch (opt->command) { + case TC_RED_REPLACE: + return nfp_abm_red_replace(netdev, alink, opt); + case TC_RED_DESTROY: + nfp_abm_red_destroy(netdev, alink, opt->handle); + return 0; + case TC_RED_STATS: + return nfp_abm_red_stats(alink, opt); + case TC_RED_XSTATS: + return nfp_abm_red_xstats(alink, opt); + default: + return -EOPNOTSUPP; + } +} + +static int +nfp_abm_mq_stats(struct nfp_abm_link *alink, struct tc_mq_qopt_offload *opt) +{ + struct nfp_alink_stats stats; + unsigned int i; + int err; + + for (i = 0; i < alink->num_qdiscs; i++) { + if (alink->qdiscs[i].handle == TC_H_UNSPEC) + continue; + + err = nfp_abm_ctrl_read_q_stats(alink, i, &stats); + if (err) + return err; + + nfp_abm_update_stats(&stats, &alink->qdiscs[i].stats, + &opt->stats); + } + + return 0; +} + +int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_mq_qopt_offload *opt) +{ + switch (opt->command) { + case TC_MQ_CREATE: + nfp_abm_reset_root(netdev, alink, opt->handle, + alink->total_queues); + return 0; + case TC_MQ_DESTROY: + if (opt->handle == alink->parent) + nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0); + return 0; + case TC_MQ_STATS: + return nfp_abm_mq_stats(alink, opt); + default: + return -EOPNOTSUPP; + } +} diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 244dc261006e..8d54b36afee8 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -2,7 +2,6 @@ /* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/bitfield.h> -#include <net/geneve.h> #include <net/pkt_cls.h> #include <net/switchdev.h> #include <net/tc_act/tc_csum.h> @@ -91,21 +90,6 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action, return act_size; } -static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev, - enum nfp_flower_tun_type tun_type) -{ - if (!out_dev->rtnl_link_ops) - return false; - - if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan")) - return tun_type == NFP_FL_TUNNEL_VXLAN; - - if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve")) - return tun_type == NFP_FL_TUNNEL_GENEVE; - - return false; -} - static int nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, const struct tc_action *action, struct nfp_fl_payload *nfp_flow, @@ -151,11 +135,12 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, /* Set action output parameters. */ output->flags = cpu_to_be16(tmp_flags); - /* Only offload if egress ports are on the same device as the - * ingress port. - */ - if (!switchdev_port_same_parent_id(in_dev, out_dev)) - return -EOPNOTSUPP; + if (nfp_netdev_is_nfp_repr(in_dev)) { + /* Confirm ingress and egress are on same device. 
*/ + if (!switchdev_port_same_parent_id(in_dev, out_dev)) + return -EOPNOTSUPP; + } + if (!nfp_netdev_is_nfp_repr(out_dev)) return -EOPNOTSUPP; @@ -384,10 +369,21 @@ nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off, return 0; } +struct ipv4_ttl_word { + __u8 ttl; + __u8 protocol; + __sum16 check; +}; + static int nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off, - struct nfp_fl_set_ip4_addrs *set_ip_addr) + struct nfp_fl_set_ip4_addrs *set_ip_addr, + struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos) { + struct ipv4_ttl_word *ttl_word_mask; + struct ipv4_ttl_word *ttl_word; + struct iphdr *tos_word_mask; + struct iphdr *tos_word; __be32 exact, mask; /* We are expecting tcf_pedit to return a big endian value */ @@ -402,20 +398,53 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off, set_ip_addr->ipv4_dst_mask |= mask; set_ip_addr->ipv4_dst &= ~mask; set_ip_addr->ipv4_dst |= exact & mask; + set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS; + set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> + NFP_FL_LW_SIZ; break; case offsetof(struct iphdr, saddr): set_ip_addr->ipv4_src_mask |= mask; set_ip_addr->ipv4_src &= ~mask; set_ip_addr->ipv4_src |= exact & mask; + set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS; + set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> + NFP_FL_LW_SIZ; + break; + case offsetof(struct iphdr, ttl): + ttl_word_mask = (struct ipv4_ttl_word *)&mask; + ttl_word = (struct ipv4_ttl_word *)&exact; + + if (ttl_word_mask->protocol || ttl_word_mask->check) + return -EOPNOTSUPP; + + set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl; + set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl; + set_ip_ttl_tos->ipv4_ttl |= ttl_word->ttl & ttl_word_mask->ttl; + set_ip_ttl_tos->head.jump_id = + NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS; + set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >> + NFP_FL_LW_SIZ; + break; + case round_down(offsetof(struct iphdr, tos), 4): + tos_word_mask = (struct iphdr *)&mask; + tos_word = (struct iphdr *)&exact; + + if (tos_word_mask->version || tos_word_mask->ihl || + tos_word_mask->tot_len) + return -EOPNOTSUPP; + + set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos; + set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos; + set_ip_ttl_tos->ipv4_tos |= tos_word->tos & tos_word_mask->tos; + set_ip_ttl_tos->head.jump_id = + NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS; + set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >> + NFP_FL_LW_SIZ; break; default: return -EOPNOTSUPP; } - set_ip_addr->reserved = cpu_to_be16(0); - set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS; - set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ; - return 0; } @@ -432,12 +461,57 @@ nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask, ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ; } +struct ipv6_hop_limit_word { + __be16 payload_len; + u8 nexthdr; + u8 hop_limit; +}; + +static int +nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask, + struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl) +{ + struct ipv6_hop_limit_word *fl_hl_mask; + struct ipv6_hop_limit_word *fl_hl; + + switch (off) { + case offsetof(struct ipv6hdr, payload_len): + fl_hl_mask = (struct ipv6_hop_limit_word *)&mask; + fl_hl = (struct ipv6_hop_limit_word *)&exact; + + if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len) + return -EOPNOTSUPP; + + ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit; + ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit; + ip_hl_fl->ipv6_hop_limit |= fl_hl->hop_limit & 
+ fl_hl_mask->hop_limit; + break; + case round_down(offsetof(struct ipv6hdr, flow_lbl), 4): + if (mask & ~IPV6_FLOW_LABEL_MASK || + exact & ~IPV6_FLOW_LABEL_MASK) + return -EOPNOTSUPP; + + ip_hl_fl->ipv6_label_mask |= mask; + ip_hl_fl->ipv6_label &= ~mask; + ip_hl_fl->ipv6_label |= exact & mask; + break; + } + + ip_hl_fl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL; + ip_hl_fl->head.len_lw = sizeof(*ip_hl_fl) >> NFP_FL_LW_SIZ; + + return 0; +} + static int nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off, struct nfp_fl_set_ipv6_addr *ip_dst, - struct nfp_fl_set_ipv6_addr *ip_src) + struct nfp_fl_set_ipv6_addr *ip_src, + struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl) { __be32 exact, mask; + int err = 0; u8 word; /* We are expecting tcf_pedit to return a big endian value */ @@ -448,7 +522,8 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off, return -EOPNOTSUPP; if (off < offsetof(struct ipv6hdr, saddr)) { - return -EOPNOTSUPP; + err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask, + ip_hl_fl); } else if (off < offsetof(struct ipv6hdr, daddr)) { word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact); nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word, @@ -462,7 +537,7 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off, return -EOPNOTSUPP; } - return 0; + return err; } static int @@ -513,6 +588,8 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, char *nfp_action, int *a_len, u32 *csum_updated) { struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src; + struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl; + struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos; struct nfp_fl_set_ip4_addrs set_ip_addr; struct nfp_fl_set_tport set_tport; struct nfp_fl_set_eth set_eth; @@ -522,6 +599,8 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, u32 offset, cmd; u8 ip_proto = 0; + memset(&set_ip6_tc_hl_fl, 0, sizeof(set_ip6_tc_hl_fl)); + memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos)); memset(&set_ip6_dst, 0, sizeof(set_ip6_dst)); memset(&set_ip6_src, 0, sizeof(set_ip6_src)); memset(&set_ip_addr, 0, sizeof(set_ip_addr)); @@ -542,11 +621,12 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, err = nfp_fl_set_eth(action, idx, offset, &set_eth); break; case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: - err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr); + err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr, + &set_ip_ttl_tos); break; case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst, - &set_ip6_src); + &set_ip6_src, &set_ip6_tc_hl_fl); break; case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: err = nfp_fl_set_tport(action, idx, offset, &set_tport, @@ -577,6 +657,16 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, memcpy(nfp_action, &set_eth, act_size); *a_len += act_size; } + if (set_ip_ttl_tos.head.len_lw) { + nfp_action += act_size; + act_size = sizeof(set_ip_ttl_tos); + memcpy(nfp_action, &set_ip_ttl_tos, act_size); + *a_len += act_size; + + /* Hardware will automatically fix IPv4 and TCP/UDP checksum. 
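Note on the IPv4 TTL/TOS pedit support above: tcf_pedit hands the driver 32-bit exact/mask words at 4-byte-aligned header offsets, so a TTL-only rewrite arrives as the word that also covers the protocol and checksum fields. The driver overlays struct ipv4_ttl_word on that word and rejects masks that touch the neighbouring fields. A minimal sketch of that check follows; the helper name is invented for illustration and is not part of this patch, the struct layout is taken from the hunk above.

#include <linux/types.h>

struct ipv4_ttl_word {
	__u8	ttl;
	__u8	protocol;
	__sum16	check;
};

/* Illustrative only: accept a pedit mask word starting at
 * offsetof(struct iphdr, ttl) only if it modifies nothing but TTL. */
static bool sketch_pedit_mask_is_ttl_only(__be32 mask)
{
	struct ipv4_ttl_word *w = (struct ipv4_ttl_word *)&mask;

	return !w->protocol && !w->check;
}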
*/ + *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR | + nfp_fl_csum_l4_to_flag(ip_proto); + } if (set_ip_addr.head.len_lw) { nfp_action += act_size; act_size = sizeof(set_ip_addr); @@ -587,6 +677,15 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR | nfp_fl_csum_l4_to_flag(ip_proto); } + if (set_ip6_tc_hl_fl.head.len_lw) { + nfp_action += act_size; + act_size = sizeof(set_ip6_tc_hl_fl); + memcpy(nfp_action, &set_ip6_tc_hl_fl, act_size); + *a_len += act_size; + + /* Hardware will automatically fix TCP/UDP checksum. */ + *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); + } if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) { /* TC compiles set src and dst IPv6 address as a single action, * the hardware requires this to be 2 separate actions. @@ -728,9 +827,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, *a_len += sizeof(struct nfp_fl_push_vlan); } else if (is_tcf_tunnel_set(a)) { struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a); - struct nfp_repr *repr = netdev_priv(netdev); - *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a); + *tun_type = nfp_fl_get_tun_from_act_l4_port(app, a); if (*tun_type == NFP_FL_TUNNEL_NONE) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 29d673aa5277..15f41cfef9f1 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -8,6 +8,7 @@ #include <linux/skbuff.h> #include <linux/types.h> #include <net/geneve.h> +#include <net/vxlan.h> #include "../nfp_app.h" #include "../nfpcore/nfp_cpp.h" @@ -65,8 +66,10 @@ #define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6 #define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7 #define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9 +#define NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS 10 #define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11 #define NFP_FL_ACTION_OPCODE_SET_IPV6_DST 12 +#define NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL 13 #define NFP_FL_ACTION_OPCODE_SET_UDP 14 #define NFP_FL_ACTION_OPCODE_SET_TCP 15 #define NFP_FL_ACTION_OPCODE_PRE_LAG 16 @@ -82,6 +85,8 @@ #define NFP_FL_PUSH_VLAN_CFI BIT(12) #define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0) +#define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff) + /* LAG ports */ #define NFP_FL_LAG_OUT 0xC0DE0000 @@ -125,6 +130,26 @@ struct nfp_fl_set_ip4_addrs { __be32 ipv4_dst; }; +struct nfp_fl_set_ip4_ttl_tos { + struct nfp_fl_act_head head; + u8 ipv4_ttl_mask; + u8 ipv4_tos_mask; + u8 ipv4_ttl; + u8 ipv4_tos; + __be16 reserved; +}; + +struct nfp_fl_set_ipv6_tc_hl_fl { + struct nfp_fl_act_head head; + u8 ipv6_tc_mask; + u8 ipv6_hop_limit_mask; + __be16 reserved; + u8 ipv6_tc; + u8 ipv6_hop_limit; + __be32 ipv6_label_mask; + __be32 ipv6_label; +}; + struct nfp_fl_set_ipv6_addr { struct nfp_fl_act_head head; __be16 reserved; @@ -475,6 +500,32 @@ static inline int nfp_flower_cmsg_get_data_len(struct sk_buff *skb) return skb->len - NFP_FLOWER_CMSG_HLEN; } +static inline bool +nfp_fl_netdev_is_tunnel_type(struct net_device *netdev, + enum nfp_flower_tun_type tun_type) +{ + if (netif_is_vxlan(netdev)) + return tun_type == NFP_FL_TUNNEL_VXLAN; + if (netif_is_geneve(netdev)) + return tun_type == NFP_FL_TUNNEL_GENEVE; + + return false; +} + +static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev) +{ + if (!netdev->rtnl_link_ops) + return false; + if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch")) + return true; + if (netif_is_vxlan(netdev)) + 
return true; + if (netif_is_geneve(netdev)) + return true; + + return false; +} + struct sk_buff * nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports); void diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c index 81dcf5b318ba..5db838f45694 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c @@ -472,17 +472,25 @@ nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag, schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY); } -static int +static void nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag, struct net_device *master) { struct nfp_fl_lag_group *group; + struct nfp_flower_priv *priv; + + priv = container_of(lag, struct nfp_flower_priv, nfp_lag); + + if (!netif_is_bond_master(master)) + return; mutex_lock(&lag->lock); group = nfp_fl_lag_find_group_for_master_with_lag(lag, master); if (!group) { mutex_unlock(&lag->lock); - return -ENOENT; + nfp_warn(priv->app->cpp, "untracked bond got unregistered %s\n", + netdev_name(master)); + return; } group->to_remove = true; @@ -490,7 +498,6 @@ nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag, mutex_unlock(&lag->lock); schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY); - return 0; } static int @@ -575,7 +582,7 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag, return 0; } -static int +static void nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev, struct netdev_notifier_changelowerstate_info *info) { @@ -586,18 +593,18 @@ nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev, unsigned long *flags; if (!netif_is_lag_port(netdev) || !nfp_netdev_is_nfp_repr(netdev)) - return 0; + return; lag_lower_info = info->lower_state_info; if (!lag_lower_info) - return 0; + return; priv = container_of(lag, struct nfp_flower_priv, nfp_lag); repr = netdev_priv(netdev); /* Verify that the repr is associated with this app. */ if (repr->app != priv->app) - return 0; + return; repr_priv = repr->app_priv; flags = &repr_priv->lag_port_flags; @@ -617,20 +624,15 @@ nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev, mutex_unlock(&lag->lock); schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY); - return 0; } -static int -nfp_fl_lag_netdev_event(struct notifier_block *nb, unsigned long event, - void *ptr) +int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv, + struct net_device *netdev, + unsigned long event, void *ptr) { - struct net_device *netdev; - struct nfp_fl_lag *lag; + struct nfp_fl_lag *lag = &priv->nfp_lag; int err; - netdev = netdev_notifier_info_to_dev(ptr); - lag = container_of(nb, struct nfp_fl_lag, lag_nb); - switch (event) { case NETDEV_CHANGEUPPER: err = nfp_fl_lag_changeupper_event(lag, ptr); @@ -638,17 +640,11 @@ nfp_fl_lag_netdev_event(struct notifier_block *nb, unsigned long event, return NOTIFY_BAD; return NOTIFY_OK; case NETDEV_CHANGELOWERSTATE: - err = nfp_fl_lag_changels_event(lag, netdev, ptr); - if (err) - return NOTIFY_BAD; + nfp_fl_lag_changels_event(lag, netdev, ptr); return NOTIFY_OK; case NETDEV_UNREGISTER: - if (netif_is_bond_master(netdev)) { - err = nfp_fl_lag_schedule_group_delete(lag, netdev); - if (err) - return NOTIFY_BAD; - return NOTIFY_OK; - } + nfp_fl_lag_schedule_group_delete(lag, netdev); + return NOTIFY_OK; } return NOTIFY_DONE; @@ -673,8 +669,6 @@ void nfp_flower_lag_init(struct nfp_fl_lag *lag) /* 0 is a reserved batch version so increment to first valid value. 
*/ nfp_fl_increment_version(lag); - - lag->lag_nb.notifier_call = nfp_fl_lag_netdev_event; } void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag) diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 3a54728d2ea6..5059110a1768 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -146,23 +146,12 @@ nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr) return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false); } -static int -nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev) -{ - return tc_setup_cb_egdev_register(netdev, - nfp_flower_setup_tc_egress_cb, - netdev_priv(netdev)); -} - static void nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev) { struct nfp_repr *repr = netdev_priv(netdev); kfree(repr->app_priv); - - tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb, - netdev_priv(netdev)); } static void @@ -568,6 +557,8 @@ static int nfp_flower_init(struct nfp_app *app) goto err_cleanup_metadata; } + INIT_LIST_HEAD(&app_priv->indr_block_cb_priv); + return 0; err_cleanup_metadata: @@ -661,10 +652,6 @@ static int nfp_flower_start(struct nfp_app *app) err = nfp_flower_lag_reset(&app_priv->nfp_lag); if (err) return err; - - err = register_netdevice_notifier(&app_priv->nfp_lag.lag_nb); - if (err) - return err; } return nfp_tunnel_config_start(app); @@ -672,12 +659,27 @@ static int nfp_flower_start(struct nfp_app *app) static void nfp_flower_stop(struct nfp_app *app) { + nfp_tunnel_config_stop(app); +} + +static int +nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev, + unsigned long event, void *ptr) +{ struct nfp_flower_priv *app_priv = app->priv; + int ret; - if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) - unregister_netdevice_notifier(&app_priv->nfp_lag.lag_nb); + if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) { + ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr); + if (ret & NOTIFY_STOP_MASK) + return ret; + } - nfp_tunnel_config_stop(app); + ret = nfp_flower_reg_indir_block_handler(app, netdev, event); + if (ret & NOTIFY_STOP_MASK) + return ret; + + return nfp_tunnel_mac_event_handler(app, netdev, event, ptr); } const struct nfp_app_type app_flower = { @@ -698,7 +700,6 @@ const struct nfp_app_type app_flower = { .vnic_init = nfp_flower_vnic_init, .vnic_clean = nfp_flower_vnic_clean, - .repr_init = nfp_flower_repr_netdev_init, .repr_preclean = nfp_flower_repr_netdev_preclean, .repr_clean = nfp_flower_repr_netdev_clean, @@ -708,6 +709,8 @@ const struct nfp_app_type app_flower = { .start = nfp_flower_start, .stop = nfp_flower_stop, + .netdev_event = nfp_flower_netdev_event, + .ctrl_msg_rx = nfp_flower_cmsg_rx, .sriov_enable = nfp_flower_sriov_enable, diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 90045bab95bf..b858bac47621 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -20,7 +20,6 @@ struct nfp_fl_pre_lag; struct net_device; struct nfp_app; -#define NFP_FL_STATS_CTX_DONT_CARE cpu_to_be32(0xffffffff) #define NFP_FL_STATS_ELEM_RS FIELD_SIZEOF(struct nfp_fl_stats_id, \ init_unalloc) #define NFP_FLOWER_MASK_ENTRY_RS 256 @@ -72,7 +71,6 @@ struct nfp_mtu_conf { /** * struct nfp_fl_lag - Flower APP priv data for link aggregation - * @lag_nb: Notifier to track master/slave events * @work: Work 
queue for writing configs to the HW * @lock: Lock to protect lag_group_list * @group_list: List of all master/slave groups offloaded @@ -85,7 +83,6 @@ struct nfp_mtu_conf { * retransmission */ struct nfp_fl_lag { - struct notifier_block lag_nb; struct delayed_work work; struct mutex lock; struct list_head group_list; @@ -126,13 +123,13 @@ struct nfp_fl_lag { * @nfp_neigh_off_lock: Lock for the neighbour address list * @nfp_mac_off_ids: IDA to manage id assignment for offloaded macs * @nfp_mac_off_count: Number of MACs in address list - * @nfp_tun_mac_nb: Notifier to monitor link state * @nfp_tun_neigh_nb: Notifier to monitor neighbour state * @reify_replies: atomically stores the number of replies received * from firmware for repr reify * @reify_wait_queue: wait queue for repr reify response counting * @mtu_conf: Configuration of repr MTU value * @nfp_lag: Link aggregation data block + * @indr_block_cb_priv: List of priv data passed to indirect block cbs */ struct nfp_flower_priv { struct nfp_app *app; @@ -160,12 +157,12 @@ struct nfp_flower_priv { spinlock_t nfp_neigh_off_lock; struct ida nfp_mac_off_ids; int nfp_mac_off_count; - struct notifier_block nfp_tun_mac_nb; struct notifier_block nfp_tun_neigh_nb; atomic_t reify_replies; wait_queue_head_t reify_wait_queue; struct nfp_mtu_conf mtu_conf; struct nfp_fl_lag nfp_lag; + struct list_head indr_block_cb_priv; }; /** @@ -209,7 +206,6 @@ struct nfp_fl_payload { char *unmasked_data; char *mask_data; char *action_data; - bool ingress_offload; }; extern const struct rhashtable_params nfp_flower_table_params; @@ -226,7 +222,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app); int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, enum tc_setup_type type, void *type_data); -int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, +int nfp_flower_compile_flow_match(struct nfp_app *app, + struct tc_cls_flower_offload *flow, struct nfp_fl_key_ls *key_ls, struct net_device *netdev, struct nfp_fl_payload *nfp_flow, @@ -244,7 +241,7 @@ int nfp_modify_flow_metadata(struct nfp_app *app, struct nfp_fl_payload * nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie, - struct net_device *netdev, __be32 host_ctx); + struct net_device *netdev); struct nfp_fl_payload * nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie); @@ -252,21 +249,28 @@ void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb); int nfp_tunnel_config_start(struct nfp_app *app); void nfp_tunnel_config_stop(struct nfp_app *app); +int nfp_tunnel_mac_event_handler(struct nfp_app *app, + struct net_device *netdev, + unsigned long event, void *ptr); void nfp_tunnel_write_macs(struct nfp_app *app); void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4); void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4); void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb); void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb); -int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data, - void *cb_priv); void nfp_flower_lag_init(struct nfp_fl_lag *lag); void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag); int nfp_flower_lag_reset(struct nfp_fl_lag *lag); +int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv, + struct net_device *netdev, + unsigned long event, void *ptr); bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb); int nfp_flower_lag_populate_pre_action(struct nfp_app *app, struct net_device 
*master, struct nfp_fl_pre_lag *pre_act); int nfp_flower_lag_get_output_id(struct nfp_app *app, struct net_device *master); +int nfp_flower_reg_indir_block_handler(struct nfp_app *app, + struct net_device *netdev, + unsigned long event); #endif diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index e54fb6034326..cdf75595f627 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -52,10 +52,13 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port, return 0; } - if (tun_type) + if (tun_type) { frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type); - else + } else { + if (!cmsg_port) + return -EOPNOTSUPP; frame->in_port = cpu_to_be32(cmsg_port); + } return 0; } @@ -289,17 +292,21 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame, } } -int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, +int nfp_flower_compile_flow_match(struct nfp_app *app, + struct tc_cls_flower_offload *flow, struct nfp_fl_key_ls *key_ls, struct net_device *netdev, struct nfp_fl_payload *nfp_flow, enum nfp_flower_tun_type tun_type) { - struct nfp_repr *netdev_repr; + u32 cmsg_port = 0; int err; u8 *ext; u8 *msk; + if (nfp_netdev_is_nfp_repr(netdev)) + cmsg_port = nfp_repr_get_port_id(netdev); + memset(nfp_flow->unmasked_data, 0, key_ls->key_size); memset(nfp_flow->mask_data, 0, key_ls->key_size); @@ -327,15 +334,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, /* Populate Exact Port data. */ err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext, - nfp_repr_get_port_id(netdev), - false, tun_type); + cmsg_port, false, tun_type); if (err) return err; /* Populate Mask Port Data. */ err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk, - nfp_repr_get_port_id(netdev), - true, tun_type); + cmsg_port, true, tun_type); if (err) return err; @@ -399,16 +404,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, msk += sizeof(struct nfp_flower_ipv4_udp_tun); /* Configure tunnel end point MAC. */ - if (nfp_netdev_is_nfp_repr(netdev)) { - netdev_repr = netdev_priv(netdev); - nfp_tunnel_write_macs(netdev_repr->app); - - /* Store the tunnel destination in the rule data. - * This must be present and be an exact match. - */ - nfp_flow->nfp_tun_ipv4_addr = tun_dst; - nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst); - } + nfp_tunnel_write_macs(app); + + /* Store the tunnel destination in the rule data. + * This must be present and be an exact match. 
+ */ + nfp_flow->nfp_tun_ipv4_addr = tun_dst; + nfp_tunnel_add_ipv4_off(app, tun_dst); if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) { err = nfp_flower_compile_geneve_opt(ext, flow, false); diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 48729bf171e0..573a4400a26c 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -21,7 +21,6 @@ struct nfp_mask_id_table { struct nfp_fl_flow_table_cmp_arg { struct net_device *netdev; unsigned long cookie; - __be32 host_ctx; }; static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id) @@ -76,14 +75,13 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id) /* Must be called with either RTNL or rcu_read_lock */ struct nfp_fl_payload * nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie, - struct net_device *netdev, __be32 host_ctx) + struct net_device *netdev) { struct nfp_fl_flow_table_cmp_arg flower_cmp_arg; struct nfp_flower_priv *priv = app->priv; flower_cmp_arg.netdev = netdev; flower_cmp_arg.cookie = tc_flower_cookie; - flower_cmp_arg.host_ctx = host_ctx; return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg, nfp_flower_table_params); @@ -287,6 +285,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app, nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt); nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie); + nfp_flow->ingress_dev = netdev; new_mask_id = 0; if (!nfp_check_mask_add(app, nfp_flow->mask_data, @@ -306,8 +305,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app, priv->stats[stats_cxt].bytes = 0; priv->stats[stats_cxt].used = jiffies; - check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev, - NFP_FL_STATS_CTX_DONT_CARE); + check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev); if (check_entry) { if (nfp_release_stats_entry(app, stats_cxt)) return -EINVAL; @@ -352,9 +350,7 @@ static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg, const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key; const struct nfp_fl_payload *flow_entry = obj; - if ((!cmp_arg->netdev || flow_entry->ingress_dev == cmp_arg->netdev) && - (cmp_arg->host_ctx == NFP_FL_STATS_CTX_DONT_CARE || - flow_entry->meta.host_ctx_id == cmp_arg->host_ctx)) + if (flow_entry->ingress_dev == cmp_arg->netdev) return flow_entry->tc_flower_cookie != cmp_arg->cookie; return 1; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 29c95423ab64..545d94168874 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -56,11 +56,10 @@ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS)) static int -nfp_flower_xmit_flow(struct net_device *netdev, - struct nfp_fl_payload *nfp_flow, u8 mtype) +nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow, + u8 mtype) { u32 meta_len, key_len, mask_len, act_len, tot_len; - struct nfp_repr *priv = netdev_priv(netdev); struct sk_buff *skb; unsigned char *msg; @@ -78,7 +77,7 @@ nfp_flower_xmit_flow(struct net_device *netdev, nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ; nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ; - skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL); + skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL); if (!skb) return -ENOMEM; @@ -96,7 +95,7 @@ nfp_flower_xmit_flow(struct net_device *netdev, 
nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ; nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ; - nfp_ctrl_tx(priv->app->ctrl, skb); + nfp_ctrl_tx(app->ctrl, skb); return 0; } @@ -129,9 +128,9 @@ nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts, static int nfp_flower_calculate_key_layers(struct nfp_app *app, + struct net_device *netdev, struct nfp_fl_key_ls *ret_key_ls, struct tc_cls_flower_offload *flow, - bool egress, enum nfp_flower_tun_type *tun_type) { struct flow_dissector_key_basic *mask_basic = NULL; @@ -187,8 +186,6 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, skb_flow_dissector_target(flow->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL, flow->key); - if (!egress) - return -EOPNOTSUPP; if (mask_enc_ctl->addr_type != 0xffff || enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) @@ -251,9 +248,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, default: return -EOPNOTSUPP; } - } else if (egress) { - /* Reject non tunnel matches offloaded to egress repr. */ - return -EOPNOTSUPP; + + /* Ensure the ingress netdev matches the expected tun type. */ + if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) + return -EOPNOTSUPP; } if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { @@ -374,7 +372,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, } static struct nfp_fl_payload * -nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress) +nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer) { struct nfp_fl_payload *flow_pay; @@ -398,7 +396,6 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress) flow_pay->nfp_tun_ipv4_addr = 0; flow_pay->meta.flags = 0; - flow_pay->ingress_offload = !egress; return flow_pay; @@ -416,7 +413,6 @@ err_free_flow: * @app: Pointer to the APP handle * @netdev: netdev structure. * @flow: TC flower classifier offload structure. - * @egress: NFP netdev is the egress. * * Adds a new flow to the repeated hash structure and action payload. * @@ -424,46 +420,35 @@ err_free_flow: */ static int nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, - struct tc_cls_flower_offload *flow, bool egress) + struct tc_cls_flower_offload *flow) { enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE; - struct nfp_port *port = nfp_port_from_netdev(netdev); struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *flow_pay; struct nfp_fl_key_ls *key_layer; - struct net_device *ingr_dev; + struct nfp_port *port = NULL; int err; - ingr_dev = egress ? NULL : netdev; - flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, - NFP_FL_STATS_CTX_DONT_CARE); - if (flow_pay) { - /* Ignore as duplicate if it has been added by different cb. */ - if (flow_pay->ingress_offload && egress) - return 0; - else - return -EOPNOTSUPP; - } + if (nfp_netdev_is_nfp_repr(netdev)) + port = nfp_port_from_netdev(netdev); key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL); if (!key_layer) return -ENOMEM; - err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress, + err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow, &tun_type); if (err) goto err_free_key_ls; - flow_pay = nfp_flower_allocate_new(key_layer, egress); + flow_pay = nfp_flower_allocate_new(key_layer); if (!flow_pay) { err = -ENOMEM; goto err_free_key_ls; } - flow_pay->ingress_dev = egress ? 
NULL : netdev; - - err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay, - tun_type); + err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev, + flow_pay, tun_type); if (err) goto err_destroy_flow; @@ -471,12 +456,11 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (err) goto err_destroy_flow; - err = nfp_compile_flow_metadata(app, flow, flow_pay, - flow_pay->ingress_dev); + err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev); if (err) goto err_destroy_flow; - err = nfp_flower_xmit_flow(netdev, flow_pay, + err = nfp_flower_xmit_flow(app, flow_pay, NFP_FLOWER_CMSG_TYPE_FLOW_ADD); if (err) goto err_destroy_flow; @@ -487,7 +471,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (err) goto err_destroy_flow; - port->tc_offload_cnt++; + if (port) + port->tc_offload_cnt++; /* Deallocate flow payload when flower rule has been destroyed. */ kfree(key_layer); @@ -509,7 +494,6 @@ err_free_key_ls: * @app: Pointer to the APP handle * @netdev: netdev structure. * @flow: TC flower classifier offload structure - * @egress: Netdev is the egress dev. * * Removes a flow from the repeated hash structure and clears the * action payload. @@ -518,19 +502,19 @@ err_free_key_ls: */ static int nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, - struct tc_cls_flower_offload *flow, bool egress) + struct tc_cls_flower_offload *flow) { - struct nfp_port *port = nfp_port_from_netdev(netdev); struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *nfp_flow; - struct net_device *ingr_dev; + struct nfp_port *port = NULL; int err; - ingr_dev = egress ? NULL : netdev; - nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, - NFP_FL_STATS_CTX_DONT_CARE); + if (nfp_netdev_is_nfp_repr(netdev)) + port = nfp_port_from_netdev(netdev); + + nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev); if (!nfp_flow) - return egress ? 0 : -ENOENT; + return -ENOENT; err = nfp_modify_flow_metadata(app, nfp_flow); if (err) @@ -539,13 +523,14 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, if (nfp_flow->nfp_tun_ipv4_addr) nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr); - err = nfp_flower_xmit_flow(netdev, nfp_flow, + err = nfp_flower_xmit_flow(app, nfp_flow, NFP_FLOWER_CMSG_TYPE_FLOW_DEL); if (err) goto err_free_flow; err_free_flow: - port->tc_offload_cnt--; + if (port) + port->tc_offload_cnt--; kfree(nfp_flow->action_data); kfree(nfp_flow->mask_data); kfree(nfp_flow->unmasked_data); @@ -561,7 +546,6 @@ err_free_flow: * @app: Pointer to the APP handle * @netdev: Netdev structure. * @flow: TC flower classifier offload structure - * @egress: Netdev is the egress dev. * * Populates a flow statistics structure which which corresponds to a * specific flow. @@ -570,22 +554,16 @@ err_free_flow: */ static int nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, - struct tc_cls_flower_offload *flow, bool egress) + struct tc_cls_flower_offload *flow) { struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *nfp_flow; - struct net_device *ingr_dev; u32 ctx_id; - ingr_dev = egress ? 
NULL : netdev; - nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, - NFP_FL_STATS_CTX_DONT_CARE); + nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev); if (!nfp_flow) return -EINVAL; - if (nfp_flow->ingress_offload && egress) - return 0; - ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id); spin_lock_bh(&priv->stats_lock); @@ -602,35 +580,18 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, static int nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, - struct tc_cls_flower_offload *flower, bool egress) + struct tc_cls_flower_offload *flower) { if (!eth_proto_is_802_3(flower->common.protocol)) return -EOPNOTSUPP; switch (flower->command) { case TC_CLSFLOWER_REPLACE: - return nfp_flower_add_offload(app, netdev, flower, egress); + return nfp_flower_add_offload(app, netdev, flower); case TC_CLSFLOWER_DESTROY: - return nfp_flower_del_offload(app, netdev, flower, egress); + return nfp_flower_del_offload(app, netdev, flower); case TC_CLSFLOWER_STATS: - return nfp_flower_get_stats(app, netdev, flower, egress); - default: - return -EOPNOTSUPP; - } -} - -int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data, - void *cb_priv) -{ - struct nfp_repr *repr = cb_priv; - - if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data)) - return -EOPNOTSUPP; - - switch (type) { - case TC_SETUP_CLSFLOWER: - return nfp_flower_repr_offload(repr->app, repr->netdev, - type_data, true); + return nfp_flower_get_stats(app, netdev, flower); default: return -EOPNOTSUPP; } @@ -647,7 +608,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type, switch (type) { case TC_SETUP_CLSFLOWER: return nfp_flower_repr_offload(repr->app, repr->netdev, - type_data, false); + type_data); default: return -EOPNOTSUPP; } @@ -686,3 +647,129 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, return -EOPNOTSUPP; } } + +struct nfp_flower_indr_block_cb_priv { + struct net_device *netdev; + struct nfp_app *app; + struct list_head list; +}; + +static struct nfp_flower_indr_block_cb_priv * +nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app, + struct net_device *netdev) +{ + struct nfp_flower_indr_block_cb_priv *cb_priv; + struct nfp_flower_priv *priv = app->priv; + + /* All callback list access should be protected by RTNL. 
*/ + ASSERT_RTNL(); + + list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list) + if (cb_priv->netdev == netdev) + return cb_priv; + + return NULL; +} + +static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct nfp_flower_indr_block_cb_priv *priv = cb_priv; + struct tc_cls_flower_offload *flower = type_data; + + if (flower->common.chain_index) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return nfp_flower_repr_offload(priv->app, priv->netdev, + type_data); + default: + return -EOPNOTSUPP; + } +} + +static int +nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, + struct tc_block_offload *f) +{ + struct nfp_flower_indr_block_cb_priv *cb_priv; + struct nfp_flower_priv *priv = app->priv; + int err; + + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL); + if (!cb_priv) + return -ENOMEM; + + cb_priv->netdev = netdev; + cb_priv->app = app; + list_add(&cb_priv->list, &priv->indr_block_cb_priv); + + err = tcf_block_cb_register(f->block, + nfp_flower_setup_indr_block_cb, + netdev, cb_priv, f->extack); + if (err) { + list_del(&cb_priv->list); + kfree(cb_priv); + } + + return err; + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, + nfp_flower_setup_indr_block_cb, netdev); + cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev); + if (cb_priv) { + list_del(&cb_priv->list); + kfree(cb_priv); + } + + return 0; + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int +nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv, + enum tc_setup_type type, void *type_data) +{ + switch (type) { + case TC_SETUP_BLOCK: + return nfp_flower_setup_indr_tc_block(netdev, cb_priv, + type_data); + default: + return -EOPNOTSUPP; + } +} + +int nfp_flower_reg_indir_block_handler(struct nfp_app *app, + struct net_device *netdev, + unsigned long event) +{ + int err; + + if (!nfp_fl_is_netdev_to_offload(netdev)) + return NOTIFY_OK; + + if (event == NETDEV_REGISTER) { + err = __tc_indr_block_cb_register(netdev, app, + nfp_flower_indr_setup_tc_cb, + netdev); + if (err) + nfp_flower_cmsg_warn(app, + "Indirect block reg failed - %s\n", + netdev->name); + } else if (event == NETDEV_UNREGISTER) { + __tc_indr_block_cb_unregister(netdev, + nfp_flower_indr_setup_tc_cb, + netdev); + } + + return NOTIFY_OK; +} diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 8e5bec04d1f9..2d9f26a725c2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -4,7 +4,6 @@ #include <linux/etherdevice.h> #include <linux/inetdevice.h> #include <net/netevent.h> -#include <net/vxlan.h> #include <linux/idr.h> #include <net/dst_metadata.h> #include <net/arp.h> @@ -182,18 +181,6 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb) } } -static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev) -{ - if (!netdev->rtnl_link_ops) - return false; - if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch")) - return true; - if (netif_is_vxlan(netdev)) - return true; - - return false; -} - static int nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata, gfp_t flag) @@ -615,7 +602,7 @@ static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev, if 
(nfp_netdev_is_nfp_repr(netdev)) port = nfp_repr_get_port_id(netdev); - else if (!nfp_tun_is_netdev_to_offload(netdev)) + else if (!nfp_fl_is_netdev_to_offload(netdev)) return; entry = kmalloc(sizeof(*entry), GFP_KERNEL); @@ -652,29 +639,16 @@ static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev, mutex_unlock(&priv->nfp_mac_off_lock); } -static int nfp_tun_mac_event_handler(struct notifier_block *nb, - unsigned long event, void *ptr) +int nfp_tunnel_mac_event_handler(struct nfp_app *app, + struct net_device *netdev, + unsigned long event, void *ptr) { - struct nfp_flower_priv *app_priv; - struct net_device *netdev; - struct nfp_app *app; - if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) { - app_priv = container_of(nb, struct nfp_flower_priv, - nfp_tun_mac_nb); - app = app_priv->app; - netdev = netdev_notifier_info_to_dev(ptr); - /* If non-nfp netdev then free its offload index. */ - if (nfp_tun_is_netdev_to_offload(netdev)) + if (nfp_fl_is_netdev_to_offload(netdev)) nfp_tun_del_mac_idx(app, netdev->ifindex); } else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER) { - app_priv = container_of(nb, struct nfp_flower_priv, - nfp_tun_mac_nb); - app = app_priv->app; - netdev = netdev_notifier_info_to_dev(ptr); - nfp_tun_add_to_mac_offload_list(netdev, app); /* Force a list write to keep NFP up to date. */ @@ -686,14 +660,11 @@ static int nfp_tun_mac_event_handler(struct notifier_block *nb, int nfp_tunnel_config_start(struct nfp_app *app) { struct nfp_flower_priv *priv = app->priv; - struct net_device *netdev; - int err; /* Initialise priv data for MAC offloading. */ priv->nfp_mac_off_count = 0; mutex_init(&priv->nfp_mac_off_lock); INIT_LIST_HEAD(&priv->nfp_mac_off_list); - priv->nfp_tun_mac_nb.notifier_call = nfp_tun_mac_event_handler; mutex_init(&priv->nfp_mac_index_lock); INIT_LIST_HEAD(&priv->nfp_mac_index_list); ida_init(&priv->nfp_mac_off_ids); @@ -707,27 +678,7 @@ int nfp_tunnel_config_start(struct nfp_app *app) INIT_LIST_HEAD(&priv->nfp_neigh_off_list); priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler; - err = register_netdevice_notifier(&priv->nfp_tun_mac_nb); - if (err) - goto err_free_mac_ida; - - err = register_netevent_notifier(&priv->nfp_tun_neigh_nb); - if (err) - goto err_unreg_mac_nb; - - /* Parse netdevs already registered for MACs that need offloaded. */ - rtnl_lock(); - for_each_netdev(&init_net, netdev) - nfp_tun_add_to_mac_offload_list(netdev, app); - rtnl_unlock(); - - return 0; - -err_unreg_mac_nb: - unregister_netdevice_notifier(&priv->nfp_tun_mac_nb); -err_free_mac_ida: - ida_destroy(&priv->nfp_mac_off_ids); - return err; + return register_netevent_notifier(&priv->nfp_tun_neigh_nb); } void nfp_tunnel_config_stop(struct nfp_app *app) @@ -739,7 +690,6 @@ void nfp_tunnel_config_stop(struct nfp_app *app) struct nfp_ipv4_addr_entry *ip_entry; struct list_head *ptr, *storage; - unregister_netdevice_notifier(&priv->nfp_tun_mac_nb); unregister_netevent_notifier(&priv->nfp_tun_neigh_nb); /* Free any memory that may be occupied by MAC list. 
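The two hunks above move tunnel/OvS netdev tracking out of a private notifier and into indirect TC block callbacks, so flower rules installed on devices the NFP does not own can still be offloaded. A condensed sketch of that registration flow, with hypothetical foo_* names standing in for the driver-specific parts (only the tcf_block_cb_* and __tc_indr_block_cb_* calls are taken from the code above):

#include <linux/netdevice.h>
#include <net/pkt_cls.h>

static int foo_indr_block_cb(enum tc_setup_type type, void *type_data,
                             void *cb_priv)
{
        /* cb_priv is the foreign netdev the rule was installed on */
        if (type != TC_SETUP_CLSFLOWER)
                return -EOPNOTSUPP;
        return 0;       /* program the rule from type_data here */
}

static int foo_indr_setup_tc(struct net_device *netdev, void *cb_priv,
                             enum tc_setup_type type, void *type_data)
{
        struct tc_block_offload *f = type_data;

        if (type != TC_SETUP_BLOCK ||
            f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block, foo_indr_block_cb,
                                             netdev, netdev, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, foo_indr_block_cb, netdev);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

/* Called from the driver's netdevice notifier for netdevs it wants to
 * offload but does not own (tunnels, OvS internal ports, ...).
 */
static void foo_watch_foreign_netdev(struct net_device *netdev,
                                     unsigned long event, void *app_priv)
{
        if (event == NETDEV_REGISTER)
                __tc_indr_block_cb_register(netdev, app_priv,
                                            foo_indr_setup_tc, netdev);
        else if (event == NETDEV_UNREGISTER)
                __tc_indr_block_cb_unregister(netdev, foo_indr_setup_tc,
                                              netdev);
}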
*/ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c index 68a0991aac22..4a1b8f79e731 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c @@ -136,6 +136,53 @@ nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type, return old; } +static int +nfp_app_netdev_event(struct notifier_block *nb, unsigned long event, void *ptr) +{ + struct net_device *netdev; + struct nfp_app *app; + + netdev = netdev_notifier_info_to_dev(ptr); + app = container_of(nb, struct nfp_app, netdev_nb); + + if (app->type->netdev_event) + return app->type->netdev_event(app, netdev, event, ptr); + return NOTIFY_DONE; +} + +int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl) +{ + int err; + + app->ctrl = ctrl; + + if (app->type->start) { + err = app->type->start(app); + if (err) + return err; + } + + app->netdev_nb.notifier_call = nfp_app_netdev_event; + err = register_netdevice_notifier(&app->netdev_nb); + if (err) + goto err_app_stop; + + return 0; + +err_app_stop: + if (app->type->stop) + app->type->stop(app); + return err; +} + +void nfp_app_stop(struct nfp_app *app) +{ + unregister_netdevice_notifier(&app->netdev_nb); + + if (app->type->stop) + app->type->stop(app); +} + struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id) { struct nfp_app *app; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index 4d6ecf99b1cc..d578d856a009 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -69,6 +69,7 @@ extern const struct nfp_app_type app_abm; * @port_get_stats_strings: get strings for extra statistics * @start: start application logic * @stop: stop application logic + * @netdev_event: Netdevice notifier event * @ctrl_msg_rx: control message handler * @ctrl_msg_rx_raw: handler for control messages from data queues * @setup_tc: setup TC ndo @@ -122,6 +123,9 @@ struct nfp_app_type { int (*start)(struct nfp_app *app); void (*stop)(struct nfp_app *app); + int (*netdev_event)(struct nfp_app *app, struct net_device *netdev, + unsigned long event, void *ptr); + void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb); void (*ctrl_msg_rx_raw)(struct nfp_app *app, const void *data, unsigned int len); @@ -151,6 +155,7 @@ struct nfp_app_type { * @reprs: array of pointers to representors * @type: pointer to const application ops and info * @ctrl_mtu: MTU to set on the control vNIC (set in .init()) + * @netdev_nb: Netdevice notifier block * @priv: app-specific priv data */ struct nfp_app { @@ -163,6 +168,9 @@ struct nfp_app { const struct nfp_app_type *type; unsigned int ctrl_mtu; + + struct notifier_block netdev_nb; + void *priv; }; @@ -264,21 +272,6 @@ nfp_app_repr_change_mtu(struct nfp_app *app, struct net_device *netdev, return app->type->repr_change_mtu(app, netdev, new_mtu); } -static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl) -{ - app->ctrl = ctrl; - if (!app->type->start) - return 0; - return app->type->start(app); -} - -static inline void nfp_app_stop(struct nfp_app *app) -{ - if (!app->type->stop) - return; - app->type->stop(app); -} - static inline const char *nfp_app_name(struct nfp_app *app) { if (!app) @@ -430,6 +423,8 @@ nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority); struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id); void nfp_app_free(struct nfp_app *app); +int nfp_app_start(struct nfp_app 
*app, struct nfp_net *ctrl); +void nfp_app_stop(struct nfp_app *app); /* Callbacks shared between apps */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 6f0c37d09256..dda02fefc806 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -851,7 +851,7 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver, void __iomem *ctrl_bar); struct nfp_net * -nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev, +nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev, unsigned int max_tx_rings, unsigned int max_rx_rings); void nfp_net_free(struct nfp_net *nn); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 6bddfcfdec34..a0343f25068a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -890,8 +890,6 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) u64_stats_update_end(&r_vec->tx_sync); } - netdev_tx_sent_queue(nd_q, txbuf->real_len); - skb_tx_timestamp(skb); tx_ring->wr_p += nr_frags + 1; @@ -899,7 +897,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) nfp_net_tx_ring_stop(nd_q, tx_ring); tx_ring->wr_ptr_add += nr_frags + 1; - if (!skb->xmit_more || netif_xmit_stopped(nd_q)) + if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, skb->xmit_more)) nfp_net_tx_xmit_more_flush(tx_ring); return NETDEV_TX_OK; @@ -3560,6 +3558,7 @@ void nfp_net_info(struct nfp_net *nn) /** * nfp_net_alloc() - Allocate netdev and related structure * @pdev: PCI device + * @ctrl_bar: PCI IOMEM with vNIC config memory * @needs_netdev: Whether to allocate a netdev for this vNIC * @max_tx_rings: Maximum number of TX rings supported by device * @max_rx_rings: Maximum number of RX rings supported by device @@ -3570,11 +3569,12 @@ void nfp_net_info(struct nfp_net *nn) * * Return: NFP Net device structure, or ERR_PTR on error. 
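The nfp_net_tx() hunk above (and the sfc one further down) folds BQL accounting and the xmit_more doorbell decision into a single __netdev_tx_sent_queue() call. A minimal sketch of the resulting transmit tail; my_xmit_finish() and my_ring_kick() are illustrative names only:

#include <linux/netdevice.h>

static void my_ring_kick(void)
{
        /* write the tail pointer / doorbell register here */
}

static netdev_tx_t my_xmit_finish(struct sk_buff *skb,
                                  struct netdev_queue *nd_q,
                                  unsigned int bytes)
{
        /* Accounts 'bytes' to BQL and returns true when the doorbell has to
         * be written now: either this skb ends the batch (!skb->xmit_more)
         * or BQL just stopped the queue, so a deferred kick would never
         * happen.  This replaces the old netdev_tx_sent_queue() +
         * "!skb->xmit_more || netif_xmit_stopped()" pair.
         */
        if (__netdev_tx_sent_queue(nd_q, bytes, skb->xmit_more))
                my_ring_kick();

        return NETDEV_TX_OK;
}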
*/ -struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev, - unsigned int max_tx_rings, - unsigned int max_rx_rings) +struct nfp_net * +nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev, + unsigned int max_tx_rings, unsigned int max_rx_rings) { struct nfp_net *nn; + int err; if (needs_netdev) { struct net_device *netdev; @@ -3594,6 +3594,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev, } nn->dp.dev = &pdev->dev; + nn->dp.ctrl_bar = ctrl_bar; nn->pdev = pdev; nn->max_tx_rings = max_tx_rings; @@ -3616,7 +3617,19 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev, timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0); + err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar, + &nn->tlv_caps); + if (err) + goto err_free_nn; + return nn; + +err_free_nn: + if (nn->dp.netdev) + free_netdev(nn->dp.netdev); + else + vfree(nn); + return ERR_PTR(err); } /** @@ -3889,11 +3902,6 @@ int nfp_net_init(struct nfp_net *nn) nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD; } - err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar, - &nn->tlv_caps); - if (err) - return err; - if (nn->dp.netdev) nfp_net_netdev_init(nn); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 1e7d20468a34..08f5fdbd8e41 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -116,13 +116,13 @@ nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev, n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS); /* Allocate and initialise the vNIC */ - nn = nfp_net_alloc(pf->pdev, needs_netdev, n_tx_rings, n_rx_rings); + nn = nfp_net_alloc(pf->pdev, ctrl_bar, needs_netdev, + n_tx_rings, n_rx_rings); if (IS_ERR(nn)) return nn; nn->app = pf->app; nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar); - nn->dp.ctrl_bar = ctrl_bar; nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ; nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ; nn->dp.is_vf = 0; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index d2c1e9ea5668..1145849ca7ba 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -172,7 +172,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, rx_bar_off = NFP_PCIE_QUEUE(startq); /* Allocate and initialise the netdev */ - nn = nfp_net_alloc(pdev, true, max_tx_rings, max_rx_rings); + nn = nfp_net_alloc(pdev, ctrl_bar, true, max_tx_rings, max_rx_rings); if (IS_ERR(nn)) { err = PTR_ERR(nn); goto err_ctrl_unmap; @@ -180,7 +180,6 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, vf->nn = nn; nn->fw_ver = fw_ver; - nn->dp.ctrl_bar = ctrl_bar; nn->dp.is_vf = 1; nn->stride_tx = stride; nn->stride_rx = stride; diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 25382f8fbb70..bd8695a4faaa 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -783,8 +783,6 @@ static int lpc_mii_probe(struct net_device *ndev) phy_set_max_speed(phydev, SPEED_100); - phydev->advertising = phydev->supported; - pldat->link = 0; pldat->speed = 0; pldat->duplex = -1; diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c index a9f1bc013364..1450f386bc65 100644 --- a/drivers/net/ethernet/qualcomm/qca_debug.c +++ b/drivers/net/ethernet/qualcomm/qca_debug.c @@ -61,6 +61,7 @@ 
static const char qcaspi_gstrings_stats[][ETH_GSTRING_LEN] = { "Transmit ring full", "SPI errors", "Write verify errors", + "Buffer available errors", }; #ifdef CONFIG_DEBUG_FS diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index d5310504f436..97f92953bdb9 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -289,6 +289,14 @@ qcaspi_transmit(struct qcaspi *qca) qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA, &available); + if (available > QCASPI_HW_BUF_LEN) { + /* This could only happen by interferences on the SPI line. + * So retry later ... + */ + qca->stats.buf_avail_err++; + return -1; + } + while (qca->txr.skb[qca->txr.head]) { pkt_len = qca->txr.skb[qca->txr.head]->len + QCASPI_HW_PKT_LEN; @@ -355,7 +363,13 @@ qcaspi_receive(struct qcaspi *qca) netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n", available); - if (available == 0) { + if (available > QCASPI_HW_BUF_LEN) { + /* This could only happen by interferences on the SPI line. + * So retry later ... + */ + qca->stats.buf_avail_err++; + return -1; + } else if (available == 0) { netdev_dbg(net_dev, "qcaspi_receive called without any data being available!\n"); return -1; } diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h index 2d2c49726492..eb9af45fcc5e 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.h +++ b/drivers/net/ethernet/qualcomm/qca_spi.h @@ -74,6 +74,7 @@ struct qcaspi_stats { u64 ring_full; u64 spi_err; u64 write_verify_failed; + u64 buf_avail_err; }; struct qcaspi { diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 1fd01688d37b..b3010cc51cdd 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -224,7 +224,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 }, - { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_USR, 0x0116), 0, 0, RTL_CFG_0 }, { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 }, { 0x0001, 0x8168, @@ -6584,7 +6584,7 @@ static int r8169_phy_connect(struct rtl8169_private *tp) phy_set_max_speed(phydev, SPEED_100); /* Ensure to advertise everything, incl. 
pause */ - phydev->advertising = phydev->supported; + linkmode_copy(phydev->advertising, phydev->supported); phy_attached_info(phydev); diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 7eeac3d6cfe8..b6a50058bb8d 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -6041,6 +6041,10 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" }, { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" }, { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" }, + /* MUM and SUC firmware share the same partition type */ + { NVRAM_PARTITION_TYPE_MUM_FIRMWARE, 0, 0, "sfc_mumfw" }, + { NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" }, + { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" } }; static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, @@ -6091,6 +6095,9 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, part->common.mtd.flags = MTD_CAP_NORFLASH; part->common.mtd.size = size; part->common.mtd.erasesize = erase_size; + /* sfc_status is read-only */ + if (!erase_size) + part->common.mtd.flags |= MTD_NO_ERASE; return 0; } diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index c3ad564ac4c0..22eb059086f7 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -553,13 +553,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments))) goto err; - /* Update BQL */ - netdev_tx_sent_queue(tx_queue->core_txq, skb_len); - efx_tx_maybe_stop_queue(tx_queue); /* Pass off to hardware */ - if (!xmit_more || netif_xmit_stopped(tx_queue->core_txq)) { + if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) { struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue); /* There could be packets left on the partner queue if those diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index d9d0d03e4ce7..bba9733b5119 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -234,6 +234,9 @@ #define DESC_NUM 256 +#define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#define NETSEC_RX_BUF_SZ 1536 + #define DESC_SZ sizeof(struct netsec_de) #define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000) @@ -571,34 +574,10 @@ static const struct ethtool_ops netsec_ethtool_ops = { /************* NETDEV_OPS FOLLOW *************/ -static struct sk_buff *netsec_alloc_skb(struct netsec_priv *priv, - struct netsec_desc *desc) -{ - struct sk_buff *skb; - - if (device_get_dma_attr(priv->dev) == DEV_DMA_COHERENT) { - skb = netdev_alloc_skb_ip_align(priv->ndev, desc->len); - } else { - desc->len = L1_CACHE_ALIGN(desc->len); - skb = netdev_alloc_skb(priv->ndev, desc->len); - } - if (!skb) - return NULL; - - desc->addr = skb->data; - desc->dma_addr = dma_map_single(priv->dev, desc->addr, desc->len, - DMA_FROM_DEVICE); - if (dma_mapping_error(priv->dev, desc->dma_addr)) { - dev_kfree_skb_any(skb); - return NULL; - } - return skb; -} static void netsec_set_rx_de(struct netsec_priv *priv, struct netsec_desc_ring *dring, u16 idx, - const struct netsec_desc *desc, - struct sk_buff *skb) + const struct netsec_desc *desc) { struct netsec_de *de = dring->vaddr + DESC_SZ * idx; u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) | @@ -617,59 +596,6 @@ static void netsec_set_rx_de(struct netsec_priv *priv, dring->desc[idx].dma_addr = desc->dma_addr; 
dring->desc[idx].addr = desc->addr; dring->desc[idx].len = desc->len; - dring->desc[idx].skb = skb; -} - -static struct sk_buff *netsec_get_rx_de(struct netsec_priv *priv, - struct netsec_desc_ring *dring, - u16 idx, - struct netsec_rx_pkt_info *rxpi, - struct netsec_desc *desc, u16 *len) -{ - struct netsec_de de = {}; - - memcpy(&de, dring->vaddr + DESC_SZ * idx, DESC_SZ); - - *len = de.buf_len_info >> 16; - - rxpi->err_flag = (de.attr >> NETSEC_RX_PKT_ER_FIELD) & 1; - rxpi->rx_cksum_result = (de.attr >> NETSEC_RX_PKT_CO_FIELD) & 3; - rxpi->err_code = (de.attr >> NETSEC_RX_PKT_ERR_FIELD) & - NETSEC_RX_PKT_ERR_MASK; - *desc = dring->desc[idx]; - return desc->skb; -} - -static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv, - struct netsec_rx_pkt_info *rxpi, - struct netsec_desc *desc, - u16 *len) -{ - struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; - struct sk_buff *tmp_skb, *skb = NULL; - struct netsec_desc td; - int tail; - - *rxpi = (struct netsec_rx_pkt_info){}; - - td.len = priv->ndev->mtu + 22; - - tmp_skb = netsec_alloc_skb(priv, &td); - - tail = dring->tail; - - if (!tmp_skb) { - netsec_set_rx_de(priv, dring, tail, &dring->desc[tail], - dring->desc[tail].skb); - } else { - skb = netsec_get_rx_de(priv, dring, tail, rxpi, desc, len); - netsec_set_rx_de(priv, dring, tail, &td, tmp_skb); - } - - /* move tail ahead */ - dring->tail = (dring->tail + 1) % DESC_NUM; - - return skb; } static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget) @@ -736,19 +662,65 @@ static int netsec_process_tx(struct netsec_priv *priv, int budget) return done; } +static void *netsec_alloc_rx_data(struct netsec_priv *priv, + dma_addr_t *dma_handle, u16 *desc_len) +{ + size_t total_len = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + size_t payload_len = NETSEC_RX_BUF_SZ; + dma_addr_t mapping; + void *buf; + + total_len += SKB_DATA_ALIGN(payload_len + NETSEC_SKB_PAD); + + buf = napi_alloc_frag(total_len); + if (!buf) + return NULL; + + mapping = dma_map_single(priv->dev, buf + NETSEC_SKB_PAD, payload_len, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, mapping))) + goto err_out; + + *dma_handle = mapping; + *desc_len = payload_len; + + return buf; + +err_out: + skb_free_frag(buf); + return NULL; +} + +static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; + u16 idx = from; + + while (num) { + netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]); + idx++; + if (idx >= DESC_NUM) + idx = 0; + num--; + } +} + static int netsec_process_rx(struct netsec_priv *priv, int budget) { struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; struct net_device *ndev = priv->ndev; struct netsec_rx_pkt_info rx_info; - int done = 0; - struct netsec_desc desc; struct sk_buff *skb; - u16 len; + int done = 0; while (done < budget) { u16 idx = dring->tail; struct netsec_de *de = dring->vaddr + (DESC_SZ * idx); + struct netsec_desc *desc = &dring->desc[idx]; + u16 pkt_len, desc_len; + dma_addr_t dma_handle; + void *buf_addr; + u32 truesize; if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) { /* reading the register clears the irq */ @@ -762,18 +734,59 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) */ dma_rmb(); done++; - skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len); - if (unlikely(!skb) || rx_info.err_flag) { + + pkt_len = de->buf_len_info >> 16; + rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) & + NETSEC_RX_PKT_ERR_MASK; + 
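netsec_alloc_rx_data() above switches the RX ring from preallocated skbs to raw page fragments; the skb itself is only built at completion time with build_skb(), as the rest of this hunk shows. A minimal sketch of the buffer layout, under assumed MY_*/my_* names:

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

#define MY_RX_BUF_SZ    1536
#define MY_SKB_PAD      (NET_SKB_PAD + NET_IP_ALIGN)

static void *my_alloc_rx_buf(struct device *dev, dma_addr_t *dma)
{
        size_t len = SKB_DATA_ALIGN(MY_RX_BUF_SZ + MY_SKB_PAD) +
                     SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        void *buf = napi_alloc_frag(len);

        if (!buf)
                return NULL;

        /* Only the payload area is handed to the device; the headroom and
         * the skb_shared_info tail stay CPU-owned for the later build_skb().
         */
        *dma = dma_map_single(dev, buf + MY_SKB_PAD, MY_RX_BUF_SZ,
                              DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *dma)) {
                skb_free_frag(buf);
                return NULL;
        }
        return buf;
}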
rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1; + if (rx_info.err_flag) { netif_err(priv, drv, priv->ndev, - "%s: rx fail err(%d)\n", - __func__, rx_info.err_code); + "%s: rx fail err(%d)\n", __func__, + rx_info.err_code); ndev->stats.rx_dropped++; + dring->tail = (dring->tail + 1) % DESC_NUM; + /* reuse buffer page frag */ + netsec_rx_fill(priv, idx, 1); continue; } + rx_info.rx_cksum_result = + (de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3; - dma_unmap_single(priv->dev, desc.dma_addr, desc.len, - DMA_FROM_DEVICE); - skb_put(skb, len); + /* allocate a fresh buffer and map it to the hardware. + * This will eventually replace the old buffer in the hardware + */ + buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len); + if (unlikely(!buf_addr)) + break; + + dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len, + DMA_FROM_DEVICE); + prefetch(desc->addr); + + truesize = SKB_DATA_ALIGN(desc->len + NETSEC_SKB_PAD) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + skb = build_skb(desc->addr, truesize); + if (unlikely(!skb)) { + /* free the newly allocated buffer, we are not going to + * use it + */ + dma_unmap_single(priv->dev, dma_handle, desc_len, + DMA_FROM_DEVICE); + skb_free_frag(buf_addr); + netif_err(priv, drv, priv->ndev, + "rx failed to build skb\n"); + break; + } + dma_unmap_single_attrs(priv->dev, desc->dma_addr, desc->len, + DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + + /* Update the descriptor with the new buffer we allocated */ + desc->len = desc_len; + desc->dma_addr = dma_handle; + desc->addr = buf_addr; + + skb_reserve(skb, NETSEC_SKB_PAD); + skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, priv->ndev); if (priv->rx_cksum_offload_flag && @@ -782,8 +795,11 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) { ndev->stats.rx_packets++; - ndev->stats.rx_bytes += len; + ndev->stats.rx_bytes += pkt_len; } + + netsec_rx_fill(priv, idx, 1); + dring->tail = (dring->tail + 1) % DESC_NUM; } return done; @@ -946,7 +962,10 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id) dma_unmap_single(priv->dev, desc->dma_addr, desc->len, id == NETSEC_RING_RX ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE); - dev_kfree_skb(desc->skb); + if (id == NETSEC_RING_RX) + skb_free_frag(desc->addr); + else if (id == NETSEC_RING_TX) + dev_kfree_skb(desc->skb); } memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM); @@ -977,47 +996,50 @@ static void netsec_free_dring(struct netsec_priv *priv, int id) static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id) { struct netsec_desc_ring *dring = &priv->desc_ring[id]; - int ret = 0; dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM, &dring->desc_dma, GFP_KERNEL); - if (!dring->vaddr) { - ret = -ENOMEM; + if (!dring->vaddr) goto err; - } dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL); - if (!dring->desc) { - ret = -ENOMEM; + if (!dring->desc) goto err; - } return 0; err: netsec_free_dring(priv, id); - return ret; + return -ENOMEM; } static int netsec_setup_rx_dring(struct netsec_priv *priv) { struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; - struct netsec_desc desc; - struct sk_buff *skb; - int n; + int i; - desc.len = priv->ndev->mtu + 22; + for (i = 0; i < DESC_NUM; i++) { + struct netsec_desc *desc = &dring->desc[i]; + dma_addr_t dma_handle; + void *buf; + u16 len; - for (n = 0; n < DESC_NUM; n++) { - skb = netsec_alloc_skb(priv, &desc); - if (!skb) { + buf = netsec_alloc_rx_data(priv, &dma_handle, &len); + if (!buf) { netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); - return -ENOMEM; + goto err_out; } - netsec_set_rx_de(priv, dring, n, &desc, skb); + desc->dma_addr = dma_handle; + desc->addr = buf; + desc->len = len; } + netsec_rx_fill(priv, 0, DESC_NUM); + return 0; + +err_out: + return -ENOMEM; } static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg, @@ -1377,6 +1399,8 @@ static int netsec_netdev_init(struct net_device *ndev) int ret; u16 data; + BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM); + ret = netsec_alloc_dring(priv, NETSEC_RING_TX); if (ret) return ret; diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 6732f5cbde08..9e7391faa1dc 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -1117,7 +1117,7 @@ static void ave_phy_adjust_link(struct net_device *ndev) if (phydev->asym_pause) rmt_adv |= LPA_PAUSE_ASYM; - lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising); + lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising); cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); if (cap & FLOW_CTRL_TX) txcr |= AVE_TXCR_FLOCTR; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 5710864fa809..d1f61c25d82b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -458,8 +458,10 @@ stmmac_get_pauseparam(struct net_device *netdev, if (!adv_lp.pause) return; } else { - if (!(netdev->phydev->supported & SUPPORTED_Pause) || - !(netdev->phydev->supported & SUPPORTED_Asym_Pause)) + if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, + netdev->phydev->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + netdev->phydev->supported)) return; } @@ -487,8 +489,10 @@ stmmac_set_pauseparam(struct net_device *netdev, if (!adv_lp.pause) return -EOPNOTSUPP; } else { - if (!(phy->supported & SUPPORTED_Pause) || - !(phy->supported & SUPPORTED_Asym_Pause)) + if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phy->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + 
phy->supported)) return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 500f7ed8c58c..9434fd5a5477 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -565,26 +565,14 @@ static const struct cpsw_stats cpsw_gstrings_ch_stats[] = { (func)(slave++, ##arg); \ } while (0) +static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, + __be16 proto, u16 vid); + static inline int cpsw_get_slave_port(u32 slave_num) { return slave_num + 1; } -static void cpsw_add_mcast(struct cpsw_priv *priv, const u8 *addr) -{ - struct cpsw_common *cpsw = priv->cpsw; - - if (cpsw->data.dual_emac) { - struct cpsw_slave *slave = cpsw->slaves + priv->emac_port; - - cpsw_ale_add_mcast(cpsw->ale, addr, ALE_PORT_HOST, - ALE_VLAN, slave->port_vlan, 0); - return; - } - - cpsw_ale_add_mcast(cpsw->ale, addr, ALE_ALL_PORTS, 0, 0, 0); -} - static void cpsw_set_promiscious(struct net_device *ndev, bool enable) { struct cpsw_common *cpsw = ndev_to_cpsw(ndev); @@ -640,7 +628,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) /* Clear all mcast from ALE */ cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1); - __dev_mc_unsync(ndev, NULL); + __hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL); /* Flood All Unicast Packets to Host port */ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); @@ -661,29 +649,148 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) } } -static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr) +struct addr_sync_ctx { + struct net_device *ndev; + const u8 *addr; /* address to be synched */ + int consumed; /* number of address instances */ + int flush; /* flush flag */ +}; + +/** + * cpsw_set_mc - adds multicast entry to the table if it's not added or deletes + * if it's not deleted + * @ndev: device to sync + * @addr: address to be added or deleted + * @vid: vlan id, if vid < 0 set/unset address for real device + * @add: add address if the flag is set or remove otherwise + */ +static int cpsw_set_mc(struct net_device *ndev, const u8 *addr, + int vid, int add) { struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int mask, flags, ret; + + if (vid < 0) { + if (cpsw->data.dual_emac) + vid = cpsw->slaves[priv->emac_port].port_vlan; + else + vid = 0; + } + + mask = cpsw->data.dual_emac ? ALE_PORT_HOST : ALE_ALL_PORTS; + flags = vid ? 
ALE_VLAN : 0; + + if (add) + ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0); + else + ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid); + + return ret; +} + +static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx) +{ + struct addr_sync_ctx *sync_ctx = ctx; + struct netdev_hw_addr *ha; + int found = 0, ret = 0; + + if (!vdev || !(vdev->flags & IFF_UP)) + return 0; + + /* vlan address is relevant if its sync_cnt != 0 */ + netdev_for_each_mc_addr(ha, vdev) { + if (ether_addr_equal(ha->addr, sync_ctx->addr)) { + found = ha->sync_cnt; + break; + } + } + + if (found) + sync_ctx->consumed++; + + if (sync_ctx->flush) { + if (!found) + cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0); + return 0; + } + + if (found) + ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1); + + return ret; +} + +static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num) +{ + struct addr_sync_ctx sync_ctx; + int ret; + + sync_ctx.consumed = 0; + sync_ctx.addr = addr; + sync_ctx.ndev = ndev; + sync_ctx.flush = 0; + + ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx); + if (sync_ctx.consumed < num && !ret) + ret = cpsw_set_mc(ndev, addr, -1, 1); + + return ret; +} + +static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num) +{ + struct addr_sync_ctx sync_ctx; + + sync_ctx.consumed = 0; + sync_ctx.addr = addr; + sync_ctx.ndev = ndev; + sync_ctx.flush = 1; + + vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx); + if (sync_ctx.consumed == num) + cpsw_set_mc(ndev, addr, -1, 0); - cpsw_add_mcast(priv, addr); return 0; } -static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr) +static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx) { - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - int vid, flags; + struct addr_sync_ctx *sync_ctx = ctx; + struct netdev_hw_addr *ha; + int found = 0; - if (cpsw->data.dual_emac) { - vid = cpsw->slaves[priv->emac_port].port_vlan; - flags = ALE_VLAN; - } else { - vid = 0; - flags = 0; + if (!vdev || !(vdev->flags & IFF_UP)) + return 0; + + /* vlan address is relevant if its sync_cnt != 0 */ + netdev_for_each_mc_addr(ha, vdev) { + if (ether_addr_equal(ha->addr, sync_ctx->addr)) { + found = ha->sync_cnt; + break; + } } - cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid); + if (!found) + return 0; + + sync_ctx->consumed++; + cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0); + return 0; +} + +static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num) +{ + struct addr_sync_ctx sync_ctx; + + sync_ctx.addr = addr; + sync_ctx.ndev = ndev; + sync_ctx.consumed = 0; + + vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx); + if (sync_ctx.consumed < num) + cpsw_set_mc(ndev, addr, -1, 0); + return 0; } @@ -704,7 +811,9 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) /* Restore allmulti on vlans if necessary */ cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI); - __dev_mc_sync(ndev, cpsw_add_mc_addr, cpsw_del_mc_addr); + /* add/remove mcast address either for real netdev or for vlan */ + __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr, + cpsw_del_mc_addr); } static void cpsw_intr_enable(struct cpsw_common *cpsw) @@ -1845,9 +1954,23 @@ static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv) slave_write(slave, tx_prio_map, tx_prio_rg); } +static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg) +{ + struct cpsw_priv *priv = arg; + + if (!vdev) + 
return 0; + + cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid); + return 0; +} + /* restore resources after port reset */ static void cpsw_restore(struct cpsw_priv *priv) { + /* restore vlan configurations */ + vlan_for_each(priv->ndev, cpsw_restore_vlans, priv); + /* restore MQPRIO offload */ for_each_slave(priv, cpsw_mqprio_resume, priv); @@ -1964,7 +2087,7 @@ static int cpsw_ndo_stop(struct net_device *ndev) struct cpsw_common *cpsw = priv->cpsw; cpsw_info(priv, ifdown, "shutting down cpsw device\n"); - __dev_mc_unsync(priv->ndev, cpsw_del_mc_addr); + __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc); netif_tx_stop_all_queues(priv->ndev); netif_carrier_off(priv->ndev); @@ -2415,6 +2538,7 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, HOST_PORT_NUM, ALE_VLAN, vid); ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, 0, ALE_VLAN, vid); + ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid); err: pm_runtime_put(cpsw->dev); return ret; diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 6a71c2c0f17d..c50a9772f4af 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -607,9 +607,9 @@ static void tc_handle_link_change(struct net_device *dev) static int tc_mii_probe(struct net_device *dev) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct tc35815_local *lp = netdev_priv(dev); struct phy_device *phydev; - u32 dropmask; phydev = phy_find_first(lp->mii_bus); if (!phydev) { @@ -630,17 +630,22 @@ static int tc_mii_probe(struct net_device *dev) /* mask with MAC supported features */ phy_set_max_speed(phydev, SPEED_100); - dropmask = 0; - if (options.speed == 10) - dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full; - else if (options.speed == 100) - dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full; - if (options.duplex == 1) - dropmask |= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full; - else if (options.duplex == 2) - dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Half; - phydev->supported &= ~dropmask; - phydev->advertising = phydev->supported; + if (options.speed == 10) { + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask); + } else if (options.speed == 100) { + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, mask); + } + if (options.duplex == 1) { + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask); + } else if (options.duplex == 2) { + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask); + } + linkmode_and(phydev->supported, phydev->supported, mask); + linkmode_copy(phydev->advertising, phydev->supported); lp->link = 0; lp->speed = 0; diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index a0cd1c41cf5f..7c53e06b31c3 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -70,6 +70,7 @@ struct geneve_dev { bool collect_md; bool use_udp6_rx_checksums; bool ttl_inherit; + enum ifla_geneve_df df; }; struct geneve_sock { @@ -387,6 +388,57 @@ drop: return 0; } +/* Callback from net/ipv{4,6}/udp.c to check that we have a tunnel for errors */ +static int geneve_udp_encap_err_lookup(struct sock *sk, struct sk_buff *skb) +{ + struct genevehdr *geneveh; + struct geneve_sock *gs; + u8 zero_vni[3] = { 0 }; + u8 *vni = zero_vni; + + if (skb->len < 
GENEVE_BASE_HLEN) + return -EINVAL; + + geneveh = geneve_hdr(skb); + if (geneveh->ver != GENEVE_VER) + return -EINVAL; + + if (geneveh->proto_type != htons(ETH_P_TEB)) + return -EINVAL; + + gs = rcu_dereference_sk_user_data(sk); + if (!gs) + return -ENOENT; + + if (geneve_get_sk_family(gs) == AF_INET) { + struct iphdr *iph = ip_hdr(skb); + __be32 addr4 = 0; + + if (!gs->collect_md) { + vni = geneve_hdr(skb)->vni; + addr4 = iph->daddr; + } + + return geneve_lookup(gs, addr4, vni) ? 0 : -ENOENT; + } + +#if IS_ENABLED(CONFIG_IPV6) + if (geneve_get_sk_family(gs) == AF_INET6) { + struct ipv6hdr *ip6h = ipv6_hdr(skb); + struct in6_addr addr6 = { 0 }; + + if (!gs->collect_md) { + vni = geneve_hdr(skb)->vni; + addr6 = ip6h->daddr; + } + + return geneve6_lookup(gs, addr6, vni) ? 0 : -ENOENT; + } +#endif + + return -EPFNOSUPPORT; +} + static struct socket *geneve_create_sock(struct net *net, bool ipv6, __be16 port, bool ipv6_rx_csum) { @@ -544,6 +596,7 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port, tunnel_cfg.gro_receive = geneve_gro_receive; tunnel_cfg.gro_complete = geneve_gro_complete; tunnel_cfg.encap_rcv = geneve_udp_encap_recv; + tunnel_cfg.encap_err_lookup = geneve_udp_encap_err_lookup; tunnel_cfg.encap_destroy = NULL; setup_udp_tunnel_sock(net, sock, &tunnel_cfg); list_add(&gs->list, &gn->sock_list); @@ -823,8 +876,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct rtable *rt; struct flowi4 fl4; __u8 tos, ttl; + __be16 df = 0; __be16 sport; - __be16 df; int err; rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info); @@ -838,6 +891,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (geneve->collect_md) { tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; + + df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; } else { tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb); if (geneve->ttl_inherit) @@ -845,8 +900,22 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, else ttl = key->ttl; ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); + + if (geneve->df == GENEVE_DF_SET) { + df = htons(IP_DF); + } else if (geneve->df == GENEVE_DF_INHERIT) { + struct ethhdr *eth = eth_hdr(skb); + + if (ntohs(eth->h_proto) == ETH_P_IPV6) { + df = htons(IP_DF); + } else if (ntohs(eth->h_proto) == ETH_P_IP) { + struct iphdr *iph = ip_hdr(skb); + + if (iph->frag_off & htons(IP_DF)) + df = htons(IP_DF); + } + } } - df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? 
htons(IP_DF) : 0; err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr)); if (unlikely(err)) @@ -1093,6 +1162,7 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = { [IFLA_GENEVE_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 }, [IFLA_GENEVE_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 }, [IFLA_GENEVE_TTL_INHERIT] = { .type = NLA_U8 }, + [IFLA_GENEVE_DF] = { .type = NLA_U8 }, }; static int geneve_validate(struct nlattr *tb[], struct nlattr *data[], @@ -1128,6 +1198,16 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[], } } + if (data[IFLA_GENEVE_DF]) { + enum ifla_geneve_df df = nla_get_u8(data[IFLA_GENEVE_DF]); + + if (df < 0 || df > GENEVE_DF_MAX) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_GENEVE_DF], + "Invalid DF attribute"); + return -EINVAL; + } + } + return 0; } @@ -1173,7 +1253,7 @@ static int geneve_configure(struct net *net, struct net_device *dev, struct netlink_ext_ack *extack, const struct ip_tunnel_info *info, bool metadata, bool ipv6_rx_csum, - bool ttl_inherit) + bool ttl_inherit, enum ifla_geneve_df df) { struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_dev *t, *geneve = netdev_priv(dev); @@ -1223,6 +1303,7 @@ static int geneve_configure(struct net *net, struct net_device *dev, geneve->collect_md = metadata; geneve->use_udp6_rx_checksums = ipv6_rx_csum; geneve->ttl_inherit = ttl_inherit; + geneve->df = df; err = register_netdevice(dev); if (err) @@ -1242,7 +1323,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack, struct ip_tunnel_info *info, bool *metadata, bool *use_udp6_rx_checksums, bool *ttl_inherit, - bool changelink) + enum ifla_geneve_df *df, bool changelink) { int attrtype; @@ -1330,6 +1411,9 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], if (data[IFLA_GENEVE_TOS]) info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]); + if (data[IFLA_GENEVE_DF]) + *df = nla_get_u8(data[IFLA_GENEVE_DF]); + if (data[IFLA_GENEVE_LABEL]) { info->key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) & IPV6_FLOWLABEL_MASK; @@ -1448,6 +1532,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { + enum ifla_geneve_df df = GENEVE_DF_UNSET; bool use_udp6_rx_checksums = false; struct ip_tunnel_info info; bool ttl_inherit = false; @@ -1456,12 +1541,12 @@ static int geneve_newlink(struct net *net, struct net_device *dev, init_tnl_info(&info, GENEVE_UDP_PORT); err = geneve_nl2info(tb, data, extack, &info, &metadata, - &use_udp6_rx_checksums, &ttl_inherit, false); + &use_udp6_rx_checksums, &ttl_inherit, &df, false); if (err) return err; err = geneve_configure(net, dev, extack, &info, metadata, - use_udp6_rx_checksums, ttl_inherit); + use_udp6_rx_checksums, ttl_inherit, df); if (err) return err; @@ -1524,6 +1609,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], struct ip_tunnel_info info; bool metadata; bool use_udp6_rx_checksums; + enum ifla_geneve_df df; bool ttl_inherit; int err; @@ -1539,7 +1625,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], use_udp6_rx_checksums = geneve->use_udp6_rx_checksums; ttl_inherit = geneve->ttl_inherit; err = geneve_nl2info(tb, data, extack, &info, &metadata, - &use_udp6_rx_checksums, &ttl_inherit, true); + &use_udp6_rx_checksums, &ttl_inherit, &df, true); if (err) return err; @@ -1572,6 +1658,7 @@ static size_t geneve_get_size(const struct net_device *dev) 
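geneve_udp_encap_err_lookup() above gives the UDP stack a way to confirm that an incoming ICMP error really belongs to a configured tunnel before acting on it. A minimal sketch of wiring such a hook into a tunnel socket; the my_tun_* names and the trivial lookup are placeholders, only the udp_tunnel_sock_cfg/setup_udp_tunnel_sock usage mirrors the code above:

#include <net/sock.h>
#include <net/udp_tunnel.h>

static int my_tun_err_lookup(struct sock *sk, struct sk_buff *skb)
{
        /* Return 0 only when the ICMP error embedded in skb matches a
         * tunnel this socket actually terminates; any other error is
         * rejected instead of being delivered to an arbitrary socket.
         */
        if (!rcu_dereference_sk_user_data(sk))
                return -ENOENT;
        return 0;
}

static void my_tun_open_sock(struct net *net, struct socket *sock,
                             int (*encap_rcv)(struct sock *sk,
                                              struct sk_buff *skb))
{
        struct udp_tunnel_sock_cfg cfg = {
                .encap_type       = 1, /* non-zero: UDP core calls encap_rcv */
                .encap_rcv        = encap_rcv,
                .encap_err_lookup = my_tun_err_lookup,
        };

        setup_udp_tunnel_sock(net, sock, &cfg);
}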
nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GENEVE_REMOTE{6} */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */ + nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_DF */ nla_total_size(sizeof(__be32)) + /* IFLA_GENEVE_LABEL */ nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */ nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */ @@ -1620,6 +1707,9 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_be32(skb, IFLA_GENEVE_LABEL, info->key.label)) goto nla_put_failure; + if (nla_put_u8(skb, IFLA_GENEVE_DF, geneve->df)) + goto nla_put_failure; + if (nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst)) goto nla_put_failure; @@ -1666,12 +1756,13 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name, memset(tb, 0, sizeof(tb)); dev = rtnl_create_link(net, name, name_assign_type, - &geneve_link_ops, tb); + &geneve_link_ops, tb, NULL); if (IS_ERR(dev)) return dev; init_tnl_info(&info, dst_port); - err = geneve_configure(net, dev, NULL, &info, true, true, false); + err = geneve_configure(net, dev, NULL, &info, + true, true, false, GENEVE_DF_UNSET); if (err) { free_netdev(dev); return ERR_PTR(err); diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c index 6fe5dc9201d0..9d0504f3e3b2 100644 --- a/drivers/net/phy/amd.c +++ b/drivers/net/phy/amd.c @@ -66,7 +66,6 @@ static struct phy_driver am79c_driver[] = { { .name = "AM79C874", .phy_id_mask = 0xfffffff0, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = am79c_config_init, .ack_interrupt = am79c_ack_interrupt, .config_intr = am79c_config_intr, diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c index 632472cab3bb..beb3309bb0f0 100644 --- a/drivers/net/phy/aquantia.c +++ b/drivers/net/phy/aquantia.c @@ -25,15 +25,10 @@ #define PHY_ID_AQR107 0x03a1b4e0 #define PHY_ID_AQR405 0x03a1b4b0 -#define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \ - SUPPORTED_1000baseT_Full | \ - SUPPORTED_100baseT_Full | \ - PHY_DEFAULT_FEATURES) - static int aquantia_config_aneg(struct phy_device *phydev) { - phydev->supported = PHY_AQUANTIA_FEATURES; - phydev->advertising = phydev->supported; + linkmode_copy(phydev->supported, phy_10gbit_features); + linkmode_copy(phydev->advertising, phydev->supported); return 0; } @@ -116,7 +111,6 @@ static struct phy_driver aquantia_driver[] = { .phy_id_mask = 0xfffffff0, .name = "Aquantia AQ1202", .features = PHY_10GBIT_FULL_FEATURES, - .flags = PHY_HAS_INTERRUPT, .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, .config_intr = aquantia_config_intr, @@ -128,7 +122,6 @@ static struct phy_driver aquantia_driver[] = { .phy_id_mask = 0xfffffff0, .name = "Aquantia AQ2104", .features = PHY_10GBIT_FULL_FEATURES, - .flags = PHY_HAS_INTERRUPT, .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, .config_intr = aquantia_config_intr, @@ -140,7 +133,6 @@ static struct phy_driver aquantia_driver[] = { .phy_id_mask = 0xfffffff0, .name = "Aquantia AQR105", .features = PHY_10GBIT_FULL_FEATURES, - .flags = PHY_HAS_INTERRUPT, .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, .config_intr = aquantia_config_intr, @@ -152,7 +144,6 @@ static struct phy_driver aquantia_driver[] = { .phy_id_mask = 0xfffffff0, .name = "Aquantia AQR106", .features = PHY_10GBIT_FULL_FEATURES, - .flags = PHY_HAS_INTERRUPT, .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, .config_intr = aquantia_config_intr, @@ -164,7 
+155,6 @@ static struct phy_driver aquantia_driver[] = { .phy_id_mask = 0xfffffff0, .name = "Aquantia AQR107", .features = PHY_10GBIT_FULL_FEATURES, - .flags = PHY_HAS_INTERRUPT, .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, .config_intr = aquantia_config_intr, @@ -176,7 +166,6 @@ static struct phy_driver aquantia_driver[] = { .phy_id_mask = 0xfffffff0, .name = "Aquantia AQR405", .features = PHY_10GBIT_FULL_FEATURES, - .flags = PHY_HAS_INTERRUPT, .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, .config_intr = aquantia_config_intr, diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index e74a047a846e..f9432d053a22 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -379,7 +379,6 @@ static struct phy_driver at803x_driver[] = { .suspend = at803x_suspend, .resume = at803x_resume, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .ack_interrupt = at803x_ack_interrupt, .config_intr = at803x_config_intr, }, { @@ -395,7 +394,6 @@ static struct phy_driver at803x_driver[] = { .suspend = at803x_suspend, .resume = at803x_resume, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .ack_interrupt = at803x_ack_interrupt, .config_intr = at803x_config_intr, }, { @@ -410,7 +408,6 @@ static struct phy_driver at803x_driver[] = { .suspend = at803x_suspend, .resume = at803x_resume, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .aneg_done = at803x_aneg_done, .ack_interrupt = &at803x_ack_interrupt, .config_intr = &at803x_config_intr, diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c index d95bffdec4c1..a88dd14a25c0 100644 --- a/drivers/net/phy/bcm63xx.c +++ b/drivers/net/phy/bcm63xx.c @@ -43,7 +43,7 @@ static int bcm63xx_config_init(struct phy_device *phydev) int reg, err; /* ASYM_PAUSE bit is marked RO in datasheet, so don't cheat */ - phydev->supported |= SUPPORTED_Pause; + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported); reg = phy_read(phydev, MII_BCM63XX_IR); if (reg < 0) @@ -69,7 +69,7 @@ static struct phy_driver bcm63xx_driver[] = { .phy_id_mask = 0xfffffc00, .name = "Broadcom BCM63XX (1)", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL, + .flags = PHY_IS_INTERNAL, .config_init = bcm63xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm63xx_config_intr, @@ -78,7 +78,7 @@ static struct phy_driver bcm63xx_driver[] = { .phy_id = 0x002bdc00, .phy_id_mask = 0xfffffc00, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL, + .flags = PHY_IS_INTERNAL, .config_init = bcm63xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm63xx_config_intr, diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index b2b6307d64a4..712224cc442d 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -650,6 +650,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev) static struct phy_driver bcm7xxx_driver[] = { BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"), + BCM7XXX_28NM_EPHY(PHY_ID_BCM7255, "Broadcom BCM7255"), BCM7XXX_28NM_EPHY(PHY_ID_BCM7260, "Broadcom BCM7260"), BCM7XXX_28NM_EPHY(PHY_ID_BCM7268, "Broadcom BCM7268"), BCM7XXX_28NM_EPHY(PHY_ID_BCM7271, "Broadcom BCM7271"), @@ -670,6 +671,7 @@ static struct phy_driver bcm7xxx_driver[] = { static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { { PHY_ID_BCM7250, 0xfffffff0, }, + { PHY_ID_BCM7255, 0xfffffff0, }, { PHY_ID_BCM7260, 0xfffffff0, }, { PHY_ID_BCM7268, 0xfffffff0, }, { PHY_ID_BCM7271, 
0xfffffff0, }, diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c index f7ebdcff53e4..1b350183bffb 100644 --- a/drivers/net/phy/bcm87xx.c +++ b/drivers/net/phy/bcm87xx.c @@ -86,8 +86,12 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev) static int bcm87xx_config_init(struct phy_device *phydev) { - phydev->supported = SUPPORTED_10000baseR_FEC; - phydev->advertising = ADVERTISED_10000baseR_FEC; + linkmode_zero(phydev->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + phydev->supported); + linkmode_zero(phydev->advertising); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + phydev->advertising); phydev->state = PHY_NOLINK; phydev->autoneg = AUTONEG_DISABLE; @@ -193,7 +197,6 @@ static struct phy_driver bcm87xx_driver[] = { .phy_id = PHY_ID_BCM8706, .phy_id_mask = 0xffffffff, .name = "Broadcom BCM8706", - .flags = PHY_HAS_INTERRUPT, .config_init = bcm87xx_config_init, .config_aneg = bcm87xx_config_aneg, .read_status = bcm87xx_read_status, @@ -205,7 +208,6 @@ static struct phy_driver bcm87xx_driver[] = { .phy_id = PHY_ID_BCM8727, .phy_id_mask = 0xffffffff, .name = "Broadcom BCM8727", - .flags = PHY_HAS_INTERRUPT, .config_init = bcm87xx_config_init, .config_aneg = bcm87xx_config_aneg, .read_status = bcm87xx_read_status, diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 704537010453..aa73c5cc5f86 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -602,7 +602,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5411", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -611,7 +610,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5421", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -620,7 +618,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM54210E", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -629,7 +626,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5461", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -638,7 +634,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM54612E", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -647,7 +642,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM54616S", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .config_aneg = bcm54616s_config_aneg, .ack_interrupt = bcm_phy_ack_intr, @@ -657,7 +651,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5464", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -666,7 +659,6 @@ static struct phy_driver 
broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5481", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .config_aneg = bcm5481_config_aneg, .ack_interrupt = bcm_phy_ack_intr, @@ -676,7 +668,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM54810", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .config_aneg = bcm5481_config_aneg, .ack_interrupt = bcm_phy_ack_intr, @@ -686,7 +677,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5482", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm5482_config_init, .read_status = bcm5482_read_status, .ack_interrupt = bcm_phy_ack_intr, @@ -696,7 +686,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM50610", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -705,7 +694,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM50610M", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -714,7 +702,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM57780", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -723,7 +710,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCMAC131", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = brcm_fet_config_init, .ack_interrupt = brcm_fet_ack_interrupt, .config_intr = brcm_fet_config_intr, @@ -732,7 +718,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5241", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = brcm_fet_config_init, .ack_interrupt = brcm_fet_ack_interrupt, .config_intr = brcm_fet_config_intr, @@ -751,7 +736,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM89610", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c index c05af00bf4b6..fea61c81bda9 100644 --- a/drivers/net/phy/cicada.c +++ b/drivers/net/phy/cicada.c @@ -108,7 +108,6 @@ static struct phy_driver cis820x_driver[] = { .name = "Cicada Cis8201", .phy_id_mask = 0x000ffff0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &cis820x_config_init, .ack_interrupt = &cis820x_ack_interrupt, .config_intr = &cis820x_config_intr, @@ -117,7 +116,6 @@ static struct phy_driver cis820x_driver[] = { .name = "Cicada Cis8204", .phy_id_mask = 0x000fffc0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &cis820x_config_init, .ack_interrupt = &cis820x_ack_interrupt, .config_intr = &cis820x_config_intr, diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c index 5ee99b3b428c..97162008f42b 100644 --- a/drivers/net/phy/davicom.c +++ b/drivers/net/phy/davicom.c @@ -150,7 +150,6 @@ static 
struct phy_driver dm91xx_driver[] = { .name = "Davicom DM9161E", .phy_id_mask = 0x0ffffff0, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = dm9161_config_init, .config_aneg = dm9161_config_aneg, .ack_interrupt = dm9161_ack_interrupt, @@ -160,7 +159,6 @@ static struct phy_driver dm91xx_driver[] = { .name = "Davicom DM9161B/C", .phy_id_mask = 0x0ffffff0, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = dm9161_config_init, .config_aneg = dm9161_config_aneg, .ack_interrupt = dm9161_ack_interrupt, @@ -170,7 +168,6 @@ static struct phy_driver dm91xx_driver[] = { .name = "Davicom DM9161A", .phy_id_mask = 0x0ffffff0, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = dm9161_config_init, .config_aneg = dm9161_config_aneg, .ack_interrupt = dm9161_ack_interrupt, @@ -180,7 +177,6 @@ static struct phy_driver dm91xx_driver[] = { .name = "Davicom DM9131", .phy_id_mask = 0x0ffffff0, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .ack_interrupt = dm9161_ack_interrupt, .config_intr = dm9161_config_intr, } }; diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index edd4d44a386d..18b41bc345ab 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1521,7 +1521,6 @@ static struct phy_driver dp83640_driver = { .phy_id_mask = 0xfffffff0, .name = "NatSemi DP83640", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = dp83640_probe, .remove = dp83640_remove, .soft_reset = dp83640_soft_reset, diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index 6e8a2a4f3a6e..24c7f149f3e6 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -318,7 +318,6 @@ static struct phy_driver dp83822_driver[] = { .phy_id_mask = 0xfffffff0, .name = "TI DP83822", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = dp83822_config_init, .soft_reset = dp83822_phy_reset, .get_wol = dp83822_get_wol, diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c index 6e8e42361fd5..a6b55909d1dc 100644 --- a/drivers/net/phy/dp83848.c +++ b/drivers/net/phy/dp83848.c @@ -108,7 +108,6 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl); .phy_id_mask = 0xfffffff0, \ .name = _name, \ .features = PHY_BASIC_FEATURES, \ - .flags = PHY_HAS_INTERRUPT, \ \ .soft_reset = genphy_soft_reset, \ .config_init = _config_init, \ diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index b3935778b19f..da6a67d47ce9 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -334,7 +334,6 @@ static struct phy_driver dp83867_driver[] = { .phy_id_mask = 0xfffffff0, .name = "TI DP83867", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = dp83867_config_init, .soft_reset = dp83867_phy_reset, diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c index 78cad134a79e..da13356999e5 100644 --- a/drivers/net/phy/dp83tc811.c +++ b/drivers/net/phy/dp83tc811.c @@ -346,7 +346,6 @@ static struct phy_driver dp83811_driver[] = { .phy_id_mask = 0xfffffff0, .name = "TI DP83TC811", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = dp83811_config_init, .config_aneg = dp83811_config_aneg, .soft_reset = dp83811_phy_reset, diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index 67b260877f30..f7fb62712cd8 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -223,14 +223,23 @@ struct phy_device *fixed_phy_register(unsigned int irq, switch 
(status->speed) { case SPEED_1000: - phy->supported = PHY_1000BT_FEATURES; - break; + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + phy->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phy->supported); + /* fall through */ case SPEED_100: - phy->supported = PHY_100BT_FEATURES; - break; + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + phy->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + phy->supported); + /* fall through */ case SPEED_10: default: - phy->supported = PHY_10BT_FEATURES; + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + phy->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + phy->supported); } ret = phy_device_register(phy); diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c index 791587a49215..21ce68964204 100644 --- a/drivers/net/phy/icplus.c +++ b/drivers/net/phy/icplus.c @@ -234,7 +234,6 @@ static struct phy_driver icplus_driver[] = { .name = "ICPlus IP101A/G", .phy_id_mask = 0x0ffffff0, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .ack_interrupt = ip101a_g_ack_interrupt, .config_init = &ip101a_g_config_init, .suspend = genphy_suspend, diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c index 7d936fb61c22..fc0f5024a29e 100644 --- a/drivers/net/phy/intel-xway.c +++ b/drivers/net/phy/intel-xway.c @@ -242,7 +242,6 @@ static struct phy_driver xway_gphy[] = { .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.3", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .config_aneg = xway_gphy14_config_aneg, .ack_interrupt = xway_gphy_ack_interrupt, @@ -255,7 +254,6 @@ static struct phy_driver xway_gphy[] = { .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY22F (PEF 7061) v1.3", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .config_aneg = xway_gphy14_config_aneg, .ack_interrupt = xway_gphy_ack_interrupt, @@ -268,7 +266,6 @@ static struct phy_driver xway_gphy[] = { .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.4", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .config_aneg = xway_gphy14_config_aneg, .ack_interrupt = xway_gphy_ack_interrupt, @@ -281,7 +278,6 @@ static struct phy_driver xway_gphy[] = { .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY22F (PEF 7061) v1.4", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .config_aneg = xway_gphy14_config_aneg, .ack_interrupt = xway_gphy_ack_interrupt, @@ -294,7 +290,6 @@ static struct phy_driver xway_gphy[] = { .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.5 / v1.6", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, @@ -306,7 +301,6 @@ static struct phy_driver xway_gphy[] = { .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY22F (PEF 7061) v1.5 / v1.6", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, @@ -318,7 +312,6 @@ static struct phy_driver xway_gphy[] = { .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY11G (xRX v1.1 integrated)", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, 
.ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, @@ -330,7 +323,6 @@ static struct phy_driver xway_gphy[] = { .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY22F (xRX v1.1 integrated)", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, @@ -342,7 +334,6 @@ static struct phy_driver xway_gphy[] = { .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY11G (xRX v1.2 integrated)", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, @@ -354,7 +345,6 @@ static struct phy_driver xway_gphy[] = { .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY22F (xRX v1.2 integrated)", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c index c14b254b2879..c8bb29ae1a2a 100644 --- a/drivers/net/phy/lxt.c +++ b/drivers/net/phy/lxt.c @@ -177,7 +177,7 @@ static int lxt973a2_read_status(struct phy_device *phydev) */ } while (lpa == adv && retry--); - phydev->lp_advertising = mii_lpa_to_ethtool_lpa_t(lpa); + mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa); lpa &= adv; @@ -218,7 +218,7 @@ static int lxt973a2_read_status(struct phy_device *phydev) phydev->speed = SPEED_10; phydev->pause = phydev->asym_pause = 0; - phydev->lp_advertising = 0; + linkmode_zero(phydev->lp_advertising); } return 0; @@ -257,7 +257,6 @@ static struct phy_driver lxt97x_driver[] = { .name = "LXT970", .phy_id_mask = 0xfffffff0, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = lxt970_config_init, .ack_interrupt = lxt970_ack_interrupt, .config_intr = lxt970_config_intr, @@ -266,7 +265,6 @@ static struct phy_driver lxt97x_driver[] = { .name = "LXT971", .phy_id_mask = 0xfffffff0, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .ack_interrupt = lxt971_ack_interrupt, .config_intr = lxt971_config_intr, }, { diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index cbec296107bd..36a0db86c6f4 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -491,25 +491,26 @@ static int m88e1318_config_aneg(struct phy_device *phydev) } /** - * ethtool_adv_to_fiber_adv_t - * @ethadv: the ethtool advertisement settings + * linkmode_adv_to_fiber_adv_t + * @advertise: the linkmode advertisement settings * - * A small helper function that translates ethtool advertisement - * settings to phy autonegotiation advertisements for the - * MII_ADV register for fiber link. + * A small helper function that translates linkmode advertisement + * settings to phy autonegotiation advertisements for the MII_ADV + * register for fiber link. 
*/ -static inline u32 ethtool_adv_to_fiber_adv_t(u32 ethadv) +static inline u32 linkmode_adv_to_fiber_adv_t(unsigned long *advertise) { u32 result = 0; - if (ethadv & ADVERTISED_1000baseT_Half) + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, advertise)) result |= ADVERTISE_FIBER_1000HALF; - if (ethadv & ADVERTISED_1000baseT_Full) + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertise)) result |= ADVERTISE_FIBER_1000FULL; - if ((ethadv & ADVERTISE_PAUSE_ASYM) && (ethadv & ADVERTISE_PAUSE_CAP)) + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertise) && + linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertise)) result |= LPA_PAUSE_ASYM_FIBER; - else if (ethadv & ADVERTISE_PAUSE_CAP) + else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertise)) result |= (ADVERTISE_PAUSE_FIBER & (~ADVERTISE_PAUSE_ASYM_FIBER)); @@ -530,14 +531,13 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev) int changed = 0; int err; int adv, oldadv; - u32 advertise; if (phydev->autoneg != AUTONEG_ENABLE) return genphy_setup_forced(phydev); /* Only allow advertising what this PHY supports */ - phydev->advertising &= phydev->supported; - advertise = phydev->advertising; + linkmode_and(phydev->advertising, phydev->advertising, + phydev->supported); /* Setup fiber advertisement */ adv = phy_read(phydev, MII_ADVERTISE); @@ -547,7 +547,7 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev) oldadv = adv; adv &= ~(ADVERTISE_FIBER_1000HALF | ADVERTISE_FIBER_1000FULL | LPA_PAUSE_FIBER); - adv |= ethtool_adv_to_fiber_adv_t(advertise); + adv |= linkmode_adv_to_fiber_adv_t(phydev->advertising); if (adv != oldadv) { err = phy_write(phydev, MII_ADVERTISE, adv); @@ -879,8 +879,14 @@ static int m88e1510_config_init(struct phy_device *phydev) * so disable Pause support. */ pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause; - phydev->supported &= ~pause; - phydev->advertising &= ~pause; + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->supported); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->supported); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->advertising); } return m88e1318_config_init(phydev); @@ -1043,22 +1049,21 @@ static int m88e1145_config_init(struct phy_device *phydev) } /** - * fiber_lpa_to_ethtool_lpa_t + * fiber_lpa_to_linkmode_lpa_t + * @advertising: the linkmode advertisement settings * @lpa: value of the MII_LPA register for fiber link * * A small helper function that translates MII_LPA - * bits to ethtool LP advertisement settings. + * bits to linkmode LP advertisement settings. 
*/ -static u32 fiber_lpa_to_ethtool_lpa_t(u32 lpa) +static void fiber_lpa_to_linkmode_lpa_t(unsigned long *advertising, u32 lpa) { - u32 result = 0; - if (lpa & LPA_FIBER_1000HALF) - result |= ADVERTISED_1000baseT_Half; + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + advertising); if (lpa & LPA_FIBER_1000FULL) - result |= ADVERTISED_1000baseT_Full; - - return result; + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + advertising); } /** @@ -1134,9 +1139,8 @@ static int marvell_read_status_page_an(struct phy_device *phydev, } if (!fiber) { - phydev->lp_advertising = - mii_stat1000_to_ethtool_lpa_t(lpagb) | - mii_lpa_to_ethtool_lpa_t(lpa); + mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa); + mii_stat1000_to_linkmode_lpa_t(phydev->lp_advertising, lpagb); if (phydev->duplex == DUPLEX_FULL) { phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0; @@ -1144,7 +1148,7 @@ static int marvell_read_status_page_an(struct phy_device *phydev, } } else { /* The fiber link is only 1000M capable */ - phydev->lp_advertising = fiber_lpa_to_ethtool_lpa_t(lpa); + fiber_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa); if (phydev->duplex == DUPLEX_FULL) { if (!(lpa & LPA_PAUSE_FIBER)) { @@ -1183,7 +1187,7 @@ static int marvell_read_status_page_fixed(struct phy_device *phydev) phydev->pause = 0; phydev->asym_pause = 0; - phydev->lp_advertising = 0; + linkmode_zero(phydev->lp_advertising); return 0; } @@ -1235,7 +1239,8 @@ static int marvell_read_status(struct phy_device *phydev) int err; /* Check the fiber mode first */ - if (phydev->supported & SUPPORTED_FIBRE && + if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + phydev->supported) && phydev->interface != PHY_INTERFACE_MODE_SGMII) { err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE); if (err < 0) @@ -1278,7 +1283,8 @@ static int marvell_suspend(struct phy_device *phydev) int err; /* Suspend the fiber mode first */ - if (!(phydev->supported & SUPPORTED_FIBRE)) { + if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + phydev->supported)) { err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE); if (err < 0) goto error; @@ -1312,7 +1318,8 @@ static int marvell_resume(struct phy_device *phydev) int err; /* Resume the fiber mode first */ - if (!(phydev->supported & SUPPORTED_FIBRE)) { + if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + phydev->supported)) { err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE); if (err < 0) goto error; @@ -1463,7 +1470,8 @@ error: static int marvell_get_sset_count(struct phy_device *phydev) { - if (phydev->supported & SUPPORTED_FIBRE) + if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + phydev->supported)) return ARRAY_SIZE(marvell_hw_stats); else return ARRAY_SIZE(marvell_hw_stats) - NB_FIBER_STATS; @@ -2005,7 +2013,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1101", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &marvell_config_init, .config_aneg = &m88e1101_config_aneg, @@ -2024,7 +2031,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1112", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e1111_config_init, .config_aneg = &marvell_config_aneg, @@ -2043,7 +2049,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1111", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = 
&m88e1111_config_init, .config_aneg = &marvell_config_aneg, @@ -2063,7 +2068,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1118", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e1118_config_init, .config_aneg = &m88e1118_config_aneg, @@ -2082,7 +2086,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1121R", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = &m88e1121_probe, .config_init = &marvell_config_init, .config_aneg = &m88e1121_config_aneg, @@ -2103,7 +2106,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1318S", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e1318_config_init, .config_aneg = &m88e1318_config_aneg, @@ -2126,7 +2128,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1145", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e1145_config_init, .config_aneg = &m88e1101_config_aneg, @@ -2146,7 +2147,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1149R", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e1149_config_init, .config_aneg = &m88e1118_config_aneg, @@ -2165,7 +2165,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1240", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e1111_config_init, .config_aneg = &marvell_config_aneg, @@ -2184,7 +2183,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1116R", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e1116r_config_init, .ack_interrupt = &marvell_ack_interrupt, @@ -2202,7 +2200,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1510", .features = PHY_GBIT_FIBRE_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = &m88e1510_probe, .config_init = &m88e1510_config_init, .config_aneg = &m88e1510_config_aneg, @@ -2226,7 +2223,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1540", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = m88e1510_probe, .config_init = &marvell_config_init, .config_aneg = &m88e1510_config_aneg, @@ -2248,7 +2244,6 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1545", .probe = m88e1510_probe, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &marvell_config_init, .config_aneg = &m88e1510_config_aneg, .read_status = &marvell_read_status, @@ -2268,7 +2263,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E3016", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e3016_config_init, .aneg_done = &marvell_aneg_done, @@ -2289,7 +2283,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E6390", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = m88e6390_probe, .config_init = &marvell_config_init, 
.config_aneg = &m88e1510_config_aneg, diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index 1c9d039eec63..6f6e886fc836 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -252,7 +252,6 @@ static int mv3310_resume(struct phy_device *phydev) static int mv3310_config_init(struct phy_device *phydev) { __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; - u32 mask; int val; /* Check that the PHY interface type is compatible */ @@ -336,13 +335,9 @@ static int mv3310_config_init(struct phy_device *phydev) } } - if (!ethtool_convert_link_mode_to_legacy_u32(&mask, supported)) - phydev_warn(phydev, - "PHY supports (%*pb) more modes than phylib supports, some modes not supported.\n", - __ETHTOOL_LINK_MODE_MASK_NBITS, supported); - - phydev->supported &= mask; - phydev->advertising &= phydev->supported; + linkmode_copy(phydev->supported, supported); + linkmode_and(phydev->advertising, phydev->advertising, + phydev->supported); return 0; } @@ -350,7 +345,7 @@ static int mv3310_config_init(struct phy_device *phydev) static int mv3310_config_aneg(struct phy_device *phydev) { bool changed = false; - u32 advertising; + u16 reg; int ret; /* We don't support manual MDI control */ @@ -364,31 +359,35 @@ static int mv3310_config_aneg(struct phy_device *phydev) return genphy_c45_an_disable_aneg(phydev); } - phydev->advertising &= phydev->supported; - advertising = phydev->advertising; + linkmode_and(phydev->advertising, phydev->advertising, + phydev->supported); ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM, - ethtool_adv_to_mii_adv_t(advertising)); + linkmode_adv_to_mii_adv_t(phydev->advertising)); if (ret < 0) return ret; if (ret > 0) changed = true; + reg = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising); ret = mv3310_modify(phydev, MDIO_MMD_AN, MV_AN_CTRL1000, - ADVERTISE_1000FULL | ADVERTISE_1000HALF, - ethtool_adv_to_mii_ctrl1000_t(advertising)); + ADVERTISE_1000FULL | ADVERTISE_1000HALF, reg); if (ret < 0) return ret; if (ret > 0) changed = true; /* 10G control register */ + if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + phydev->advertising)) + reg = MDIO_AN_10GBT_CTRL_ADV10G; + else + reg = 0; + ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, - MDIO_AN_10GBT_CTRL_ADV10G, - advertising & ADVERTISED_10000baseT_Full ? 
- MDIO_AN_10GBT_CTRL_ADV10G : 0); + MDIO_AN_10GBT_CTRL_ADV10G, reg); if (ret < 0) return ret; if (ret > 0) @@ -458,7 +457,7 @@ static int mv3310_read_status(struct phy_device *phydev) phydev->speed = SPEED_UNKNOWN; phydev->duplex = DUPLEX_UNKNOWN; - phydev->lp_advertising = 0; + linkmode_zero(phydev->lp_advertising); phydev->link = 0; phydev->pause = 0; phydev->asym_pause = 0; @@ -491,7 +490,7 @@ static int mv3310_read_status(struct phy_device *phydev) if (val < 0) return val; - phydev->lp_advertising |= mii_stat1000_to_ethtool_lpa_t(val); + mii_stat1000_to_linkmode_lpa_t(phydev->lp_advertising, val); if (phydev->autoneg == AUTONEG_ENABLE) phy_resolve_aneg_linkmode(phydev); diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index ddc2c5ea3787..b03bcf2c388a 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c @@ -232,7 +232,7 @@ static struct phy_driver meson_gxl_phy[] = { .phy_id_mask = 0xfffffff0, .name = "Meson GXL Internal PHY", .features = PHY_BASIC_FEATURES, - .flags = PHY_IS_INTERNAL | PHY_HAS_INTERRUPT, + .flags = PHY_IS_INTERNAL, .config_init = meson_gxl_config_init, .aneg_done = genphy_aneg_done, .read_status = meson_gxl_read_status, diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 9265dea79412..c33384710d26 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -311,17 +311,22 @@ static int kszphy_config_init(struct phy_device *phydev) static int ksz8041_config_init(struct phy_device *phydev) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + struct device_node *of_node = phydev->mdio.dev.of_node; /* Limit supported and advertised modes in fiber mode */ if (of_property_read_bool(of_node, "micrel,fiber-mode")) { phydev->dev_flags |= MICREL_PHY_FXEN; - phydev->supported &= SUPPORTED_100baseT_Full | - SUPPORTED_100baseT_Half; - phydev->supported |= SUPPORTED_FIBRE; - phydev->advertising &= ADVERTISED_100baseT_Full | - ADVERTISED_100baseT_Half; - phydev->advertising |= ADVERTISED_FIBRE; + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask); + + linkmode_and(phydev->supported, phydev->supported, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + phydev->supported); + linkmode_and(phydev->advertising, phydev->advertising, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + phydev->advertising); phydev->autoneg = AUTONEG_DISABLE; } @@ -918,7 +923,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KS8737", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ks8737_type, .config_init = kszphy_config_init, .ack_interrupt = kszphy_ack_interrupt, @@ -930,7 +934,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = 0x00ffffff, .name = "Micrel KSZ8021 or KSZ8031", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ksz8021_type, .probe = kszphy_probe, .config_init = kszphy_config_init, @@ -946,7 +949,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = 0x00ffffff, .name = "Micrel KSZ8031", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ksz8021_type, .probe = kszphy_probe, .config_init = kszphy_config_init, @@ -962,7 +964,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ8041", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ksz8041_type, .probe = kszphy_probe, .config_init = ksz8041_config_init, @@ 
-979,7 +980,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ8041RNLI", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ksz8041_type, .probe = kszphy_probe, .config_init = kszphy_config_init, @@ -995,7 +995,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ8051", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ksz8051_type, .probe = kszphy_probe, .config_init = kszphy_config_init, @@ -1011,7 +1010,6 @@ static struct phy_driver ksphy_driver[] = { .name = "Micrel KSZ8001 or KS8721", .phy_id_mask = 0x00fffffc, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ksz8041_type, .probe = kszphy_probe, .config_init = kszphy_config_init, @@ -1027,7 +1025,6 @@ static struct phy_driver ksphy_driver[] = { .name = "Micrel KSZ8081 or KSZ8091", .phy_id_mask = MICREL_PHY_ID_MASK, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ksz8081_type, .probe = kszphy_probe, .config_init = kszphy_config_init, @@ -1043,7 +1040,6 @@ static struct phy_driver ksphy_driver[] = { .name = "Micrel KSZ8061", .phy_id_mask = MICREL_PHY_ID_MASK, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = kszphy_config_init, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, @@ -1054,7 +1050,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = 0x000ffffe, .name = "Micrel KSZ9021 Gigabit PHY", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ksz9021_type, .probe = kszphy_probe, .config_init = ksz9021_config_init, @@ -1072,7 +1067,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ9031 Gigabit PHY", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ksz9021_type, .probe = kszphy_probe, .config_init = ksz9031_config_init, @@ -1089,7 +1083,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Microchip KSZ9131 Gigabit PHY", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ksz9021_type, .probe = kszphy_probe, .config_init = ksz9131_config_init, @@ -1115,7 +1108,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ886X Switch", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = kszphy_config_init, .suspend = genphy_suspend, .resume = genphy_resume, @@ -1124,7 +1116,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ8795", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = kszphy_config_init, .config_aneg = ksz8873mll_config_aneg, .read_status = ksz8873mll_read_status, diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index 04b12e34da58..7557bebd5d7f 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -346,7 +346,6 @@ static struct phy_driver microchip_phy_driver[] = { .name = "Microchip LAN88xx", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = lan88xx_probe, .remove = lan88xx_remove, diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c index c600a8509d60..3d09b471632c 100644 --- a/drivers/net/phy/microchip_t1.c +++ b/drivers/net/phy/microchip_t1.c @@ -47,7 +47,6 @@ static struct phy_driver microchip_t1_phy_driver[] = { .name = "Microchip LAN87xx T1", 
.features = PHY_BASIC_T1_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = genphy_config_init, .config_aneg = genphy_config_aneg, diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c index a2e59f4f6f01..62269e578718 100644 --- a/drivers/net/phy/mscc.c +++ b/drivers/net/phy/mscc.c @@ -1833,7 +1833,6 @@ static struct phy_driver vsc85xx_driver[] = { .name = "Microsemi FE VSC8530", .phy_id_mask = 0xfffffff0, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .soft_reset = &genphy_soft_reset, .config_init = &vsc85xx_config_init, .config_aneg = &vsc85xx_config_aneg, @@ -1859,7 +1858,6 @@ static struct phy_driver vsc85xx_driver[] = { .name = "Microsemi VSC8531", .phy_id_mask = 0xfffffff0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .soft_reset = &genphy_soft_reset, .config_init = &vsc85xx_config_init, .config_aneg = &vsc85xx_config_aneg, @@ -1885,7 +1883,6 @@ static struct phy_driver vsc85xx_driver[] = { .name = "Microsemi FE VSC8540 SyncE", .phy_id_mask = 0xfffffff0, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .soft_reset = &genphy_soft_reset, .config_init = &vsc85xx_config_init, .config_aneg = &vsc85xx_config_aneg, @@ -1911,7 +1908,6 @@ static struct phy_driver vsc85xx_driver[] = { .name = "Microsemi VSC8541 SyncE", .phy_id_mask = 0xfffffff0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .soft_reset = &genphy_soft_reset, .config_init = &vsc85xx_config_init, .config_aneg = &vsc85xx_config_aneg, @@ -1937,7 +1933,6 @@ static struct phy_driver vsc85xx_driver[] = { .name = "Microsemi GE VSC8574 SyncE", .phy_id_mask = 0xfffffff0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .soft_reset = &genphy_soft_reset, .config_init = &vsc8584_config_init, .config_aneg = &vsc85xx_config_aneg, @@ -1964,7 +1959,6 @@ static struct phy_driver vsc85xx_driver[] = { .name = "Microsemi GE VSC8584 SyncE", .phy_id_mask = 0xfffffff0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .soft_reset = &genphy_soft_reset, .config_init = &vsc8584_config_init, .config_aneg = &vsc85xx_config_aneg, diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c index 2b1e336961f9..139bed2c8ab4 100644 --- a/drivers/net/phy/national.c +++ b/drivers/net/phy/national.c @@ -134,7 +134,6 @@ static struct phy_driver dp83865_driver[] = { { .phy_id_mask = 0xfffffff0, .name = "NatSemi DP83865", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = ns_config_init, .ack_interrupt = ns_ack_interrupt, .config_intr = ns_config_intr, diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index d7636ff03bc7..03af927fa5ad 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -181,7 +181,7 @@ int genphy_c45_read_lpa(struct phy_device *phydev) if (val < 0) return val; - phydev->lp_advertising = mii_lpa_to_ethtool_lpa_t(val); + mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, val); phydev->pause = val & LPA_PAUSE_CAP ? 1 : 0; phydev->asym_pause = val & LPA_PAUSE_ASYM ? 
1 : 0; @@ -191,7 +191,8 @@ int genphy_c45_read_lpa(struct phy_device *phydev) return val; if (val & MDIO_AN_10GBT_STAT_LP10G) - phydev->lp_advertising |= ADVERTISED_10000baseT_Full; + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + phydev->lp_advertising); return 0; } @@ -304,8 +305,11 @@ EXPORT_SYMBOL_GPL(gen10g_no_soft_reset); int gen10g_config_init(struct phy_device *phydev) { /* Temporarily just say we support everything */ - phydev->supported = SUPPORTED_10000baseT_Full; - phydev->advertising = SUPPORTED_10000baseT_Full; + linkmode_zero(phydev->supported); + + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + phydev->supported); + linkmode_copy(phydev->advertising, phydev->supported); return 0; } diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index c7da4cbb1103..20fbd5eb56fd 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -62,6 +62,124 @@ EXPORT_SYMBOL_GPL(phy_duplex_to_str); * must be grouped by speed and sorted in descending match priority * - iow, descending speed. */ static const struct phy_setting settings[] = { + /* 100G */ + { + .speed = SPEED_100000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + }, + { + .speed = SPEED_100000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + }, + { + .speed = SPEED_100000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, + }, + { + .speed = SPEED_100000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + }, + /* 56G */ + { + .speed = SPEED_56000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, + }, + { + .speed = SPEED_56000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, + }, + { + .speed = SPEED_56000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, + }, + { + .speed = SPEED_56000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, + }, + /* 50G */ + { + .speed = SPEED_50000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + }, + { + .speed = SPEED_50000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + }, + { + .speed = SPEED_50000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, + }, + /* 40G */ + { + .speed = SPEED_40000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + }, + { + .speed = SPEED_40000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + }, + { + .speed = SPEED_40000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, + }, + { + .speed = SPEED_40000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + }, + /* 25G */ + { + .speed = SPEED_25000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + }, + { + .speed = SPEED_25000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + }, + { + .speed = SPEED_25000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + }, + + /* 20G */ + { + .speed = SPEED_20000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, + }, + { + .speed = SPEED_20000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT, + }, + /* 10G */ + { + .speed = SPEED_10000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + }, + { + .speed = SPEED_10000, + .duplex = DUPLEX_FULL, + .bit = 
ETHTOOL_LINK_MODE_10000baseER_Full_BIT, + }, { .speed = SPEED_10000, .duplex = DUPLEX_FULL, @@ -75,22 +193,51 @@ static const struct phy_setting settings[] = { { .speed = SPEED_10000, .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + }, + { + .speed = SPEED_10000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, + }, + { + .speed = SPEED_10000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + }, + { + .speed = SPEED_10000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + }, + { + .speed = SPEED_10000, + .duplex = DUPLEX_FULL, .bit = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, }, + /* 5G */ + { + .speed = SPEED_5000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_5000baseT_Full_BIT, + }, + + /* 2.5G */ { .speed = SPEED_2500, .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT, + .bit = ETHTOOL_LINK_MODE_2500baseT_Full_BIT, }, { - .speed = SPEED_1000, + .speed = SPEED_2500, .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + .bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT, }, + /* 1G */ { .speed = SPEED_1000, .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + .bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, }, { .speed = SPEED_1000, @@ -103,6 +250,12 @@ static const struct phy_setting settings[] = { .bit = ETHTOOL_LINK_MODE_1000baseT_Half_BIT, }, { + .speed = SPEED_1000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + }, + /* 100M */ + { .speed = SPEED_100, .duplex = DUPLEX_FULL, .bit = ETHTOOL_LINK_MODE_100baseT_Full_BIT, @@ -112,6 +265,7 @@ static const struct phy_setting settings[] = { .duplex = DUPLEX_HALF, .bit = ETHTOOL_LINK_MODE_100baseT_Half_BIT, }, + /* 10M */ { .speed = SPEED_10, .duplex = DUPLEX_FULL, @@ -129,7 +283,6 @@ static const struct phy_setting settings[] = { * @speed: speed to match * @duplex: duplex to match * @mask: allowed link modes - * @maxbit: bit size of link modes * @exact: an exact match is required * * Search the settings array for a setting that matches the speed and @@ -143,14 +296,14 @@ static const struct phy_setting settings[] = { * they all fail, %NULL will be returned. 
*/ const struct phy_setting * -phy_lookup_setting(int speed, int duplex, const unsigned long *mask, - size_t maxbit, bool exact) +phy_lookup_setting(int speed, int duplex, const unsigned long *mask, bool exact) { const struct phy_setting *p, *match = NULL, *last = NULL; int i; for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) { - if (p->bit < maxbit && test_bit(p->bit, mask)) { + if (p->bit < __ETHTOOL_LINK_MODE_MASK_NBITS && + test_bit(p->bit, mask)) { last = p; if (p->speed == speed && p->duplex == duplex) { /* Exact match for speed and duplex */ @@ -175,13 +328,13 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask, EXPORT_SYMBOL_GPL(phy_lookup_setting); size_t phy_speeds(unsigned int *speeds, size_t size, - unsigned long *mask, size_t maxbit) + unsigned long *mask) { size_t count; int i; for (i = 0, count = 0; i < ARRAY_SIZE(settings) && count < size; i++) - if (settings[i].bit < maxbit && + if (settings[i].bit < __ETHTOOL_LINK_MODE_MASK_NBITS && test_bit(settings[i].bit, mask) && (count == 0 || speeds[count - 1] != settings[i].speed)) speeds[count++] = settings[i].speed; @@ -199,35 +352,53 @@ size_t phy_speeds(unsigned int *speeds, size_t size, */ void phy_resolve_aneg_linkmode(struct phy_device *phydev) { - u32 common = phydev->lp_advertising & phydev->advertising; + __ETHTOOL_DECLARE_LINK_MODE_MASK(common); - if (common & ADVERTISED_10000baseT_Full) { + linkmode_and(common, phydev->lp_advertising, phydev->advertising); + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, common)) { phydev->speed = SPEED_10000; phydev->duplex = DUPLEX_FULL; - } else if (common & ADVERTISED_1000baseT_Full) { + } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, + common)) { + phydev->speed = SPEED_5000; + phydev->duplex = DUPLEX_FULL; + } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + common)) { + phydev->speed = SPEED_2500; + phydev->duplex = DUPLEX_FULL; + } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + common)) { phydev->speed = SPEED_1000; phydev->duplex = DUPLEX_FULL; - } else if (common & ADVERTISED_1000baseT_Half) { + } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + common)) { phydev->speed = SPEED_1000; phydev->duplex = DUPLEX_HALF; - } else if (common & ADVERTISED_100baseT_Full) { + } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + common)) { phydev->speed = SPEED_100; phydev->duplex = DUPLEX_FULL; - } else if (common & ADVERTISED_100baseT_Half) { + } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + common)) { phydev->speed = SPEED_100; phydev->duplex = DUPLEX_HALF; - } else if (common & ADVERTISED_10baseT_Full) { + } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + common)) { phydev->speed = SPEED_10; phydev->duplex = DUPLEX_FULL; - } else if (common & ADVERTISED_10baseT_Half) { + } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + common)) { phydev->speed = SPEED_10; phydev->duplex = DUPLEX_HALF; } if (phydev->duplex == DUPLEX_FULL) { - phydev->pause = !!(phydev->lp_advertising & ADVERTISED_Pause); - phydev->asym_pause = !!(phydev->lp_advertising & - ADVERTISED_Asym_Pause); + phydev->pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->lp_advertising); + phydev->asym_pause = linkmode_test_bit( + ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->lp_advertising); } } EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 1d73ac3309ce..d73873334e47 
100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -46,11 +46,8 @@ static const char *phy_state_to_str(enum phy_state st) { switch (st) { PHY_STATE_STR(DOWN) - PHY_STATE_STR(STARTING) PHY_STATE_STR(READY) - PHY_STATE_STR(PENDING) PHY_STATE_STR(UP) - PHY_STATE_STR(AN) PHY_STATE_STR(RUNNING) PHY_STATE_STR(NOLINK) PHY_STATE_STR(FORCING) @@ -62,6 +59,17 @@ static const char *phy_state_to_str(enum phy_state st) return NULL; } +static void phy_link_up(struct phy_device *phydev) +{ + phydev->phy_link_change(phydev, true, true); + phy_led_trigger_change_speed(phydev); +} + +static void phy_link_down(struct phy_device *phydev, bool do_carrier) +{ + phydev->phy_link_change(phydev, false, do_carrier); + phy_led_trigger_change_speed(phydev); +} /** * phy_print_status - Convenience function to print out the current phy status @@ -105,9 +113,9 @@ static int phy_clear_interrupt(struct phy_device *phydev) * * Returns 0 on success or < 0 on error. */ -static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts) +static int phy_config_interrupt(struct phy_device *phydev, bool interrupts) { - phydev->interrupts = interrupts; + phydev->interrupts = interrupts ? 1 : 0; if (phydev->drv->config_intr) return phydev->drv->config_intr(phydev); @@ -171,11 +179,9 @@ EXPORT_SYMBOL(phy_aneg_done); * settings were found. */ static const struct phy_setting * -phy_find_valid(int speed, int duplex, u32 supported) +phy_find_valid(int speed, int duplex, unsigned long *supported) { - unsigned long mask = supported; - - return phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, false); + return phy_lookup_setting(speed, duplex, supported, false); } /** @@ -192,9 +198,7 @@ unsigned int phy_supported_speeds(struct phy_device *phy, unsigned int *speeds, unsigned int size) { - unsigned long supported = phy->supported; - - return phy_speeds(speeds, size, &supported, BITS_PER_LONG); + return phy_speeds(speeds, size, phy->supported); } /** @@ -206,11 +210,10 @@ unsigned int phy_supported_speeds(struct phy_device *phy, * * Description: Returns true if there is a valid setting, false otherwise. 
*/ -static inline bool phy_check_valid(int speed, int duplex, u32 features) +static inline bool phy_check_valid(int speed, int duplex, + unsigned long *features) { - unsigned long mask = features; - - return !!phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, true); + return !!phy_lookup_setting(speed, duplex, features, true); } /** @@ -224,13 +227,13 @@ static inline bool phy_check_valid(int speed, int duplex, u32 features) static void phy_sanitize_settings(struct phy_device *phydev) { const struct phy_setting *setting; - u32 features = phydev->supported; /* Sanitize settings based on PHY capabilities */ - if ((features & SUPPORTED_Autoneg) == 0) + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported)) phydev->autoneg = AUTONEG_DISABLE; - setting = phy_find_valid(phydev->speed, phydev->duplex, features); + setting = phy_find_valid(phydev->speed, phydev->duplex, + phydev->supported); if (setting) { phydev->speed = setting->speed; phydev->duplex = setting->duplex; @@ -256,13 +259,15 @@ static void phy_sanitize_settings(struct phy_device *phydev) */ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); u32 speed = ethtool_cmd_speed(cmd); if (cmd->phy_address != phydev->mdio.addr) return -EINVAL; /* We make sure that we don't pass unsupported values in to the PHY */ - cmd->advertising &= phydev->supported; + ethtool_convert_legacy_u32_to_link_mode(advertising, cmd->advertising); + linkmode_and(advertising, advertising, phydev->supported); /* Verify the settings we care about. */ if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE) @@ -283,12 +288,14 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd) phydev->speed = speed; - phydev->advertising = cmd->advertising; + linkmode_copy(phydev->advertising, advertising); if (AUTONEG_ENABLE == cmd->autoneg) - phydev->advertising |= ADVERTISED_Autoneg; + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phydev->advertising); else - phydev->advertising &= ~ADVERTISED_Autoneg; + linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phydev->advertising); phydev->duplex = cmd->duplex; @@ -304,19 +311,18 @@ EXPORT_SYMBOL(phy_ethtool_sset); int phy_ethtool_ksettings_set(struct phy_device *phydev, const struct ethtool_link_ksettings *cmd) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); u8 autoneg = cmd->base.autoneg; u8 duplex = cmd->base.duplex; u32 speed = cmd->base.speed; - u32 advertising; if (cmd->base.phy_address != phydev->mdio.addr) return -EINVAL; - ethtool_convert_link_mode_to_legacy_u32(&advertising, - cmd->link_modes.advertising); + linkmode_copy(advertising, cmd->link_modes.advertising); /* We make sure that we don't pass unsupported values in to the PHY */ - advertising &= phydev->supported; + linkmode_and(advertising, advertising, phydev->supported); /* Verify the settings we care about. 
*/ if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE) @@ -337,12 +343,14 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev, phydev->speed = speed; - phydev->advertising = advertising; + linkmode_copy(phydev->advertising, advertising); if (autoneg == AUTONEG_ENABLE) - phydev->advertising |= ADVERTISED_Autoneg; + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phydev->advertising); else - phydev->advertising &= ~ADVERTISED_Autoneg; + linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phydev->advertising); phydev->duplex = duplex; @@ -358,14 +366,9 @@ EXPORT_SYMBOL(phy_ethtool_ksettings_set); void phy_ethtool_ksettings_get(struct phy_device *phydev, struct ethtool_link_ksettings *cmd) { - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - phydev->supported); - - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - phydev->advertising); - - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, - phydev->lp_advertising); + linkmode_copy(cmd->link_modes.supported, phydev->supported); + linkmode_copy(cmd->link_modes.advertising, phydev->advertising); + linkmode_copy(cmd->link_modes.lp_advertising, phydev->lp_advertising); cmd->base.speed = phydev->speed; cmd->base.duplex = phydev->duplex; @@ -434,7 +437,8 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) } break; case MII_ADVERTISE: - phydev->advertising = mii_adv_to_ethtool_adv_t(val); + mii_adv_to_linkmode_adv_t(phydev->advertising, + val); change_autoneg = true; break; default: @@ -467,6 +471,18 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) } EXPORT_SYMBOL(phy_mii_ioctl); +static void phy_queue_state_machine(struct phy_device *phydev, + unsigned int secs) +{ + mod_delayed_work(system_power_efficient_wq, &phydev->state_queue, + secs * HZ); +} + +static void phy_trigger_machine(struct phy_device *phydev) +{ + phy_queue_state_machine(phydev, 0); +} + static int phy_config_aneg(struct phy_device *phydev) { if (phydev->drv->config_aneg) @@ -482,6 +498,34 @@ static int phy_config_aneg(struct phy_device *phydev) } /** + * phy_check_link_status - check link status and set state accordingly + * @phydev: the phy_device struct + * + * Description: Check for link and whether autoneg was triggered / is running + * and set state accordingly + */ +static int phy_check_link_status(struct phy_device *phydev) +{ + int err; + + WARN_ON(!mutex_is_locked(&phydev->lock)); + + err = phy_read_status(phydev); + if (err) + return err; + + if (phydev->link && phydev->state != PHY_RUNNING) { + phydev->state = PHY_RUNNING; + phy_link_up(phydev); + } else if (!phydev->link && phydev->state != PHY_NOLINK) { + phydev->state = PHY_NOLINK; + phy_link_down(phydev, true); + } + + return 0; +} + +/** * phy_start_aneg - start auto-negotiation for this PHY device * @phydev: the phy_device struct * @@ -492,7 +536,6 @@ static int phy_config_aneg(struct phy_device *phydev) */ int phy_start_aneg(struct phy_device *phydev) { - bool trigger = 0; int err; if (!phydev->drv) @@ -504,7 +547,7 @@ int phy_start_aneg(struct phy_device *phydev) phy_sanitize_settings(phydev); /* Invalidate LP advertising flags */ - phydev->lp_advertising = 0; + linkmode_zero(phydev->lp_advertising); err = phy_config_aneg(phydev); if (err < 0) @@ -512,32 +555,16 @@ int phy_start_aneg(struct phy_device *phydev) if (phydev->state != PHY_HALTED) { if (AUTONEG_ENABLE == phydev->autoneg) { - phydev->state = PHY_AN; - phydev->link_timeout = PHY_AN_TIMEOUT; + err = 
phy_check_link_status(phydev); } else { phydev->state = PHY_FORCING; phydev->link_timeout = PHY_FORCE_TIMEOUT; } } - /* Re-schedule a PHY state machine to check PHY status because - * negotiation may already be done and aneg interrupt may not be - * generated. - */ - if (!phy_polling_mode(phydev) && phydev->state == PHY_AN) { - err = phy_aneg_done(phydev); - if (err > 0) { - trigger = true; - err = 0; - } - } - out_unlock: mutex_unlock(&phydev->lock); - if (trigger) - phy_trigger_machine(phydev); - return err; } EXPORT_SYMBOL(phy_start_aneg); @@ -573,20 +600,38 @@ static int phy_poll_aneg_done(struct phy_device *phydev) */ int phy_speed_down(struct phy_device *phydev, bool sync) { - u32 adv = phydev->lp_advertising & phydev->supported; - u32 adv_old = phydev->advertising; + __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old); + __ETHTOOL_DECLARE_LINK_MODE_MASK(adv); int ret; if (phydev->autoneg != AUTONEG_ENABLE) return 0; - if (adv & PHY_10BT_FEATURES) - phydev->advertising &= ~(PHY_100BT_FEATURES | - PHY_1000BT_FEATURES); - else if (adv & PHY_100BT_FEATURES) - phydev->advertising &= ~PHY_1000BT_FEATURES; + linkmode_copy(adv_old, phydev->advertising); + linkmode_copy(adv, phydev->lp_advertising); + linkmode_and(adv, adv, phydev->supported); + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, adv) || + linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, adv)) { + linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->advertising); + } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + adv) || + linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + adv)) { + linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->advertising); + } - if (phydev->advertising == adv_old) + if (linkmode_equal(phydev->advertising, adv_old)) return 0; ret = phy_config_aneg(phydev); @@ -605,28 +650,36 @@ EXPORT_SYMBOL_GPL(phy_speed_down); */ int phy_speed_up(struct phy_device *phydev) { - u32 mask = PHY_10BT_FEATURES | PHY_100BT_FEATURES | PHY_1000BT_FEATURES; - u32 adv_old = phydev->advertising; + __ETHTOOL_DECLARE_LINK_MODE_MASK(all_speeds) = { 0, }; + __ETHTOOL_DECLARE_LINK_MODE_MASK(not_speeds); + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old); + __ETHTOOL_DECLARE_LINK_MODE_MASK(speeds); + + linkmode_copy(adv_old, phydev->advertising); if (phydev->autoneg != AUTONEG_ENABLE) return 0; - phydev->advertising = (adv_old & ~mask) | (phydev->supported & mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, all_speeds); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, all_speeds); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, all_speeds); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, all_speeds); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, all_speeds); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, all_speeds); + + linkmode_andnot(not_speeds, adv_old, all_speeds); + linkmode_copy(supported, phydev->supported); + linkmode_and(speeds, supported, all_speeds); + linkmode_or(phydev->advertising, not_speeds, speeds); - if (phydev->advertising == adv_old) + if (linkmode_equal(phydev->advertising, adv_old)) return 0; return phy_config_aneg(phydev); } 
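The phy_speed_down()/phy_speed_up() hunks above keep the old control flow and only swap the u32 bit arithmetic for the linkmode helpers (linkmode_copy, linkmode_and, linkmode_clear_bit, linkmode_equal). A minimal userspace model of the phy_speed_down() logic, using plain single-word masks and made-up ADV_* names rather than the kernel's linkmode bitmaps:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the 10/100/1000BaseT link-mode bits. */
#define ADV_10		(1u << 0)
#define ADV_100		(1u << 1)
#define ADV_1000	(1u << 2)

/*
 * Model of the phy_speed_down() flow: mask the peer's advertisement with
 * what we support, drop the faster modes when a slower one is available,
 * and report whether the advertising mask actually changed (only then does
 * the kernel code call phy_config_aneg()).
 */
static bool speed_down(unsigned int *advertising, unsigned int lp,
		       unsigned int supported)
{
	unsigned int adv_old = *advertising;
	unsigned int common = lp & supported;

	if (common & ADV_10)
		*advertising &= ~(ADV_100 | ADV_1000);
	else if (common & ADV_100)
		*advertising &= ~ADV_1000;

	return *advertising != adv_old;
}

int main(void)
{
	unsigned int adv = ADV_10 | ADV_100 | ADV_1000;

	if (speed_down(&adv, ADV_10 | ADV_100, adv))
		printf("advertising reduced to %#x, renegotiate\n", adv);
	return 0;
}

The comparison against the saved advertising mask serves the same purpose as linkmode_equal() in the hunk above: autonegotiation is only restarted when the advertised set actually changed.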
EXPORT_SYMBOL_GPL(phy_speed_up); -static void phy_queue_state_machine(struct phy_device *phydev, - unsigned int secs) -{ - mod_delayed_work(system_power_efficient_wq, &phydev->state_queue, - secs * HZ); -} - /** * phy_start_machine - start PHY state machine tracking * @phydev: the phy_device struct @@ -644,20 +697,6 @@ void phy_start_machine(struct phy_device *phydev) EXPORT_SYMBOL_GPL(phy_start_machine); /** - * phy_trigger_machine - trigger the state machine to run - * - * @phydev: the phy_device struct - * - * Description: There has been a change in state which requires that the - * state machine runs. - */ - -void phy_trigger_machine(struct phy_device *phydev) -{ - phy_queue_state_machine(phydev, 0); -} - -/** * phy_stop_machine - stop the PHY state machine tracking * @phydev: target phy_device struct * @@ -711,30 +750,26 @@ static int phy_disable_interrupts(struct phy_device *phydev) } /** - * phy_change - Called by the phy_interrupt to handle PHY changes - * @phydev: phy_device struct that interrupted + * phy_interrupt - PHY interrupt handler + * @irq: interrupt line + * @phy_dat: phy_device pointer + * + * Description: Handle PHY interrupt */ -static irqreturn_t phy_change(struct phy_device *phydev) +static irqreturn_t phy_interrupt(int irq, void *phy_dat) { - if (phy_interrupt_is_valid(phydev)) { - if (phydev->drv->did_interrupt && - !phydev->drv->did_interrupt(phydev)) - return IRQ_NONE; - - if (phydev->state == PHY_HALTED) - if (phy_disable_interrupts(phydev)) - goto phy_err; - } + struct phy_device *phydev = phy_dat; - mutex_lock(&phydev->lock); - if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) - phydev->state = PHY_CHANGELINK; - mutex_unlock(&phydev->lock); + if (PHY_HALTED == phydev->state) + return IRQ_NONE; /* It can't be ours. */ + + if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev)) + return IRQ_NONE; /* reschedule state queue work to run as soon as possible */ phy_trigger_machine(phydev); - if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev)) + if (phy_clear_interrupt(phydev)) goto phy_err; return IRQ_HANDLED; @@ -744,36 +779,6 @@ phy_err: } /** - * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes - * @work: work_struct that describes the work to be done - */ -void phy_change_work(struct work_struct *work) -{ - struct phy_device *phydev = - container_of(work, struct phy_device, phy_queue); - - phy_change(phydev); -} - -/** - * phy_interrupt - PHY interrupt handler - * @irq: interrupt line - * @phy_dat: phy_device pointer - * - * Description: When a PHY interrupt occurs, the handler disables - * interrupts, and uses phy_change to handle the interrupt. - */ -static irqreturn_t phy_interrupt(int irq, void *phy_dat) -{ - struct phy_device *phydev = phy_dat; - - if (PHY_HALTED == phydev->state) - return IRQ_NONE; /* It can't be ours. */ - - return phy_change(phydev); -} - -/** * phy_enable_interrupts - Enable the interrupts from the PHY side * @phydev: target phy_device struct */ @@ -851,7 +856,7 @@ out_unlock: phy_state_machine(&phydev->state_queue.work); /* Cannot call flush_scheduled_work() here as desired because - * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change() + * of rtnl_lock(), but PHY_HALTED shall guarantee irq handler * will not reenable interrupts. 
*/ } @@ -874,9 +879,6 @@ void phy_start(struct phy_device *phydev) mutex_lock(&phydev->lock); switch (phydev->state) { - case PHY_STARTING: - phydev->state = PHY_PENDING; - break; case PHY_READY: phydev->state = PHY_UP; break; @@ -902,18 +904,6 @@ void phy_start(struct phy_device *phydev) } EXPORT_SYMBOL(phy_start); -static void phy_link_up(struct phy_device *phydev) -{ - phydev->phy_link_change(phydev, true, true); - phy_led_trigger_change_speed(phydev); -} - -static void phy_link_down(struct phy_device *phydev, bool do_carrier) -{ - phydev->phy_link_change(phydev, false, do_carrier); - phy_led_trigger_change_speed(phydev); -} - /** * phy_state_machine - Handle the state machine * @work: work_struct that describes the work to be done @@ -936,63 +926,17 @@ void phy_state_machine(struct work_struct *work) switch (phydev->state) { case PHY_DOWN: - case PHY_STARTING: case PHY_READY: - case PHY_PENDING: break; case PHY_UP: needs_aneg = true; - phydev->link_timeout = PHY_AN_TIMEOUT; - - break; - case PHY_AN: - err = phy_read_status(phydev); - if (err < 0) - break; - - /* If the link is down, give up on negotiation for now */ - if (!phydev->link) { - phydev->state = PHY_NOLINK; - phy_link_down(phydev, true); - break; - } - - /* Check if negotiation is done. Break if there's an error */ - err = phy_aneg_done(phydev); - if (err < 0) - break; - - /* If AN is done, we're running */ - if (err > 0) { - phydev->state = PHY_RUNNING; - phy_link_up(phydev); - } else if (0 == phydev->link_timeout--) - needs_aneg = true; break; case PHY_NOLINK: - if (!phy_polling_mode(phydev)) - break; - - err = phy_read_status(phydev); - if (err) - break; - - if (phydev->link) { - if (AUTONEG_ENABLE == phydev->autoneg) { - err = phy_aneg_done(phydev); - if (err < 0) - break; - - if (!err) { - phydev->state = PHY_AN; - phydev->link_timeout = PHY_AN_TIMEOUT; - break; - } - } - phydev->state = PHY_RUNNING; - phy_link_up(phydev); - } + case PHY_RUNNING: + case PHY_CHANGELINK: + case PHY_RESUMING: + err = phy_check_link_status(phydev); break; case PHY_FORCING: err = genphy_update_link(phydev); @@ -1008,32 +952,6 @@ void phy_state_machine(struct work_struct *work) phy_link_down(phydev, false); } break; - case PHY_RUNNING: - if (!phy_polling_mode(phydev)) - break; - - err = phy_read_status(phydev); - if (err) - break; - - if (!phydev->link) { - phydev->state = PHY_NOLINK; - phy_link_down(phydev, true); - } - break; - case PHY_CHANGELINK: - err = phy_read_status(phydev); - if (err) - break; - - if (phydev->link) { - phydev->state = PHY_RUNNING; - phy_link_up(phydev); - } else { - phydev->state = PHY_NOLINK; - phy_link_down(phydev, true); - } - break; case PHY_HALTED: if (phydev->link) { phydev->link = 0; @@ -1041,30 +959,6 @@ void phy_state_machine(struct work_struct *work) do_suspend = true; } break; - case PHY_RESUMING: - if (AUTONEG_ENABLE == phydev->autoneg) { - err = phy_aneg_done(phydev); - if (err < 0) { - break; - } else if (!err) { - phydev->state = PHY_AN; - phydev->link_timeout = PHY_AN_TIMEOUT; - break; - } - } - - err = phy_read_status(phydev); - if (err) - break; - - if (phydev->link) { - phydev->state = PHY_RUNNING; - phy_link_up(phydev); - } else { - phydev->state = PHY_NOLINK; - phy_link_down(phydev, false); - } - break; } mutex_unlock(&phydev->lock); @@ -1104,10 +998,34 @@ void phy_state_machine(struct work_struct *work) void phy_mac_interrupt(struct phy_device *phydev) { /* Trigger a state machine change */ - queue_work(system_power_efficient_wq, &phydev->phy_queue); + phy_trigger_machine(phydev); } 
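Note: after the hunks above, phy_interrupt() and phy_mac_interrupt() both simply trigger the state machine, and the former per-state polling branches collapse into one phy_check_link_status() helper. A reduced, standalone model of that consolidated dispatch is sketched below; mock_phy, check_link_status() and state_machine_once() are illustrative names for this sketch only, and the FORCING/HALTED handling is left out.

#include <stdbool.h>
#include <stdio.h>

/* Reduced model of the consolidated phy_state_machine(): NOLINK, RUNNING,
 * CHANGELINK and RESUMING all funnel into a single link-check step.
 */
enum phy_state { PHY_DOWN, PHY_READY, PHY_UP, PHY_NOLINK, PHY_RUNNING,
                 PHY_CHANGELINK, PHY_FORCING, PHY_HALTED, PHY_RESUMING };

struct mock_phy {
        enum phy_state state;
        bool link;              /* latest link status read from "hardware" */
};

static int check_link_status(struct mock_phy *phy)
{
        /* In the kernel this reads the PHY and calls phy_link_up() or
         * phy_link_down(); here we only move between the two states.
         */
        phy->state = phy->link ? PHY_RUNNING : PHY_NOLINK;
        return 0;
}

static void state_machine_once(struct mock_phy *phy, bool *needs_aneg)
{
        *needs_aneg = false;

        switch (phy->state) {
        case PHY_DOWN:
        case PHY_READY:
                break;
        case PHY_UP:
                *needs_aneg = true;
                break;
        case PHY_NOLINK:
        case PHY_RUNNING:
        case PHY_CHANGELINK:
        case PHY_RESUMING:
                check_link_status(phy);
                break;
        case PHY_FORCING:
        case PHY_HALTED:
                /* omitted in this sketch */
                break;
        }
}

int main(void)
{
        struct mock_phy phy = { .state = PHY_NOLINK, .link = true };
        bool needs_aneg;

        state_machine_once(&phy, &needs_aneg);
        printf("state after poll: %s\n",
               phy.state == PHY_RUNNING ? "RUNNING" : "other");
        return 0;
}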
EXPORT_SYMBOL(phy_mac_interrupt); +static void mmd_eee_adv_to_linkmode(unsigned long *advertising, u16 eee_adv) +{ + linkmode_zero(advertising); + + if (eee_adv & MDIO_EEE_100TX) + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + advertising); + if (eee_adv & MDIO_EEE_1000T) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + advertising); + if (eee_adv & MDIO_EEE_10GT) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + advertising); + if (eee_adv & MDIO_EEE_1000KX) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + advertising); + if (eee_adv & MDIO_EEE_10GKX4) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, + advertising); + if (eee_adv & MDIO_EEE_10GKR) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + advertising); +} + /** * phy_init_eee - init and check the EEE feature * @phydev: target phy_device struct @@ -1126,9 +1044,12 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) /* According to 802.3az,the EEE is supported only in full duplex-mode. */ if (phydev->duplex == DUPLEX_FULL) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(common); + __ETHTOOL_DECLARE_LINK_MODE_MASK(lp); + __ETHTOOL_DECLARE_LINK_MODE_MASK(adv); int eee_lp, eee_cap, eee_adv; - u32 lp, cap, adv; int status; + u32 cap; /* Read phy status to properly get the right settings */ status = phy_read_status(phydev); @@ -1155,9 +1076,11 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) if (eee_adv <= 0) goto eee_exit_err; - adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); - lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); - if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv)) + mmd_eee_adv_to_linkmode(adv, eee_adv); + mmd_eee_adv_to_linkmode(lp, eee_lp); + linkmode_and(common, adv, lp); + + if (!phy_check_valid(phydev->speed, phydev->duplex, common)) goto eee_exit_err; if (clk_stop_enable) { diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index ab33d1777132..55202a0ac476 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -66,10 +66,12 @@ static const int phy_basic_ports_array[] = { ETHTOOL_LINK_MODE_TP_BIT, ETHTOOL_LINK_MODE_MII_BIT, }; +EXPORT_SYMBOL_GPL(phy_basic_ports_array); static const int phy_fibre_port_array[] = { ETHTOOL_LINK_MODE_FIBRE_BIT, }; +EXPORT_SYMBOL_GPL(phy_fibre_port_array); static const int phy_all_ports_features_array[] = { ETHTOOL_LINK_MODE_Autoneg_BIT, @@ -80,27 +82,32 @@ static const int phy_all_ports_features_array[] = { ETHTOOL_LINK_MODE_BNC_BIT, ETHTOOL_LINK_MODE_Backplane_BIT, }; +EXPORT_SYMBOL_GPL(phy_all_ports_features_array); -static const int phy_10_100_features_array[] = { +const int phy_10_100_features_array[4] = { ETHTOOL_LINK_MODE_10baseT_Half_BIT, ETHTOOL_LINK_MODE_10baseT_Full_BIT, ETHTOOL_LINK_MODE_100baseT_Half_BIT, ETHTOOL_LINK_MODE_100baseT_Full_BIT, }; +EXPORT_SYMBOL_GPL(phy_10_100_features_array); -static const int phy_basic_t1_features_array[] = { +const int phy_basic_t1_features_array[2] = { ETHTOOL_LINK_MODE_TP_BIT, ETHTOOL_LINK_MODE_100baseT_Full_BIT, }; +EXPORT_SYMBOL_GPL(phy_basic_t1_features_array); -static const int phy_gbit_features_array[] = { +const int phy_gbit_features_array[2] = { ETHTOOL_LINK_MODE_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT, }; +EXPORT_SYMBOL_GPL(phy_gbit_features_array); -static const int phy_10gbit_features_array[] = { +const int phy_10gbit_features_array[1] = { ETHTOOL_LINK_MODE_10000baseT_Full_BIT, }; +EXPORT_SYMBOL_GPL(phy_10gbit_features_array); 
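Note: mmd_eee_adv_to_linkmode() above translates bits of the MMD EEE advertisement register into link-mode bits so that the local and link-partner sets can be intersected with linkmode_and(). A small table-driven sketch of the same mapping is given below; eee_adv_to_mask(), eee_table and the single unsigned long result are simplifications for illustration (the kernel keeps a full linkmode bitmap), and the register/bit constants are written out rather than taken from the uapi headers.

#include <stdio.h>

/* Standalone sketch of the EEE-advertisement-to-link-mode mapping:
 * each set register bit selects one link-mode bit number.
 */
struct eee_map {
        unsigned short reg_bit;   /* bit in the EEE advertisement register */
        int link_mode;            /* corresponding link-mode bit number */
};

static const struct eee_map eee_table[] = {
        { 0x0002, 3  },   /* 100TX  -> 100baseT/Full   */
        { 0x0004, 5  },   /* 1000T  -> 1000baseT/Full  */
        { 0x0008, 12 },   /* 10GT   -> 10000baseT/Full */
};

static unsigned long eee_adv_to_mask(unsigned short eee_adv)
{
        unsigned long mask = 0;

        for (unsigned int i = 0; i < sizeof(eee_table) / sizeof(eee_table[0]); i++)
                if (eee_adv & eee_table[i].reg_bit)
                        mask |= 1UL << eee_table[i].link_mode;
        return mask;
}

int main(void)
{
        /* EEE can only be used for modes advertised by both ends, which
         * mirrors the linkmode_and(common, adv, lp) step in the patch.
         */
        unsigned long adv = eee_adv_to_mask(0x0002 | 0x0004);  /* 100TX + 1000T */
        unsigned long lp  = eee_adv_to_mask(0x0004);           /* 1000T only    */

        printf("common EEE modes mask: 0x%lx\n", adv & lp);
        return 0;
}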
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; EXPORT_SYMBOL_GPL(phy_10gbit_full_features); @@ -587,7 +594,6 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, mutex_init(&dev->lock); INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine); - INIT_WORK(&dev->phy_queue, phy_change_work); /* Request the appropriate module unconditionally; don't * bother trying to do so only if it isn't already loaded, @@ -1442,8 +1448,13 @@ static int genphy_config_advert(struct phy_device *phydev) int err, changed = 0; /* Only allow advertising what this PHY supports */ - phydev->advertising &= phydev->supported; - advertise = phydev->advertising; + linkmode_and(phydev->advertising, phydev->advertising, + phydev->supported); + if (!ethtool_convert_link_mode_to_legacy_u32(&advertise, + phydev->advertising)) + phydev_warn(phydev, "PHY advertising (%*pb) more modes than genphy supports, some modes not advertised.\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, + phydev->advertising); /* Setup standard advertisement */ adv = phy_read(phydev, MII_ADVERTISE); @@ -1482,10 +1493,11 @@ static int genphy_config_advert(struct phy_device *phydev) oldadv = adv; adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); - if (phydev->supported & (SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full)) { + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + phydev->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->supported)) adv |= ethtool_adv_to_mii_ctrl1000_t(advertise); - } if (adv != oldadv) changed = 1; @@ -1690,11 +1702,13 @@ int genphy_read_status(struct phy_device *phydev) if (err) return err; - phydev->lp_advertising = 0; + linkmode_zero(phydev->lp_advertising); if (AUTONEG_ENABLE == phydev->autoneg) { - if (phydev->supported & (SUPPORTED_1000baseT_Half - | SUPPORTED_1000baseT_Full)) { + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + phydev->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->supported)) { lpagb = phy_read(phydev, MII_STAT1000); if (lpagb < 0) return lpagb; @@ -1711,8 +1725,8 @@ int genphy_read_status(struct phy_device *phydev) return -ENOLINK; } - phydev->lp_advertising = - mii_stat1000_to_ethtool_lpa_t(lpagb); + mii_stat1000_to_linkmode_lpa_t(phydev->lp_advertising, + lpagb); common_adv_gb = lpagb & adv << 2; } @@ -1720,7 +1734,7 @@ int genphy_read_status(struct phy_device *phydev) if (lpa < 0) return lpa; - phydev->lp_advertising |= mii_lpa_to_ethtool_lpa_t(lpa); + mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa); adv = phy_read(phydev, MII_ADVERTISE); if (adv < 0) @@ -1801,11 +1815,13 @@ EXPORT_SYMBOL(genphy_soft_reset); int genphy_config_init(struct phy_device *phydev) { int val; - u32 features; + __ETHTOOL_DECLARE_LINK_MODE_MASK(features) = { 0, }; - features = (SUPPORTED_TP | SUPPORTED_MII - | SUPPORTED_AUI | SUPPORTED_FIBRE | - SUPPORTED_BNC | SUPPORTED_Pause | SUPPORTED_Asym_Pause); + linkmode_set_bit_array(phy_basic_ports_array, + ARRAY_SIZE(phy_basic_ports_array), + features); + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, features); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, features); /* Do we support autonegotiation? 
*/ val = phy_read(phydev, MII_BMSR); @@ -1813,16 +1829,16 @@ int genphy_config_init(struct phy_device *phydev) return val; if (val & BMSR_ANEGCAPABLE) - features |= SUPPORTED_Autoneg; + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, features); if (val & BMSR_100FULL) - features |= SUPPORTED_100baseT_Full; + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, features); if (val & BMSR_100HALF) - features |= SUPPORTED_100baseT_Half; + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, features); if (val & BMSR_10FULL) - features |= SUPPORTED_10baseT_Full; + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, features); if (val & BMSR_10HALF) - features |= SUPPORTED_10baseT_Half; + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, features); if (val & BMSR_ESTATEN) { val = phy_read(phydev, MII_ESTATUS); @@ -1830,13 +1846,15 @@ int genphy_config_init(struct phy_device *phydev) return val; if (val & ESTATUS_1000_TFULL) - features |= SUPPORTED_1000baseT_Full; + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + features); if (val & ESTATUS_1000_THALF) - features |= SUPPORTED_1000baseT_Half; + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + features); } - phydev->supported &= features; - phydev->advertising &= features; + linkmode_and(phydev->supported, phydev->supported, features); + linkmode_and(phydev->advertising, phydev->advertising, features); return 0; } @@ -1880,20 +1898,37 @@ EXPORT_SYMBOL(genphy_loopback); static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) { - phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES | - PHY_10BT_FEATURES); + __ETHTOOL_DECLARE_LINK_MODE_MASK(speeds) = { 0, }; + + linkmode_set_bit_array(phy_10_100_features_array, + ARRAY_SIZE(phy_10_100_features_array), + speeds); + linkmode_set_bit_array(phy_gbit_features_array, + ARRAY_SIZE(phy_gbit_features_array), + speeds); + + linkmode_andnot(phydev->supported, phydev->supported, speeds); switch (max_speed) { default: return -ENOTSUPP; case SPEED_1000: - phydev->supported |= PHY_1000BT_FEATURES; + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + phydev->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->supported); /* fall through */ case SPEED_100: - phydev->supported |= PHY_100BT_FEATURES; + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + phydev->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + phydev->supported); /* fall through */ case SPEED_10: - phydev->supported |= PHY_10BT_FEATURES; + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + phydev->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + phydev->supported); } return 0; @@ -1907,7 +1942,7 @@ int phy_set_max_speed(struct phy_device *phydev, u32 max_speed) if (err) return err; - phydev->advertising = phydev->supported; + linkmode_copy(phydev->advertising, phydev->supported); return 0; } @@ -1924,10 +1959,8 @@ EXPORT_SYMBOL(phy_set_max_speed); */ void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode) { - WARN_ON(link_mode > 31); - - phydev->supported &= ~BIT(link_mode); - phydev->advertising = phydev->supported; + linkmode_clear_bit(link_mode, phydev->supported); + linkmode_copy(phydev->advertising, phydev->supported); } EXPORT_SYMBOL(phy_remove_link_mode); @@ -1940,9 +1973,9 @@ EXPORT_SYMBOL(phy_remove_link_mode); */ void phy_support_sym_pause(struct phy_device *phydev) { - phydev->supported &= ~SUPPORTED_Asym_Pause; - phydev->supported |= SUPPORTED_Pause; - phydev->advertising = phydev->supported; + 
linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported); + linkmode_copy(phydev->advertising, phydev->supported); } EXPORT_SYMBOL(phy_support_sym_pause); @@ -1954,8 +1987,9 @@ EXPORT_SYMBOL(phy_support_sym_pause); */ void phy_support_asym_pause(struct phy_device *phydev) { - phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - phydev->advertising = phydev->supported; + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported); + linkmode_copy(phydev->advertising, phydev->supported); } EXPORT_SYMBOL(phy_support_asym_pause); @@ -1973,12 +2007,13 @@ EXPORT_SYMBOL(phy_support_asym_pause); void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx, bool autoneg) { - phydev->supported &= ~SUPPORTED_Pause; + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported); if (rx && tx && autoneg) - phydev->supported |= SUPPORTED_Pause; + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->supported); - phydev->advertising = phydev->supported; + linkmode_copy(phydev->advertising, phydev->supported); } EXPORT_SYMBOL(phy_set_sym_pause); @@ -1995,20 +2030,29 @@ EXPORT_SYMBOL(phy_set_sym_pause); */ void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx) { - u16 oldadv = phydev->advertising; - u16 newadv = oldadv &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause); + __ETHTOOL_DECLARE_LINK_MODE_MASK(oldadv); - if (rx) - newadv |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - if (tx) - newadv ^= SUPPORTED_Asym_Pause; + linkmode_copy(oldadv, phydev->advertising); - if (oldadv != newadv) { - phydev->advertising = newadv; + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->advertising); - if (phydev->autoneg) - phy_start_aneg(phydev); + if (rx) { + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->advertising); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->advertising); } + + if (tx) + linkmode_change_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->advertising); + + if (!linkmode_equal(oldadv, phydev->advertising) && + phydev->autoneg) + phy_start_aneg(phydev); } EXPORT_SYMBOL(phy_set_asym_pause); @@ -2024,8 +2068,10 @@ EXPORT_SYMBOL(phy_set_asym_pause); bool phy_validate_pause(struct phy_device *phydev, struct ethtool_pauseparam *pp) { - if (!(phydev->supported & SUPPORTED_Pause) || - (!(phydev->supported & SUPPORTED_Asym_Pause) && + if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->supported) || + (!linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->supported) && pp->rx_pause != pp->tx_pause)) return false; return true; @@ -2074,6 +2120,11 @@ static void of_set_phy_eee_broken(struct phy_device *phydev) phydev->eee_broken_modes = broken; } +static bool phy_drv_supports_irq(struct phy_driver *phydrv) +{ + return phydrv->config_intr || phydrv->ack_interrupt; +} + /** * phy_probe - probe and init a PHY device * @dev: device to probe and init @@ -2095,8 +2146,7 @@ static int phy_probe(struct device *dev) /* Disable the interrupt if the PHY doesn't support it * but the interrupt is still a valid one */ - if (!(phydrv->flags & PHY_HAS_INTERRUPT) && - phy_interrupt_is_valid(phydev)) + if (!phy_drv_supports_irq(phydrv) && phy_interrupt_is_valid(phydev)) phydev->irq = PHY_POLL; if (phydrv->flags & PHY_IS_INTERNAL) @@ -2109,9 +2159,9 @@ static int phy_probe(struct device *dev) * or both of these 
values */ ethtool_convert_link_mode_to_legacy_u32(&features, phydrv->features); - phydev->supported = features; + linkmode_copy(phydev->supported, phydrv->features); of_set_phy_supported(phydev); - phydev->advertising = phydev->supported; + linkmode_copy(phydev->advertising, phydev->supported); /* Get the EEE modes we want to prohibit. We will ask * the PHY stop advertising these mode later on @@ -2131,14 +2181,22 @@ static int phy_probe(struct device *dev) */ if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features) || test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydrv->features)) { - phydev->supported &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->supported); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->supported); if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features)) - phydev->supported |= SUPPORTED_Pause; + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->supported); if (test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydrv->features)) - phydev->supported |= SUPPORTED_Asym_Pause; + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->supported); } else { - phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->supported); } /* Set the state to READY by default */ diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c index 491efc1bf5c4..263385b75bba 100644 --- a/drivers/net/phy/phy_led_triggers.c +++ b/drivers/net/phy/phy_led_triggers.c @@ -67,7 +67,7 @@ void phy_led_trigger_change_speed(struct phy_device *phy) EXPORT_SYMBOL_GPL(phy_led_trigger_change_speed); static void phy_led_trigger_format_name(struct phy_device *phy, char *buf, - size_t size, char *suffix) + size_t size, const char *suffix) { snprintf(buf, size, PHY_ID_FMT ":%s", phy->mdio.bus->id, phy->mdio.addr, suffix); @@ -77,20 +77,9 @@ static int phy_led_trigger_register(struct phy_device *phy, struct phy_led_trigger *plt, unsigned int speed) { - char name_suffix[PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE]; - plt->speed = speed; - - if (speed < SPEED_1000) - snprintf(name_suffix, sizeof(name_suffix), "%dMbps", speed); - else if (speed == SPEED_2500) - snprintf(name_suffix, sizeof(name_suffix), "2.5Gbps"); - else - snprintf(name_suffix, sizeof(name_suffix), "%dGbps", - DIV_ROUND_CLOSEST(speed, 1000)); - phy_led_trigger_format_name(phy, plt->name, sizeof(plt->name), - name_suffix); + phy_speed_to_str(speed)); plt->trigger.name = plt->name; return led_trigger_register(&plt->trigger); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 9b8dd0d0ee42..e7becc7379d7 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -191,8 +191,7 @@ static int phylink_parse_fixedlink(struct phylink *pl, phylink_validate(pl, pl->supported, &pl->link_config); s = phy_lookup_setting(pl->link_config.speed, pl->link_config.duplex, - pl->supported, - __ETHTOOL_LINK_MODE_MASK_NBITS, true); + pl->supported, true); linkmode_zero(pl->supported); phylink_set(pl->supported, MII); if (s) { @@ -634,13 +633,11 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy) { struct phylink_link_state config; __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); - u32 advertising; int ret; memset(&config, 0, sizeof(config)); - ethtool_convert_legacy_u32_to_link_mode(supported, phy->supported); - ethtool_convert_legacy_u32_to_link_mode(config.advertising, - 
phy->advertising); + linkmode_copy(supported, phy->supported); + linkmode_copy(config.advertising, phy->advertising); config.interface = pl->link_config.interface; /* @@ -673,15 +670,14 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy) linkmode_copy(pl->link_config.advertising, config.advertising); /* Restrict the phy advertisement according to the MAC support. */ - ethtool_convert_link_mode_to_legacy_u32(&advertising, config.advertising); - phy->advertising = advertising; + linkmode_copy(phy->advertising, config.advertising); mutex_unlock(&pl->state_mutex); mutex_unlock(&phy->lock); netdev_dbg(pl->netdev, - "phy: setting supported %*pb advertising 0x%08x\n", + "phy: setting supported %*pb advertising %*pb\n", __ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported, - phy->advertising); + __ETHTOOL_LINK_MODE_MASK_NBITS, phy->advertising); phy_start_machine(phy); if (phy->irq > 0) @@ -1088,8 +1084,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, * duplex. */ s = phy_lookup_setting(kset->base.speed, kset->base.duplex, - pl->supported, - __ETHTOOL_LINK_MODE_MASK_NBITS, false); + pl->supported, false); if (!s) return -EINVAL; diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c index 889a4dce1648..cfe2313dbefd 100644 --- a/drivers/net/phy/qsemi.c +++ b/drivers/net/phy/qsemi.c @@ -116,7 +116,6 @@ static struct phy_driver qs6612_driver[] = { { .name = "QS6612", .phy_id_mask = 0xfffffff0, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = qs6612_config_init, .ack_interrupt = qs6612_ack_interrupt, .config_intr = qs6612_config_intr, diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 271e8adc39f1..c6010fb1aa0f 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -213,17 +213,13 @@ static int rtl8366rb_config_init(struct phy_device *phydev) static struct phy_driver realtek_drvs[] = { { - .phy_id = 0x00008201, + PHY_ID_MATCH_EXACT(0x00008201), .name = "RTL8201CP Ethernet", - .phy_id_mask = 0x0000ffff, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, }, { - .phy_id = 0x001cc816, + PHY_ID_MATCH_EXACT(0x001cc816), .name = "RTL8201F Fast Ethernet", - .phy_id_mask = 0x001fffff, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .ack_interrupt = &rtl8201_ack_interrupt, .config_intr = &rtl8201_config_intr, .suspend = genphy_suspend, @@ -231,19 +227,16 @@ static struct phy_driver realtek_drvs[] = { .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, }, { - .phy_id = 0x001cc910, + PHY_ID_MATCH_EXACT(0x001cc910), .name = "RTL8211 Gigabit Ethernet", - .phy_id_mask = 0x001fffff, .features = PHY_GBIT_FEATURES, .config_aneg = rtl8211_config_aneg, .read_mmd = &genphy_read_mmd_unsupported, .write_mmd = &genphy_write_mmd_unsupported, }, { - .phy_id = 0x001cc912, + PHY_ID_MATCH_EXACT(0x001cc912), .name = "RTL8211B Gigabit Ethernet", - .phy_id_mask = 0x001fffff, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .ack_interrupt = &rtl821x_ack_interrupt, .config_intr = &rtl8211b_config_intr, .read_mmd = &genphy_read_mmd_unsupported, @@ -251,39 +244,32 @@ static struct phy_driver realtek_drvs[] = { .suspend = rtl8211b_suspend, .resume = rtl8211b_resume, }, { - .phy_id = 0x001cc913, + PHY_ID_MATCH_EXACT(0x001cc913), .name = "RTL8211C Gigabit Ethernet", - .phy_id_mask = 0x001fffff, .features = PHY_GBIT_FEATURES, .config_init = rtl8211c_config_init, .read_mmd = &genphy_read_mmd_unsupported, .write_mmd = &genphy_write_mmd_unsupported, }, { - .phy_id = 0x001cc914, + 
PHY_ID_MATCH_EXACT(0x001cc914), .name = "RTL8211DN Gigabit Ethernet", - .phy_id_mask = 0x001fffff, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .ack_interrupt = rtl821x_ack_interrupt, .config_intr = rtl8211e_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, }, { - .phy_id = 0x001cc915, + PHY_ID_MATCH_EXACT(0x001cc915), .name = "RTL8211E Gigabit Ethernet", - .phy_id_mask = 0x001fffff, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .ack_interrupt = &rtl821x_ack_interrupt, .config_intr = &rtl8211e_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, }, { - .phy_id = 0x001cc916, + PHY_ID_MATCH_EXACT(0x001cc916), .name = "RTL8211F Gigabit Ethernet", - .phy_id_mask = 0x001fffff, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &rtl8211f_config_init, .ack_interrupt = &rtl8211f_ack_interrupt, .config_intr = &rtl8211f_config_intr, @@ -292,11 +278,9 @@ static struct phy_driver realtek_drvs[] = { .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, }, { - .phy_id = 0x001cc961, + PHY_ID_MATCH_EXACT(0x001cc961), .name = "RTL8366RB Gigabit Ethernet", - .phy_id_mask = 0x001fffff, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &rtl8366rb_config_init, .suspend = genphy_suspend, .resume = genphy_resume, @@ -305,15 +289,8 @@ static struct phy_driver realtek_drvs[] = { module_phy_driver(realtek_drvs); -static struct mdio_device_id __maybe_unused realtek_tbl[] = { - { 0x001cc816, 0x001fffff }, - { 0x001cc910, 0x001fffff }, - { 0x001cc912, 0x001fffff }, - { 0x001cc913, 0x001fffff }, - { 0x001cc914, 0x001fffff }, - { 0x001cc915, 0x001fffff }, - { 0x001cc916, 0x001fffff }, - { 0x001cc961, 0x001fffff }, +static const struct mdio_device_id __maybe_unused realtek_tbl[] = { + { PHY_ID_MATCH_VENDOR(0x001cc800) }, { } }; diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index c328208388da..f9477ff55545 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -219,7 +219,6 @@ static struct phy_driver smsc_phy_driver[] = { .name = "SMSC LAN83C185", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = smsc_phy_probe, @@ -239,7 +238,6 @@ static struct phy_driver smsc_phy_driver[] = { .name = "SMSC LAN8187", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = smsc_phy_probe, @@ -264,7 +262,6 @@ static struct phy_driver smsc_phy_driver[] = { .name = "SMSC LAN8700", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = smsc_phy_probe, @@ -290,7 +287,6 @@ static struct phy_driver smsc_phy_driver[] = { .name = "SMSC LAN911x Internal PHY", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = smsc_phy_probe, @@ -309,7 +305,7 @@ static struct phy_driver smsc_phy_driver[] = { .name = "SMSC LAN8710/LAN8720", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT | PHY_RST_AFTER_CLK_EN, + .flags = PHY_RST_AFTER_CLK_EN, .probe = smsc_phy_probe, @@ -335,7 +331,6 @@ static struct phy_driver smsc_phy_driver[] = { .name = "SMSC LAN8740", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = smsc_phy_probe, diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c index 2fe9a87b55b5..33d733684f5b 100644 --- a/drivers/net/phy/ste10Xp.c +++ b/drivers/net/phy/ste10Xp.c @@ -87,7 +87,6 @@ static struct phy_driver ste10xp_pdriver[] = { .phy_id_mask = 0xfffffff0, .name = "STe101p", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = ste10Xp_config_init, .ack_interrupt = 
ste10Xp_ack_interrupt, .config_intr = ste10Xp_config_intr, @@ -98,7 +97,6 @@ static struct phy_driver ste10xp_pdriver[] = { .phy_id_mask = 0xffffffff, .name = "STe100p", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = ste10Xp_config_init, .ack_interrupt = ste10Xp_ack_interrupt, .config_intr = ste10Xp_config_intr, diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c index 55f48ee3595a..1e4fc42e4629 100644 --- a/drivers/net/phy/uPD60620.c +++ b/drivers/net/phy/uPD60620.c @@ -47,7 +47,7 @@ static int upd60620_read_status(struct phy_device *phydev) return phy_state; phydev->link = 0; - phydev->lp_advertising = 0; + linkmode_zero(phydev->lp_advertising); phydev->pause = 0; phydev->asym_pause = 0; @@ -70,8 +70,8 @@ static int upd60620_read_status(struct phy_device *phydev) if (phy_state < 0) return phy_state; - phydev->lp_advertising - = mii_lpa_to_ethtool_lpa_t(phy_state); + mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, + phy_state); if (phydev->duplex == DUPLEX_FULL) { if (phy_state & LPA_PAUSE_CAP) diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index fbf9ad429593..4ca513feba0e 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -399,7 +399,6 @@ static struct phy_driver vsc82xx_driver[] = { .name = "Vitesse VSC8234", .phy_id_mask = 0x000ffff0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, .ack_interrupt = &vsc824x_ack_interrupt, @@ -409,7 +408,6 @@ static struct phy_driver vsc82xx_driver[] = { .name = "Vitesse VSC8244", .phy_id_mask = 0x000fffc0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, .ack_interrupt = &vsc824x_ack_interrupt, @@ -419,7 +417,6 @@ static struct phy_driver vsc82xx_driver[] = { .name = "Vitesse VSC8514", .phy_id_mask = 0x000ffff0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, .ack_interrupt = &vsc824x_ack_interrupt, @@ -429,7 +426,6 @@ static struct phy_driver vsc82xx_driver[] = { .name = "Vitesse VSC8572", .phy_id_mask = 0x000ffff0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, .ack_interrupt = &vsc824x_ack_interrupt, @@ -439,7 +435,6 @@ static struct phy_driver vsc82xx_driver[] = { .name = "Vitesse VSC8574", .phy_id_mask = 0x000ffff0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, .ack_interrupt = &vsc824x_ack_interrupt, @@ -449,7 +444,6 @@ static struct phy_driver vsc82xx_driver[] = { .name = "Vitesse VSC8601", .phy_id_mask = 0x000ffff0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &vsc8601_config_init, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, @@ -494,7 +488,6 @@ static struct phy_driver vsc82xx_driver[] = { .name = "Vitesse VSC8662", .phy_id_mask = 0x000ffff0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, .ack_interrupt = &vsc824x_ack_interrupt, @@ -505,7 +498,6 @@ static struct phy_driver vsc82xx_driver[] = { .phy_id_mask = 0x000ffff0, .name = "Vitesse VSC8221", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &vsc8221_config_init, .ack_interrupt = 
&vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, @@ -515,7 +507,6 @@ static struct phy_driver vsc82xx_driver[] = { .phy_id_mask = 0x000ffff0, .name = "Vitesse VSC8211", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &vsc8221_config_init, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 060135ceaf0e..a65779c6d72f 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -2448,7 +2448,8 @@ build: goto out; } - if (!rcu_dereference(tun->steering_prog)) + if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 && + !tfile->detached) rxhash = __skb_get_hash_symmetric(skb); netif_receive_skb(skb); diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index be1917be28f2..3c8bdac78866 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -13,6 +13,7 @@ #include <linux/slab.h> #include <linux/if_vlan.h> #include <linux/uaccess.h> +#include <linux/linkmode.h> #include <linux/list.h> #include <linux/ip.h> #include <linux/ipv6.h> @@ -1586,18 +1587,17 @@ static int lan78xx_set_pause(struct net_device *net, dev->fc_request_control |= FLOW_CTRL_TX; if (ecmd.base.autoneg) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, }; u32 mii_adv; - u32 advertising; - ethtool_convert_link_mode_to_legacy_u32( - &advertising, ecmd.link_modes.advertising); - - advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, + ecmd.link_modes.advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + ecmd.link_modes.advertising); mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control); - advertising |= mii_adv_to_ethtool_adv_t(mii_adv); - - ethtool_convert_legacy_u32_to_link_mode( - ecmd.link_modes.advertising, advertising); + mii_adv_to_linkmode_adv_t(fc, mii_adv); + linkmode_or(ecmd.link_modes.advertising, fc, + ecmd.link_modes.advertising); phy_ethtool_ksettings_set(phydev, &ecmd); } @@ -2095,6 +2095,7 @@ static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev) static int lan78xx_phy_init(struct lan78xx_net *dev) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, }; int ret; u32 mii_adv; struct phy_device *phydev; @@ -2158,9 +2159,13 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) /* support both flow controls */ dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX); - phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->advertising); mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control); - phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv); + mii_adv_to_linkmode_adv_t(fc, mii_adv); + linkmode_or(phydev->advertising, fc, phydev->advertising); if (phydev->mdio.dev.of_node) { u32 reg; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 890fa5b905e2..f412ea1cef18 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1253,7 +1253,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, return PTR_ERR(net); peer = rtnl_create_link(net, ifname, name_assign_type, - &veth_link_ops, tbp); + &veth_link_ops, tbp, extack); if (IS_ERR(peer)) { put_net(net); return PTR_ERR(peer); diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 69b7227c637e..21ad4b1d7f03 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -981,24 +981,23 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device 
*vrf_dev, struct sk_buff *skb) { int orig_iif = skb->skb_iif; - bool need_strict; + bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr); + bool is_ndisc = ipv6_ndisc_frame(skb); - /* loopback traffic; do not push through packet taps again. - * Reset pkt_type for upper layers to process skb + /* loopback, multicast & non-ND link-local traffic; do not push through + * packet taps again. Reset pkt_type for upper layers to process skb */ - if (skb->pkt_type == PACKET_LOOPBACK) { + if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) { skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; IP6CB(skb)->flags |= IP6SKB_L3SLAVE; - skb->pkt_type = PACKET_HOST; + if (skb->pkt_type == PACKET_LOOPBACK) + skb->pkt_type = PACKET_HOST; goto out; } - /* if packet is NDISC or addressed to multicast or link-local - * then keep the ingress interface - */ - need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr); - if (!ipv6_ndisc_frame(skb) && !need_strict) { + /* if packet is NDISC then keep the ingress interface */ + if (!is_ndisc) { vrf_rx_stats(vrf_dev, skb->len); skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 297cdeaef479..c3e65e78f015 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1552,6 +1552,34 @@ drop: return 0; } +/* Callback from net/ipv{4,6}/udp.c to check that we have a VNI for errors */ +static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb) +{ + struct vxlan_dev *vxlan; + struct vxlan_sock *vs; + struct vxlanhdr *hdr; + __be32 vni; + + if (skb->len < VXLAN_HLEN) + return -EINVAL; + + hdr = vxlan_hdr(skb); + + if (!(hdr->vx_flags & VXLAN_HF_VNI)) + return -EINVAL; + + vs = rcu_dereference_sk_user_data(sk); + if (!vs) + return -ENOENT; + + vni = vxlan_vni(hdr->vx_vni); + vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni); + if (!vxlan) + return -ENOENT; + + return 0; +} + static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) { struct vxlan_dev *vxlan = netdev_priv(dev); @@ -2250,13 +2278,24 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto tx_error; } - /* Bypass encapsulation if the destination is local */ if (!info) { + /* Bypass encapsulation if the destination is local */ err = encap_bypass_if_local(skb, dev, vxlan, dst, dst_port, ifindex, vni, &rt->dst, rt->rt_flags); if (err) goto out_unlock; + + if (vxlan->cfg.df == VXLAN_DF_SET) { + df = htons(IP_DF); + } else if (vxlan->cfg.df == VXLAN_DF_INHERIT) { + struct ethhdr *eth = eth_hdr(skb); + + if (ntohs(eth->h_proto) == ETH_P_IPV6 || + (ntohs(eth->h_proto) == ETH_P_IP && + old_iph->frag_off & htons(IP_DF))) + df = htons(IP_DF); + } } else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) { df = htons(IP_DF); } @@ -2809,6 +2848,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { [IFLA_VXLAN_GPE] = { .type = NLA_FLAG, }, [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG }, [IFLA_VXLAN_TTL_INHERIT] = { .type = NLA_FLAG }, + [IFLA_VXLAN_DF] = { .type = NLA_U8 }, }; static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], @@ -2865,6 +2905,16 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], } } + if (data[IFLA_VXLAN_DF]) { + enum ifla_vxlan_df df = nla_get_u8(data[IFLA_VXLAN_DF]); + + if (df < 0 || df > VXLAN_DF_MAX) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_DF], + "Invalid DF attribute"); + return -EINVAL; + } + } + return 0; } @@ -2948,6 +2998,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool 
ipv6, tunnel_cfg.sk_user_data = vs; tunnel_cfg.encap_type = 1; tunnel_cfg.encap_rcv = vxlan_rcv; + tunnel_cfg.encap_err_lookup = vxlan_err_lookup; tunnel_cfg.encap_destroy = NULL; tunnel_cfg.gro_receive = vxlan_gro_receive; tunnel_cfg.gro_complete = vxlan_gro_complete; @@ -3509,6 +3560,9 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], conf->mtu = nla_get_u32(tb[IFLA_MTU]); } + if (data[IFLA_VXLAN_DF]) + conf->df = nla_get_u8(data[IFLA_VXLAN_DF]); + return 0; } @@ -3601,6 +3655,7 @@ static size_t vxlan_get_size(const struct net_device *dev) nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL_INHERIT */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */ + nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_DF */ nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */ @@ -3667,6 +3722,7 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT, !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) || nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) || + nla_put_u8(skb, IFLA_VXLAN_DF, vxlan->cfg.df) || nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) || nla_put_u8(skb, IFLA_VXLAN_LEARNING, !!(vxlan->cfg.flags & VXLAN_F_LEARN)) || @@ -3749,7 +3805,7 @@ struct net_device *vxlan_dev_create(struct net *net, const char *name, memset(&tb, 0, sizeof(tb)); dev = rtnl_create_link(net, name, name_assign_type, - &vxlan_link_ops, tb); + &vxlan_link_ops, tb, NULL); if (IS_ERR(dev)) return dev; diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 4d6409605207..7a42336c8af8 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -391,6 +391,7 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev) dev_kfree_skb(skb); return -ENOMEM; } + netdev_sent_queue(dev, skb->len); spin_lock_irqsave(&priv->lock, flags); /* Start from the next BD that should be filled */ @@ -447,6 +448,8 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv) { /* Start from the next BD that should be filled */ struct net_device *dev = priv->ndev; + unsigned int bytes_sent = 0; + int howmany = 0; struct qe_bd *bd; /* BD pointer */ u16 bd_status; int tx_restart = 0; @@ -474,6 +477,8 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv) skb = priv->tx_skbuff[priv->skb_dirtytx]; if (!skb) break; + howmany++; + bytes_sent += skb->len; dev->stats.tx_packets++; memset(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr), @@ -501,6 +506,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv) if (tx_restart) hdlc_tx_restart(priv); + netdev_completed_queue(dev, howmany, bytes_sent); return 0; } @@ -721,6 +727,7 @@ static int uhdlc_open(struct net_device *dev) priv->hdlc_busy = 1; netif_device_attach(priv->ndev); napi_enable(&priv->napi); + netdev_reset_queue(dev); netif_start_queue(dev); hdlc_open(dev); } @@ -812,6 +819,7 @@ static int uhdlc_close(struct net_device *dev) free_irq(priv->ut_info->uf_info.irq, priv); netif_stop_queue(dev); + netdev_reset_queue(dev); priv->hdlc_busy = 0; return 0; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 3e37c8cf82c6..b2ad2122c8c4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -342,6 +342,37 @@ static int 
brcmf_sdiod_skbuff_write(struct brcmf_sdio_dev *sdiodev, return err; } +static int mmc_submit_one(struct mmc_data *md, struct mmc_request *mr, + struct mmc_command *mc, int sg_cnt, int req_sz, + int func_blk_sz, u32 *addr, + struct brcmf_sdio_dev *sdiodev, + struct sdio_func *func, int write) +{ + int ret; + + md->sg_len = sg_cnt; + md->blocks = req_sz / func_blk_sz; + mc->arg |= (*addr & 0x1FFFF) << 9; /* address */ + mc->arg |= md->blocks & 0x1FF; /* block count */ + /* incrementing addr for function 1 */ + if (func->num == 1) + *addr += req_sz; + + mmc_set_data_timeout(md, func->card); + mmc_wait_for_req(func->card->host, mr); + + ret = mc->error ? mc->error : md->error; + if (ret == -ENOMEDIUM) { + brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM); + } else if (ret != 0) { + brcmf_err("CMD53 sg block %s failed %d\n", + write ? "write" : "read", ret); + ret = -EIO; + } + + return ret; +} + /** * brcmf_sdiod_sglist_rw - SDIO interface function for block data access * @sdiodev: brcmfmac sdio device @@ -360,11 +391,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, struct sk_buff_head *pktlist) { unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset; - unsigned int max_req_sz, orig_offset, dst_offset; - unsigned short max_seg_cnt, seg_sz; + unsigned int max_req_sz, src_offset, dst_offset; unsigned char *pkt_data, *orig_data, *dst_data; - struct sk_buff *pkt_next = NULL, *local_pkt_next; struct sk_buff_head local_list, *target_list; + struct sk_buff *pkt_next = NULL, *src; + unsigned short max_seg_cnt; struct mmc_request mmc_req; struct mmc_command mmc_cmd; struct mmc_data mmc_dat; @@ -404,9 +435,6 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, max_req_sz = sdiodev->max_request_size; max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count, target_list->qlen); - seg_sz = target_list->qlen; - pkt_offset = 0; - pkt_next = target_list->next; memset(&mmc_req, 0, sizeof(struct mmc_request)); memset(&mmc_cmd, 0, sizeof(struct mmc_command)); @@ -425,12 +453,12 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, mmc_req.cmd = &mmc_cmd; mmc_req.data = &mmc_dat; - while (seg_sz) { - req_sz = 0; - sg_cnt = 0; - sgl = sdiodev->sgtable.sgl; - /* prep sg table */ - while (pkt_next != (struct sk_buff *)target_list) { + req_sz = 0; + sg_cnt = 0; + sgl = sdiodev->sgtable.sgl; + skb_queue_walk(target_list, pkt_next) { + pkt_offset = 0; + while (pkt_offset < pkt_next->len) { pkt_data = pkt_next->data + pkt_offset; sg_data_sz = pkt_next->len - pkt_offset; if (sg_data_sz > sdiodev->max_segment_size) @@ -439,72 +467,55 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, sg_data_sz = max_req_sz - req_sz; sg_set_buf(sgl, pkt_data, sg_data_sz); - sg_cnt++; + sgl = sg_next(sgl); req_sz += sg_data_sz; pkt_offset += sg_data_sz; - if (pkt_offset == pkt_next->len) { - pkt_offset = 0; - pkt_next = pkt_next->next; + if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt) { + ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd, + sg_cnt, req_sz, func_blk_sz, + &addr, sdiodev, func, write); + if (ret) + goto exit_queue_walk; + req_sz = 0; + sg_cnt = 0; + sgl = sdiodev->sgtable.sgl; } - - if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt) - break; - } - seg_sz -= sg_cnt; - - if (req_sz % func_blk_sz != 0) { - brcmf_err("sg request length %u is not %u aligned\n", - req_sz, func_blk_sz); - ret = -ENOTBLK; - goto exit; - } - - mmc_dat.sg_len = sg_cnt; - mmc_dat.blocks = req_sz / func_blk_sz; - mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* 
address */ - mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */ - /* incrementing addr for function 1 */ - if (func->num == 1) - addr += req_sz; - - mmc_set_data_timeout(&mmc_dat, func->card); - mmc_wait_for_req(func->card->host, &mmc_req); - - ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error; - if (ret == -ENOMEDIUM) { - brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM); - break; - } else if (ret != 0) { - brcmf_err("CMD53 sg block %s failed %d\n", - write ? "write" : "read", ret); - ret = -EIO; - break; } } - + if (sg_cnt) + ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd, + sg_cnt, req_sz, func_blk_sz, + &addr, sdiodev, func, write); +exit_queue_walk: if (!write && sdiodev->settings->bus.sdio.broken_sg_support) { - local_pkt_next = local_list.next; - orig_offset = 0; + src = __skb_peek(&local_list); + src_offset = 0; skb_queue_walk(pktlist, pkt_next) { dst_offset = 0; - do { - req_sz = local_pkt_next->len - orig_offset; - req_sz = min_t(uint, pkt_next->len - dst_offset, - req_sz); - orig_data = local_pkt_next->data + orig_offset; + + /* This is safe because we must have enough SKB data + * in the local list to cover everything in pktlist. + */ + while (1) { + req_sz = pkt_next->len - dst_offset; + if (req_sz > src->len - src_offset) + req_sz = src->len - src_offset; + + orig_data = src->data + src_offset; dst_data = pkt_next->data + dst_offset; memcpy(dst_data, orig_data, req_sz); - orig_offset += req_sz; - dst_offset += req_sz; - if (orig_offset == local_pkt_next->len) { - orig_offset = 0; - local_pkt_next = local_pkt_next->next; + + src_offset += req_sz; + if (src_offset == src->len) { + src_offset = 0; + src = skb_peek_next(src, &local_list); } + dst_offset += req_sz; if (dst_offset == pkt_next->len) break; - } while (!skb_queue_empty(&local_list)); + } } } diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index f17f602e6171..a8303afa15f1 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -337,8 +337,6 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue) return; } - wmb(); /* barrier so backend seens requests */ - RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify); if (notify) notify_remote_via_irq(queue->rx_irq); |
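Note: the brcmf_sdiod_sglist_rw() rework above factors the CMD53 submission into mmc_submit_one() and turns the transfer into an accumulate-and-flush loop: segments are added to the scatter list until either the host's byte or segment limit is reached, that chunk is submitted, and any remainder is flushed after the queue walk. A standalone sketch of that pattern follows; submit_chunk(), sglist_rw() and the MAX_* limits are illustrative stand-ins for mmc_submit_one(), the sk_buff queue walk and the sdiodev limits, not real driver symbols.

#include <stdio.h>

#define MAX_REQ_SZ   1024   /* bytes per request (example host limit) */
#define MAX_SEG_CNT  4      /* segments per request (example host limit) */
#define MAX_SEG_SZ   512    /* bytes per segment (example host limit) */

static int submit_chunk(int sg_cnt, unsigned int req_sz)
{
        /* stands in for mmc_submit_one(): build the request and wait for it */
        printf("submit: %d segments, %u bytes\n", sg_cnt, req_sz);
        return 0;
}

static int sglist_rw(const unsigned int *buf_len, int nbufs)
{
        unsigned int req_sz = 0;
        int sg_cnt = 0, ret = 0;

        for (int i = 0; i < nbufs; i++) {
                unsigned int off = 0;

                while (off < buf_len[i]) {
                        unsigned int seg = buf_len[i] - off;

                        if (seg > MAX_SEG_SZ)
                                seg = MAX_SEG_SZ;
                        if (seg > MAX_REQ_SZ - req_sz)
                                seg = MAX_REQ_SZ - req_sz;

                        sg_cnt++;
                        req_sz += seg;
                        off += seg;

                        if (req_sz >= MAX_REQ_SZ || sg_cnt >= MAX_SEG_CNT) {
                                ret = submit_chunk(sg_cnt, req_sz);
                                if (ret)
                                        return ret;
                                req_sz = 0;
                                sg_cnt = 0;
                        }
                }
        }
        /* flush whatever is left over, as the patch does after the walk */
        if (sg_cnt)
                ret = submit_chunk(sg_cnt, req_sz);
        return ret;
}

int main(void)
{
        unsigned int lens[] = { 700, 300, 1500 };

        return sglist_rw(lens, 3);
}

Compared with the old code, the chunk boundary is decided while building the scatter list rather than in a separate outer loop, which is what lets the new version drop the seg_sz bookkeeping and the manual pkt_next walking.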