Diffstat (limited to 'drivers/net')
103 files changed, 3724 insertions, 1791 deletions
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index a2abfade82dd..70814303aab8 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -84,7 +84,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion, sizeof(ipversion))) { - dev_core_stats_rx_dropped_inc(bareudp->dev); + dev_dstats_rx_dropped(bareudp->dev); goto drop; } ipversion >>= 4; @@ -94,7 +94,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) } else if (ipversion == 6 && bareudp->multi_proto_mode) { proto = htons(ETH_P_IPV6); } else { - dev_core_stats_rx_dropped_inc(bareudp->dev); + dev_dstats_rx_dropped(bareudp->dev); goto drop; } } else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) { @@ -108,7 +108,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) ipv4_is_multicast(tunnel_hdr->daddr)) { proto = htons(ETH_P_MPLS_MC); } else { - dev_core_stats_rx_dropped_inc(bareudp->dev); + dev_dstats_rx_dropped(bareudp->dev); goto drop; } } else { @@ -124,7 +124,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) (addr_type & IPV6_ADDR_MULTICAST)) { proto = htons(ETH_P_MPLS_MC); } else { - dev_core_stats_rx_dropped_inc(bareudp->dev); + dev_dstats_rx_dropped(bareudp->dev); goto drop; } } @@ -136,7 +136,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) proto, !net_eq(bareudp->net, dev_net(bareudp->dev)))) { - dev_core_stats_rx_dropped_inc(bareudp->dev); + dev_dstats_rx_dropped(bareudp->dev); goto drop; } @@ -144,7 +144,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) tun_dst = udp_tun_rx_dst(skb, family, key, 0, 0); if (!tun_dst) { - dev_core_stats_rx_dropped_inc(bareudp->dev); + dev_dstats_rx_dropped(bareudp->dev); goto drop; } skb_dst_set(skb, &tun_dst->dst); @@ -194,7 +194,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) len = skb->len; err = gro_cells_receive(&bareudp->gro_cells, skb); if (likely(err == NET_RX_SUCCESS)) - dev_sw_netstats_rx_add(bareudp->dev, len); + dev_dstats_rx_add(bareudp->dev, len); return 0; drop: @@ -589,7 +589,7 @@ static void bareudp_setup(struct net_device *dev) dev->priv_flags |= IFF_NO_QUEUE; dev->lltx = true; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; - dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; + dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS; } static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[], diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c index c42ebe9da55a..2d555f854008 100644 --- a/drivers/net/can/sja1000/sja1000_platform.c +++ b/drivers/net/can/sja1000/sja1000_platform.c @@ -230,18 +230,9 @@ static int sp_probe(struct platform_device *pdev) return -ENODEV; } - res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res_mem) - return -ENODEV; - - if (!devm_request_mem_region(&pdev->dev, res_mem->start, - resource_size(res_mem), DRV_NAME)) - return -EBUSY; - - addr = devm_ioremap(&pdev->dev, res_mem->start, - resource_size(res_mem)); - if (!addr) - return -ENOMEM; + addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res_mem); + if (IS_ERR(addr)) + return PTR_ERR(addr); if (of) { irq = platform_get_irq(pdev, 0); diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 285785c942b0..0561b60f668f 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -2224,13 +2224,16 @@ int 
b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy) } EXPORT_SYMBOL(b53_eee_init); -int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e) +bool b53_support_eee(struct dsa_switch *ds, int port) { struct b53_device *dev = ds->priv; - if (is5325(dev) || is5365(dev)) - return -EOPNOTSUPP; + return !is5325(dev) && !is5365(dev); +} +EXPORT_SYMBOL(b53_support_eee); +int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e) +{ return 0; } EXPORT_SYMBOL(b53_get_mac_eee); @@ -2240,9 +2243,6 @@ int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e) struct b53_device *dev = ds->priv; struct ethtool_keee *p = &dev->ports[port].eee; - if (is5325(dev) || is5365(dev)) - return -EOPNOTSUPP; - p->eee_enabled = e->eee_enabled; b53_eee_enable_set(ds, port, e->eee_enabled); @@ -2298,6 +2298,7 @@ static const struct dsa_switch_ops b53_switch_ops = { .phylink_get_caps = b53_phylink_get_caps, .port_enable = b53_enable_port, .port_disable = b53_disable_port, + .support_eee = b53_support_eee, .get_mac_eee = b53_get_mac_eee, .set_mac_eee = b53_set_mac_eee, .port_bridge_join = b53_br_join, diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 05141176daf5..99e5cfc98ae8 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -384,6 +384,7 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy); void b53_disable_port(struct dsa_switch *ds, int port); void b53_brcm_hdr_setup(struct dsa_switch *ds, int port); int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy); +bool b53_support_eee(struct dsa_switch *ds, int port); int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e); int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 43bde1f583ff..a53fb6191e6b 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1232,6 +1232,7 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .set_wol = bcm_sf2_sw_set_wol, .port_enable = bcm_sf2_port_setup, .port_disable = bcm_sf2_port_disable, + .support_eee = b53_support_eee, .get_mac_eee = b53_get_mac_eee, .set_mac_eee = b53_set_mac_eee, .port_bridge_join = b53_br_join, diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 8a03baa6aecc..df314724e6a7 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -2544,7 +2544,11 @@ static int ksz_mdio_register(struct ksz_device *dev) bus->read = ksz_sw_mdio_read; bus->write = ksz_sw_mdio_write; bus->name = "ksz user smi"; - snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index); + if (ds->dst->index != 0) { + snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d-%d", ds->dst->index, ds->index); + } else { + snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index); + } } ret = ksz_parse_dt_phy_config(dev, bus, mdio_np); @@ -3444,12 +3448,12 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port) return -EOPNOTSUPP; } -static int ksz_validate_eee(struct dsa_switch *ds, int port) +static bool ksz_support_eee(struct dsa_switch *ds, int port) { struct ksz_device *dev = ds->priv; if (!dev->info->internal_phy[port]) - return -EOPNOTSUPP; + return false; switch (dev->chip_id) { case KSZ8563_CHIP_ID: @@ -3461,21 +3465,15 @@ static int ksz_validate_eee(struct dsa_switch *ds, int port) case KSZ9896_CHIP_ID: case KSZ9897_CHIP_ID: case LAN9646_CHIP_ID: - return 
0; + return true; } - return -EOPNOTSUPP; + return false; } static int ksz_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e) { - int ret; - - ret = ksz_validate_eee(ds, port); - if (ret) - return ret; - /* There is no documented control of Tx LPI configuration. */ e->tx_lpi_enabled = true; @@ -3491,11 +3489,6 @@ static int ksz_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e) { struct ksz_device *dev = ds->priv; - int ret; - - ret = ksz_validate_eee(ds, port); - if (ret) - return ret; if (!e->tx_lpi_enabled) { dev_err(dev->dev, "Disabling EEE Tx LPI is not supported\n"); @@ -4641,6 +4634,7 @@ static const struct dsa_switch_ops ksz_switch_ops = { .cls_flower_add = ksz_cls_flower_add, .cls_flower_del = ksz_cls_flower_del, .port_setup_tc = ksz_setup_tc, + .support_eee = ksz_support_eee, .get_mac_eee = ksz_get_mac_eee, .set_mac_eee = ksz_set_mac_eee, .port_get_default_prio = ksz_port_get_default_prio, diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 086b8b3d5b40..9605febd3573 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -3238,6 +3238,7 @@ const struct dsa_switch_ops mt7530_switch_ops = { .port_mirror_add = mt753x_port_mirror_add, .port_mirror_del = mt753x_port_mirror_del, .phylink_get_caps = mt753x_phylink_get_caps, + .support_eee = dsa_supports_eee, .get_mac_eee = mt753x_get_mac_eee, .set_mac_eee = mt753x_set_mac_eee, .conduit_state_change = mt753x_conduit_state_change, diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 3a792f79270d..570c8642d387 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1289,9 +1289,6 @@ static size_t mv88e6095_stats_get_stat(struct mv88e6xxx_chip *chip, int port, const struct mv88e6xxx_hw_stat *stat, uint64_t *data) { - if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_PORT))) - return 0; - *data = _mv88e6xxx_get_ethtool_stat(chip, stat, port, 0, MV88E6XXX_G1_STATS_OP_HIST_RX); return 1; @@ -1301,9 +1298,6 @@ static size_t mv88e6250_stats_get_stat(struct mv88e6xxx_chip *chip, int port, const struct mv88e6xxx_hw_stat *stat, uint64_t *data) { - if (!(stat->type & STATS_TYPE_BANK0)) - return 0; - *data = _mv88e6xxx_get_ethtool_stat(chip, stat, port, 0, MV88E6XXX_G1_STATS_OP_HIST_RX); return 1; @@ -1313,9 +1307,6 @@ static size_t mv88e6320_stats_get_stat(struct mv88e6xxx_chip *chip, int port, const struct mv88e6xxx_hw_stat *stat, uint64_t *data) { - if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_BANK1))) - return 0; - *data = _mv88e6xxx_get_ethtool_stat(chip, stat, port, MV88E6XXX_G1_STATS_OP_BANK_1_BIT_9, MV88E6XXX_G1_STATS_OP_HIST_RX); @@ -1326,9 +1317,6 @@ static size_t mv88e6390_stats_get_stat(struct mv88e6xxx_chip *chip, int port, const struct mv88e6xxx_hw_stat *stat, uint64_t *data) { - if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_BANK1))) - return 0; - *data = _mv88e6xxx_get_ethtool_stat(chip, stat, port, MV88E6XXX_G1_STATS_OP_BANK_1_BIT_10, 0); @@ -1341,6 +1329,9 @@ static size_t mv88e6xxx_stats_get_stat(struct mv88e6xxx_chip *chip, int port, { int ret = 0; + if (!(stat->type & chip->info->stats_type)) + return 0; + if (chip->info->ops->stats_get_stat) { mv88e6xxx_reg_lock(chip); ret = chip->info->ops->stats_get_stat(chip, port, stat, data); @@ -5645,6 +5636,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 5, + .stats_type = STATS_TYPE_BANK0, .atu_move_port_mask = 0xf, .dual_chip = true, .ops = &mv88e6250_ops, @@ -5665,6 +5657,7 @@ 
static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 5, + .stats_type = STATS_TYPE_BANK0, .atu_move_port_mask = 0xf, .dual_chip = true, .ops = &mv88e6250_ops, @@ -5687,6 +5680,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 8, .g2_irqs = 10, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT, .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, @@ -5708,6 +5702,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT, .atu_move_port_mask = 0xf, .multi_chip = true, .ops = &mv88e6095_ops, @@ -5730,6 +5725,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 8, .g2_irqs = 10, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT, .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, @@ -5754,6 +5750,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT, .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, @@ -5776,6 +5773,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT, .atu_move_port_mask = 0xf, .multi_chip = true, .ops = &mv88e6131_ops, @@ -5800,6 +5798,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0x1f, .g1_irqs = 9, .g2_irqs = 10, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1, .pvt = true, .multi_chip = true, .edsa_support = MV88E6XXX_EDSA_SUPPORTED, @@ -5823,6 +5822,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT, .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, @@ -5848,6 +5848,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT, .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, @@ -5872,6 +5873,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1, .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, @@ -5897,6 +5899,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT, .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, @@ -5921,6 +5924,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT, .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, @@ -5946,6 +5950,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT, .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, @@ -5968,6 +5973,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT, .atu_move_port_mask = 0xf, .multi_chip = true, .edsa_support = MV88E6XXX_EDSA_SUPPORTED, @@ -5992,6 +5998,7 @@ static const struct mv88e6xxx_info 
mv88e6xxx_table[] = { .age_time_coeff = 3750, .g1_irqs = 9, .g2_irqs = 14, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1, .pvt = true, .multi_chip = true, .atu_move_port_mask = 0x1f, @@ -6016,6 +6023,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 3750, .g1_irqs = 9, .g2_irqs = 14, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1, .atu_move_port_mask = 0x1f, .pvt = true, .multi_chip = true, @@ -6039,6 +6047,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 3750, .g1_irqs = 9, .g2_irqs = 14, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1, .atu_move_port_mask = 0x1f, .pvt = true, .multi_chip = true, @@ -6063,6 +6072,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 3750, .g1_irqs = 10, .g2_irqs = 14, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1, .atu_move_port_mask = 0x1f, .pvt = true, .multi_chip = true, @@ -6087,6 +6097,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 3750, .g1_irqs = 10, .g2_irqs = 14, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1, .atu_move_port_mask = 0x1f, .pvt = true, .multi_chip = true, @@ -6114,6 +6125,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, + .stats_type = STATS_TYPE_BANK0, .atu_move_port_mask = 0xf, .dual_chip = true, .ptp_support = true, @@ -6138,6 +6150,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT, .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, @@ -6161,6 +6174,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, + .stats_type = STATS_TYPE_BANK0, .atu_move_port_mask = 0xf, .dual_chip = true, .ptp_support = true, @@ -6184,6 +6198,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 3750, .g1_irqs = 9, .g2_irqs = 14, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1, .atu_move_port_mask = 0x1f, .pvt = true, .multi_chip = true, @@ -6208,6 +6223,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 8, .g2_irqs = 10, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1, .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, @@ -6233,6 +6249,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 8, .g2_irqs = 10, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1, .atu_move_port_mask = 0xf, .multi_chip = true, .edsa_support = MV88E6XXX_EDSA_SUPPORTED, @@ -6259,6 +6276,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0x1f, .g1_irqs = 9, .g2_irqs = 10, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1, .pvt = true, .multi_chip = true, .edsa_support = MV88E6XXX_EDSA_SUPPORTED, @@ -6283,6 +6301,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT, .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, @@ -6307,6 +6326,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT, .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, @@ -6332,6 +6352,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, + 
.stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT, .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, @@ -6359,6 +6380,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 3750, .g1_irqs = 10, .g2_irqs = 14, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1, .atu_move_port_mask = 0x1f, .pvt = true, .multi_chip = true, @@ -6383,6 +6405,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 3750, .g1_irqs = 9, .g2_irqs = 14, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1, .atu_move_port_mask = 0x1f, .pvt = true, .multi_chip = true, @@ -6408,6 +6431,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 3750, .g1_irqs = 9, .g2_irqs = 14, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1, .atu_move_port_mask = 0x1f, .pvt = true, .multi_chip = true, @@ -6433,6 +6457,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 3750, .g1_irqs = 10, .g2_irqs = 14, + .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1, .atu_move_port_mask = 0x1f, .pvt = true, .multi_chip = true, @@ -7074,6 +7099,7 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .get_sset_count = mv88e6xxx_get_sset_count, .port_max_mtu = mv88e6xxx_get_max_mtu, .port_change_mtu = mv88e6xxx_change_mtu, + .support_eee = dsa_supports_eee, .get_mac_eee = mv88e6xxx_get_mac_eee, .set_mac_eee = mv88e6xxx_set_mac_eee, .get_eeprom_len = mv88e6xxx_get_eeprom_len, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 9fe8e8a7856b..86bf113c9bfa 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -144,6 +144,7 @@ struct mv88e6xxx_info { unsigned int age_time_coeff; unsigned int g1_irqs; unsigned int g2_irqs; + int stats_type; bool pvt; /* Mark certain ports as invalid. 
This is required for example for the diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c index 59b4a7240b58..ec74e3c2b0e9 100644 --- a/drivers/net/dsa/qca/qca8k-8xxx.c +++ b/drivers/net/dsa/qca/qca8k-8xxx.c @@ -2016,6 +2016,7 @@ static const struct dsa_switch_ops qca8k_switch_ops = { .get_ethtool_stats = qca8k_get_ethtool_stats, .get_sset_count = qca8k_get_sset_count, .set_ageing_time = qca8k_set_ageing_time, + .support_eee = dsa_supports_eee, .get_mac_eee = qca8k_get_mac_eee, .set_mac_eee = qca8k_set_mac_eee, .port_enable = qca8k_port_enable, diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c index baba204ad62f..3d790f8c6f4d 100644 --- a/drivers/net/dsa/sja1105/sja1105_static_config.c +++ b/drivers/net/dsa/sja1105/sja1105_static_config.c @@ -26,12 +26,8 @@ void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len) pr_err("Start bit (%d) expected to be larger than end (%d)\n", start, end); } else if (rc == -ERANGE) { - if ((start - end + 1) > 64) - pr_err("Field %d-%d too large for 64 bits!\n", - start, end); - else - pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n", - *val, start, end); + pr_err("Field %d-%d too large for 64 bits!\n", + start, end); } dump_stack(); } diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index e641dbbea1e2..b854b6b42d77 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -421,18 +421,12 @@ static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata) if (dev->of_node) { struct clk *parent = clk_get_parent(pdata->clk); + long rate = rgmii_clock(pdata->phy_speed); - switch (pdata->phy_speed) { - case SPEED_10: - clk_set_rate(parent, 2500000); - break; - case SPEED_100: - clk_set_rate(parent, 25000000); - break; - default: - clk_set_rate(parent, 125000000); - break; - } + if (rate < 0) + rate = 125000000; + + clk_set_rate(parent, rate); } #ifdef CONFIG_ACPI else { diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index daa416fb1724..640f500f989d 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -530,19 +530,9 @@ static void macb_set_tx_clk(struct macb *bp, int speed) if (bp->phy_interface == PHY_INTERFACE_MODE_MII) return; - switch (speed) { - case SPEED_10: - rate = 2500000; - break; - case SPEED_100: - rate = 25000000; - break; - case SPEED_1000: - rate = 125000000; - break; - default: + rate = rgmii_clock(speed); + if (rate < 0) return; - } rate_rounded = clk_round_rate(bp->tx_clk, rate); if (rate_rounded < 0) diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index 75401d2a5fb4..a2d7300925a8 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -81,8 +81,7 @@ config UCC_GETH tristate "Freescale QE Gigabit Ethernet" depends on QUICC_ENGINE && PPC32 select FSL_PQ_MDIO - select PHYLIB - select FIXED_PHY + select PHYLINK help This driver supports the Gigabit Ethernet mode of the QUICC Engine, which is available on some Freescale SOCs. 
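The X-Gene (xgene_enet_configure_clock) and macb (macb_set_tx_clk) hunks above both replace an open-coded speed-to-rate switch with the rgmii_clock() helper from <linux/phy.h>. A minimal sketch of that pattern, with a hypothetical foo_set_tx_clk() and its tx_clk argument standing in for the driver specifics:

#include <linux/clk.h>
#include <linux/phy.h>

/* Hypothetical illustration of the rgmii_clock() conversion, not driver code. */
static void foo_set_tx_clk(struct clk *tx_clk, int speed)
{
	long rate = rgmii_clock(speed);	/* 2.5/25/125 MHz, or -EINVAL */

	if (rate < 0)
		return;			/* unknown speed: leave the clock alone */

	clk_set_rate(tx_clk, rate);
}
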
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index a293b08f36d4..147a93bf9fa9 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -780,13 +780,14 @@ struct ethsw_dump_ctx { static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry, struct ethsw_dump_ctx *dump) { + struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx; int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC; u32 portid = NETLINK_CB(dump->cb->skb).portid; u32 seq = dump->cb->nlh->nlmsg_seq; struct nlmsghdr *nlh; struct ndmsg *ndm; - if (dump->idx < dump->cb->args[2]) + if (dump->idx < ctx->fdb_idx) goto skip; nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 1cca0425d493..c81f2ea588f2 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -671,8 +671,6 @@ struct fec_enet_private { unsigned int tx_time_itr; unsigned int itr_clk_rate; - /* tx lpi eee mode */ - struct ethtool_keee eee; unsigned int clk_ref_rate; /* ptp clock period in ns*/ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 1b55047c0237..b2daed55bf6c 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2045,14 +2045,14 @@ static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us) return us * (fep->clk_ref_rate / 1000) / 1000; } -static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable) +static int fec_enet_eee_mode_set(struct net_device *ndev, u32 lpi_timer, + bool enable) { struct fec_enet_private *fep = netdev_priv(ndev); - struct ethtool_keee *p = &fep->eee; unsigned int sleep_cycle, wake_cycle; if (enable) { - sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer); + sleep_cycle = fec_enet_us_to_tx_cycle(ndev, lpi_timer); wake_cycle = sleep_cycle; } else { sleep_cycle = 0; @@ -2105,7 +2105,9 @@ static void fec_enet_adjust_link(struct net_device *ndev) napi_enable(&fep->napi); } if (fep->quirks & FEC_QUIRK_HAS_EEE) - fec_enet_eee_mode_set(ndev, phy_dev->enable_tx_lpi); + fec_enet_eee_mode_set(ndev, + phy_dev->eee_cfg.tx_lpi_timer, + phy_dev->enable_tx_lpi); } else { if (fep->link) { netif_stop_queue(ndev); @@ -3181,7 +3183,6 @@ static int fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata) { struct fec_enet_private *fep = netdev_priv(ndev); - struct ethtool_keee *p = &fep->eee; if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) return -EOPNOTSUPP; @@ -3189,8 +3190,6 @@ fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata) if (!netif_running(ndev)) return -ENETDOWN; - edata->tx_lpi_timer = p->tx_lpi_timer; - return phy_ethtool_get_eee(ndev->phydev, edata); } @@ -3198,7 +3197,6 @@ static int fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata) { struct fec_enet_private *fep = netdev_priv(ndev); - struct ethtool_keee *p = &fep->eee; if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) return -EOPNOTSUPP; @@ -3206,8 +3204,6 @@ fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata) if (!netif_running(ndev)) return -ENETDOWN; - p->tx_lpi_timer = edata->tx_lpi_timer; - return phy_ethtool_set_eee(ndev->phydev, edata); } diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index fb416d60dcd7..11887458f050 100644 --- 
a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -2690,13 +2690,12 @@ static struct fman *read_dts_node(struct platform_device *of_dev) { struct fman *fman; struct device_node *fm_node, *muram_node; + void __iomem *base_addr; struct resource *res; u32 val, range[2]; int err, irq; struct clk *clk; u32 clk_rate; - phys_addr_t phys_base_addr; - resource_size_t mem_size; fman = kzalloc(sizeof(*fman), GFP_KERNEL); if (!fman) @@ -2724,18 +2723,6 @@ static struct fman *read_dts_node(struct platform_device *of_dev) goto fman_node_put; fman->dts_params.err_irq = err; - /* Get the FM address */ - res = platform_get_resource(of_dev, IORESOURCE_MEM, 0); - if (!res) { - err = -EINVAL; - dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n", - __func__); - goto fman_node_put; - } - - phys_base_addr = res->start; - mem_size = resource_size(res); - clk = of_clk_get(fm_node, 0); if (IS_ERR(clk)) { err = PTR_ERR(clk); @@ -2803,24 +2790,16 @@ static struct fman *read_dts_node(struct platform_device *of_dev) } } - fman->dts_params.res = - devm_request_mem_region(&of_dev->dev, phys_base_addr, - mem_size, "fman"); - if (!fman->dts_params.res) { - err = -EBUSY; - dev_err(&of_dev->dev, "%s: request_mem_region() failed\n", - __func__); - goto fman_free; - } - - fman->dts_params.base_addr = - devm_ioremap(&of_dev->dev, phys_base_addr, mem_size); - if (!fman->dts_params.base_addr) { - err = -ENOMEM; + base_addr = devm_platform_get_and_ioremap_resource(of_dev, 0, &res); + if (IS_ERR(base_addr)) { + err = PTR_ERR(base_addr); dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__); goto fman_free; } + fman->dts_params.base_addr = base_addr; + fman->dts_params.res = res; + fman->dev = &of_dev->dev; err = of_platform_populate(fm_node, NULL, NULL, &of_dev->dev); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 6663c1768089..f47f8177a93b 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -26,7 +26,7 @@ #include <linux/dma-mapping.h> #include <linux/mii.h> #include <linux/phy.h> -#include <linux/phy_fixed.h> +#include <linux/phylink.h> #include <linux/workqueue.h> #include <linux/of.h> #include <linux/of_address.h> @@ -34,6 +34,7 @@ #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/platform_device.h> +#include <linux/rtnetlink.h> #include <linux/uaccess.h> #include <asm/irq.h> @@ -132,7 +133,6 @@ static const struct ucc_geth_info ugeth_primary_info = { .transmitFlowControl = 1, .maxGroupAddrInHash = 4, .maxIndAddrInHash = 4, - .prel = 7, .maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */ .minFrameLength = 64, .maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. 
*/ @@ -1205,34 +1205,6 @@ static int init_mac_station_addr_regs(u8 address_byte_0, return 0; } -static int init_check_frame_length_mode(int length_check, - u32 __iomem *maccfg2_register) -{ - u32 value = 0; - - value = in_be32(maccfg2_register); - - if (length_check) - value |= MACCFG2_LC; - else - value &= ~MACCFG2_LC; - - out_be32(maccfg2_register, value); - return 0; -} - -static int init_preamble_length(u8 preamble_length, - u32 __iomem *maccfg2_register) -{ - if ((preamble_length < 3) || (preamble_length > 7)) - return -EINVAL; - - clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK, - preamble_length << MACCFG2_PREL_SHIFT); - - return 0; -} - static int init_rx_parameters(int reject_broadcast, int receive_short_frames, int promiscuous, u32 __iomem *upsmr_register) @@ -1287,94 +1259,11 @@ static int init_min_frame_len(u16 min_frame_length, return 0; } -static int adjust_enet_interface(struct ucc_geth_private *ugeth) +static bool phy_interface_mode_is_reduced(phy_interface_t interface) { - struct ucc_geth_info *ug_info; - struct ucc_geth __iomem *ug_regs; - struct ucc_fast __iomem *uf_regs; - int ret_val; - u32 upsmr, maccfg2; - u16 value; - - ugeth_vdbg("%s: IN", __func__); - - ug_info = ugeth->ug_info; - ug_regs = ugeth->ug_regs; - uf_regs = ugeth->uccf->uf_regs; - - /* Set MACCFG2 */ - maccfg2 = in_be32(&ug_regs->maccfg2); - maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK; - if ((ugeth->max_speed == SPEED_10) || - (ugeth->max_speed == SPEED_100)) - maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; - else if (ugeth->max_speed == SPEED_1000) - maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; - maccfg2 |= ug_info->padAndCrc; - out_be32(&ug_regs->maccfg2, maccfg2); - - /* Set UPSMR */ - upsmr = in_be32(&uf_regs->upsmr); - upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M | - UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM); - if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { - if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII) - upsmr |= UCC_GETH_UPSMR_RPM; - switch (ugeth->max_speed) { - case SPEED_10: - upsmr |= UCC_GETH_UPSMR_R10M; - fallthrough; - case SPEED_100: - if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI) - upsmr |= UCC_GETH_UPSMR_RMM; - } - } - if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { - upsmr |= UCC_GETH_UPSMR_TBIM; - } - if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII) - upsmr |= UCC_GETH_UPSMR_SGMM; - - out_be32(&uf_regs->upsmr, upsmr); - - /* Disable autonegotiation in tbi mode, because by default it - comes up in autonegotiation mode. */ - /* Note that this depends on proper setting in utbipar register. 
*/ - if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { - struct ucc_geth_info *ug_info = ugeth->ug_info; - struct phy_device *tbiphy; - - if (!ug_info->tbi_node) - pr_warn("TBI mode requires that the device tree specify a tbi-handle\n"); - - tbiphy = of_phy_find_device(ug_info->tbi_node); - if (!tbiphy) - pr_warn("Could not get TBI device\n"); - - value = phy_read(tbiphy, ENET_TBI_MII_CR); - value &= ~0x1000; /* Turn off autonegotiation */ - phy_write(tbiphy, ENET_TBI_MII_CR, value); - - put_device(&tbiphy->mdio.dev); - } - - init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); - - ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2); - if (ret_val != 0) { - if (netif_msg_probe(ugeth)) - pr_err("Preamble length must be between 3 and 7 inclusive\n"); - return ret_val; - } - - return 0; + return phy_interface_mode_is_rgmii(interface) || + interface == PHY_INTERFACE_MODE_RMII || + interface == PHY_INTERFACE_MODE_RTBI; } static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth) @@ -1548,107 +1437,6 @@ static void ugeth_activate(struct ucc_geth_private *ugeth) __netdev_watchdog_up(ugeth->ndev); } -/* Called every time the controller might need to be made - * aware of new link state. The PHY code conveys this - * information through variables in the ugeth structure, and this - * function converts those variables into the appropriate - * register values, and can bring down the device if needed. - */ - -static void adjust_link(struct net_device *dev) -{ - struct ucc_geth_private *ugeth = netdev_priv(dev); - struct ucc_geth __iomem *ug_regs; - struct ucc_fast __iomem *uf_regs; - struct phy_device *phydev = ugeth->phydev; - int new_state = 0; - - ug_regs = ugeth->ug_regs; - uf_regs = ugeth->uccf->uf_regs; - - if (phydev->link) { - u32 tempval = in_be32(&ug_regs->maccfg2); - u32 upsmr = in_be32(&uf_regs->upsmr); - /* Now we make sure that we can be in full duplex mode. - * If not, we operate in half-duplex mode. */ - if (phydev->duplex != ugeth->oldduplex) { - new_state = 1; - if (!(phydev->duplex)) - tempval &= ~(MACCFG2_FDX); - else - tempval |= MACCFG2_FDX; - ugeth->oldduplex = phydev->duplex; - } - - if (phydev->speed != ugeth->oldspeed) { - new_state = 1; - switch (phydev->speed) { - case SPEED_1000: - tempval = ((tempval & - ~(MACCFG2_INTERFACE_MODE_MASK)) | - MACCFG2_INTERFACE_MODE_BYTE); - break; - case SPEED_100: - case SPEED_10: - tempval = ((tempval & - ~(MACCFG2_INTERFACE_MODE_MASK)) | - MACCFG2_INTERFACE_MODE_NIBBLE); - /* if reduced mode, re-set UPSMR.R10M */ - if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { - if (phydev->speed == SPEED_10) - upsmr |= UCC_GETH_UPSMR_R10M; - else - upsmr &= ~UCC_GETH_UPSMR_R10M; - } - break; - default: - if (netif_msg_link(ugeth)) - pr_warn( - "%s: Ack! Speed (%d) is not 10/100/1000!", - dev->name, phydev->speed); - break; - } - ugeth->oldspeed = phydev->speed; - } - - if (!ugeth->oldlink) { - new_state = 1; - ugeth->oldlink = 1; - } - - if (new_state) { - /* - * To change the MAC configuration we need to disable - * the controller. 
To do so, we have to either grab - * ugeth->lock, which is a bad idea since 'graceful - * stop' commands might take quite a while, or we can - * quiesce driver's activity. - */ - ugeth_quiesce(ugeth); - ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); - - out_be32(&ug_regs->maccfg2, tempval); - out_be32(&uf_regs->upsmr, upsmr); - - ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); - ugeth_activate(ugeth); - } - } else if (ugeth->oldlink) { - new_state = 1; - ugeth->oldlink = 0; - ugeth->oldspeed = 0; - ugeth->oldduplex = -1; - } - - if (new_state && netif_msg_link(ugeth)) - phy_print_status(phydev); -} - /* Initialize TBI PHY interface for communicating with the * SERDES lynx PHY on the chip. We communicate with this PHY * through the MDIO bus on each controller, treating it as a @@ -1664,8 +1452,7 @@ static void uec_configure_serdes(struct net_device *dev) struct phy_device *tbiphy; if (!ug_info->tbi_node) { - dev_warn(&dev->dev, "SGMII mode requires that the device " - "tree specify a tbi-handle\n"); + dev_warn(&dev->dev, "SGMII mode requires that the device tree specify a tbi-handle\n"); return; } @@ -1696,34 +1483,145 @@ static void uec_configure_serdes(struct net_device *dev) put_device(&tbiphy->mdio.dev); } -/* Configure the PHY for dev. - * returns 0 if success. -1 if failure - */ -static int init_phy(struct net_device *dev) +static void ugeth_mac_link_up(struct phylink_config *config, struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, bool tx_pause, bool rx_pause) { - struct ucc_geth_private *priv = netdev_priv(dev); - struct ucc_geth_info *ug_info = priv->ug_info; - struct phy_device *phydev; + struct net_device *ndev = to_net_dev(config->dev); + struct ucc_geth_private *ugeth = netdev_priv(ndev); + struct ucc_geth_info *ug_info = ugeth->ug_info; + struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; + struct ucc_fast __iomem *uf_regs = ugeth->uccf->uf_regs; + u32 old_maccfg2, maccfg2 = in_be32(&ug_regs->maccfg2); + u32 old_upsmr, upsmr = in_be32(&uf_regs->upsmr); - priv->oldlink = 0; - priv->oldspeed = 0; - priv->oldduplex = -1; + old_maccfg2 = maccfg2; + old_upsmr = upsmr; - phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0, - priv->phy_interface); - if (!phydev) { - dev_err(&dev->dev, "Could not attach to PHY\n"); - return -ENODEV; + /* No length check */ + maccfg2 &= ~MACCFG2_LC; + maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK; + upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M | + UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM); + + if (speed == SPEED_10 || speed == SPEED_100) + maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; + else if (speed == SPEED_1000) + maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; + + maccfg2 |= ug_info->padAndCrc; + + if (phy_interface_mode_is_reduced(interface)) { + + if (interface != PHY_INTERFACE_MODE_RMII) + upsmr |= UCC_GETH_UPSMR_RPM; + + switch (speed) { + case SPEED_10: + upsmr |= UCC_GETH_UPSMR_R10M; + fallthrough; + case SPEED_100: + if (interface != PHY_INTERFACE_MODE_RTBI) + upsmr |= UCC_GETH_UPSMR_RMM; + } } - if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) - uec_configure_serdes(dev); + if (interface == PHY_INTERFACE_MODE_TBI || + interface == PHY_INTERFACE_MODE_RTBI) + upsmr |= UCC_GETH_UPSMR_TBIM; - phy_set_max_speed(phydev, priv->max_speed); + if (interface == PHY_INTERFACE_MODE_SGMII) + upsmr |= UCC_GETH_UPSMR_SGMM; - priv->phydev = phydev; + if (duplex == DUPLEX_HALF) + maccfg2 &= ~(MACCFG2_FDX); + else + maccfg2 |= MACCFG2_FDX; - return 0; + if (maccfg2 != old_maccfg2 || upsmr != old_upsmr) { + /* + * To 
change the MAC configuration we need to disable + * the controller. To do so, we have to either grab + * ugeth->lock, which is a bad idea since 'graceful + * stop' commands might take quite a while, or we can + * quiesce driver's activity. + */ + ugeth_quiesce(ugeth); + ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); + + out_be32(&ug_regs->maccfg2, maccfg2); + out_be32(&uf_regs->upsmr, upsmr); + + ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); + ugeth_activate(ugeth); + } + + if (interface == PHY_INTERFACE_MODE_SGMII) + uec_configure_serdes(ndev); + + if (!phylink_autoneg_inband(mode)) { + ug_info->aufc = 0; + ug_info->receiveFlowControl = rx_pause; + ug_info->transmitFlowControl = tx_pause; + + init_flow_control_params(ug_info->aufc, + ug_info->receiveFlowControl, + ug_info->transmitFlowControl, + ug_info->pausePeriod, + ug_info->extensionField, + &ugeth->uccf->uf_regs->upsmr, + &ugeth->ug_regs->uempr, + &ugeth->ug_regs->maccfg1); + } + + ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); +} + +static void ugeth_mac_link_down(struct phylink_config *config, + unsigned int mode, phy_interface_t interface) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct ucc_geth_private *ugeth = netdev_priv(ndev); + + ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); +} + +static void ugeth_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct ucc_geth_private *ugeth = netdev_priv(ndev); + struct ucc_geth_info *ug_info = ugeth->ug_info; + u16 value; + + if (state->interface == PHY_INTERFACE_MODE_TBI || + state->interface == PHY_INTERFACE_MODE_RTBI) { + struct phy_device *tbiphy; + + if (!ug_info->tbi_node) + pr_warn("TBI mode requires that the device tree specify a tbi-handle\n"); + + tbiphy = of_phy_find_device(ug_info->tbi_node); + if (!tbiphy) + pr_warn("Could not get TBI device\n"); + + value = phy_read(tbiphy, ENET_TBI_MII_CR); + value &= ~0x1000; /* Turn off autonegotiation */ + phy_write(tbiphy, ENET_TBI_MII_CR, value); + + put_device(&tbiphy->mdio.dev); + } + + if (phylink_autoneg_inband(mode)) { + ug_info->aufc = 1; + + init_flow_control_params(ug_info->aufc, 1, 1, + ug_info->pausePeriod, + ug_info->extensionField, + &ugeth->uccf->uf_regs->upsmr, + &ugeth->ug_regs->uempr, + &ugeth->ug_regs->maccfg1); + } } static void ugeth_dump_regs(struct ucc_geth_private *ugeth) @@ -1995,7 +1893,6 @@ static void ucc_geth_set_multi(struct net_device *dev) static void ucc_geth_stop(struct ucc_geth_private *ugeth) { struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; - struct phy_device *phydev = ugeth->phydev; ugeth_vdbg("%s: IN", __func__); @@ -2004,7 +1901,7 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth) * Must be done before disabling the controller * or deadlock may happen. */ - phy_stop(phydev); + phylink_stop(ugeth->phylink); /* Disable the controller */ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); @@ -3246,12 +3143,6 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth) goto err; } - err = adjust_enet_interface(ugeth); - if (err) { - netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n"); - goto err; - } - /* Set MACSTNADDR1, MACSTNADDR2 */ /* For more details see the hardware spec. 
*/ init_mac_station_addr_regs(dev->dev_addr[0], @@ -3263,12 +3154,6 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth) &ugeth->ug_regs->macstnaddr1, &ugeth->ug_regs->macstnaddr2); - err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); - if (err) { - netif_err(ugeth, ifup, dev, "Cannot enable net device, aborting\n"); - goto err; - } - return 0; err: ucc_geth_stop(ugeth); @@ -3291,10 +3176,10 @@ static int ucc_geth_open(struct net_device *dev) return -EINVAL; } - err = init_phy(dev); + err = phylink_of_phy_connect(ugeth->phylink, ugeth->dev->of_node, 0); if (err) { - netif_err(ugeth, ifup, dev, "Cannot initialize PHY, aborting\n"); - return err; + dev_err(&dev->dev, "Could not attach to PHY\n"); + return -ENODEV; } err = ucc_geth_init_mac(ugeth); @@ -3310,13 +3195,13 @@ static int ucc_geth_open(struct net_device *dev) goto err; } - phy_start(ugeth->phydev); + phylink_start(ugeth->phylink); napi_enable(&ugeth->napi); netdev_reset_queue(dev); netif_start_queue(dev); device_set_wakeup_capable(&dev->dev, - qe_alive_during_sleep() || ugeth->phydev->irq); + qe_alive_during_sleep() || dev->phydev->irq); device_set_wakeup_enable(&dev->dev, ugeth->wol_en); return err; @@ -3337,8 +3222,7 @@ static int ucc_geth_close(struct net_device *dev) cancel_work_sync(&ugeth->timeout_work); ucc_geth_stop(ugeth); - phy_disconnect(ugeth->phydev); - ugeth->phydev = NULL; + phylink_disconnect_phy(ugeth->phylink); free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); @@ -3372,7 +3256,7 @@ static void ucc_geth_timeout_work(struct work_struct *work) ucc_geth_stop(ugeth); ucc_geth_init_mac(ugeth); /* Must start PHY here */ - phy_start(ugeth->phydev); + phylink_start(ugeth->phylink); netif_tx_start_all_queues(dev); } @@ -3397,6 +3281,7 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state) { struct net_device *ndev = platform_get_drvdata(ofdev); struct ucc_geth_private *ugeth = netdev_priv(ndev); + bool mac_wol = false; if (!netif_running(ndev)) return 0; @@ -3410,14 +3295,17 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state) */ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); - if (ugeth->wol_en & WAKE_MAGIC) { + if (ugeth->wol_en & WAKE_MAGIC && !ugeth->phy_wol_en) { setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD); setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE); ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX); - } else if (!(ugeth->wol_en & WAKE_PHY)) { - phy_stop(ugeth->phydev); + mac_wol = true; } + rtnl_lock(); + phylink_suspend(ugeth->phylink, mac_wol); + rtnl_unlock(); + return 0; } @@ -3451,12 +3339,9 @@ static int ucc_geth_resume(struct platform_device *ofdev) } } - ugeth->oldlink = 0; - ugeth->oldspeed = 0; - ugeth->oldduplex = -1; - - phy_stop(ugeth->phydev); - phy_start(ugeth->phydev); + rtnl_lock(); + phylink_resume(ugeth->phylink); + rtnl_unlock(); napi_enable(&ugeth->napi); netif_device_attach(ndev); @@ -3469,32 +3354,6 @@ static int ucc_geth_resume(struct platform_device *ofdev) #define ucc_geth_resume NULL #endif -static phy_interface_t to_phy_interface(const char *phy_connection_type) -{ - if (strcasecmp(phy_connection_type, "mii") == 0) - return PHY_INTERFACE_MODE_MII; - if (strcasecmp(phy_connection_type, "gmii") == 0) - return PHY_INTERFACE_MODE_GMII; - if (strcasecmp(phy_connection_type, "tbi") == 0) - return PHY_INTERFACE_MODE_TBI; - if (strcasecmp(phy_connection_type, "rmii") == 0) - return PHY_INTERFACE_MODE_RMII; - if (strcasecmp(phy_connection_type, "rgmii") == 0) - return PHY_INTERFACE_MODE_RGMII; - if 
(strcasecmp(phy_connection_type, "rgmii-id") == 0) - return PHY_INTERFACE_MODE_RGMII_ID; - if (strcasecmp(phy_connection_type, "rgmii-txid") == 0) - return PHY_INTERFACE_MODE_RGMII_TXID; - if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0) - return PHY_INTERFACE_MODE_RGMII_RXID; - if (strcasecmp(phy_connection_type, "rtbi") == 0) - return PHY_INTERFACE_MODE_RTBI; - if (strcasecmp(phy_connection_type, "sgmii") == 0) - return PHY_INTERFACE_MODE_SGMII; - - return PHY_INTERFACE_MODE_MII; -} - static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct ucc_geth_private *ugeth = netdev_priv(dev); @@ -3502,10 +3361,7 @@ static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!netif_running(dev)) return -EINVAL; - if (!ugeth->phydev) - return -ENODEV; - - return phy_mii_ioctl(ugeth->phydev, rq, cmd); + return phylink_mii_ioctl(ugeth->phylink, rq, cmd); } static const struct net_device_ops ucc_geth_netdev_ops = { @@ -3513,7 +3369,6 @@ static const struct net_device_ops ucc_geth_netdev_ops = { .ndo_stop = ucc_geth_close, .ndo_start_xmit = ucc_geth_start_xmit, .ndo_validate_addr = eth_validate_addr, - .ndo_change_carrier = fixed_phy_change_carrier, .ndo_set_mac_address = ucc_geth_set_mac_addr, .ndo_set_rx_mode = ucc_geth_set_multi, .ndo_tx_timeout = ucc_geth_timeout, @@ -3553,6 +3408,12 @@ static int ucc_geth_parse_clock(struct device_node *np, const char *which, return 0; } +struct phylink_mac_ops ugeth_mac_ops = { + .mac_link_up = ugeth_mac_link_up, + .mac_link_down = ugeth_mac_link_down, + .mac_config = ugeth_mac_config, +}; + static int ucc_geth_probe(struct platform_device* ofdev) { struct device *device = &ofdev->dev; @@ -3560,23 +3421,12 @@ static int ucc_geth_probe(struct platform_device* ofdev) struct net_device *dev = NULL; struct ucc_geth_private *ugeth = NULL; struct ucc_geth_info *ug_info; + struct device_node *phy_node; + struct phylink *phylink; struct resource res; - int err, ucc_num, max_speed = 0; + int err, ucc_num; const unsigned int *prop; phy_interface_t phy_interface; - static const int enet_to_speed[] = { - SPEED_10, SPEED_10, SPEED_10, - SPEED_100, SPEED_100, SPEED_100, - SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000, - }; - static const phy_interface_t enet_to_phy_interface[] = { - PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII, - PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII, - PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII, - PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII, - PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI, - PHY_INTERFACE_MODE_SGMII, - }; ugeth_vdbg("%s: IN", __func__); @@ -3612,57 +3462,35 @@ static int ucc_geth_probe(struct platform_device* ofdev) ug_info->uf_info.regs = res.start; ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); - ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0); - if (!ug_info->phy_node && of_phy_is_fixed_link(np)) { - /* - * In the case of a fixed PHY, the DT node associated - * to the PHY is the Ethernet MAC DT node. - */ - err = of_phy_register_fixed_link(np); - if (err) - return err; - ug_info->phy_node = of_node_get(np); - } - /* Find the TBI PHY node. 
If it's not there, we don't support SGMII */ ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0); - /* get the phy interface type, or default to MII */ - prop = of_get_property(np, "phy-connection-type", NULL); - if (!prop) { - /* handle interface property present in old trees */ - prop = of_get_property(ug_info->phy_node, "interface", NULL); - if (prop != NULL) { - phy_interface = enet_to_phy_interface[*prop]; - max_speed = enet_to_speed[*prop]; - } else - phy_interface = PHY_INTERFACE_MODE_MII; - } else { - phy_interface = to_phy_interface((const char *)prop); - } - - /* get speed, or derive from PHY interface */ - if (max_speed == 0) - switch (phy_interface) { - case PHY_INTERFACE_MODE_GMII: - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - case PHY_INTERFACE_MODE_TBI: - case PHY_INTERFACE_MODE_RTBI: - case PHY_INTERFACE_MODE_SGMII: - max_speed = SPEED_1000; - break; - default: - max_speed = SPEED_100; - break; + phy_node = of_parse_phandle(np, "phy-handle", 0); + if (phy_node) { + prop = of_get_property(phy_node, "interface", NULL); + if (prop) { + dev_err(&ofdev->dev, + "Device-tree property 'interface' is no longer supported. Please use 'phy-connection-type' instead."); + of_node_put(phy_node); + err = -EINVAL; + goto err_put_tbi; } + of_node_put(phy_node); + } + + err = of_get_phy_mode(np, &phy_interface); + if (err) { + dev_err(&ofdev->dev, "Invalid phy-connection-type"); + goto err_put_tbi; + } - if (max_speed == SPEED_1000) { + if (phy_interface == PHY_INTERFACE_MODE_GMII || + phy_interface_mode_is_rgmii(phy_interface) || + phy_interface == PHY_INTERFACE_MODE_TBI || + phy_interface == PHY_INTERFACE_MODE_RTBI || + phy_interface == PHY_INTERFACE_MODE_SGMII) { unsigned int snums = qe_get_num_of_snums(); - /* configure muram FIFOs for gigabit operation */ ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT; ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT; ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT; @@ -3691,7 +3519,7 @@ static int ucc_geth_probe(struct platform_device* ofdev) dev = devm_alloc_etherdev(&ofdev->dev, sizeof(*ugeth)); if (!dev) { err = -ENOMEM; - goto err_deregister_fixed_link; + goto err_put_tbi; } ugeth = netdev_priv(dev); @@ -3718,23 +3546,50 @@ static int ucc_geth_probe(struct platform_device* ofdev) dev->max_mtu = 1518; ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT); - ugeth->phy_interface = phy_interface; - ugeth->max_speed = max_speed; - /* Carrier starts down, phylib will bring it up */ - netif_carrier_off(dev); + ugeth->phylink_config.dev = &dev->dev; + ugeth->phylink_config.type = PHYLINK_NETDEV; + + ugeth->phylink_config.mac_capabilities = + MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD; + + __set_bit(PHY_INTERFACE_MODE_MII, + ugeth->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_RMII, + ugeth->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_GMII, + ugeth->phylink_config.supported_interfaces); + phy_interface_set_rgmii(ugeth->phylink_config.supported_interfaces); + + if (ug_info->tbi_node) { + __set_bit(PHY_INTERFACE_MODE_SGMII, + ugeth->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_TBI, + ugeth->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_RTBI, + ugeth->phylink_config.supported_interfaces); + } + + phylink = phylink_create(&ugeth->phylink_config, dev_fwnode(&dev->dev), + phy_interface, &ugeth_mac_ops); + if (IS_ERR(phylink)) { + err = 
PTR_ERR(phylink); + goto err_put_tbi; + } + + ugeth->phylink = phylink; err = devm_register_netdev(&ofdev->dev, dev); if (err) { if (netif_msg_probe(ugeth)) pr_err("%s: Cannot register net device, aborting\n", dev->name); - goto err_deregister_fixed_link; + goto err_destroy_phylink; } err = of_get_ethdev_address(np, dev); if (err == -EPROBE_DEFER) - goto err_deregister_fixed_link; + goto err_destroy_phylink; ugeth->ug_info = ug_info; ugeth->dev = device; @@ -3743,11 +3598,11 @@ static int ucc_geth_probe(struct platform_device* ofdev) return 0; -err_deregister_fixed_link: - if (of_phy_is_fixed_link(np)) - of_phy_deregister_fixed_link(np); +err_destroy_phylink: + phylink_destroy(phylink); +err_put_tbi: of_node_put(ug_info->tbi_node); - of_node_put(ug_info->phy_node); + return err; } @@ -3755,13 +3610,10 @@ static void ucc_geth_remove(struct platform_device* ofdev) { struct net_device *dev = platform_get_drvdata(ofdev); struct ucc_geth_private *ugeth = netdev_priv(dev); - struct device_node *np = ofdev->dev.of_node; ucc_geth_memclean(ugeth); - if (of_phy_is_fixed_link(np)) - of_phy_deregister_fixed_link(np); + phylink_destroy(ugeth->phylink); of_node_put(ugeth->ug_info->tbi_node); - of_node_put(ugeth->ug_info->phy_node); } static const struct of_device_id ucc_geth_match[] = { diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h index 4294ed096ebb..38789faae706 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.h +++ b/drivers/net/ethernet/freescale/ucc_geth.h @@ -16,6 +16,7 @@ #include <linux/kernel.h> #include <linux/list.h> +#include <linux/phylink.h> #include <linux/if_ether.h> #include <soc/fsl/qe/immap_qe.h> @@ -921,7 +922,8 @@ struct ucc_geth_hardware_statistics { #define UCC_GETH_UPSMR_INIT UCC_GETH_UPSMR_RES1 #define UCC_GETH_MACCFG1_INIT 0 -#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1) +#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1 | \ + (7 << MACCFG2_PREL_SHIFT)) /* Ethernet Address Type. 
*/ enum enet_addr_type { @@ -1073,6 +1075,9 @@ struct ucc_geth_tad_params { u16 vid; }; +struct phylink; +struct phylink_config; + /* GETH protocol initialization structure */ struct ucc_geth_info { struct ucc_fast_info uf_info; @@ -1088,7 +1093,6 @@ struct ucc_geth_info { u8 miminumInterFrameGapEnforcement; u8 backToBackInterFrameGap; int ipAddressAlignment; - int lengthCheckRx; u32 mblinterval; u16 nortsrbytetime; u8 fracsiz; @@ -1114,7 +1118,6 @@ struct ucc_geth_info { int transmitFlowControl; u8 maxGroupAddrInHash; u8 maxIndAddrInHash; - u8 prel; u16 maxFrameLength; u16 minFrameLength; u16 maxD1Length; @@ -1125,7 +1128,6 @@ struct ucc_geth_info { u32 eventRegMask; u16 pausePeriod; u16 extensionField; - struct device_node *phy_node; struct device_node *tbi_node; u8 weightfactor[NUM_TX_QUEUES]; u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES]; @@ -1210,14 +1212,12 @@ struct ucc_geth_private { u16 skb_dirtytx[NUM_TX_QUEUES]; struct ugeth_mii_info *mii_info; - struct phy_device *phydev; - phy_interface_t phy_interface; - int max_speed; uint32_t msg_enable; - int oldspeed; - int oldduplex; - int oldlink; - int wol_en; + u32 wol_en; + u32 phy_wol_en; + + struct phylink *phylink; + struct phylink_config phylink_config; struct device_node *node; }; diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c index 699f346faf5c..1fb49e5a414a 100644 --- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c +++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c @@ -104,14 +104,8 @@ static int uec_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct ucc_geth_private *ugeth = netdev_priv(netdev); - struct phy_device *phydev = ugeth->phydev; - if (!phydev) - return -ENODEV; - - phy_ethtool_ksettings_get(phydev, cmd); - - return 0; + return phylink_ethtool_ksettings_get(ugeth->phylink, cmd); } static int @@ -119,12 +113,8 @@ uec_set_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { struct ucc_geth_private *ugeth = netdev_priv(netdev); - struct phy_device *phydev = ugeth->phydev; - if (!phydev) - return -ENODEV; - - return phy_ethtool_ksettings_set(phydev, cmd); + return phylink_ethtool_ksettings_set(ugeth->phylink, cmd); } static void @@ -133,12 +123,7 @@ uec_get_pauseparam(struct net_device *netdev, { struct ucc_geth_private *ugeth = netdev_priv(netdev); - pause->autoneg = ugeth->phydev->autoneg; - - if (ugeth->ug_info->receiveFlowControl) - pause->rx_pause = 1; - if (ugeth->ug_info->transmitFlowControl) - pause->tx_pause = 1; + return phylink_ethtool_get_pauseparam(ugeth->phylink, pause); } static int @@ -146,30 +131,11 @@ uec_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct ucc_geth_private *ugeth = netdev_priv(netdev); - int ret = 0; ugeth->ug_info->receiveFlowControl = pause->rx_pause; ugeth->ug_info->transmitFlowControl = pause->tx_pause; - if (ugeth->phydev->autoneg) { - if (netif_running(netdev)) { - /* FIXME: automatically restart */ - netdev_info(netdev, "Please re-open the interface\n"); - } - } else { - struct ucc_geth_info *ug_info = ugeth->ug_info; - - ret = init_flow_control_params(ug_info->aufc, - ug_info->receiveFlowControl, - ug_info->transmitFlowControl, - ug_info->pausePeriod, - ug_info->extensionField, - &ugeth->uccf->uf_regs->upsmr, - &ugeth->ug_regs->uempr, - &ugeth->ug_regs->maccfg1); - } - - return ret; + return phylink_ethtool_set_pauseparam(ugeth->phylink, pause); } static uint32_t @@ -343,28 +309,42 @@ uec_get_drvinfo(struct 
net_device *netdev, static void uec_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct ucc_geth_private *ugeth = netdev_priv(netdev); - struct phy_device *phydev = ugeth->phydev; - if (phydev && phydev->irq) - wol->supported |= WAKE_PHY; + phylink_ethtool_get_wol(ugeth->phylink, wol); + if (qe_alive_during_sleep()) wol->supported |= WAKE_MAGIC; - wol->wolopts = ugeth->wol_en; + wol->wolopts |= ugeth->wol_en; } static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct ucc_geth_private *ugeth = netdev_priv(netdev); - struct phy_device *phydev = ugeth->phydev; + int ret = 0; - if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) - return -EINVAL; - else if (wol->wolopts & WAKE_PHY && (!phydev || !phydev->irq)) + ret = phylink_ethtool_set_wol(ugeth->phylink, wol); + if (ret == -EOPNOTSUPP) { + ugeth->phy_wol_en = 0; + } else if (ret) { + return ret; + } else { + ugeth->phy_wol_en = wol->wolopts; + goto out; + } + + /* If the PHY isn't handling the WoL and the MAC is asked to more than + * WAKE_MAGIC, error-out + */ + if (!ugeth->phy_wol_en && + wol->wolopts & ~WAKE_MAGIC) return -EINVAL; - else if (wol->wolopts & WAKE_MAGIC && !qe_alive_during_sleep()) + + if (wol->wolopts & WAKE_MAGIC && + !qe_alive_during_sleep()) return -EINVAL; +out: ugeth->wol_en = wol->wolopts; device_set_wakeup_enable(&netdev->dev, ugeth->wol_en); diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index 060e0e674938..aa7d723011d0 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -1128,20 +1128,6 @@ int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id) return gve_adminq_execute_cmd(priv, &cmd); } -int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu) -{ - union gve_adminq_command cmd; - - memset(&cmd, 0, sizeof(cmd)); - cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER); - cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) { - .parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU), - .parameter_value = cpu_to_be64(mtu), - }; - - return gve_adminq_execute_cmd(priv, &cmd); -} - int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len, dma_addr_t stats_report_addr, u64 interval) { diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h index 863683de9694..228217458275 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.h +++ b/drivers/net/ethernet/google/gve/gve_adminq.h @@ -612,7 +612,6 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id); int gve_adminq_register_page_list(struct gve_priv *priv, struct gve_queue_page_list *qpl); int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id); -int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu); int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len, dma_addr_t stats_report_addr, u64 interval); int gve_adminq_verify_driver_compatibility(struct gve_priv *priv, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c index f81a43d2cdfc..486fb0e20bef 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c @@ -469,7 +469,7 @@ int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en) err = HINIC_MGMT_CMD_UNSUPPORTED; } else if (err || !out_size || vlan_filter.status) { dev_err(&pdev->dev, - "Failed to set vlan fliter, err: %d, status: 0x%x, out size: 
0x%x\n", + "Failed to set vlan filter, err: %d, status: 0x%x, out size: 0x%x\n", err, vlan_filter.status, out_size); err = -EINVAL; } diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 20bc40eec487..24ec9a4f1ffa 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -292,6 +292,7 @@ config ICE select DIMLIB select LIBIE select NET_DEVLINK + select PACKING select PLDMFW select DPLL help diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 1489a8ceec51..3bf05b135b35 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -12,6 +12,13 @@ #define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9 #define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728 +#define ICE_RXQ_CTX_SIZE_DWORDS 8 +#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32)) +#define ICE_TXQ_CTX_SZ 22 + +typedef struct __packed { u8 buf[ICE_RXQ_CTX_SZ]; } ice_rxq_ctx_buf_t; +typedef struct __packed { u8 buf[ICE_TXQ_CTX_SZ]; } ice_txq_ctx_buf_t; + struct ice_aqc_generic { __le32 param0; __le32 param1; @@ -2084,10 +2091,10 @@ struct ice_aqc_add_txqs_perq { __le16 txq_id; u8 rsvd[2]; __le32 q_teid; - u8 txq_ctx[22]; + ice_txq_ctx_buf_t txq_ctx; u8 rsvd2[2]; struct ice_aqc_txsched_elem info; -}; +} __packed; /* The format of the command buffer for Add Tx LAN Queues (0x0C30) * is an array of the following structs. Please note that the length of diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 82a9cd4ec7ae..b2af8e3586f7 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -454,6 +454,9 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) /* Rx queue threshold in units of 64 */ rlan_ctx.lrxqthresh = 1; + /* Enable descriptor prefetch */ + rlan_ctx.prefena = 1; + /* PF acts as uplink for switchdev; set flex descriptor with src_vsi * metadata and flags to allow redirecting to PR netdev */ @@ -910,8 +913,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring, ice_setup_tx_ctx(ring, &tlan_ctx, pf_q); /* copy context contents into the qg_buf */ qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); - ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, - ice_tlan_ctx_info); + ice_pack_txq_ctx(&tlan_ctx, &qg_buf->txqs[0].txq_ctx); /* init queue specific tail reg. It is referred as * transmit comm scheduler queue doorbell. 
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 496d86cbd13f..f89bc6ede315 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -6,6 +6,7 @@ #include "ice_adminq_cmd.h" #include "ice_flow.h" #include "ice_ptp_hw.h" +#include <linux/packing.h> #define ICE_PF_RESET_WAIT_COUNT 300 #define ICE_MAX_NETLIST_SIZE 10 @@ -1360,39 +1361,31 @@ int ice_reset(struct ice_hw *hw, enum ice_reset_req req) } /** - * ice_copy_rxq_ctx_to_hw + * ice_copy_rxq_ctx_to_hw - Copy packed Rx queue context to HW registers * @hw: pointer to the hardware structure - * @ice_rxq_ctx: pointer to the rxq context + * @rxq_ctx: pointer to the packed Rx queue context * @rxq_index: the index of the Rx queue - * - * Copies rxq context from dense structure to HW register space */ -static int -ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index) +static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, + const ice_rxq_ctx_buf_t *rxq_ctx, + u32 rxq_index) { - u8 i; - - if (!ice_rxq_ctx) - return -EINVAL; - - if (rxq_index > QRX_CTRL_MAX_INDEX) - return -EINVAL; - /* Copy each dword separately to HW */ - for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) { - wr32(hw, QRX_CONTEXT(i, rxq_index), - *((u32 *)(ice_rxq_ctx + (i * sizeof(u32))))); + for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) { + u32 ctx = ((const u32 *)rxq_ctx)[i]; - ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, - *((u32 *)(ice_rxq_ctx + (i * sizeof(u32))))); - } + wr32(hw, QRX_CONTEXT(i, rxq_index), ctx); - return 0; + ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, ctx); + } } +#define ICE_CTX_STORE(struct_name, struct_field, width, lsb) \ + PACKED_FIELD((lsb) + (width) - 1, (lsb), struct struct_name, struct_field) + /* LAN Rx Queue Context */ -static const struct ice_ctx_ele ice_rlan_ctx_info[] = { - /* Field Width LSB */ +static const struct packed_field_u8 ice_rlan_ctx_fields[] = { + /* Field Width LSB */ ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0), ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13), ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32), @@ -1413,35 +1406,50 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = { ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196), ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198), ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201), - { 0 } }; /** - * ice_write_rxq_ctx + * ice_pack_rxq_ctx - Pack Rx queue context into a HW buffer + * @ctx: the Rx queue context to pack + * @buf: the HW buffer to pack into + * + * Pack the Rx queue context from the CPU-friendly unpacked buffer into its + * bit-packed HW layout. + */ +static void ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx, + ice_rxq_ctx_buf_t *buf) +{ + pack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields, + QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST); +} + +/** + * ice_write_rxq_ctx - Write Rx Queue context to hardware * @hw: pointer to the hardware structure - * @rlan_ctx: pointer to the rxq context + * @rlan_ctx: pointer to the unpacked Rx queue context * @rxq_index: the index of the Rx queue * - * Converts rxq context from sparse to dense structure and then writes - * it to HW register space and enables the hardware to prefetch descriptors - * instead of only fetching them on demand + * Pack the sparse Rx Queue context into dense hardware format and write it + * into the HW register space. + * + * Return: 0 on success, or -EINVAL if the Rx queue index is invalid. 
*/ int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, u32 rxq_index) { - u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 }; + ice_rxq_ctx_buf_t buf = {}; - if (!rlan_ctx) + if (rxq_index > QRX_CTRL_MAX_INDEX) return -EINVAL; - rlan_ctx->prefena = 1; + ice_pack_rxq_ctx(rlan_ctx, &buf); + ice_copy_rxq_ctx_to_hw(hw, &buf, rxq_index); - ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info); - return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index); + return 0; } /* LAN Tx Queue Context */ -const struct ice_ctx_ele ice_tlan_ctx_info[] = { +static const struct packed_field_u8 ice_tlan_ctx_fields[] = { /* Field Width LSB */ ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0), ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57), @@ -1470,10 +1478,22 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = { ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165), ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166), ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168), - ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171), - { 0 } }; +/** + * ice_pack_txq_ctx - Pack Tx queue context into a HW buffer + * @ctx: the Tx queue context to pack + * @buf: the HW buffer to pack into + * + * Pack the Tx queue context from the CPU-friendly unpacked buffer into its + * bit-packed HW layout. + */ +void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf) +{ + pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields, + QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST); +} + /* Sideband Queue command wrappers */ /** @@ -4558,205 +4578,6 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, /* End of FW Admin Queue command wrappers */ /** - * ice_pack_ctx_byte - write a byte to a packed context structure - * @src_ctx: unpacked source context structure - * @dest_ctx: packed destination context data - * @ce_info: context element description - */ -static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - u8 src_byte, dest_byte, mask; - u8 *from, *dest; - u16 shift_width; - - /* copy from the next struct field */ - from = src_ctx + ce_info->offset; - - /* prepare the bits and mask */ - shift_width = ce_info->lsb % 8; - mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); - - src_byte = *from; - src_byte <<= shift_width; - src_byte &= mask; - - /* get the current bits from the target bit string */ - dest = dest_ctx + (ce_info->lsb / 8); - - memcpy(&dest_byte, dest, sizeof(dest_byte)); - - dest_byte &= ~mask; /* get the bits not changing */ - dest_byte |= src_byte; /* add in the new bits */ - - /* put it all back */ - memcpy(dest, &dest_byte, sizeof(dest_byte)); -} - -/** - * ice_pack_ctx_word - write a word to a packed context structure - * @src_ctx: unpacked source context structure - * @dest_ctx: packed destination context data - * @ce_info: context element description - */ -static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - u16 src_word, mask; - __le16 dest_word; - u8 *from, *dest; - u16 shift_width; - - /* copy from the next struct field */ - from = src_ctx + ce_info->offset; - - /* prepare the bits and mask */ - shift_width = ce_info->lsb % 8; - mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); - - /* don't swizzle the bits until after the mask because the mask bits - * will be in a different bit position on big endian machines - */ - src_word = *(u16 *)from; - src_word <<= shift_width; - src_word &= mask; - - /* get the current bits from the target bit string */ - dest = dest_ctx + (ce_info->lsb / 8); - 
- memcpy(&dest_word, dest, sizeof(dest_word)); - - dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ - dest_word |= cpu_to_le16(src_word); /* add in the new bits */ - - /* put it all back */ - memcpy(dest, &dest_word, sizeof(dest_word)); -} - -/** - * ice_pack_ctx_dword - write a dword to a packed context structure - * @src_ctx: unpacked source context structure - * @dest_ctx: packed destination context data - * @ce_info: context element description - */ -static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - u32 src_dword, mask; - __le32 dest_dword; - u8 *from, *dest; - u16 shift_width; - - /* copy from the next struct field */ - from = src_ctx + ce_info->offset; - - /* prepare the bits and mask */ - shift_width = ce_info->lsb % 8; - mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); - - /* don't swizzle the bits until after the mask because the mask bits - * will be in a different bit position on big endian machines - */ - src_dword = *(u32 *)from; - src_dword <<= shift_width; - src_dword &= mask; - - /* get the current bits from the target bit string */ - dest = dest_ctx + (ce_info->lsb / 8); - - memcpy(&dest_dword, dest, sizeof(dest_dword)); - - dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ - dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ - - /* put it all back */ - memcpy(dest, &dest_dword, sizeof(dest_dword)); -} - -/** - * ice_pack_ctx_qword - write a qword to a packed context structure - * @src_ctx: unpacked source context structure - * @dest_ctx: packed destination context data - * @ce_info: context element description - */ -static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - u64 src_qword, mask; - __le64 dest_qword; - u8 *from, *dest; - u16 shift_width; - - /* copy from the next struct field */ - from = src_ctx + ce_info->offset; - - /* prepare the bits and mask */ - shift_width = ce_info->lsb % 8; - mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width); - - /* don't swizzle the bits until after the mask because the mask bits - * will be in a different bit position on big endian machines - */ - src_qword = *(u64 *)from; - src_qword <<= shift_width; - src_qword &= mask; - - /* get the current bits from the target bit string */ - dest = dest_ctx + (ce_info->lsb / 8); - - memcpy(&dest_qword, dest, sizeof(dest_qword)); - - dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ - dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ - - /* put it all back */ - memcpy(dest, &dest_qword, sizeof(dest_qword)); -} - -/** - * ice_set_ctx - set context bits in packed structure - * @hw: pointer to the hardware structure - * @src_ctx: pointer to a generic non-packed context structure - * @dest_ctx: pointer to memory for the packed structure - * @ce_info: List of Rx context elements - */ -int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - int f; - - for (f = 0; ce_info[f].width; f++) { - /* We have to deal with each element of the FW response - * using the correct size so that we are correct regardless - * of the endianness of the machine. - */ - if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { - ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... 
skipping write\n", - f, ce_info[f].width, ce_info[f].size_of); - continue; - } - switch (ce_info[f].size_of) { - case sizeof(u8): - ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]); - break; - case sizeof(u16): - ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]); - break; - case sizeof(u32): - ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]); - break; - case sizeof(u64): - ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]); - break; - default: - return -EINVAL; - } - } - - return 0; -} - -/** * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC * @hw: pointer to the HW struct * @vsi_handle: software VSI handle diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 27208a60cece..a68bea3934e3 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -92,9 +92,8 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq); int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); -extern const struct ice_ctx_ele ice_tlan_ctx_info[]; -int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info); + +void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf); extern struct mutex ice_global_cfg_lock_sw; diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 611577ebc29d..1479b45738af 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -371,29 +371,21 @@ enum ice_rx_flex_desc_status_error_1_bits { ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */ }; -#define ICE_RXQ_CTX_SIZE_DWORDS 8 -#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32)) #define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22 #define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5 #define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800)) -/* RLAN Rx queue context data - * - * The sizes of the variables may be larger than needed due to crossing byte - * boundaries. If we do not have the width of the variable set to the correct - * size then we could end up shifting bits off the top of the variable when the - * variable is at the top of a byte and crosses over into the next byte. 
- */ +/* RLAN Rx queue context data */ struct ice_rlan_ctx { u16 head; - u16 cpuid; /* bigger than needed, see above for reason */ + u8 cpuid; #define ICE_RLAN_BASE_S 7 u64 base; u16 qlen; #define ICE_RLAN_CTX_DBUF_S 7 - u16 dbuf; /* bigger than needed, see above for reason */ + u8 dbuf; #define ICE_RLAN_CTX_HBUF_S 6 - u16 hbuf; /* bigger than needed, see above for reason */ + u8 hbuf; u8 dtype; u8 dsize; u8 crcstrip; @@ -401,29 +393,15 @@ struct ice_rlan_ctx { u8 hsplit_0; u8 hsplit_1; u8 showiv; - u32 rxmax; /* bigger than needed, see above for reason */ + u16 rxmax; u8 tphrdesc_ena; u8 tphwdesc_ena; u8 tphdata_ena; u8 tphhead_ena; - u16 lrxqthresh; /* bigger than needed, see above for reason */ + u8 lrxqthresh; u8 prefena; /* NOTE: normally must be set to 1 at init */ }; -struct ice_ctx_ele { - u16 offset; - u16 size_of; - u16 width; - u16 lsb; -}; - -#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \ - .offset = offsetof(struct _struct, _ele), \ - .size_of = sizeof_field(struct _struct, _ele), \ - .width = _width, \ - .lsb = _lsb, \ -} - /* for hsplit_0 field of Rx RLAN context */ enum ice_rlan_ctx_rx_hsplit_0 { ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0, @@ -551,18 +529,12 @@ enum ice_tx_ctx_desc_eipt_offload { #define ICE_LAN_TXQ_MAX_QGRPS 127 #define ICE_LAN_TXQ_MAX_QDIS 1023 -/* Tx queue context data - * - * The sizes of the variables may be larger than needed due to crossing byte - * boundaries. If we do not have the width of the variable set to the correct - * size then we could end up shifting bits off the top of the variable when the - * variable is at the top of a byte and crosses over into the next byte. - */ +/* Tx queue context data */ struct ice_tlan_ctx { #define ICE_TLAN_CTX_BASE_S 7 u64 base; /* base is defined in 128-byte units */ u8 port_num; - u16 cgd_num; /* bigger than needed, see above for reason */ + u8 cgd_num; u8 pf_num; u16 vmvf_num; u8 vmvf_type; @@ -573,7 +545,7 @@ struct ice_tlan_ctx { u8 tsyn_ena; u8 internal_usage_flag; u8 alt_vlan; - u16 cpuid; /* bigger than needed, see above for reason */ + u8 cpuid; u8 wb_mode; u8 tphrd_desc; u8 tphrd; @@ -582,7 +554,7 @@ struct ice_tlan_ctx { u16 qnum_in_func; u8 itr_notification_mode; u8 adjust_prof_id; - u32 qlen; /* bigger than needed, see above for reason */ + u16 qlen; u8 quanta_prof_idx; u8 tso_ena; u16 tso_qnum; @@ -590,7 +562,6 @@ struct ice_tlan_ctx { u8 drop_ena; u8 cache_prof_idx; u8 pkt_shaper_prof_idx; - u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! 
*/ }; #endif /* _ICE_LAN_TX_RX_H_ */ diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 660dff5426e7..83ce3bfefa5c 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -90,7 +90,6 @@ struct ltq_etop_priv { struct net_device *netdev; struct platform_device *pdev; struct ltq_eth_data *pldata; - struct resource *res; struct mii_bus *mii_bus; @@ -643,31 +642,14 @@ ltq_etop_probe(struct platform_device *pdev) { struct net_device *dev; struct ltq_etop_priv *priv; - struct resource *res; int err; int i; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "failed to get etop resource\n"); - err = -ENOENT; - goto err_out; - } - - res = devm_request_mem_region(&pdev->dev, res->start, - resource_size(res), dev_name(&pdev->dev)); - if (!res) { - dev_err(&pdev->dev, "failed to request etop resource\n"); - err = -EBUSY; - goto err_out; - } - - ltq_etop_membase = devm_ioremap(&pdev->dev, res->start, - resource_size(res)); - if (!ltq_etop_membase) { + ltq_etop_membase = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ltq_etop_membase)) { dev_err(&pdev->dev, "failed to remap etop engine %d\n", pdev->id); - err = -ENOMEM; + err = PTR_ERR(ltq_etop_membase); goto err_out; } @@ -679,7 +661,6 @@ ltq_etop_probe(struct platform_device *pdev) dev->netdev_ops = <q_eth_netdev_ops; dev->ethtool_ops = <q_etop_ethtool_ops; priv = netdev_priv(dev); - priv->res = res; priv->pdev = pdev; priv->pldata = dev_get_platdata(&pdev->dev); priv->netdev = dev; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 1fb285fa0bdb..fe6261b81540 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3960,20 +3960,27 @@ static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs) return container_of(pcs, struct mvneta_port, phylink_pcs); } -static int mvneta_pcs_validate(struct phylink_pcs *pcs, - unsigned long *supported, - const struct phylink_link_state *state) +static unsigned int mvneta_pcs_inband_caps(struct phylink_pcs *pcs, + phy_interface_t interface) { - /* We only support QSGMII, SGMII, 802.3z and RGMII modes. - * When in 802.3z mode, we must have AN enabled: + /* When operating in an 802.3z mode, we must have AN enabled: * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... * When <PortType> = 1 (1000BASE-X) this field must be set to 1." + * Therefore, inband is "required". */ - if (phy_interface_mode_is_8023z(state->interface) && - !phylink_test(state->advertising, Autoneg)) - return -EINVAL; + if (phy_interface_mode_is_8023z(interface)) + return LINK_INBAND_ENABLE; - return 0; + /* QSGMII, SGMII and RGMII can be configured to use inband + * signalling of the AN result. Indicate these as "possible". + */ + if (interface == PHY_INTERFACE_MODE_SGMII || + interface == PHY_INTERFACE_MODE_QSGMII || + phy_interface_mode_is_rgmii(interface)) + return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE; + + /* For any other modes, indicate that inband is not supported. 
*/ + return LINK_INBAND_DISABLE; } static void mvneta_pcs_get_state(struct phylink_pcs *pcs, @@ -4071,7 +4078,7 @@ static void mvneta_pcs_an_restart(struct phylink_pcs *pcs) } static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = { - .pcs_validate = mvneta_pcs_validate, + .pcs_inband_caps = mvneta_pcs_inband_caps, .pcs_get_state = mvneta_pcs_get_state, .pcs_config = mvneta_pcs_config, .pcs_an_restart = mvneta_pcs_an_restart, diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 571631a30320..f85229a30844 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -6224,19 +6224,26 @@ static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = { .pcs_config = mvpp2_xlg_pcs_config, }; -static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs, - unsigned long *supported, - const struct phylink_link_state *state) +static unsigned int mvpp2_gmac_pcs_inband_caps(struct phylink_pcs *pcs, + phy_interface_t interface) { - /* When in 802.3z mode, we must have AN enabled: + /* When operating in an 802.3z mode, we must have AN enabled: * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... * When <PortType> = 1 (1000BASE-X) this field must be set to 1. + * Therefore, inband is "required". */ - if (phy_interface_mode_is_8023z(state->interface) && - !phylink_test(state->advertising, Autoneg)) - return -EINVAL; + if (phy_interface_mode_is_8023z(interface)) + return LINK_INBAND_ENABLE; - return 0; + /* SGMII and RGMII can be configured to use inband signalling of the + * AN result. Indicate these as "possible". + */ + if (interface == PHY_INTERFACE_MODE_SGMII || + phy_interface_mode_is_rgmii(interface)) + return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE; + + /* For any other modes, indicate that inband is not supported. 
*/ + return LINK_INBAND_DISABLE; } static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs, @@ -6343,7 +6350,7 @@ static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs) } static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = { - .pcs_validate = mvpp2_gmac_pcs_validate, + .pcs_inband_caps = mvpp2_gmac_pcs_inband_caps, .pcs_get_state = mvpp2_gmac_pcs_get_state, .pcs_config = mvpp2_gmac_pcs_config, .pcs_an_restart = mvpp2_gmac_pcs_an_restart, diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 549436efc204..3a9825883d79 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -1137,6 +1137,43 @@ static int octep_set_features(struct net_device *dev, netdev_features_t features return err; } +static int octep_get_vf_config(struct net_device *dev, int vf, + struct ifla_vf_info *ivi) +{ + struct octep_device *oct = netdev_priv(dev); + + ivi->vf = vf; + ether_addr_copy(ivi->mac, oct->vf_info[vf].mac_addr); + ivi->spoofchk = true; + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + ivi->trusted = false; + + return 0; +} + +static int octep_set_vf_mac(struct net_device *dev, int vf, u8 *mac) +{ + struct octep_device *oct = netdev_priv(dev); + int err; + + if (!is_valid_ether_addr(mac)) { + dev_err(&oct->pdev->dev, "Invalid MAC Address %pM\n", mac); + return -EADDRNOTAVAIL; + } + + dev_dbg(&oct->pdev->dev, "set vf-%d mac to %pM\n", vf, mac); + ether_addr_copy(oct->vf_info[vf].mac_addr, mac); + oct->vf_info[vf].flags |= OCTEON_PFVF_FLAG_MAC_SET_BY_PF; + + err = octep_ctrl_net_set_mac_addr(oct, vf, mac, true); + if (err) + dev_err(&oct->pdev->dev, + "Set VF%d MAC address failed via host control Mbox\n", + vf); + + return err; +} + static const struct net_device_ops octep_netdev_ops = { .ndo_open = octep_open, .ndo_stop = octep_stop, @@ -1146,6 +1183,8 @@ static const struct net_device_ops octep_netdev_ops = { .ndo_set_mac_address = octep_set_mac, .ndo_change_mtu = octep_change_mtu, .ndo_set_features = octep_set_features, + .ndo_get_vf_config = octep_get_vf_config, + .ndo_set_vf_mac = octep_set_vf_mac }; /** diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h index fee59e0e0138..3b56916af468 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h @@ -220,6 +220,7 @@ struct octep_iface_link_info { /* The Octeon VF device specific info data structure.*/ struct octep_pfvf_info { u8 mac_addr[ETH_ALEN]; + u32 flags; u32 mbox_version; }; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c index e6eb98d70f3c..ebecdd29f3bd 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c @@ -156,12 +156,23 @@ static void octep_pfvf_set_mac_addr(struct octep_device *oct, u32 vf_id, { int err; + if (oct->vf_info[vf_id].flags & OCTEON_PFVF_FLAG_MAC_SET_BY_PF) { + dev_err(&oct->pdev->dev, + "VF%d attempted to override administrative set MAC address\n", + vf_id); + rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK; + return; + } + err = octep_ctrl_net_set_mac_addr(oct, vf_id, cmd.s_set_mac.mac_addr, true); if (err) { rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK; - dev_err(&oct->pdev->dev, "Set VF MAC address failed via host control Mbox\n"); + dev_err(&oct->pdev->dev, "Set 
VF%d MAC address failed via host control Mbox\n", + vf_id); return; } + + ether_addr_copy(oct->vf_info[vf_id].mac_addr, cmd.s_set_mac.mac_addr); rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK; } @@ -171,10 +182,18 @@ static void octep_pfvf_get_mac_addr(struct octep_device *oct, u32 vf_id, { int err; + if (oct->vf_info[vf_id].flags & OCTEON_PFVF_FLAG_MAC_SET_BY_PF) { + dev_dbg(&oct->pdev->dev, "VF%d MAC address set by PF\n", vf_id); + ether_addr_copy(rsp->s_set_mac.mac_addr, + oct->vf_info[vf_id].mac_addr); + rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK; + return; + } err = octep_ctrl_net_get_mac_addr(oct, vf_id, rsp->s_set_mac.mac_addr); if (err) { rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK; - dev_err(&oct->pdev->dev, "Get VF MAC address failed via host control Mbox\n"); + dev_err(&oct->pdev->dev, "Get VF%d MAC address failed via host control Mbox\n", + vf_id); return; } rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h index 0dc6eead292a..386a095a99bc 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h @@ -8,8 +8,6 @@ #ifndef _OCTEP_PFVF_MBOX_H_ #define _OCTEP_PFVF_MBOX_H_ -/* VF flags */ -#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT_ULL(0) /* PF has set VF MAC address */ #define OCTEON_SDP_16K_HW_FRS 16380UL #define OCTEON_SDP_64K_HW_FRS 65531UL @@ -23,6 +21,10 @@ enum octep_pfvf_mbox_version { #define OCTEP_PFVF_MBOX_VERSION_CURRENT OCTEP_PFVF_MBOX_VERSION_V2 +/* VF flags */ +/* PF has set VF MAC address */ +#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT(0) + enum octep_pfvf_mbox_opcode { OCTEP_PFVF_MBOX_CMD_VERSION, OCTEP_PFVF_MBOX_CMD_SET_MTU, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 62c07407eb94..005ca8a056c0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -313,6 +313,10 @@ M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \ msg_rsp) \ M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \ nix_bandprof_get_hwinfo_rsp) \ +M(NIX_CPT_BP_ENABLE, 0x8020, nix_cpt_bp_enable, nix_bp_cfg_req, \ + nix_bp_cfg_rsp) \ +M(NIX_CPT_BP_DISABLE, 0x8021, nix_cpt_bp_disable, nix_bp_cfg_req, \ + msg_rsp) \ M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \ msg_req, nix_inline_ipsec_cfg) \ M(NIX_MCAST_GRP_CREATE, 0x802b, nix_mcast_grp_create, nix_mcast_grp_create_req, \ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index a5d1e2bddd58..613655fcd34f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -569,9 +569,17 @@ void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc) mutex_unlock(&rvu->rsrc_lock); } -int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, - struct nix_bp_cfg_req *req, - struct msg_rsp *rsp) +static u16 nix_get_channel(u16 chan, bool cpt_link) +{ + /* CPT channel for a given link channel is always + * assumed to be BIT(11) set in link channel. + */ + return cpt_link ? 
chan | BIT(11) : chan; +} + +static int nix_bp_disable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct msg_rsp *rsp, bool cpt_link) { u16 pcifunc = req->hdr.pcifunc; int blkaddr, pf, type, err; @@ -579,6 +587,7 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, struct rvu_pfvf *pfvf; struct nix_hw *nix_hw; struct nix_bp *bp; + u16 chan_v; u64 cfg; pf = rvu_get_pf(pcifunc); @@ -589,6 +598,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, if (is_sdp_pfvf(pcifunc)) type = NIX_INTF_TYPE_SDP; + if (cpt_link && !rvu->hw->cpt_links) + return 0; + pfvf = rvu_get_pfvf(rvu, pcifunc); err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); if (err) @@ -597,8 +609,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, bp = &nix_hw->bp; chan_base = pfvf->rx_chan_base + req->chan_base; for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { - cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); - rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), + chan_v = nix_get_channel(chan, cpt_link); + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v)); + rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v), cfg & ~BIT_ULL(16)); if (type == NIX_INTF_TYPE_LBK) { @@ -617,6 +630,20 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, return 0; } +int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct msg_rsp *rsp) +{ + return nix_bp_disable(rvu, req, rsp, false); +} + +int rvu_mbox_handler_nix_cpt_bp_disable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct msg_rsp *rsp) +{ + return nix_bp_disable(rvu, req, rsp, true); +} + static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, int type, int chan_id) { @@ -696,15 +723,17 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, return bpid; } -int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, - struct nix_bp_cfg_req *req, - struct nix_bp_cfg_rsp *rsp) +static int nix_bp_enable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct nix_bp_cfg_rsp *rsp, + bool cpt_link) { int blkaddr, pf, type, chan_id = 0; u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; u16 chan_base, chan; s16 bpid, bpid_base; + u16 chan_v; u64 cfg; pf = rvu_get_pf(pcifunc); @@ -717,6 +746,9 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, type != NIX_INTF_TYPE_SDP) return 0; + if (cpt_link && !rvu->hw->cpt_links) + return 0; + pfvf = rvu_get_pfvf(rvu, pcifunc); blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); @@ -730,9 +762,11 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, return -EINVAL; } - cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); + chan_v = nix_get_channel(chan, cpt_link); + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v)); cfg &= ~GENMASK_ULL(8, 0); - rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), + rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v), cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16)); chan_id++; bpid = rvu_nix_get_bpid(rvu, req, type, chan_id); @@ -750,6 +784,20 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, return 0; } +int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct nix_bp_cfg_rsp *rsp) +{ + return nix_bp_enable(rvu, req, rsp, false); +} + +int rvu_mbox_handler_nix_cpt_bp_enable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct nix_bp_cfg_rsp *rsp) +{ + return nix_bp_enable(rvu, req, rsp, true); +} + static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr, u64 format, bool v4, u64 *fidx) { diff --git 
a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index dbc971266865..cb6513ab35e7 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -15,5 +15,6 @@ rvu_rep-y := rep.o rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o +rvu_nicpf-$(CONFIG_XFRM_OFFLOAD) += cn10k_ipsec.o ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c new file mode 100644 index 000000000000..09a5b5268205 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c @@ -0,0 +1,1056 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell IPSEC offload driver + * + * Copyright (C) 2024 Marvell. + */ + +#include <net/xfrm.h> +#include <linux/netdevice.h> +#include <linux/bitfield.h> +#include <crypto/aead.h> +#include <crypto/gcm.h> + +#include "otx2_common.h" +#include "otx2_struct.h" +#include "cn10k_ipsec.h" + +static bool is_dev_support_ipsec_offload(struct pci_dev *pdev) +{ + return is_dev_cn10ka_b0(pdev) || is_dev_cn10kb(pdev); +} + +static bool cn10k_cpt_device_set_inuse(struct otx2_nic *pf) +{ + enum cn10k_cpt_hw_state_e state; + + while (true) { + state = atomic_cmpxchg(&pf->ipsec.cpt_state, + CN10K_CPT_HW_AVAILABLE, + CN10K_CPT_HW_IN_USE); + if (state == CN10K_CPT_HW_AVAILABLE) + return true; + if (state == CN10K_CPT_HW_UNAVAILABLE) + return false; + + mdelay(1); + } +} + +static void cn10k_cpt_device_set_available(struct otx2_nic *pf) +{ + atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_AVAILABLE); +} + +static void cn10k_cpt_device_set_unavailable(struct otx2_nic *pf) +{ + atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_UNAVAILABLE); +} + +static int cn10k_outb_cptlf_attach(struct otx2_nic *pf) +{ + struct rsrc_attach *attach; + int ret = -ENOMEM; + + mutex_lock(&pf->mbox.lock); + /* Get memory to put this msg */ + attach = otx2_mbox_alloc_msg_attach_resources(&pf->mbox); + if (!attach) + goto unlock; + + attach->cptlfs = true; + attach->modify = true; + + /* Send attach request to AF */ + ret = otx2_sync_mbox_msg(&pf->mbox); + +unlock: + mutex_unlock(&pf->mbox.lock); + return ret; +} + +static int cn10k_outb_cptlf_detach(struct otx2_nic *pf) +{ + struct rsrc_detach *detach; + int ret = -ENOMEM; + + mutex_lock(&pf->mbox.lock); + detach = otx2_mbox_alloc_msg_detach_resources(&pf->mbox); + if (!detach) + goto unlock; + + detach->partial = true; + detach->cptlfs = true; + + /* Send detach request to AF */ + ret = otx2_sync_mbox_msg(&pf->mbox); + +unlock: + mutex_unlock(&pf->mbox.lock); + return ret; +} + +static int cn10k_outb_cptlf_alloc(struct otx2_nic *pf) +{ + struct cpt_lf_alloc_req_msg *req; + int ret = -ENOMEM; + + mutex_lock(&pf->mbox.lock); + req = otx2_mbox_alloc_msg_cpt_lf_alloc(&pf->mbox); + if (!req) + goto unlock; + + /* PF function */ + req->nix_pf_func = pf->pcifunc; + /* Enable SE-IE Engine Group */ + req->eng_grpmsk = 1 << CN10K_DEF_CPT_IPSEC_EGRP; + + ret = otx2_sync_mbox_msg(&pf->mbox); + +unlock: + mutex_unlock(&pf->mbox.lock); + return ret; +} + +static void cn10k_outb_cptlf_free(struct otx2_nic *pf) +{ + mutex_lock(&pf->mbox.lock); + otx2_mbox_alloc_msg_cpt_lf_free(&pf->mbox); + otx2_sync_mbox_msg(&pf->mbox); + mutex_unlock(&pf->mbox.lock); +} + +static int cn10k_outb_cptlf_config(struct otx2_nic *pf) +{ + struct cpt_inline_ipsec_cfg_msg *req; + int ret = -ENOMEM; + + 
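+	/* Mailbox pattern used throughout this file: allocate the request
+	 * under mbox.lock, fill it in, and otx2_sync_mbox_msg() blocks until
+	 * the AF responds.  This particular request binds the attached CPT LF
+	 * to this PF's NIX (nix_pf_func) for the outbound inline path.
+	 */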
mutex_lock(&pf->mbox.lock); + req = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(&pf->mbox); + if (!req) + goto unlock; + + req->dir = CPT_INLINE_OUTBOUND; + req->enable = 1; + req->nix_pf_func = pf->pcifunc; + ret = otx2_sync_mbox_msg(&pf->mbox); +unlock: + mutex_unlock(&pf->mbox.lock); + return ret; +} + +static void cn10k_outb_cptlf_iq_enable(struct otx2_nic *pf) +{ + u64 reg_val; + + /* Set Execution Enable of instruction queue */ + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG); + reg_val |= BIT_ULL(16); + otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val); + + /* Set iqueue's enqueuing */ + reg_val = otx2_read64(pf, CN10K_CPT_LF_CTL); + reg_val |= BIT_ULL(0); + otx2_write64(pf, CN10K_CPT_LF_CTL, reg_val); +} + +static void cn10k_outb_cptlf_iq_disable(struct otx2_nic *pf) +{ + u32 inflight, grb_cnt, gwb_cnt; + u32 nq_ptr, dq_ptr; + int timeout = 20; + u64 reg_val; + int cnt; + + /* Disable instructions enqueuing */ + otx2_write64(pf, CN10K_CPT_LF_CTL, 0ull); + + /* Wait for instruction queue to become empty. + * CPT_LF_INPROG.INFLIGHT count is zero + */ + do { + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG); + inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val); + if (!inflight) + break; + + usleep_range(10000, 20000); + if (timeout-- < 0) { + netdev_err(pf->netdev, "Timeout to cleanup CPT IQ\n"); + break; + } + } while (1); + + /* Disable executions in the LF's queue, + * the queue should be empty at this point + */ + reg_val &= ~BIT_ULL(16); + otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val); + + /* Wait for instruction queue to become empty */ + cnt = 0; + do { + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG); + if (reg_val & BIT_ULL(31)) + cnt = 0; + else + cnt++; + reg_val = otx2_read64(pf, CN10K_CPT_LF_Q_GRP_PTR); + nq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val); + dq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val); + } while ((cnt < 10) && (nq_ptr != dq_ptr)); + + cnt = 0; + do { + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG); + inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val); + grb_cnt = FIELD_GET(CPT_LF_INPROG_GRB_CNT, reg_val); + gwb_cnt = FIELD_GET(CPT_LF_INPROG_GWB_CNT, reg_val); + if (inflight == 0 && gwb_cnt < 40 && + (grb_cnt == 0 || grb_cnt == 40)) + cnt++; + else + cnt = 0; + } while (cnt < 10); +} + +/* Allocate memory for CPT outbound Instruction queue. 
+ * Instruction queue memory format is: + * ----------------------------- + * | Instruction Group memory | + * | (CPT_LF_Q_SIZE[SIZE_DIV40] | + * | x 16 Bytes) | + * | | + * ----------------------------- <-- CPT_LF_Q_BASE[ADDR] + * | Flow Control (128 Bytes) | + * | | + * ----------------------------- + * | Instruction Memory | + * | (CPT_LF_Q_SIZE[SIZE_DIV40] | + * | × 40 × 64 bytes) | + * | | + * ----------------------------- + */ +static int cn10k_outb_cptlf_iq_alloc(struct otx2_nic *pf) +{ + struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq; + + iq->size = CN10K_CPT_INST_QLEN_BYTES + CN10K_CPT_Q_FC_LEN + + CN10K_CPT_INST_GRP_QLEN_BYTES + OTX2_ALIGN; + + iq->real_vaddr = dma_alloc_coherent(pf->dev, iq->size, + &iq->real_dma_addr, GFP_KERNEL); + if (!iq->real_vaddr) + return -ENOMEM; + + /* iq->vaddr/dma_addr points to Flow Control location */ + iq->vaddr = iq->real_vaddr + CN10K_CPT_INST_GRP_QLEN_BYTES; + iq->dma_addr = iq->real_dma_addr + CN10K_CPT_INST_GRP_QLEN_BYTES; + + /* Align pointers */ + iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_ALIGN); + iq->dma_addr = PTR_ALIGN(iq->dma_addr, OTX2_ALIGN); + return 0; +} + +static void cn10k_outb_cptlf_iq_free(struct otx2_nic *pf) +{ + struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq; + + if (iq->real_vaddr) + dma_free_coherent(pf->dev, iq->size, iq->real_vaddr, + iq->real_dma_addr); + + iq->real_vaddr = NULL; + iq->vaddr = NULL; +} + +static int cn10k_outb_cptlf_iq_init(struct otx2_nic *pf) +{ + u64 reg_val; + int ret; + + /* Allocate Memory for CPT IQ */ + ret = cn10k_outb_cptlf_iq_alloc(pf); + if (ret) + return ret; + + /* Disable IQ */ + cn10k_outb_cptlf_iq_disable(pf); + + /* Set IQ base address */ + otx2_write64(pf, CN10K_CPT_LF_Q_BASE, pf->ipsec.iq.dma_addr); + + /* Set IQ size */ + reg_val = FIELD_PREP(CPT_LF_Q_SIZE_DIV40, CN10K_CPT_SIZE_DIV40 + + CN10K_CPT_EXTRA_SIZE_DIV40); + otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, reg_val); + + return 0; +} + +static int cn10k_outb_cptlf_init(struct otx2_nic *pf) +{ + int ret; + + /* Initialize CPTLF Instruction Queue (IQ) */ + ret = cn10k_outb_cptlf_iq_init(pf); + if (ret) + return ret; + + /* Configure CPTLF for outbound ipsec offload */ + ret = cn10k_outb_cptlf_config(pf); + if (ret) + goto iq_clean; + + /* Enable CPTLF IQ */ + cn10k_outb_cptlf_iq_enable(pf); + return 0; +iq_clean: + cn10k_outb_cptlf_iq_free(pf); + return ret; +} + +static int cn10k_outb_cpt_init(struct net_device *netdev) +{ + struct otx2_nic *pf = netdev_priv(netdev); + int ret; + + /* Attach a CPT LF for outbound ipsec offload */ + ret = cn10k_outb_cptlf_attach(pf); + if (ret) + return ret; + + /* Allocate a CPT LF for outbound ipsec offload */ + ret = cn10k_outb_cptlf_alloc(pf); + if (ret) + goto detach; + + /* Initialize the CPTLF for outbound ipsec offload */ + ret = cn10k_outb_cptlf_init(pf); + if (ret) + goto lf_free; + + pf->ipsec.io_addr = (__force u64)otx2_get_regaddr(pf, + CN10K_CPT_LF_NQX(0)); + + /* Set ipsec offload enabled for this device */ + pf->flags |= OTX2_FLAG_IPSEC_OFFLOAD_ENABLED; + + cn10k_cpt_device_set_available(pf); + return 0; + +lf_free: + cn10k_outb_cptlf_free(pf); +detach: + cn10k_outb_cptlf_detach(pf); + return ret; +} + +static int cn10k_outb_cpt_clean(struct otx2_nic *pf) +{ + int ret; + + if (!cn10k_cpt_device_set_inuse(pf)) { + netdev_err(pf->netdev, "CPT LF device unavailable\n"); + return -ENODEV; + } + + /* Set ipsec offload disabled for this device */ + pf->flags &= ~OTX2_FLAG_IPSEC_OFFLOAD_ENABLED; + + /* Disable CPTLF Instruction Queue (IQ) */ + cn10k_outb_cptlf_iq_disable(pf); + + /* Set IQ base 
address and size to 0 */ + otx2_write64(pf, CN10K_CPT_LF_Q_BASE, 0); + otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, 0); + + /* Free CPTLF IQ */ + cn10k_outb_cptlf_iq_free(pf); + + /* Free and detach CPT LF */ + cn10k_outb_cptlf_free(pf); + ret = cn10k_outb_cptlf_detach(pf); + if (ret) + netdev_err(pf->netdev, "Failed to detach CPT LF\n"); + + cn10k_cpt_device_set_unavailable(pf); + return ret; +} + +static void cn10k_cpt_inst_flush(struct otx2_nic *pf, struct cpt_inst_s *inst, + u64 size) +{ + struct otx2_lmt_info *lmt_info; + u64 val = 0, tar_addr = 0; + + lmt_info = per_cpu_ptr(pf->hw.lmt_info, smp_processor_id()); + /* FIXME: val[0:10] LMT_ID. + * [12:15] no of LMTST - 1 in the burst. + * [19:63] data size of each LMTST in the burst except first. + */ + val = (lmt_info->lmt_id & 0x7FF); + /* Target address for LMTST flush tells HW how many 128bit + * words are present. + * tar_addr[6:4] size of first LMTST - 1 in units of 128b. + */ + tar_addr |= pf->ipsec.io_addr | (((size / 16) - 1) & 0x7) << 4; + dma_wmb(); + memcpy((u64 *)lmt_info->lmt_addr, inst, size); + cn10k_lmt_flush(val, tar_addr); +} + +static int cn10k_wait_for_cpt_respose(struct otx2_nic *pf, + struct cpt_res_s *res) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(100); + u64 *completion_ptr = (u64 *)res; + + do { + if (time_after(jiffies, timeout)) { + netdev_err(pf->netdev, "CPT response timeout\n"); + return -EBUSY; + } + } while ((READ_ONCE(*completion_ptr) & CN10K_CPT_COMP_E_MASK) == + CN10K_CPT_COMP_E_NOTDONE); + + if (!(res->compcode == CN10K_CPT_COMP_E_GOOD || + res->compcode == CN10K_CPT_COMP_E_WARN) || res->uc_compcode) { + netdev_err(pf->netdev, "compcode=%x doneint=%x\n", + res->compcode, res->doneint); + netdev_err(pf->netdev, "uc_compcode=%x uc_info=%llx esn=%llx\n", + res->uc_compcode, (u64)res->uc_info, res->esn); + } + return 0; +} + +static int cn10k_outb_write_sa(struct otx2_nic *pf, struct qmem *sa_info) +{ + dma_addr_t res_iova, dptr_iova, sa_iova; + struct cn10k_tx_sa_s *sa_dptr; + struct cpt_inst_s inst = {}; + struct cpt_res_s *res; + u32 sa_size, off; + u64 *sptr, *dptr; + u64 reg_val; + int ret; + + sa_iova = sa_info->iova; + if (!sa_iova) + return -EINVAL; + + res = dma_alloc_coherent(pf->dev, sizeof(struct cpt_res_s), + &res_iova, GFP_ATOMIC); + if (!res) + return -ENOMEM; + + sa_size = sizeof(struct cn10k_tx_sa_s); + sa_dptr = dma_alloc_coherent(pf->dev, sa_size, &dptr_iova, GFP_ATOMIC); + if (!sa_dptr) { + dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res, + res_iova); + return -ENOMEM; + } + + sptr = (__force u64 *)sa_info->base; + dptr = (__force u64 *)sa_dptr; + for (off = 0; off < (sa_size / 8); off++) + *(dptr + off) = (__force u64)cpu_to_be64(*(sptr + off)); + + res->compcode = CN10K_CPT_COMP_E_NOTDONE; + inst.res_addr = res_iova; + inst.dptr = (u64)dptr_iova; + inst.param2 = sa_size >> 3; + inst.dlen = sa_size; + inst.opcode_major = CN10K_IPSEC_MAJOR_OP_WRITE_SA; + inst.opcode_minor = CN10K_IPSEC_MINOR_OP_WRITE_SA; + inst.cptr = sa_iova; + inst.ctx_val = 1; + inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP; + + /* Check if CPT-LF available */ + if (!cn10k_cpt_device_set_inuse(pf)) { + ret = -ENODEV; + goto free_mem; + } + + cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s)); + dma_wmb(); + ret = cn10k_wait_for_cpt_respose(pf, res); + if (ret) + goto set_available; + + /* Trigger CTX flush to write dirty data back to DRAM */ + reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH, sa_iova >> 7); + otx2_write64(pf, CN10K_CPT_LF_CTX_FLUSH, reg_val); + +set_available: + 
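+	/* Reached on success and on a CPT response timeout alike: drop the
+	 * CN10K_CPT_HW_IN_USE claim taken by cn10k_cpt_device_set_inuse()
+	 * above before freeing the bounce buffers.
+	 */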
cn10k_cpt_device_set_available(pf); +free_mem: + dma_free_coherent(pf->dev, sa_size, sa_dptr, dptr_iova); + dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res, res_iova); + return ret; +} + +static int cn10k_ipsec_get_hw_ctx_offset(void) +{ + /* Offset on Hardware-context offset in word */ + return (offsetof(struct cn10k_tx_sa_s, hw_ctx) / sizeof(u64)) & 0x7F; +} + +static int cn10k_ipsec_get_ctx_push_size(void) +{ + /* Context push size is round up and in multiple of 8 Byte */ + return (roundup(offsetof(struct cn10k_tx_sa_s, hw_ctx), 8) / 8) & 0x7F; +} + +static int cn10k_ipsec_get_aes_key_len(int key_len) +{ + /* key_len is aes key length in bytes */ + switch (key_len) { + case 16: + return CN10K_IPSEC_SA_AES_KEY_LEN_128; + case 24: + return CN10K_IPSEC_SA_AES_KEY_LEN_192; + default: + return CN10K_IPSEC_SA_AES_KEY_LEN_256; + } +} + +static void cn10k_outb_prepare_sa(struct xfrm_state *x, + struct cn10k_tx_sa_s *sa_entry) +{ + int key_len = (x->aead->alg_key_len + 7) / 8; + struct net_device *netdev = x->xso.dev; + u8 *key = x->aead->alg_key; + struct otx2_nic *pf; + u32 *tmp_salt; + u64 *tmp_key; + int idx; + + memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s)); + + /* context size, 128 Byte aligned up */ + pf = netdev_priv(netdev); + sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF; + sa_entry->hw_ctx_off = cn10k_ipsec_get_hw_ctx_offset(); + sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size(); + + /* Ucode to skip two words of CPT_CTX_HW_S */ + sa_entry->ctx_hdr_size = 1; + + /* Allow Atomic operation (AOP) */ + sa_entry->aop_valid = 1; + + /* Outbound, ESP TRANSPORT/TUNNEL Mode, AES-GCM with */ + sa_entry->sa_dir = CN10K_IPSEC_SA_DIR_OUTB; + sa_entry->ipsec_protocol = CN10K_IPSEC_SA_IPSEC_PROTO_ESP; + sa_entry->enc_type = CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM; + sa_entry->iv_src = CN10K_IPSEC_SA_IV_SRC_PACKET; + if (x->props.mode == XFRM_MODE_TUNNEL) + sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL; + else + sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT; + + /* Last 4 bytes are salt */ + key_len -= 4; + sa_entry->aes_key_len = cn10k_ipsec_get_aes_key_len(key_len); + memcpy(sa_entry->cipher_key, key, key_len); + tmp_key = (u64 *)sa_entry->cipher_key; + + for (idx = 0; idx < key_len / 8; idx++) + tmp_key[idx] = (__force u64)cpu_to_be64(tmp_key[idx]); + + memcpy(&sa_entry->iv_gcm_salt, key + key_len, 4); + tmp_salt = (u32 *)&sa_entry->iv_gcm_salt; + *tmp_salt = (__force u32)cpu_to_be32(*tmp_salt); + + /* Write SA context data to memory before enabling */ + wmb(); + + /* Enable SA */ + sa_entry->sa_valid = 1; +} + +static int cn10k_ipsec_validate_state(struct xfrm_state *x, + struct netlink_ext_ack *extack) +{ + if (x->props.aalgo != SADB_AALG_NONE) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload authenticated xfrm states"); + return -EINVAL; + } + if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) { + NL_SET_ERR_MSG_MOD(extack, + "Only AES-GCM-ICV16 xfrm state may be offloaded"); + return -EINVAL; + } + if (x->props.calgo != SADB_X_CALG_NONE) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload compressed xfrm states"); + return -EINVAL; + } + if (x->props.flags & XFRM_STATE_ESN) { + NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states"); + return -EINVAL; + } + if (x->props.family != AF_INET && x->props.family != AF_INET6) { + NL_SET_ERR_MSG_MOD(extack, + "Only IPv4/v6 xfrm states may be offloaded"); + return -EINVAL; + } + if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload other than crypto-mode"); + 
return -EINVAL; + } + if (x->props.mode != XFRM_MODE_TRANSPORT && + x->props.mode != XFRM_MODE_TUNNEL) { + NL_SET_ERR_MSG_MOD(extack, + "Only tunnel/transport xfrm states may be offloaded"); + return -EINVAL; + } + if (x->id.proto != IPPROTO_ESP) { + NL_SET_ERR_MSG_MOD(extack, + "Only ESP xfrm state may be offloaded"); + return -EINVAL; + } + if (x->encap) { + NL_SET_ERR_MSG_MOD(extack, + "Encapsulated xfrm state may not be offloaded"); + return -EINVAL; + } + if (!x->aead) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states without aead"); + return -EINVAL; + } + + if (x->aead->alg_icv_len != 128) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states with AEAD ICV length other than 128bit"); + return -EINVAL; + } + if (x->aead->alg_key_len != 128 + 32 && + x->aead->alg_key_len != 192 + 32 && + x->aead->alg_key_len != 256 + 32) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states with AEAD key length other than 128/192/256bit"); + return -EINVAL; + } + if (x->tfcpad) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states with tfc padding"); + return -EINVAL; + } + if (!x->geniv) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states without geniv"); + return -EINVAL; + } + if (strcmp(x->geniv, "seqiv")) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states with geniv other than seqiv"); + return -EINVAL; + } + return 0; +} + +static int cn10k_ipsec_inb_add_state(struct xfrm_state *x, + struct netlink_ext_ack *extack) +{ + NL_SET_ERR_MSG_MOD(extack, "xfrm inbound offload not supported"); + return -EOPNOTSUPP; +} + +static int cn10k_ipsec_outb_add_state(struct xfrm_state *x, + struct netlink_ext_ack *extack) +{ + struct net_device *netdev = x->xso.dev; + struct cn10k_tx_sa_s *sa_entry; + struct qmem *sa_info; + struct otx2_nic *pf; + int err; + + err = cn10k_ipsec_validate_state(x, extack); + if (err) + return err; + + pf = netdev_priv(netdev); + + err = qmem_alloc(pf->dev, &sa_info, pf->ipsec.sa_size, OTX2_ALIGN); + if (err) + return err; + + sa_entry = (struct cn10k_tx_sa_s *)sa_info->base; + cn10k_outb_prepare_sa(x, sa_entry); + + err = cn10k_outb_write_sa(pf, sa_info); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Error writing outbound SA"); + qmem_free(pf->dev, sa_info); + return err; + } + + x->xso.offload_handle = (unsigned long)sa_info; + /* Enable static branch when first SA setup */ + if (!pf->ipsec.outb_sa_count) + static_branch_enable(&cn10k_ipsec_sa_enabled); + pf->ipsec.outb_sa_count++; + return 0; +} + +static int cn10k_ipsec_add_state(struct xfrm_state *x, + struct netlink_ext_ack *extack) +{ + if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) + return cn10k_ipsec_inb_add_state(x, extack); + else + return cn10k_ipsec_outb_add_state(x, extack); +} + +static void cn10k_ipsec_del_state(struct xfrm_state *x) +{ + struct net_device *netdev = x->xso.dev; + struct cn10k_tx_sa_s *sa_entry; + struct qmem *sa_info; + struct otx2_nic *pf; + int err; + + if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) + return; + + pf = netdev_priv(netdev); + + sa_info = (struct qmem *)x->xso.offload_handle; + sa_entry = (struct cn10k_tx_sa_s *)sa_info->base; + memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s)); + /* Disable SA in CPT h/w */ + sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size(); + sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF; + sa_entry->aop_valid = 1; + + err = cn10k_outb_write_sa(pf, sa_info); + if (err) + netdev_err(netdev, "Error (%d) deleting SA\n", err); + + x->xso.offload_handle = 0; + qmem_free(pf->dev, sa_info); + + /* If no more SA's 
then update netdev feature for potential change + * in NETIF_F_HW_ESP. + */ + if (!--pf->ipsec.outb_sa_count) + queue_work(pf->ipsec.sa_workq, &pf->ipsec.sa_work); +} + +static bool cn10k_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) +{ + if (x->props.family == AF_INET) { + /* Offload with IPv4 options is not supported yet */ + if (ip_hdr(skb)->ihl > 5) + return false; + } else { + /* Offload with IPv6 extension headers is not support yet */ + if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) + return false; + } + return true; +} + +static const struct xfrmdev_ops cn10k_ipsec_xfrmdev_ops = { + .xdo_dev_state_add = cn10k_ipsec_add_state, + .xdo_dev_state_delete = cn10k_ipsec_del_state, + .xdo_dev_offload_ok = cn10k_ipsec_offload_ok, +}; + +static void cn10k_ipsec_sa_wq_handler(struct work_struct *work) +{ + struct cn10k_ipsec *ipsec = container_of(work, struct cn10k_ipsec, + sa_work); + struct otx2_nic *pf = container_of(ipsec, struct otx2_nic, ipsec); + + /* Disable static branch when no more SA enabled */ + static_branch_disable(&cn10k_ipsec_sa_enabled); + rtnl_lock(); + netdev_update_features(pf->netdev); + rtnl_unlock(); +} + +int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable) +{ + struct otx2_nic *pf = netdev_priv(netdev); + + /* IPsec offload supported on cn10k */ + if (!is_dev_support_ipsec_offload(pf->pdev)) + return -EOPNOTSUPP; + + /* Initialize CPT for outbound ipsec offload */ + if (enable) + return cn10k_outb_cpt_init(netdev); + + /* Don't do CPT cleanup if SA installed */ + if (pf->ipsec.outb_sa_count) { + netdev_err(pf->netdev, "SA installed on this device\n"); + return -EBUSY; + } + + return cn10k_outb_cpt_clean(pf); +} + +int cn10k_ipsec_init(struct net_device *netdev) +{ + struct otx2_nic *pf = netdev_priv(netdev); + u32 sa_size; + + if (!is_dev_support_ipsec_offload(pf->pdev)) + return 0; + + /* Each SA entry size is 128 Byte round up in size */ + sa_size = sizeof(struct cn10k_tx_sa_s) % OTX2_ALIGN ? + (sizeof(struct cn10k_tx_sa_s) / OTX2_ALIGN + 1) * + OTX2_ALIGN : sizeof(struct cn10k_tx_sa_s); + pf->ipsec.sa_size = sa_size; + + INIT_WORK(&pf->ipsec.sa_work, cn10k_ipsec_sa_wq_handler); + pf->ipsec.sa_workq = alloc_workqueue("cn10k_ipsec_sa_workq", 0, 0); + if (!pf->ipsec.sa_workq) { + netdev_err(pf->netdev, "SA alloc workqueue failed\n"); + return -ENOMEM; + } + + /* Set xfrm device ops */ + netdev->xfrmdev_ops = &cn10k_ipsec_xfrmdev_ops; + netdev->hw_features |= NETIF_F_HW_ESP; + netdev->hw_enc_features |= NETIF_F_HW_ESP; + + cn10k_cpt_device_set_unavailable(pf); + return 0; +} +EXPORT_SYMBOL(cn10k_ipsec_init); + +void cn10k_ipsec_clean(struct otx2_nic *pf) +{ + if (!is_dev_support_ipsec_offload(pf->pdev)) + return; + + if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED)) + return; + + if (pf->ipsec.sa_workq) { + destroy_workqueue(pf->ipsec.sa_workq); + pf->ipsec.sa_workq = NULL; + } + + cn10k_outb_cpt_clean(pf); +} +EXPORT_SYMBOL(cn10k_ipsec_clean); + +static u16 cn10k_ipsec_get_ip_data_len(struct xfrm_state *x, + struct sk_buff *skb) +{ + struct ipv6hdr *ipv6h; + struct iphdr *iph; + u8 *src; + + src = (u8 *)skb->data + ETH_HLEN; + + if (x->props.family == AF_INET) { + iph = (struct iphdr *)src; + return ntohs(iph->tot_len); + } + + ipv6h = (struct ipv6hdr *)src; + return ntohs(ipv6h->payload_len) + sizeof(struct ipv6hdr); +} + +/* Prepare CPT and NIX SQE scatter/gather subdescriptor structure. + * SG of NIX and CPT are same in size. 
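+ * otx2_sqe_add_sg_ipsec() below writes each fragment's DMA address into
+ * both lists, so the NIX and CPT engines walk identical buffers.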
+ * Layout of a NIX SQE and CPT SG entry: + * ----------------------------- + * | CPT Scatter Gather | + * | (SQE SIZE) | + * | | + * ----------------------------- + * | NIX SQE | + * | (SQE SIZE) | + * | | + * ----------------------------- + */ +bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, + struct sk_buff *skb, int num_segs, int *offset) +{ + struct cpt_sg_s *cpt_sg = NULL; + struct nix_sqe_sg_s *sg = NULL; + u64 dma_addr, *iova = NULL; + u64 *cpt_iova = NULL; + u16 *sg_lens = NULL; + int seg, len; + + sq->sg[sq->head].num_segs = 0; + cpt_sg = (struct cpt_sg_s *)(sq->sqe_base - sq->sqe_size); + + for (seg = 0; seg < num_segs; seg++) { + if ((seg % MAX_SEGS_PER_SG) == 0) { + sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset); + sg->ld_type = NIX_SEND_LDTYPE_LDD; + sg->subdc = NIX_SUBDC_SG; + sg->segs = 0; + sg_lens = (void *)sg; + iova = (void *)sg + sizeof(*sg); + /* Next subdc always starts at a 16byte boundary. + * So if sg->segs is whether 2 or 3, offset += 16bytes. + */ + if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1)) + *offset += sizeof(*sg) + (3 * sizeof(u64)); + else + *offset += sizeof(*sg) + sizeof(u64); + + cpt_sg += (seg / MAX_SEGS_PER_SG) * 4; + cpt_iova = (void *)cpt_sg + sizeof(*cpt_sg); + } + dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len); + if (dma_mapping_error(pfvf->dev, dma_addr)) + return false; + + sg_lens[seg % MAX_SEGS_PER_SG] = len; + sg->segs++; + *iova++ = dma_addr; + *cpt_iova++ = dma_addr; + + /* Save DMA mapping info for later unmapping */ + sq->sg[sq->head].dma_addr[seg] = dma_addr; + sq->sg[sq->head].size[seg] = len; + sq->sg[sq->head].num_segs++; + + *cpt_sg = *(struct cpt_sg_s *)sg; + cpt_sg->rsvd_63_50 = 0; + } + + sq->sg[sq->head].skb = (u64)skb; + return true; +} + +static u16 cn10k_ipsec_get_param1(u8 iv_offset) +{ + u16 param1_val; + + /* Set Crypto mode, disable L3/L4 checksum */ + param1_val = CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM | + CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM; + param1_val |= (u16)iv_offset << CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT; + return param1_val; +} + +bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq, + struct otx2_snd_queue *sq, struct sk_buff *skb, + int num_segs, int size) +{ + struct cpt_inst_s inst; + struct cpt_res_s *res; + struct xfrm_state *x; + struct qmem *sa_info; + dma_addr_t dptr_iova; + struct sec_path *sp; + u8 encap_offset; + u8 auth_offset; + u8 gthr_size; + u8 iv_offset; + u16 dlen; + + /* Check for IPSEC offload enabled */ + if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED)) + goto drop; + + sp = skb_sec_path(skb); + if (unlikely(!sp->len)) + goto drop; + + x = xfrm_input_state(skb); + if (unlikely(!x)) + goto drop; + + if (x->props.mode != XFRM_MODE_TRANSPORT && + x->props.mode != XFRM_MODE_TUNNEL) + goto drop; + + dlen = cn10k_ipsec_get_ip_data_len(x, skb); + if (dlen == 0 && netif_msg_tx_err(pf)) { + netdev_err(pf->netdev, "Invalid IP header, ip-length zero\n"); + goto drop; + } + + /* Check for valid SA context */ + sa_info = (struct qmem *)x->xso.offload_handle; + if (!sa_info) + goto drop; + + memset(&inst, 0, sizeof(struct cpt_inst_s)); + + /* Get authentication offset */ + if (x->props.family == AF_INET) + auth_offset = sizeof(struct iphdr); + else + auth_offset = sizeof(struct ipv6hdr); + + /* IV offset is after ESP header */ + iv_offset = auth_offset + sizeof(struct ip_esp_hdr); + /* Encap will start after IV */ + encap_offset = iv_offset + GCM_RFC4106_IV_SIZE; + + /* CPT Instruction word-1 */ + res = (struct cpt_res_s 
*)(sq->cpt_resp->base + (64 * sq->head)); + res->compcode = 0; + inst.res_addr = sq->cpt_resp->iova + (64 * sq->head); + + /* CPT Instruction word-2 */ + inst.rvu_pf_func = pf->pcifunc; + + /* CPT Instruction word-3: + * Set QORD to force CPT_RES_S write completion + */ + inst.qord = 1; + + /* CPT Instruction word-4 */ + /* inst.dlen should not include ICV length */ + inst.dlen = dlen + ETH_HLEN - (x->aead->alg_icv_len / 8); + inst.opcode_major = CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC; + inst.param1 = cn10k_ipsec_get_param1(iv_offset); + + inst.param2 = encap_offset << + CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT; + inst.param2 |= (u16)auth_offset << + CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT; + + /* CPT Instruction word-5 */ + gthr_size = num_segs / MAX_SEGS_PER_SG; + gthr_size = (num_segs % MAX_SEGS_PER_SG) ? gthr_size + 1 : gthr_size; + + gthr_size &= 0xF; + dptr_iova = (sq->sqe_ring->iova + (sq->head * (sq->sqe_size * 2))); + inst.dptr = dptr_iova | ((u64)gthr_size << 60); + + /* CPT Instruction word-6 */ + inst.rptr = inst.dptr; + + /* CPT Instruction word-7 */ + inst.cptr = sa_info->iova; + inst.ctx_val = 1; + inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP; + + /* CPT Instruction word-0 */ + inst.nixtxl = (size / 16) - 1; + inst.dat_offset = ETH_HLEN; + inst.nixtx_offset = sq->sqe_size; + + netdev_tx_sent_queue(txq, skb->len); + + /* Finally Flush the CPT instruction */ + sq->head++; + sq->head &= (sq->sqe_cnt - 1); + cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s)); + return true; +drop: + dev_kfree_skb_any(skb); + return false; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h new file mode 100644 index 000000000000..9965df0faa3e --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h @@ -0,0 +1,265 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell IPSEC offload driver + * + * Copyright (C) 2024 Marvell. + */ + +#ifndef CN10K_IPSEC_H +#define CN10K_IPSEC_H + +#include <linux/types.h> + +DECLARE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled); + +/* CPT instruction size in bytes */ +#define CN10K_CPT_INST_SIZE 64 + +/* CPT instruction (CPT_INST_S) queue length */ +#define CN10K_CPT_INST_QLEN 8200 + +/* CPT instruction queue size passed to HW is in units of + * 40*CPT_INST_S messages. 
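The queue sizing this comment describes is plain integer arithmetic; a small user-space sketch with the values from this header (8200-entry queue, 64-byte instructions, 320 extra free entries). The macro names below are simplified stand-ins for the CN10K_CPT_* defines, not the driver's own:

#include <stdio.h>

#define INST_SIZE   64      /* bytes per CPT_INST_S */
#define INST_QLEN   8200    /* requested instruction queue length */
#define EXTRA_INST  320     /* free entries the CPT expects on top */

int main(void)
{
	unsigned int size_div40 = INST_QLEN / 40;                   /* 205 */
	unsigned int qlen_bytes = size_div40 * 40 * INST_SIZE +
				  EXTRA_INST * INST_SIZE;            /* 545280 */

	printf("SIZE_DIV40=%u queue_bytes=%u\n", size_div40, qlen_bytes);
	return 0;
}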
+ */ +#define CN10K_CPT_SIZE_DIV40 (CN10K_CPT_INST_QLEN / 40) + +/* CPT needs 320 free entries */ +#define CN10K_CPT_INST_QLEN_EXTRA_BYTES (320 * CN10K_CPT_INST_SIZE) +#define CN10K_CPT_EXTRA_SIZE_DIV40 (320 / 40) + +/* CPT instruction queue length in bytes */ +#define CN10K_CPT_INST_QLEN_BYTES \ + ((CN10K_CPT_SIZE_DIV40 * 40 * CN10K_CPT_INST_SIZE) + \ + CN10K_CPT_INST_QLEN_EXTRA_BYTES) + +/* CPT instruction group queue length in bytes */ +#define CN10K_CPT_INST_GRP_QLEN_BYTES \ + ((CN10K_CPT_SIZE_DIV40 + CN10K_CPT_EXTRA_SIZE_DIV40) * 16) + +/* CPT FC length in bytes */ +#define CN10K_CPT_Q_FC_LEN 128 + +/* Default CPT engine group for ipsec offload */ +#define CN10K_DEF_CPT_IPSEC_EGRP 1 + +/* CN10K CPT LF registers */ +#define CPT_LFBASE (BLKTYPE_CPT << RVU_FUNC_BLKADDR_SHIFT) +#define CN10K_CPT_LF_CTL (CPT_LFBASE | 0x10) +#define CN10K_CPT_LF_INPROG (CPT_LFBASE | 0x40) +#define CN10K_CPT_LF_Q_BASE (CPT_LFBASE | 0xf0) +#define CN10K_CPT_LF_Q_SIZE (CPT_LFBASE | 0x100) +#define CN10K_CPT_LF_Q_INST_PTR (CPT_LFBASE | 0x110) +#define CN10K_CPT_LF_Q_GRP_PTR (CPT_LFBASE | 0x120) +#define CN10K_CPT_LF_NQX(a) (CPT_LFBASE | 0x400 | (a) << 3) +#define CN10K_CPT_LF_CTX_FLUSH (CPT_LFBASE | 0x510) + +/* IPSEC Instruction opcodes */ +#define CN10K_IPSEC_MAJOR_OP_WRITE_SA 0x01UL +#define CN10K_IPSEC_MINOR_OP_WRITE_SA 0x09UL +#define CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC 0x2AUL + +enum cn10k_cpt_comp_e { + CN10K_CPT_COMP_E_NOTDONE = 0x00, + CN10K_CPT_COMP_E_GOOD = 0x01, + CN10K_CPT_COMP_E_FAULT = 0x02, + CN10K_CPT_COMP_E_HWERR = 0x04, + CN10K_CPT_COMP_E_INSTERR = 0x05, + CN10K_CPT_COMP_E_WARN = 0x06, + CN10K_CPT_COMP_E_MASK = 0x3F +}; + +struct cn10k_cpt_inst_queue { + u8 *vaddr; + u8 *real_vaddr; + dma_addr_t dma_addr; + dma_addr_t real_dma_addr; + u32 size; +}; + +enum cn10k_cpt_hw_state_e { + CN10K_CPT_HW_UNAVAILABLE, + CN10K_CPT_HW_AVAILABLE, + CN10K_CPT_HW_IN_USE +}; + +struct cn10k_ipsec { + /* Outbound CPT */ + u64 io_addr; + atomic_t cpt_state; + struct cn10k_cpt_inst_queue iq; + + /* SA info */ + u32 sa_size; + u32 outb_sa_count; + struct work_struct sa_work; + struct workqueue_struct *sa_workq; +}; + +/* CN10K IPSEC Security Association (SA) */ +/* SA direction */ +#define CN10K_IPSEC_SA_DIR_INB 0 +#define CN10K_IPSEC_SA_DIR_OUTB 1 +/* SA protocol */ +#define CN10K_IPSEC_SA_IPSEC_PROTO_AH 0 +#define CN10K_IPSEC_SA_IPSEC_PROTO_ESP 1 +/* SA Encryption Type */ +#define CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM 5 +/* SA IPSEC mode Transport/Tunnel */ +#define CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT 0 +#define CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL 1 +/* SA AES Key Length */ +#define CN10K_IPSEC_SA_AES_KEY_LEN_128 1 +#define CN10K_IPSEC_SA_AES_KEY_LEN_192 2 +#define CN10K_IPSEC_SA_AES_KEY_LEN_256 3 +/* IV Source */ +#define CN10K_IPSEC_SA_IV_SRC_COUNTER 0 +#define CN10K_IPSEC_SA_IV_SRC_PACKET 3 + +struct cn10k_tx_sa_s { + u64 esn_en : 1; /* W0 */ + u64 rsvd_w0_1_8 : 8; + u64 hw_ctx_off : 7; + u64 ctx_id : 16; + u64 rsvd_w0_32_47 : 16; + u64 ctx_push_size : 7; + u64 rsvd_w0_55 : 1; + u64 ctx_hdr_size : 2; + u64 aop_valid : 1; + u64 rsvd_w0_59 : 1; + u64 ctx_size : 4; + u64 w1; /* W1 */ + u64 sa_valid : 1; /* W2 */ + u64 sa_dir : 1; + u64 rsvd_w2_2_3 : 2; + u64 ipsec_mode : 1; + u64 ipsec_protocol : 1; + u64 aes_key_len : 2; + u64 enc_type : 3; + u64 rsvd_w2_11_19 : 9; + u64 iv_src : 2; + u64 rsvd_w2_22_31 : 10; + u64 rsvd_w2_32_63 : 32; + u64 w3; /* W3 */ + u8 cipher_key[32]; /* W4 - W7 */ + u32 rsvd_w8_0_31; /* W8 : IV */ + u32 iv_gcm_salt; + u64 rsvd_w9_w30[22]; /* W9 - W30 */ + u64 hw_ctx[6]; /* W31 - W36 */ +}; + +/* CPT 
instruction parameter-1 */ +#define CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM 0x1 +#define CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM 0x2 +#define CN10K_IPSEC_INST_PARAM1_CRYPTO_MODE 0x20 +#define CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT 8 + +/* CPT instruction parameter-2 */ +#define CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT 0 +#define CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT 8 + +/* CPT Instruction Structure */ +struct cpt_inst_s { + u64 nixtxl : 3; /* W0 */ + u64 doneint : 1; + u64 rsvd_w0_4_15 : 12; + u64 dat_offset : 8; + u64 ext_param1 : 8; + u64 nixtx_offset : 20; + u64 rsvd_w0_52_63 : 12; + u64 res_addr; /* W1 */ + u64 tag : 32; /* W2 */ + u64 tt : 2; + u64 grp : 10; + u64 rsvd_w2_44_47 : 4; + u64 rvu_pf_func : 16; + u64 qord : 1; /* W3 */ + u64 rsvd_w3_1_2 : 2; + u64 wqe_ptr : 61; + u64 dlen : 16; /* W4 */ + u64 param2 : 16; + u64 param1 : 16; + u64 opcode_major : 8; + u64 opcode_minor : 8; + u64 dptr; /* W5 */ + u64 rptr; /* W6 */ + u64 cptr : 60; /* W7 */ + u64 ctx_val : 1; + u64 egrp : 3; +}; + +/* CPT Instruction Result Structure */ +struct cpt_res_s { + u64 compcode : 7; /* W0 */ + u64 doneint : 1; + u64 uc_compcode : 8; + u64 uc_info : 48; + u64 esn; /* W1 */ +}; + +/* CPT SG structure */ +struct cpt_sg_s { + u64 seg1_size : 16; + u64 seg2_size : 16; + u64 seg3_size : 16; + u64 segs : 2; + u64 rsvd_63_50 : 14; +}; + +/* CPT LF_INPROG Register */ +#define CPT_LF_INPROG_INFLIGHT GENMASK_ULL(8, 0) +#define CPT_LF_INPROG_GRB_CNT GENMASK_ULL(39, 32) +#define CPT_LF_INPROG_GWB_CNT GENMASK_ULL(47, 40) + +/* CPT LF_Q_GRP_PTR Register */ +#define CPT_LF_Q_GRP_PTR_DQ_PTR GENMASK_ULL(14, 0) +#define CPT_LF_Q_GRP_PTR_NQ_PTR GENMASK_ULL(46, 32) + +/* CPT LF_Q_SIZE Register */ +#define CPT_LF_Q_BASE_ADDR GENMASK_ULL(52, 7) + +/* CPT LF_Q_SIZE Register */ +#define CPT_LF_Q_SIZE_DIV40 GENMASK_ULL(14, 0) + +/* CPT LF CTX Flush Register */ +#define CPT_LF_CTX_FLUSH GENMASK_ULL(45, 0) + +#ifdef CONFIG_XFRM_OFFLOAD +int cn10k_ipsec_init(struct net_device *netdev); +void cn10k_ipsec_clean(struct otx2_nic *pf); +int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable); +bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, + struct sk_buff *skb, int num_segs, int *offset); +bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq, + struct otx2_snd_queue *sq, struct sk_buff *skb, + int num_segs, int size); +#else +static inline __maybe_unused int cn10k_ipsec_init(struct net_device *netdev) +{ + return 0; +} + +static inline __maybe_unused void cn10k_ipsec_clean(struct otx2_nic *pf) +{ +} + +static inline __maybe_unused +int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable) +{ + return 0; +} + +static inline bool __maybe_unused +otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, + struct sk_buff *skb, int num_segs, int *offset) +{ + return true; +} + +static inline bool __maybe_unused +cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq, + struct otx2_snd_queue *sq, struct sk_buff *skb, + int num_segs, int size) +{ + return true; +} +#endif +#endif // CN10K_IPSEC_H diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 523ecb798a7a..bf56888e7fe7 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -10,12 +10,18 @@ #include <net/page_pool/helpers.h> #include <net/tso.h> #include <linux/bitfield.h> +#include <net/xfrm.h> #include "otx2_reg.h" 
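For readers unfamiliar with the CONFIG_XFRM_OFFLOAD block that closes cn10k_ipsec.h above: pairing real prototypes with static inline no-op stubs is the standard kernel idiom for keeping callers free of ifdefs. A compilable, generic sketch of the pattern follows; the feature flag and function name are illustrative only, not part of the driver:

#include <stdio.h>

#define EXAMPLE_FEATURE 0	/* flip to 1 when the real implementation is built in */

#if EXAMPLE_FEATURE
int example_feature_init(void);	/* provided elsewhere when the feature is enabled */
#else
static inline int example_feature_init(void)
{
	return 0;	/* feature compiled out: succeed silently */
}
#endif

int main(void)
{
	printf("init -> %d\n", example_feature_init());
	return 0;
}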
#include "otx2_common.h" #include "otx2_struct.h" #include "cn10k.h" +static bool otx2_is_pfc_enabled(struct otx2_nic *pfvf) +{ + return IS_ENABLED(CONFIG_DCB) && !!pfvf->pfc_en; +} + static void otx2_nix_rq_op_stats(struct queue_stats *stats, struct otx2_nic *pfvf, int qidx) { @@ -964,6 +970,29 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) if (err) return err; + /* Allocate memory for NIX SQE (which includes NIX SG) and CPT SG. + * SG of NIX and CPT are same in size. Allocate memory for CPT SG + * same as NIX SQE for base address alignment. + * Layout of a NIX SQE and CPT SG entry: + * ----------------------------- + * | CPT Scatter Gather | + * | (SQE SIZE) | + * | | + * ----------------------------- + * | NIX SQE | + * | (SQE SIZE) | + * | | + * ----------------------------- + */ + err = qmem_alloc(pfvf->dev, &sq->sqe_ring, qset->sqe_cnt, + sq->sqe_size * 2); + if (err) + return err; + + err = qmem_alloc(pfvf->dev, &sq->cpt_resp, qset->sqe_cnt, 64); + if (err) + return err; + if (qidx < pfvf->hw.tx_queues) { err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt, TSO_HEADER_SIZE); @@ -1722,18 +1751,43 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable) return -ENOMEM; req->chan_base = 0; -#ifdef CONFIG_DCB - req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1; - req->bpid_per_chan = pfvf->pfc_en ? 1 : 0; -#else - req->chan_cnt = 1; - req->bpid_per_chan = 0; -#endif + if (otx2_is_pfc_enabled(pfvf)) { + req->chan_cnt = IEEE_8021QAZ_MAX_TCS; + req->bpid_per_chan = 1; + } else { + req->chan_cnt = 1; + req->bpid_per_chan = 0; + } return otx2_sync_mbox_msg(&pfvf->mbox); } EXPORT_SYMBOL(otx2_nix_config_bp); +int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable) +{ + struct nix_bp_cfg_req *req; + + if (enable) + req = otx2_mbox_alloc_msg_nix_cpt_bp_enable(&pfvf->mbox); + else + req = otx2_mbox_alloc_msg_nix_cpt_bp_disable(&pfvf->mbox); + + if (!req) + return -ENOMEM; + + req->chan_base = 0; + if (otx2_is_pfc_enabled(pfvf)) { + req->chan_cnt = IEEE_8021QAZ_MAX_TCS; + req->bpid_per_chan = 1; + } else { + req->chan_cnt = 1; + req->bpid_per_chan = 0; + } + + return otx2_sync_mbox_msg(&pfvf->mbox); +} +EXPORT_SYMBOL(otx2_nix_cpt_config_bp); + /* Mbox message handlers */ void mbox_handler_cgx_stats(struct otx2_nic *pfvf, struct cgx_stats_rsp *rsp) @@ -1947,3 +2001,48 @@ EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name); MBOX_UP_CGX_MESSAGES MBOX_UP_MCS_MESSAGES #undef M + +dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, + struct sk_buff *skb, int seg, int *len) +{ + enum dma_data_direction dir = DMA_TO_DEVICE; + const skb_frag_t *frag; + struct page *page; + int offset; + + /* Crypto hardware need write permission for ipsec crypto offload */ + if (unlikely(xfrm_offload(skb))) { + dir = DMA_BIDIRECTIONAL; + skb = skb_unshare(skb, GFP_ATOMIC); + } + + /* First segment is always skb->data */ + if (!seg) { + page = virt_to_page(skb->data); + offset = offset_in_page(skb->data); + *len = skb_headlen(skb); + } else { + frag = &skb_shinfo(skb)->frags[seg - 1]; + page = skb_frag_page(frag); + offset = skb_frag_off(frag); + *len = skb_frag_size(frag); + } + return otx2_dma_map_page(pfvf, page, offset, *len, dir); +} + +void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg) +{ + enum dma_data_direction dir = DMA_TO_DEVICE; + struct sk_buff *skb = NULL; + int seg; + + skb = (struct sk_buff *)sg->skb; + if (unlikely(xfrm_offload(skb))) + dir = DMA_BIDIRECTIONAL; + + for (seg = 0; seg < sg->num_segs; seg++) { + otx2_dma_unmap_page(pfvf, 
sg->dma_addr[seg], + sg->size[seg], dir); + } + sg->num_segs = 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 566848663fea..44d737a0dd09 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -30,6 +30,7 @@ #include <rvu_trace.h> #include "qos.h" #include "rep.h" +#include "cn10k_ipsec.h" /* IPv4 flag more fragment bit */ #define IPV4_FLAG_MORE 0x20 @@ -40,6 +41,7 @@ #define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8 #define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200 +#define PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF 0xB900 #define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00 #define PCI_DEVID_OCTEONTX2_SDP_REP 0xA0F7 @@ -55,6 +57,9 @@ #define NIX_PF_PFC_PRIO_MAX 8 #endif +/* Number of segments per SG structure */ +#define MAX_SEGS_PER_SG 3 + enum arua_mapped_qtypes { AURA_NIX_RQ, AURA_NIX_SQ, @@ -448,6 +453,7 @@ struct otx2_nic { #define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17) #define OTX2_FLAG_REP_MODE_ENABLED BIT_ULL(18) #define OTX2_FLAG_PORT_UP BIT_ULL(19) +#define OTX2_FLAG_IPSEC_OFFLOAD_ENABLED BIT_ULL(20) u64 flags; u64 *cq_op_addr; @@ -522,6 +528,9 @@ struct otx2_nic { u16 rep_pf_map[RVU_MAX_REP]; u16 esw_mode; #endif + + /* Inline ipsec */ + struct cn10k_ipsec ipsec; }; static inline bool is_otx2_lbkvf(struct pci_dev *pdev) @@ -572,6 +581,15 @@ static inline bool is_dev_cn10kb(struct pci_dev *pdev) return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF; } +static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev) +{ + if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF && + (pdev->revision & 0xFF) == 0x54) + return true; + + return false; +} + static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf) { struct otx2_hw *hw = &pfvf->hw; @@ -621,6 +639,9 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset) case BLKTYPE_NPA: blkaddr = BLKADDR_NPA; break; + case BLKTYPE_CPT: + blkaddr = BLKADDR_CPT0; + break; default: blkaddr = BLKADDR_RVUM; break; @@ -985,6 +1006,7 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable); void otx2_ctx_disable(struct mbox *mbox, int type, bool npa); int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable); +int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable); void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx); void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq); int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura); @@ -1149,4 +1171,8 @@ static inline int mcam_entry_cmp(const void *a, const void *b) { return *(u16 *)a - *(u16 *)b; } + +dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, + struct sk_buff *skb, int seg, int *len); +void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg); #endif /* OTX2_COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c index 294fba58b670..f110dfa42360 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c @@ -435,6 +435,9 @@ process_pfc: return err; } + /* Default disable backpressure on NIX-CPT */ + otx2_nix_cpt_config_bp(pfvf, false); + /* Request Per channel Bpids */ if (pfc->pfc_en) otx2_nix_config_bp(pfvf, true); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c 
b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index e310f99b1736..e1dde93e8af8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -26,6 +26,7 @@ #include "cn10k.h" #include "qos.h" #include <rvu_trace.h> +#include "cn10k_ipsec.h" #define DRV_NAME "rvu_nicpf" #define DRV_STRING "Marvell RVU NIC Physical Function Driver" @@ -1484,6 +1485,8 @@ static void otx2_free_sq_res(struct otx2_nic *pf) if (!sq->sqe) continue; qmem_free(pf->dev, sq->sqe); + qmem_free(pf->dev, sq->sqe_ring); + qmem_free(pf->dev, sq->cpt_resp); qmem_free(pf->dev, sq->tso_hdrs); kfree(sq->sg); kfree(sq->sqb_ptrs); @@ -1551,6 +1554,9 @@ int otx2_init_hw_resources(struct otx2_nic *pf) if (err) goto err_free_npa_lf; + /* Default disable backpressure on NIX-CPT */ + otx2_nix_cpt_config_bp(pf, false); + /* Enable backpressure for CGX mapped PF/VFs */ if (!is_otx2_lbkvf(pf->pdev)) otx2_nix_config_bp(pf, true); @@ -2273,6 +2279,10 @@ static int otx2_set_features(struct net_device *netdev, return otx2_enable_rxvlan(pf, features & NETIF_F_HW_VLAN_CTAG_RX); + if (changed & NETIF_F_HW_ESP) + return cn10k_ipsec_ethtool_init(netdev, + features & NETIF_F_HW_ESP); + return otx2_handle_ntuple_tc_features(netdev, features); } @@ -3162,10 +3172,14 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* reset CGX/RPM MAC stats */ otx2_reset_mac_stats(pf); + err = cn10k_ipsec_init(netdev); + if (err) + goto err_mcs_free; + err = register_netdev(netdev); if (err) { dev_err(dev, "Failed to register netdevice\n"); - goto err_mcs_free; + goto err_ipsec_clean; } err = otx2_wq_init(pf); @@ -3206,6 +3220,8 @@ err_mcam_flow_del: otx2_mcam_flow_del(pf); err_unreg_netdev: unregister_netdev(netdev); +err_ipsec_clean: + cn10k_ipsec_clean(pf); err_mcs_free: cn10k_mcs_free(pf); err_del_mcam_entries: @@ -3403,6 +3419,7 @@ static void otx2_remove(struct pci_dev *pdev) otx2_unregister_dl(pf); unregister_netdev(netdev); + cn10k_ipsec_clean(pf); cn10k_mcs_free(pf); otx2_sriov_disable(pf->pdev); otx2_sriov_vfcfg_cleanup(pf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 04bc06a80e23..224cef938927 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -11,6 +11,7 @@ #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <net/ip6_checksum.h> +#include <net/xfrm.h> #include "otx2_reg.h" #include "otx2_common.h" @@ -26,12 +27,25 @@ */ #define PTP_SYNC_SEC_OFFSET 34 +DEFINE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled); + static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, struct bpf_prog *prog, struct nix_cqe_rx_s *cqe, struct otx2_cq_queue *cq, bool *need_xdp_flush); +static void otx2_sq_set_sqe_base(struct otx2_snd_queue *sq, + struct sk_buff *skb) +{ + if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) && + (xfrm_offload(skb))) + sq->sqe_base = sq->sqe_ring->base + sq->sqe_size + + (sq->head * (sq->sqe_size * 2)); + else + sq->sqe_base = sq->sqe->base; +} + static int otx2_nix_cq_op_status(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) { @@ -80,38 +94,6 @@ static unsigned int frag_num(unsigned int i) #endif } -static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, - struct sk_buff *skb, int seg, int *len) -{ - const skb_frag_t *frag; - struct page *page; - int offset; - - /* First segment is always skb->data */ - if (!seg) { - page = virt_to_page(skb->data); - offset = 
offset_in_page(skb->data); - *len = skb_headlen(skb); - } else { - frag = &skb_shinfo(skb)->frags[seg - 1]; - page = skb_frag_page(frag); - offset = skb_frag_off(frag); - *len = skb_frag_size(frag); - } - return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE); -} - -static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg) -{ - int seg; - - for (seg = 0; seg < sg->num_segs; seg++) { - otx2_dma_unmap_page(pfvf, sg->dma_addr[seg], - sg->size[seg], DMA_TO_DEVICE); - } - sg->num_segs = 0; -} - static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, struct nix_cqe_tx_s *cqe) @@ -625,7 +607,6 @@ void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq, sq->head &= (sq->sqe_cnt - 1); } -#define MAX_SEGS_PER_SG 3 /* Add SQE scatter/gather subdescriptor structure */ static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, struct sk_buff *skb, int num_segs, int *offset) @@ -1161,6 +1142,7 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq, int offset, num_segs, free_desc; struct nix_sqe_hdr_s *sqe_hdr; struct otx2_nic *pfvf = dev; + bool ret; /* Check if there is enough room between producer * and consumer index. @@ -1177,6 +1159,7 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq, /* If SKB doesn't fit in a single SQE, linearize it. * TODO: Consider adding JUMP descriptor instead. */ + if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) { if (__skb_linearize(skb)) { dev_kfree_skb_any(skb); @@ -1196,6 +1179,9 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq, return true; } + /* Set sqe base address */ + otx2_sq_set_sqe_base(sq, skb); + /* Set SQE's SEND_HDR. * Do not clear the first 64bit as it contains constant info. */ @@ -1208,7 +1194,13 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq, otx2_sqe_add_ext(pfvf, sq, skb, &offset); /* Add SG subdesc with data frags */ - if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) { + if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) && + (xfrm_offload(skb))) + ret = otx2_sqe_add_sg_ipsec(pfvf, sq, skb, num_segs, &offset); + else + ret = otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset); + + if (!ret) { otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]); return false; } @@ -1217,11 +1209,15 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq, sqe_hdr->sizem1 = (offset / 16) - 1; + if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) && + (xfrm_offload(skb))) + return cn10k_ipsec_transmit(pfvf, txq, sq, skb, num_segs, + offset); + netdev_tx_sent_queue(txq, skb->len); /* Flush SQE to HW */ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); - return true; } EXPORT_SYMBOL(otx2_sq_append_skb); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index e1db5f961877..d23810963fdb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -101,6 +101,9 @@ struct otx2_snd_queue { struct queue_stats stats; u16 sqb_count; u64 *sqb_ptrs; + /* SQE ring and CPT response queue for Inline IPSEC */ + struct qmem *sqe_ring; + struct qmem *cpt_resp; } ____cacheline_aligned_in_smp; enum cq_type { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 839fc77c11b2..e926c6ce96cf 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -14,6 +14,7 @@ 
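To make the otx2_txrx.c changes above easier to follow: when a packet carries an xfrm offload state, the SQE is built in the second half of a doubled ring slot (CPT scatter/gather area first, NIX SQE after it), which is what otx2_sq_set_sqe_base() and the dptr computation encode. A user-space sketch of that offset arithmetic, assuming an illustrative 128-byte SQE size in place of sq->sqe_size:

#include <stdio.h>

#define SQE_SIZE 128	/* illustrative; the driver uses sq->sqe_size */

/* Byte offset of the CPT scatter/gather area for a given ring slot */
static unsigned int cpt_sg_offset(unsigned int head)
{
	return head * (SQE_SIZE * 2);
}

/* Byte offset of the NIX SQE inside the same doubled slot */
static unsigned int nix_sqe_offset(unsigned int head)
{
	return head * (SQE_SIZE * 2) + SQE_SIZE;
}

int main(void)
{
	for (unsigned int head = 0; head < 3; head++)
		printf("head=%u: cpt_sg@%u nix_sqe@%u\n",
		       head, cpt_sg_offset(head), nix_sqe_offset(head));
	return 0;
}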
#include "otx2_reg.h" #include "otx2_ptp.h" #include "cn10k.h" +#include "cn10k_ipsec.h" #define DRV_NAME "rvu_nicvf" #define DRV_STRING "Marvell RVU NIC Virtual Function Driver" @@ -693,10 +694,14 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) pdev->bus->number, n); } + err = cn10k_ipsec_init(netdev); + if (err) + goto err_ptp_destroy; + err = register_netdev(netdev); if (err) { dev_err(dev, "Failed to register netdevice\n"); - goto err_ptp_destroy; + goto err_ipsec_clean; } err = otx2_vf_wq_init(vf); @@ -730,6 +735,8 @@ err_shutdown_tc: otx2_shutdown_tc(vf); err_unreg_netdev: unregister_netdev(netdev); +err_ipsec_clean: + cn10k_ipsec_clean(vf); err_ptp_destroy: otx2_ptp_destroy(vf); err_detach_rsrc: @@ -782,6 +789,7 @@ static void otx2vf_remove(struct pci_dev *pdev) unregister_netdev(netdev); if (vf->otx2_wq) destroy_workqueue(vf->otx2_wq); + cn10k_ipsec_clean(vf); otx2_ptp_destroy(vf); otx2_mcam_flow_del(vf); otx2_shutdown_tc(vf); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index f07955b5439f..6a4a81c63451 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -192,6 +192,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, return -EOPNOTSUPP; } + if (sample_act_count) { + NL_SET_ERR_MSG_MOD(extack, "Mirror action after sample action is not supported"); + return -EOPNOTSUPP; + } + err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei, block, out_dev, extack); @@ -265,6 +270,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, return -EOPNOTSUPP; } + if (mirror_act_count) { + NL_SET_ERR_MSG_MOD(extack, "Sample action after mirror action is not supported"); + return -EOPNOTSUPP; + } + err = mlxsw_sp_acl_rulei_act_sample(mlxsw_sp, rulei, block, act->sample.psample_group, diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 558e03301aa8..8d48468cddd7 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -758,12 +758,13 @@ static int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid, bool is_static, void *data) { struct ocelot_dump_ctx *dump = data; + struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx; u32 portid = NETLINK_CB(dump->cb->skb).portid; u32 seq = dump->cb->nlh->nlmsg_seq; struct nlmsghdr *nlh; struct ndmsg *ndm; - if (dump->idx < dump->cb->args[2]) + if (dump->idx < ctx->fdb_idx) goto skip; nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 98e098c09c03..abba165738a3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2779,7 +2779,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn) break; } - netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000); + netdev->watchdog_timeo = secs_to_jiffies(5); /* MTU range: 68 - hw-specific max */ netdev->min_mtu = ETH_MIN_MTU; diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h index 1c61390677f7..0639bf56bd3a 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic.h +++ b/drivers/net/ethernet/pensando/ionic/ionic.h @@ -18,8 +18,6 @@ struct ionic_lif; #define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002 #define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003 -#define 
IONIC_ASIC_TYPE_ELBA 2 - #define DEVCMD_TIMEOUT 5 #define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index dda22fa4448c..720092b1633a 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -158,6 +158,20 @@ static int ionic_get_link_ksettings(struct net_device *netdev, 25000baseCR_Full); copper_seen++; break; + case IONIC_XCVR_PID_QSFP_50G_CR2_FC: + case IONIC_XCVR_PID_QSFP_50G_CR2: + ethtool_link_ksettings_add_link_mode(ks, supported, + 50000baseCR2_Full); + copper_seen++; + break; + case IONIC_XCVR_PID_QSFP_200G_CR4: + ethtool_link_ksettings_add_link_mode(ks, supported, 200000baseCR4_Full); + copper_seen++; + break; + case IONIC_XCVR_PID_QSFP_400G_CR4: + ethtool_link_ksettings_add_link_mode(ks, supported, 400000baseCR4_Full); + copper_seen++; + break; case IONIC_XCVR_PID_SFP_10GBASE_AOC: case IONIC_XCVR_PID_SFP_10GBASE_CU: ethtool_link_ksettings_add_link_mode(ks, supported, @@ -196,6 +210,31 @@ static int ionic_get_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseSR_Full); break; + case IONIC_XCVR_PID_QSFP_200G_AOC: + case IONIC_XCVR_PID_QSFP_200G_SR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 200000baseSR4_Full); + break; + case IONIC_XCVR_PID_QSFP_200G_FR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 200000baseLR4_ER4_FR4_Full); + break; + case IONIC_XCVR_PID_QSFP_200G_DR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 200000baseDR4_Full); + break; + case IONIC_XCVR_PID_QSFP_400G_FR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 400000baseLR4_ER4_FR4_Full); + break; + case IONIC_XCVR_PID_QSFP_400G_DR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 400000baseDR4_Full); + break; + case IONIC_XCVR_PID_QSFP_400G_SR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 400000baseSR4_Full); + break; case IONIC_XCVR_PID_SFP_10GBASE_SR: ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseSR_Full); @@ -929,6 +968,7 @@ static int ionic_get_module_info(struct net_device *netdev, break; case SFF8024_ID_QSFP_8436_8636: case SFF8024_ID_QSFP28_8636: + case SFF8024_ID_QSFP_PLUS_CMIS: modinfo->type = ETH_MODULE_SFF_8436; modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; break; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h index 9c85c0706c6e..830c8adbfbee 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_if.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h @@ -1277,7 +1277,10 @@ enum ionic_xcvr_pid { IONIC_XCVR_PID_SFP_25GBASE_CR_S = 3, IONIC_XCVR_PID_SFP_25GBASE_CR_L = 4, IONIC_XCVR_PID_SFP_25GBASE_CR_N = 5, - + IONIC_XCVR_PID_QSFP_50G_CR2_FC = 6, + IONIC_XCVR_PID_QSFP_50G_CR2 = 7, + IONIC_XCVR_PID_QSFP_200G_CR4 = 8, + IONIC_XCVR_PID_QSFP_400G_CR4 = 9, /* Fiber */ IONIC_XCVR_PID_QSFP_100G_AOC = 50, IONIC_XCVR_PID_QSFP_100G_ACC = 51, @@ -1303,6 +1306,15 @@ enum ionic_xcvr_pid { IONIC_XCVR_PID_SFP_25GBASE_ACC = 71, IONIC_XCVR_PID_SFP_10GBASE_T = 72, IONIC_XCVR_PID_SFP_1000BASE_T = 73, + IONIC_XCVR_PID_QSFP_200G_AOC = 74, + IONIC_XCVR_PID_QSFP_200G_FR4 = 75, + IONIC_XCVR_PID_QSFP_200G_DR4 = 76, + IONIC_XCVR_PID_QSFP_200G_SR4 = 77, + IONIC_XCVR_PID_QSFP_200G_ACC = 78, + IONIC_XCVR_PID_QSFP_400G_FR4 = 79, + IONIC_XCVR_PID_QSFP_400G_DR4 = 80, + IONIC_XCVR_PID_QSFP_400G_SR4 = 81, + IONIC_XCVR_PID_QSFP_400G_VR4 = 82, }; /** @@ 
-1404,6 +1416,8 @@ struct ionic_xcvr_status { */ union ionic_port_config { struct { +#define IONIC_SPEED_400G 400000 /* 400G in Mbps */ +#define IONIC_SPEED_200G 200000 /* 200G in Mbps */ #define IONIC_SPEED_100G 100000 /* 100G in Mbps */ #define IONIC_SPEED_50G 50000 /* 50G in Mbps */ #define IONIC_SPEED_40G 40000 /* 40G in Mbps */ @@ -3209,7 +3223,11 @@ union ionic_adminq_comp { #define IONIC_BAR0_INTR_CTRL_OFFSET 0x2000 #define IONIC_DEV_CMD_DONE 0x00000001 -#define IONIC_ASIC_TYPE_CAPRI 0 +#define IONIC_ASIC_TYPE_NONE 0 +#define IONIC_ASIC_TYPE_CAPRI 1 +#define IONIC_ASIC_TYPE_ELBA 2 +#define IONIC_ASIC_TYPE_GIGLIO 3 +#define IONIC_ASIC_TYPE_SALINA 4 /** * struct ionic_doorbell - Doorbell register layout diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 40496587b2b3..052c767a2c75 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -3265,7 +3265,7 @@ int ionic_lif_alloc(struct ionic *ionic) lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU, le32_to_cpu(lif->identity->eth.min_frame_size)); lif->netdev->max_mtu = - le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN; + le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN; lif->neqs = ionic->neqs_per_lif; lif->nxqs = ionic->ntxqs_per_lif; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index 0f817c3f92d8..daf1e82cb76b 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -81,8 +81,9 @@ static int ionic_error_to_errno(enum ionic_status_code code) case IONIC_RC_EQTYPE: case IONIC_RC_EQID: case IONIC_RC_EINVAL: - case IONIC_RC_ENOSUPP: return -EINVAL; + case IONIC_RC_ENOSUPP: + return -EOPNOTSUPP; case IONIC_RC_EPERM: return -EPERM; case IONIC_RC_ENOENT: diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h index be4c9622618d..8904aae41aca 100644 --- a/drivers/net/ethernet/realtek/r8169.h +++ b/drivers/net/ethernet/realtek/r8169.h @@ -23,7 +23,7 @@ enum mac_version { RTL_GIGA_MAC_VER_08, RTL_GIGA_MAC_VER_09, RTL_GIGA_MAC_VER_10, - RTL_GIGA_MAC_VER_11, + /* support for RTL_GIGA_MAC_VER_11 has been removed */ /* RTL_GIGA_MAC_VER_12 was handled the same as VER_17 */ /* RTL_GIGA_MAC_VER_13 was merged with VER_10 */ RTL_GIGA_MAC_VER_14, diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 739707a7b40f..6934bdee2a91 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -104,7 +104,6 @@ static const struct { [RTL_GIGA_MAC_VER_08] = {"RTL8102e" }, [RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" }, [RTL_GIGA_MAC_VER_10] = {"RTL8101e/RTL8100e" }, - [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" }, [RTL_GIGA_MAC_VER_14] = {"RTL8401" }, [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" }, [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" }, @@ -623,7 +622,6 @@ struct rtl8169_tc_offsets { enum rtl_flag { RTL_FLAG_TASK_RESET_PENDING, - RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, RTL_FLAG_TASK_TX_TIMEOUT, RTL_FLAG_MAX }; @@ -2336,7 +2334,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) /* 8168B family. */ { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 }, - /* This one is very old and rare, let's see if anybody complains. + /* This one is very old and rare, support has been removed. 
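Returning briefly to the ionic max_mtu change above: it is purely cosmetic, because VLAN_ETH_HLEN is defined as ETH_HLEN + VLAN_HLEN, so the computed MTU is unchanged. A trivial check of the constants (header values as in if_ether.h/if_vlan.h; the 9216-byte max frame is hypothetical):

#include <stdio.h>

#define ETH_HLEN	14	/* Ethernet header */
#define VLAN_HLEN	 4	/* 802.1Q tag */
#define VLAN_ETH_HLEN	18	/* Ethernet header + 802.1Q tag */

int main(void)
{
	unsigned int max_frame = 9216;	/* hypothetical device max frame size */

	printf("identity holds: %d\n", ETH_HLEN + VLAN_HLEN == VLAN_ETH_HLEN);
	printf("max_mtu = %u\n", max_frame - VLAN_ETH_HLEN);	/* 9198 */
	return 0;
}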
* { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 }, */ @@ -3804,7 +3802,6 @@ static void rtl_hw_config(struct rtl8169_private *tp) [RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3, [RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2, [RTL_GIGA_MAC_VER_10] = NULL, - [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b, [RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401, [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b, [RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1, @@ -4680,12 +4677,6 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) if (status & LinkChg) phy_mac_interrupt(tp->phydev); - if (unlikely(status & RxFIFOOver && - tp->mac_version == RTL_GIGA_MAC_VER_11)) { - netif_stop_queue(tp->dev); - rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); - } - rtl_irq_disable(tp); napi_schedule(&tp->napi); out: @@ -4723,8 +4714,6 @@ static void rtl_task(struct work_struct *work) reset: rtl_reset_work(tp); netif_wake_queue(tp->dev); - } else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) { - rtl_reset_work(tp); } } @@ -5103,9 +5092,6 @@ static void rtl_set_irq_mask(struct rtl8169_private *tp) if (tp->mac_version <= RTL_GIGA_MAC_VER_06) tp->irq_mask |= SYSErr | RxFIFOOver; - else if (tp->mac_version == RTL_GIGA_MAC_VER_11) - /* special workaround needed */ - tp->irq_mask |= RxFIFOOver; } static int rtl_alloc_irq(struct rtl8169_private *tp) @@ -5300,7 +5286,6 @@ static int rtl_jumbo_max(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: return JUMBO_7K; /* RTL8168b */ - case RTL_GIGA_MAC_VER_11: case RTL_GIGA_MAC_VER_17: return JUMBO_4K; /* RTL8168c */ @@ -5347,13 +5332,6 @@ static bool rtl_aspm_is_safe(struct rtl8169_private *tp) return false; } -static umode_t r8169_hwmon_is_visible(const void *drvdata, - enum hwmon_sensor_types type, - u32 attr, int channel) -{ - return 0444; -} - static int r8169_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { @@ -5370,7 +5348,7 @@ static int r8169_hwmon_read(struct device *dev, enum hwmon_sensor_types type, } static const struct hwmon_ops r8169_hwmon_ops = { - .is_visible = r8169_hwmon_is_visible, + .visible = 0444, .read = r8169_hwmon_read, }; diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c index 5307c6ff4e25..b28b30390e84 100644 --- a/drivers/net/ethernet/realtek/r8169_phy_config.c +++ b/drivers/net/ethernet/realtek/r8169_phy_config.c @@ -276,15 +276,6 @@ static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp, rtl_writephy_batch(phydev, phy_reg_init); } -static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp, - struct phy_device *phydev) -{ - phy_write(phydev, 0x1f, 0x0001); - phy_set_bits(phydev, 0x16, BIT(0)); - phy_write(phydev, 0x10, 0xf41b); - phy_write(phydev, 0x1f, 0x0000); -} - static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev) { @@ -1136,7 +1127,6 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config, [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config, [RTL_GIGA_MAC_VER_10] = NULL, - [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config, [RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config, [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config, [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config, diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h index dbc3f92eebc4..2bbfcad613ab 100644 --- a/drivers/net/ethernet/realtek/rtase/rtase.h +++ 
b/drivers/net/ethernet/realtek/rtase/rtase.h @@ -13,6 +13,7 @@ #define RTASE_HW_VER_906X_7XA 0x00800000 #define RTASE_HW_VER_906X_7XC 0x04000000 #define RTASE_HW_VER_907XD_V1 0x04800000 +#define RTASE_HW_VER_907XD_VA 0x08000000 #define RTASE_RX_DMA_BURST_256 4 #define RTASE_TX_DMA_BURST_UNLIMITED 7 diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c index de7f11232593..585d0b21c9e0 100644 --- a/drivers/net/ethernet/realtek/rtase/rtase_main.c +++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c @@ -1725,6 +1725,7 @@ static int rtase_get_settings(struct net_device *dev, cmd->base.speed = SPEED_5000; break; case RTASE_HW_VER_907XD_V1: + case RTASE_HW_VER_907XD_VA: cmd->base.speed = SPEED_10000; break; } @@ -1993,6 +1994,7 @@ static int rtase_check_mac_version_valid(struct rtase_private *tp) case RTASE_HW_VER_906X_7XA: case RTASE_HW_VER_906X_7XC: case RTASE_HW_VER_907XD_V1: + case RTASE_HW_VER_907XD_VA: ret = 0; break; } @@ -2016,7 +2018,7 @@ static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out, SET_NETDEV_DEV(dev, &pdev->dev); ret = pci_enable_device(pdev); - if (ret < 0) + if (ret) goto err_out_free_dev; /* make sure PCI base addr 1 is MMIO */ @@ -2032,7 +2034,7 @@ static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out, } ret = pci_request_regions(pdev, KBUILD_MODNAME); - if (ret < 0) + if (ret) goto err_out_disable; ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); @@ -2108,7 +2110,7 @@ static int rtase_init_one(struct pci_dev *pdev, dev_dbg(&pdev->dev, "Automotive Switch Ethernet driver loaded\n"); ret = rtase_init_board(pdev, &dev, &ioaddr); - if (ret != 0) + if (ret) return ret; tp = netdev_priv(dev); @@ -2118,7 +2120,7 @@ static int rtase_init_one(struct pci_dev *pdev, /* identify chip attached to board */ ret = rtase_check_mac_version_valid(tp); - if (ret != 0) { + if (ret) { dev_err(&pdev->dev, "unknown chip version: 0x%08x, contact rtase maintainers (see MAINTAINERS file)\n", tp->hw_ver); @@ -2129,7 +2131,7 @@ static int rtase_init_one(struct pci_dev *pdev, rtase_init_hardware(tp); ret = rtase_alloc_interrupt(pdev, tp); - if (ret < 0) { + if (ret) { dev_err(&pdev->dev, "unable to alloc MSIX/MSI\n"); goto err_out_del_napi; } @@ -2174,7 +2176,7 @@ static int rtase_init_one(struct pci_dev *pdev, netif_carrier_off(dev); ret = register_netdev(dev); - if (ret != 0) + if (ret) goto err_out_free_dma; netdev_dbg(dev, "%pM, IRQ %d\n", dev->dev_addr, dev->irq); diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index dbbbf024e7ab..5fc8c94d1e4b 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -111,25 +111,35 @@ static void rswitch_top_init(struct rswitch_private *priv) /* Forwarding engine block (MFWD) */ static void rswitch_fwd_init(struct rswitch_private *priv) { + u32 all_ports_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0); unsigned int i; - /* For ETHA */ - for (i = 0; i < RSWITCH_NUM_PORTS; i++) { - iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i)); + /* Start with empty configuration */ + for (i = 0; i < RSWITCH_NUM_AGENTS; i++) { + /* Disable all port features */ + iowrite32(0, priv->addr + FWPC0(i)); + /* Disallow L3 forwarding and direct descriptor forwarding */ + iowrite32(FIELD_PREP(FWCP1_LTHFW, all_ports_mask), + priv->addr + FWPC1(i)); + /* Disallow L2 forwarding */ + iowrite32(FIELD_PREP(FWCP2_LTWFW, all_ports_mask), + priv->addr + FWPC2(i)); + /* Disallow port based forwarding 
*/ iowrite32(0, priv->addr + FWPBFC(i)); } - for (i = 0; i < RSWITCH_NUM_PORTS; i++) { + /* For enabled ETHA ports, setup port based forwarding */ + rswitch_for_each_enabled_port(priv, i) { + /* Port based forwarding from port i to GWCA port */ + rswitch_modify(priv->addr, FWPBFC(i), FWPBFC_PBDV, + FIELD_PREP(FWPBFC_PBDV, BIT(priv->gwca.index))); + /* Within GWCA port, forward to Rx queue for port i */ iowrite32(priv->rdev[i]->rx_queue->index, priv->addr + FWPBFCSDC(GWCA_INDEX, i)); - iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i)); } - /* For GWCA */ - iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index)); - iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index)); - iowrite32(0, priv->addr + FWPBFC(priv->gwca.index)); - iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index)); + /* For GWCA port, allow direct descriptor forwarding */ + rswitch_modify(priv->addr, FWPC1(priv->gwca.index), FWPC1_DDE, FWPC1_DDE); } /* Gateway CPU agent block (GWCA) */ @@ -1544,7 +1554,7 @@ static void rswitch_ether_port_deinit_all(struct rswitch_private *priv) { unsigned int i; - for (i = 0; i < RSWITCH_NUM_PORTS; i++) { + rswitch_for_each_enabled_port(priv, i) { phy_exit(priv->rdev[i]->serdes); rswitch_ether_port_deinit_one(priv->rdev[i]); } @@ -1920,9 +1930,6 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index if (err < 0) goto out_get_params; - if (rdev->priv->gwca.speed < rdev->etha->speed) - rdev->priv->gwca.speed = rdev->etha->speed; - err = rswitch_rxdmac_alloc(ndev); if (err < 0) goto out_rxdmac; diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h index e020800dcc57..4b1489100330 100644 --- a/drivers/net/ethernet/renesas/rswitch.h +++ b/drivers/net/ethernet/renesas/rswitch.h @@ -12,6 +12,7 @@ #define RSWITCH_MAX_NUM_QUEUES 128 +#define RSWITCH_NUM_AGENTS 5 #define RSWITCH_NUM_PORTS 3 #define rswitch_for_each_enabled_port(priv, i) \ for (i = 0; i < RSWITCH_NUM_PORTS; i++) \ @@ -806,6 +807,7 @@ enum rswitch_gwca_mode { #define CABPPFLC_INIT_VALUE 0x00800080 /* MFWD */ +#define FWPC0(i) (FWPC00 + (i) * 0x10) #define FWPC0_LTHTA BIT(0) #define FWPC0_IP4UE BIT(3) #define FWPC0_IP4TE BIT(4) @@ -819,15 +821,15 @@ enum rswitch_gwca_mode { #define FWPC0_MACHMA BIT(27) #define FWPC0_VLANSA BIT(28) -#define FWPC0(i) (FWPC00 + (i) * 0x10) -#define FWPC0_DEFAULT (FWPC0_LTHTA | FWPC0_IP4UE | FWPC0_IP4TE | \ - FWPC0_IP4OE | FWPC0_L2SE | FWPC0_IP4EA | \ - FWPC0_IPDSA | FWPC0_IPHLA | FWPC0_MACSDA | \ - FWPC0_MACHLA | FWPC0_MACHMA | FWPC0_VLANSA) #define FWPC1(i) (FWPC10 + (i) * 0x10) +#define FWCP1_LTHFW GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16) #define FWPC1_DDE BIT(0) -#define FWPBFC(i) (FWPBFC0 + (i) * 0x10) +#define FWPC2(i) (FWPC20 + (i) * 0x10) +#define FWCP2_LTWFW GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16) + +#define FWPBFC(i) (FWPBFC0 + (i) * 0x10) +#define FWPBFC_PBDV GENMASK(RSWITCH_NUM_AGENTS - 1, 0) #define FWPBFCSDC(j, i) (FWPBFCSDC00 + (i) * 0x10 + (j) * 0x04) @@ -993,7 +995,6 @@ struct rswitch_gwca { DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES); u32 tx_irq_bits[RSWITCH_NUM_IRQ_REGS]; u32 rx_irq_bits[RSWITCH_NUM_IRQ_REGS]; - int speed; }; #define NUM_QUEUES_PER_NDEV 2 diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 6658536a4e17..4cc85a36a1ab 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -154,6 +154,18 @@ config DWMAC_RZN1 the stmmac device driver. 
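Stepping back to the rswitch_fwd_init() rework above: the new code builds its forwarding masks with GENMASK() and FIELD_PREP(). A user-space sketch of that bit manipulation for the five-agent case; GENMASK is simplified from linux/bits.h and FIELD_PREP is open-coded as a shift into the mask, so these are illustrative stand-ins rather than the kernel helpers themselves:

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define NUM_AGENTS	5

/* FWPC1 LTHFW occupies bits 20:16, i.e. GENMASK(16 + NUM_AGENTS - 1, 16) */
#define LTHFW_MASK	GENMASK(16 + NUM_AGENTS - 1, 16)

int main(void)
{
	uint32_t all_ports = GENMASK(NUM_AGENTS - 1, 0);	/* 0x1f */
	uint32_t lthfw = (all_ports << 16) & LTHFW_MASK;	/* what FIELD_PREP() produces */

	printf("all_ports=0x%x lthfw=0x%x\n", all_ports, lthfw);	/* 0x1f 0x1f0000 */
	return 0;
}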
This support can make use of a custom MII converter PCS device. +config DWMAC_S32 + tristate "NXP S32G/S32R GMAC support" + default ARCH_S32 + depends on OF && (ARCH_S32 || COMPILE_TEST) + help + Support for ethernet controller on NXP S32CC SOCs. + + This selects NXP SoC glue layer support for the stmmac + device driver. This driver is used for the S32CC series + SOCs GMAC ethernet controller, ie. S32G2xx, S32G3xx and + S32R45. + config DWMAC_SOCFPGA tristate "SOCFPGA dwmac support" default ARCH_INTEL_SOCFPGA diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 2389fd261344..b26f0e79c2b3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o obj-$(CONFIG_DWMAC_RZN1) += dwmac-rzn1.o +obj-$(CONFIG_DWMAC_S32) += dwmac-s32.o obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o obj-$(CONFIG_DWMAC_STARFIVE) += dwmac-starfive.o obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 1367fa5c9b8e..e25db747a81a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -257,6 +257,8 @@ struct stmmac_safety_stats { #define CSR_F_150M 150000000 #define CSR_F_250M 250000000 #define CSR_F_300M 300000000 +#define CSR_F_500M 500000000 +#define CSR_F_800M 800000000 #define MAC_CSR_H_FRQ_MASK 0x20 @@ -543,18 +545,8 @@ struct dma_features { #define STMMAC_VLAN_INSERT 0x2 #define STMMAC_VLAN_REPLACE 0x3 -extern const struct stmmac_desc_ops enh_desc_ops; -extern const struct stmmac_desc_ops ndesc_ops; - struct mac_device_info; -extern const struct stmmac_hwtimestamp stmmac_ptp; -extern const struct stmmac_hwtimestamp dwmac1000_ptp; -extern const struct stmmac_mode_ops dwmac4_ring_mode_ops; - -extern const struct ptp_clock_info stmmac_ptp_clock_ops; -extern const struct ptp_clock_info dwmac1000_ptp_clock_ops; - struct mac_link { u32 caps; u32 speed_mask; @@ -641,8 +633,4 @@ void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable); void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); -extern const struct stmmac_mode_ops ring_mode_ops; -extern const struct stmmac_mode_ops chain_mode_ops; -extern const struct stmmac_desc_ops dwmac4_desc_ops; - #endif /* __COMMON_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index 83290e707df5..bd4eb187f8c6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -181,24 +181,19 @@ static void dwc_qos_remove(struct platform_device *pdev) static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mode) { struct tegra_eqos *eqos = priv; - unsigned long rate = 125000000; bool needs_calibration = false; + long rate = 125000000; u32 value; int err; switch (speed) { case SPEED_1000: - needs_calibration = true; - rate = 125000000; - break; - case SPEED_100: needs_calibration = true; - rate = 25000000; - break; + fallthrough; case SPEED_10: - rate = 2500000; + rate = rgmii_clock(speed); break; default: diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c index 641f3cd019a3..43e0fbba4f77 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c @@ -186,7 +186,7 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod { struct plat_stmmacenet_data *plat_dat; struct imx_priv_data *dwmac = priv; - unsigned long rate; + long rate; int err; plat_dat = dwmac->plat_dat; @@ -196,17 +196,8 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod (plat_dat->mac_interface == PHY_INTERFACE_MODE_MII)) return; - switch (speed) { - case SPEED_1000: - rate = 125000000; - break; - case SPEED_100: - rate = 25000000; - break; - case SPEED_10: - rate = 2500000; - break; - default: + rate = rgmii_clock(speed); + if (rate < 0) { dev_err(dwmac->dev, "invalid speed %u\n", speed); return; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c index d94f0a150e93..ddee6154d40b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c @@ -31,27 +31,13 @@ struct intel_dwmac_data { static void kmb_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) { struct intel_dwmac *dwmac = priv; - unsigned long rate; + long rate; int ret; - rate = clk_get_rate(dwmac->tx_clk); - - switch (speed) { - case SPEED_1000: - rate = 125000000; - break; - - case SPEED_100: - rate = 25000000; - break; - - case SPEED_10: - rate = 2500000; - break; - - default: + rate = rgmii_clock(speed); + if (rate < 0) { dev_err(dwmac->dev, "Invalid speed\n"); - break; + return; } ret = clk_set_rate(dwmac->tx_clk, rate); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 901a3c1959fa..2a5b38723635 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -777,7 +777,7 @@ static void ethqos_ptp_clk_freq_config(struct stmmac_priv *priv) netdev_err(priv->dev, "Failed to max out clk_ptp_ref: %d\n", err); plat_dat->clk_ptp_rate = clk_get_rate(plat_dat->clk_ptp_ref); - netdev_dbg(priv->dev, "PTP rate %d\n", plat_dat->clk_ptp_rate); + netdev_dbg(priv->dev, "PTP rate %lu\n", plat_dat->clk_ptp_rate); } static int qcom_ethqos_probe(struct platform_device *pdev) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 8cb374668b74..a4dc89e23a68 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1079,20 +1079,11 @@ static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed) { struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk; struct device *dev = &bsp_priv->pdev->dev; - unsigned long rate; + long rate; int ret; - switch (speed) { - case 10: - rate = 2500000; - break; - case 100: - rate = 25000000; - break; - case 1000: - rate = 125000000; - break; - default: + rate = rgmii_clock(speed); + if (rate < 0) { dev_err(dev, "unknown speed value for GMAC speed=%d", speed); return; } @@ -1540,20 +1531,11 @@ static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) { struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk; struct device *dev = &bsp_priv->pdev->dev; - unsigned long rate; + long rate; int ret; - switch (speed) { - case 10: - rate = 2500000; - break; - case 100: - rate = 25000000; - break; - case 1000: - rate = 125000000; - break; - default: + rate = 
rgmii_clock(speed); + if (rate < 0) { dev_err(dev, "unknown speed value for RGMII speed=%d", speed); return; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c new file mode 100644 index 000000000000..9cc0e5817416 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NXP S32G/R GMAC glue layer + * + * Copyright 2019-2024 NXP + * + */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/device.h> +#include <linux/ethtool.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_mdio.h> +#include <linux/of_address.h> +#include <linux/phy.h> +#include <linux/phylink.h> +#include <linux/platform_device.h> +#include <linux/stmmac.h> + +#include "stmmac_platform.h" + +#define GMAC_INTF_RATE_125M 125000000 /* 125MHz */ + +/* SoC PHY interface control register */ +#define PHY_INTF_SEL_MII 0x00 +#define PHY_INTF_SEL_SGMII 0x01 +#define PHY_INTF_SEL_RGMII 0x02 +#define PHY_INTF_SEL_RMII 0x08 + +struct s32_priv_data { + void __iomem *ioaddr; + void __iomem *ctrl_sts; + struct device *dev; + phy_interface_t *intf_mode; + struct clk *tx_clk; + struct clk *rx_clk; +}; + +static int s32_gmac_write_phy_intf_select(struct s32_priv_data *gmac) +{ + writel(PHY_INTF_SEL_RGMII, gmac->ctrl_sts); + + dev_dbg(gmac->dev, "PHY mode set to %s\n", phy_modes(*gmac->intf_mode)); + + return 0; +} + +static int s32_gmac_init(struct platform_device *pdev, void *priv) +{ + struct s32_priv_data *gmac = priv; + int ret; + + /* Set initial TX interface clock */ + ret = clk_prepare_enable(gmac->tx_clk); + if (ret) { + dev_err(&pdev->dev, "Can't enable tx clock\n"); + return ret; + } + ret = clk_set_rate(gmac->tx_clk, GMAC_INTF_RATE_125M); + if (ret) { + dev_err(&pdev->dev, "Can't set tx clock\n"); + goto err_tx_disable; + } + + /* Set initial RX interface clock */ + ret = clk_prepare_enable(gmac->rx_clk); + if (ret) { + dev_err(&pdev->dev, "Can't enable rx clock\n"); + goto err_tx_disable; + } + ret = clk_set_rate(gmac->rx_clk, GMAC_INTF_RATE_125M); + if (ret) { + dev_err(&pdev->dev, "Can't set rx clock\n"); + goto err_txrx_disable; + } + + /* Set interface mode */ + ret = s32_gmac_write_phy_intf_select(gmac); + if (ret) { + dev_err(&pdev->dev, "Can't set PHY interface mode\n"); + goto err_txrx_disable; + } + + return 0; + +err_txrx_disable: + clk_disable_unprepare(gmac->rx_clk); +err_tx_disable: + clk_disable_unprepare(gmac->tx_clk); + return ret; +} + +static void s32_gmac_exit(struct platform_device *pdev, void *priv) +{ + struct s32_priv_data *gmac = priv; + + clk_disable_unprepare(gmac->tx_clk); + clk_disable_unprepare(gmac->rx_clk); +} + +static void s32_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) +{ + struct s32_priv_data *gmac = priv; + long tx_clk_rate; + int ret; + + tx_clk_rate = rgmii_clock(speed); + if (tx_clk_rate < 0) { + dev_err(gmac->dev, "Unsupported/Invalid speed: %d\n", speed); + return; + } + + dev_dbg(gmac->dev, "Set tx clock to %ld Hz\n", tx_clk_rate); + ret = clk_set_rate(gmac->tx_clk, tx_clk_rate); + if (ret) + dev_err(gmac->dev, "Can't set tx clock\n"); +} + +static int s32_dwmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat; + struct device *dev = &pdev->dev; + struct stmmac_resources res; + struct s32_priv_data *gmac; + int ret; + + gmac = devm_kzalloc(&pdev->dev, sizeof(*gmac), GFP_KERNEL); + if (!gmac) + return -ENOMEM; + + gmac->dev = &pdev->dev; + + ret = 
stmmac_get_platform_resources(pdev, &res); + if (ret) + return dev_err_probe(dev, ret, + "Failed to get platform resources\n"); + + plat = devm_stmmac_probe_config_dt(pdev, res.mac); + if (IS_ERR(plat)) + return dev_err_probe(dev, PTR_ERR(plat), + "dt configuration failed\n"); + + /* PHY interface mode control reg */ + gmac->ctrl_sts = devm_platform_get_and_ioremap_resource(pdev, 1, NULL); + if (IS_ERR(gmac->ctrl_sts)) + return dev_err_probe(dev, PTR_ERR(gmac->ctrl_sts), + "S32CC config region is missing\n"); + + /* tx clock */ + gmac->tx_clk = devm_clk_get(&pdev->dev, "tx"); + if (IS_ERR(gmac->tx_clk)) + return dev_err_probe(dev, PTR_ERR(gmac->tx_clk), + "tx clock not found\n"); + + /* rx clock */ + gmac->rx_clk = devm_clk_get(&pdev->dev, "rx"); + if (IS_ERR(gmac->rx_clk)) + return dev_err_probe(dev, PTR_ERR(gmac->rx_clk), + "rx clock not found\n"); + + gmac->intf_mode = &plat->phy_interface; + gmac->ioaddr = res.addr; + + /* S32CC core feature set */ + plat->has_gmac4 = true; + plat->pmt = 1; + plat->flags |= STMMAC_FLAG_SPH_DISABLE; + plat->rx_fifo_size = 20480; + plat->tx_fifo_size = 20480; + + plat->init = s32_gmac_init; + plat->exit = s32_gmac_exit; + plat->fix_mac_speed = s32_fix_mac_speed; + + plat->bsp_priv = gmac; + + return stmmac_pltfr_probe(pdev, plat, &res); +} + +static const struct of_device_id s32_dwmac_match[] = { + { .compatible = "nxp,s32g2-dwmac" }, + { } +}; +MODULE_DEVICE_TABLE(of, s32_dwmac_match); + +static struct platform_driver s32_dwmac_driver = { + .probe = s32_dwmac_probe, + .remove = stmmac_pltfr_remove, + .driver = { + .name = "s32-dwmac", + .pm = &stmmac_pltfr_pm_ops, + .of_match_table = s32_dwmac_match, + }, +}; +module_platform_driver(s32_dwmac_driver); + +MODULE_AUTHOR("Jan Petrous (OSS) <jan.petrous@oss.nxp.com>"); +MODULE_DESCRIPTION("NXP S32G/R common chassis GMAC driver"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c index 421666279dd3..0a0a363d3730 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c @@ -34,24 +34,13 @@ struct starfive_dwmac { static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) { struct starfive_dwmac *dwmac = priv; - unsigned long rate; + long rate; int err; - rate = clk_get_rate(dwmac->clk_tx); - - switch (speed) { - case SPEED_1000: - rate = 125000000; - break; - case SPEED_100: - rate = 25000000; - break; - case SPEED_10: - rate = 2500000; - break; - default: + rate = rgmii_clock(speed); + if (rate < 0) { dev_err(dwmac->dev, "invalid speed %u\n", speed); - break; + return; } err = clk_set_rate(dwmac->clk_tx, rate); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index a6ff02d905a9..eabc4da9e1a9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -21,10 +21,7 @@ #include "stmmac_platform.h" -#define DWMAC_125MHZ 125000000 #define DWMAC_50MHZ 50000000 -#define DWMAC_25MHZ 25000000 -#define DWMAC_2_5MHZ 2500000 #define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \ iface == PHY_INTERFACE_MODE_RGMII_ID || \ @@ -140,7 +137,7 @@ static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode) struct sti_dwmac *dwmac = priv; u32 src = dwmac->tx_retime_src; u32 reg = dwmac->ctrl_reg; - u32 freq = 0; + long freq = 0; if (dwmac->interface == PHY_INTERFACE_MODE_MII) { src 
= TX_RETIME_SRC_TXCLK; @@ -153,19 +150,14 @@ static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode) } } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) { /* On GiGa clk source can be either ext or from clkgen */ - if (spd == SPEED_1000) { - freq = DWMAC_125MHZ; - } else { + freq = rgmii_clock(spd); + + if (spd != SPEED_1000 && freq > 0) /* Switch to clkgen for these speeds */ src = TX_RETIME_SRC_CLKGEN; - if (spd == SPEED_100) - freq = DWMAC_25MHZ; - else if (spd == SPEED_10) - freq = DWMAC_2_5MHZ; - } } - if (src == TX_RETIME_SRC_CLKGEN && freq) + if (src == TX_RETIME_SRC_CLKGEN && freq > 0) clk_set_rate(dwmac->clk, freq); regmap_update_bits(dwmac->regmap, reg, STIH4XX_RETIME_SRC_MASK, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index c25781874aa7..c36f90a782c5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -27,7 +27,7 @@ static void dwmac4_core_init(struct mac_device_info *hw, struct stmmac_priv *priv = netdev_priv(dev); void __iomem *ioaddr = hw->pcsr; u32 value = readl(ioaddr + GMAC_CONFIG); - u32 clk_rate; + unsigned long clk_rate; value |= GMAC_CORE_INIT; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h index 1ce6f43d545a..806555976496 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h @@ -144,4 +144,7 @@ /* TDS3 use for both format (read and write back) */ #define RDES3_OWN BIT(31) +extern const struct stmmac_mode_ops dwmac4_ring_mode_ops; +extern const struct stmmac_desc_ops dwmac4_desc_ops; + #endif /* __DWMAC4_DESCS_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index a04a79003692..20027d3c25a7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -493,4 +493,9 @@ #define XGMAC_RDES3_TSD BIT(6) #define XGMAC_RDES3_TSA BIT(4) +extern const struct stmmac_ops dwxgmac210_ops; +extern const struct stmmac_ops dwxlgmac2_ops; +extern const struct stmmac_dma_ops dwxgmac210_dma_ops; +extern const struct stmmac_desc_ops dwxgmac210_desc_ops; + #endif /* __STMMAC_DWXGMAC2_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index a72d336a8350..4bd79de2e222 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -9,6 +9,8 @@ #include "stmmac_fpe.h" #include "stmmac_ptp.h" #include "stmmac_est.h" +#include "dwmac4_descs.h" +#include "dwxgmac2.h" static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg) { diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 64f8ed67dcc4..e428c82b7d31 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -665,6 +665,15 @@ struct stmmac_regs_off { u32 est_off; }; +extern const struct stmmac_desc_ops enh_desc_ops; +extern const struct stmmac_desc_ops ndesc_ops; + +extern const struct stmmac_hwtimestamp stmmac_ptp; +extern const struct stmmac_hwtimestamp dwmac1000_ptp; + +extern const struct stmmac_mode_ops ring_mode_ops; +extern const struct stmmac_mode_ops chain_mode_ops; + extern const struct stmmac_ops dwmac100_ops; extern const struct stmmac_dma_ops dwmac100_dma_ops; extern const struct stmmac_ops dwmac1000_ops; @@ 
-677,13 +686,6 @@ extern const struct stmmac_ops dwmac510_ops; extern const struct stmmac_tc_ops dwmac4_tc_ops; extern const struct stmmac_tc_ops dwmac510_tc_ops; extern const struct stmmac_tc_ops dwxgmac_tc_ops; -extern const struct stmmac_ops dwxgmac210_ops; -extern const struct stmmac_ops dwxlgmac2_ops; -extern const struct stmmac_dma_ops dwxgmac210_dma_ops; -extern const struct stmmac_desc_ops dwxgmac210_desc_ops; -extern const struct stmmac_mmc_ops dwmac_mmc_ops; -extern const struct stmmac_mmc_ops dwxgmac_mmc_ops; -extern const struct stmmac_est_ops dwmac510_est_ops; #define GMAC_VERSION 0x00000020 /* GMAC CORE Version */ #define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */ diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h index 5d1ea3e07459..1cba39fb2c44 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc.h +++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h @@ -139,4 +139,7 @@ struct stmmac_counters { unsigned int mmc_rx_fpe_fragment_cntr; }; +extern const struct stmmac_mmc_ops dwmac_mmc_ops; +extern const struct stmmac_mmc_ops dwxgmac_mmc_ops; + #endif /* __MMC_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h index 7a858c566e7e..d247fa383a6e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h @@ -62,3 +62,5 @@ #define EST_SRWO BIT(0) #define EST_GCL_DATA 0x00000034 + +extern const struct stmmac_est_ops dwmac510_est_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c81ea8cdfe6e..16b8bcfa8b11 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -301,7 +301,7 @@ static void stmmac_global_err(struct stmmac_priv *priv) */ static void stmmac_clk_csr_set(struct stmmac_priv *priv) { - u32 clk_rate; + unsigned long clk_rate; clk_rate = clk_get_rate(priv->plat->stmmac_clk); @@ -325,6 +325,10 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) priv->clk_csr = STMMAC_CSR_150_250M; else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M)) priv->clk_csr = STMMAC_CSR_250_300M; + else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M)) + priv->clk_csr = STMMAC_CSR_300_500M; + else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M)) + priv->clk_csr = STMMAC_CSR_500_800M; } if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 3ac32444e492..06e07e6e180b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -640,7 +640,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) dev_info(&pdev->dev, "PTP uses main clock\n"); } else { plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref); - dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate); + dev_dbg(&pdev->dev, "PTP rate %lu\n", plat->clk_ptp_rate); } plat->stmmac_rst = devm_reset_control_get_optional(&pdev->dev, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index 4cc70480ce0f..3fe0e3a80e80 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -104,4 +104,7 @@ int dwmac1000_ptp_enable(struct ptp_clock_info *ptp, void dwmac1000_get_ptptime(void __iomem *ptpaddr, 
u64 *ptp_time); void dwmac1000_timestamp_interrupt(struct stmmac_priv *priv); +extern const struct ptp_clock_info stmmac_ptp_clock_ops; +extern const struct ptp_clock_info dwmac1000_ptp_clock_ops; + #endif /* __STMMAC_PTP_H__ */ diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index bc658bc60885..642155cb8315 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -235,7 +235,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, vni_to_tunnel_id(gnvh->vni), gnvh->opt_len * 4); if (!tun_dst) { - DEV_STATS_INC(geneve->dev, rx_dropped); + dev_dstats_rx_dropped(geneve->dev); goto drop; } /* Update tunnel dst according to Geneve options. */ @@ -322,7 +322,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, len = skb->len; err = gro_cells_receive(&geneve->gro_cells, skb); if (likely(err == NET_RX_SUCCESS)) - dev_sw_netstats_rx_add(geneve->dev, len); + dev_dstats_rx_add(geneve->dev, len); return; drop: @@ -387,14 +387,14 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if (unlikely((!geneve->cfg.inner_proto_inherit && inner_proto != htons(ETH_P_TEB)))) { - DEV_STATS_INC(geneve->dev, rx_dropped); + dev_dstats_rx_dropped(geneve->dev); goto drop; } opts_len = geneveh->opt_len * 4; if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, inner_proto, !net_eq(geneve->net, dev_net(geneve->dev)))) { - DEV_STATS_INC(geneve->dev, rx_dropped); + dev_dstats_rx_dropped(geneve->dev); goto drop; } @@ -1023,7 +1023,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { netdev_dbg(dev, "no tunnel metadata\n"); dev_kfree_skb(skb); - DEV_STATS_INC(dev, tx_dropped); + dev_dstats_tx_dropped(dev); return NETDEV_TX_OK; } } else { @@ -1202,7 +1202,7 @@ static void geneve_setup(struct net_device *dev) dev->hw_features |= NETIF_F_RXCSUM; dev->hw_features |= NETIF_F_GSO_SOFTWARE; - dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; + dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS; /* MTU range: 68 - (something less than 65535) */ dev->min_mtu = ETH_MIN_MTU; /* The max_mtu calculation does not take account of GENEVE diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c index d2b3f5a59141..e3dcdeacc12c 100644 --- a/drivers/net/mctp/mctp-i2c.c +++ b/drivers/net/mctp/mctp-i2c.c @@ -177,8 +177,7 @@ static struct mctp_i2c_client *mctp_i2c_new_client(struct i2c_client *client) return mcli; err: if (mcli) { - if (mcli->client) - i2c_unregister_device(mcli->client); + i2c_unregister_device(mcli->client); kfree(mcli); } return ERR_PTR(rc); diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c index 2beb83154d39..cb53dccbde1a 100644 --- a/drivers/net/mdio/mdio-octeon.c +++ b/drivers/net/mdio/mdio-octeon.c @@ -17,37 +17,20 @@ static int octeon_mdiobus_probe(struct platform_device *pdev) { struct cavium_mdiobus *bus; struct mii_bus *mii_bus; - struct resource *res_mem; - resource_size_t mdio_phys; - resource_size_t regsize; union cvmx_smix_en smi_en; - int err = -ENOENT; + int err; mii_bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*bus)); if (!mii_bus) return -ENOMEM; - res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res_mem == NULL) { - dev_err(&pdev->dev, "found no memory resource\n"); - return -ENXIO; - } - bus = mii_bus->priv; bus->mii_bus = mii_bus; - mdio_phys = res_mem->start; - regsize = resource_size(res_mem); - if (!devm_request_mem_region(&pdev->dev, mdio_phys, regsize, - res_mem->name)) { - 
dev_err(&pdev->dev, "request_mem_region failed\n"); - return -ENXIO; - } - - bus->register_base = devm_ioremap(&pdev->dev, mdio_phys, regsize); - if (!bus->register_base) { + bus->register_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(bus->register_base)) { dev_err(&pdev->dev, "dev_ioremap failed\n"); - return -ENOMEM; + return PTR_ERR(bus->register_base); } smi_en.u64 = 0; diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 4ea44a2f48f7..f422a2f666ef 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -36,6 +36,7 @@ #include <linux/inet.h> #include <linux/configfs.h> #include <linux/etherdevice.h> +#include <linux/u64_stats_sync.h> #include <linux/utsname.h> #include <linux/rtnetlink.h> @@ -90,6 +91,12 @@ static DEFINE_MUTEX(target_cleanup_list_lock); */ static struct console netconsole_ext; +struct netconsole_target_stats { + u64_stats_t xmit_drop_count; + u64_stats_t enomem_count; + struct u64_stats_sync syncp; +}; + /** * struct netconsole_target - Represents a configured netconsole target. * @list: Links this target into the target_list. @@ -97,6 +104,7 @@ static struct console netconsole_ext; * @userdata_group: Links to the userdata configfs hierarchy * @userdata_complete: Cached, formatted string of append * @userdata_length: String length of userdata_complete + * @stats: Packet send stats for the target. Used for debugging. * @enabled: On / off knob to enable / disable target. * Visible from userspace (read-write). * We maintain a strict 1:1 correspondence between this and @@ -124,6 +132,7 @@ struct netconsole_target { char userdata_complete[MAX_USERDATA_ENTRY_LENGTH * MAX_USERDATA_ITEMS]; size_t userdata_length; #endif + struct netconsole_target_stats stats; bool enabled; bool extended; bool release; @@ -262,6 +271,7 @@ static void netconsole_process_cleanups_core(void) * | remote_ip * | local_mac * | remote_mac + * | transmit_errors * | userdata/ * | <key>/ * | value @@ -371,6 +381,21 @@ static ssize_t remote_mac_show(struct config_item *item, char *buf) return sysfs_emit(buf, "%pM\n", to_target(item)->np.remote_mac); } +static ssize_t transmit_errors_show(struct config_item *item, char *buf) +{ + struct netconsole_target *nt = to_target(item); + u64 xmit_drop_count, enomem_count; + unsigned int start; + + do { + start = u64_stats_fetch_begin(&nt->stats.syncp); + xmit_drop_count = u64_stats_read(&nt->stats.xmit_drop_count); + enomem_count = u64_stats_read(&nt->stats.enomem_count); + } while (u64_stats_fetch_retry(&nt->stats.syncp, start)); + + return sysfs_emit(buf, "%llu\n", xmit_drop_count + enomem_count); +} + /* * This one is special -- targets created through the configfs interface * are not enabled (and the corresponding netpoll activated) by default. @@ -842,6 +867,7 @@ CONFIGFS_ATTR(, remote_ip); CONFIGFS_ATTR_RO(, local_mac); CONFIGFS_ATTR(, remote_mac); CONFIGFS_ATTR(, release); +CONFIGFS_ATTR_RO(, transmit_errors); static struct configfs_attribute *netconsole_target_attrs[] = { &attr_enabled, @@ -854,6 +880,7 @@ static struct configfs_attribute *netconsole_target_attrs[] = { &attr_remote_ip, &attr_local_mac, &attr_remote_mac, + &attr_transmit_errors, NULL, }; @@ -1058,6 +1085,33 @@ static struct notifier_block netconsole_netdev_notifier = { .notifier_call = netconsole_netdev_event, }; +/** + * send_udp - Wrapper for netpoll_send_udp that counts errors + * @nt: target to send message to + * @msg: message to send + * @len: length of message + * + * Calls netpoll_send_udp and classifies the return value. 
If an error + * occurred it increments statistics in nt->stats accordingly. + * Only calls netpoll_send_udp if CONFIG_NETCONSOLE_DYNAMIC is disabled. + */ +static void send_udp(struct netconsole_target *nt, const char *msg, int len) +{ + int result = netpoll_send_udp(&nt->np, msg, len); + + if (IS_ENABLED(CONFIG_NETCONSOLE_DYNAMIC)) { + if (result == NET_XMIT_DROP) { + u64_stats_update_begin(&nt->stats.syncp); + u64_stats_inc(&nt->stats.xmit_drop_count); + u64_stats_update_end(&nt->stats.syncp); + } else if (result == -ENOMEM) { + u64_stats_update_begin(&nt->stats.syncp); + u64_stats_inc(&nt->stats.enomem_count); + u64_stats_update_end(&nt->stats.syncp); + } + } +} + static void send_msg_no_fragmentation(struct netconsole_target *nt, const char *msg, int msg_len, @@ -1085,7 +1139,7 @@ static void send_msg_no_fragmentation(struct netconsole_target *nt, MAX_PRINT_CHUNK - msg_len, "%s", userdata); - netpoll_send_udp(&nt->np, buf, msg_len); + send_udp(nt, buf, msg_len); } static void append_release(char *buf) @@ -1178,7 +1232,7 @@ static void send_fragmented_body(struct netconsole_target *nt, char *buf, this_offset += this_chunk; } - netpoll_send_udp(&nt->np, buf, this_header + this_offset); + send_udp(nt, buf, this_header + this_offset); offset += this_offset; } } @@ -1288,7 +1342,7 @@ static void write_msg(struct console *con, const char *msg, unsigned int len) tmp = msg; for (left = len; left;) { frag = min(left, MAX_PRINT_CHUNK); - netpoll_send_udp(&nt->np, tmp, frag); + send_udp(nt, tmp, frag); tmp += frag; left -= frag; } diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c index b79aedad855b..767a8c0714ac 100644 --- a/drivers/net/pcs/pcs-lynx.c +++ b/drivers/net/pcs/pcs-lynx.c @@ -35,6 +35,27 @@ enum sgmii_speed { #define phylink_pcs_to_lynx(pl_pcs) container_of((pl_pcs), struct lynx_pcs, pcs) #define lynx_to_phylink_pcs(lynx) (&(lynx)->pcs) +static unsigned int lynx_pcs_inband_caps(struct phylink_pcs *pcs, + phy_interface_t interface) +{ + switch (interface) { + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_QSGMII: + return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE; + + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_2500BASEX: + return LINK_INBAND_DISABLE; + + case PHY_INTERFACE_MODE_USXGMII: + return LINK_INBAND_ENABLE; + + default: + return 0; + } +} + static void lynx_pcs_get_state_usxgmii(struct mdio_device *pcs, struct phylink_link_state *state) { @@ -306,6 +327,7 @@ static void lynx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode, } static const struct phylink_pcs_ops lynx_pcs_phylink_ops = { + .pcs_inband_caps = lynx_pcs_inband_caps, .pcs_get_state = lynx_pcs_get_state, .pcs_config = lynx_pcs_config, .pcs_an_restart = lynx_pcs_an_restart, diff --git a/drivers/net/pcs/pcs-mtk-lynxi.c b/drivers/net/pcs/pcs-mtk-lynxi.c index 4f63abe638c4..7de804535229 100644 --- a/drivers/net/pcs/pcs-mtk-lynxi.c +++ b/drivers/net/pcs/pcs-mtk-lynxi.c @@ -88,6 +88,21 @@ static struct mtk_pcs_lynxi *pcs_to_mtk_pcs_lynxi(struct phylink_pcs *pcs) return container_of(pcs, struct mtk_pcs_lynxi, pcs); } +static unsigned int mtk_pcs_lynxi_inband_caps(struct phylink_pcs *pcs, + phy_interface_t interface) +{ + switch (interface) { + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_QSGMII: + return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE; + + default: + return 0; + } +} + static void mtk_pcs_lynxi_get_state(struct phylink_pcs *pcs, struct 
phylink_link_state *state) { @@ -241,6 +256,7 @@ static void mtk_pcs_lynxi_disable(struct phylink_pcs *pcs) } static const struct phylink_pcs_ops mtk_pcs_lynxi_ops = { + .pcs_inband_caps = mtk_pcs_lynxi_inband_caps, .pcs_get_state = mtk_pcs_lynxi_get_state, .pcs_config = mtk_pcs_lynxi_config, .pcs_an_restart = mtk_pcs_lynxi_restart_an, diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index 7246a910728d..f70ca39f0905 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -567,6 +567,33 @@ static int xpcs_validate(struct phylink_pcs *pcs, unsigned long *supported, return 0; } +static unsigned int xpcs_inband_caps(struct phylink_pcs *pcs, + phy_interface_t interface) +{ + struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs); + const struct dw_xpcs_compat *compat; + + compat = xpcs_find_compat(xpcs, interface); + if (!compat) + return 0; + + switch (compat->an_mode) { + case DW_AN_C73: + return LINK_INBAND_ENABLE; + + case DW_AN_C37_SGMII: + case DW_AN_C37_1000BASEX: + return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE; + + case DW_10GBASER: + case DW_2500BASEX: + return LINK_INBAND_DISABLE; + + default: + return 0; + } +} + void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces) { const struct dw_xpcs_compat *compat; @@ -1306,6 +1333,7 @@ static const struct dw_xpcs_desc xpcs_desc_list[] = { static const struct phylink_pcs_ops xpcs_phylink_ops = { .pcs_validate = xpcs_validate, + .pcs_inband_caps = xpcs_inband_caps, .pcs_pre_config = xpcs_pre_config, .pcs_config = xpcs_config, .pcs_get_state = xpcs_get_state, diff --git a/drivers/net/phy/bcm84881.c b/drivers/net/phy/bcm84881.c index 97da3aee4942..47405bded677 100644 --- a/drivers/net/phy/bcm84881.c +++ b/drivers/net/phy/bcm84881.c @@ -235,11 +235,21 @@ static int bcm84881_read_status(struct phy_device *phydev) return genphy_c45_read_mdix(phydev); } +/* The Broadcom BCM84881 in the Methode DM7052 is unable to provide a SGMII + * or 802.3z control word, so inband will not work. + */ +static unsigned int bcm84881_inband_caps(struct phy_device *phydev, + phy_interface_t interface) +{ + return LINK_INBAND_DISABLE; +} + static struct phy_driver bcm84881_drivers[] = { { .phy_id = 0xae025150, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM84881", + .inband_caps = bcm84881_inband_caps, .config_init = bcm84881_config_init, .probe = bcm84881_probe, .get_features = bcm84881_get_features, diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index cf8b6d0bfaa9..25ee09c48027 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -22,8 +22,6 @@ #define DP83826C_PHY_ID 0x2000a130 #define DP83826NC_PHY_ID 0x2000a110 -#define DP83822_DEVADDR 0x1f - #define MII_DP83822_CTRL_2 0x0a #define MII_DP83822_PHYSTS 0x10 #define MII_DP83822_PHYSCR 0x11 @@ -159,14 +157,14 @@ static int dp83822_config_wol(struct phy_device *phydev, /* MAC addresses start with byte 5, but stored in mac[0]. 
* 822 PHYs store bytes 4|5, 2|3, 0|1 */ - phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_DA1, + phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_DA1, (mac[1] << 8) | mac[0]); - phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_DA2, + phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_DA2, (mac[3] << 8) | mac[2]); - phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_DA3, + phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_DA3, (mac[5] << 8) | mac[4]); - value = phy_read_mmd(phydev, DP83822_DEVADDR, + value = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG); if (wol->wolopts & WAKE_MAGIC) value |= DP83822_WOL_MAGIC_EN; @@ -174,13 +172,13 @@ static int dp83822_config_wol(struct phy_device *phydev, value &= ~DP83822_WOL_MAGIC_EN; if (wol->wolopts & WAKE_MAGICSECURE) { - phy_write_mmd(phydev, DP83822_DEVADDR, + phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RXSOP1, (wol->sopass[1] << 8) | wol->sopass[0]); - phy_write_mmd(phydev, DP83822_DEVADDR, + phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RXSOP2, (wol->sopass[3] << 8) | wol->sopass[2]); - phy_write_mmd(phydev, DP83822_DEVADDR, + phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RXSOP3, (wol->sopass[5] << 8) | wol->sopass[4]); value |= DP83822_WOL_SECURE_ON; @@ -194,10 +192,10 @@ static int dp83822_config_wol(struct phy_device *phydev, value |= DP83822_WOL_EN | DP83822_WOL_INDICATION_SEL | DP83822_WOL_CLR_INDICATION; - return phy_write_mmd(phydev, DP83822_DEVADDR, + return phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG, value); } else { - return phy_clear_bits_mmd(phydev, DP83822_DEVADDR, + return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG, DP83822_WOL_EN | DP83822_WOL_MAGIC_EN | @@ -226,23 +224,23 @@ static void dp83822_get_wol(struct phy_device *phydev, wol->supported = (WAKE_MAGIC | WAKE_MAGICSECURE); wol->wolopts = 0; - value = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG); + value = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG); if (value & DP83822_WOL_MAGIC_EN) wol->wolopts |= WAKE_MAGIC; if (value & DP83822_WOL_SECURE_ON) { - sopass_val = phy_read_mmd(phydev, DP83822_DEVADDR, + sopass_val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RXSOP1); wol->sopass[0] = (sopass_val & 0xff); wol->sopass[1] = (sopass_val >> 8); - sopass_val = phy_read_mmd(phydev, DP83822_DEVADDR, + sopass_val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RXSOP2); wol->sopass[2] = (sopass_val & 0xff); wol->sopass[3] = (sopass_val >> 8); - sopass_val = phy_read_mmd(phydev, DP83822_DEVADDR, + sopass_val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RXSOP3); wol->sopass[4] = (sopass_val & 0xff); wol->sopass[5] = (sopass_val >> 8); @@ -430,18 +428,18 @@ static int dp83822_config_init(struct phy_device *phydev) if (tx_int_delay <= 0) rgmii_delay |= DP83822_TX_CLK_SHIFT; - err = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR, + err = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR, DP83822_RX_CLK_SHIFT | DP83822_TX_CLK_SHIFT, rgmii_delay); if (err) return err; - err = phy_set_bits_mmd(phydev, DP83822_DEVADDR, + err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR, DP83822_RGMII_MODE_EN); if (err) return err; } else { - err = phy_clear_bits_mmd(phydev, DP83822_DEVADDR, + err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR, DP83822_RGMII_MODE_EN); if (err) @@ -496,7 +494,7 @@ static int dp83822_config_init(struct phy_device *phydev) return err; if (dp83822->fx_signal_det_low) { - err = phy_set_bits_mmd(phydev, 
DP83822_DEVADDR, + err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_GENCFG, DP83822_SIG_DET_LOW); if (err) @@ -514,10 +512,10 @@ static int dp8382x_config_rmii_mode(struct phy_device *phydev) if (!device_property_read_string(dev, "ti,rmii-mode", &of_val)) { if (strcmp(of_val, "master") == 0) { - ret = phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR, + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR, DP83822_RMII_MODE_SEL); } else if (strcmp(of_val, "slave") == 0) { - ret = phy_set_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR, + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR, DP83822_RMII_MODE_SEL); } else { phydev_err(phydev, "Invalid value for ti,rmii-mode property (%s)\n", @@ -539,7 +537,7 @@ static int dp83826_config_init(struct phy_device *phydev) int ret; if (phydev->interface == PHY_INTERFACE_MODE_RMII) { - ret = phy_set_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR, + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR, DP83822_RMII_MODE_EN); if (ret) return ret; @@ -548,7 +546,7 @@ static int dp83826_config_init(struct phy_device *phydev) if (ret) return ret; } else { - ret = phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR, + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR, DP83822_RMII_MODE_EN); if (ret) return ret; @@ -560,7 +558,7 @@ static int dp83826_config_init(struct phy_device *phydev) FIELD_GET(DP83826_CFG_DAC_MINUS_MDIX_5_TO_4, dp83822->cfg_dac_minus)); mask = DP83826_VOD_CFG1_MINUS_MDIX_MASK | DP83826_VOD_CFG1_MINUS_MDI_MASK; - ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG1, mask, val); + ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83826_VOD_CFG1, mask, val); if (ret) return ret; @@ -568,7 +566,7 @@ static int dp83826_config_init(struct phy_device *phydev) FIELD_GET(DP83826_CFG_DAC_MINUS_MDIX_3_TO_0, dp83822->cfg_dac_minus)); mask = DP83826_VOD_CFG2_MINUS_MDIX_MASK; - ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG2, mask, val); + ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83826_VOD_CFG2, mask, val); if (ret) return ret; } @@ -577,7 +575,7 @@ static int dp83826_config_init(struct phy_device *phydev) val = FIELD_PREP(DP83826_VOD_CFG2_PLUS_MDIX_MASK, dp83822->cfg_dac_plus) | FIELD_PREP(DP83826_VOD_CFG2_PLUS_MDI_MASK, dp83822->cfg_dac_plus); mask = DP83826_VOD_CFG2_PLUS_MDIX_MASK | DP83826_VOD_CFG2_PLUS_MDI_MASK; - ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG2, mask, val); + ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83826_VOD_CFG2, mask, val); if (ret) return ret; } @@ -673,7 +671,7 @@ static int dp83822_read_straps(struct phy_device *phydev) int fx_enabled, fx_sd_enable; int val; - val = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_SOR1); + val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_SOR1); if (val < 0) return val; @@ -748,7 +746,7 @@ static int dp83822_suspend(struct phy_device *phydev) { int value; - value = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG); + value = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG); if (!(value & DP83822_WOL_EN)) genphy_suspend(phydev); @@ -762,9 +760,9 @@ static int dp83822_resume(struct phy_device *phydev) genphy_resume(phydev); - value = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG); + value = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG); - phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG, value | + phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG, value | 
DP83822_WOL_CLR_INDICATION); return 0; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index cd50cd6a7f75..ffe223ad9e5f 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -717,6 +717,48 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev) return genphy_check_and_restart_aneg(phydev, changed); } +static unsigned int m88e1111_inband_caps(struct phy_device *phydev, + phy_interface_t interface) +{ + /* In 1000base-X and SGMII modes, the inband mode can be changed + * through the Fibre page BMCR ANENABLE bit. + */ + if (interface == PHY_INTERFACE_MODE_1000BASEX || + interface == PHY_INTERFACE_MODE_SGMII) + return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE | + LINK_INBAND_BYPASS; + + return 0; +} + +static int m88e1111_config_inband(struct phy_device *phydev, unsigned int modes) +{ + u16 extsr, bmcr; + int err; + + if (phydev->interface != PHY_INTERFACE_MODE_1000BASEX && + phydev->interface != PHY_INTERFACE_MODE_SGMII) + return -EINVAL; + + if (modes == LINK_INBAND_BYPASS) + extsr = MII_M1111_HWCFG_SERIAL_AN_BYPASS; + else + extsr = 0; + + if (modes == LINK_INBAND_DISABLE) + bmcr = 0; + else + bmcr = BMCR_ANENABLE; + + err = phy_modify(phydev, MII_M1111_PHY_EXT_SR, + MII_M1111_HWCFG_SERIAL_AN_BYPASS, extsr); + if (err < 0) + return extsr; + + return phy_modify_paged(phydev, MII_MARVELL_FIBER_PAGE, MII_BMCR, + BMCR_ANENABLE, bmcr); +} + static int m88e1111_config_aneg(struct phy_device *phydev) { int extsr = phy_read(phydev, MII_M1111_PHY_EXT_SR); @@ -1508,7 +1550,6 @@ static int m88e1540_get_fld(struct phy_device *phydev, u8 *msecs) static int m88e1540_set_fld(struct phy_device *phydev, const u8 *msecs) { - struct ethtool_keee eee; int val, ret; if (*msecs == ETHTOOL_PHY_FAST_LINK_DOWN_OFF) @@ -1518,8 +1559,7 @@ static int m88e1540_set_fld(struct phy_device *phydev, const u8 *msecs) /* According to the Marvell data sheet EEE must be disabled for * Fast Link Down detection to work properly */ - ret = genphy_c45_ethtool_get_eee(phydev, &eee); - if (!ret && eee.eee_enabled) { + if (phydev->eee_cfg.eee_enabled) { phydev_warn(phydev, "Fast Link Down detection requires EEE to be disabled!\n"); return -EBUSY; } @@ -3677,6 +3717,8 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1112", /* PHY_GBIT_FEATURES */ .probe = marvell_probe, + .inband_caps = m88e1111_inband_caps, + .config_inband = m88e1111_config_inband, .config_init = m88e1112_config_init, .config_aneg = marvell_config_aneg, .config_intr = marvell_config_intr, @@ -3698,6 +3740,8 @@ static struct phy_driver marvell_drivers[] = { /* PHY_GBIT_FEATURES */ .flags = PHY_POLL_CABLE_TEST, .probe = marvell_probe, + .inband_caps = m88e1111_inband_caps, + .config_inband = m88e1111_config_inband, .config_init = m88e1111gbe_config_init, .config_aneg = m88e1111_config_aneg, .read_status = marvell_read_status, @@ -3721,6 +3765,8 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1111 (Finisar)", /* PHY_GBIT_FEATURES */ .probe = marvell_probe, + .inband_caps = m88e1111_inband_caps, + .config_inband = m88e1111_config_inband, .config_init = m88e1111gbe_config_init, .config_aneg = m88e1111_config_aneg, .read_status = marvell_read_status, diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index 944ae98ad110..0dac08e85304 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -1469,18 +1469,17 @@ EXPORT_SYMBOL_GPL(genphy_c45_plca_get_status); * @phydev: target phy_device struct * @adv: variable to store advertised linkmodes * @lp: 
variable to store LP advertised linkmodes - * @is_enabled: variable to store EEE enabled/disabled configuration value * * Description: this function will read local and link partner PHY * advertisements. Compare them return current EEE state. */ int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv, - unsigned long *lp, bool *is_enabled) + unsigned long *lp) { __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp_adv) = {}; __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp_lp) = {}; __ETHTOOL_DECLARE_LINK_MODE_MASK(common); - bool eee_enabled, eee_active; + bool eee_active; int ret; ret = genphy_c45_read_eee_adv(phydev, tmp_adv); @@ -1491,9 +1490,8 @@ int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv, if (ret) return ret; - eee_enabled = !linkmode_empty(tmp_adv); linkmode_and(common, tmp_adv, tmp_lp); - if (eee_enabled && !linkmode_empty(common)) + if (!linkmode_empty(tmp_adv) && !linkmode_empty(common)) eee_active = phy_check_valid(phydev->speed, phydev->duplex, common); else @@ -1503,8 +1501,6 @@ int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv, linkmode_copy(adv, tmp_adv); if (lp) linkmode_copy(lp, tmp_lp); - if (is_enabled) - *is_enabled = eee_enabled; return eee_active; } @@ -1521,15 +1517,13 @@ EXPORT_SYMBOL(genphy_c45_eee_is_active); int genphy_c45_ethtool_get_eee(struct phy_device *phydev, struct ethtool_keee *data) { - bool is_enabled; int ret; ret = genphy_c45_eee_is_active(phydev, data->advertised, - data->lp_advertised, &is_enabled); + data->lp_advertised); if (ret < 0) return ret; - data->eee_enabled = is_enabled; data->eee_active = phydev->eee_active; linkmode_copy(data->supported, phydev->supported_eee); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 0d20b534122b..e4b04cdaa995 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -988,8 +988,7 @@ static int phy_check_link_status(struct phy_device *phydev) if (phydev->link && phydev->state != PHY_RUNNING) { phy_check_downshift(phydev); phydev->state = PHY_RUNNING; - err = genphy_c45_eee_is_active(phydev, - NULL, NULL, NULL); + err = genphy_c45_eee_is_active(phydev, NULL, NULL); phydev->eee_active = err > 0; phydev->enable_tx_lpi = phydev->eee_cfg.tx_lpi_enabled && phydev->eee_active; @@ -1006,6 +1005,59 @@ static int phy_check_link_status(struct phy_device *phydev) } /** + * phy_inband_caps - query which in-band signalling modes are supported + * @phydev: a pointer to a &struct phy_device + * @interface: the interface mode for the PHY + * + * Returns zero if it is unknown what in-band signalling is supported by the + * PHY (e.g. because the PHY driver doesn't implement the method.) Otherwise, + * returns a bit mask of the LINK_INBAND_* values from + * &enum link_inband_signalling to describe which inband modes are supported + * by the PHY for this interface mode. + */ +unsigned int phy_inband_caps(struct phy_device *phydev, + phy_interface_t interface) +{ + if (phydev->drv && phydev->drv->inband_caps) + return phydev->drv->inband_caps(phydev, interface); + + return 0; +} +EXPORT_SYMBOL_GPL(phy_inband_caps); + +/** + * phy_config_inband - configure the desired PHY in-band mode + * @phydev: the phy_device struct + * @modes: in-band modes to configure + * + * Description: disables, enables or enables-with-bypass in-band signalling + * between the PHY and host system. + * + * Returns: zero on success, or negative errno value. 
+ */ +int phy_config_inband(struct phy_device *phydev, unsigned int modes) +{ + int err; + + if (!!(modes & LINK_INBAND_DISABLE) + + !!(modes & LINK_INBAND_ENABLE) + + !!(modes & LINK_INBAND_BYPASS) != 1) + return -EINVAL; + + mutex_lock(&phydev->lock); + if (!phydev->drv) + err = -EIO; + else if (!phydev->drv->config_inband) + err = -EOPNOTSUPP; + else + err = phydev->drv->config_inband(phydev, modes); + mutex_unlock(&phydev->lock); + + return err; +} +EXPORT_SYMBOL(phy_config_inband); + +/** * _phy_start_aneg - start auto-negotiation for this PHY device * @phydev: the phy_device struct * @@ -1605,7 +1657,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) if (!phydev->drv) return -EIO; - ret = genphy_c45_eee_is_active(phydev, NULL, NULL, NULL); + ret = genphy_c45_eee_is_active(phydev, NULL, NULL); if (ret < 0) return ret; if (!ret) @@ -1649,8 +1701,8 @@ EXPORT_SYMBOL(phy_get_eee_err); * @phydev: target phy_device struct * @data: ethtool_keee data * - * Description: reports the Supported/Advertisement/LP Advertisement - * capabilities, etc. + * Description: get the current EEE settings, filling in all members of + * @data. */ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_keee *data) { diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 30a654e98352..95fbc363f9a6 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -56,7 +56,8 @@ struct phylink { struct phy_device *phydev; phy_interface_t link_interface; /* PHY_INTERFACE_xxx */ u8 cfg_link_an_mode; /* MLO_AN_xxx */ - u8 cur_link_an_mode; + u8 req_link_an_mode; /* Requested MLO_AN_xxx mode */ + u8 act_link_an_mode; /* Active MLO_AN_xxx mode */ u8 link_port; /* The current non-phy ethtool port */ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); @@ -74,6 +75,7 @@ struct phylink { struct mutex state_mutex; struct phylink_link_state phy_state; + unsigned int phy_ib_mode; struct work_struct resolve; unsigned int pcs_neg_mode; unsigned int pcs_state; @@ -174,6 +176,24 @@ static const char *phylink_an_mode_str(unsigned int mode) return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown"; } +static const char *phylink_pcs_mode_str(unsigned int mode) +{ + if (!mode) + return "none"; + + if (mode & PHYLINK_PCS_NEG_OUTBAND) + return "outband"; + + if (mode & PHYLINK_PCS_NEG_INBAND) { + if (mode & PHYLINK_PCS_NEG_ENABLED) + return "inband,an-enabled"; + else + return "inband,an-disabled"; + } + + return "unknown"; +} + static unsigned int phylink_interface_signal_rate(phy_interface_t interface) { switch (interface) { @@ -971,6 +991,15 @@ static void phylink_resolve_an_pause(struct phylink_link_state *state) } } +static unsigned int phylink_pcs_inband_caps(struct phylink_pcs *pcs, + phy_interface_t interface) +{ + if (pcs && pcs->ops->pcs_inband_caps) + return pcs->ops->pcs_inband_caps(pcs, interface); + + return 0; +} + static void phylink_pcs_pre_config(struct phylink_pcs *pcs, phy_interface_t interface) { @@ -1024,6 +1053,24 @@ static void phylink_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode, pcs->ops->pcs_link_up(pcs, neg_mode, interface, speed, duplex); } +/* Query inband for a specific interface mode, asking the MAC for the + * PCS which will be used to handle the interface mode. 
+ */ +static unsigned int phylink_inband_caps(struct phylink *pl, + phy_interface_t interface) +{ + struct phylink_pcs *pcs; + + if (!pl->mac_ops->mac_select_pcs) + return 0; + + pcs = pl->mac_ops->mac_select_pcs(pl->config, interface); + if (!pcs) + return 0; + + return phylink_pcs_inband_caps(pcs, interface); +} + static void phylink_pcs_poll_stop(struct phylink *pl) { if (pl->cfg_link_an_mode == MLO_AN_INBAND) @@ -1065,13 +1112,13 @@ static void phylink_mac_config(struct phylink *pl, phylink_dbg(pl, "%s: mode=%s/%s/%s adv=%*pb pause=%02x\n", - __func__, phylink_an_mode_str(pl->cur_link_an_mode), + __func__, phylink_an_mode_str(pl->act_link_an_mode), phy_modes(st.interface), phy_rate_matching_to_str(st.rate_matching), __ETHTOOL_LINK_MODE_MASK_NBITS, st.advertising, st.pause); - pl->mac_ops->mac_config(pl->config, pl->cur_link_an_mode, &st); + pl->mac_ops->mac_config(pl->config, pl->act_link_an_mode, &st); } static void phylink_pcs_an_restart(struct phylink *pl) @@ -1079,13 +1126,14 @@ static void phylink_pcs_an_restart(struct phylink *pl) if (pl->pcs && linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, pl->link_config.advertising) && phy_interface_mode_is_8023z(pl->link_config.interface) && - phylink_autoneg_inband(pl->cur_link_an_mode)) + phylink_autoneg_inband(pl->act_link_an_mode)) pl->pcs->ops->pcs_an_restart(pl->pcs); } /** * phylink_pcs_neg_mode() - helper to determine PCS inband mode - * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND. + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @pcs: a pointer to &struct phylink_pcs * @interface: interface mode to be used * @advertising: adertisement ethtool link mode mask * @@ -1102,11 +1150,21 @@ static void phylink_pcs_an_restart(struct phylink *pl) * Note: this is for cases where the PCS itself is involved in negotiation * (e.g. Clause 37, SGMII and similar) not Clause 73. */ -static unsigned int phylink_pcs_neg_mode(unsigned int mode, - phy_interface_t interface, - const unsigned long *advertising) +static void phylink_pcs_neg_mode(struct phylink *pl, struct phylink_pcs *pcs, + phy_interface_t interface, + const unsigned long *advertising) { - unsigned int neg_mode; + unsigned int pcs_ib_caps = 0; + unsigned int phy_ib_caps = 0; + unsigned int neg_mode, mode; + enum { + INBAND_CISCO_SGMII, + INBAND_BASEX, + } type; + + mode = pl->req_link_an_mode; + + pl->phy_ib_mode = 0; switch (interface) { case PHY_INTERFACE_MODE_SGMII: @@ -1119,10 +1177,7 @@ static unsigned int phylink_pcs_neg_mode(unsigned int mode, * inband communication. Note: there exist PHYs that run * with SGMII but do not send the inband data. */ - if (!phylink_autoneg_inband(mode)) - neg_mode = PHYLINK_PCS_NEG_OUTBAND; - else - neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED; + type = INBAND_CISCO_SGMII; break; case PHY_INTERFACE_MODE_1000BASEX: @@ -1133,21 +1188,143 @@ static unsigned int phylink_pcs_neg_mode(unsigned int mode, * as well, but drivers may not support this, so may * need to override this. 
*/ - if (!phylink_autoneg_inband(mode)) + type = INBAND_BASEX; + break; + + default: + pl->pcs_neg_mode = PHYLINK_PCS_NEG_NONE; + pl->act_link_an_mode = mode; + return; + } + + if (pcs) + pcs_ib_caps = phylink_pcs_inband_caps(pcs, interface); + + if (pl->phydev) + phy_ib_caps = phy_inband_caps(pl->phydev, interface); + + phylink_dbg(pl, "interface %s inband modes: pcs=%02x phy=%02x\n", + phy_modes(interface), pcs_ib_caps, phy_ib_caps); + + if (!phylink_autoneg_inband(mode)) { + bool pcs_ib_only = false; + bool phy_ib_only = false; + + if (pcs_ib_caps && pcs_ib_caps != LINK_INBAND_DISABLE) { + /* PCS supports reporting in-band capabilities, and + * supports more than disable mode. + */ + if (pcs_ib_caps & LINK_INBAND_DISABLE) + neg_mode = PHYLINK_PCS_NEG_OUTBAND; + else if (pcs_ib_caps & LINK_INBAND_ENABLE) + pcs_ib_only = true; + } + + if (phy_ib_caps && phy_ib_caps != LINK_INBAND_DISABLE) { + /* PHY supports in-band capabilities, and supports + * more than disable mode. + */ + if (phy_ib_caps & LINK_INBAND_DISABLE) + pl->phy_ib_mode = LINK_INBAND_DISABLE; + else if (phy_ib_caps & LINK_INBAND_BYPASS) + pl->phy_ib_mode = LINK_INBAND_BYPASS; + else if (phy_ib_caps & LINK_INBAND_ENABLE) + phy_ib_only = true; + } + + /* If either the PCS or PHY requires inband to be enabled, + * this is an invalid configuration. Provide a diagnostic + * message for this case, but don't try to force the issue. + */ + if (pcs_ib_only || phy_ib_only) + phylink_warn(pl, + "firmware wants %s mode, but %s%s%s requires inband\n", + phylink_an_mode_str(mode), + pcs_ib_only ? "PCS" : "", + pcs_ib_only && phy_ib_only ? " and " : "", + phy_ib_only ? "PHY" : ""); + + neg_mode = PHYLINK_PCS_NEG_OUTBAND; + } else if (type == INBAND_CISCO_SGMII || pl->phydev) { + /* For SGMII modes which are designed to be used with PHYs, or + * Base-X with a PHY, we try to use in-band mode where-ever + * possible. However, there are some PHYs e.g. BCM84881 which + * do not support in-band. + */ + const unsigned int inband_ok = LINK_INBAND_ENABLE | + LINK_INBAND_BYPASS; + const unsigned int outband_ok = LINK_INBAND_DISABLE | + LINK_INBAND_BYPASS; + /* PCS PHY + * D E D E + * 0 0 0 0 no information inband enabled + * 1 0 0 0 pcs doesn't support outband + * 0 1 0 0 pcs required inband enabled + * 1 1 0 0 pcs optional inband enabled + * 0 0 1 0 phy doesn't support outband + * 1 0 1 0 pcs+phy doesn't support outband + * 0 1 1 0 pcs required, phy doesn't support, invalid + * 1 1 1 0 pcs optional, phy doesn't support, outband + * 0 0 0 1 phy required inband enabled + * 1 0 0 1 pcs doesn't support, phy required, invalid + * 0 1 0 1 pcs+phy required inband enabled + * 1 1 0 1 pcs optional, phy required inband enabled + * 0 0 1 1 phy optional inband enabled + * 1 0 1 1 pcs doesn't support, phy optional, outband + * 0 1 1 1 pcs required, phy optional inband enabled + * 1 1 1 1 pcs+phy optional inband enabled + */ + if ((!pcs_ib_caps || pcs_ib_caps & inband_ok) && + (!phy_ib_caps || phy_ib_caps & inband_ok)) { + /* In-band supported or unknown at both ends. Enable + * in-band mode with or without bypass at the PHY. + */ + if (phy_ib_caps & LINK_INBAND_ENABLE) + pl->phy_ib_mode = LINK_INBAND_ENABLE; + else if (phy_ib_caps & LINK_INBAND_BYPASS) + pl->phy_ib_mode = LINK_INBAND_BYPASS; + + neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED; + } else if ((!pcs_ib_caps || pcs_ib_caps & outband_ok) && + (!phy_ib_caps || phy_ib_caps & outband_ok)) { + /* Either in-band not supported at at least one end. + * In-band bypass at the other end is possible. 
+ */ + if (phy_ib_caps & LINK_INBAND_DISABLE) + pl->phy_ib_mode = LINK_INBAND_DISABLE; + else if (phy_ib_caps & LINK_INBAND_BYPASS) + pl->phy_ib_mode = LINK_INBAND_BYPASS; + neg_mode = PHYLINK_PCS_NEG_OUTBAND; + if (pl->phydev) + mode = MLO_AN_PHY; + } else { + /* invalid */ + phylink_warn(pl, "%s: incompatible in-band capabilities, trying in-band", + phy_modes(interface)); + neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED; + } + } else { + /* For Base-X without a PHY */ + if (pcs_ib_caps == LINK_INBAND_DISABLE) + /* If the PCS doesn't support inband, then inband must + * be disabled. + */ + neg_mode = PHYLINK_PCS_NEG_INBAND_DISABLED; + else if (pcs_ib_caps == LINK_INBAND_ENABLE) + /* If the PCS requires inband, then inband must always + * be enabled. + */ + neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED; else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising)) neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED; else neg_mode = PHYLINK_PCS_NEG_INBAND_DISABLED; - break; - - default: - neg_mode = PHYLINK_PCS_NEG_NONE; - break; } - return neg_mode; + pl->pcs_neg_mode = neg_mode; + pl->act_link_an_mode = mode; } static void phylink_major_config(struct phylink *pl, bool restart, @@ -1159,11 +1336,9 @@ static void phylink_major_config(struct phylink *pl, bool restart, unsigned int neg_mode; int err; - phylink_dbg(pl, "major config %s\n", phy_modes(state->interface)); - - pl->pcs_neg_mode = phylink_pcs_neg_mode(pl->cur_link_an_mode, - state->interface, - state->advertising); + phylink_dbg(pl, "major config, requested %s/%s\n", + phylink_an_mode_str(pl->req_link_an_mode), + phy_modes(state->interface)); if (pl->mac_ops->mac_select_pcs) { pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface); @@ -1177,10 +1352,17 @@ static void phylink_major_config(struct phylink *pl, bool restart, pcs_changed = pl->pcs != pcs; } + phylink_pcs_neg_mode(pl, pcs, state->interface, state->advertising); + + phylink_dbg(pl, "major config, active %s/%s/%s\n", + phylink_an_mode_str(pl->act_link_an_mode), + phylink_pcs_mode_str(pl->pcs_neg_mode), + phy_modes(state->interface)); + phylink_pcs_poll_stop(pl); if (pl->mac_ops->mac_prepare) { - err = pl->mac_ops->mac_prepare(pl->config, pl->cur_link_an_mode, + err = pl->mac_ops->mac_prepare(pl->config, pl->act_link_an_mode, state->interface); if (err < 0) { phylink_err(pl, "mac_prepare failed: %pe\n", @@ -1214,7 +1396,7 @@ static void phylink_major_config(struct phylink *pl, bool restart, if (pl->pcs_state == PCS_STATE_STARTING || pcs_changed) phylink_pcs_enable(pl->pcs); - neg_mode = pl->cur_link_an_mode; + neg_mode = pl->act_link_an_mode; if (pl->pcs && pl->pcs->neg_mode) neg_mode = pl->pcs_neg_mode; @@ -1230,13 +1412,20 @@ static void phylink_major_config(struct phylink *pl, bool restart, phylink_pcs_an_restart(pl); if (pl->mac_ops->mac_finish) { - err = pl->mac_ops->mac_finish(pl->config, pl->cur_link_an_mode, + err = pl->mac_ops->mac_finish(pl->config, pl->act_link_an_mode, state->interface); if (err < 0) phylink_err(pl, "mac_finish failed: %pe\n", ERR_PTR(err)); } + if (pl->phydev && pl->phy_ib_mode) { + err = phy_config_inband(pl->phydev, pl->phy_ib_mode); + if (err < 0) + phylink_err(pl, "phy_config_inband: %pe\n", + ERR_PTR(err)); + } + if (pl->sfp_bus) { rate_kbd = phylink_interface_signal_rate(state->interface); if (rate_kbd) @@ -1261,17 +1450,16 @@ static int phylink_change_inband_advert(struct phylink *pl) return 0; phylink_dbg(pl, "%s: mode=%s/%s adv=%*pb pause=%02x\n", __func__, - phylink_an_mode_str(pl->cur_link_an_mode), + 
phylink_an_mode_str(pl->req_link_an_mode), phy_modes(pl->link_config.interface), __ETHTOOL_LINK_MODE_MASK_NBITS, pl->link_config.advertising, pl->link_config.pause); /* Recompute the PCS neg mode */ - pl->pcs_neg_mode = phylink_pcs_neg_mode(pl->cur_link_an_mode, - pl->link_config.interface, - pl->link_config.advertising); + phylink_pcs_neg_mode(pl, pl->pcs, pl->link_config.interface, + pl->link_config.advertising); - neg_mode = pl->cur_link_an_mode; + neg_mode = pl->act_link_an_mode; if (pl->pcs->neg_mode) neg_mode = pl->pcs_neg_mode; @@ -1336,7 +1524,7 @@ static void phylink_mac_initial_config(struct phylink *pl, bool force_restart) { struct phylink_link_state link_state; - switch (pl->cur_link_an_mode) { + switch (pl->req_link_an_mode) { case MLO_AN_PHY: link_state = pl->phy_state; break; @@ -1410,14 +1598,14 @@ static void phylink_link_up(struct phylink *pl, pl->cur_interface = link_state.interface; - neg_mode = pl->cur_link_an_mode; + neg_mode = pl->act_link_an_mode; if (pl->pcs && pl->pcs->neg_mode) neg_mode = pl->pcs_neg_mode; phylink_pcs_link_up(pl->pcs, neg_mode, pl->cur_interface, speed, duplex); - pl->mac_ops->mac_link_up(pl->config, pl->phydev, pl->cur_link_an_mode, + pl->mac_ops->mac_link_up(pl->config, pl->phydev, pl->act_link_an_mode, pl->cur_interface, speed, duplex, !!(link_state.pause & MLO_PAUSE_TX), rx_pause); @@ -1437,7 +1625,7 @@ static void phylink_link_down(struct phylink *pl) if (ndev) netif_carrier_off(ndev); - pl->mac_ops->mac_link_down(pl->config, pl->cur_link_an_mode, + pl->mac_ops->mac_link_down(pl->config, pl->act_link_an_mode, pl->cur_interface); phylink_info(pl, "Link is Down\n"); } @@ -1463,10 +1651,10 @@ static void phylink_resolve(struct work_struct *w) } else if (pl->link_failed) { link_state.link = false; retrigger = true; - } else if (pl->cur_link_an_mode == MLO_AN_FIXED) { + } else if (pl->act_link_an_mode == MLO_AN_FIXED) { phylink_get_fixed_state(pl, &link_state); mac_config = link_state.link; - } else if (pl->cur_link_an_mode == MLO_AN_PHY) { + } else if (pl->act_link_an_mode == MLO_AN_PHY) { link_state = pl->phy_state; mac_config = link_state.link; } else { @@ -1520,7 +1708,7 @@ static void phylink_resolve(struct work_struct *w) } } - if (pl->cur_link_an_mode != MLO_AN_FIXED) + if (pl->act_link_an_mode != MLO_AN_FIXED) phylink_apply_manual_flow(pl, &link_state); if (mac_config) { @@ -1644,7 +1832,7 @@ int phylink_set_fixed_link(struct phylink *pl, pl->link_config.an_complete = 1; pl->cfg_link_an_mode = MLO_AN_FIXED; - pl->cur_link_an_mode = pl->cfg_link_an_mode; + pl->req_link_an_mode = pl->cfg_link_an_mode; return 0; } @@ -1732,7 +1920,7 @@ struct phylink *phylink_create(struct phylink_config *config, } } - pl->cur_link_an_mode = pl->cfg_link_an_mode; + pl->req_link_an_mode = pl->cfg_link_an_mode; ret = phylink_register_sfp(pl, fwnode); if (ret < 0) { @@ -2189,7 +2377,7 @@ void phylink_start(struct phylink *pl) ASSERT_RTNL(); phylink_info(pl, "configuring for %s/%s link mode\n", - phylink_an_mode_str(pl->cur_link_an_mode), + phylink_an_mode_str(pl->req_link_an_mode), phy_modes(pl->link_config.interface)); /* Always set the carrier off */ @@ -2474,7 +2662,7 @@ int phylink_ethtool_ksettings_get(struct phylink *pl, linkmode_copy(kset->link_modes.supported, pl->supported); - switch (pl->cur_link_an_mode) { + switch (pl->act_link_an_mode) { case MLO_AN_FIXED: /* We are using fixed settings. 
Report these as the * current link settings - and note that these also @@ -2505,6 +2693,26 @@ int phylink_ethtool_ksettings_get(struct phylink *pl, } EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_get); +static bool phylink_validate_pcs_inband_autoneg(struct phylink *pl, + phy_interface_t interface, + unsigned long *adv) +{ + unsigned int inband = phylink_inband_caps(pl, interface); + unsigned int mask; + + /* If the PCS doesn't implement inband support, be permissive. */ + if (!inband) + return true; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, adv)) + mask = LINK_INBAND_ENABLE; + else + mask = LINK_INBAND_DISABLE; + + /* Check whether the PCS implements the required mode */ + return !!(inband & mask); +} + /** * phylink_ethtool_ksettings_set() - set the link settings * @pl: a pointer to a &struct phylink returned from phylink_create() @@ -2566,7 +2774,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, /* If we have a fixed link, refuse to change link parameters. * If the link parameters match, accept them but do nothing. */ - if (pl->cur_link_an_mode == MLO_AN_FIXED) { + if (pl->req_link_an_mode == MLO_AN_FIXED) { if (s->speed != pl->link_config.speed || s->duplex != pl->link_config.duplex) return -EINVAL; @@ -2582,7 +2790,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, * is our default case) but do not allow the advertisement to * be changed. If the advertisement matches, simply return. */ - if (pl->cur_link_an_mode == MLO_AN_FIXED) { + if (pl->req_link_an_mode == MLO_AN_FIXED) { if (!linkmode_equal(config.advertising, pl->link_config.advertising)) return -EINVAL; @@ -2617,7 +2825,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, linkmode_copy(support, pl->supported); if (phylink_validate(pl, support, &config)) { phylink_err(pl, "validation of %s/%s with support %*pb failed\n", - phylink_an_mode_str(pl->cur_link_an_mode), + phylink_an_mode_str(pl->req_link_an_mode), phy_modes(config.interface), __ETHTOOL_LINK_MODE_MASK_NBITS, support); return -EINVAL; @@ -2635,6 +2843,13 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, phylink_is_empty_linkmode(config.advertising)) return -EINVAL; + /* Validate the autonegotiation state. We don't have a PHY in this + * situation, so the PCS is the media-facing entity. 
+ */ + if (!phylink_validate_pcs_inband_autoneg(pl, config.interface, + config.advertising)) + return -EINVAL; + mutex_lock(&pl->state_mutex); pl->link_config.speed = config.speed; pl->link_config.duplex = config.duplex; @@ -2717,7 +2932,7 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl, ASSERT_RTNL(); - if (pl->cur_link_an_mode == MLO_AN_FIXED) + if (pl->req_link_an_mode == MLO_AN_FIXED) return -EOPNOTSUPP; if (!phylink_test(pl->supported, Pause) && @@ -2981,7 +3196,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id, struct phylink_link_state state; int val = 0xffff; - switch (pl->cur_link_an_mode) { + switch (pl->act_link_an_mode) { case MLO_AN_FIXED: if (phy_id == 0) { phylink_get_fixed_state(pl, &state); @@ -3006,7 +3221,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id, static int phylink_mii_write(struct phylink *pl, unsigned int phy_id, unsigned int reg, unsigned int val) { - switch (pl->cur_link_an_mode) { + switch (pl->act_link_an_mode) { case MLO_AN_FIXED: break; @@ -3176,10 +3391,11 @@ static phy_interface_t phylink_choose_sfp_interface(struct phylink *pl, return interface; } -static void phylink_sfp_set_config(struct phylink *pl, u8 mode, +static void phylink_sfp_set_config(struct phylink *pl, unsigned long *supported, struct phylink_link_state *state) { + u8 mode = MLO_AN_INBAND; bool changed = false; phylink_dbg(pl, "requesting link mode %s/%s with support %*pb\n", @@ -3196,9 +3412,9 @@ static void phylink_sfp_set_config(struct phylink *pl, u8 mode, changed = true; } - if (pl->cur_link_an_mode != mode || + if (pl->req_link_an_mode != mode || pl->link_config.interface != state->interface) { - pl->cur_link_an_mode = mode; + pl->req_link_an_mode = mode; pl->link_config.interface = state->interface; changed = true; @@ -3213,8 +3429,7 @@ static void phylink_sfp_set_config(struct phylink *pl, u8 mode, phylink_mac_initial_config(pl, false); } -static int phylink_sfp_config_phy(struct phylink *pl, u8 mode, - struct phy_device *phy) +static int phylink_sfp_config_phy(struct phylink *pl, struct phy_device *phy) { __ETHTOOL_DECLARE_LINK_MODE_MASK(support); struct phylink_link_state config; @@ -3258,7 +3473,7 @@ static int phylink_sfp_config_phy(struct phylink *pl, u8 mode, pl->link_port = pl->sfp_port; - phylink_sfp_set_config(pl, mode, support, &config); + phylink_sfp_set_config(pl, support, &config); return 0; } @@ -3314,6 +3529,12 @@ static int phylink_sfp_config_optical(struct phylink *pl) phylink_dbg(pl, "optical SFP: chosen %s interface\n", phy_modes(interface)); + if (!phylink_validate_pcs_inband_autoneg(pl, interface, + config.advertising)) { + phylink_err(pl, "autoneg setting not compatible with PCS"); + return -EINVAL; + } + config.interface = interface; /* Ignore errors if we're expecting a PHY to attach later */ @@ -3327,7 +3548,7 @@ static int phylink_sfp_config_optical(struct phylink *pl) pl->link_port = pl->sfp_port; - phylink_sfp_set_config(pl, MLO_AN_INBAND, pl->sfp_support, &config); + phylink_sfp_set_config(pl, pl->sfp_support, &config); return 0; } @@ -3398,19 +3619,9 @@ static void phylink_sfp_link_up(void *upstream) phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_LINK); } -/* The Broadcom BCM84881 in the Methode DM7052 is unable to provide a SGMII - * or 802.3z control word, so inband will not work. 
- */ -static bool phylink_phy_no_inband(struct phy_device *phy) -{ - return phy->is_c45 && phy_id_compare(phy->c45_ids.device_ids[1], - 0xae025150, 0xfffffff0); -} - static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy) { struct phylink *pl = upstream; - u8 mode; /* * This is the new way of dealing with flow control for PHYs, @@ -3421,17 +3632,12 @@ static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy) */ phy_support_asym_pause(phy); - if (phylink_phy_no_inband(phy)) - mode = MLO_AN_PHY; - else - mode = MLO_AN_INBAND; - /* Set the PHY's host supported interfaces */ phy_interface_and(phy->host_interfaces, phylink_sfp_interfaces, pl->config->supported_interfaces); /* Do the initial configuration */ - return phylink_sfp_config_phy(pl, mode, phy); + return phylink_sfp_config_phy(pl, phy); } static void phylink_sfp_disconnect_phy(void *upstream, diff --git a/drivers/net/tun.c b/drivers/net/tun.c index d7a865ef370b..8e94df88392c 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -574,14 +574,18 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, return ret; } -static inline bool tun_not_capable(struct tun_struct *tun) +static inline bool tun_capable(struct tun_struct *tun) { const struct cred *cred = current_cred(); struct net *net = dev_net(tun->dev); - return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) || - (gid_valid(tun->group) && !in_egroup_p(tun->group))) && - !ns_capable(net->user_ns, CAP_NET_ADMIN); + if (ns_capable(net->user_ns, CAP_NET_ADMIN)) + return 1; + if (uid_valid(tun->owner) && uid_eq(cred->euid, tun->owner)) + return 1; + if (gid_valid(tun->group) && in_egroup_p(tun->group)) + return 1; + return 0; } static void tun_set_real_num_queues(struct tun_struct *tun) @@ -2778,7 +2782,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) !!(tun->flags & IFF_MULTI_QUEUE)) return -EINVAL; - if (tun_not_capable(tun)) + if (!tun_capable(tun)) return -EPERM; err = security_tun_dev_open(tun->security); if (err < 0) diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 531b1b6a37d1..4661d131b190 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -472,10 +472,6 @@ struct lan78xx_net { struct irq_domain_data domain_data; }; -/* define external phy id */ -#define PHY_LAN8835 (0x0007C130) -#define PHY_KSZ9031RNX (0x00221620) - /* use ethtool to change the level for any given device */ static int msg_level = -1; module_param(msg_level, int, 0); @@ -625,8 +621,8 @@ static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data) *data = *buf; } else if (net_ratelimit()) { netdev_warn(dev->net, - "Failed to read register index 0x%08x. ret = %d", - index, ret); + "Failed to read register index 0x%08x. ret = %pe", + index, ERR_PTR(ret)); } kfree(buf); @@ -656,8 +652,8 @@ static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data) if (unlikely(ret < 0) && net_ratelimit()) { netdev_warn(dev->net, - "Failed to write register index 0x%08x. ret = %d", - index, ret); + "Failed to write register index 0x%08x. 
ret = %pe", + index, ERR_PTR(ret)); } kfree(buf); @@ -678,11 +674,7 @@ static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask, buf &= ~mask; buf |= (mask & data); - ret = lan78xx_write_reg(dev, reg, buf); - if (ret < 0) - return ret; - - return 0; + return lan78xx_write_reg(dev, reg, buf); } static int lan78xx_read_stats(struct lan78xx_net *dev, @@ -812,8 +804,158 @@ static void lan78xx_update_stats(struct lan78xx_net *dev) usb_autopm_put_interface(dev->intf); } +static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable) +{ + return lan78xx_update_reg(dev, reg, hw_enable, hw_enable); +} + +static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled, + u32 hw_disabled) +{ + unsigned long timeout; + bool stopped = true; + int ret; + u32 buf; + + /* Stop the h/w block (if not already stopped) */ + + ret = lan78xx_read_reg(dev, reg, &buf); + if (ret < 0) + return ret; + + if (buf & hw_enabled) { + buf &= ~hw_enabled; + + ret = lan78xx_write_reg(dev, reg, buf); + if (ret < 0) + return ret; + + stopped = false; + timeout = jiffies + HW_DISABLE_TIMEOUT; + do { + ret = lan78xx_read_reg(dev, reg, &buf); + if (ret < 0) + return ret; + + if (buf & hw_disabled) + stopped = true; + else + msleep(HW_DISABLE_DELAY_MS); + } while (!stopped && !time_after(jiffies, timeout)); + } + + ret = stopped ? 0 : -ETIME; + + return ret; +} + +static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush) +{ + return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush); +} + +static int lan78xx_start_tx_path(struct lan78xx_net *dev) +{ + int ret; + + netif_dbg(dev, drv, dev->net, "start tx path"); + + /* Start the MAC transmitter */ + + ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_); + if (ret < 0) + return ret; + + /* Start the Tx FIFO */ + + ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_); + if (ret < 0) + return ret; + + return 0; +} + +static int lan78xx_stop_tx_path(struct lan78xx_net *dev) +{ + int ret; + + netif_dbg(dev, drv, dev->net, "stop tx path"); + + /* Stop the Tx FIFO */ + + ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_); + if (ret < 0) + return ret; + + /* Stop the MAC transmitter */ + + ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_); + if (ret < 0) + return ret; + + return 0; +} + +/* The caller must ensure the Tx path is stopped before calling + * lan78xx_flush_tx_fifo(). + */ +static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev) +{ + return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_); +} + +static int lan78xx_start_rx_path(struct lan78xx_net *dev) +{ + int ret; + + netif_dbg(dev, drv, dev->net, "start rx path"); + + /* Start the Rx FIFO */ + + ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_); + if (ret < 0) + return ret; + + /* Start the MAC receiver*/ + + ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_); + if (ret < 0) + return ret; + + return 0; +} + +static int lan78xx_stop_rx_path(struct lan78xx_net *dev) +{ + int ret; + + netif_dbg(dev, drv, dev->net, "stop rx path"); + + /* Stop the MAC receiver */ + + ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_); + if (ret < 0) + return ret; + + /* Stop the Rx FIFO */ + + ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_); + if (ret < 0) + return ret; + + return 0; +} + +/* The caller must ensure the Rx path is stopped before calling + * lan78xx_flush_rx_fifo(). 
+ */ +static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev) +{ + return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_); +} + /* Loop until the read is completed with timeout called with phy_mutex held */ -static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev) +static int lan78xx_mdiobus_wait_not_busy(struct lan78xx_net *dev) { unsigned long start_time = jiffies; u32 val; @@ -821,14 +963,14 @@ static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev) do { ret = lan78xx_read_reg(dev, MII_ACC, &val); - if (unlikely(ret < 0)) - return -EIO; + if (ret < 0) + return ret; if (!(val & MII_ACC_MII_BUSY_)) return 0; } while (!time_after(jiffies, start_time + HZ)); - return -EIO; + return -ETIMEDOUT; } static inline u32 mii_access(int id, int index, int read) @@ -854,8 +996,8 @@ static int lan78xx_wait_eeprom(struct lan78xx_net *dev) do { ret = lan78xx_read_reg(dev, E2P_CMD, &val); - if (unlikely(ret < 0)) - return -EIO; + if (ret < 0) + return ret; if (!(val & E2P_CMD_EPC_BUSY_) || (val & E2P_CMD_EPC_TIMEOUT_)) @@ -865,7 +1007,7 @@ static int lan78xx_wait_eeprom(struct lan78xx_net *dev) if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) { netdev_warn(dev->net, "EEPROM read operation timeout"); - return -EIO; + return -ETIMEDOUT; } return 0; @@ -879,8 +1021,8 @@ static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev) do { ret = lan78xx_read_reg(dev, E2P_CMD, &val); - if (unlikely(ret < 0)) - return -EIO; + if (ret < 0) + return ret; if (!(val & E2P_CMD_EPC_BUSY_)) return 0; @@ -889,75 +1031,81 @@ static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev) } while (!time_after(jiffies, start_time + HZ)); netdev_warn(dev->net, "EEPROM is busy"); - return -EIO; + return -ETIMEDOUT; } static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset, u32 length, u8 *data) { - u32 val; - u32 saved; + u32 val, saved; int i, ret; - int retval; /* depends on chip, some EEPROM pins are muxed with LED function. * disable & restore LED function to access EEPROM. 
*/ ret = lan78xx_read_reg(dev, HW_CFG, &val); + if (ret < 0) + return ret; + saved = val; if (dev->chipid == ID_REV_CHIP_ID_7800_) { val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_); ret = lan78xx_write_reg(dev, HW_CFG, val); + if (ret < 0) + return ret; } - retval = lan78xx_eeprom_confirm_not_busy(dev); - if (retval) - return retval; + ret = lan78xx_eeprom_confirm_not_busy(dev); + if (ret == -ETIMEDOUT) + goto read_raw_eeprom_done; + /* If USB fails, there is nothing to do */ + if (ret < 0) + return ret; for (i = 0; i < length; i++) { val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_; val |= (offset & E2P_CMD_EPC_ADDR_MASK_); ret = lan78xx_write_reg(dev, E2P_CMD, val); - if (unlikely(ret < 0)) { - retval = -EIO; - goto exit; - } + if (ret < 0) + return ret; - retval = lan78xx_wait_eeprom(dev); - if (retval < 0) - goto exit; + ret = lan78xx_wait_eeprom(dev); + /* Looks like not USB specific error, try to recover */ + if (ret == -ETIMEDOUT) + goto read_raw_eeprom_done; + /* If USB fails, there is nothing to do */ + if (ret < 0) + return ret; ret = lan78xx_read_reg(dev, E2P_DATA, &val); - if (unlikely(ret < 0)) { - retval = -EIO; - goto exit; - } + if (ret < 0) + return ret; data[i] = val & 0xFF; offset++; } - retval = 0; -exit: +read_raw_eeprom_done: if (dev->chipid == ID_REV_CHIP_ID_7800_) - ret = lan78xx_write_reg(dev, HW_CFG, saved); + return lan78xx_write_reg(dev, HW_CFG, saved); - return retval; + return 0; } static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset, u32 length, u8 *data) { - u8 sig; int ret; + u8 sig; ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig); - if ((ret == 0) && (sig == EEPROM_INDICATOR)) - ret = lan78xx_read_raw_eeprom(dev, offset, length, data); - else - ret = -EINVAL; + if (ret < 0) + return ret; - return ret; + if (sig != EEPROM_INDICATOR) + return -ENODATA; + + return lan78xx_read_raw_eeprom(dev, offset, length, data); } static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset, @@ -966,113 +1114,144 @@ static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset, u32 val; u32 saved; int i, ret; - int retval; /* depends on chip, some EEPROM pins are muxed with LED function. * disable & restore LED function to access EEPROM. 
*/ ret = lan78xx_read_reg(dev, HW_CFG, &val); + if (ret < 0) + return ret; + saved = val; if (dev->chipid == ID_REV_CHIP_ID_7800_) { val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_); ret = lan78xx_write_reg(dev, HW_CFG, val); + if (ret < 0) + return ret; } - retval = lan78xx_eeprom_confirm_not_busy(dev); - if (retval) - goto exit; + ret = lan78xx_eeprom_confirm_not_busy(dev); + /* Looks like not USB specific error, try to recover */ + if (ret == -ETIMEDOUT) + goto write_raw_eeprom_done; + /* If USB fails, there is nothing to do */ + if (ret < 0) + return ret; /* Issue write/erase enable command */ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_; ret = lan78xx_write_reg(dev, E2P_CMD, val); - if (unlikely(ret < 0)) { - retval = -EIO; - goto exit; - } + if (ret < 0) + return ret; - retval = lan78xx_wait_eeprom(dev); - if (retval < 0) - goto exit; + ret = lan78xx_wait_eeprom(dev); + /* Looks like not USB specific error, try to recover */ + if (ret == -ETIMEDOUT) + goto write_raw_eeprom_done; + /* If USB fails, there is nothing to do */ + if (ret < 0) + return ret; for (i = 0; i < length; i++) { /* Fill data register */ val = data[i]; ret = lan78xx_write_reg(dev, E2P_DATA, val); - if (ret < 0) { - retval = -EIO; - goto exit; - } + if (ret < 0) + return ret; /* Send "write" command */ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_; val |= (offset & E2P_CMD_EPC_ADDR_MASK_); ret = lan78xx_write_reg(dev, E2P_CMD, val); - if (ret < 0) { - retval = -EIO; - goto exit; - } + if (ret < 0) + return ret; - retval = lan78xx_wait_eeprom(dev); - if (retval < 0) - goto exit; + ret = lan78xx_wait_eeprom(dev); + /* Looks like not USB specific error, try to recover */ + if (ret == -ETIMEDOUT) + goto write_raw_eeprom_done; + /* If USB fails, there is nothing to do */ + if (ret < 0) + return ret; offset++; } - retval = 0; -exit: +write_raw_eeprom_done: if (dev->chipid == ID_REV_CHIP_ID_7800_) - ret = lan78xx_write_reg(dev, HW_CFG, saved); + return lan78xx_write_reg(dev, HW_CFG, saved); - return retval; + return 0; } static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset, u32 length, u8 *data) { - int i; - u32 buf; unsigned long timeout; + int ret, i; + u32 buf; - lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + if (ret < 0) + return ret; if (buf & OTP_PWR_DN_PWRDN_N_) { /* clear it and wait to be cleared */ - lan78xx_write_reg(dev, OTP_PWR_DN, 0); + ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0); + if (ret < 0) + return ret; timeout = jiffies + HZ; do { usleep_range(1, 10); - lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + if (ret < 0) + return ret; + if (time_after(jiffies, timeout)) { netdev_warn(dev->net, "timeout on OTP_PWR_DN"); - return -EIO; + return -ETIMEDOUT; } } while (buf & OTP_PWR_DN_PWRDN_N_); } for (i = 0; i < length; i++) { - lan78xx_write_reg(dev, OTP_ADDR1, - ((offset + i) >> 8) & OTP_ADDR1_15_11); - lan78xx_write_reg(dev, OTP_ADDR2, - ((offset + i) & OTP_ADDR2_10_3)); + ret = lan78xx_write_reg(dev, OTP_ADDR1, + ((offset + i) >> 8) & OTP_ADDR1_15_11); + if (ret < 0) + return ret; - lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_); - lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_); + ret = lan78xx_write_reg(dev, OTP_ADDR2, + ((offset + i) & OTP_ADDR2_10_3)); + if (ret < 0) + return ret; + + ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_); + if (ret < 0) + return ret; + + ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_); + if (ret < 0) + return ret; timeout = jiffies + HZ; 
do { udelay(1); - lan78xx_read_reg(dev, OTP_STATUS, &buf); + ret = lan78xx_read_reg(dev, OTP_STATUS, &buf); + if (ret < 0) + return ret; + if (time_after(jiffies, timeout)) { netdev_warn(dev->net, "timeout on OTP_STATUS"); - return -EIO; + return -ETIMEDOUT; } } while (buf & OTP_STATUS_BUSY_); - lan78xx_read_reg(dev, OTP_RD_DATA, &buf); + ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf); + if (ret < 0) + return ret; data[i] = (u8)(buf & 0xFF); } @@ -1086,45 +1265,72 @@ static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset, int i; u32 buf; unsigned long timeout; + int ret; - lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + if (ret < 0) + return ret; if (buf & OTP_PWR_DN_PWRDN_N_) { /* clear it and wait to be cleared */ - lan78xx_write_reg(dev, OTP_PWR_DN, 0); + ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0); + if (ret < 0) + return ret; timeout = jiffies + HZ; do { udelay(1); - lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + if (ret < 0) + return ret; + if (time_after(jiffies, timeout)) { netdev_warn(dev->net, "timeout on OTP_PWR_DN completion"); - return -EIO; + return -ETIMEDOUT; } } while (buf & OTP_PWR_DN_PWRDN_N_); } /* set to BYTE program mode */ - lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_); + ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_); + if (ret < 0) + return ret; for (i = 0; i < length; i++) { - lan78xx_write_reg(dev, OTP_ADDR1, - ((offset + i) >> 8) & OTP_ADDR1_15_11); - lan78xx_write_reg(dev, OTP_ADDR2, - ((offset + i) & OTP_ADDR2_10_3)); - lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]); - lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_); - lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_); + ret = lan78xx_write_reg(dev, OTP_ADDR1, + ((offset + i) >> 8) & OTP_ADDR1_15_11); + if (ret < 0) + return ret; + + ret = lan78xx_write_reg(dev, OTP_ADDR2, + ((offset + i) & OTP_ADDR2_10_3)); + if (ret < 0) + return ret; + + ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]); + if (ret < 0) + return ret; + + ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_); + if (ret < 0) + return ret; + + ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_); + if (ret < 0) + return ret; timeout = jiffies + HZ; do { udelay(1); - lan78xx_read_reg(dev, OTP_STATUS, &buf); + ret = lan78xx_read_reg(dev, OTP_STATUS, &buf); + if (ret < 0) + return ret; + if (time_after(jiffies, timeout)) { netdev_warn(dev->net, "Timeout on OTP_STATUS completion"); - return -EIO; + return -ETIMEDOUT; } } while (buf & OTP_STATUS_BUSY_); } @@ -1161,7 +1367,7 @@ static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev) ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel); if (unlikely(ret < 0)) - return -EIO; + return ret; if (dp_sel & DP_SEL_DPRDY_) return 0; @@ -1171,44 +1377,51 @@ static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev) netdev_warn(dev->net, "%s timed out", __func__); - return -EIO; + return -ETIMEDOUT; } static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select, u32 addr, u32 length, u32 *buf) { struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); - u32 dp_sel; int i, ret; - if (usb_autopm_get_interface(dev->intf) < 0) - return 0; + ret = usb_autopm_get_interface(dev->intf); + if (ret < 0) + return ret; mutex_lock(&pdata->dataport_mutex); ret = lan78xx_dataport_wait_not_busy(dev); if (ret < 0) - goto done; - - ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel); + goto dataport_write; - dp_sel &= ~DP_SEL_RSEL_MASK_; - 
dp_sel |= ram_select; - ret = lan78xx_write_reg(dev, DP_SEL, dp_sel); + ret = lan78xx_update_reg(dev, DP_SEL, DP_SEL_RSEL_MASK_, ram_select); + if (ret < 0) + goto dataport_write; for (i = 0; i < length; i++) { ret = lan78xx_write_reg(dev, DP_ADDR, addr + i); + if (ret < 0) + goto dataport_write; ret = lan78xx_write_reg(dev, DP_DATA, buf[i]); + if (ret < 0) + goto dataport_write; ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_); + if (ret < 0) + goto dataport_write; ret = lan78xx_dataport_wait_not_busy(dev); if (ret < 0) - goto done; + goto dataport_write; } -done: +dataport_write: + if (ret < 0) + netdev_warn(dev->net, "dataport write failed %pe", ERR_PTR(ret)); + mutex_unlock(&pdata->dataport_mutex); usb_autopm_put_interface(dev->intf); @@ -1244,23 +1457,39 @@ static void lan78xx_deferred_multicast_write(struct work_struct *param) struct lan78xx_priv *pdata = container_of(param, struct lan78xx_priv, set_multicast); struct lan78xx_net *dev = pdata->dev; - int i; + int i, ret; netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n", pdata->rfe_ctl); - lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN, - DP_SEL_VHF_HASH_LEN, pdata->mchash_table); + ret = lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, + DP_SEL_VHF_VLAN_LEN, + DP_SEL_VHF_HASH_LEN, pdata->mchash_table); + if (ret < 0) + goto multicast_write_done; for (i = 1; i < NUM_OF_MAF; i++) { - lan78xx_write_reg(dev, MAF_HI(i), 0); - lan78xx_write_reg(dev, MAF_LO(i), - pdata->pfilter_table[i][1]); - lan78xx_write_reg(dev, MAF_HI(i), - pdata->pfilter_table[i][0]); + ret = lan78xx_write_reg(dev, MAF_HI(i), 0); + if (ret < 0) + goto multicast_write_done; + + ret = lan78xx_write_reg(dev, MAF_LO(i), + pdata->pfilter_table[i][1]); + if (ret < 0) + goto multicast_write_done; + + ret = lan78xx_write_reg(dev, MAF_HI(i), + pdata->pfilter_table[i][0]); + if (ret < 0) + goto multicast_write_done; } - lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); + ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); + +multicast_write_done: + if (ret < 0) + netdev_warn(dev->net, "multicast write failed %pe", ERR_PTR(ret)); + return; } static void lan78xx_set_multicast(struct net_device *netdev) @@ -1375,7 +1604,7 @@ static int lan78xx_mac_reset(struct lan78xx_net *dev) * bus can result in the MAC interface locking up and not * completing register access transactions. 
*/ - ret = lan78xx_phy_wait_not_busy(dev); + ret = lan78xx_mdiobus_wait_not_busy(dev); if (ret < 0) goto done; @@ -1920,13 +2149,19 @@ static const struct ethtool_ops lan78xx_ethtool_ops = { .get_regs = lan78xx_get_regs, }; -static void lan78xx_init_mac_address(struct lan78xx_net *dev) +static int lan78xx_init_mac_address(struct lan78xx_net *dev) { u32 addr_lo, addr_hi; u8 addr[6]; + int ret; - lan78xx_read_reg(dev, RX_ADDRL, &addr_lo); - lan78xx_read_reg(dev, RX_ADDRH, &addr_hi); + ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo); + if (ret < 0) + return ret; + + ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi); + if (ret < 0) + return ret; addr[0] = addr_lo & 0xFF; addr[1] = (addr_lo >> 8) & 0xFF; @@ -1959,14 +2194,26 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev) (addr[2] << 16) | (addr[3] << 24); addr_hi = addr[4] | (addr[5] << 8); - lan78xx_write_reg(dev, RX_ADDRL, addr_lo); - lan78xx_write_reg(dev, RX_ADDRH, addr_hi); + ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); + if (ret < 0) + return ret; + + ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi); + if (ret < 0) + return ret; } - lan78xx_write_reg(dev, MAF_LO(0), addr_lo); - lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); + ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo); + if (ret < 0) + return ret; + + ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); + if (ret < 0) + return ret; eth_hw_addr_set(dev->net, addr); + + return 0; } /* MDIO read and write wrappers for phylib */ @@ -1983,19 +2230,23 @@ static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx) mutex_lock(&dev->phy_mutex); /* confirm MII not busy */ - ret = lan78xx_phy_wait_not_busy(dev); + ret = lan78xx_mdiobus_wait_not_busy(dev); if (ret < 0) goto done; /* set the address, index & direction (read from PHY) */ addr = mii_access(phy_id, idx, MII_READ); ret = lan78xx_write_reg(dev, MII_ACC, addr); + if (ret < 0) + goto done; - ret = lan78xx_phy_wait_not_busy(dev); + ret = lan78xx_mdiobus_wait_not_busy(dev); if (ret < 0) goto done; ret = lan78xx_read_reg(dev, MII_DATA, &val); + if (ret < 0) + goto done; ret = (int)(val & 0xFFFF); @@ -2020,25 +2271,29 @@ static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx, mutex_lock(&dev->phy_mutex); /* confirm MII not busy */ - ret = lan78xx_phy_wait_not_busy(dev); + ret = lan78xx_mdiobus_wait_not_busy(dev); if (ret < 0) goto done; val = (u32)regval; ret = lan78xx_write_reg(dev, MII_DATA, val); + if (ret < 0) + goto done; /* set the address, index & direction (write to PHY) */ addr = mii_access(phy_id, idx, MII_WRITE); ret = lan78xx_write_reg(dev, MII_ACC, addr); + if (ret < 0) + goto done; - ret = lan78xx_phy_wait_not_busy(dev); + ret = lan78xx_mdiobus_wait_not_busy(dev); if (ret < 0) goto done; done: mutex_unlock(&dev->phy_mutex); usb_autopm_put_interface(dev->intf); - return 0; + return ret; } static int lan78xx_mdio_init(struct lan78xx_net *dev) @@ -2164,13 +2419,22 @@ static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd) struct lan78xx_net *dev = container_of(data, struct lan78xx_net, domain_data); u32 buf; + int ret; /* call register access here because irq_bus_lock & irq_bus_sync_unlock * are only two callbacks executed in non-atomic contex. 
*/ - lan78xx_read_reg(dev, INT_EP_CTL, &buf); + ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf); + if (ret < 0) + goto irq_bus_sync_unlock; + if (buf != data->irqenable) - lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable); + ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable); + +irq_bus_sync_unlock: + if (ret < 0) + netdev_err(dev->net, "Failed to sync IRQ enable register: %pe\n", + ERR_PTR(ret)); mutex_unlock(&data->irq_lock); } @@ -2195,7 +2459,10 @@ static int lan78xx_setup_irq_domain(struct lan78xx_net *dev) mutex_init(&dev->domain_data.irq_lock); - lan78xx_read_reg(dev, INT_EP_CTL, &buf); + ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf); + if (ret < 0) + return ret; + dev->domain_data.irqenable = buf; dev->domain_data.irqchip = &lan78xx_irqchip; @@ -2234,46 +2501,6 @@ static void lan78xx_remove_irq_domain(struct lan78xx_net *dev) dev->domain_data.irqdomain = NULL; } -static int lan8835_fixup(struct phy_device *phydev) -{ - int buf; - struct lan78xx_net *dev = netdev_priv(phydev->attached_dev); - - /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */ - buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010); - buf &= ~0x1800; - buf |= 0x0800; - phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf); - - /* RGMII MAC TXC Delay Enable */ - lan78xx_write_reg(dev, MAC_RGMII_ID, - MAC_RGMII_ID_TXC_DELAY_EN_); - - /* RGMII TX DLL Tune Adjust */ - lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00); - - dev->interface = PHY_INTERFACE_MODE_RGMII_TXID; - - return 1; -} - -static int ksz9031rnx_fixup(struct phy_device *phydev) -{ - struct lan78xx_net *dev = netdev_priv(phydev->attached_dev); - - /* Micrel9301RNX PHY configuration */ - /* RGMII Control Signal Pad Skew */ - phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077); - /* RGMII RX Data Pad Skew */ - phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777); - /* RGMII RX Clock Pad Skew */ - phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF); - - dev->interface = PHY_INTERFACE_MODE_RGMII_RXID; - - return 1; -} - static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev) { u32 buf; @@ -2307,22 +2534,11 @@ static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev) netdev_err(dev->net, "no PHY driver found\n"); return NULL; } - dev->interface = PHY_INTERFACE_MODE_RGMII; - /* external PHY fixup for KSZ9031RNX */ - ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0, - ksz9031rnx_fixup); - if (ret < 0) { - netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n"); - return NULL; - } - /* external PHY fixup for LAN8835 */ - ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0, - lan8835_fixup); - if (ret < 0) { - netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n"); - return NULL; - } - /* add more external PHY fixup here if needed */ + dev->interface = PHY_INTERFACE_MODE_RGMII_ID; + /* The PHY driver is responsible to configure proper RGMII + * interface delays. Disable RGMII delays on MAC side. 
+ */ + lan78xx_write_reg(dev, MAC_RGMII_ID, 0); phydev->is_internal = false; } @@ -2381,11 +2597,6 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) if (phy_is_pseudo_fixed_link(phydev)) { fixed_phy_unregister(phydev); phy_device_free(phydev); - } else { - phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, - 0xfffffff0); - phy_unregister_fixup_for_uid(PHY_LAN8835, - 0xfffffff0); } } return -EIO; @@ -2437,27 +2648,36 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size) { - u32 buf; bool rxenabled; + u32 buf; + int ret; - lan78xx_read_reg(dev, MAC_RX, &buf); + ret = lan78xx_read_reg(dev, MAC_RX, &buf); + if (ret < 0) + return ret; rxenabled = ((buf & MAC_RX_RXEN_) != 0); if (rxenabled) { buf &= ~MAC_RX_RXEN_; - lan78xx_write_reg(dev, MAC_RX, buf); + ret = lan78xx_write_reg(dev, MAC_RX, buf); + if (ret < 0) + return ret; } /* add 4 to size for FCS */ buf &= ~MAC_RX_MAX_SIZE_MASK_; buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_); - lan78xx_write_reg(dev, MAC_RX, buf); + ret = lan78xx_write_reg(dev, MAC_RX, buf); + if (ret < 0) + return ret; if (rxenabled) { buf |= MAC_RX_RXEN_; - lan78xx_write_reg(dev, MAC_RX, buf); + ret = lan78xx_write_reg(dev, MAC_RX, buf); + if (ret < 0) + return ret; } return 0; @@ -2523,7 +2743,10 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu) return ret; ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len); - if (!ret) + if (ret < 0) + netdev_err(dev->net, "MTU changed to %d from %d failed with %pe\n", + new_mtu, netdev->mtu, ERR_PTR(ret)); + else WRITE_ONCE(netdev->mtu, new_mtu); usb_autopm_put_interface(dev->intf); @@ -2536,6 +2759,7 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p) struct lan78xx_net *dev = netdev_priv(netdev); struct sockaddr *addr = p; u32 addr_lo, addr_hi; + int ret; if (netif_running(netdev)) return -EBUSY; @@ -2552,14 +2776,20 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p) addr_hi = netdev->dev_addr[4] | netdev->dev_addr[5] << 8; - lan78xx_write_reg(dev, RX_ADDRL, addr_lo); - lan78xx_write_reg(dev, RX_ADDRH, addr_hi); + ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); + if (ret < 0) + return ret; + + ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi); + if (ret < 0) + return ret; /* Added to support MAC address changes */ - lan78xx_write_reg(dev, MAF_LO(0), addr_lo); - lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); + ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo); + if (ret < 0) + return ret; - return 0; + return lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); } /* Enable or disable Rx checksum offload engine */ @@ -2592,9 +2822,7 @@ static int lan78xx_set_features(struct net_device *netdev, spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); - lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); - - return 0; + return lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); } static void lan78xx_deferred_vlan_write(struct work_struct *param) @@ -2645,13 +2873,16 @@ static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev, return 0; } -static void lan78xx_init_ltm(struct lan78xx_net *dev) +static int lan78xx_init_ltm(struct lan78xx_net *dev) { + u32 regs[6] = { 0 }; int ret; u32 buf; - u32 regs[6] = { 0 }; ret = lan78xx_read_reg(dev, USB_CFG1, &buf); + if (ret < 0) + goto init_ltm_failed; + if (buf & USB_CFG1_LTM_ENABLE_) { u8 temp[2]; /* Get values from EEPROM first */ @@ -2662,7 +2893,7 @@ static void lan78xx_init_ltm(struct lan78xx_net *dev) 24, 
(u8 *)regs); if (ret < 0) - return; + return ret; } } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) { if (temp[0] == 24) { @@ -2671,17 +2902,40 @@ static void lan78xx_init_ltm(struct lan78xx_net *dev) 24, (u8 *)regs); if (ret < 0) - return; + return ret; } } } - lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]); - lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]); - lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]); - lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]); - lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]); - lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]); + ret = lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]); + if (ret < 0) + goto init_ltm_failed; + + ret = lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]); + if (ret < 0) + goto init_ltm_failed; + + ret = lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]); + if (ret < 0) + goto init_ltm_failed; + + ret = lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]); + if (ret < 0) + goto init_ltm_failed; + + ret = lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]); + if (ret < 0) + goto init_ltm_failed; + + ret = lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]); + if (ret < 0) + goto init_ltm_failed; + + return 0; + +init_ltm_failed: + netdev_err(dev->net, "Failed to init LTM with error %pe\n", ERR_PTR(ret)); + return ret; } static int lan78xx_urb_config_init(struct lan78xx_net *dev) @@ -2722,156 +2976,6 @@ static int lan78xx_urb_config_init(struct lan78xx_net *dev) return result; } -static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable) -{ - return lan78xx_update_reg(dev, reg, hw_enable, hw_enable); -} - -static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled, - u32 hw_disabled) -{ - unsigned long timeout; - bool stopped = true; - int ret; - u32 buf; - - /* Stop the h/w block (if not already stopped) */ - - ret = lan78xx_read_reg(dev, reg, &buf); - if (ret < 0) - return ret; - - if (buf & hw_enabled) { - buf &= ~hw_enabled; - - ret = lan78xx_write_reg(dev, reg, buf); - if (ret < 0) - return ret; - - stopped = false; - timeout = jiffies + HW_DISABLE_TIMEOUT; - do { - ret = lan78xx_read_reg(dev, reg, &buf); - if (ret < 0) - return ret; - - if (buf & hw_disabled) - stopped = true; - else - msleep(HW_DISABLE_DELAY_MS); - } while (!stopped && !time_after(jiffies, timeout)); - } - - ret = stopped ? 0 : -ETIME; - - return ret; -} - -static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush) -{ - return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush); -} - -static int lan78xx_start_tx_path(struct lan78xx_net *dev) -{ - int ret; - - netif_dbg(dev, drv, dev->net, "start tx path"); - - /* Start the MAC transmitter */ - - ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_); - if (ret < 0) - return ret; - - /* Start the Tx FIFO */ - - ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_); - if (ret < 0) - return ret; - - return 0; -} - -static int lan78xx_stop_tx_path(struct lan78xx_net *dev) -{ - int ret; - - netif_dbg(dev, drv, dev->net, "stop tx path"); - - /* Stop the Tx FIFO */ - - ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_); - if (ret < 0) - return ret; - - /* Stop the MAC transmitter */ - - ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_); - if (ret < 0) - return ret; - - return 0; -} - -/* The caller must ensure the Tx path is stopped before calling - * lan78xx_flush_tx_fifo(). 
- */ -static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev) -{ - return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_); -} - -static int lan78xx_start_rx_path(struct lan78xx_net *dev) -{ - int ret; - - netif_dbg(dev, drv, dev->net, "start rx path"); - - /* Start the Rx FIFO */ - - ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_); - if (ret < 0) - return ret; - - /* Start the MAC receiver*/ - - ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_); - if (ret < 0) - return ret; - - return 0; -} - -static int lan78xx_stop_rx_path(struct lan78xx_net *dev) -{ - int ret; - - netif_dbg(dev, drv, dev->net, "stop rx path"); - - /* Stop the MAC receiver */ - - ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_); - if (ret < 0) - return ret; - - /* Stop the Rx FIFO */ - - ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_); - if (ret < 0) - return ret; - - return 0; -} - -/* The caller must ensure the Rx path is stopped before calling - * lan78xx_flush_rx_fifo(). - */ -static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev) -{ - return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_); -} - static int lan78xx_reset(struct lan78xx_net *dev) { struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); @@ -2905,7 +3009,9 @@ static int lan78xx_reset(struct lan78xx_net *dev) } } while (buf & HW_CFG_LRST_); - lan78xx_init_mac_address(dev); + ret = lan78xx_init_mac_address(dev); + if (ret < 0) + return ret; /* save DEVID for later usage */ ret = lan78xx_read_reg(dev, ID_REV, &buf); @@ -2927,7 +3033,9 @@ static int lan78xx_reset(struct lan78xx_net *dev) return ret; /* Init LTM */ - lan78xx_init_ltm(dev); + ret = lan78xx_init_ltm(dev); + if (ret < 0) + return ret; ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap); if (ret < 0) @@ -4242,9 +4350,6 @@ static void lan78xx_disconnect(struct usb_interface *intf) phydev = net->phydev; - phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); - phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0); - phy_disconnect(net->phydev); if (phy_is_pseudo_fixed_link(phydev)) { diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 67d25f4f94ef..ca81b212a246 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -122,16 +122,6 @@ struct net_vrf { int ifindex; }; -static void vrf_rx_stats(struct net_device *dev, int len) -{ - struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); - - u64_stats_update_begin(&dstats->syncp); - u64_stats_inc(&dstats->rx_packets); - u64_stats_add(&dstats->rx_bytes, len); - u64_stats_update_end(&dstats->syncp); -} - static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb) { vrf_dev->stats.tx_errors++; @@ -369,7 +359,7 @@ static bool qdisc_tx_is_default(const struct net_device *dev) static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev, struct dst_entry *dst) { - int len = skb->len; + unsigned int len = skb->len; skb_orphan(skb); @@ -382,15 +372,10 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev, skb->protocol = eth_type_trans(skb, dev); - if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) { - vrf_rx_stats(dev, len); - } else { - struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); - - u64_stats_update_begin(&dstats->syncp); - u64_stats_inc(&dstats->rx_drops); - u64_stats_update_end(&dstats->syncp); - } + if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) + dev_dstats_rx_add(dev, len); + else + dev_dstats_rx_dropped(dev); return NETDEV_TX_OK; } @@ -578,20 +563,14 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct 
net_device *dev) static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) { - struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); - - int len = skb->len; - netdev_tx_t ret = is_ip_tx_frame(skb, dev); - - u64_stats_update_begin(&dstats->syncp); - if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { + unsigned int len = skb->len; + netdev_tx_t ret; - u64_stats_inc(&dstats->tx_packets); - u64_stats_add(&dstats->tx_bytes, len); - } else { - u64_stats_inc(&dstats->tx_drops); - } - u64_stats_update_end(&dstats->syncp); + ret = is_ip_tx_frame(skb, dev); + if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) + dev_dstats_tx_add(dev, len); + else + dev_dstats_tx_dropped(dev); return ret; } @@ -1364,7 +1343,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, if (!is_ndisc) { struct net_device *orig_dev = skb->dev; - vrf_rx_stats(vrf_dev, skb->len); + dev_dstats_rx_add(vrf_dev, skb->len); skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; @@ -1420,7 +1399,7 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev, goto out; } - vrf_rx_stats(vrf_dev, skb->len); + dev_dstats_rx_add(vrf_dev, skb->len); if (!list_empty(&vrf_dev->ptype_all)) { int err; diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index 9ea63059d52d..0c356e0a61ef 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -622,9 +622,9 @@ static int vxlan_fdb_append(struct vxlan_fdb *f, return 1; } -static bool vxlan_parse_gpe_proto(struct vxlanhdr *hdr, __be16 *protocol) +static bool vxlan_parse_gpe_proto(const struct vxlanhdr *hdr, __be16 *protocol) { - struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)hdr; + const struct vxlanhdr_gpe *gpe = (const struct vxlanhdr_gpe *)hdr; /* Need to have Next Protocol set for interfaces in GPE mode. 
*/ if (!gpe->np_applied) @@ -1352,6 +1352,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, struct net_device *filter_dev, int *idx) { + struct ndo_fdb_dump_context *ctx = (void *)cb->ctx; struct vxlan_dev *vxlan = netdev_priv(dev); unsigned int h; int err = 0; @@ -1364,7 +1365,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, struct vxlan_rdst *rd; if (rcu_access_pointer(f->nh)) { - if (*idx < cb->args[2]) + if (*idx < ctx->fdb_idx) goto skip_nh; err = vxlan_fdb_info(skb, vxlan, f, NETLINK_CB(cb->skb).portid, @@ -1381,7 +1382,7 @@ skip_nh: } list_for_each_entry_rcu(rd, &f->remotes, list) { - if (*idx < cb->args[2]) + if (*idx < ctx->fdb_idx) goto skip; err = vxlan_fdb_info(skb, vxlan, f, @@ -1554,18 +1555,17 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan) #endif } -static enum skb_drop_reason vxlan_remcsum(struct vxlanhdr *unparsed, - struct sk_buff *skb, - u32 vxflags) +static enum skb_drop_reason vxlan_remcsum(struct sk_buff *skb, u32 vxflags) { + const struct vxlanhdr *vh = vxlan_hdr(skb); enum skb_drop_reason reason; size_t start, offset; - if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload) - goto out; + if (!(vh->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload) + return SKB_NOT_DROPPED_YET; - start = vxlan_rco_start(unparsed->vx_vni); - offset = start + vxlan_rco_offset(unparsed->vx_vni); + start = vxlan_rco_start(vh->vx_vni); + offset = start + vxlan_rco_offset(vh->vx_vni); reason = pskb_may_pull_reason(skb, offset + sizeof(u16)); if (reason) @@ -1573,22 +1573,20 @@ static enum skb_drop_reason vxlan_remcsum(struct vxlanhdr *unparsed, skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset, !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL)); -out: - unparsed->vx_flags &= ~VXLAN_HF_RCO; - unparsed->vx_vni &= VXLAN_VNI_MASK; - return SKB_NOT_DROPPED_YET; } -static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed, - struct sk_buff *skb, u32 vxflags, +static void vxlan_parse_gbp_hdr(struct sk_buff *skb, u32 vxflags, struct vxlan_metadata *md) { - struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed; + const struct vxlanhdr *vh = vxlan_hdr(skb); + const struct vxlanhdr_gbp *gbp; struct metadata_dst *tun_dst; - if (!(unparsed->vx_flags & VXLAN_HF_GBP)) - goto out; + gbp = (const struct vxlanhdr_gbp *)vh; + + if (!(vh->vx_flags & VXLAN_HF_GBP)) + return; md->gbp = ntohs(gbp->policy_id); @@ -1607,8 +1605,6 @@ static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed, /* In flow-based mode, GBP is carried in dst_metadata */ if (!(vxflags & VXLAN_F_COLLECT_METADATA)) skb->mark = md->gbp; -out: - unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS; } static enum skb_drop_reason vxlan_set_mac(struct vxlan_dev *vxlan, @@ -1672,9 +1668,9 @@ static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph, static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) { struct vxlan_vni_node *vninode = NULL; + const struct vxlanhdr *vh; struct vxlan_dev *vxlan; struct vxlan_sock *vs; - struct vxlanhdr unparsed; struct vxlan_metadata _md; struct vxlan_metadata *md = &_md; __be16 protocol = htons(ETH_P_TEB); @@ -1689,24 +1685,21 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) if (reason) goto drop; - unparsed = *vxlan_hdr(skb); + vh = vxlan_hdr(skb); /* VNI flag always required to be set */ - if (!(unparsed.vx_flags & VXLAN_HF_VNI)) { + if (!(vh->vx_flags & VXLAN_HF_VNI)) { netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n", - ntohl(vxlan_hdr(skb)->vx_flags), - 
ntohl(vxlan_hdr(skb)->vx_vni)); + ntohl(vh->vx_flags), ntohl(vh->vx_vni)); reason = SKB_DROP_REASON_VXLAN_INVALID_HDR; /* Return non vxlan pkt */ goto drop; } - unparsed.vx_flags &= ~VXLAN_HF_VNI; - unparsed.vx_vni &= ~VXLAN_VNI_MASK; vs = rcu_dereference_sk_user_data(sk); if (!vs) goto drop; - vni = vxlan_vni(vxlan_hdr(skb)->vx_vni); + vni = vxlan_vni(vh->vx_vni); vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, &vninode); if (!vxlan) { @@ -1714,13 +1707,27 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) goto drop; } - /* For backwards compatibility, only allow reserved fields to be - * used by VXLAN extensions if explicitly requested. - */ - if (vs->flags & VXLAN_F_GPE) { - if (!vxlan_parse_gpe_proto(&unparsed, &protocol)) + if (vh->vx_flags & vxlan->cfg.reserved_bits.vx_flags || + vh->vx_vni & vxlan->cfg.reserved_bits.vx_vni) { + /* If the header uses bits besides those enabled by the + * netdevice configuration, treat this as a malformed packet. + * This behavior diverges from VXLAN RFC (RFC7348) which + * stipulates that bits in reserved in reserved fields are to be + * ignored. The approach here maintains compatibility with + * previous stack code, and also is more robust and provides a + * little more security in adding extensions to VXLAN. + */ + reason = SKB_DROP_REASON_VXLAN_INVALID_HDR; + DEV_STATS_INC(vxlan->dev, rx_frame_errors); + DEV_STATS_INC(vxlan->dev, rx_errors); + vxlan_vnifilter_count(vxlan, vni, vninode, + VXLAN_VNI_STATS_RX_ERRORS, 0); + goto drop; + } + + if (vxlan->cfg.flags & VXLAN_F_GPE) { + if (!vxlan_parse_gpe_proto(vh, &protocol)) goto drop; - unparsed.vx_flags &= ~VXLAN_GPE_USED_BITS; raw_proto = true; } @@ -1730,8 +1737,8 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) goto drop; } - if (vs->flags & VXLAN_F_REMCSUM_RX) { - reason = vxlan_remcsum(&unparsed, skb, vs->flags); + if (vxlan->cfg.flags & VXLAN_F_REMCSUM_RX) { + reason = vxlan_remcsum(skb, vxlan->cfg.flags); if (unlikely(reason)) goto drop; } @@ -1756,25 +1763,12 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) memset(md, 0, sizeof(*md)); } - if (vs->flags & VXLAN_F_GBP) - vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md); + if (vxlan->cfg.flags & VXLAN_F_GBP) + vxlan_parse_gbp_hdr(skb, vxlan->cfg.flags, md); /* Note that GBP and GPE can never be active together. This is * ensured in vxlan_dev_configure. */ - if (unparsed.vx_flags || unparsed.vx_vni) { - /* If there are any unprocessed flags remaining treat - * this as a malformed packet. This behavior diverges from - * VXLAN RFC (RFC7348) which stipulates that bits in reserved - * in reserved fields are to be ignored. The approach here - * maintains compatibility with previous stack code, and also - * is more robust and provides a little more security in - * adding extensions to VXLAN. 
- */ - reason = SKB_DROP_REASON_VXLAN_INVALID_HDR; - goto drop; - } - if (!raw_proto) { reason = vxlan_set_mac(vxlan, vs, skb, vni); if (reason) @@ -1818,14 +1812,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) if (unlikely(!(vxlan->dev->flags & IFF_UP))) { rcu_read_unlock(); - dev_core_stats_rx_dropped_inc(vxlan->dev); + dev_dstats_rx_dropped(vxlan->dev); vxlan_vnifilter_count(vxlan, vni, vninode, VXLAN_VNI_STATS_RX_DROPS, 0); reason = SKB_DROP_REASON_DEV_READY; goto drop; } - dev_sw_netstats_rx_add(vxlan->dev, skb->len); + dev_dstats_rx_add(vxlan->dev, skb->len); vxlan_vnifilter_count(vxlan, vni, vninode, VXLAN_VNI_STATS_RX, skb->len); gro_cells_receive(&vxlan->gro_cells, skb); @@ -1880,7 +1874,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) goto out; if (!pskb_may_pull(skb, arp_hdr_len(dev))) { - dev_core_stats_tx_dropped_inc(dev); + dev_dstats_tx_dropped(dev); vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0); goto out; @@ -1938,7 +1932,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) reply->pkt_type = PACKET_HOST; if (netif_rx(reply) == NET_RX_DROP) { - dev_core_stats_rx_dropped_inc(dev); + dev_dstats_rx_dropped(dev); vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_RX_DROPS, 0); } @@ -2097,7 +2091,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) goto out; if (netif_rx(reply) == NET_RX_DROP) { - dev_core_stats_rx_dropped_inc(dev); + dev_dstats_rx_dropped(dev); vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_RX_DROPS, 0); } @@ -2271,8 +2265,8 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, { union vxlan_addr loopback; union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; + unsigned int len = skb->len; struct net_device *dev; - int len = skb->len; skb->pkt_type = PACKET_HOST; skb->encapsulation = 0; @@ -2299,16 +2293,16 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, if ((dst_vxlan->cfg.flags & VXLAN_F_LEARN) && snoop) vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni); - dev_sw_netstats_tx_add(src_vxlan->dev, 1, len); + dev_dstats_tx_add(src_vxlan->dev, len); vxlan_vnifilter_count(src_vxlan, vni, NULL, VXLAN_VNI_STATS_TX, len); if (__netif_rx(skb) == NET_RX_SUCCESS) { - dev_sw_netstats_rx_add(dst_vxlan->dev, len); + dev_dstats_rx_add(dst_vxlan->dev, len); vxlan_vnifilter_count(dst_vxlan, vni, NULL, VXLAN_VNI_STATS_RX, len); } else { drop: - dev_core_stats_rx_dropped_inc(dev); + dev_dstats_rx_dropped(dev); vxlan_vnifilter_count(dst_vxlan, vni, NULL, VXLAN_VNI_STATS_RX_DROPS, 0); } @@ -2621,7 +2615,7 @@ out_unlock: return; drop: - dev_core_stats_tx_dropped_inc(dev); + dev_dstats_tx_dropped(dev); vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0); kfree_skb_reason(skb, reason); return; @@ -2666,7 +2660,7 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev, return; drop: - dev_core_stats_tx_dropped_inc(dev); + dev_dstats_tx_dropped(dev); vxlan_vnifilter_count(netdev_priv(dev), vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0); dev_kfree_skb(skb); @@ -2704,7 +2698,7 @@ static netdev_tx_t vxlan_xmit_nhid(struct sk_buff *skb, struct net_device *dev, return NETDEV_TX_OK; drop: - dev_core_stats_tx_dropped_inc(dev); + dev_dstats_tx_dropped(dev); vxlan_vnifilter_count(netdev_priv(dev), vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0); dev_kfree_skb(skb); @@ -2801,7 +2795,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, 
struct net_device *dev) !is_multicast_ether_addr(eth->h_dest)) vxlan_fdb_miss(vxlan, eth->h_dest); - dev_core_stats_tx_dropped_inc(dev); + dev_dstats_tx_dropped(dev); vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0); kfree_skb_reason(skb, SKB_DROP_REASON_VXLAN_NO_REMOTE); @@ -3371,7 +3365,7 @@ static void vxlan_setup(struct net_device *dev) dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = ETH_MAX_MTU; - dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; + dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS; INIT_LIST_HEAD(&vxlan->next); timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE); @@ -3435,6 +3429,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { [IFLA_VXLAN_VNIFILTER] = { .type = NLA_U8 }, [IFLA_VXLAN_LOCALBYPASS] = NLA_POLICY_MAX(NLA_U8, 1), [IFLA_VXLAN_LABEL_POLICY] = NLA_POLICY_MAX(NLA_U32, VXLAN_LABEL_MAX), + [IFLA_VXLAN_RESERVED_BITS] = NLA_POLICY_EXACT_LEN(sizeof(struct vxlanhdr)), }; static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], @@ -4070,6 +4065,10 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], struct net_device *dev, struct vxlan_config *conf, bool changelink, struct netlink_ext_ack *extack) { + struct vxlanhdr used_bits = { + .vx_flags = VXLAN_HF_VNI, + .vx_vni = VXLAN_VNI_MASK, + }; struct vxlan_dev *vxlan = netdev_priv(dev); int err = 0; @@ -4296,6 +4295,8 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], extack); if (err) return err; + used_bits.vx_flags |= VXLAN_HF_RCO; + used_bits.vx_vni |= ~VXLAN_VNI_MASK; } if (data[IFLA_VXLAN_GBP]) { @@ -4303,6 +4304,7 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], VXLAN_F_GBP, changelink, false, extack); if (err) return err; + used_bits.vx_flags |= VXLAN_GBP_USED_BITS; } if (data[IFLA_VXLAN_GPE]) { @@ -4311,6 +4313,46 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], extack); if (err) return err; + + used_bits.vx_flags |= VXLAN_GPE_USED_BITS; + } + + if (data[IFLA_VXLAN_RESERVED_BITS]) { + struct vxlanhdr reserved_bits; + + if (changelink) { + NL_SET_ERR_MSG_ATTR(extack, + data[IFLA_VXLAN_RESERVED_BITS], + "Cannot change reserved_bits"); + return -EOPNOTSUPP; + } + + nla_memcpy(&reserved_bits, data[IFLA_VXLAN_RESERVED_BITS], + sizeof(reserved_bits)); + if (used_bits.vx_flags & reserved_bits.vx_flags || + used_bits.vx_vni & reserved_bits.vx_vni) { + __be64 ub_be64, rb_be64; + + memcpy(&ub_be64, &used_bits, sizeof(ub_be64)); + memcpy(&rb_be64, &reserved_bits, sizeof(rb_be64)); + + NL_SET_ERR_MSG_ATTR_FMT(extack, + data[IFLA_VXLAN_RESERVED_BITS], + "Used bits %#018llx cannot overlap reserved bits %#018llx", + be64_to_cpu(ub_be64), + be64_to_cpu(rb_be64)); + return -EINVAL; + } + + conf->reserved_bits = reserved_bits; + } else { + /* For backwards compatibility, only allow reserved fields to be + * used by VXLAN extensions if explicitly requested. 
+ */ + conf->reserved_bits = (struct vxlanhdr) { + .vx_flags = ~used_bits.vx_flags, + .vx_vni = ~used_bits.vx_vni, + }; } if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) { @@ -4497,6 +4539,8 @@ static size_t vxlan_get_size(const struct net_device *dev) nla_total_size(0) + /* IFLA_VXLAN_GPE */ nla_total_size(0) + /* IFLA_VXLAN_REMCSUM_NOPARTIAL */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_VNIFILTER */ + /* IFLA_VXLAN_RESERVED_BITS */ + nla_total_size(sizeof(struct vxlanhdr)) + 0; } @@ -4599,6 +4643,11 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) !!(vxlan->cfg.flags & VXLAN_F_VNIFILTER))) goto nla_put_failure; + if (nla_put(skb, IFLA_VXLAN_RESERVED_BITS, + sizeof(vxlan->cfg.reserved_bits), + &vxlan->cfg.reserved_bits)) + goto nla_put_failure; + return 0; nla_put_failure: diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c index 8381b0dc7acb..02f2ec7cf4ce 100644 --- a/drivers/net/wwan/t7xx/t7xx_pci.c +++ b/drivers/net/wwan/t7xx/t7xx_pci.c @@ -43,6 +43,8 @@ #include "t7xx_state_monitor.h" #include "t7xx_port_proxy.h" +#define DRIVER_NAME "mtk_t7xx" + #define T7XX_PCI_IREG_BASE 0 #define T7XX_PCI_EREG_BASE 2 @@ -833,6 +835,7 @@ static void t7xx_pci_infracfg_ao_calc(struct t7xx_pci_dev *t7xx_dev) static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct t7xx_pci_dev *t7xx_dev; + void __iomem *iomem; int ret; t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL); @@ -848,12 +851,21 @@ static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev); - ret = pcim_iomap_regions(pdev, BIT(T7XX_PCI_IREG_BASE) | BIT(T7XX_PCI_EREG_BASE), - pci_name(pdev)); + iomem = pcim_iomap_region(pdev, T7XX_PCI_IREG_BASE, DRIVER_NAME); + ret = PTR_ERR_OR_ZERO(iomem); + if (ret) { + dev_err(&pdev->dev, "Could not request IREG BAR: %d\n", ret); + return -ENOMEM; + } + IREG_BASE(t7xx_dev) = iomem; + + iomem = pcim_iomap_region(pdev, T7XX_PCI_EREG_BASE, DRIVER_NAME); + ret = PTR_ERR_OR_ZERO(iomem); if (ret) { - dev_err(&pdev->dev, "Could not request BARs: %d\n", ret); + dev_err(&pdev->dev, "Could not request EREG BAR: %d\n", ret); return -ENOMEM; } + t7xx_dev->base_addr.pcie_ext_reg_base = iomem; ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); if (ret) { @@ -867,9 +879,6 @@ static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return ret; } - IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE]; - t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE]; - ret = t7xx_pci_pm_init(t7xx_dev); if (ret) return ret; @@ -937,7 +946,7 @@ static const struct pci_device_id t7xx_pci_table[] = { MODULE_DEVICE_TABLE(pci, t7xx_pci_table); static struct pci_driver t7xx_pci_driver = { - .name = "mtk_t7xx", + .name = DRIVER_NAME, .id_table = t7xx_pci_table, .probe = t7xx_pci_probe, .remove = t7xx_pci_remove, |
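
A minimal sketch, assuming only the identifiers visible in the hunks above (NETDEV_PCPU_STAT_DSTATS and the dev_dstats_rx_add()/dev_dstats_rx_dropped() helpers that bareudp, vrf and vxlan are converted to here), of how a driver opts in to the per-CPU dstats accounting; the foo_* driver below is hypothetical and not part of this diff.

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static void foo_setup(struct net_device *dev)
	{
		/* Ask the core to allocate and free the per-CPU dstats counters,
		 * as bareudp_setup() and vxlan_setup() do in the hunks above.
		 */
		dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
	}

	/* RX completion path: account the packet against the per-CPU dstats. */
	static void foo_rx(struct net_device *dev, struct sk_buff *skb)
	{
		unsigned int len = skb->len;

		if (netif_rx(skb) == NET_RX_SUCCESS)
			dev_dstats_rx_add(dev, len);	/* rx_packets++, rx_bytes += len */
		else
			dev_dstats_rx_dropped(dev);	/* rx_drops++ */
	}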