Diffstat (limited to 'drivers/net/ethernet/freescale')
64 files changed, 5186 insertions, 2007 deletions
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index 75401d2a5fb4..bbef47c3480c 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -71,7 +71,6 @@ config FSL_XGMAC_MDIO tristate "Freescale XGMAC MDIO" select PHYLIB depends on OF - select MDIO_DEVRES select OF_MDIO help This driver supports the MDIO bus on the Fman 10G Ethernet MACs, and @@ -81,8 +80,7 @@ config UCC_GETH tristate "Freescale QE Gigabit Ethernet" depends on QUICC_ENGINE && PPC32 select FSL_PQ_MDIO - select PHYLIB - select FIXED_PHY + select PHYLINK help This driver supports the Gigabit Ethernet mode of the QUICC Engine, which is available on some Freescale SOCs. diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index dcbc598b11c6..23c23cca2620 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -229,7 +229,7 @@ static int dpaa_netdev_init(struct net_device *net_dev, net_dev->max_mtu = dpaa_get_max_mtu(); net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_LLTX | NETIF_F_RXHASH); + NETIF_F_RXHASH); net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA; /* The kernels enables GSO automatically, if we declare NETIF_F_SG. @@ -239,6 +239,7 @@ static int dpaa_netdev_init(struct net_device *net_dev, net_dev->features |= NETIF_F_RXCSUM; net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + net_dev->lltx = true; /* we do not want shared skbs on TX */ net_dev->priv_flags &= ~IFF_TX_SKB_SHARING; @@ -371,6 +372,7 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type, void *type_data) { struct dpaa_priv *priv = netdev_priv(net_dev); + int num_txqs_per_tc = dpaa_num_txqs_per_tc(); struct tc_mqprio_qopt *mqprio = type_data; u8 num_tc; int i; @@ -398,12 +400,12 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type, netdev_set_num_tc(net_dev, num_tc); for (i = 0; i < num_tc; i++) - netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM, - i * DPAA_TC_TXQ_NUM); + netdev_set_tc_queue(net_dev, i, num_txqs_per_tc, + i * num_txqs_per_tc); out: priv->num_tc = num_tc ? 
: 1; - netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM); + netif_set_real_num_tx_queues(net_dev, priv->num_tc * num_txqs_per_tc); return 0; } @@ -461,6 +463,22 @@ static int dpaa_set_mac_address(struct net_device *net_dev, void *addr) return 0; } +static int dpaa_addr_sync(struct net_device *net_dev, const u8 *addr) +{ + const struct dpaa_priv *priv = netdev_priv(net_dev); + + return priv->mac_dev->add_hash_mac_addr(priv->mac_dev->fman_mac, + (enet_addr_t *)addr); +} + +static int dpaa_addr_unsync(struct net_device *net_dev, const u8 *addr) +{ + const struct dpaa_priv *priv = netdev_priv(net_dev); + + return priv->mac_dev->remove_hash_mac_addr(priv->mac_dev->fman_mac, + (enet_addr_t *)addr); +} + static void dpaa_set_rx_mode(struct net_device *net_dev) { const struct dpaa_priv *priv; @@ -488,9 +506,9 @@ static void dpaa_set_rx_mode(struct net_device *net_dev) err); } - err = priv->mac_dev->set_multi(net_dev, priv->mac_dev); + err = __dev_mc_sync(net_dev, dpaa_addr_sync, dpaa_addr_unsync); if (err < 0) - netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n", + netif_err(priv, drv, net_dev, "dpaa_addr_sync() = %d\n", err); } @@ -649,7 +667,7 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx) fq->wq = 6; break; case FQ_TYPE_TX: - switch (idx / DPAA_TC_TXQ_NUM) { + switch (idx / dpaa_num_txqs_per_tc()) { case 0: /* Low priority (best effort) */ fq->wq = 6; @@ -667,8 +685,8 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx) fq->wq = 0; break; default: - WARN(1, "Too many TX FQs: more than %d!\n", - DPAA_ETH_TXQ_NUM); + WARN(1, "Too many TX FQs: more than %zu!\n", + dpaa_max_num_txqs()); } break; default: @@ -740,7 +758,8 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list, port_fqs->rx_pcdq = &dpaa_fq[0]; - if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ)) + if (!dpaa_fq_alloc(dev, 0, dpaa_max_num_txqs(), list, + FQ_TYPE_TX_CONF_MQ)) goto fq_alloc_failed; dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR); @@ -755,7 +774,7 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list, port_fqs->tx_defq = &dpaa_fq[0]; - if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX)) + if (!dpaa_fq_alloc(dev, 0, dpaa_max_num_txqs(), list, FQ_TYPE_TX)) goto fq_alloc_failed; return 0; @@ -931,14 +950,18 @@ static inline void dpaa_setup_egress(const struct dpaa_priv *priv, } } -static void dpaa_fq_setup(struct dpaa_priv *priv, - const struct dpaa_fq_cbs *fq_cbs, - struct fman_port *tx_port) +static int dpaa_fq_setup(struct dpaa_priv *priv, + const struct dpaa_fq_cbs *fq_cbs, + struct fman_port *tx_port) { int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu; const cpumask_t *affine_cpus = qman_affine_cpus(); - u16 channels[NR_CPUS]; struct dpaa_fq *fq; + u16 *channels; + + channels = kcalloc(num_possible_cpus(), sizeof(u16), GFP_KERNEL); + if (!channels) + return -ENOMEM; for_each_cpu_and(cpu, affine_cpus, cpu_online_mask) channels[num_portals++] = qman_affine_channel(cpu); @@ -965,11 +988,7 @@ static void dpaa_fq_setup(struct dpaa_priv *priv, case FQ_TYPE_TX: dpaa_setup_egress(priv, fq, tx_port, &fq_cbs->egress_ern); - /* If we have more Tx queues than the number of cores, - * just ignore the extra ones. 
- */ - if (egress_cnt < DPAA_ETH_TXQ_NUM) - priv->egress_fqs[egress_cnt++] = &fq->fq_base; + priv->egress_fqs[egress_cnt++] = &fq->fq_base; break; case FQ_TYPE_TX_CONF_MQ: priv->conf_fqs[conf_cnt++] = &fq->fq_base; @@ -987,16 +1006,9 @@ static void dpaa_fq_setup(struct dpaa_priv *priv, } } - /* Make sure all CPUs receive a corresponding Tx queue. */ - while (egress_cnt < DPAA_ETH_TXQ_NUM) { - list_for_each_entry(fq, &priv->dpaa_fq_list, list) { - if (fq->fq_type != FQ_TYPE_TX) - continue; - priv->egress_fqs[egress_cnt++] = &fq->fq_base; - if (egress_cnt == DPAA_ETH_TXQ_NUM) - break; - } - } + kfree(channels); + + return 0; } static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv, @@ -1004,7 +1016,7 @@ static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv, { int i; - for (i = 0; i < DPAA_ETH_TXQ_NUM; i++) + for (i = 0; i < dpaa_max_num_txqs(); i++) if (priv->egress_fqs[i] == tx_fq) return i; @@ -1808,7 +1820,6 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, struct page *page, *head_page; struct dpaa_bp *dpaa_bp; void *vaddr, *sg_vaddr; - int frag_off, frag_len; struct sk_buff *skb; dma_addr_t sg_addr; int page_offset; @@ -1851,6 +1862,11 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, * on Tx, if extra headers are added. */ WARN_ON(fd_off != priv->rx_headroom); + /* The offset to data start within the buffer holding + * the SGT should always be equal to the offset to data + * start within the first buffer holding the frame. + */ + WARN_ON_ONCE(fd_off != qm_sg_entry_get_off(&sgt[i])); skb_reserve(skb, fd_off); skb_put(skb, qm_sg_entry_get_len(&sgt[i])); } else { @@ -1864,21 +1880,23 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, page = virt_to_page(sg_vaddr); head_page = virt_to_head_page(sg_vaddr); - /* Compute offset in (possibly tail) page */ + /* Compute offset of sg_vaddr in (possibly tail) page */ page_offset = ((unsigned long)sg_vaddr & (PAGE_SIZE - 1)) + (page_address(page) - page_address(head_page)); - /* page_offset only refers to the beginning of sgt[i]; - * but the buffer itself may have an internal offset. + + /* Non-initial SGT entries should not have a buffer + * offset. */ - frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset; - frag_len = qm_sg_entry_get_len(&sgt[i]); + WARN_ON_ONCE(qm_sg_entry_get_off(&sgt[i])); + /* skb_add_rx_frag() does no checking on the page; if * we pass it a tail page, we'll end up with - * bad page accounting and eventually with segafults. + * bad page accounting and eventually with segfaults. 
*/ - skb_add_rx_frag(skb, i - 1, head_page, frag_off, - frag_len, dpaa_bp->size); + skb_add_rx_frag(skb, i - 1, head_page, page_offset, + qm_sg_entry_get_len(&sgt[i]), + dpaa_bp->size); } /* Update the pool count for the current {cpu x bpool} */ @@ -2263,7 +2281,7 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv, new_xdpf->len = xdpf->len; new_xdpf->headroom = priv->tx_headroom; new_xdpf->frame_sz = DPAA_BP_RAW_SIZE; - new_xdpf->mem.type = MEM_TYPE_PAGE_ORDER0; + new_xdpf->mem_type = MEM_TYPE_PAGE_ORDER0; /* Release the initial buffer */ xdp_return_frame_rx_napi(xdpf); @@ -2277,12 +2295,12 @@ static netdev_tx_t dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) { const int queue_mapping = skb_get_queue_mapping(skb); - bool nonlinear = skb_is_nonlinear(skb); struct rtnl_link_stats64 *percpu_stats; struct dpaa_percpu_priv *percpu_priv; struct netdev_queue *txq; struct dpaa_priv *priv; struct qm_fd fd; + bool nonlinear; int offset = 0; int err = 0; @@ -2292,6 +2310,13 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) qm_fd_clear_fd(&fd); + /* Packet data is always read as 32-bit words, so zero out any part of + * the skb which might be sent if we have to pad the packet + */ + if (__skb_put_padto(skb, ETH_ZLEN, false)) + goto enomem; + + nonlinear = skb_is_nonlinear(skb); if (!nonlinear) { /* We're going to store the skb backpointer at the beginning * of the data buffer, so we need a privately owned skb @@ -2747,7 +2772,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use && !fman_port_get_hash_result_offset(priv->mac_dev->port[RX], &hash_offset)) { - hash = be32_to_cpu(*(u32 *)(vaddr + hash_offset)); + hash = be32_to_cpu(*(__be32 *)(vaddr + hash_offset)); hash_valid = true; } @@ -2995,7 +3020,7 @@ static int dpaa_change_mtu(struct net_device *net_dev, int new_mtu) if (priv->xdp_prog && !xdp_validate_mtu(priv, new_mtu)) return -EINVAL; - net_dev->mtu = new_mtu; + WRITE_ONCE(net_dev->mtu, new_mtu); return 0; } @@ -3064,15 +3089,25 @@ static int dpaa_xdp_xmit(struct net_device *net_dev, int n, return nxmit; } -static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +static int dpaa_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *config) { struct dpaa_priv *priv = netdev_priv(dev); - struct hwtstamp_config config; - if (copy_from_user(&config, rq->ifr_data, sizeof(config))) - return -EFAULT; + config->tx_type = priv->tx_tstamp ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + config->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL : + HWTSTAMP_FILTER_NONE; + + return 0; +} - switch (config.tx_type) { +static int dpaa_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct dpaa_priv *priv = netdev_priv(dev); + + switch (config->tx_type) { case HWTSTAMP_TX_OFF: /* Couldn't disable rx/tx timestamping separately. * Do nothing here. @@ -3087,7 +3122,7 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return -ERANGE; } - if (config.rx_filter == HWTSTAMP_FILTER_NONE) { + if (config->rx_filter == HWTSTAMP_FILTER_NONE) { /* Couldn't disable rx/tx timestamping separately. * Do nothing here. 
*/ @@ -3096,28 +3131,17 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true); priv->rx_tstamp = true; /* TS is set for all frame types, not only those requested */ - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; } - return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; + return 0; } static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) { - int ret = -EINVAL; struct dpaa_priv *priv = netdev_priv(net_dev); - if (cmd == SIOCGMIIREG) { - if (net_dev->phydev) - return phylink_mii_ioctl(priv->mac_dev->phylink, rq, - cmd); - } - - if (cmd == SIOCSHWTSTAMP) - return dpaa_ts_ioctl(net_dev, rq, cmd); - - return ret; + return phylink_mii_ioctl(priv->mac_dev->phylink, rq, cmd); } static const struct net_device_ops dpaa_ops = { @@ -3135,6 +3159,8 @@ static const struct net_device_ops dpaa_ops = { .ndo_change_mtu = dpaa_change_mtu, .ndo_bpf = dpaa_xdp, .ndo_xdp_xmit = dpaa_xdp_xmit, + .ndo_hwtstamp_get = dpaa_hwtstamp_get, + .ndo_hwtstamp_set = dpaa_hwtstamp_set, }; static int dpaa_napi_add(struct net_device *net_dev) @@ -3161,8 +3187,9 @@ static void dpaa_napi_del(struct net_device *net_dev) for_each_possible_cpu(cpu) { percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); - netif_napi_del(&percpu_priv->np.napi); + __netif_napi_del(&percpu_priv->np.napi); } + synchronize_net(); } static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp, @@ -3324,7 +3351,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) /* Allocate this early, so we can store relevant information in * the private area */ - net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM); + net_dev = alloc_etherdev_mq(sizeof(*priv), dpaa_max_num_txqs()); if (!net_dev) { dev_err(dev, "alloc_etherdev_mq() failed\n"); return -ENOMEM; @@ -3339,6 +3366,22 @@ static int dpaa_eth_probe(struct platform_device *pdev) priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT); + priv->egress_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(), + sizeof(*priv->egress_fqs), + GFP_KERNEL); + if (!priv->egress_fqs) { + err = -ENOMEM; + goto free_netdev; + } + + priv->conf_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(), + sizeof(*priv->conf_fqs), + GFP_KERNEL); + if (!priv->conf_fqs) { + err = -ENOMEM; + goto free_netdev; + } + mac_dev = dpaa_mac_dev_get(pdev); if (IS_ERR(mac_dev)) { netdev_err(net_dev, "dpaa_mac_dev_get() failed\n"); @@ -3416,7 +3459,9 @@ static int dpaa_eth_probe(struct platform_device *pdev) */ dpaa_eth_add_channel(priv->channel, &pdev->dev); - dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]); + err = dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]); + if (err) + goto free_dpaa_bps; /* Create a congestion group for this netdev, with * dynamically-allocated CGR ID. 
@@ -3462,7 +3507,8 @@ static int dpaa_eth_probe(struct platform_device *pdev) } priv->num_tc = 1; - netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM); + netif_set_real_num_tx_queues(net_dev, + priv->num_tc * dpaa_num_txqs_per_tc()); /* Initialize NAPI */ err = dpaa_napi_add(net_dev); @@ -3548,7 +3594,7 @@ static struct platform_driver dpaa_driver = { }, .id_table = dpaa_devtype, .probe = dpaa_eth_probe, - .remove_new = dpaa_remove + .remove = dpaa_remove }; static int __init dpaa_load(void) diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h index ac3c8ed57bbe..7ed659eb08de 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h @@ -18,10 +18,6 @@ /* Number of prioritised traffic classes */ #define DPAA_TC_NUM 4 -/* Number of Tx queues per traffic class */ -#define DPAA_TC_TXQ_NUM NR_CPUS -/* Total number of Tx queues */ -#define DPAA_ETH_TXQ_NUM (DPAA_TC_NUM * DPAA_TC_TXQ_NUM) /* More detailed FQ types - used for fine-grained WQ assignments */ enum dpaa_fq_type { @@ -142,8 +138,8 @@ struct dpaa_priv { struct mac_device *mac_dev; struct device *rx_dma_dev; struct device *tx_dma_dev; - struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM]; - struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM]; + struct qman_fq **egress_fqs; + struct qman_fq **conf_fqs; u16 channel; struct list_head dpaa_fq_list; @@ -185,4 +181,16 @@ extern const struct ethtool_ops dpaa_ethtool_ops; /* from dpaa_eth_sysfs.c */ void dpaa_eth_sysfs_remove(struct device *dev); void dpaa_eth_sysfs_init(struct device *dev); + +static inline size_t dpaa_num_txqs_per_tc(void) +{ + return num_possible_cpus(); +} + +/* Total number of Tx queues */ +static inline size_t dpaa_max_num_txqs(void) +{ + return DPAA_TC_NUM * dpaa_num_txqs_per_tc(); +} + #endif /* __DPAA_H */ diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c index 4fee74c024bd..aad470e9caea 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c @@ -35,7 +35,6 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev, u32 last_fqid = 0; ssize_t bytes = 0; char *str; - int i = 0; list_for_each_entry_safe(fq, tmp, &priv->dpaa_fq_list, list) { switch (fq->fq_type) { @@ -85,7 +84,6 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev, prev = fq; prevstr = str; - i++; } if (prev) { diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h index 889f89df9930..9e1d44ae92cc 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h @@ -56,8 +56,8 @@ DECLARE_EVENT_CLASS(dpaa_eth_fd, __entry->fd_format = qm_fd_get_format(fd); __entry->fd_offset = qm_fd_get_offset(fd); __entry->fd_length = qm_fd_get_length(fd); - __entry->fd_status = fd->status; - __assign_str(name, netdev->name); + __entry->fd_status = __be32_to_cpu(fd->status); + __assign_str(name); ), /* This is what gets printed when the trace event is triggered */ diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 5bd0b36d1feb..9986f6e1f587 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -243,38 +243,24 @@ static void dpaa_get_ethtool_stats(struct net_device *net_dev, static void dpaa_get_strings(struct 
net_device *net_dev, u32 stringset, u8 *data) { - unsigned int i, j, num_cpus, size; - char string_cpu[ETH_GSTRING_LEN]; - u8 *strings; + unsigned int i, j, num_cpus; - memset(string_cpu, 0, sizeof(string_cpu)); - strings = data; - num_cpus = num_online_cpus(); - size = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN; + num_cpus = num_online_cpus(); for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) { - for (j = 0; j < num_cpus; j++) { - snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]", - dpaa_stats_percpu[i], j); - memcpy(strings, string_cpu, ETH_GSTRING_LEN); - strings += ETH_GSTRING_LEN; - } - snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]", - dpaa_stats_percpu[i]); - memcpy(strings, string_cpu, ETH_GSTRING_LEN); - strings += ETH_GSTRING_LEN; - } - for (j = 0; j < num_cpus; j++) { - snprintf(string_cpu, ETH_GSTRING_LEN, - "bpool [CPU %d]", j); - memcpy(strings, string_cpu, ETH_GSTRING_LEN); - strings += ETH_GSTRING_LEN; + for (j = 0; j < num_cpus; j++) + ethtool_sprintf(&data, "%s [CPU %d]", + dpaa_stats_percpu[i], j); + + ethtool_sprintf(&data, "%s [TOTAL]", dpaa_stats_percpu[i]); } - snprintf(string_cpu, ETH_GSTRING_LEN, "bpool [TOTAL]"); - memcpy(strings, string_cpu, ETH_GSTRING_LEN); - strings += ETH_GSTRING_LEN; + for (i = 0; i < num_cpus; i++) + ethtool_sprintf(&data, "bpool [CPU %d]", i); - memcpy(strings, dpaa_stats_global, size); + ethtool_puts(&data, "bpool [TOTAL]"); + + for (i = 0; i < DPAA_STATS_GLOBAL_LEN; i++) + ethtool_puts(&data, dpaa_stats_global[i]); } static int dpaa_get_hash_opts(struct net_device *dev, @@ -394,7 +380,7 @@ static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) } static int dpaa_get_ts_info(struct net_device *net_dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct device *dev = net_dev->dev.parent; struct device_node *mac_node = dev->of_node; @@ -457,12 +443,16 @@ static int dpaa_set_coalesce(struct net_device *dev, struct netlink_ext_ack *extack) { const cpumask_t *cpus = qman_affine_cpus(); - bool needs_revert[NR_CPUS] = {false}; struct qman_portal *portal; u32 period, prev_period; u8 thresh, prev_thresh; + bool *needs_revert; int cpu, res; + needs_revert = kcalloc(num_possible_cpus(), sizeof(bool), GFP_KERNEL); + if (!needs_revert) + return -ENOMEM; + period = c->rx_coalesce_usecs; thresh = c->rx_max_coalesced_frames; @@ -485,6 +475,8 @@ static int dpaa_set_coalesce(struct net_device *dev, needs_revert[cpu] = true; } + kfree(needs_revert); + return 0; revert_values: @@ -498,6 +490,8 @@ revert_values: qman_dqrr_set_ithresh(portal, prev_thresh); } + kfree(needs_revert); + return res; } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h index 9b43fadb9b11..956767e0869c 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h @@ -48,7 +48,7 @@ DECLARE_EVENT_CLASS(dpaa2_eth_fd, __entry->fd_addr = dpaa2_fd_get_addr(fd); __entry->fd_len = dpaa2_fd_get_len(fd); __entry->fd_offset = dpaa2_fd_get_offset(fd); - __assign_str(name, netdev->name); + __assign_str(name); ), /* This is what gets printed when the trace event is @@ -144,7 +144,7 @@ DECLARE_EVENT_CLASS(dpaa2_eth_buf, __entry->dma_addr = dma_addr; __entry->map_size = map_size; __entry->bpid = bpid; - __assign_str(name, netdev->name); + __assign_str(name); ), /* This is what gets printed when the trace event is diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 
b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 888509cf1f21..2ec2c3dab250 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -2585,40 +2585,52 @@ static int dpaa2_eth_set_features(struct net_device *net_dev, return 0; } -static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +static int dpaa2_eth_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct dpaa2_eth_priv *priv = netdev_priv(dev); - struct hwtstamp_config config; if (!dpaa2_ptp) return -EINVAL; - if (copy_from_user(&config, rq->ifr_data, sizeof(config))) - return -EFAULT; - - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: case HWTSTAMP_TX_ONESTEP_SYNC: - priv->tx_tstamp_type = config.tx_type; + priv->tx_tstamp_type = config->tx_type; break; default: return -ERANGE; } - if (config.rx_filter == HWTSTAMP_FILTER_NONE) { + if (config->rx_filter == HWTSTAMP_FILTER_NONE) { priv->rx_tstamp = false; } else { priv->rx_tstamp = true; /* TS is set for all frame types, not only those requested */ - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; } if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC) dpaa2_ptp_onestep_reg_update_method(priv); - return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; + return 0; +} + +static int dpaa2_eth_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *config) +{ + struct dpaa2_eth_priv *priv = netdev_priv(dev); + + if (!dpaa2_ptp) + return -EINVAL; + + config->tx_type = priv->tx_tstamp_type; + config->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL : + HWTSTAMP_FILTER_NONE; + + return 0; } static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) @@ -2626,9 +2638,6 @@ static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) struct dpaa2_eth_priv *priv = netdev_priv(dev); int err; - if (cmd == SIOCSHWTSTAMP) - return dpaa2_eth_ts_ioctl(dev, rq, cmd); - mutex_lock(&priv->mac_lock); if (dpaa2_eth_is_type_phy(priv)) { @@ -2698,7 +2707,7 @@ static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu) return err; out: - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } @@ -2896,11 +2905,14 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n, static int update_xps(struct dpaa2_eth_priv *priv) { struct net_device *net_dev = priv->net_dev; - struct cpumask xps_mask; - struct dpaa2_eth_fq *fq; int i, num_queues, netdev_queues; + struct dpaa2_eth_fq *fq; + cpumask_var_t xps_mask; int err = 0; + if (!alloc_cpumask_var(&xps_mask, GFP_KERNEL)) + return -ENOMEM; + num_queues = dpaa2_eth_queue_count(priv); netdev_queues = (net_dev->num_tc ? 
: 1) * num_queues; @@ -2910,16 +2922,17 @@ static int update_xps(struct dpaa2_eth_priv *priv) for (i = 0; i < netdev_queues; i++) { fq = &priv->fq[i % num_queues]; - cpumask_clear(&xps_mask); - cpumask_set_cpu(fq->target_cpu, &xps_mask); + cpumask_clear(xps_mask); + cpumask_set_cpu(fq->target_cpu, xps_mask); - err = netif_set_xps_queue(net_dev, &xps_mask, i); + err = netif_set_xps_queue(net_dev, xps_mask, i); if (err) { netdev_warn_once(net_dev, "Error setting XPS queue\n"); break; } } + free_cpumask_var(xps_mask); return err; } @@ -3030,7 +3043,9 @@ static const struct net_device_ops dpaa2_eth_ops = { .ndo_xsk_wakeup = dpaa2_xsk_wakeup, .ndo_setup_tc = dpaa2_eth_setup_tc, .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid, - .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid + .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid, + .ndo_hwtstamp_get = dpaa2_eth_hwtstamp_get, + .ndo_hwtstamp_set = dpaa2_eth_hwtstamp_set, }; static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx) @@ -4590,12 +4605,13 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev) net_dev->priv_flags |= supported; net_dev->priv_flags &= ~not_supported; + net_dev->lltx = true; /* Features */ net_dev->features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA | - NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO; + NETIF_F_HW_TC | NETIF_F_TSO; net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS; net_dev->hw_features = net_dev->features; net_dev->xdp_features = NETDEV_XDP_ACT_BASIC | diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index e80e9388c71f..74ef77cb7078 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -217,20 +217,15 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev, static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { - u8 *p = data; int i; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) { - strscpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) { - strscpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - dpaa2_mac_get_strings(p); + for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) + ethtool_puts(&data, dpaa2_ethtool_stats[i]); + for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) + ethtool_puts(&data, dpaa2_ethtool_extras[i]); + dpaa2_mac_get_strings(&data); break; } } @@ -794,7 +789,7 @@ int dpaa2_phc_index = -1; EXPORT_SYMBOL(dpaa2_phc_index); static int dpaa2_eth_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { if (!dpaa2_ptp) return ethtool_op_get_ts_info(dev, info); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c index a69bb22c37ea..422ce13a7c94 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c @@ -558,15 +558,12 @@ int dpaa2_mac_get_sset_count(void) return DPAA2_MAC_NUM_STATS; } -void dpaa2_mac_get_strings(u8 *data) +void dpaa2_mac_get_strings(u8 **data) { - u8 *p = data; int i; - for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) { - strscpy(p, dpaa2_mac_ethtool_stats[i], ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) + ethtool_puts(data, dpaa2_mac_ethtool_stats[i]); } void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, 
u64 *data) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h index c1ec9efd413a..53f8d106d11e 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h @@ -49,7 +49,7 @@ void dpaa2_mac_disconnect(struct dpaa2_mac *mac); int dpaa2_mac_get_sset_count(void); -void dpaa2_mac_get_strings(u8 *data); +void dpaa2_mac_get_strings(u8 **data); void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c index 6bc1988be311..a888f6e6e9b0 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c @@ -170,17 +170,16 @@ dpaa2_switch_ethtool_get_sset_count(struct net_device *netdev, int sset) static void dpaa2_switch_ethtool_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { - u8 *p = data; + const char *str; int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < DPAA2_SWITCH_NUM_COUNTERS; i++) { - memcpy(p, dpaa2_switch_ethtool_counters[i].name, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; + str = dpaa2_switch_ethtool_counters[i].name; + ethtool_puts(&data, str); } - dpaa2_mac_get_strings(p); + dpaa2_mac_get_strings(&data); break; } } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c index b6a534a3e0b1..701a87370737 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c @@ -33,6 +33,9 @@ static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls, acl_h = &acl_key->match; acl_m = &acl_key->mask; + if (flow_rule_match_has_control_flags(rule, extack)) + return -EOPNOTSUPP; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; @@ -548,6 +551,9 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls, return -EOPNOTSUPP; } + if (flow_rule_match_has_control_flags(rule, extack)) + return -EOPNOTSUPP; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { struct flow_match_vlan match; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index f3543a2df68d..147a93bf9fa9 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -590,7 +590,7 @@ static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu) return err; } - netdev->mtu = mtu; + WRITE_ONCE(netdev->mtu, mtu); return 0; } @@ -780,13 +780,14 @@ struct ethsw_dump_ctx { static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry, struct ethsw_dump_ctx *dump) { + struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx; int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC; u32 portid = NETLINK_CB(dump->cb->skb).portid; u32 seq = dump->cb->nlh->nlmsg_seq; struct nlmsghdr *nlh; struct ndmsg *ndm; - if (dump->idx < dump->cb->args[2]) + if (dump->idx < ctx->fdb_idx) goto skip; nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, @@ -2638,13 +2639,14 @@ static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw) static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw) { - int *count, i; + int *count, ret, i; for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) { + ret = dpaa2_switch_add_bufs(ethsw, 
ethsw->bpid); count = ðsw->buf_count; - *count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid); + *count += ret; - if (unlikely(*count < BUFS_PER_CMD)) + if (unlikely(ret < BUFS_PER_CMD)) return -ENOMEM; } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c index 051748b997f3..a466c2379146 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c @@ -55,7 +55,7 @@ static u32 dpaa2_xsk_run_xdp(struct dpaa2_eth_priv *priv, xdp_set_data_meta_invalid(xdp_buff); xdp_buff->rxq = &ch->xdp_rxq; - xsk_buff_dma_sync_for_cpu(xdp_buff, ch->xsk_pool); + xsk_buff_dma_sync_for_cpu(xdp_buff); xdp_act = bpf_prog_run_xdp(xdp_prog, xdp_buff); /* xdp.data pointer may have changed */ diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig index 4d75e6807e92..e917132d3714 100644 --- a/drivers/net/ethernet/freescale/enetc/Kconfig +++ b/drivers/net/ethernet/freescale/enetc/Kconfig @@ -7,13 +7,28 @@ config FSL_ENETC_CORE If compiled as module (M), the module name is fsl-enetc-core. +config NXP_ENETC_PF_COMMON + tristate + help + This module supports common functionality between drivers of + different versions of NXP ENETC PF controllers. + + If compiled as module (M), the module name is nxp-enetc-pf-common. + +config NXP_NETC_LIB + tristate + help + This module provides common functionalities for both ENETC and NETC + Switch, such as NETC Table Management Protocol (NTMP) 2.0, common tc + flower and debugfs interfaces and so on. + config FSL_ENETC tristate "ENETC PF driver" depends on PCI_MSI - select MDIO_DEVRES select FSL_ENETC_CORE select FSL_ENETC_IERB select FSL_ENETC_MDIO + select NXP_ENETC_PF_COMMON select PHYLINK select PCS_LYNX select DIMLIB @@ -24,6 +39,23 @@ config FSL_ENETC If compiled as module (M), the module name is fsl-enetc. +config NXP_ENETC4 + tristate "ENETC4 PF driver" + depends on PCI_MSI + select FSL_ENETC_CORE + select FSL_ENETC_MDIO + select NXP_ENETC_PF_COMMON + select NXP_NETC_LIB + select PHYLINK + select DIMLIB + help + This driver supports NXP ENETC devices with major revision 4. ENETC is + as the NIC functionality in NETC, it supports virtualization/isolation + based on PCIe Single Root IO Virtualization (SR-IOV) and a full range + of TSN standards and NIC offload capabilities. + + If compiled as module (M), the module name is nxp-enetc4. + config FSL_ENETC_VF tristate "ENETC VF driver" depends on PCI_MSI @@ -47,7 +79,7 @@ config FSL_ENETC_IERB config FSL_ENETC_MDIO tristate "ENETC MDIO driver" - depends on PCI && MDIO_DEVRES && MDIO_BUS + depends on PCI && PHYLIB help This driver supports NXP ENETC Central MDIO controller as a PCIe physical function (PF) device. @@ -75,3 +107,17 @@ config FSL_ENETC_QOS enable/disable from user space via Qos commands(tc). In the kernel side, it can be loaded by Qos driver. Currently, it is only support taprio(802.1Qbv) and Credit Based Shaper(802.1Qbu). + +config NXP_NETC_BLK_CTRL + tristate "NETC blocks control driver" + help + This driver configures Integrated Endpoint Register Block (IERB) and + Privileged Register Block (PRB) of NETC. For i.MX platforms, it also + includes the configuration of NETCMIX block. + The IERB contains registers that are used for pre-boot initialization, + debug, and non-customer configuration. The PRB controls global reset + and global error handling for NETC. 
The NETCMIX block is mainly used + to set MII protocol and PCS protocol of the links, it also contains + settings for some other functions. + + If compiled as module (M), the module name is nxp-netc-blk-ctrl. diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile index b13cbbabb2ea..f1c5ad45fd76 100644 --- a/drivers/net/ethernet/freescale/enetc/Makefile +++ b/drivers/net/ethernet/freescale/enetc/Makefile @@ -3,11 +3,21 @@ obj-$(CONFIG_FSL_ENETC_CORE) += fsl-enetc-core.o fsl-enetc-core-y := enetc.o enetc_cbdr.o enetc_ethtool.o +obj-$(CONFIG_NXP_ENETC_PF_COMMON) += nxp-enetc-pf-common.o +nxp-enetc-pf-common-y := enetc_pf_common.o + +obj-$(CONFIG_NXP_NETC_LIB) += nxp-netc-lib.o +nxp-netc-lib-y := ntmp.o + obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o fsl-enetc-y := enetc_pf.o fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o +obj-$(CONFIG_NXP_ENETC4) += nxp-enetc4.o +nxp-enetc4-y := enetc4_pf.o +nxp-enetc4-$(CONFIG_DEBUG_FS) += enetc4_debugfs.o + obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o fsl-enetc-vf-y := enetc_vf.o @@ -19,3 +29,6 @@ fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o obj-$(CONFIG_FSL_ENETC_PTP_CLOCK) += fsl-enetc-ptp.o fsl-enetc-ptp-y := enetc_ptp.o + +obj-$(CONFIG_NXP_NETC_BLK_CTRL) += nxp-netc-blk-ctrl.o +nxp-netc-blk-ctrl-y := netc_blk_ctrl.o diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 9f07f4947b63..dcc3fbac3481 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -3,6 +3,7 @@ #include "enetc.h" #include <linux/bpf_trace.h> +#include <linux/clk.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/vmalloc.h> @@ -21,17 +22,56 @@ void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val) { enetc_port_wr(&si->hw, reg, val); if (si->hw_features & ENETC_SI_F_QBU) - enetc_port_wr(&si->hw, reg + ENETC_PMAC_OFFSET, val); + enetc_port_wr(&si->hw, reg + si->drvdata->pmac_offset, val); } EXPORT_SYMBOL_GPL(enetc_port_mac_wr); static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv, u8 preemptible_tcs) { + if (!(priv->si->hw_features & ENETC_SI_F_QBU)) + return; + priv->preemptible_tcs = preemptible_tcs; enetc_mm_commit_preemptible_tcs(priv); } +static int enetc_mac_addr_hash_idx(const u8 *addr) +{ + u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16; + u64 mask = 0; + int res = 0; + int i; + + for (i = 0; i < 8; i++) + mask |= BIT_ULL(i * 6); + + for (i = 0; i < 6; i++) + res |= (hweight64(fold & (mask << i)) & 0x1) << i; + + return res; +} + +void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter, + const unsigned char *addr) +{ + int idx = enetc_mac_addr_hash_idx(addr); + + /* add hash table entry */ + __set_bit(idx, filter->mac_hash_table); + filter->mac_addr_cnt++; +} +EXPORT_SYMBOL_GPL(enetc_add_mac_addr_ht_filter); + +void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter) +{ + filter->mac_addr_cnt = 0; + + bitmap_zero(filter->mac_hash_table, + ENETC_MADDR_HASH_TBL_SZ); +} +EXPORT_SYMBOL_GPL(enetc_reset_mac_addr_filter); + static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv) { int num_tx_rings = priv->num_tx_rings; @@ -142,6 +182,45 @@ static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp, return 0; } +static bool enetc_tx_csum_offload_check(struct sk_buff *skb) +{ + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + case offsetof(struct udphdr, check): + return true; + default: + 
return false; + } +} + +static bool enetc_skb_is_ipv6(struct sk_buff *skb) +{ + return vlan_get_protocol(skb) == htons(ETH_P_IPV6); +} + +static bool enetc_skb_is_tcp(struct sk_buff *skb) +{ + return skb->csum_offset == offsetof(struct tcphdr, check); +} + +/** + * enetc_unwind_tx_frame() - Unwind the DMA mappings of a multi-buffer Tx frame + * @tx_ring: Pointer to the Tx ring on which the buffer descriptors are located + * @count: Number of Tx buffer descriptors which need to be unmapped + * @i: Index of the last successfully mapped Tx buffer descriptor + */ +static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i) +{ + while (count--) { + struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; + + enetc_free_tx_frame(tx_ring, tx_swbd); + if (i == 0) + i = tx_ring->bd_count; + i--; + } +} + static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) { bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false; @@ -159,6 +238,29 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) dma_addr_t dma; u8 flags = 0; + enetc_clear_tx_bd(&temp_bd); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + /* Can not support TSD and checksum offload at the same time */ + if (priv->active_offloads & ENETC_F_TXCSUM && + enetc_tx_csum_offload_check(skb) && !tx_ring->tsd_enable) { + temp_bd.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START, + skb_network_offset(skb)); + temp_bd.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN, + skb_network_header_len(skb) / 4); + temp_bd.l3_aux1 |= FIELD_PREP(ENETC_TX_BD_L3T, + enetc_skb_is_ipv6(skb)); + if (enetc_skb_is_tcp(skb)) + temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T, + ENETC_TXBD_L4T_TCP); + else + temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T, + ENETC_TXBD_L4T_UDP); + flags |= ENETC_TXBD_FLAGS_CSUM_LSO | ENETC_TXBD_FLAGS_L4CS; + } else if (skb_checksum_help(skb)) { + return 0; + } + } + i = tx_ring->next_to_use; txbd = ENETC_TXBD(*tx_ring, i); prefetchw(txbd); @@ -169,7 +271,6 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) temp_bd.addr = cpu_to_le64(dma); temp_bd.buf_len = cpu_to_le16(len); - temp_bd.lstatus = 0; tx_swbd = &tx_ring->tx_swbd[i]; tx_swbd->dma = dma; @@ -232,9 +333,11 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) } if (do_onestep_tstamp) { - u32 lo, hi, val; - u64 sec, nsec; + __be32 new_sec_l, new_nsec; + u32 lo, hi, nsec, val; + __be16 new_sec_h; u8 *data; + u64 sec; lo = enetc_rd_hot(hw, ENETC_SICTR0); hi = enetc_rd_hot(hw, ENETC_SICTR1); @@ -248,13 +351,38 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) /* Update originTimestamp field of Sync packet * - 48 bits seconds field * - 32 bits nanseconds field + * + * In addition, the UDP checksum needs to be updated + * by software after updating originTimestamp field, + * otherwise the hardware will calculate the wrong + * checksum when updating the correction field and + * update it to the packet. 
*/ data = skb_mac_header(skb); - *(__be16 *)(data + offset2) = - htons((sec >> 32) & 0xffff); - *(__be32 *)(data + offset2 + 2) = - htonl(sec & 0xffffffff); - *(__be32 *)(data + offset2 + 6) = htonl(nsec); + new_sec_h = htons((sec >> 32) & 0xffff); + new_sec_l = htonl(sec & 0xffffffff); + new_nsec = htonl(nsec); + if (udp) { + struct udphdr *uh = udp_hdr(skb); + __be32 old_sec_l, old_nsec; + __be16 old_sec_h; + + old_sec_h = *(__be16 *)(data + offset2); + inet_proto_csum_replace2(&uh->check, skb, old_sec_h, + new_sec_h, false); + + old_sec_l = *(__be32 *)(data + offset2 + 2); + inet_proto_csum_replace4(&uh->check, skb, old_sec_l, + new_sec_l, false); + + old_nsec = *(__be32 *)(data + offset2 + 6); + inet_proto_csum_replace4(&uh->check, skb, old_nsec, + new_nsec, false); + } + + *(__be16 *)(data + offset2) = new_sec_h; + *(__be32 *)(data + offset2 + 2) = new_sec_l; + *(__be32 *)(data + offset2 + 6) = new_nsec; /* Configure single-step register */ val = ENETC_PM0_SINGLE_STEP_EN; @@ -325,25 +453,20 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) dma_err: dev_err(tx_ring->dev, "DMA map error"); - do { - tx_swbd = &tx_ring->tx_swbd[i]; - enetc_free_tx_frame(tx_ring, tx_swbd); - if (i == 0) - i = tx_ring->bd_count; - i--; - } while (count--); + enetc_unwind_tx_frame(tx_ring, count, i); return 0; } -static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, - struct enetc_tx_swbd *tx_swbd, - union enetc_tx_bd *txbd, int *i, int hdr_len, - int data_len) +static int enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, + struct enetc_tx_swbd *tx_swbd, + union enetc_tx_bd *txbd, int *i, int hdr_len, + int data_len) { union enetc_tx_bd txbd_tmp; u8 flags = 0, e_flags = 0; dma_addr_t addr; + int count = 1; enetc_clear_tx_bd(&txbd_tmp); addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE; @@ -386,7 +509,10 @@ static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, /* Write the BD */ txbd_tmp.ext.e_flags = e_flags; *txbd = txbd_tmp; + count++; } + + return count; } static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb, @@ -485,8 +611,233 @@ static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso } } +static int enetc_lso_count_descs(const struct sk_buff *skb) +{ + /* 4 BDs: 1 BD for LSO header + 1 BD for extended BD + 1 BD + * for linear area data but not include LSO header, namely + * skb_headlen(skb) - lso_hdr_len (it may be 0, but that's + * okay, we only need to consider the worst case). And 1 BD + * for gap. + */ + return skb_shinfo(skb)->nr_frags + 4; +} + +static int enetc_lso_get_hdr_len(const struct sk_buff *skb) +{ + int hdr_len, tlen; + + tlen = skb_is_gso_tcp(skb) ? 
tcp_hdrlen(skb) : sizeof(struct udphdr); + hdr_len = skb_transport_offset(skb) + tlen; + + return hdr_len; +} + +static void enetc_lso_start(struct sk_buff *skb, struct enetc_lso_t *lso) +{ + lso->lso_seg_size = skb_shinfo(skb)->gso_size; + lso->ipv6 = enetc_skb_is_ipv6(skb); + lso->tcp = skb_is_gso_tcp(skb); + lso->l3_hdr_len = skb_network_header_len(skb); + lso->l3_start = skb_network_offset(skb); + lso->hdr_len = enetc_lso_get_hdr_len(skb); + lso->total_len = skb->len - lso->hdr_len; +} + +static void enetc_lso_map_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, + int *i, struct enetc_lso_t *lso) +{ + union enetc_tx_bd txbd_tmp, *txbd; + struct enetc_tx_swbd *tx_swbd; + u16 frm_len, frm_len_ext; + u8 flags, e_flags = 0; + dma_addr_t addr; + char *hdr; + + /* Get the first BD of the LSO BDs chain */ + txbd = ENETC_TXBD(*tx_ring, *i); + tx_swbd = &tx_ring->tx_swbd[*i]; + prefetchw(txbd); + + /* Prepare LSO header: MAC + IP + TCP/UDP */ + hdr = tx_ring->tso_headers + *i * TSO_HEADER_SIZE; + memcpy(hdr, skb->data, lso->hdr_len); + addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE; + + /* {frm_len_ext, frm_len} indicates the total length of + * large transmit data unit. frm_len contains the 16 least + * significant bits and frm_len_ext contains the 4 most + * significant bits. + */ + frm_len = lso->total_len & 0xffff; + frm_len_ext = (lso->total_len >> 16) & 0xf; + + /* Set the flags of the first BD */ + flags = ENETC_TXBD_FLAGS_EX | ENETC_TXBD_FLAGS_CSUM_LSO | + ENETC_TXBD_FLAGS_LSO | ENETC_TXBD_FLAGS_L4CS; + + enetc_clear_tx_bd(&txbd_tmp); + txbd_tmp.addr = cpu_to_le64(addr); + txbd_tmp.hdr_len = cpu_to_le16(lso->hdr_len); + + /* first BD needs frm_len and offload flags set */ + txbd_tmp.frm_len = cpu_to_le16(frm_len); + txbd_tmp.flags = flags; + + txbd_tmp.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START, lso->l3_start); + /* l3_hdr_size in 32-bits (4 bytes) */ + txbd_tmp.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN, + lso->l3_hdr_len / 4); + if (lso->ipv6) + txbd_tmp.l3_aux1 |= ENETC_TX_BD_L3T; + else + txbd_tmp.l3_aux0 |= ENETC_TX_BD_IPCS; + + txbd_tmp.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T, lso->tcp ? + ENETC_TXBD_L4T_TCP : ENETC_TXBD_L4T_UDP); + + /* For the LSO header we do not set the dma address since + * we do not want it unmapped when we do cleanup. We still + * set len so that we count the bytes sent. 
+ */ + tx_swbd->len = lso->hdr_len; + tx_swbd->do_twostep_tstamp = false; + tx_swbd->check_wb = false; + + /* Actually write the header in the BD */ + *txbd = txbd_tmp; + + /* Get the next BD, and the next BD is extended BD */ + enetc_bdr_idx_inc(tx_ring, i); + txbd = ENETC_TXBD(*tx_ring, *i); + tx_swbd = &tx_ring->tx_swbd[*i]; + prefetchw(txbd); + + enetc_clear_tx_bd(&txbd_tmp); + if (skb_vlan_tag_present(skb)) { + /* Setup the VLAN fields */ + txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb)); + txbd_tmp.ext.tpid = ENETC_TPID_8021Q; + e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS; + } + + /* Write the BD */ + txbd_tmp.ext.e_flags = e_flags; + txbd_tmp.ext.lso_sg_size = cpu_to_le16(lso->lso_seg_size); + txbd_tmp.ext.frm_len_ext = cpu_to_le16(frm_len_ext); + *txbd = txbd_tmp; +} + +static int enetc_lso_map_data(struct enetc_bdr *tx_ring, struct sk_buff *skb, + int *i, struct enetc_lso_t *lso, int *count) +{ + union enetc_tx_bd txbd_tmp, *txbd = NULL; + struct enetc_tx_swbd *tx_swbd; + skb_frag_t *frag; + dma_addr_t dma; + u8 flags = 0; + int len, f; + + len = skb_headlen(skb) - lso->hdr_len; + if (len > 0) { + dma = dma_map_single(tx_ring->dev, skb->data + lso->hdr_len, + len, DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma)) + return -ENOMEM; + + enetc_bdr_idx_inc(tx_ring, i); + txbd = ENETC_TXBD(*tx_ring, *i); + tx_swbd = &tx_ring->tx_swbd[*i]; + prefetchw(txbd); + *count += 1; + + enetc_clear_tx_bd(&txbd_tmp); + txbd_tmp.addr = cpu_to_le64(dma); + txbd_tmp.buf_len = cpu_to_le16(len); + + tx_swbd->dma = dma; + tx_swbd->len = len; + tx_swbd->is_dma_page = 0; + tx_swbd->dir = DMA_TO_DEVICE; + } + + frag = &skb_shinfo(skb)->frags[0]; + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) { + if (txbd) + *txbd = txbd_tmp; + + len = skb_frag_size(frag); + dma = skb_frag_dma_map(tx_ring->dev, frag); + if (dma_mapping_error(tx_ring->dev, dma)) + return -ENOMEM; + + /* Get the next BD */ + enetc_bdr_idx_inc(tx_ring, i); + txbd = ENETC_TXBD(*tx_ring, *i); + tx_swbd = &tx_ring->tx_swbd[*i]; + prefetchw(txbd); + *count += 1; + + enetc_clear_tx_bd(&txbd_tmp); + txbd_tmp.addr = cpu_to_le64(dma); + txbd_tmp.buf_len = cpu_to_le16(len); + + tx_swbd->dma = dma; + tx_swbd->len = len; + tx_swbd->is_dma_page = 1; + tx_swbd->dir = DMA_TO_DEVICE; + } + + /* Last BD needs 'F' bit set */ + flags |= ENETC_TXBD_FLAGS_F; + txbd_tmp.flags = flags; + *txbd = txbd_tmp; + + tx_swbd->is_eof = 1; + tx_swbd->skb = skb; + + return 0; +} + +static int enetc_lso_hw_offload(struct enetc_bdr *tx_ring, struct sk_buff *skb) +{ + struct enetc_tx_swbd *tx_swbd; + struct enetc_lso_t lso = {0}; + int err, i, count = 0; + + /* Initialize the LSO handler */ + enetc_lso_start(skb, &lso); + i = tx_ring->next_to_use; + + enetc_lso_map_hdr(tx_ring, skb, &i, &lso); + /* First BD and an extend BD */ + count += 2; + + err = enetc_lso_map_data(tx_ring, skb, &i, &lso, &count); + if (err) + goto dma_err; + + /* Go to the next BD */ + enetc_bdr_idx_inc(tx_ring, &i); + tx_ring->next_to_use = i; + enetc_update_tx_ring_tail(tx_ring); + + return count; + +dma_err: + do { + tx_swbd = &tx_ring->tx_swbd[i]; + enetc_free_tx_frame(tx_ring, tx_swbd); + if (i == 0) + i = tx_ring->bd_count; + i--; + } while (--count); + + return 0; +} + static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) { + struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); int hdr_len, total_len, data_len; struct enetc_tx_swbd *tx_swbd; union enetc_tx_bd *txbd; @@ -518,9 +869,9 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, 
struct sk_buff *skb /* compute the csum over the L4 header */ csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos); - enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len); + count += enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, + &i, hdr_len, data_len); bd_data_num = 0; - count++; while (data_len > 0) { int size; @@ -544,15 +895,20 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd, tso.data, size, size == data_len); - if (err) + if (err) { + if (i == 0) + i = tx_ring->bd_count; + i--; + goto err_map_data; + } data_len -= size; count++; bd_data_num++; tso_build_data(skb, &tso, size); - if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len)) + if (unlikely(bd_data_num >= priv->max_frags && data_len)) goto err_chained_bd; } @@ -574,13 +930,7 @@ err_map_data: dev_err(tx_ring->dev, "DMA map error"); err_chained_bd: - do { - tx_swbd = &tx_ring->tx_swbd[i]; - enetc_free_tx_frame(tx_ring, tx_swbd); - if (i == 0) - i = tx_ring->bd_count; - i--; - } while (count--); + enetc_unwind_tx_frame(tx_ring, count, i); return 0; } @@ -590,7 +940,7 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb, { struct enetc_ndev_priv *priv = netdev_priv(ndev); struct enetc_bdr *tx_ring; - int count, err; + int count; /* Queue one-step Sync packet if already locked */ if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { @@ -604,16 +954,28 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb, tx_ring = priv->tx_ring[skb->queue_mapping]; if (skb_is_gso(skb)) { - if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) { - netif_stop_subqueue(ndev, tx_ring->index); - return NETDEV_TX_BUSY; - } + /* LSO data unit lengths of up to 256KB are supported */ + if (priv->active_offloads & ENETC_F_LSO && + (skb->len - enetc_lso_get_hdr_len(skb)) <= + ENETC_LSO_MAX_DATA_LEN) { + if (enetc_bd_unused(tx_ring) < enetc_lso_count_descs(skb)) { + netif_stop_subqueue(ndev, tx_ring->index); + return NETDEV_TX_BUSY; + } - enetc_lock_mdio(); - count = enetc_map_tx_tso_buffs(tx_ring, skb); - enetc_unlock_mdio(); + count = enetc_lso_hw_offload(tx_ring, skb); + } else { + if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) { + netif_stop_subqueue(ndev, tx_ring->index); + return NETDEV_TX_BUSY; + } + + enetc_lock_mdio(); + count = enetc_map_tx_tso_buffs(tx_ring, skb); + enetc_unlock_mdio(); + } } else { - if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS)) + if (unlikely(skb_shinfo(skb)->nr_frags > priv->max_frags)) if (unlikely(skb_linearize(skb))) goto drop_packet_err; @@ -623,11 +985,6 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb, return NETDEV_TX_BUSY; } - if (skb->ip_summed == CHECKSUM_PARTIAL) { - err = skb_checksum_help(skb); - if (err) - goto drop_packet_err; - } enetc_lock_mdio(); count = enetc_map_tx_buffs(tx_ring, skb); enetc_unlock_mdio(); @@ -636,7 +993,7 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb, if (unlikely(!count)) goto drop_packet_err; - if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED) + if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED(priv->max_frags)) netif_stop_subqueue(ndev, tx_ring->index); return NETDEV_TX_OK; @@ -700,8 +1057,9 @@ static void enetc_rx_dim_work(struct work_struct *w) net_dim_get_rx_moderation(dim->mode, dim->profile_ix); struct enetc_int_vector *v = container_of(dim, struct enetc_int_vector, rx_dim); + struct enetc_ndev_priv *priv = netdev_priv(v->rx_ring.ndev); - v->rx_ictt = enetc_usecs_to_cycles(moder.usec); + v->rx_ictt 
= enetc_usecs_to_cycles(moder.usec, priv->sysclk_freq); dim->state = DIM_START_MEASURE; } @@ -718,7 +1076,7 @@ static void enetc_rx_net_dim(struct enetc_int_vector *v) v->rx_ring.stats.packets, v->rx_ring.stats.bytes, &dim_sample); - net_dim(&v->rx_dim, dim_sample); + net_dim(&v->rx_dim, &dim_sample); } static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci) @@ -902,7 +1260,9 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) && __netif_subqueue_stopped(ndev, tx_ring->index) && - (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) { + !test_bit(ENETC_TX_DOWN, &priv->flags) && + (enetc_bd_unused(tx_ring) >= + ENETC_TXBDS_MAX_NEEDED(priv->max_frags)))) { netif_wake_subqueue(ndev, tx_ring->index); } @@ -977,7 +1337,6 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt) return j; } -#ifdef CONFIG_FSL_ENETC_PTP_CLOCK static void enetc_get_rx_tstamp(struct net_device *ndev, union enetc_rx_bd *rxbd, struct sk_buff *skb) @@ -1001,7 +1360,6 @@ static void enetc_get_rx_tstamp(struct net_device *ndev, shhwtstamps->hwtstamp = ns_to_ktime(tstamp); } } -#endif static void enetc_get_offloads(struct enetc_bdr *rx_ring, union enetc_rx_bd *rxbd, struct sk_buff *skb) @@ -1041,10 +1399,9 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring, __vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt)); } -#ifdef CONFIG_FSL_ENETC_PTP_CLOCK - if (priv->active_offloads & ENETC_F_RX_TSTAMP) + if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && + (priv->active_offloads & ENETC_F_RX_TSTAMP)) enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb); -#endif } /* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS, @@ -1380,6 +1737,9 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames, int xdp_tx_bd_cnt, i, k; int xdp_tx_frm_cnt = 0; + if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) + return -ENETDOWN; + enetc_lock_mdio(); tx_ring = priv->xdp_tx_ring[smp_processor_id()]; @@ -1524,7 +1884,16 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first, &rx_ring->rx_swbd[rx_ring_first]); enetc_bdr_idx_inc(rx_ring, &rx_ring_first); } - rx_ring->stats.xdp_drops++; +} + +static void enetc_bulk_flip_buff(struct enetc_bdr *rx_ring, int rx_ring_first, + int rx_ring_last) +{ + while (rx_ring_first != rx_ring_last) { + enetc_flip_rx_buff(rx_ring, + &rx_ring->rx_swbd[rx_ring_first]); + enetc_bdr_idx_inc(rx_ring, &rx_ring_first); + } } static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, @@ -1545,11 +1914,10 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, while (likely(rx_frm_cnt < work_limit)) { union enetc_rx_bd *rxbd, *orig_rxbd; - int orig_i, orig_cleaned_cnt; struct xdp_buff xdp_buff; struct sk_buff *skb; + int orig_i, err; u32 bd_status; - int err; rxbd = enetc_rxbd(rx_ring, i); bd_status = le32_to_cpu(rxbd->r.lstatus); @@ -1564,7 +1932,6 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, break; orig_rxbd = rxbd; - orig_cleaned_cnt = cleaned_cnt; orig_i = i; enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i, @@ -1589,22 +1956,35 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, fallthrough; case XDP_DROP: enetc_xdp_drop(rx_ring, orig_i, i); + rx_ring->stats.xdp_drops++; break; case XDP_PASS: - rxbd = orig_rxbd; - cleaned_cnt = orig_cleaned_cnt; - i = orig_i; - - skb = enetc_build_skb(rx_ring, bd_status, &rxbd, - &i, &cleaned_cnt, - ENETC_RXB_DMA_SIZE_XDP); - if (unlikely(!skb)) + skb = 
xdp_build_skb_from_buff(&xdp_buff); + /* Probably under memory pressure, stop NAPI */ + if (unlikely(!skb)) { + enetc_xdp_drop(rx_ring, orig_i, i); + rx_ring->stats.xdp_drops++; goto out; + } + + enetc_get_offloads(rx_ring, orig_rxbd, skb); + + /* These buffers are about to be owned by the stack. + * Update our buffer cache (the rx_swbd array elements) + * with their other page halves. + */ + enetc_bulk_flip_buff(rx_ring, orig_i, i); napi_gro_receive(napi, skb); break; case XDP_TX: tx_ring = priv->xdp_tx_ring[rx_ring->index]; + if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) { + enetc_xdp_drop(rx_ring, orig_i, i); + tx_ring->stats.xdp_tx_drops++; + break; + } + xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr, rx_ring, orig_i, i); @@ -1613,7 +1993,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, enetc_xdp_drop(rx_ring, orig_i, i); tx_ring->stats.xdp_tx_drops++; } else { - tx_ring->stats.xdp_tx += xdp_tx_bd_cnt; + tx_ring->stats.xdp_tx++; rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt; xdp_tx_frm_cnt++; /* The XDP_TX enqueue was successful, so we @@ -1635,11 +2015,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, enetc_xdp_drop(rx_ring, orig_i, i); rx_ring->stats.xdp_redirect_failures++; } else { - while (orig_i != i) { - enetc_flip_rx_buff(rx_ring, - &rx_ring->rx_swbd[orig_i]); - enetc_bdr_idx_inc(rx_ring, &orig_i); - } + enetc_bulk_flip_buff(rx_ring, orig_i, i); xdp_redirect_frm_cnt++; rx_ring->stats.xdp_redirect++; } @@ -1729,9 +2105,15 @@ void enetc_get_si_caps(struct enetc_si *si) si->num_rx_rings = (val >> 16) & 0xff; si->num_tx_rings = val & 0xff; - val = enetc_rd(hw, ENETC_SIRFSCAPR); - si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val); - si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE); + val = enetc_rd(hw, ENETC_SIPCAPR0); + if (val & ENETC_SIPCAPR0_RFS) { + val = enetc_rd(hw, ENETC_SIRFSCAPR); + si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val); + si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE); + } else { + /* ENETC which not supports RFS */ + si->num_fs_entries = 0; + } si->num_rss = 0; val = enetc_rd(hw, ENETC_SIPCAPR0); @@ -1742,14 +2124,8 @@ void enetc_get_si_caps(struct enetc_si *si) si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss); } - if (val & ENETC_SIPCAPR0_QBV) - si->hw_features |= ENETC_SI_F_QBV; - - if (val & ENETC_SIPCAPR0_QBU) - si->hw_features |= ENETC_SI_F_QBU; - - if (val & ENETC_SIPCAPR0_PSFP) - si->hw_features |= ENETC_SI_F_PSFP; + if (val & ENETC_SIPCAPR0_LSO) + si->hw_features |= ENETC_SI_F_LSO; } EXPORT_SYMBOL_GPL(enetc_get_si_caps); @@ -2039,13 +2415,35 @@ static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups) for (i = 0; i < si->num_rss; i++) rss_table[i] = i % num_groups; - enetc_set_rss_table(si, rss_table, si->num_rss); + si->ops->set_rss_table(si, rss_table, si->num_rss); kfree(rss_table); return 0; } +static void enetc_set_lso_flags_mask(struct enetc_hw *hw) +{ + enetc_wr(hw, ENETC4_SILSOSFMR0, + SILSOSFMR0_VAL_SET(ENETC4_TCP_NL_SEG_FLAGS_DMASK, + ENETC4_TCP_NL_SEG_FLAGS_DMASK)); + enetc_wr(hw, ENETC4_SILSOSFMR1, 0); +} + +static void enetc_set_rss(struct net_device *ndev, int en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + u32 reg; + + enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings); + + reg = enetc_rd(hw, ENETC_SIMR); + reg &= ~ENETC_SIMR_RSSE; + reg |= (en) ? 
ENETC_SIMR_RSSE : 0; + enetc_wr(hw, ENETC_SIMR, reg); +} + int enetc_configure_si(struct enetc_ndev_priv *priv) { struct enetc_si *si = priv->si; @@ -2059,10 +2457,16 @@ int enetc_configure_si(struct enetc_ndev_priv *priv) /* enable SI */ enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN); + if (si->hw_features & ENETC_SI_F_LSO) + enetc_set_lso_flags_mask(hw); + if (si->num_rss) { err = enetc_setup_default_rss_table(si, priv->num_rx_rings); if (err) return err; + + if (priv->ndev->features & NETIF_F_RXHASH) + enetc_set_rss(priv->ndev, true); } return 0; @@ -2083,9 +2487,9 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv) */ priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings); priv->num_tx_rings = si->num_tx_rings; - priv->bdr_int_num = cpus; + priv->bdr_int_num = priv->num_rx_rings; priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL; - priv->tx_ictt = ENETC_TXIC_TIMETHR; + priv->tx_ictt = enetc_usecs_to_cycles(600, priv->sysclk_freq); } EXPORT_SYMBOL_GPL(enetc_init_si_rings_params); @@ -2226,18 +2630,24 @@ static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); } -static void enetc_enable_bdrs(struct enetc_ndev_priv *priv) +static void enetc_enable_rx_bdrs(struct enetc_ndev_priv *priv) { struct enetc_hw *hw = &priv->si->hw; int i; - for (i = 0; i < priv->num_tx_rings; i++) - enetc_enable_txbdr(hw, priv->tx_ring[i]); - for (i = 0; i < priv->num_rx_rings; i++) enetc_enable_rxbdr(hw, priv->rx_ring[i]); } +static void enetc_enable_tx_bdrs(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_enable_txbdr(hw, priv->tx_ring[i]); +} + static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) { int idx = rx_ring->index; @@ -2254,18 +2664,24 @@ static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0); } -static void enetc_disable_bdrs(struct enetc_ndev_priv *priv) +static void enetc_disable_rx_bdrs(struct enetc_ndev_priv *priv) { struct enetc_hw *hw = &priv->si->hw; int i; - for (i = 0; i < priv->num_tx_rings; i++) - enetc_disable_txbdr(hw, priv->tx_ring[i]); - for (i = 0; i < priv->num_rx_rings; i++) enetc_disable_rxbdr(hw, priv->rx_ring[i]); } +static void enetc_disable_tx_bdrs(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_disable_txbdr(hw, priv->tx_ring[i]); +} + static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) { int delay = 8, timeout = 100; @@ -2305,12 +2721,11 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv) snprintf(v->name, sizeof(v->name), "%s-rxtx%d", priv->ndev->name, i); - err = request_irq(irq, enetc_msix, 0, v->name, v); + err = request_irq(irq, enetc_msix, IRQF_NO_AUTOEN, v->name, v); if (err) { dev_err(priv->dev, "request_irq() failed!\n"); goto irq_err; } - disable_irq(irq); v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER); v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER); @@ -2464,9 +2879,13 @@ void enetc_start(struct net_device *ndev) enable_irq(irq); } - enetc_enable_bdrs(priv); + enetc_enable_tx_bdrs(priv); + + enetc_enable_rx_bdrs(priv); netif_tx_start_all_queues(ndev); + + clear_bit(ENETC_TX_DOWN, &priv->flags); } EXPORT_SYMBOL_GPL(enetc_start); @@ -2479,10 +2898,14 @@ int enetc_open(struct net_device *ndev) extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP); - err = enetc_setup_irqs(priv); + 
err = clk_prepare_enable(priv->ref_clk); if (err) return err; + err = enetc_setup_irqs(priv); + if (err) + goto err_setup_irqs; + err = enetc_phylink_connect(ndev); if (err) goto err_phy_connect; @@ -2514,6 +2937,8 @@ err_alloc_tx: phylink_disconnect_phy(priv->phylink); err_phy_connect: enetc_free_irqs(priv); +err_setup_irqs: + clk_disable_unprepare(priv->ref_clk); return err; } @@ -2524,9 +2949,15 @@ void enetc_stop(struct net_device *ndev) struct enetc_ndev_priv *priv = netdev_priv(ndev); int i; + set_bit(ENETC_TX_DOWN, &priv->flags); + netif_tx_stop_all_queues(ndev); - enetc_disable_bdrs(priv); + enetc_disable_rx_bdrs(priv); + + enetc_wait_bdrs(priv); + + enetc_disable_tx_bdrs(priv); for (i = 0; i < priv->bdr_int_num; i++) { int irq = pci_irq_vector(priv->si->pdev, @@ -2537,8 +2968,6 @@ void enetc_stop(struct net_device *ndev) napi_disable(&priv->int_vector[i]->napi); } - enetc_wait_bdrs(priv); - enetc_clear_interrupts(priv); } EXPORT_SYMBOL_GPL(enetc_stop); @@ -2563,6 +2992,7 @@ int enetc_close(struct net_device *ndev) enetc_assign_tx_resources(priv, NULL); enetc_free_irqs(priv); + clk_disable_unprepare(priv->ref_clk); return 0; } @@ -2769,7 +3199,7 @@ static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog, if (priv->min_num_stack_tx_queues + num_xdp_tx_queues > priv->num_tx_rings) { NL_SET_ERR_MSG_FMT_MOD(extack, - "Reserving %d XDP TXQs does not leave a minimum of %d for stack (total %d)", + "Reserving %d XDP TXQs leaves under %d for stack (total %d)", num_xdp_tx_queues, priv->min_num_stack_tx_queues, priv->num_tx_rings); @@ -2829,22 +3259,6 @@ struct net_device_stats *enetc_get_stats(struct net_device *ndev) } EXPORT_SYMBOL_GPL(enetc_get_stats); -static int enetc_set_rss(struct net_device *ndev, int en) -{ - struct enetc_ndev_priv *priv = netdev_priv(ndev); - struct enetc_hw *hw = &priv->si->hw; - u32 reg; - - enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings); - - reg = enetc_rd(hw, ENETC_SIMR); - reg &= ~ENETC_SIMR_RSSE; - reg |= (en) ? 
ENETC_SIMR_RSSE : 0; - enetc_wr(hw, ENETC_SIMR, reg); - - return 0; -} - static void enetc_enable_rxvlan(struct net_device *ndev, bool en) { struct enetc_ndev_priv *priv = netdev_priv(ndev); @@ -2882,17 +3296,17 @@ void enetc_set_features(struct net_device *ndev, netdev_features_t features) } EXPORT_SYMBOL_GPL(enetc_set_features); -#ifdef CONFIG_FSL_ENETC_PTP_CLOCK -static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr) +int enetc_hwtstamp_set(struct net_device *ndev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct enetc_ndev_priv *priv = netdev_priv(ndev); int err, new_offloads = priv->active_offloads; - struct hwtstamp_config config; - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; + if (!IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) + return -EOPNOTSUPP; - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; break; @@ -2901,6 +3315,9 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr) new_offloads |= ENETC_F_TX_TSTAMP; break; case HWTSTAMP_TX_ONESTEP_SYNC: + if (!enetc_si_is_pf(priv->si)) + return -EOPNOTSUPP; + new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP; break; @@ -2908,13 +3325,13 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr) return -ERANGE; } - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: new_offloads &= ~ENETC_F_RX_TSTAMP; break; default: new_offloads |= ENETC_F_RX_TSTAMP; - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; } if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) { @@ -2927,41 +3344,35 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr) priv->active_offloads = new_offloads; - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; + return 0; } +EXPORT_SYMBOL_GPL(enetc_hwtstamp_set); -static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr) +int enetc_hwtstamp_get(struct net_device *ndev, + struct kernel_hwtstamp_config *config) { struct enetc_ndev_priv *priv = netdev_priv(ndev); - struct hwtstamp_config config; - config.flags = 0; + if (!IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) + return -EOPNOTSUPP; if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) - config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC; + config->tx_type = HWTSTAMP_TX_ONESTEP_SYNC; else if (priv->active_offloads & ENETC_F_TX_TSTAMP) - config.tx_type = HWTSTAMP_TX_ON; + config->tx_type = HWTSTAMP_TX_ON; else - config.tx_type = HWTSTAMP_TX_OFF; + config->tx_type = HWTSTAMP_TX_OFF; - config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ? - HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; + config->rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ? + HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
- -EFAULT : 0; + return 0; } -#endif +EXPORT_SYMBOL_GPL(enetc_hwtstamp_get); int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) { struct enetc_ndev_priv *priv = netdev_priv(ndev); -#ifdef CONFIG_FSL_ENETC_PTP_CLOCK - if (cmd == SIOCSHWTSTAMP) - return enetc_hwtstamp_set(ndev, rq); - if (cmd == SIOCGHWTSTAMP) - return enetc_hwtstamp_get(ndev, rq); -#endif if (!priv->phylink) return -EOPNOTSUPP; @@ -2970,13 +3381,100 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) } EXPORT_SYMBOL_GPL(enetc_ioctl); +static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i, + int v_tx_rings) +{ + struct enetc_int_vector *v; + struct enetc_bdr *bdr; + int j, err; + + v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL); + if (!v) + return -ENOMEM; + + priv->int_vector[i] = v; + bdr = &v->rx_ring; + bdr->index = i; + bdr->ndev = priv->ndev; + bdr->dev = priv->dev; + bdr->bd_count = priv->rx_bd_count; + bdr->buffer_offset = ENETC_RXB_PAD; + priv->rx_ring[i] = bdr; + + err = __xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0, + ENETC_RXB_DMA_SIZE_XDP); + if (err) + goto free_vector; + + err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq, MEM_TYPE_PAGE_SHARED, + NULL); + if (err) { + xdp_rxq_info_unreg(&bdr->xdp.rxq); + goto free_vector; + } + + /* init defaults for adaptive IC */ + if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) { + v->rx_ictt = 0x1; + v->rx_dim_en = true; + } + + INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work); + netif_napi_add(priv->ndev, &v->napi, enetc_poll); + v->count_tx_rings = v_tx_rings; + + for (j = 0; j < v_tx_rings; j++) { + int idx; + + /* default tx ring mapping policy */ + idx = priv->bdr_int_num * j + i; + __set_bit(idx, &v->tx_rings_map); + bdr = &v->tx_ring[j]; + bdr->index = idx; + bdr->ndev = priv->ndev; + bdr->dev = priv->dev; + bdr->bd_count = priv->tx_bd_count; + priv->tx_ring[idx] = bdr; + } + + return 0; + +free_vector: + priv->rx_ring[i] = NULL; + priv->int_vector[i] = NULL; + kfree(v); + + return err; +} + +static void enetc_int_vector_destroy(struct enetc_ndev_priv *priv, int i) +{ + struct enetc_int_vector *v = priv->int_vector[i]; + struct enetc_bdr *rx_ring = &v->rx_ring; + int j, tx_ring_index; + + xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq); + xdp_rxq_info_unreg(&rx_ring->xdp.rxq); + netif_napi_del(&v->napi); + cancel_work_sync(&v->rx_dim.work); + + for (j = 0; j < v->count_tx_rings; j++) { + tx_ring_index = priv->bdr_int_num * j + i; + priv->tx_ring[tx_ring_index] = NULL; + } + + priv->rx_ring[i] = NULL; + priv->int_vector[i] = NULL; + kfree(v); +} + int enetc_alloc_msix(struct enetc_ndev_priv *priv) { struct pci_dev *pdev = priv->si->pdev; + int v_tx_rings, v_remainder; int num_stack_tx_queues; int first_xdp_tx_ring; int i, n, err, nvec; - int v_tx_rings; nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num; /* allocate MSIX for both messaging and Rx/Tx interrupts */ @@ -2990,64 +3488,17 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv) /* # of tx rings per int vector */ v_tx_rings = priv->num_tx_rings / priv->bdr_int_num; + v_remainder = priv->num_tx_rings % priv->bdr_int_num; for (i = 0; i < priv->bdr_int_num; i++) { - struct enetc_int_vector *v; - struct enetc_bdr *bdr; - int j; - - v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL); - if (!v) { - err = -ENOMEM; - goto fail; - } - - priv->int_vector[i] = v; - - bdr = &v->rx_ring; - bdr->index = i; - bdr->ndev = priv->ndev; - bdr->dev = priv->dev; - bdr->bd_count = priv->rx_bd_count; - bdr->buffer_offset = ENETC_RXB_PAD; - priv->rx_ring[i] = bdr; - 
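
The enetc_int_vector_init() hunk above encodes the "default tx ring mapping policy" as idx = priv->bdr_int_num * j + i, which interleaves TX ring indices across interrupt vectors. A minimal standalone sketch of that mapping, using made-up counts (2 vectors, 4 rings per vector) that are assumptions rather than values from this patch:

#include <stdio.h>

/* Illustration only: prints the ring index each vector slot would own
 * under idx = bdr_int_num * j + i; the counts below are hypothetical.
 */
int main(void)
{
	int bdr_int_num = 2;	/* hypothetical number of interrupt vectors */
	int v_tx_rings = 4;	/* hypothetical TX rings per vector */
	int i, j;

	for (i = 0; i < bdr_int_num; i++)
		for (j = 0; j < v_tx_rings; j++)
			printf("vector %d, slot %d -> tx ring %d\n",
			       i, j, bdr_int_num * j + i);
	/* vector 0 ends up with rings 0, 2, 4, 6; vector 1 with 1, 3, 5, 7 */
	return 0;
}

With the remainder handling added further down in enetc_alloc_msix(), any leftover rings (num_tx_rings % bdr_int_num) simply give the lowest-numbered vectors one extra slot each.
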
- err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0); - if (err) { - kfree(v); - goto fail; - } + /* Distribute the remaining TX rings to the first v_remainder + * interrupt vectors + */ + int num_tx_rings = i < v_remainder ? v_tx_rings + 1 : v_tx_rings; - err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq, - MEM_TYPE_PAGE_SHARED, NULL); - if (err) { - xdp_rxq_info_unreg(&bdr->xdp.rxq); - kfree(v); + err = enetc_int_vector_init(priv, i, num_tx_rings); + if (err) goto fail; - } - - /* init defaults for adaptive IC */ - if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) { - v->rx_ictt = 0x1; - v->rx_dim_en = true; - } - INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work); - netif_napi_add(priv->ndev, &v->napi, enetc_poll); - v->count_tx_rings = v_tx_rings; - - for (j = 0; j < v_tx_rings; j++) { - int idx; - - /* default tx ring mapping policy */ - idx = priv->bdr_int_num * j + i; - __set_bit(idx, &v->tx_rings_map); - bdr = &v->tx_ring[j]; - bdr->index = idx; - bdr->ndev = priv->ndev; - bdr->dev = priv->dev; - bdr->bd_count = priv->tx_bd_count; - priv->tx_ring[idx] = bdr; - } } num_stack_tx_queues = enetc_num_stack_tx_queues(priv); @@ -3067,16 +3518,8 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv) return 0; fail: - while (i--) { - struct enetc_int_vector *v = priv->int_vector[i]; - struct enetc_bdr *rx_ring = &v->rx_ring; - - xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq); - xdp_rxq_info_unreg(&rx_ring->xdp.rxq); - netif_napi_del(&v->napi); - cancel_work_sync(&v->rx_dim.work); - kfree(v); - } + while (i--) + enetc_int_vector_destroy(priv, i); pci_free_irq_vectors(pdev); @@ -3088,26 +3531,8 @@ void enetc_free_msix(struct enetc_ndev_priv *priv) { int i; - for (i = 0; i < priv->bdr_int_num; i++) { - struct enetc_int_vector *v = priv->int_vector[i]; - struct enetc_bdr *rx_ring = &v->rx_ring; - - xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq); - xdp_rxq_info_unreg(&rx_ring->xdp.rxq); - netif_napi_del(&v->napi); - cancel_work_sync(&v->rx_dim.work); - } - - for (i = 0; i < priv->num_rx_rings; i++) - priv->rx_ring[i] = NULL; - - for (i = 0; i < priv->num_tx_rings; i++) - priv->tx_ring[i] = NULL; - - for (i = 0; i < priv->bdr_int_num; i++) { - kfree(priv->int_vector[i]); - priv->int_vector[i] = NULL; - } + for (i = 0; i < priv->bdr_int_num; i++) + enetc_int_vector_destroy(priv, i); /* disable all MSIX for this device */ pci_free_irq_vectors(priv->si->pdev); @@ -3216,5 +3641,59 @@ void enetc_pci_remove(struct pci_dev *pdev) } EXPORT_SYMBOL_GPL(enetc_pci_remove); +static const struct enetc_drvdata enetc_pf_data = { + .sysclk_freq = ENETC_CLK_400M, + .pmac_offset = ENETC_PMAC_OFFSET, + .max_frags = ENETC_MAX_SKB_FRAGS, + .eth_ops = &enetc_pf_ethtool_ops, +}; + +static const struct enetc_drvdata enetc4_pf_data = { + .sysclk_freq = ENETC_CLK_333M, + .tx_csum = true, + .max_frags = ENETC4_MAX_SKB_FRAGS, + .pmac_offset = ENETC4_PMAC_OFFSET, + .eth_ops = &enetc4_pf_ethtool_ops, +}; + +static const struct enetc_drvdata enetc_vf_data = { + .sysclk_freq = ENETC_CLK_400M, + .max_frags = ENETC_MAX_SKB_FRAGS, + .eth_ops = &enetc_vf_ethtool_ops, +}; + +static const struct enetc_platform_info enetc_info[] = { + { .revision = ENETC_REV_1_0, + .dev_id = ENETC_DEV_ID_PF, + .data = &enetc_pf_data, + }, + { .revision = ENETC_REV_4_1, + .dev_id = NXP_ENETC_PF_DEV_ID, + .data = &enetc4_pf_data, + }, + { .revision = ENETC_REV_1_0, + .dev_id = ENETC_DEV_ID_VF, + .data = &enetc_vf_data, + }, +}; + +int enetc_get_driver_data(struct enetc_si *si) +{ + u16 dev_id = si->pdev->device; + int i; + + for (i = 0; i < ARRAY_SIZE(enetc_info); 
i++) { + if (si->revision == enetc_info[i].revision && + dev_id == enetc_info[i].dev_id) { + si->drvdata = enetc_info[i].data; + + return 0; + } + } + + return -ERANGE; +} +EXPORT_SYMBOL_GPL(enetc_get_driver_data); + MODULE_DESCRIPTION("NXP ENETC Ethernet driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index a9c2ff22431c..872d2cbd088b 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -8,12 +8,14 @@ #include <linux/dma-mapping.h> #include <linux/skbuff.h> #include <linux/ethtool.h> +#include <linux/fsl/ntmp.h> #include <linux/if_vlan.h> #include <linux/phylink.h> #include <linux/dim.h> #include <net/xdp.h> #include "enetc_hw.h" +#include "enetc4_hw.h" #define ENETC_MAC_MAXFRM_SIZE 9600 #define ENETC_MAX_MTU (ENETC_MAC_MAXFRM_SIZE - \ @@ -21,6 +23,18 @@ #define ENETC_CBD_DATA_MEM_ALIGN 64 +#define ENETC_MADDR_HASH_TBL_SZ 64 + +enum enetc_mac_addr_type {UC, MC, MADDR_TYPE}; + +struct enetc_mac_filter { + union { + char mac_addr[ETH_ALEN]; + DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ); + }; + int mac_addr_cnt; +}; + struct enetc_tx_swbd { union { struct sk_buff *skb; @@ -40,6 +54,18 @@ struct enetc_tx_swbd { u8 qbv_en:1; }; +struct enetc_lso_t { + bool ipv6; + bool tcp; + u8 l3_hdr_len; + u8 hdr_len; /* LSO header length */ + u8 l3_start; + u16 lso_seg_size; + int total_len; /* total data length, not include LSO header */ +}; + +#define ENETC_LSO_MAX_DATA_LEN SZ_256K + #define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE #define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */ #define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */ @@ -58,9 +84,16 @@ struct enetc_rx_swbd { /* ENETC overhead: optional extension BD + 1 BD gap */ #define ENETC_TXBDS_NEEDED(val) ((val) + 2) -/* max # of chained Tx BDs is 15, including head and extension BD */ +/* For LS1028A, max # of chained Tx BDs is 15, including head and + * extension BD. + */ #define ENETC_MAX_SKB_FRAGS 13 -#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1) +/* For ENETC v4 and later versions, max # of chained Tx BDs is 63, + * including head and extension BD, but the range of MAX_SKB_FRAGS + * is 17 ~ 45, so set ENETC4_MAX_SKB_FRAGS to MAX_SKB_FRAGS. 
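
For reference, expanding the BD budget macros above; the 17 here is only the usual MAX_SKB_FRAGS default, an assumption rather than a value stated in this patch:

/* Sketch of how the macros expand, assuming the definitions above:
 *
 *   ENETC_TXBDS_NEEDED(val)   = (val) + 2                    (extension BD + 1 BD gap)
 *   ENETC_TXBDS_MAX_NEEDED(x) = ENETC_TXBDS_NEEDED((x) + 1)  (+ head BD)
 *
 *   LS1028A:  ENETC_TXBDS_MAX_NEEDED(13) = (13 + 1) + 2 = 16 free BDs
 *   ENETC v4: ENETC_TXBDS_MAX_NEEDED(17) = (17 + 1) + 2 = 20 free BDs
 *
 * which is what enetc_start_xmit() and enetc_clean_tx_ring() now compare
 * against via ENETC_TXBDS_MAX_NEEDED(priv->max_frags).
 */
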
+ */ +#define ENETC4_MAX_SKB_FRAGS MAX_SKB_FRAGS +#define ENETC_TXBDS_MAX_NEEDED(x) ENETC_TXBDS_NEEDED((x) + 1) struct enetc_ring_stats { unsigned int packets; @@ -184,10 +217,9 @@ static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i) { int hw_idx = i; -#ifdef CONFIG_FSL_ENETC_PTP_CLOCK - if (rx_ring->ext_en) + if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && rx_ring->ext_en) hw_idx = 2 * i; -#endif + return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]); } @@ -199,10 +231,8 @@ static inline void enetc_rxbd_next(struct enetc_bdr *rx_ring, new_rxbd++; -#ifdef CONFIG_FSL_ENETC_PTP_CLOCK - if (rx_ring->ext_en) + if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && rx_ring->ext_en) new_rxbd++; -#endif if (unlikely(++new_index == rx_ring->bd_count)) { new_rxbd = rx_ring->bd_base; @@ -233,6 +263,34 @@ enum enetc_errata { #define ENETC_SI_F_PSFP BIT(0) #define ENETC_SI_F_QBV BIT(1) #define ENETC_SI_F_QBU BIT(2) +#define ENETC_SI_F_LSO BIT(3) + +struct enetc_drvdata { + u32 pmac_offset; /* Only valid for PSI which supports 802.1Qbu */ + u8 tx_csum:1; + u8 max_frags; + u64 sysclk_freq; + const struct ethtool_ops *eth_ops; +}; + +struct enetc_platform_info { + u16 revision; + u16 dev_id; + const struct enetc_drvdata *data; +}; + +struct enetc_si; + +/* + * This structure defines the some common hooks for ENETC PSI and VSI. + * In addition, since VSI only uses the struct enetc_si as its private + * driver data, so this structure also define some hooks specifically + * for VSI. For VSI-specific hooks, the format is ‘vf_*()’. + */ +struct enetc_si_ops { + int (*get_rss_table)(struct enetc_si *si, u32 *table, int count); + int (*set_rss_table)(struct enetc_si *si, const u32 *table, int count); +}; /* PCI IEP device data */ struct enetc_si { @@ -242,18 +300,33 @@ struct enetc_si { struct net_device *ndev; /* back ref. 
*/ - struct enetc_cbdr cbd_ring; + union { + struct enetc_cbdr cbd_ring; /* Only ENETC 1.0 */ + struct ntmp_user ntmp_user; /* ENETC 4.1 and later */ + }; int num_rx_rings; /* how many rings are available in the SI */ int num_tx_rings; int num_fs_entries; int num_rss; /* number of RSS buckets */ unsigned short pad; + u16 revision; int hw_features; + const struct enetc_drvdata *drvdata; + const struct enetc_si_ops *ops; + + struct workqueue_struct *workqueue; + struct work_struct rx_mode_task; + struct dentry *debugfs_root; }; #define ENETC_SI_ALIGN 32 +static inline bool is_enetc_rev1(struct enetc_si *si) +{ + return si->pdev->revision == ENETC_REV1; +} + static inline void *enetc_si_priv(const struct enetc_si *si) { return (char *)si + ALIGN(sizeof(struct enetc_si), ENETC_SI_ALIGN); @@ -305,7 +378,7 @@ struct enetc_cls_rule { int used; }; -#define ENETC_MAX_BDR_INT 2 /* fixed to max # of available cpus */ +#define ENETC_MAX_BDR_INT 6 /* fixed to max # of available cpus */ struct psfp_cap { u32 max_streamid; u32 max_psfp_filter; @@ -324,10 +397,13 @@ enum enetc_active_offloads { ENETC_F_QBV = BIT(9), ENETC_F_QCI = BIT(10), ENETC_F_QBU = BIT(11), + ENETC_F_TXCSUM = BIT(12), + ENETC_F_LSO = BIT(13), }; enum enetc_flags_bit { ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS = 0, + ENETC_TX_DOWN, }; /* interrupt coalescing modes */ @@ -343,7 +419,6 @@ enum enetc_ic_mode { #define ENETC_RXIC_PKTTHR min_t(u32, 256, ENETC_RX_RING_DEFAULT_SIZE / 2) #define ENETC_TXIC_PKTTHR min_t(u32, 128, ENETC_TX_RING_DEFAULT_SIZE / 2) -#define ENETC_TXIC_TIMETHR enetc_usecs_to_cycles(600) struct enetc_ndev_priv { struct net_device *ndev; @@ -358,6 +433,7 @@ struct enetc_ndev_priv { u16 msg_enable; u8 preemptible_tcs; + u8 max_frags; /* The maximum number of BDs for fragments */ enum enetc_active_offloads active_offloads; @@ -391,6 +467,9 @@ struct enetc_ndev_priv { * and link state updates */ struct mutex mm_lock; + + struct clk *ref_clk; /* RGMII/RMII reference clock */ + u64 sysclk_freq; /* NETC system clock frequency */ }; /* Messaging */ @@ -420,6 +499,10 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv); int enetc_alloc_si_resources(struct enetc_ndev_priv *priv); void enetc_free_si_resources(struct enetc_ndev_priv *priv); int enetc_configure_si(struct enetc_ndev_priv *priv); +int enetc_get_driver_data(struct enetc_si *si); +void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter, + const unsigned char *addr); +void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter); int enetc_open(struct net_device *ndev); int enetc_close(struct net_device *ndev); @@ -435,7 +518,16 @@ int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf); int enetc_xdp_xmit(struct net_device *ndev, int num_frames, struct xdp_frame **frames, u32 flags); +int enetc_hwtstamp_get(struct net_device *ndev, + struct kernel_hwtstamp_config *config); +int enetc_hwtstamp_set(struct net_device *ndev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); + /* ethtool */ +extern const struct ethtool_ops enetc_pf_ethtool_ops; +extern const struct ethtool_ops enetc4_pf_ethtool_ops; +extern const struct ethtool_ops enetc_vf_ethtool_ops; void enetc_set_ethtool_ops(struct net_device *ndev); void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link); void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv); @@ -444,15 +536,19 @@ void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv); int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count, struct 
enetc_cbdr *cbdr); void enetc_teardown_cbdr(struct enetc_cbdr *cbdr); +int enetc4_setup_cbdr(struct enetc_si *si); +void enetc4_teardown_cbdr(struct enetc_si *si); int enetc_set_mac_flt_entry(struct enetc_si *si, int index, char *mac_addr, int si_map); int enetc_clear_mac_flt_entry(struct enetc_si *si, int index); int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse, int index); -void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes); +void enetc_set_rss_key(struct enetc_si *si, const u8 *bytes); int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count); int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count); int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd); +int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count); +int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count); static inline void *enetc_cbd_alloc_data_mem(struct enetc_si *si, struct enetc_cbd *cbd, diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c new file mode 100644 index 000000000000..1b1591dce73d --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright 2025 NXP */ + +#include <linux/device.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/string_choices.h> + +#include "enetc_pf.h" +#include "enetc4_debugfs.h" + +static void enetc_show_si_mac_hash_filter(struct seq_file *s, int i) +{ + struct enetc_si *si = s->private; + struct enetc_hw *hw = &si->hw; + u32 hash_h, hash_l; + + hash_l = enetc_port_rd(hw, ENETC4_PSIUMHFR0(i)); + hash_h = enetc_port_rd(hw, ENETC4_PSIUMHFR1(i)); + seq_printf(s, "SI %d unicast MAC hash filter: 0x%08x%08x\n", + i, hash_h, hash_l); + + hash_l = enetc_port_rd(hw, ENETC4_PSIMMHFR0(i)); + hash_h = enetc_port_rd(hw, ENETC4_PSIMMHFR1(i)); + seq_printf(s, "SI %d multicast MAC hash filter: 0x%08x%08x\n", + i, hash_h, hash_l); +} + +static int enetc_mac_filter_show(struct seq_file *s, void *data) +{ + struct enetc_si *si = s->private; + struct enetc_hw *hw = &si->hw; + struct maft_entry_data maft; + struct enetc_pf *pf; + int i, err, num_si; + u32 val; + + pf = enetc_si_priv(si); + num_si = pf->caps.num_vsi + 1; + + val = enetc_port_rd(hw, ENETC4_PSIPMMR); + for (i = 0; i < num_si; i++) { + seq_printf(s, "SI %d Unicast Promiscuous mode: %s\n", i, + str_enabled_disabled(PSIPMMR_SI_MAC_UP(i) & val)); + seq_printf(s, "SI %d Multicast Promiscuous mode: %s\n", i, + str_enabled_disabled(PSIPMMR_SI_MAC_MP(i) & val)); + } + + /* MAC hash filter table */ + for (i = 0; i < num_si; i++) + enetc_show_si_mac_hash_filter(s, i); + + if (!pf->num_mfe) + return 0; + + /* MAC address filter table */ + seq_puts(s, "MAC address filter table\n"); + for (i = 0; i < pf->num_mfe; i++) { + memset(&maft, 0, sizeof(maft)); + err = ntmp_maft_query_entry(&si->ntmp_user, i, &maft); + if (err) + return err; + + seq_printf(s, "Entry %d, MAC: %pM, SI bitmap: 0x%04x\n", i, + maft.keye.mac_addr, le16_to_cpu(maft.cfge.si_bitmap)); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(enetc_mac_filter); + +void enetc_create_debugfs(struct enetc_si *si) +{ + struct net_device *ndev = si->ndev; + struct dentry *root; + + root = debugfs_create_dir(netdev_name(ndev), NULL); + if (IS_ERR(root)) + return; + + si->debugfs_root = root; + + debugfs_create_file("mac_filter", 0444, root, si, &enetc_mac_filter_fops); +} + +void enetc_remove_debugfs(struct enetc_si *si) +{ + 
debugfs_remove(si->debugfs_root); + si->debugfs_root = NULL; +} diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h new file mode 100644 index 000000000000..96caca35f79d --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2025 NXP */ + +#ifndef __ENETC4_DEBUGFS_H +#define __ENETC4_DEBUGFS_H + +#if IS_ENABLED(CONFIG_DEBUG_FS) +void enetc_create_debugfs(struct enetc_si *si); +void enetc_remove_debugfs(struct enetc_si *si); +#else +static inline void enetc_create_debugfs(struct enetc_si *si) +{ +} + +static inline void enetc_remove_debugfs(struct enetc_si *si) +{ +} +#endif + +#endif diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h new file mode 100644 index 000000000000..aa25b445d301 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * This header file defines the register offsets and bit fields + * of ENETC4 PF and VFs. Note that the same registers as ENETC + * version 1.0 are defined in the enetc_hw.h file. + * + * Copyright 2024 NXP + */ +#ifndef __ENETC4_HW_H_ +#define __ENETC4_HW_H_ + +#define NXP_ENETC_VENDOR_ID 0x1131 +#define NXP_ENETC_PF_DEV_ID 0xe101 + +/**********************Station interface registers************************/ +/* Station interface LSO segmentation flag mask register 0/1 */ +#define ENETC4_SILSOSFMR0 0x1300 +#define SILSOSFMR0_TCP_MID_SEG GENMASK(27, 16) +#define SILSOSFMR0_TCP_1ST_SEG GENMASK(11, 0) +#define SILSOSFMR0_VAL_SET(first, mid) (FIELD_PREP(SILSOSFMR0_TCP_MID_SEG, mid) | \ + FIELD_PREP(SILSOSFMR0_TCP_1ST_SEG, first)) + +#define ENETC4_SILSOSFMR1 0x1304 +#define SILSOSFMR1_TCP_LAST_SEG GENMASK(11, 0) +#define ENETC4_TCP_FLAGS_FIN BIT(0) +#define ENETC4_TCP_FLAGS_SYN BIT(1) +#define ENETC4_TCP_FLAGS_RST BIT(2) +#define ENETC4_TCP_FLAGS_PSH BIT(3) +#define ENETC4_TCP_FLAGS_ACK BIT(4) +#define ENETC4_TCP_FLAGS_URG BIT(5) +#define ENETC4_TCP_FLAGS_ECE BIT(6) +#define ENETC4_TCP_FLAGS_CWR BIT(7) +#define ENETC4_TCP_FLAGS_NS BIT(8) +/* According to tso_build_hdr(), clear all special flags for not last packet. 
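
Working the TCP flag bits above and the ENETC4_TCP_NL_SEG_FLAGS_DMASK defined just below through enetc_set_lso_flags_mask() in enetc.c (reviewer arithmetic, assuming the usual FIELD_PREP semantics for the two GENMASK fields; not patch content):

/*   ENETC4_TCP_NL_SEG_FLAGS_DMASK = FIN | RST | PSH
 *                                 = 0x001 | 0x004 | 0x008 = 0x00d
 *
 *   SILSOSFMR0 = SILSOSFMR0_VAL_SET(0x00d, 0x00d)
 *              = FIELD_PREP(GENMASK(27, 16), 0x00d) |
 *                FIELD_PREP(GENMASK(11, 0),  0x00d)
 *              = 0x000d000d
 *
 * so, per the comment above, FIN/RST/PSH get masked off on the first and
 * middle LSO segments, while SILSOSFMR1 = 0 leaves the last segment's
 * flags untouched.
 */
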
*/ +#define ENETC4_TCP_NL_SEG_FLAGS_DMASK (ENETC4_TCP_FLAGS_FIN | \ + ENETC4_TCP_FLAGS_RST | ENETC4_TCP_FLAGS_PSH) + +/***************************ENETC port registers**************************/ +#define ENETC4_ECAPR0 0x0 +#define ECAPR0_RFS BIT(2) +#define ECAPR0_TSD BIT(5) +#define ECAPR0_RSS BIT(8) +#define ECAPR0_RSC BIT(9) +#define ECAPR0_LSO BIT(10) +#define ECAPR0_WO BIT(13) + +#define ENETC4_ECAPR1 0x4 +#define ECAPR1_NUM_TCS GENMASK(6, 4) +#define ECAPR1_NUM_MCH GENMASK(9, 8) +#define ECAPR1_NUM_UCH GENMASK(11, 10) +#define ECAPR1_NUM_MSIX GENMASK(22, 12) +#define ECAPR1_NUM_VSI GENMASK(27, 24) +#define ECAPR1_NUM_IPV BIT(31) + +#define ENETC4_ECAPR2 0x8 +#define ECAPR2_NUM_TX_BDR GENMASK(9, 0) +#define ECAPR2_NUM_RX_BDR GENMASK(25, 16) + +#define ENETC4_PMR 0x10 +#define PMR_SI_EN(a) BIT((16 + (a))) + +/* Port Pause ON/OFF threshold register */ +#define ENETC4_PPAUONTR 0x108 +#define ENETC4_PPAUOFFTR 0x10c + +/* Port Station interface promiscuous MAC mode register */ +#define ENETC4_PSIPMMR 0x200 +#define PSIPMMR_SI_MAC_UP(a) BIT(a) /* a = SI index */ +#define PSIPMMR_SI_MAC_MP(a) BIT((a) + 16) + +/* Port Station interface promiscuous VLAN mode register */ +#define ENETC4_PSIPVMR 0x204 + +/* Port RSS key register n. n = 0,1,2,...,9 */ +#define ENETC4_PRSSKR(n) ((n) * 0x4 + 0x250) + +/* Port station interface MAC address filtering capability register */ +#define ENETC4_PSIMAFCAPR 0x280 +#define PSIMAFCAPR_NUM_MAC_AFTE GENMASK(11, 0) + +/* Port station interface VLAN filtering capability register */ +#define ENETC4_PSIVLANFCAPR 0x2c0 +#define PSIVLANFCAPR_NUM_VLAN_FTE GENMASK(11, 0) + +/* Port station interface VLAN filtering mode register */ +#define ENETC4_PSIVLANFMR 0x2c4 +#define PSIVLANFMR_VS BIT(0) + +/* Port Station interface a primary MAC address registers */ +#define ENETC4_PSIPMAR0(a) ((a) * 0x80 + 0x2000) +#define ENETC4_PSIPMAR1(a) ((a) * 0x80 + 0x2004) + +/* Port station interface a configuration register 0/2 */ +#define ENETC4_PSICFGR0(a) ((a) * 0x80 + 0x2010) +#define PSICFGR0_VASE BIT(13) +#define PSICFGR0_ASE BIT(15) +#define PSICFGR0_ANTI_SPOOFING (PSICFGR0_VASE | PSICFGR0_ASE) + +#define ENETC4_PSICFGR2(a) ((a) * 0x80 + 0x2018) +#define PSICFGR2_NUM_MSIX GENMASK(5, 0) + +/* Port station interface a unicast MAC hash filter register 0/1 */ +#define ENETC4_PSIUMHFR0(a) ((a) * 0x80 + 0x2050) +#define ENETC4_PSIUMHFR1(a) ((a) * 0x80 + 0x2054) + +/* Port station interface a multicast MAC hash filter register 0/1 */ +#define ENETC4_PSIMMHFR0(a) ((a) * 0x80 + 0x2058) +#define ENETC4_PSIMMHFR1(a) ((a) * 0x80 + 0x205c) + +/* Port station interface a VLAN hash filter register 0/1 */ +#define ENETC4_PSIVHFR0(a) ((a) * 0x80 + 0x2060) +#define ENETC4_PSIVHFR1(a) ((a) * 0x80 + 0x2064) + +#define ENETC4_PMCAPR 0x4004 +#define PMCAPR_HD BIT(8) +#define PMCAPR_FP GENMASK(10, 9) + +/* Port configuration register */ +#define ENETC4_PCR 0x4010 +#define PCR_HDR_FMT BIT(0) +#define PCR_L2DOSE BIT(4) +#define PCR_TIMER_CS BIT(8) +#define PCR_PSPEED GENMASK(29, 16) +#define PCR_PSPEED_VAL(speed) (((speed) / 10 - 1) << 16) + +/* Port MAC address register 0/1 */ +#define ENETC4_PMAR0 0x4020 +#define ENETC4_PMAR1 0x4024 + +/* Port operational register */ +#define ENETC4_POR 0x4100 + +/* Port traffic class a transmit maximum SDU register */ +#define ENETC4_PTCTMSDUR(a) ((a) * 0x20 + 0x4208) +#define PTCTMSDUR_MAXSDU GENMASK(15, 0) +#define PTCTMSDUR_SDU_TYPE GENMASK(17, 16) +#define SDU_TYPE_PPDU 0 +#define SDU_TYPE_MPDU 1 +#define SDU_TYPE_MSDU 2 + +#define ENETC4_PMAC_OFFSET 0x400 +#define 
ENETC4_PM_CMD_CFG(mac) (0x5008 + (mac) * 0x400) +#define PM_CMD_CFG_TX_EN BIT(0) +#define PM_CMD_CFG_RX_EN BIT(1) +#define PM_CMD_CFG_PAUSE_FWD BIT(7) +#define PM_CMD_CFG_PAUSE_IGN BIT(8) +#define PM_CMD_CFG_TX_ADDR_INS BIT(9) +#define PM_CMD_CFG_LOOP_EN BIT(10) +#define PM_CMD_CFG_LPBK_MODE GENMASK(12, 11) +#define LPBCK_MODE_EXT_TX_CLK 0 +#define LPBCK_MODE_MAC_LEVEL 1 +#define LPBCK_MODE_INT_TX_CLK 2 +#define PM_CMD_CFG_CNT_FRM_EN BIT(13) +#define PM_CMD_CFG_TXP BIT(15) +#define PM_CMD_CFG_SEND_IDLE BIT(16) +#define PM_CMD_CFG_HD_FCEN BIT(18) +#define PM_CMD_CFG_SFD BIT(21) +#define PM_CMD_CFG_TX_FLUSH BIT(22) +#define PM_CMD_CFG_TX_LOWP_EN BIT(23) +#define PM_CMD_CFG_RX_LOWP_EMPTY BIT(24) +#define PM_CMD_CFG_SWR BIT(26) +#define PM_CMD_CFG_TS_MODE BIT(30) +#define PM_CMD_CFG_MG BIT(31) + +/* Port MAC 0/1 Maximum Frame Length Register */ +#define ENETC4_PM_MAXFRM(mac) (0x5014 + (mac) * 0x400) + +/* Port MAC 0/1 Pause Quanta Register */ +#define ENETC4_PM_PAUSE_QUANTA(mac) (0x5054 + (mac) * 0x400) + +/* Port MAC 0/1 Pause Quanta Threshold Register */ +#define ENETC4_PM_PAUSE_THRESH(mac) (0x5064 + (mac) * 0x400) + +/* Port MAC 0 Interface Mode Control Register */ +#define ENETC4_PM_IF_MODE(mac) (0x5300 + (mac) * 0x400) +#define PM_IF_MODE_IFMODE GENMASK(2, 0) +#define IFMODE_XGMII 0 +#define IFMODE_RMII 3 +#define IFMODE_RGMII 4 +#define IFMODE_SGMII 5 +#define PM_IF_MODE_REVMII BIT(3) +#define PM_IF_MODE_M10 BIT(4) +#define PM_IF_MODE_HD BIT(6) +#define PM_IF_MODE_SSP GENMASK(14, 13) +#define SSP_100M 0 +#define SSP_10M 1 +#define SSP_1G 2 +#define PM_IF_MODE_ENA BIT(15) + +#endif diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c new file mode 100644 index 000000000000..b3dc1afeefd1 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c @@ -0,0 +1,1085 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2024 NXP */ + +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/of_net.h> +#include <linux/of_platform.h> +#include <linux/unaligned.h> + +#include "enetc_pf_common.h" +#include "enetc4_debugfs.h" + +#define ENETC_SI_MAX_RING_NUM 8 + +#define ENETC_MAC_FILTER_TYPE_UC BIT(0) +#define ENETC_MAC_FILTER_TYPE_MC BIT(1) +#define ENETC_MAC_FILTER_TYPE_ALL (ENETC_MAC_FILTER_TYPE_UC | \ + ENETC_MAC_FILTER_TYPE_MC) + +struct enetc_mac_addr { + u8 addr[ETH_ALEN]; +}; + +static void enetc4_get_port_caps(struct enetc_pf *pf) +{ + struct enetc_hw *hw = &pf->si->hw; + u32 val; + + val = enetc_port_rd(hw, ENETC4_ECAPR1); + pf->caps.num_vsi = (val & ECAPR1_NUM_VSI) >> 24; + pf->caps.num_msix = ((val & ECAPR1_NUM_MSIX) >> 12) + 1; + + val = enetc_port_rd(hw, ENETC4_ECAPR2); + pf->caps.num_rx_bdr = (val & ECAPR2_NUM_RX_BDR) >> 16; + pf->caps.num_tx_bdr = val & ECAPR2_NUM_TX_BDR; + + val = enetc_port_rd(hw, ENETC4_PMCAPR); + pf->caps.half_duplex = (val & PMCAPR_HD) ? 
1 : 0; + + val = enetc_port_rd(hw, ENETC4_PSIMAFCAPR); + pf->caps.mac_filter_num = val & PSIMAFCAPR_NUM_MAC_AFTE; +} + +static void enetc4_pf_set_si_primary_mac(struct enetc_hw *hw, int si, + const u8 *addr) +{ + u16 lower = get_unaligned_le16(addr + 4); + u32 upper = get_unaligned_le32(addr); + + if (si != 0) { + __raw_writel(upper, hw->port + ENETC4_PSIPMAR0(si)); + __raw_writew(lower, hw->port + ENETC4_PSIPMAR1(si)); + } else { + __raw_writel(upper, hw->port + ENETC4_PMAR0); + __raw_writew(lower, hw->port + ENETC4_PMAR1); + } +} + +static void enetc4_pf_get_si_primary_mac(struct enetc_hw *hw, int si, + u8 *addr) +{ + u32 upper; + u16 lower; + + upper = __raw_readl(hw->port + ENETC4_PSIPMAR0(si)); + lower = __raw_readw(hw->port + ENETC4_PSIPMAR1(si)); + + put_unaligned_le32(upper, addr); + put_unaligned_le16(lower, addr + 4); +} + +static void enetc4_pf_set_si_mac_promisc(struct enetc_hw *hw, int si, + bool uc_promisc, bool mc_promisc) +{ + u32 val = enetc_port_rd(hw, ENETC4_PSIPMMR); + + if (uc_promisc) + val |= PSIPMMR_SI_MAC_UP(si); + else + val &= ~PSIPMMR_SI_MAC_UP(si); + + if (mc_promisc) + val |= PSIPMMR_SI_MAC_MP(si); + else + val &= ~PSIPMMR_SI_MAC_MP(si); + + enetc_port_wr(hw, ENETC4_PSIPMMR, val); +} + +static void enetc4_pf_set_si_uc_hash_filter(struct enetc_hw *hw, int si, + u64 hash) +{ + enetc_port_wr(hw, ENETC4_PSIUMHFR0(si), lower_32_bits(hash)); + enetc_port_wr(hw, ENETC4_PSIUMHFR1(si), upper_32_bits(hash)); +} + +static void enetc4_pf_set_si_mc_hash_filter(struct enetc_hw *hw, int si, + u64 hash) +{ + enetc_port_wr(hw, ENETC4_PSIMMHFR0(si), lower_32_bits(hash)); + enetc_port_wr(hw, ENETC4_PSIMMHFR1(si), upper_32_bits(hash)); +} + +static void enetc4_pf_set_loopback(struct net_device *ndev, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_si *si = priv->si; + u32 val; + + val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0)); + val = u32_replace_bits(val, en ? 1 : 0, PM_CMD_CFG_LOOP_EN); + /* Default to select MAC level loopback mode if loopback is enabled. */ + val = u32_replace_bits(val, en ? 
LPBCK_MODE_MAC_LEVEL : 0, + PM_CMD_CFG_LPBK_MODE); + + enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val); +} + +static void enetc4_pf_clear_maft_entries(struct enetc_pf *pf) +{ + int i; + + for (i = 0; i < pf->num_mfe; i++) + ntmp_maft_delete_entry(&pf->si->ntmp_user, i); + + pf->num_mfe = 0; +} + +static int enetc4_pf_add_maft_entries(struct enetc_pf *pf, + struct enetc_mac_addr *mac, + int mac_cnt) +{ + struct maft_entry_data maft = {}; + u16 si_bit = BIT(0); + int i, err; + + maft.cfge.si_bitmap = cpu_to_le16(si_bit); + for (i = 0; i < mac_cnt; i++) { + ether_addr_copy(maft.keye.mac_addr, mac[i].addr); + err = ntmp_maft_add_entry(&pf->si->ntmp_user, i, &maft); + if (unlikely(err)) { + pf->num_mfe = i; + goto clear_maft_entries; + } + } + + pf->num_mfe = mac_cnt; + + return 0; + +clear_maft_entries: + enetc4_pf_clear_maft_entries(pf); + + return err; +} + +static int enetc4_pf_set_uc_exact_filter(struct enetc_pf *pf) +{ + int max_num_mfe = pf->caps.mac_filter_num; + struct enetc_mac_filter mac_filter = {}; + struct net_device *ndev = pf->si->ndev; + struct enetc_hw *hw = &pf->si->hw; + struct enetc_mac_addr *mac_tbl; + struct netdev_hw_addr *ha; + int i = 0, err; + int mac_cnt; + + netif_addr_lock_bh(ndev); + + mac_cnt = netdev_uc_count(ndev); + if (!mac_cnt) { + netif_addr_unlock_bh(ndev); + /* clear both MAC hash and exact filters */ + enetc4_pf_set_si_uc_hash_filter(hw, 0, 0); + enetc4_pf_clear_maft_entries(pf); + + return 0; + } + + if (mac_cnt > max_num_mfe) { + err = -ENOSPC; + goto unlock_netif_addr; + } + + mac_tbl = kcalloc(mac_cnt, sizeof(*mac_tbl), GFP_ATOMIC); + if (!mac_tbl) { + err = -ENOMEM; + goto unlock_netif_addr; + } + + netdev_for_each_uc_addr(ha, ndev) { + enetc_add_mac_addr_ht_filter(&mac_filter, ha->addr); + ether_addr_copy(mac_tbl[i++].addr, ha->addr); + } + + netif_addr_unlock_bh(ndev); + + /* Set temporary unicast hash filters in case of Rx loss when + * updating MAC address filter table + */ + enetc4_pf_set_si_uc_hash_filter(hw, 0, *mac_filter.mac_hash_table); + enetc4_pf_clear_maft_entries(pf); + + if (!enetc4_pf_add_maft_entries(pf, mac_tbl, i)) + enetc4_pf_set_si_uc_hash_filter(hw, 0, 0); + + kfree(mac_tbl); + + return 0; + +unlock_netif_addr: + netif_addr_unlock_bh(ndev); + + return err; +} + +static void enetc4_pf_set_mac_hash_filter(struct enetc_pf *pf, int type) +{ + struct net_device *ndev = pf->si->ndev; + struct enetc_mac_filter *mac_filter; + struct enetc_hw *hw = &pf->si->hw; + struct netdev_hw_addr *ha; + + netif_addr_lock_bh(ndev); + if (type & ENETC_MAC_FILTER_TYPE_UC) { + mac_filter = &pf->mac_filter[UC]; + enetc_reset_mac_addr_filter(mac_filter); + netdev_for_each_uc_addr(ha, ndev) + enetc_add_mac_addr_ht_filter(mac_filter, ha->addr); + + enetc4_pf_set_si_uc_hash_filter(hw, 0, + *mac_filter->mac_hash_table); + } + + if (type & ENETC_MAC_FILTER_TYPE_MC) { + mac_filter = &pf->mac_filter[MC]; + enetc_reset_mac_addr_filter(mac_filter); + netdev_for_each_mc_addr(ha, ndev) + enetc_add_mac_addr_ht_filter(mac_filter, ha->addr); + + enetc4_pf_set_si_mc_hash_filter(hw, 0, + *mac_filter->mac_hash_table); + } + netif_addr_unlock_bh(ndev); +} + +static void enetc4_pf_set_mac_filter(struct enetc_pf *pf, int type) +{ + /* Currently, the MAC address filter table (MAFT) only has 4 entries, + * and multiple multicast addresses for filtering will be configured + * in the default network configuration, so MAFT is only suitable for + * unicast filtering. If the number of unicast addresses exceeds the + * table capacity, the MAC hash filter will be used. 
+ */ + if (type & ENETC_MAC_FILTER_TYPE_UC && enetc4_pf_set_uc_exact_filter(pf)) { + /* Fall back to the MAC hash filter */ + enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_UC); + /* Clear the old MAC exact filter */ + enetc4_pf_clear_maft_entries(pf); + } + + if (type & ENETC_MAC_FILTER_TYPE_MC) + enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_MC); +} + +static const struct enetc_pf_ops enetc4_pf_ops = { + .set_si_primary_mac = enetc4_pf_set_si_primary_mac, + .get_si_primary_mac = enetc4_pf_get_si_primary_mac, +}; + +static int enetc4_pf_struct_init(struct enetc_si *si) +{ + struct enetc_pf *pf = enetc_si_priv(si); + + pf->si = si; + pf->total_vfs = pci_sriov_get_totalvfs(si->pdev); + pf->ops = &enetc4_pf_ops; + + enetc4_get_port_caps(pf); + + return 0; +} + +static u32 enetc4_psicfgr0_val_construct(bool is_vf, u32 num_tx_bdr, u32 num_rx_bdr) +{ + u32 val; + + val = ENETC_PSICFGR0_SET_TXBDR(num_tx_bdr); + val |= ENETC_PSICFGR0_SET_RXBDR(num_rx_bdr); + val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S); + + if (is_vf) + val |= ENETC_PSICFGR0_VTE | ENETC_PSICFGR0_SIVIE; + + return val; +} + +static void enetc4_default_rings_allocation(struct enetc_pf *pf) +{ + struct enetc_hw *hw = &pf->si->hw; + u32 num_rx_bdr, num_tx_bdr, val; + u32 vf_tx_bdr, vf_rx_bdr; + int i, rx_rem, tx_rem; + + if (pf->caps.num_rx_bdr < ENETC_SI_MAX_RING_NUM + pf->caps.num_vsi) + num_rx_bdr = pf->caps.num_rx_bdr - pf->caps.num_vsi; + else + num_rx_bdr = ENETC_SI_MAX_RING_NUM; + + if (pf->caps.num_tx_bdr < ENETC_SI_MAX_RING_NUM + pf->caps.num_vsi) + num_tx_bdr = pf->caps.num_tx_bdr - pf->caps.num_vsi; + else + num_tx_bdr = ENETC_SI_MAX_RING_NUM; + + val = enetc4_psicfgr0_val_construct(false, num_tx_bdr, num_rx_bdr); + enetc_port_wr(hw, ENETC4_PSICFGR0(0), val); + + num_rx_bdr = pf->caps.num_rx_bdr - num_rx_bdr; + rx_rem = num_rx_bdr % pf->caps.num_vsi; + num_rx_bdr = num_rx_bdr / pf->caps.num_vsi; + + num_tx_bdr = pf->caps.num_tx_bdr - num_tx_bdr; + tx_rem = num_tx_bdr % pf->caps.num_vsi; + num_tx_bdr = num_tx_bdr / pf->caps.num_vsi; + + for (i = 0; i < pf->caps.num_vsi; i++) { + vf_tx_bdr = (i < tx_rem) ? num_tx_bdr + 1 : num_tx_bdr; + vf_rx_bdr = (i < rx_rem) ? num_rx_bdr + 1 : num_rx_bdr; + val = enetc4_psicfgr0_val_construct(true, vf_tx_bdr, vf_rx_bdr); + enetc_port_wr(hw, ENETC4_PSICFGR0(i + 1), val); + } +} + +static void enetc4_allocate_si_rings(struct enetc_pf *pf) +{ + enetc4_default_rings_allocation(pf); +} + +static void enetc4_pf_set_si_vlan_promisc(struct enetc_hw *hw, int si, bool en) +{ + u32 val = enetc_port_rd(hw, ENETC4_PSIPVMR); + + if (en) + val |= BIT(si); + else + val &= ~BIT(si); + + enetc_port_wr(hw, ENETC4_PSIPVMR, val); +} + +static void enetc4_set_default_si_vlan_promisc(struct enetc_pf *pf) +{ + struct enetc_hw *hw = &pf->si->hw; + int num_si = pf->caps.num_vsi + 1; + int i; + + /* enforce VLAN promiscuous mode for all SIs */ + for (i = 0; i < num_si; i++) + enetc4_pf_set_si_vlan_promisc(hw, i, true); +} + +/* Allocate the number of MSI-X vectors for per SI. 
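
The enetc4_set_si_msix_num() body that follows splits pf->caps.num_msix between the PF and its VSIs; a worked example with assumed capability values (num_msix = 39 and num_vsi = 3 are made up):

/*   total_si     = num_vsi + 1         = 4
 *   PF  PSICFGR2 = 39 / 4 + 39 % 4 - 1 = 9 + 3 - 1 = 11
 *   VSI PSICFGR2 = 39 / 4 - 1          = 8   (written for each of the 3 VSIs)
 *
 * i.e. the PF keeps the division remainder for itself; the trailing "- 1"
 * presumably matches how PSICFGR2_NUM_MSIX encodes the count, but that is
 * an assumption, not something this patch states.
 */
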
*/ +static void enetc4_set_si_msix_num(struct enetc_pf *pf) +{ + struct enetc_hw *hw = &pf->si->hw; + int i, num_msix, total_si; + u32 val; + + total_si = pf->caps.num_vsi + 1; + + num_msix = pf->caps.num_msix / total_si + + pf->caps.num_msix % total_si - 1; + val = num_msix & PSICFGR2_NUM_MSIX; + enetc_port_wr(hw, ENETC4_PSICFGR2(0), val); + + num_msix = pf->caps.num_msix / total_si - 1; + val = num_msix & PSICFGR2_NUM_MSIX; + for (i = 0; i < pf->caps.num_vsi; i++) + enetc_port_wr(hw, ENETC4_PSICFGR2(i + 1), val); +} + +static void enetc4_enable_all_si(struct enetc_pf *pf) +{ + struct enetc_hw *hw = &pf->si->hw; + int num_si = pf->caps.num_vsi + 1; + u32 si_bitmap = 0; + int i; + + /* Master enable for all SIs */ + for (i = 0; i < num_si; i++) + si_bitmap |= PMR_SI_EN(i); + + enetc_port_wr(hw, ENETC4_PMR, si_bitmap); +} + +static void enetc4_configure_port_si(struct enetc_pf *pf) +{ + struct enetc_hw *hw = &pf->si->hw; + + enetc4_allocate_si_rings(pf); + + /* Outer VLAN tag will be used for VLAN filtering */ + enetc_port_wr(hw, ENETC4_PSIVLANFMR, PSIVLANFMR_VS); + + enetc4_set_default_si_vlan_promisc(pf); + + /* Disable SI MAC multicast & unicast promiscuous */ + enetc_port_wr(hw, ENETC4_PSIPMMR, 0); + + enetc4_set_si_msix_num(pf); + + enetc4_enable_all_si(pf); +} + +static void enetc4_pf_reset_tc_msdu(struct enetc_hw *hw) +{ + u32 val = ENETC_MAC_MAXFRM_SIZE; + int tc; + + val = u32_replace_bits(val, SDU_TYPE_MPDU, PTCTMSDUR_SDU_TYPE); + + for (tc = 0; tc < ENETC_NUM_TC; tc++) + enetc_port_wr(hw, ENETC4_PTCTMSDUR(tc), val); +} + +static void enetc4_set_trx_frame_size(struct enetc_pf *pf) +{ + struct enetc_si *si = pf->si; + + enetc_port_mac_wr(si, ENETC4_PM_MAXFRM(0), + ENETC_SET_MAXFRM(ENETC_MAC_MAXFRM_SIZE)); + + enetc4_pf_reset_tc_msdu(&si->hw); +} + +static void enetc4_enable_trx(struct enetc_pf *pf) +{ + struct enetc_hw *hw = &pf->si->hw; + + /* Enable port transmit/receive */ + enetc_port_wr(hw, ENETC4_POR, 0); +} + +static void enetc4_configure_port(struct enetc_pf *pf) +{ + enetc4_configure_port_si(pf); + enetc4_set_trx_frame_size(pf); + enetc_set_default_rss_key(pf); + enetc4_enable_trx(pf); +} + +static int enetc4_init_ntmp_user(struct enetc_si *si) +{ + struct ntmp_user *user = &si->ntmp_user; + + /* For ENETC 4.1, all table versions are 0 */ + memset(&user->tbl, 0, sizeof(user->tbl)); + + return enetc4_setup_cbdr(si); +} + +static void enetc4_free_ntmp_user(struct enetc_si *si) +{ + enetc4_teardown_cbdr(si); +} + +static int enetc4_pf_init(struct enetc_pf *pf) +{ + struct device *dev = &pf->si->pdev->dev; + int err; + + /* Initialize the MAC address for PF and VFs */ + err = enetc_setup_mac_addresses(dev->of_node, pf); + if (err) { + dev_err(dev, "Failed to set MAC addresses\n"); + return err; + } + + err = enetc4_init_ntmp_user(pf->si); + if (err) { + dev_err(dev, "Failed to init CBDR\n"); + return err; + } + + enetc4_configure_port(pf); + + return 0; +} + +static void enetc4_pf_free(struct enetc_pf *pf) +{ + enetc4_free_ntmp_user(pf->si); +} + +static void enetc4_psi_do_set_rx_mode(struct work_struct *work) +{ + struct enetc_si *si = container_of(work, struct enetc_si, rx_mode_task); + struct enetc_pf *pf = enetc_si_priv(si); + struct net_device *ndev = si->ndev; + struct enetc_hw *hw = &si->hw; + bool uc_promisc = false; + bool mc_promisc = false; + int type = 0; + + rtnl_lock(); + + if (ndev->flags & IFF_PROMISC) { + uc_promisc = true; + mc_promisc = true; + } else if (ndev->flags & IFF_ALLMULTI) { + mc_promisc = true; + type = ENETC_MAC_FILTER_TYPE_UC; + } else { + type 
= ENETC_MAC_FILTER_TYPE_ALL; + } + + enetc4_pf_set_si_mac_promisc(hw, 0, uc_promisc, mc_promisc); + + if (uc_promisc) { + enetc4_pf_set_si_uc_hash_filter(hw, 0, 0); + enetc4_pf_clear_maft_entries(pf); + } + + if (mc_promisc) + enetc4_pf_set_si_mc_hash_filter(hw, 0, 0); + + /* Set new MAC filter */ + enetc4_pf_set_mac_filter(pf, type); + + rtnl_unlock(); +} + +static void enetc4_pf_set_rx_mode(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_si *si = priv->si; + + queue_work(si->workqueue, &si->rx_mode_task); +} + +static int enetc4_pf_set_features(struct net_device *ndev, + netdev_features_t features) +{ + netdev_features_t changed = ndev->features ^ features; + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { + bool promisc_en = !(features & NETIF_F_HW_VLAN_CTAG_FILTER); + + enetc4_pf_set_si_vlan_promisc(hw, 0, promisc_en); + } + + if (changed & NETIF_F_LOOPBACK) + enetc4_pf_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK)); + + enetc_set_features(ndev, features); + + return 0; +} + +static const struct net_device_ops enetc4_ndev_ops = { + .ndo_open = enetc_open, + .ndo_stop = enetc_close, + .ndo_start_xmit = enetc_xmit, + .ndo_get_stats = enetc_get_stats, + .ndo_set_mac_address = enetc_pf_set_mac_addr, + .ndo_set_rx_mode = enetc4_pf_set_rx_mode, + .ndo_set_features = enetc4_pf_set_features, + .ndo_vlan_rx_add_vid = enetc_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = enetc_vlan_rx_del_vid, +}; + +static struct phylink_pcs * +enetc4_pl_mac_select_pcs(struct phylink_config *config, phy_interface_t iface) +{ + struct enetc_pf *pf = phylink_to_enetc_pf(config); + + return pf->pcs; +} + +static void enetc4_mac_config(struct enetc_pf *pf, unsigned int mode, + phy_interface_t phy_mode) +{ + struct enetc_ndev_priv *priv = netdev_priv(pf->si->ndev); + struct enetc_si *si = pf->si; + u32 val; + + val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0)); + val &= ~(PM_IF_MODE_IFMODE | PM_IF_MODE_ENA); + + switch (phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + val |= IFMODE_RGMII; + /* We need to enable auto-negotiation for the MAC + * if its RGMII interface support In-Band status. 
+ */ + if (phylink_autoneg_inband(mode)) + val |= PM_IF_MODE_ENA; + break; + case PHY_INTERFACE_MODE_RMII: + val |= IFMODE_RMII; + break; + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_2500BASEX: + val |= IFMODE_SGMII; + break; + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_XGMII: + case PHY_INTERFACE_MODE_USXGMII: + val |= IFMODE_XGMII; + break; + default: + dev_err(priv->dev, + "Unsupported PHY mode:%d\n", phy_mode); + return; + } + + enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val); +} + +static void enetc4_pl_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ + struct enetc_pf *pf = phylink_to_enetc_pf(config); + + enetc4_mac_config(pf, mode, state->interface); +} + +static void enetc4_set_port_speed(struct enetc_ndev_priv *priv, int speed) +{ + u32 old_speed = priv->speed; + u32 val; + + if (speed == old_speed) + return; + + val = enetc_port_rd(&priv->si->hw, ENETC4_PCR); + val &= ~PCR_PSPEED; + + switch (speed) { + case SPEED_100: + case SPEED_1000: + case SPEED_2500: + case SPEED_10000: + val |= (PCR_PSPEED & PCR_PSPEED_VAL(speed)); + break; + case SPEED_10: + default: + val |= (PCR_PSPEED & PCR_PSPEED_VAL(SPEED_10)); + } + + priv->speed = speed; + enetc_port_wr(&priv->si->hw, ENETC4_PCR, val); +} + +static void enetc4_set_rgmii_mac(struct enetc_pf *pf, int speed, int duplex) +{ + struct enetc_si *si = pf->si; + u32 old_val, val; + + old_val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0)); + val = old_val & ~(PM_IF_MODE_ENA | PM_IF_MODE_M10 | PM_IF_MODE_REVMII); + + switch (speed) { + case SPEED_1000: + val = u32_replace_bits(val, SSP_1G, PM_IF_MODE_SSP); + break; + case SPEED_100: + val = u32_replace_bits(val, SSP_100M, PM_IF_MODE_SSP); + break; + case SPEED_10: + val = u32_replace_bits(val, SSP_10M, PM_IF_MODE_SSP); + } + + val = u32_replace_bits(val, duplex == DUPLEX_FULL ? 0 : 1, + PM_IF_MODE_HD); + + if (val == old_val) + return; + + enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val); +} + +static void enetc4_set_rmii_mac(struct enetc_pf *pf, int speed, int duplex) +{ + struct enetc_si *si = pf->si; + u32 old_val, val; + + old_val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0)); + val = old_val & ~(PM_IF_MODE_ENA | PM_IF_MODE_SSP); + + switch (speed) { + case SPEED_100: + val &= ~PM_IF_MODE_M10; + break; + case SPEED_10: + val |= PM_IF_MODE_M10; + } + + val = u32_replace_bits(val, duplex == DUPLEX_FULL ? 0 : 1, + PM_IF_MODE_HD); + + if (val == old_val) + return; + + enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val); +} + +static void enetc4_set_hd_flow_control(struct enetc_pf *pf, bool enable) +{ + struct enetc_si *si = pf->si; + u32 old_val, val; + + if (!pf->caps.half_duplex) + return; + + old_val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0)); + val = u32_replace_bits(old_val, enable ? 1 : 0, PM_CMD_CFG_HD_FCEN); + if (val == old_val) + return; + + enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val); +} + +static void enetc4_set_rx_pause(struct enetc_pf *pf, bool rx_pause) +{ + struct enetc_si *si = pf->si; + u32 old_val, val; + + old_val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0)); + val = u32_replace_bits(old_val, rx_pause ? 
0 : 1, PM_CMD_CFG_PAUSE_IGN); + if (val == old_val) + return; + + enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val); +} + +static void enetc4_set_tx_pause(struct enetc_pf *pf, int num_rxbdr, bool tx_pause) +{ + u32 pause_off_thresh = 0, pause_on_thresh = 0; + u32 init_quanta = 0, refresh_quanta = 0; + struct enetc_hw *hw = &pf->si->hw; + u32 rbmr, old_rbmr; + int i; + + for (i = 0; i < num_rxbdr; i++) { + old_rbmr = enetc_rxbdr_rd(hw, i, ENETC_RBMR); + rbmr = u32_replace_bits(old_rbmr, tx_pause ? 1 : 0, ENETC_RBMR_CM); + if (rbmr == old_rbmr) + continue; + + enetc_rxbdr_wr(hw, i, ENETC_RBMR, rbmr); + } + + if (tx_pause) { + /* When the port first enters congestion, send a PAUSE request + * with the maximum number of quanta. When the port exits + * congestion, it will automatically send a PAUSE frame with + * zero quanta. + */ + init_quanta = 0xffff; + + /* Also, set up the refresh timer to send follow-up PAUSE + * frames at half the quanta value, in case the congestion + * condition persists. + */ + refresh_quanta = 0xffff / 2; + + /* Start emitting PAUSE frames when 3 large frames (or more + * smaller frames) have accumulated in the FIFO waiting to be + * DMAed to the RX ring. + */ + pause_on_thresh = 3 * ENETC_MAC_MAXFRM_SIZE; + pause_off_thresh = 1 * ENETC_MAC_MAXFRM_SIZE; + } + + enetc_port_mac_wr(pf->si, ENETC4_PM_PAUSE_QUANTA(0), init_quanta); + enetc_port_mac_wr(pf->si, ENETC4_PM_PAUSE_THRESH(0), refresh_quanta); + enetc_port_wr(hw, ENETC4_PPAUONTR, pause_on_thresh); + enetc_port_wr(hw, ENETC4_PPAUOFFTR, pause_off_thresh); +} + +static void enetc4_enable_mac(struct enetc_pf *pf, bool en) +{ + struct enetc_si *si = pf->si; + u32 val; + + val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0)); + val &= ~(PM_CMD_CFG_TX_EN | PM_CMD_CFG_RX_EN); + val |= en ? (PM_CMD_CFG_TX_EN | PM_CMD_CFG_RX_EN) : 0; + + enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val); +} + +static void enetc4_pl_mac_link_up(struct phylink_config *config, + struct phy_device *phy, unsigned int mode, + phy_interface_t interface, int speed, + int duplex, bool tx_pause, bool rx_pause) +{ + struct enetc_pf *pf = phylink_to_enetc_pf(config); + struct enetc_si *si = pf->si; + struct enetc_ndev_priv *priv; + bool hd_fc = false; + + priv = netdev_priv(si->ndev); + enetc4_set_port_speed(priv, speed); + + if (!phylink_autoneg_inband(mode) && + phy_interface_mode_is_rgmii(interface)) + enetc4_set_rgmii_mac(pf, speed, duplex); + + if (interface == PHY_INTERFACE_MODE_RMII) + enetc4_set_rmii_mac(pf, speed, duplex); + + if (duplex == DUPLEX_FULL) { + /* When preemption is enabled, generation of PAUSE frames + * must be disabled, as stated in the IEEE 802.3 standard. + */ + if (priv->active_offloads & ENETC_F_QBU) + tx_pause = false; + } else { /* DUPLEX_HALF */ + if (tx_pause || rx_pause) + hd_fc = true; + + /* As per 802.3 annex 31B, PAUSE frames are only supported + * when the link is configured for full duplex operation. 
+ */ + tx_pause = false; + rx_pause = false; + } + + enetc4_set_hd_flow_control(pf, hd_fc); + enetc4_set_tx_pause(pf, priv->num_rx_rings, tx_pause); + enetc4_set_rx_pause(pf, rx_pause); + enetc4_enable_mac(pf, true); +} + +static void enetc4_pl_mac_link_down(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) +{ + struct enetc_pf *pf = phylink_to_enetc_pf(config); + + enetc4_enable_mac(pf, false); +} + +static const struct phylink_mac_ops enetc_pl_mac_ops = { + .mac_select_pcs = enetc4_pl_mac_select_pcs, + .mac_config = enetc4_pl_mac_config, + .mac_link_up = enetc4_pl_mac_link_up, + .mac_link_down = enetc4_pl_mac_link_down, +}; + +static void enetc4_pci_remove(void *data) +{ + struct pci_dev *pdev = data; + + enetc_pci_remove(pdev); +} + +static int enetc4_link_init(struct enetc_ndev_priv *priv, + struct device_node *node) +{ + struct enetc_pf *pf = enetc_si_priv(priv->si); + struct device *dev = priv->dev; + int err; + + err = of_get_phy_mode(node, &pf->if_mode); + if (err) { + dev_err(dev, "Failed to get PHY mode\n"); + return err; + } + + err = enetc_mdiobus_create(pf, node); + if (err) { + dev_err(dev, "Failed to create MDIO bus\n"); + return err; + } + + err = enetc_phylink_create(priv, node, &enetc_pl_mac_ops); + if (err) { + dev_err(dev, "Failed to create phylink\n"); + goto err_phylink_create; + } + + return 0; + +err_phylink_create: + enetc_mdiobus_destroy(pf); + + return err; +} + +static void enetc4_link_deinit(struct enetc_ndev_priv *priv) +{ + struct enetc_pf *pf = enetc_si_priv(priv->si); + + enetc_phylink_destroy(priv); + enetc_mdiobus_destroy(pf); +} + +static int enetc4_psi_wq_task_init(struct enetc_si *si) +{ + char wq_name[24]; + + INIT_WORK(&si->rx_mode_task, enetc4_psi_do_set_rx_mode); + snprintf(wq_name, sizeof(wq_name), "enetc-%s", pci_name(si->pdev)); + si->workqueue = create_singlethread_workqueue(wq_name); + if (!si->workqueue) + return -ENOMEM; + + return 0; +} + +static int enetc4_pf_netdev_create(struct enetc_si *si) +{ + struct device *dev = &si->pdev->dev; + struct enetc_ndev_priv *priv; + struct net_device *ndev; + int err; + + ndev = alloc_etherdev_mqs(sizeof(struct enetc_ndev_priv), + si->num_tx_rings, si->num_rx_rings); + if (!ndev) + return -ENOMEM; + + priv = netdev_priv(ndev); + priv->ref_clk = devm_clk_get_optional(dev, "ref"); + if (IS_ERR(priv->ref_clk)) { + dev_err(dev, "Get reference clock failed\n"); + err = PTR_ERR(priv->ref_clk); + goto err_clk_get; + } + + enetc_pf_netdev_setup(si, ndev, &enetc4_ndev_ops); + + enetc_init_si_rings_params(priv); + + err = enetc_configure_si(priv); + if (err) { + dev_err(dev, "Failed to configure SI\n"); + goto err_config_si; + } + + err = enetc_alloc_msix(priv); + if (err) { + dev_err(dev, "Failed to alloc MSI-X\n"); + goto err_alloc_msix; + } + + err = enetc4_link_init(priv, dev->of_node); + if (err) + goto err_link_init; + + err = enetc4_psi_wq_task_init(si); + if (err) { + dev_err(dev, "Failed to init workqueue\n"); + goto err_wq_init; + } + + err = register_netdev(ndev); + if (err) { + dev_err(dev, "Failed to register netdev\n"); + goto err_reg_netdev; + } + + return 0; + +err_reg_netdev: + destroy_workqueue(si->workqueue); +err_wq_init: + enetc4_link_deinit(priv); +err_link_init: + enetc_free_msix(priv); +err_alloc_msix: +err_config_si: +err_clk_get: + free_netdev(ndev); + + return err; +} + +static void enetc4_pf_netdev_destroy(struct enetc_si *si) +{ + struct enetc_ndev_priv *priv = netdev_priv(si->ndev); + struct net_device *ndev = si->ndev; + + unregister_netdev(ndev); + 
cancel_work(&si->rx_mode_task); + destroy_workqueue(si->workqueue); + enetc4_link_deinit(priv); + enetc_free_msix(priv); + free_netdev(ndev); +} + +static const struct enetc_si_ops enetc4_psi_ops = { + .get_rss_table = enetc4_get_rss_table, + .set_rss_table = enetc4_set_rss_table, +}; + +static int enetc4_pf_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct device *dev = &pdev->dev; + struct enetc_si *si; + struct enetc_pf *pf; + int err; + + err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf)); + if (err) + return dev_err_probe(dev, err, "PCIe probing failed\n"); + + err = devm_add_action_or_reset(dev, enetc4_pci_remove, pdev); + if (err) + return dev_err_probe(dev, err, + "Add enetc4_pci_remove() action failed\n"); + + /* si is the private data. */ + si = pci_get_drvdata(pdev); + if (!si->hw.port || !si->hw.global) + return dev_err_probe(dev, -ENODEV, + "Couldn't map PF only space\n"); + + si->revision = enetc_get_ip_revision(&si->hw); + si->ops = &enetc4_psi_ops; + err = enetc_get_driver_data(si); + if (err) + return dev_err_probe(dev, err, + "Could not get PF driver data\n"); + + err = enetc4_pf_struct_init(si); + if (err) + return err; + + pf = enetc_si_priv(si); + err = enetc4_pf_init(pf); + if (err) + return err; + + enetc_get_si_caps(si); + + err = enetc4_pf_netdev_create(si); + if (err) + goto err_netdev_create; + + enetc_create_debugfs(si); + + return 0; + +err_netdev_create: + enetc4_pf_free(pf); + + return err; +} + +static void enetc4_pf_remove(struct pci_dev *pdev) +{ + struct enetc_si *si = pci_get_drvdata(pdev); + struct enetc_pf *pf = enetc_si_priv(si); + + enetc_remove_debugfs(si); + enetc4_pf_netdev_destroy(si); + enetc4_pf_free(pf); +} + +static const struct pci_device_id enetc4_pf_id_table[] = { + { PCI_DEVICE(NXP_ENETC_VENDOR_ID, NXP_ENETC_PF_DEV_ID) }, + { 0, } /* End of table. 
*/ +}; +MODULE_DEVICE_TABLE(pci, enetc4_pf_id_table); + +static struct pci_driver enetc4_pf_driver = { + .name = KBUILD_MODNAME, + .id_table = enetc4_pf_id_table, + .probe = enetc4_pf_probe, + .remove = enetc4_pf_remove, +}; +module_pci_driver(enetc4_pf_driver); + +MODULE_DESCRIPTION("ENETC4 PF Driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c index 20bfdf7fb4b4..3d5f31879d5c 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c @@ -60,6 +60,44 @@ void enetc_teardown_cbdr(struct enetc_cbdr *cbdr) } EXPORT_SYMBOL_GPL(enetc_teardown_cbdr); +int enetc4_setup_cbdr(struct enetc_si *si) +{ + struct ntmp_user *user = &si->ntmp_user; + struct device *dev = &si->pdev->dev; + struct enetc_hw *hw = &si->hw; + struct netc_cbdr_regs regs; + + user->cbdr_num = 1; + user->dev = dev; + user->ring = devm_kcalloc(dev, user->cbdr_num, + sizeof(struct netc_cbdr), GFP_KERNEL); + if (!user->ring) + return -ENOMEM; + + /* set CBDR cache attributes */ + enetc_wr(hw, ENETC_SICAR2, + ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); + + regs.pir = hw->reg + ENETC_SICBDRPIR; + regs.cir = hw->reg + ENETC_SICBDRCIR; + regs.mr = hw->reg + ENETC_SICBDRMR; + regs.bar0 = hw->reg + ENETC_SICBDRBAR0; + regs.bar1 = hw->reg + ENETC_SICBDRBAR1; + regs.lenr = hw->reg + ENETC_SICBDRLENR; + + return ntmp_init_cbdr(user->ring, dev, ®s); +} +EXPORT_SYMBOL_GPL(enetc4_setup_cbdr); + +void enetc4_teardown_cbdr(struct enetc_si *si) +{ + struct ntmp_user *user = &si->ntmp_user; + + ntmp_free_cbdr(user->ring); + user->dev = NULL; +} +EXPORT_SYMBOL_GPL(enetc4_teardown_cbdr); + static void enetc_clean_cbdr(struct enetc_cbdr *ring) { struct enetc_cbd *dest_cbd; @@ -256,3 +294,15 @@ int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count) return enetc_cmd_rss_table(si, (u32 *)table, count, false); } EXPORT_SYMBOL_GPL(enetc_set_rss_table); + +int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count) +{ + return ntmp_rsst_query_entry(&si->ntmp_user, table, count); +} +EXPORT_SYMBOL_GPL(enetc4_get_rss_table); + +int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count) +{ + return ntmp_rsst_update_entry(&si->ntmp_user, table, count); +} +EXPORT_SYMBOL_GPL(enetc4_set_rss_table); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index f7753ea5b57e..d38cd36be4a6 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -141,7 +141,7 @@ static const struct { static const struct { int reg; - char name[ETH_GSTRING_LEN]; + char name[ETH_GSTRING_LEN] __nonstring; } enetc_port_counters[] = { { ENETC_PM_REOCT(0), "MAC rx ethernet octets" }, { ENETC_PM_RALN(0), "MAC rx alignment errors" }, @@ -247,38 +247,25 @@ static int enetc_get_sset_count(struct net_device *ndev, int sset) static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data) { struct enetc_ndev_priv *priv = netdev_priv(ndev); - u8 *p = data; int i, j; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) { - strscpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < priv->num_tx_rings; i++) { - for (j = 0; j < ARRAY_SIZE(tx_ring_stats); j++) { - snprintf(p, ETH_GSTRING_LEN, tx_ring_stats[j], - i); - p += ETH_GSTRING_LEN; - } - } - for (i = 
0; i < priv->num_rx_rings; i++) { - for (j = 0; j < ARRAY_SIZE(rx_ring_stats); j++) { - snprintf(p, ETH_GSTRING_LEN, rx_ring_stats[j], - i); - p += ETH_GSTRING_LEN; - } - } + for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) + ethtool_puts(&data, enetc_si_counters[i].name); + for (i = 0; i < priv->num_tx_rings; i++) + for (j = 0; j < ARRAY_SIZE(tx_ring_stats); j++) + ethtool_sprintf(&data, tx_ring_stats[j], i); + for (i = 0; i < priv->num_rx_rings; i++) + for (j = 0; j < ARRAY_SIZE(rx_ring_stats); j++) + ethtool_sprintf(&data, rx_ring_stats[j], i); if (!enetc_si_is_pf(priv->si)) break; - for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) { - strscpy(p, enetc_port_counters[i].name, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) + ethtool_cpy(&data, enetc_port_counters[i].name); + break; } } @@ -638,6 +625,29 @@ static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc, return 0; } +/* i.MX95 ENETC does not support RFS table, but we can use ingress port + * filter table to implement Wake-on-LAN filter or drop the matched flow, + * so the implementation will be different from enetc_get_rxnfc() and + * enetc_set_rxnfc(). Therefore, add enetc4_get_rxnfc() for ENETC v4 PF. + */ +static int enetc4_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc, + u32 *rule_locs) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + switch (rxnfc->cmd) { + case ETHTOOL_GRXRINGS: + rxnfc->data = priv->num_rx_rings; + break; + case ETHTOOL_GRXFH: + return enetc_get_rsshash(rxnfc); + default: + return -EOPNOTSUPP; + } + + return 0; +} + static int enetc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc) { struct enetc_ndev_priv *priv = netdev_priv(ndev); @@ -690,36 +700,53 @@ static u32 enetc_get_rxfh_indir_size(struct net_device *ndev) return priv->si->num_rss; } +static int enetc_get_rss_key_base(struct enetc_si *si) +{ + if (is_enetc_rev1(si)) + return ENETC_PRSSK(0); + + return ENETC4_PRSSKR(0); +} + +static void enetc_get_rss_key(struct enetc_si *si, const u8 *key) +{ + int base = enetc_get_rss_key_base(si); + struct enetc_hw *hw = &si->hw; + int i; + + for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++) + ((u32 *)key)[i] = enetc_port_rd(hw, base + i * 4); +} + static int enetc_get_rxfh(struct net_device *ndev, struct ethtool_rxfh_param *rxfh) { struct enetc_ndev_priv *priv = netdev_priv(ndev); - struct enetc_hw *hw = &priv->si->hw; - int err = 0, i; + struct enetc_si *si = priv->si; + int err = 0; /* return hash function */ rxfh->hfunc = ETH_RSS_HASH_TOP; /* return hash key */ - if (rxfh->key && hw->port) - for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++) - ((u32 *)rxfh->key)[i] = enetc_port_rd(hw, - ENETC_PRSSK(i)); + if (rxfh->key && enetc_si_is_pf(si)) + enetc_get_rss_key(si, rxfh->key); /* return RSS table */ if (rxfh->indir) - err = enetc_get_rss_table(priv->si, rxfh->indir, - priv->si->num_rss); + err = si->ops->get_rss_table(si, rxfh->indir, si->num_rss); return err; } -void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes) +void enetc_set_rss_key(struct enetc_si *si, const u8 *bytes) { + int base = enetc_get_rss_key_base(si); + struct enetc_hw *hw = &si->hw; int i; for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++) - enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]); + enetc_port_wr(hw, base + i * 4, ((u32 *)bytes)[i]); } EXPORT_SYMBOL_GPL(enetc_set_rss_key); @@ -728,17 +755,16 @@ static int enetc_set_rxfh(struct net_device *ndev, struct netlink_ext_ack *extack) { struct 
enetc_ndev_priv *priv = netdev_priv(ndev); - struct enetc_hw *hw = &priv->si->hw; + struct enetc_si *si = priv->si; int err = 0; /* set hash key, if PF */ - if (rxfh->key && hw->port) - enetc_set_rss_key(hw, rxfh->key); + if (rxfh->key && enetc_si_is_pf(si)) + enetc_set_rss_key(si, rxfh->key); /* set RSS table */ if (rxfh->indir) - err = enetc_set_rss_table(priv->si, rxfh->indir, - priv->si->num_rss); + err = si->ops->set_rss_table(si, rxfh->indir, si->num_rss); return err; } @@ -775,9 +801,10 @@ static int enetc_get_coalesce(struct net_device *ndev, { struct enetc_ndev_priv *priv = netdev_priv(ndev); struct enetc_int_vector *v = priv->int_vector[0]; + u64 clk_freq = priv->sysclk_freq; - ic->tx_coalesce_usecs = enetc_cycles_to_usecs(priv->tx_ictt); - ic->rx_coalesce_usecs = enetc_cycles_to_usecs(v->rx_ictt); + ic->tx_coalesce_usecs = enetc_cycles_to_usecs(priv->tx_ictt, clk_freq); + ic->rx_coalesce_usecs = enetc_cycles_to_usecs(v->rx_ictt, clk_freq); ic->tx_max_coalesced_frames = ENETC_TXIC_PKTTHR; ic->rx_max_coalesced_frames = ENETC_RXIC_PKTTHR; @@ -793,12 +820,13 @@ static int enetc_set_coalesce(struct net_device *ndev, struct netlink_ext_ack *extack) { struct enetc_ndev_priv *priv = netdev_priv(ndev); + u64 clk_freq = priv->sysclk_freq; u32 rx_ictt, tx_ictt; int i, ic_mode; bool changed; - tx_ictt = enetc_usecs_to_cycles(ic->tx_coalesce_usecs); - rx_ictt = enetc_usecs_to_cycles(ic->rx_coalesce_usecs); + tx_ictt = enetc_usecs_to_cycles(ic->tx_coalesce_usecs, clk_freq); + rx_ictt = enetc_usecs_to_cycles(ic->rx_coalesce_usecs, clk_freq); if (ic->rx_max_coalesced_frames != ENETC_RXIC_PKTTHR) return -EOPNOTSUPP; @@ -841,36 +869,37 @@ static int enetc_set_coalesce(struct net_device *ndev, } static int enetc_get_ts_info(struct net_device *ndev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { + struct enetc_ndev_priv *priv = netdev_priv(ndev); int *phc_idx; phc_idx = symbol_get(enetc_phc_index); if (phc_idx) { info->phc_index = *phc_idx; symbol_put(enetc_phc_index); - } else { - info->phc_index = -1; } -#ifdef CONFIG_FSL_ENETC_PTP_CLOCK + if (!IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) { + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE; + + return 0; + } + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE | - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; + SOF_TIMESTAMPING_TX_SOFTWARE; info->tx_types = (1 << HWTSTAMP_TX_OFF) | - (1 << HWTSTAMP_TX_ON) | - (1 << HWTSTAMP_TX_ONESTEP_SYNC); + (1 << HWTSTAMP_TX_ON); + + if (enetc_si_is_pf(priv->si)) + info->tx_types |= (1 << HWTSTAMP_TX_ONESTEP_SYNC); + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_ALL); -#else - info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; -#endif + return 0; } @@ -1180,7 +1209,7 @@ void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link) } EXPORT_SYMBOL_GPL(enetc_mm_link_state_update); -static const struct ethtool_ops enetc_pf_ethtool_ops = { +const struct ethtool_ops enetc_pf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USE_ADAPTIVE_RX, @@ -1215,7 +1244,7 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = { .get_mm_stats = enetc_get_mm_stats, }; -static const struct ethtool_ops enetc_vf_ethtool_ops = { +const struct ethtool_ops enetc_vf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS 
| ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USE_ADAPTIVE_RX, @@ -1236,13 +1265,31 @@ static const struct ethtool_ops enetc_vf_ethtool_ops = { .get_ts_info = enetc_get_ts_info, }; +const struct ethtool_ops enetc4_pf_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, + .get_ringparam = enetc_get_ringparam, + .get_coalesce = enetc_get_coalesce, + .set_coalesce = enetc_set_coalesce, + .get_link_ksettings = enetc_get_link_ksettings, + .set_link_ksettings = enetc_set_link_ksettings, + .get_link = ethtool_op_get_link, + .get_wol = enetc_get_wol, + .set_wol = enetc_set_wol, + .get_pauseparam = enetc_get_pauseparam, + .set_pauseparam = enetc_set_pauseparam, + .get_rxnfc = enetc4_get_rxnfc, + .get_rxfh_key_size = enetc_get_rxfh_key_size, + .get_rxfh_indir_size = enetc_get_rxfh_indir_size, + .get_rxfh = enetc_get_rxfh, + .set_rxfh = enetc_set_rxfh, +}; + void enetc_set_ethtool_ops(struct net_device *ndev) { struct enetc_ndev_priv *priv = netdev_priv(ndev); - if (enetc_si_is_pf(priv->si)) - ndev->ethtool_ops = &enetc_pf_ethtool_ops; - else - ndev->ethtool_ops = &enetc_vf_ethtool_ops; + ndev->ethtool_ops = priv->si->drvdata->eth_ops; } EXPORT_SYMBOL_GPL(enetc_set_ethtool_ops); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index 1619943fb263..4098f01479bc 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -6,6 +6,8 @@ #define ENETC_MM_VERIFY_SLEEP_US USEC_PER_MSEC #define ENETC_MM_VERIFY_RETRIES 3 +#define ENETC_NUM_TC 8 + /* ENETC device IDs */ #define ENETC_DEV_ID_PF 0xe100 #define ENETC_DEV_ID_VF 0xef00 @@ -21,10 +23,9 @@ #define ENETC_SICTR0 0x18 #define ENETC_SICTR1 0x1c #define ENETC_SIPCAPR0 0x20 -#define ENETC_SIPCAPR0_PSFP BIT(9) #define ENETC_SIPCAPR0_RSS BIT(8) -#define ENETC_SIPCAPR0_QBV BIT(4) -#define ENETC_SIPCAPR0_QBU BIT(3) +#define ENETC_SIPCAPR0_RFS BIT(2) +#define ENETC_SIPCAPR0_LSO BIT(1) #define ENETC_SIPCAPR1 0x24 #define ENETC_SITGTGR 0x30 #define ENETC_SIRBGCR 0x38 @@ -191,6 +192,9 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_PCAPR0 0x0900 #define ENETC_PCAPR0_RXBDR(val) ((val) >> 24) #define ENETC_PCAPR0_TXBDR(val) (((val) >> 16) & 0xff) +#define ENETC_PCAPR0_PSFP BIT(9) +#define ENETC_PCAPR0_QBV BIT(4) +#define ENETC_PCAPR0_QBU BIT(3) #define ENETC_PCAPR1 0x0904 #define ENETC_PSICFGR0(n) (0x0940 + (n) * 0xc) /* n = SI index */ #define ENETC_PSICFGR0_SET_TXBDR(val) ((val) & 0xff) @@ -368,6 +372,10 @@ enum enetc_bdr_type {TX, RX}; /** Global regs, offset: 2_0000h */ #define ENETC_GLOBAL_BASE 0x20000 #define ENETC_G_EIPBRR0 0x0bf8 +#define EIPBRR0_REVISION GENMASK(15, 0) +#define ENETC_REV_1_0 0x0100 +#define ENETC_REV_4_1 0X0401 + #define ENETC_G_EIPBRR1 0x0bfc #define ENETC_G_EPFBLPR(n) (0xd00 + 4 * (n)) #define ENETC_G_EPFBLPR1_XGMII 0x80000000 @@ -396,18 +404,22 @@ struct enetc_hw { */ extern rwlock_t enetc_mdio_lock; +DECLARE_STATIC_KEY_FALSE(enetc_has_err050089); + /* use this locking primitive only on the fast datapath to * group together multiple non-MDIO register accesses to * minimize the overhead of the lock */ static inline void enetc_lock_mdio(void) { - read_lock(&enetc_mdio_lock); + if (static_branch_unlikely(&enetc_has_err050089)) + read_lock(&enetc_mdio_lock); } static inline void enetc_unlock_mdio(void) { - read_unlock(&enetc_mdio_lock); + if (static_branch_unlikely(&enetc_has_err050089)) + read_unlock(&enetc_mdio_lock); } /* use these 
accessors only on the fast datapath under @@ -416,14 +428,16 @@ static inline void enetc_unlock_mdio(void) */ static inline u32 enetc_rd_reg_hot(void __iomem *reg) { - lockdep_assert_held(&enetc_mdio_lock); + if (static_branch_unlikely(&enetc_has_err050089)) + lockdep_assert_held(&enetc_mdio_lock); return ioread32(reg); } static inline void enetc_wr_reg_hot(void __iomem *reg, u32 val) { - lockdep_assert_held(&enetc_mdio_lock); + if (static_branch_unlikely(&enetc_has_err050089)) + lockdep_assert_held(&enetc_mdio_lock); iowrite32(val, reg); } @@ -452,9 +466,13 @@ static inline u32 _enetc_rd_mdio_reg_wa(void __iomem *reg) unsigned long flags; u32 val; - write_lock_irqsave(&enetc_mdio_lock, flags); - val = ioread32(reg); - write_unlock_irqrestore(&enetc_mdio_lock, flags); + if (static_branch_unlikely(&enetc_has_err050089)) { + write_lock_irqsave(&enetc_mdio_lock, flags); + val = ioread32(reg); + write_unlock_irqrestore(&enetc_mdio_lock, flags); + } else { + val = ioread32(reg); + } return val; } @@ -463,9 +481,13 @@ static inline void _enetc_wr_mdio_reg_wa(void __iomem *reg, u32 val) { unsigned long flags; - write_lock_irqsave(&enetc_mdio_lock, flags); - iowrite32(val, reg); - write_unlock_irqrestore(&enetc_mdio_lock, flags); + if (static_branch_unlikely(&enetc_has_err050089)) { + write_lock_irqsave(&enetc_mdio_lock, flags); + iowrite32(val, reg); + write_unlock_irqrestore(&enetc_mdio_lock, flags); + } else { + iowrite32(val, reg); + } } #ifdef ioread64 @@ -533,11 +555,23 @@ static inline u64 _enetc_rd_reg64_wa(void __iomem *reg) union enetc_tx_bd { struct { __le64 addr; - __le16 buf_len; + union { + __le16 buf_len; + __le16 hdr_len; /* For LSO, ENETC 4.1 and later */ + }; __le16 frm_len; union { struct { - u8 reserved[3]; + u8 l3_aux0; +#define ENETC_TX_BD_L3_START GENMASK(6, 0) +#define ENETC_TX_BD_IPCS BIT(7) + u8 l3_aux1; +#define ENETC_TX_BD_L3_HDR_LEN GENMASK(6, 0) +#define ENETC_TX_BD_L3T BIT(7) + u8 l4_aux; +#define ENETC_TX_BD_L4T GENMASK(7, 5) +#define ENETC_TXBD_L4T_UDP 1 +#define ENETC_TXBD_L4T_TCP 2 u8 flags; }; /* default layout */ __le32 txstart; @@ -548,23 +582,27 @@ union enetc_tx_bd { __le32 tstamp; __le16 tpid; __le16 vid; - u8 reserved[6]; + __le16 lso_sg_size; /* For ENETC 4.1 and later */ + __le16 frm_len_ext; /* For ENETC 4.1 and later */ + u8 reserved[2]; u8 e_flags; u8 flags; } ext; /* Tx BD extension */ struct { __le32 tstamp; - u8 reserved[10]; + u8 reserved[8]; + __le16 lso_err_count; /* For ENETC 4.1 and later */ u8 status; u8 flags; } wb; /* writeback descriptor */ }; enum enetc_txbd_flags { - ENETC_TXBD_FLAGS_RES0 = BIT(0), /* reserved */ + ENETC_TXBD_FLAGS_L4CS = BIT(0), /* For ENETC 4.1 and later */ ENETC_TXBD_FLAGS_TSE = BIT(1), + ENETC_TXBD_FLAGS_LSO = BIT(1), /* For ENETC 4.1 and later */ ENETC_TXBD_FLAGS_W = BIT(2), - ENETC_TXBD_FLAGS_RES3 = BIT(3), /* reserved */ + ENETC_TXBD_FLAGS_CSUM_LSO = BIT(3), /* For ENETC 4.1 and later */ ENETC_TXBD_FLAGS_TXSTART = BIT(4), ENETC_TXBD_FLAGS_EX = BIT(6), ENETC_TXBD_FLAGS_F = BIT(7) @@ -633,6 +671,8 @@ union enetc_rx_bd { #define ENETC_CBD_FLAGS_SF BIT(7) /* short format */ #define ENETC_CBD_STATUS_MASK 0xf +#define ENETC_TPID_8021Q 0 + struct enetc_cmd_rfse { u8 smac_h[6]; u8 smac_m[6]; @@ -957,15 +997,17 @@ struct enetc_cbd { u8 status_flags; }; -#define ENETC_CLK 400000000ULL -static inline u32 enetc_cycles_to_usecs(u32 cycles) +#define ENETC_CLK_400M 400000000ULL +#define ENETC_CLK_333M 333000000ULL + +static inline u32 enetc_cycles_to_usecs(u32 cycles, u64 clk_freq) { - return (u32)div_u64(cycles * 1000000ULL, 
ENETC_CLK); + return (u32)div_u64(cycles * 1000000ULL, clk_freq); } -static inline u32 enetc_usecs_to_cycles(u32 usecs) +static inline u32 enetc_usecs_to_cycles(u32 usecs, u64 clk_freq) { - return (u32)div_u64(usecs * ENETC_CLK, 1000000ULL); + return (u32)div_u64(usecs * clk_freq, 1000000ULL); } /* Port traffic class frame preemption register */ diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c index a1b595bd7993..e108cac8288d 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c @@ -4,11 +4,35 @@ #include <linux/of_mdio.h> #include "enetc_pf.h" +#define NETC_EMDIO_VEN_ID 0x1131 +#define NETC_EMDIO_DEV_ID 0xee00 #define ENETC_MDIO_DEV_ID 0xee01 #define ENETC_MDIO_DEV_NAME "FSL PCIe IE Central MDIO" #define ENETC_MDIO_BUS_NAME ENETC_MDIO_DEV_NAME " Bus" #define ENETC_MDIO_DRV_NAME ENETC_MDIO_DEV_NAME " driver" +DEFINE_STATIC_KEY_FALSE(enetc_has_err050089); +EXPORT_SYMBOL_GPL(enetc_has_err050089); + +static void enetc_emdio_enable_err050089(struct pci_dev *pdev) +{ + if (pdev->vendor == PCI_VENDOR_ID_FREESCALE && + pdev->device == ENETC_MDIO_DEV_ID) { + static_branch_inc(&enetc_has_err050089); + dev_info(&pdev->dev, "Enabled ERR050089 workaround\n"); + } +} + +static void enetc_emdio_disable_err050089(struct pci_dev *pdev) +{ + if (pdev->vendor == PCI_VENDOR_ID_FREESCALE && + pdev->device == ENETC_MDIO_DEV_ID) { + static_branch_dec(&enetc_has_err050089); + if (!static_key_enabled(&enetc_has_err050089.key)) + dev_info(&pdev->dev, "Disabled ERR050089 workaround\n"); + } +} + static int enetc_pci_mdio_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -62,6 +86,8 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev, goto err_pci_mem_reg; } + enetc_emdio_enable_err050089(pdev); + err = of_mdiobus_register(bus, dev->of_node); if (err) goto err_mdiobus_reg; @@ -71,6 +97,7 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev, return 0; err_mdiobus_reg: + enetc_emdio_disable_err050089(pdev); pci_release_region(pdev, 0); err_pci_mem_reg: pci_disable_device(pdev); @@ -88,6 +115,9 @@ static void enetc_pci_mdio_remove(struct pci_dev *pdev) struct enetc_mdio_priv *mdio_priv; mdiobus_unregister(bus); + + enetc_emdio_disable_err050089(pdev); + mdio_priv = bus->priv; iounmap(mdio_priv->hw->port); pci_release_region(pdev, 0); @@ -96,6 +126,7 @@ static void enetc_pci_mdio_remove(struct pci_dev *pdev) static const struct pci_device_id enetc_pci_mdio_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_MDIO_DEV_ID) }, + { PCI_DEVICE(NETC_EMDIO_VEN_ID, NETC_EMDIO_DEV_ID) }, { 0, } /* End of table. 
*/ }; MODULE_DEVICE_TABLE(pci, enetc_pci_mdio_id_table); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 11b14555802c..f63a29e2e031 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -1,16 +1,14 @@ // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) /* Copyright 2017-2019 NXP */ -#include <asm/unaligned.h> -#include <linux/mdio.h> +#include <linux/unaligned.h> #include <linux/module.h> -#include <linux/fsl/enetc_mdio.h> +#include <linux/of.h> #include <linux/of_platform.h> -#include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/pcs-lynx.h> #include "enetc_ierb.h" -#include "enetc_pf.h" +#include "enetc_pf_common.h" #define ENETC_DRV_NAME_STR "ENETC PF driver" @@ -33,18 +31,15 @@ static void enetc_pf_set_primary_mac_addr(struct enetc_hw *hw, int si, __raw_writew(lower, hw->port + ENETC_PSIPMAR1(si)); } -static int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr) +static struct phylink_pcs *enetc_pf_create_pcs(struct enetc_pf *pf, + struct mii_bus *bus) { - struct enetc_ndev_priv *priv = netdev_priv(ndev); - struct sockaddr *saddr = addr; - - if (!is_valid_ether_addr(saddr->sa_data)) - return -EADDRNOTAVAIL; - - eth_hw_addr_set(ndev, saddr->sa_data); - enetc_pf_set_primary_mac_addr(&priv->si->hw, 0, saddr->sa_data); + return lynx_pcs_create_mdiodev(bus, 0); +} - return 0; +static void enetc_pf_destroy_pcs(struct phylink_pcs *pcs) +{ + lynx_pcs_destroy(pcs); } static void enetc_set_vlan_promisc(struct enetc_hw *hw, char si_map) @@ -77,30 +72,6 @@ static void enetc_set_isol_vlan(struct enetc_hw *hw, int si, u16 vlan, u8 qos) enetc_port_wr(hw, ENETC_PSIVLANR(si), val); } -static int enetc_mac_addr_hash_idx(const u8 *addr) -{ - u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16; - u64 mask = 0; - int res = 0; - int i; - - for (i = 0; i < 8; i++) - mask |= BIT_ULL(i * 6); - - for (i = 0; i < 6; i++) - res |= (hweight64(fold & (mask << i)) & 0x1) << i; - - return res; -} - -static void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter) -{ - filter->mac_addr_cnt = 0; - - bitmap_zero(filter->mac_hash_table, - ENETC_MADDR_HASH_TBL_SZ); -} - static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter, const unsigned char *addr) { @@ -109,16 +80,6 @@ static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter, filter->mac_addr_cnt++; } -static void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter, - const unsigned char *addr) -{ - int idx = enetc_mac_addr_hash_idx(addr); - - /* add hash table entry */ - __set_bit(idx, filter->mac_hash_table); - filter->mac_addr_cnt++; -} - static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type) { bool err = si->errata & ENETC_ERR_UCMCSWP; @@ -255,67 +216,6 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev) enetc_port_wr(hw, ENETC_PSIPMR, psipmr); } -static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx, - unsigned long hash) -{ - enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), lower_32_bits(hash)); - enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), upper_32_bits(hash)); -} - -static int enetc_vid_hash_idx(unsigned int vid) -{ - int res = 0; - int i; - - for (i = 0; i < 6; i++) - res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i; - - return res; -} - -static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash) -{ - int i; - - if (rehash) { - bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE); - - 
for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) { - int hidx = enetc_vid_hash_idx(i); - - __set_bit(hidx, pf->vlan_ht_filter); - } - } - - enetc_set_vlan_ht_filter(&pf->si->hw, 0, *pf->vlan_ht_filter); -} - -static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid) -{ - struct enetc_ndev_priv *priv = netdev_priv(ndev); - struct enetc_pf *pf = enetc_si_priv(priv->si); - int idx; - - __set_bit(vid, pf->active_vlans); - - idx = enetc_vid_hash_idx(vid); - if (!__test_and_set_bit(idx, pf->vlan_ht_filter)) - enetc_sync_vlan_ht_filter(pf, false); - - return 0; -} - -static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid) -{ - struct enetc_ndev_priv *priv = netdev_priv(ndev); - struct enetc_pf *pf = enetc_si_priv(priv->si); - - __clear_bit(vid, pf->active_vlans); - enetc_sync_vlan_ht_filter(pf, true); - - return 0; -} - static void enetc_set_loopback(struct net_device *ndev, bool en) { struct enetc_ndev_priv *priv = netdev_priv(ndev); @@ -393,56 +293,6 @@ static int enetc_pf_set_vf_spoofchk(struct net_device *ndev, int vf, bool en) return 0; } -static int enetc_setup_mac_address(struct device_node *np, struct enetc_pf *pf, - int si) -{ - struct device *dev = &pf->si->pdev->dev; - struct enetc_hw *hw = &pf->si->hw; - u8 mac_addr[ETH_ALEN] = { 0 }; - int err; - - /* (1) try to get the MAC address from the device tree */ - if (np) { - err = of_get_mac_address(np, mac_addr); - if (err == -EPROBE_DEFER) - return err; - } - - /* (2) bootloader supplied MAC address */ - if (is_zero_ether_addr(mac_addr)) - enetc_pf_get_primary_mac_addr(hw, si, mac_addr); - - /* (3) choose a random one */ - if (is_zero_ether_addr(mac_addr)) { - eth_random_addr(mac_addr); - dev_info(dev, "no MAC address specified for SI%d, using %pM\n", - si, mac_addr); - } - - enetc_pf_set_primary_mac_addr(hw, si, mac_addr); - - return 0; -} - -static int enetc_setup_mac_addresses(struct device_node *np, - struct enetc_pf *pf) -{ - int err, i; - - /* The PF might take its MAC from the device tree */ - err = enetc_setup_mac_address(np, pf, 0); - if (err) - return err; - - for (i = 0; i < pf->total_vfs; i++) { - err = enetc_setup_mac_address(NULL, pf, i + 1); - if (err) - return err; - } - - return 0; -} - static void enetc_port_assign_rfs_entries(struct enetc_si *si) { struct enetc_pf *pf = enetc_si_priv(si); @@ -464,6 +314,23 @@ static void enetc_port_assign_rfs_entries(struct enetc_si *si) enetc_port_wr(hw, ENETC_PRFSMR, ENETC_PRFSMR_RFSE); } +static void enetc_port_get_caps(struct enetc_si *si) +{ + struct enetc_hw *hw = &si->hw; + u32 val; + + val = enetc_port_rd(hw, ENETC_PCAPR0); + + if (val & ENETC_PCAPR0_QBV) + si->hw_features |= ENETC_SI_F_QBV; + + if (val & ENETC_PCAPR0_QBU) + si->hw_features |= ENETC_SI_F_QBU; + + if (val & ENETC_PCAPR0_PSFP) + si->hw_features |= ENETC_SI_F_PSFP; +} + static void enetc_port_si_configure(struct enetc_si *si) { struct enetc_pf *pf = enetc_si_priv(si); @@ -471,6 +338,8 @@ static void enetc_port_si_configure(struct enetc_si *si) int num_rings, i; u32 val; + enetc_port_get_caps(si); + val = enetc_port_rd(hw, ENETC_PCAPR0); num_rings = min(ENETC_PCAPR0_RXBDR(val), ENETC_PCAPR0_TXBDR(val)); @@ -585,7 +454,6 @@ static void enetc_mac_enable(struct enetc_si *si, bool en) static void enetc_configure_port(struct enetc_pf *pf) { - u8 hash_key[ENETC_RSSHASH_KEY_SIZE]; struct enetc_hw *hw = &pf->si->hw; enetc_configure_port_mac(pf->si); @@ -593,8 +461,7 @@ static void enetc_configure_port(struct enetc_pf *pf) enetc_port_si_configure(pf->si); /* set up hash key */ 
- get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE); - enetc_set_rss_key(hw, hash_key); + enetc_set_default_rss_key(pf); /* split up RFS entries */ enetc_port_assign_rfs_entries(pf->si); @@ -665,19 +532,11 @@ static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs) if (!num_vfs) { enetc_msg_psi_free(pf); - kfree(pf->vf_state); pf->num_vfs = 0; pci_disable_sriov(pdev); } else { pf->num_vfs = num_vfs; - pf->vf_state = kcalloc(num_vfs, sizeof(struct enetc_vf_state), - GFP_KERNEL); - if (!pf->vf_state) { - pf->num_vfs = 0; - return -ENOMEM; - } - err = enetc_msg_psi_init(pf); if (err) { dev_err(&pdev->dev, "enetc_msg_psi_init (%d)\n", err); @@ -696,7 +555,6 @@ static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs) err_en_sriov: enetc_msg_psi_free(pf); err_msg_psi: - kfree(pf->vf_state); pf->num_vfs = 0; return err; @@ -773,189 +631,10 @@ static const struct net_device_ops enetc_ndev_ops = { .ndo_setup_tc = enetc_pf_setup_tc, .ndo_bpf = enetc_setup_bpf, .ndo_xdp_xmit = enetc_xdp_xmit, + .ndo_hwtstamp_get = enetc_hwtstamp_get, + .ndo_hwtstamp_set = enetc_hwtstamp_set, }; -static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, - const struct net_device_ops *ndev_ops) -{ - struct enetc_ndev_priv *priv = netdev_priv(ndev); - - SET_NETDEV_DEV(ndev, &si->pdev->dev); - priv->ndev = ndev; - priv->si = si; - priv->dev = &si->pdev->dev; - si->ndev = ndev; - - priv->msg_enable = (NETIF_MSG_WOL << 1) - 1; - ndev->netdev_ops = ndev_ops; - enetc_set_ethtool_ops(ndev); - ndev->watchdog_timeo = 5 * HZ; - ndev->max_mtu = ENETC_MAX_MTU; - - ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | - NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK | - NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; - ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; - ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM | - NETIF_F_TSO | NETIF_F_TSO6; - - if (si->num_rss) - ndev->hw_features |= NETIF_F_RXHASH; - - ndev->priv_flags |= IFF_UNICAST_FLT; - ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | - NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG | - NETDEV_XDP_ACT_NDO_XMIT_SG; - - if (si->hw_features & ENETC_SI_F_PSFP && !enetc_psfp_enable(priv)) { - priv->active_offloads |= ENETC_F_QCI; - ndev->features |= NETIF_F_HW_TC; - ndev->hw_features |= NETIF_F_HW_TC; - } - - /* pick up primary MAC address from SI */ - enetc_load_primary_mac_addr(&si->hw, ndev); -} - -static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np) -{ - struct device *dev = &pf->si->pdev->dev; - struct enetc_mdio_priv *mdio_priv; - struct mii_bus *bus; - int err; - - bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv)); - if (!bus) - return -ENOMEM; - - bus->name = "Freescale ENETC MDIO Bus"; - bus->read = enetc_mdio_read_c22; - bus->write = enetc_mdio_write_c22; - bus->read_c45 = enetc_mdio_read_c45; - bus->write_c45 = enetc_mdio_write_c45; - bus->parent = dev; - mdio_priv = bus->priv; - mdio_priv->hw = &pf->si->hw; - mdio_priv->mdio_base = ENETC_EMDIO_BASE; - snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); - - err = of_mdiobus_register(bus, np); - if (err) - return dev_err_probe(dev, err, "cannot register MDIO bus\n"); - - pf->mdio = bus; - - return 0; -} - -static void enetc_mdio_remove(struct enetc_pf *pf) -{ - if (pf->mdio) - mdiobus_unregister(pf->mdio); -} - -static int enetc_imdio_create(struct 
enetc_pf *pf) -{ - struct device *dev = &pf->si->pdev->dev; - struct enetc_mdio_priv *mdio_priv; - struct phylink_pcs *phylink_pcs; - struct mii_bus *bus; - int err; - - bus = mdiobus_alloc_size(sizeof(*mdio_priv)); - if (!bus) - return -ENOMEM; - - bus->name = "Freescale ENETC internal MDIO Bus"; - bus->read = enetc_mdio_read_c22; - bus->write = enetc_mdio_write_c22; - bus->read_c45 = enetc_mdio_read_c45; - bus->write_c45 = enetc_mdio_write_c45; - bus->parent = dev; - bus->phy_mask = ~0; - mdio_priv = bus->priv; - mdio_priv->hw = &pf->si->hw; - mdio_priv->mdio_base = ENETC_PM_IMDIO_BASE; - snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev)); - - err = mdiobus_register(bus); - if (err) { - dev_err(dev, "cannot register internal MDIO bus (%d)\n", err); - goto free_mdio_bus; - } - - phylink_pcs = lynx_pcs_create_mdiodev(bus, 0); - if (IS_ERR(phylink_pcs)) { - err = PTR_ERR(phylink_pcs); - dev_err(dev, "cannot create lynx pcs (%d)\n", err); - goto unregister_mdiobus; - } - - pf->imdio = bus; - pf->pcs = phylink_pcs; - - return 0; - -unregister_mdiobus: - mdiobus_unregister(bus); -free_mdio_bus: - mdiobus_free(bus); - return err; -} - -static void enetc_imdio_remove(struct enetc_pf *pf) -{ - if (pf->pcs) - lynx_pcs_destroy(pf->pcs); - if (pf->imdio) { - mdiobus_unregister(pf->imdio); - mdiobus_free(pf->imdio); - } -} - -static bool enetc_port_has_pcs(struct enetc_pf *pf) -{ - return (pf->if_mode == PHY_INTERFACE_MODE_SGMII || - pf->if_mode == PHY_INTERFACE_MODE_1000BASEX || - pf->if_mode == PHY_INTERFACE_MODE_2500BASEX || - pf->if_mode == PHY_INTERFACE_MODE_USXGMII); -} - -static int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node) -{ - struct device_node *mdio_np; - int err; - - mdio_np = of_get_child_by_name(node, "mdio"); - if (mdio_np) { - err = enetc_mdio_probe(pf, mdio_np); - - of_node_put(mdio_np); - if (err) - return err; - } - - if (enetc_port_has_pcs(pf)) { - err = enetc_imdio_create(pf); - if (err) { - enetc_mdio_remove(pf); - return err; - } - } - - return 0; -} - -static void enetc_mdiobus_destroy(struct enetc_pf *pf) -{ - enetc_mdio_remove(pf); - enetc_imdio_remove(pf); -} - static struct phylink_pcs * enetc_pl_mac_select_pcs(struct phylink_config *config, phy_interface_t iface) { @@ -1101,47 +780,6 @@ static const struct phylink_mac_ops enetc_mac_phylink_ops = { .mac_link_down = enetc_pl_mac_link_down, }; -static int enetc_phylink_create(struct enetc_ndev_priv *priv, - struct device_node *node) -{ - struct enetc_pf *pf = enetc_si_priv(priv->si); - struct phylink *phylink; - int err; - - pf->phylink_config.dev = &priv->ndev->dev; - pf->phylink_config.type = PHYLINK_NETDEV; - pf->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | - MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD; - - __set_bit(PHY_INTERFACE_MODE_INTERNAL, - pf->phylink_config.supported_interfaces); - __set_bit(PHY_INTERFACE_MODE_SGMII, - pf->phylink_config.supported_interfaces); - __set_bit(PHY_INTERFACE_MODE_1000BASEX, - pf->phylink_config.supported_interfaces); - __set_bit(PHY_INTERFACE_MODE_2500BASEX, - pf->phylink_config.supported_interfaces); - __set_bit(PHY_INTERFACE_MODE_USXGMII, - pf->phylink_config.supported_interfaces); - phy_interface_set_rgmii(pf->phylink_config.supported_interfaces); - - phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(node), - pf->if_mode, &enetc_mac_phylink_ops); - if (IS_ERR(phylink)) { - err = PTR_ERR(phylink); - return err; - } - - priv->phylink = phylink; - - return 0; -} - -static void enetc_phylink_destroy(struct 
enetc_ndev_priv *priv) -{ - phylink_destroy(priv->phylink); -} - /* Initialize the entire shared memory for the flow steering entries * of this port (PF + VFs) */ @@ -1206,6 +844,11 @@ static int enetc_pf_register_with_ierb(struct pci_dev *pdev) return enetc_ierb_register_pf(ierb_pdev, pdev); } +static const struct enetc_si_ops enetc_psi_ops = { + .get_rss_table = enetc_get_rss_table, + .set_rss_table = enetc_set_rss_table, +}; + static struct enetc_si *enetc_psi_create(struct pci_dev *pdev) { struct enetc_si *si; @@ -1224,6 +867,14 @@ static struct enetc_si *enetc_psi_create(struct pci_dev *pdev) goto out_pci_remove; } + si->revision = enetc_get_ip_revision(&si->hw); + si->ops = &enetc_psi_ops; + err = enetc_get_driver_data(si); + if (err) { + dev_err(&pdev->dev, "Could not get PF driver data\n"); + goto out_pci_remove; + } + err = enetc_setup_cbdr(&pdev->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE, &si->cbd_ring); if (err) @@ -1259,6 +910,14 @@ static void enetc_psi_destroy(struct pci_dev *pdev) enetc_pci_remove(pdev); } +static const struct enetc_pf_ops enetc_pf_ops = { + .set_si_primary_mac = enetc_pf_set_primary_mac_addr, + .get_si_primary_mac = enetc_pf_get_primary_mac_addr, + .create_pcs = enetc_pf_create_pcs, + .destroy_pcs = enetc_pf_destroy_pcs, + .enable_psfp = enetc_psfp_enable, +}; + static int enetc_pf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -1285,7 +944,15 @@ static int enetc_pf_probe(struct pci_dev *pdev, pf = enetc_si_priv(si); pf->si = si; + pf->ops = &enetc_pf_ops; + pf->total_vfs = pci_sriov_get_totalvfs(pdev); + if (pf->total_vfs) { + pf->vf_state = kcalloc(pf->total_vfs, sizeof(struct enetc_vf_state), + GFP_KERNEL); + if (!pf->vf_state) + goto err_alloc_vf_state; + } err = enetc_setup_mac_addresses(node, pf); if (err) @@ -1338,7 +1005,7 @@ static int enetc_pf_probe(struct pci_dev *pdev, if (err) goto err_mdiobus_create; - err = enetc_phylink_create(priv, node); + err = enetc_phylink_create(priv, node, &enetc_mac_phylink_ops); if (err) goto err_phylink_create; @@ -1363,6 +1030,8 @@ err_alloc_si_res: free_netdev(ndev); err_alloc_netdev: err_setup_mac_addresses: + kfree(pf->vf_state); +err_alloc_vf_state: enetc_psi_destroy(pdev); err_psi_create: return err; @@ -1389,6 +1058,7 @@ static void enetc_pf_remove(struct pci_dev *pdev) enetc_free_si_resources(priv); free_netdev(si->ndev); + kfree(pf->vf_state); enetc_psi_destroy(pdev); } diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h index c26bd66e4597..ae407e9e9ee7 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h @@ -5,19 +5,8 @@ #include <linux/phylink.h> #define ENETC_PF_NUM_RINGS 8 - -enum enetc_mac_addr_type {UC, MC, MADDR_TYPE}; #define ENETC_MAX_NUM_MAC_FLT ((ENETC_MAX_NUM_VFS + 1) * MADDR_TYPE) -#define ENETC_MADDR_HASH_TBL_SZ 64 -struct enetc_mac_filter { - union { - char mac_addr[ETH_ALEN]; - DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ); - }; - int mac_addr_cnt; -}; - #define ENETC_VLAN_HT_SIZE 64 enum enetc_vf_flags { @@ -28,6 +17,25 @@ struct enetc_vf_state { enum enetc_vf_flags flags; }; +struct enetc_port_caps { + u32 half_duplex:1; + int num_vsi; + int num_msix; + int num_rx_bdr; + int num_tx_bdr; + int mac_filter_num; +}; + +struct enetc_pf; + +struct enetc_pf_ops { + void (*set_si_primary_mac)(struct enetc_hw *hw, int si, const u8 *addr); + void (*get_si_primary_mac)(struct enetc_hw *hw, int si, u8 *addr); + struct phylink_pcs *(*create_pcs)(struct 
enetc_pf *pf, struct mii_bus *bus); + void (*destroy_pcs)(struct phylink_pcs *pcs); + int (*enable_psfp)(struct enetc_ndev_priv *priv); +}; + struct enetc_pf { struct enetc_si *si; int num_vfs; /* number of active VFs, after sriov_init */ @@ -50,6 +58,11 @@ struct enetc_pf { phy_interface_t if_mode; struct phylink_config phylink_config; + + struct enetc_port_caps caps; + const struct enetc_pf_ops *ops; + + int num_mfe; /* number of mac address filter table entries */ }; #define phylink_to_enetc_pf(config) \ diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c new file mode 100644 index 000000000000..edf14a95cab7 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c @@ -0,0 +1,426 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2024 NXP */ + +#include <linux/fsl/enetc_mdio.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> + +#include "enetc_pf_common.h" + +static void enetc_set_si_hw_addr(struct enetc_pf *pf, int si, + const u8 *mac_addr) +{ + struct enetc_hw *hw = &pf->si->hw; + + pf->ops->set_si_primary_mac(hw, si, mac_addr); +} + +static void enetc_get_si_hw_addr(struct enetc_pf *pf, int si, u8 *mac_addr) +{ + struct enetc_hw *hw = &pf->si->hw; + + pf->ops->get_si_primary_mac(hw, si, mac_addr); +} + +int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + struct sockaddr *saddr = addr; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(ndev, saddr->sa_data); + enetc_set_si_hw_addr(pf, 0, saddr->sa_data); + + return 0; +} +EXPORT_SYMBOL_GPL(enetc_pf_set_mac_addr); + +static int enetc_setup_mac_address(struct device_node *np, struct enetc_pf *pf, + int si) +{ + struct device *dev = &pf->si->pdev->dev; + u8 mac_addr[ETH_ALEN] = { 0 }; + int err; + + /* (1) try to get the MAC address from the device tree */ + if (np) { + err = of_get_mac_address(np, mac_addr); + if (err == -EPROBE_DEFER) + return err; + } + + /* (2) bootloader supplied MAC address */ + if (is_zero_ether_addr(mac_addr)) + enetc_get_si_hw_addr(pf, si, mac_addr); + + /* (3) choose a random one */ + if (is_zero_ether_addr(mac_addr)) { + eth_random_addr(mac_addr); + dev_info(dev, "no MAC address specified for SI%d, using %pM\n", + si, mac_addr); + } + + enetc_set_si_hw_addr(pf, si, mac_addr); + + return 0; +} + +int enetc_setup_mac_addresses(struct device_node *np, struct enetc_pf *pf) +{ + int err, i; + + /* The PF might take its MAC from the device tree */ + err = enetc_setup_mac_address(np, pf, 0); + if (err) + return err; + + for (i = 0; i < pf->total_vfs; i++) { + err = enetc_setup_mac_address(NULL, pf, i + 1); + if (err) + return err; + } + + return 0; +} +EXPORT_SYMBOL_GPL(enetc_setup_mac_addresses); + +void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, + const struct net_device_ops *ndev_ops) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(si); + + SET_NETDEV_DEV(ndev, &si->pdev->dev); + priv->ndev = ndev; + priv->si = si; + priv->dev = &si->pdev->dev; + si->ndev = ndev; + + priv->msg_enable = (NETIF_MSG_WOL << 1) - 1; + priv->sysclk_freq = si->drvdata->sysclk_freq; + priv->max_frags = si->drvdata->max_frags; + ndev->netdev_ops = ndev_ops; + enetc_set_ethtool_ops(ndev); + ndev->watchdog_timeo = 5 * HZ; + ndev->max_mtu = ENETC_MAX_MTU; + + ndev->hw_features = NETIF_F_SG | 
NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK | + NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_L4; + ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_L4; + ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM | + NETIF_F_TSO | NETIF_F_TSO6; + + ndev->priv_flags |= IFF_UNICAST_FLT; + + if (si->drvdata->tx_csum) + priv->active_offloads |= ENETC_F_TXCSUM; + + if (si->hw_features & ENETC_SI_F_LSO) + priv->active_offloads |= ENETC_F_LSO; + + if (si->num_rss) { + ndev->hw_features |= NETIF_F_RXHASH; + ndev->features |= NETIF_F_RXHASH; + } + + /* TODO: currently, i.MX95 ENETC driver does not support advanced features */ + if (!is_enetc_rev1(si)) + goto end; + + ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG | + NETDEV_XDP_ACT_NDO_XMIT_SG; + + if (si->hw_features & ENETC_SI_F_PSFP && pf->ops->enable_psfp && + !pf->ops->enable_psfp(priv)) { + priv->active_offloads |= ENETC_F_QCI; + ndev->features |= NETIF_F_HW_TC; + ndev->hw_features |= NETIF_F_HW_TC; + } + +end: + /* pick up primary MAC address from SI */ + enetc_load_primary_mac_addr(&si->hw, ndev); +} +EXPORT_SYMBOL_GPL(enetc_pf_netdev_setup); + +static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np) +{ + struct device *dev = &pf->si->pdev->dev; + struct enetc_mdio_priv *mdio_priv; + struct mii_bus *bus; + int err; + + bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv)); + if (!bus) + return -ENOMEM; + + bus->name = "Freescale ENETC MDIO Bus"; + bus->read = enetc_mdio_read_c22; + bus->write = enetc_mdio_write_c22; + bus->read_c45 = enetc_mdio_read_c45; + bus->write_c45 = enetc_mdio_write_c45; + bus->parent = dev; + mdio_priv = bus->priv; + mdio_priv->hw = &pf->si->hw; + mdio_priv->mdio_base = ENETC_EMDIO_BASE; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); + + err = of_mdiobus_register(bus, np); + if (err) + return dev_err_probe(dev, err, "cannot register MDIO bus\n"); + + pf->mdio = bus; + + return 0; +} + +static void enetc_mdio_remove(struct enetc_pf *pf) +{ + if (pf->mdio) + mdiobus_unregister(pf->mdio); +} + +static int enetc_imdio_create(struct enetc_pf *pf) +{ + struct device *dev = &pf->si->pdev->dev; + struct enetc_mdio_priv *mdio_priv; + struct phylink_pcs *phylink_pcs; + struct mii_bus *bus; + int err; + + if (!pf->ops->create_pcs) { + dev_err(dev, "Creating PCS is not supported\n"); + + return -EOPNOTSUPP; + } + + bus = mdiobus_alloc_size(sizeof(*mdio_priv)); + if (!bus) + return -ENOMEM; + + bus->name = "Freescale ENETC internal MDIO Bus"; + bus->read = enetc_mdio_read_c22; + bus->write = enetc_mdio_write_c22; + bus->read_c45 = enetc_mdio_read_c45; + bus->write_c45 = enetc_mdio_write_c45; + bus->parent = dev; + bus->phy_mask = ~0; + mdio_priv = bus->priv; + mdio_priv->hw = &pf->si->hw; + mdio_priv->mdio_base = ENETC_PM_IMDIO_BASE; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev)); + + err = mdiobus_register(bus); + if (err) { + dev_err(dev, "cannot register internal MDIO bus (%d)\n", err); + goto free_mdio_bus; + } + + phylink_pcs = pf->ops->create_pcs(pf, bus); + if (IS_ERR(phylink_pcs)) { + err = PTR_ERR(phylink_pcs); + dev_err(dev, "cannot create lynx pcs (%d)\n", err); + goto unregister_mdiobus; + } + + pf->imdio = bus; + pf->pcs = phylink_pcs; + + return 0; + +unregister_mdiobus: + 
mdiobus_unregister(bus); +free_mdio_bus: + mdiobus_free(bus); + return err; +} + +static void enetc_imdio_remove(struct enetc_pf *pf) +{ + if (pf->pcs && pf->ops->destroy_pcs) + pf->ops->destroy_pcs(pf->pcs); + + if (pf->imdio) { + mdiobus_unregister(pf->imdio); + mdiobus_free(pf->imdio); + } +} + +static bool enetc_port_has_pcs(struct enetc_pf *pf) +{ + return (pf->if_mode == PHY_INTERFACE_MODE_SGMII || + pf->if_mode == PHY_INTERFACE_MODE_1000BASEX || + pf->if_mode == PHY_INTERFACE_MODE_2500BASEX || + pf->if_mode == PHY_INTERFACE_MODE_USXGMII); +} + +int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node) +{ + struct device_node *mdio_np; + int err; + + mdio_np = of_get_child_by_name(node, "mdio"); + if (mdio_np) { + err = enetc_mdio_probe(pf, mdio_np); + + of_node_put(mdio_np); + if (err) + return err; + } + + if (enetc_port_has_pcs(pf)) { + err = enetc_imdio_create(pf); + if (err) { + enetc_mdio_remove(pf); + return err; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(enetc_mdiobus_create); + +void enetc_mdiobus_destroy(struct enetc_pf *pf) +{ + enetc_mdio_remove(pf); + enetc_imdio_remove(pf); +} +EXPORT_SYMBOL_GPL(enetc_mdiobus_destroy); + +int enetc_phylink_create(struct enetc_ndev_priv *priv, struct device_node *node, + const struct phylink_mac_ops *ops) +{ + struct enetc_pf *pf = enetc_si_priv(priv->si); + struct phylink *phylink; + int err; + + pf->phylink_config.dev = &priv->ndev->dev; + pf->phylink_config.type = PHYLINK_NETDEV; + pf->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD; + + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + pf->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_SGMII, + pf->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + pf->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + pf->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_USXGMII, + pf->phylink_config.supported_interfaces); + phy_interface_set_rgmii(pf->phylink_config.supported_interfaces); + + phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(node), + pf->if_mode, ops); + if (IS_ERR(phylink)) { + err = PTR_ERR(phylink); + return err; + } + + priv->phylink = phylink; + + return 0; +} +EXPORT_SYMBOL_GPL(enetc_phylink_create); + +void enetc_phylink_destroy(struct enetc_ndev_priv *priv) +{ + phylink_destroy(priv->phylink); +} +EXPORT_SYMBOL_GPL(enetc_phylink_destroy); + +void enetc_set_default_rss_key(struct enetc_pf *pf) +{ + u8 hash_key[ENETC_RSSHASH_KEY_SIZE] = {0}; + + /* set up hash key */ + get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE); + enetc_set_rss_key(pf->si, hash_key); +} +EXPORT_SYMBOL_GPL(enetc_set_default_rss_key); + +static int enetc_vid_hash_idx(unsigned int vid) +{ + int res = 0; + int i; + + for (i = 0; i < 6; i++) + res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i; + + return res; +} + +static void enetc_refresh_vlan_ht_filter(struct enetc_pf *pf) +{ + int i; + + bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE); + for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) { + int hidx = enetc_vid_hash_idx(i); + + __set_bit(hidx, pf->vlan_ht_filter); + } +} + +static void enetc_set_si_vlan_ht_filter(struct enetc_si *si, + int si_id, u64 hash) +{ + struct enetc_hw *hw = &si->hw; + int high_reg_off, low_reg_off; + + if (is_enetc_rev1(si)) { + low_reg_off = ENETC_PSIVHFR0(si_id); + high_reg_off = ENETC_PSIVHFR1(si_id); + } else { + low_reg_off = ENETC4_PSIVHFR0(si_id); + high_reg_off = 
ENETC4_PSIVHFR1(si_id); + } + + enetc_port_wr(hw, low_reg_off, lower_32_bits(hash)); + enetc_port_wr(hw, high_reg_off, upper_32_bits(hash)); +} + +int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + int idx; + + __set_bit(vid, pf->active_vlans); + + idx = enetc_vid_hash_idx(vid); + if (!__test_and_set_bit(idx, pf->vlan_ht_filter)) + enetc_set_si_vlan_ht_filter(pf->si, 0, *pf->vlan_ht_filter); + + return 0; +} +EXPORT_SYMBOL_GPL(enetc_vlan_rx_add_vid); + +int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + + if (__test_and_clear_bit(vid, pf->active_vlans)) { + enetc_refresh_vlan_ht_filter(pf); + enetc_set_si_vlan_ht_filter(pf->si, 0, *pf->vlan_ht_filter); + } + + return 0; +} +EXPORT_SYMBOL_GPL(enetc_vlan_rx_del_vid); + +MODULE_DESCRIPTION("NXP ENETC PF common functionality driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h new file mode 100644 index 000000000000..96d4840a3107 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2024 NXP */ + +#include "enetc_pf.h" + +int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr); +int enetc_setup_mac_addresses(struct device_node *np, struct enetc_pf *pf); +void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, + const struct net_device_ops *ndev_ops); +int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node); +void enetc_mdiobus_destroy(struct enetc_pf *pf); +int enetc_phylink_create(struct enetc_ndev_priv *priv, struct device_node *node, + const struct phylink_mac_ops *ops); +void enetc_phylink_destroy(struct enetc_ndev_priv *priv); +void enetc_set_default_rss_key(struct enetc_pf *pf); +int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid); +int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid); + +static inline u16 enetc_get_ip_revision(struct enetc_hw *hw) +{ + return enetc_global_rd(hw, ENETC_G_EIPBRR0) & EIPBRR0_REVISION; +} diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index b65da49dd926..ccf86651455c 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -336,7 +336,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) * * (enetClockFrequency / portTransmitRate) * 100 */ - hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit, + hi_credit_reg = (u32)div_u64((priv->sysclk_freq * 100ULL) * hi_credit_bit, port_transmit_rate * 1000000ULL); enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c index dfcaac302e24..6c4b374bcb0e 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -78,11 +78,18 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr) { struct enetc_ndev_priv *priv = netdev_priv(ndev); struct sockaddr *saddr = addr; + int err; if (!is_valid_ether_addr(saddr->sa_data)) return -EADDRNOTAVAIL; - return 
enetc_msg_vsi_set_primary_mac_addr(priv, saddr); + err = enetc_msg_vsi_set_primary_mac_addr(priv, saddr); + if (err) + return err; + + eth_hw_addr_set(ndev, saddr->sa_data); + + return 0; } static int enetc_vf_set_features(struct net_device *ndev, @@ -114,6 +121,8 @@ static const struct net_device_ops enetc_ndev_ops = { .ndo_set_features = enetc_vf_set_features, .ndo_eth_ioctl = enetc_ioctl, .ndo_setup_tc = enetc_vf_setup_tc, + .ndo_hwtstamp_get = enetc_hwtstamp_get, + .ndo_hwtstamp_set = enetc_hwtstamp_set, }; static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev, @@ -128,6 +137,8 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev, si->ndev = ndev; priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1; + priv->sysclk_freq = si->drvdata->sysclk_freq; + priv->max_frags = si->drvdata->max_frags; ndev->netdev_ops = ndev_ops; enetc_set_ethtool_ops(ndev); ndev->watchdog_timeo = 5 * HZ; @@ -136,21 +147,30 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev, ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; + NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_L4; ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; + NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_L4; ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; - if (si->num_rss) + if (si->num_rss) { ndev->hw_features |= NETIF_F_RXHASH; + ndev->features |= NETIF_F_RXHASH; + } /* pick up primary MAC address from SI */ enetc_load_primary_mac_addr(&si->hw, ndev); } +static const struct enetc_si_ops enetc_vsi_ops = { + .get_rss_table = enetc_get_rss_table, + .set_rss_table = enetc_set_rss_table, +}; + static int enetc_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -164,6 +184,14 @@ static int enetc_vf_probe(struct pci_dev *pdev, return dev_err_probe(&pdev->dev, err, "PCI probing failed\n"); si = pci_get_drvdata(pdev); + si->revision = ENETC_REV_1_0; + si->ops = &enetc_vsi_ops; + err = enetc_get_driver_data(si); + if (err) { + dev_err_probe(&pdev->dev, err, + "Could not get VF driver data\n"); + goto err_alloc_netdev; + } enetc_get_si_caps(si); diff --git a/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c new file mode 100644 index 000000000000..bcb8eefeb93c --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c @@ -0,0 +1,445 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * NXP NETC Blocks Control Driver + * + * Copyright 2024 NXP + * + * This driver is used for pre-initialization of NETC, such as PCS and MII + * protocols, LDID, warm reset, etc. Therefore, all NETC device drivers can + * only be probed after the netc-blk-ctrl driver has completed initialization. + * In addition, when the system enters suspend mode, IERB, PRB, and NETCMIX + * will be powered off, except for WOL. Therefore, when the system resumes, + * these blocks need to be reinitialized.
+ */ + +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/debugfs.h> +#include <linux/delay.h> +#include <linux/fsl/netc_global.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_net.h> +#include <linux/of_platform.h> +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/seq_file.h> + +/* NETCMIX registers */ +#define IMX95_CFG_LINK_IO_VAR 0x0 +#define IO_VAR_16FF_16G_SERDES 0x1 +#define IO_VAR(port, var) (((var) & 0xf) << ((port) << 2)) + +#define IMX95_CFG_LINK_MII_PROT 0x4 +#define CFG_LINK_MII_PORT_0 GENMASK(3, 0) +#define CFG_LINK_MII_PORT_1 GENMASK(7, 4) +#define MII_PROT_MII 0x0 +#define MII_PROT_RMII 0x1 +#define MII_PROT_RGMII 0x2 +#define MII_PROT_SERIAL 0x3 +#define MII_PROT(port, prot) (((prot) & 0xf) << ((port) << 2)) + +#define IMX95_CFG_LINK_PCS_PROT(a) (0x8 + (a) * 4) +#define PCS_PROT_1G_SGMII BIT(0) +#define PCS_PROT_2500M_SGMII BIT(1) +#define PCS_PROT_XFI BIT(3) +#define PCS_PROT_SFI BIT(4) +#define PCS_PROT_10G_SXGMII BIT(6) + +/* NETC privileged register block register */ +#define PRB_NETCRR 0x100 +#define NETCRR_SR BIT(0) +#define NETCRR_LOCK BIT(1) + +#define PRB_NETCSR 0x104 +#define NETCSR_ERROR BIT(0) +#define NETCSR_STATE BIT(1) + +/* NETC integrated endpoint register block register */ +#define IERB_EMDIOFAUXR 0x344 +#define IERB_T0FAUXR 0x444 +#define IERB_EFAUXR(a) (0x3044 + 0x100 * (a)) +#define IERB_VFAUXR(a) (0x4004 + 0x40 * (a)) +#define FAUXR_LDID GENMASK(3, 0) + +/* Platform information */ +#define IMX95_ENETC0_BUS_DEVFN 0x0 +#define IMX95_ENETC1_BUS_DEVFN 0x40 +#define IMX95_ENETC2_BUS_DEVFN 0x80 + +/* Flags for different platforms */ +#define NETC_HAS_NETCMIX BIT(0) + +struct netc_devinfo { + u32 flags; + int (*netcmix_init)(struct platform_device *pdev); + int (*ierb_init)(struct platform_device *pdev); +}; + +struct netc_blk_ctrl { + void __iomem *prb; + void __iomem *ierb; + void __iomem *netcmix; + + const struct netc_devinfo *devinfo; + struct platform_device *pdev; + struct dentry *debugfs_root; +}; + +static void netc_reg_write(void __iomem *base, u32 offset, u32 val) +{ + netc_write(base + offset, val); +} + +static u32 netc_reg_read(void __iomem *base, u32 offset) +{ + return netc_read(base + offset); +} + +static int netc_of_pci_get_bus_devfn(struct device_node *np) +{ + u32 reg[5]; + int error; + + error = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg)); + if (error) + return error; + + return (reg[0] >> 8) & 0xffff; +} + +static int netc_get_link_mii_protocol(phy_interface_t interface) +{ + switch (interface) { + case PHY_INTERFACE_MODE_MII: + return MII_PROT_MII; + case PHY_INTERFACE_MODE_RMII: + return MII_PROT_RMII; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + return MII_PROT_RGMII; + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_2500BASEX: + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_XGMII: + case PHY_INTERFACE_MODE_USXGMII: + return MII_PROT_SERIAL; + default: + return -EINVAL; + } +} + +static int imx95_netcmix_init(struct platform_device *pdev) +{ + struct netc_blk_ctrl *priv = platform_get_drvdata(pdev); + struct device_node *np = pdev->dev.of_node; + phy_interface_t interface; + int bus_devfn, mii_proto; + u32 val; + int err; + + /* Default setting of MII protocol */ + val = MII_PROT(0, MII_PROT_RGMII) | MII_PROT(1, MII_PROT_RGMII) | + MII_PROT(2, MII_PROT_SERIAL); + + /* Update the link MII protocol 
through parsing phy-mode */ + for_each_available_child_of_node_scoped(np, child) { + for_each_available_child_of_node_scoped(child, gchild) { + if (!of_device_is_compatible(gchild, "pci1131,e101")) + continue; + + bus_devfn = netc_of_pci_get_bus_devfn(gchild); + if (bus_devfn < 0) + return -EINVAL; + + if (bus_devfn == IMX95_ENETC2_BUS_DEVFN) + continue; + + err = of_get_phy_mode(gchild, &interface); + if (err) + continue; + + mii_proto = netc_get_link_mii_protocol(interface); + if (mii_proto < 0) + return -EINVAL; + + switch (bus_devfn) { + case IMX95_ENETC0_BUS_DEVFN: + val = u32_replace_bits(val, mii_proto, + CFG_LINK_MII_PORT_0); + break; + case IMX95_ENETC1_BUS_DEVFN: + val = u32_replace_bits(val, mii_proto, + CFG_LINK_MII_PORT_1); + break; + default: + return -EINVAL; + } + } + } + + /* Configure Link I/O variant */ + netc_reg_write(priv->netcmix, IMX95_CFG_LINK_IO_VAR, + IO_VAR(2, IO_VAR_16FF_16G_SERDES)); + /* Configure Link 2 PCS protocol */ + netc_reg_write(priv->netcmix, IMX95_CFG_LINK_PCS_PROT(2), + PCS_PROT_10G_SXGMII); + netc_reg_write(priv->netcmix, IMX95_CFG_LINK_MII_PROT, val); + + return 0; +} + +static bool netc_ierb_is_locked(struct netc_blk_ctrl *priv) +{ + return !!(netc_reg_read(priv->prb, PRB_NETCRR) & NETCRR_LOCK); +} + +static int netc_lock_ierb(struct netc_blk_ctrl *priv) +{ + u32 val; + + netc_reg_write(priv->prb, PRB_NETCRR, NETCRR_LOCK); + + return read_poll_timeout(netc_reg_read, val, !(val & NETCSR_STATE), + 100, 2000, false, priv->prb, PRB_NETCSR); +} + +static int netc_unlock_ierb_with_warm_reset(struct netc_blk_ctrl *priv) +{ + u32 val; + + netc_reg_write(priv->prb, PRB_NETCRR, 0); + + return read_poll_timeout(netc_reg_read, val, !(val & NETCRR_LOCK), + 1000, 100000, true, priv->prb, PRB_NETCRR); +} + +static int imx95_ierb_init(struct platform_device *pdev) +{ + struct netc_blk_ctrl *priv = platform_get_drvdata(pdev); + + /* EMDIO : No MSI-X interrupt */ + netc_reg_write(priv->ierb, IERB_EMDIOFAUXR, 0); + /* ENETC0 PF */ + netc_reg_write(priv->ierb, IERB_EFAUXR(0), 0); + /* ENETC0 VF0 */ + netc_reg_write(priv->ierb, IERB_VFAUXR(0), 1); + /* ENETC0 VF1 */ + netc_reg_write(priv->ierb, IERB_VFAUXR(1), 2); + /* ENETC1 PF */ + netc_reg_write(priv->ierb, IERB_EFAUXR(1), 3); + /* ENETC1 VF0 */ + netc_reg_write(priv->ierb, IERB_VFAUXR(2), 5); + /* ENETC1 VF1 */ + netc_reg_write(priv->ierb, IERB_VFAUXR(3), 6); + /* ENETC2 PF */ + netc_reg_write(priv->ierb, IERB_EFAUXR(2), 4); + /* ENETC2 VF0 */ + netc_reg_write(priv->ierb, IERB_VFAUXR(4), 5); + /* ENETC2 VF1 */ + netc_reg_write(priv->ierb, IERB_VFAUXR(5), 6); + /* NETC TIMER */ + netc_reg_write(priv->ierb, IERB_T0FAUXR, 7); + + return 0; +} + +static int netc_ierb_init(struct platform_device *pdev) +{ + struct netc_blk_ctrl *priv = platform_get_drvdata(pdev); + const struct netc_devinfo *devinfo = priv->devinfo; + int err; + + if (netc_ierb_is_locked(priv)) { + err = netc_unlock_ierb_with_warm_reset(priv); + if (err) { + dev_err(&pdev->dev, "Unlock IERB failed.\n"); + return err; + } + } + + if (devinfo->ierb_init) { + err = devinfo->ierb_init(pdev); + if (err) + return err; + } + + err = netc_lock_ierb(priv); + if (err) { + dev_err(&pdev->dev, "Lock IERB failed.\n"); + return err; + } + + return 0; +} + +#if IS_ENABLED(CONFIG_DEBUG_FS) +static int netc_prb_show(struct seq_file *s, void *data) +{ + struct netc_blk_ctrl *priv = s->private; + u32 val; + + val = netc_reg_read(priv->prb, PRB_NETCRR); + seq_printf(s, "[PRB NETCRR] Lock:%d SR:%d\n", + (val & NETCRR_LOCK) ? 1 : 0, + (val & NETCRR_SR) ? 
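As a quick cross-check of the register math in imx95_netcmix_init(): each link gets a 4-bit MII protocol field in CFG_LINK_MII_PROT, so the default RGMII/RGMII/serial layout above encodes to 0x322. A small standalone sketch of the packing (macros copied from the hunk above; the printf is only illustrative):

#include <stdio.h>

#define MII_PROT_RGMII		0x2
#define MII_PROT_SERIAL		0x3
/* Port N occupies bits [4N+3:4N] of IMX95_CFG_LINK_MII_PROT. */
#define MII_PROT(port, prot)	(((prot) & 0xf) << ((port) << 2))

int main(void)
{
	unsigned int val = MII_PROT(0, MII_PROT_RGMII) |
			   MII_PROT(1, MII_PROT_RGMII) |
			   MII_PROT(2, MII_PROT_SERIAL);

	printf("default CFG_LINK_MII_PROT = 0x%03x\n", val);	/* 0x322 */
	return 0;
}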
1 : 0); + + val = netc_reg_read(priv->prb, PRB_NETCSR); + seq_printf(s, "[PRB NETCSR] State:%d Error:%d\n", + (val & NETCSR_STATE) ? 1 : 0, + (val & NETCSR_ERROR) ? 1 : 0); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(netc_prb); + +static void netc_blk_ctrl_create_debugfs(struct netc_blk_ctrl *priv) +{ + struct dentry *root; + + root = debugfs_create_dir("netc_blk_ctrl", NULL); + if (IS_ERR(root)) + return; + + priv->debugfs_root = root; + + debugfs_create_file("prb", 0444, root, priv, &netc_prb_fops); +} + +static void netc_blk_ctrl_remove_debugfs(struct netc_blk_ctrl *priv) +{ + debugfs_remove_recursive(priv->debugfs_root); + priv->debugfs_root = NULL; +} + +#else + +static void netc_blk_ctrl_create_debugfs(struct netc_blk_ctrl *priv) +{ +} + +static void netc_blk_ctrl_remove_debugfs(struct netc_blk_ctrl *priv) +{ +} +#endif + +static int netc_prb_check_error(struct netc_blk_ctrl *priv) +{ + if (netc_reg_read(priv->prb, PRB_NETCSR) & NETCSR_ERROR) + return -1; + + return 0; +} + +static const struct netc_devinfo imx95_devinfo = { + .flags = NETC_HAS_NETCMIX, + .netcmix_init = imx95_netcmix_init, + .ierb_init = imx95_ierb_init, +}; + +static const struct of_device_id netc_blk_ctrl_match[] = { + { .compatible = "nxp,imx95-netc-blk-ctrl", .data = &imx95_devinfo }, + {}, +}; +MODULE_DEVICE_TABLE(of, netc_blk_ctrl_match); + +static int netc_blk_ctrl_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + const struct netc_devinfo *devinfo; + struct device *dev = &pdev->dev; + const struct of_device_id *id; + struct netc_blk_ctrl *priv; + struct clk *ipg_clk; + void __iomem *regs; + int err; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->pdev = pdev; + ipg_clk = devm_clk_get_optional_enabled(dev, "ipg"); + if (IS_ERR(ipg_clk)) + return dev_err_probe(dev, PTR_ERR(ipg_clk), + "Set ipg clock failed\n"); + + id = of_match_device(netc_blk_ctrl_match, dev); + if (!id) + return dev_err_probe(dev, -EINVAL, "Cannot match device\n"); + + devinfo = (struct netc_devinfo *)id->data; + if (!devinfo) + return dev_err_probe(dev, -EINVAL, "No device information\n"); + + priv->devinfo = devinfo; + regs = devm_platform_ioremap_resource_byname(pdev, "ierb"); + if (IS_ERR(regs)) + return dev_err_probe(dev, PTR_ERR(regs), + "Missing IERB resource\n"); + + priv->ierb = regs; + regs = devm_platform_ioremap_resource_byname(pdev, "prb"); + if (IS_ERR(regs)) + return dev_err_probe(dev, PTR_ERR(regs), + "Missing PRB resource\n"); + + priv->prb = regs; + if (devinfo->flags & NETC_HAS_NETCMIX) { + regs = devm_platform_ioremap_resource_byname(pdev, "netcmix"); + if (IS_ERR(regs)) + return dev_err_probe(dev, PTR_ERR(regs), + "Missing NETCMIX resource\n"); + priv->netcmix = regs; + } + + platform_set_drvdata(pdev, priv); + if (devinfo->netcmix_init) { + err = devinfo->netcmix_init(pdev); + if (err) + return dev_err_probe(dev, err, + "Initializing NETCMIX failed\n"); + } + + err = netc_ierb_init(pdev); + if (err) + return dev_err_probe(dev, err, "Initializing IERB failed\n"); + + if (netc_prb_check_error(priv) < 0) + dev_warn(dev, "The current IERB configuration is invalid\n"); + + netc_blk_ctrl_create_debugfs(priv); + + err = of_platform_populate(node, NULL, NULL, dev); + if (err) { + netc_blk_ctrl_remove_debugfs(priv); + return dev_err_probe(dev, err, "of_platform_populate failed\n"); + } + + return 0; +} + +static void netc_blk_ctrl_remove(struct platform_device *pdev) +{ + struct netc_blk_ctrl *priv = platform_get_drvdata(pdev); + + 
of_platform_depopulate(&pdev->dev); + netc_blk_ctrl_remove_debugfs(priv); +} + +static struct platform_driver netc_blk_ctrl_driver = { + .driver = { + .name = "nxp-netc-blk-ctrl", + .of_match_table = netc_blk_ctrl_match, + }, + .probe = netc_blk_ctrl_probe, + .remove = netc_blk_ctrl_remove, +}; + +module_platform_driver(netc_blk_ctrl_driver); + +MODULE_DESCRIPTION("NXP NETC Blocks Control Driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/enetc/ntmp.c b/drivers/net/ethernet/freescale/enetc/ntmp.c new file mode 100644 index 000000000000..ba32c1bbd9e1 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/ntmp.c @@ -0,0 +1,462 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * NETC NTMP (NETC Table Management Protocol) 2.0 Library + * Copyright 2025 NXP + */ + +#include <linux/dma-mapping.h> +#include <linux/fsl/netc_global.h> +#include <linux/iopoll.h> + +#include "ntmp_private.h" + +#define NETC_CBDR_TIMEOUT 1000 /* us */ +#define NETC_CBDR_DELAY_US 10 +#define NETC_CBDR_MR_EN BIT(31) + +#define NTMP_BASE_ADDR_ALIGN 128 +#define NTMP_DATA_ADDR_ALIGN 32 + +/* Define NTMP Table ID */ +#define NTMP_MAFT_ID 1 +#define NTMP_RSST_ID 3 + +/* Generic Update Actions for most tables */ +#define NTMP_GEN_UA_CFGEU BIT(0) +#define NTMP_GEN_UA_STSEU BIT(1) + +#define NTMP_ENTRY_ID_SIZE 4 +#define RSST_ENTRY_NUM 64 +#define RSST_STSE_DATA_SIZE(n) ((n) * 8) +#define RSST_CFGE_DATA_SIZE(n) (n) + +int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev, + const struct netc_cbdr_regs *regs) +{ + int cbd_num = NETC_CBDR_BD_NUM; + size_t size; + + size = cbd_num * sizeof(union netc_cbd) + NTMP_BASE_ADDR_ALIGN; + cbdr->addr_base = dma_alloc_coherent(dev, size, &cbdr->dma_base, + GFP_KERNEL); + if (!cbdr->addr_base) + return -ENOMEM; + + cbdr->dma_size = size; + cbdr->bd_num = cbd_num; + cbdr->regs = *regs; + cbdr->dev = dev; + + /* The base address of the Control BD Ring must be 128 bytes aligned */ + cbdr->dma_base_align = ALIGN(cbdr->dma_base, NTMP_BASE_ADDR_ALIGN); + cbdr->addr_base_align = PTR_ALIGN(cbdr->addr_base, + NTMP_BASE_ADDR_ALIGN); + + cbdr->next_to_clean = 0; + cbdr->next_to_use = 0; + spin_lock_init(&cbdr->ring_lock); + + /* Step 1: Configure the base address of the Control BD Ring */ + netc_write(cbdr->regs.bar0, lower_32_bits(cbdr->dma_base_align)); + netc_write(cbdr->regs.bar1, upper_32_bits(cbdr->dma_base_align)); + + /* Step 2: Configure the producer index register */ + netc_write(cbdr->regs.pir, cbdr->next_to_clean); + + /* Step 3: Configure the consumer index register */ + netc_write(cbdr->regs.cir, cbdr->next_to_use); + + /* Step4: Configure the number of BDs of the Control BD Ring */ + netc_write(cbdr->regs.lenr, cbdr->bd_num); + + /* Step 5: Enable the Control BD Ring */ + netc_write(cbdr->regs.mr, NETC_CBDR_MR_EN); + + return 0; +} +EXPORT_SYMBOL_GPL(ntmp_init_cbdr); + +void ntmp_free_cbdr(struct netc_cbdr *cbdr) +{ + /* Disable the Control BD Ring */ + netc_write(cbdr->regs.mr, 0); + dma_free_coherent(cbdr->dev, cbdr->dma_size, cbdr->addr_base, + cbdr->dma_base); + memset(cbdr, 0, sizeof(*cbdr)); +} +EXPORT_SYMBOL_GPL(ntmp_free_cbdr); + +static int ntmp_get_free_cbd_num(struct netc_cbdr *cbdr) +{ + return (cbdr->next_to_clean - cbdr->next_to_use - 1 + + cbdr->bd_num) % cbdr->bd_num; +} + +static union netc_cbd *ntmp_get_cbd(struct netc_cbdr *cbdr, int index) +{ + return &((union netc_cbd *)(cbdr->addr_base_align))[index]; +} + +static void ntmp_clean_cbdr(struct netc_cbdr *cbdr) +{ + union netc_cbd *cbd; + int i; + + i = 
cbdr->next_to_clean; + while (netc_read(cbdr->regs.cir) != i) { + cbd = ntmp_get_cbd(cbdr, i); + memset(cbd, 0, sizeof(*cbd)); + i = (i + 1) % cbdr->bd_num; + } + + cbdr->next_to_clean = i; +} + +static int netc_xmit_ntmp_cmd(struct ntmp_user *user, union netc_cbd *cbd) +{ + union netc_cbd *cur_cbd; + struct netc_cbdr *cbdr; + int i, err; + u16 status; + u32 val; + + /* Currently only i.MX95 ENETC is supported, and it only has one + * command BD ring + */ + cbdr = &user->ring[0]; + + spin_lock_bh(&cbdr->ring_lock); + + if (unlikely(!ntmp_get_free_cbd_num(cbdr))) + ntmp_clean_cbdr(cbdr); + + i = cbdr->next_to_use; + cur_cbd = ntmp_get_cbd(cbdr, i); + *cur_cbd = *cbd; + dma_wmb(); + + /* Update producer index of both software and hardware */ + i = (i + 1) % cbdr->bd_num; + cbdr->next_to_use = i; + netc_write(cbdr->regs.pir, i); + + err = read_poll_timeout_atomic(netc_read, val, val == i, + NETC_CBDR_DELAY_US, NETC_CBDR_TIMEOUT, + true, cbdr->regs.cir); + if (unlikely(err)) + goto cbdr_unlock; + + dma_rmb(); + /* Get the writeback command BD, because the caller may need + * to check some other fields of the response header. + */ + *cbd = *cur_cbd; + + /* Check the writeback error status */ + status = le16_to_cpu(cbd->resp_hdr.error_rr) & NTMP_RESP_ERROR; + if (unlikely(status)) { + err = -EIO; + dev_err(user->dev, "Command BD error: 0x%04x\n", status); + } + + ntmp_clean_cbdr(cbdr); + dma_wmb(); + +cbdr_unlock: + spin_unlock_bh(&cbdr->ring_lock); + + return err; +} + +static int ntmp_alloc_data_mem(struct ntmp_dma_buf *data, void **buf_align) +{ + void *buf; + + buf = dma_alloc_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN, + &data->dma, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + data->buf = buf; + *buf_align = PTR_ALIGN(buf, NTMP_DATA_ADDR_ALIGN); + + return 0; +} + +static void ntmp_free_data_mem(struct ntmp_dma_buf *data) +{ + dma_free_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN, + data->buf, data->dma); +} + +static void ntmp_fill_request_hdr(union netc_cbd *cbd, dma_addr_t dma, + int len, int table_id, int cmd, + int access_method) +{ + dma_addr_t dma_align; + + memset(cbd, 0, sizeof(*cbd)); + dma_align = ALIGN(dma, NTMP_DATA_ADDR_ALIGN); + cbd->req_hdr.addr = cpu_to_le64(dma_align); + cbd->req_hdr.len = cpu_to_le32(len); + cbd->req_hdr.cmd = cmd; + cbd->req_hdr.access_method = FIELD_PREP(NTMP_ACCESS_METHOD, + access_method); + cbd->req_hdr.table_id = table_id; + cbd->req_hdr.ver_cci_rr = FIELD_PREP(NTMP_HDR_VERSION, + NTMP_HDR_VER2); + /* For NTMP version 2.0 or later version */ + cbd->req_hdr.npf = cpu_to_le32(NTMP_NPF); +} + +static void ntmp_fill_crd(struct ntmp_cmn_req_data *crd, u8 tblv, + u8 qa, u16 ua) +{ + crd->update_act = cpu_to_le16(ua); + crd->tblv_qact = NTMP_TBLV_QACT(tblv, qa); +} + +static void ntmp_fill_crd_eid(struct ntmp_req_by_eid *rbe, u8 tblv, + u8 qa, u16 ua, u32 entry_id) +{ + ntmp_fill_crd(&rbe->crd, tblv, qa, ua); + rbe->entry_id = cpu_to_le32(entry_id); +} + +static const char *ntmp_table_name(int tbl_id) +{ + switch (tbl_id) { + case NTMP_MAFT_ID: + return "MAC Address Filter Table"; + case NTMP_RSST_ID: + return "RSS Table"; + default: + return "Unknown Table"; + }; +} + +static int ntmp_delete_entry_by_id(struct ntmp_user *user, int tbl_id, + u8 tbl_ver, u32 entry_id, u32 req_len, + u32 resp_len) +{ + struct ntmp_dma_buf data = { + .dev = user->dev, + .size = max(req_len, resp_len), + }; + struct ntmp_req_by_eid *req; + union netc_cbd cbd; + int err; + + err = ntmp_alloc_data_mem(&data, (void **)&req); + if (err) + return err; + + 
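The command BD ring driven by netc_xmit_ntmp_cmd() is a classic single-producer ring that keeps one slot permanently empty so a full ring can be told apart from an empty one; ntmp_get_free_cbd_num() encodes that rule. A standalone check of the arithmetic (the index values are made up):

#include <stdio.h>

#define NETC_CBDR_BD_NUM	256

/* Same arithmetic as ntmp_get_free_cbd_num(). */
static int free_cbd_num(int next_to_clean, int next_to_use)
{
	return (next_to_clean - next_to_use - 1 + NETC_CBDR_BD_NUM) %
	       NETC_CBDR_BD_NUM;
}

int main(void)
{
	printf("empty ring: %d free\n", free_cbd_num(10, 10));	/* 255 */
	printf("full ring:  %d free\n", free_cbd_num(0, 255));	/* 0 */
	return 0;
}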
ntmp_fill_crd_eid(req, tbl_ver, 0, 0, entry_id); + ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(req_len, resp_len), + tbl_id, NTMP_CMD_DELETE, NTMP_AM_ENTRY_ID); + + err = netc_xmit_ntmp_cmd(user, &cbd); + if (err) + dev_err(user->dev, + "Failed to delete entry 0x%x of %s, err: %pe", + entry_id, ntmp_table_name(tbl_id), ERR_PTR(err)); + + ntmp_free_data_mem(&data); + + return err; +} + +static int ntmp_query_entry_by_id(struct ntmp_user *user, int tbl_id, + u32 len, struct ntmp_req_by_eid *req, + dma_addr_t dma, bool compare_eid) +{ + struct ntmp_cmn_resp_query *resp; + int cmd = NTMP_CMD_QUERY; + union netc_cbd cbd; + u32 entry_id; + int err; + + entry_id = le32_to_cpu(req->entry_id); + if (le16_to_cpu(req->crd.update_act)) + cmd = NTMP_CMD_QU; + + /* Request header */ + ntmp_fill_request_hdr(&cbd, dma, len, tbl_id, cmd, NTMP_AM_ENTRY_ID); + err = netc_xmit_ntmp_cmd(user, &cbd); + if (err) { + dev_err(user->dev, + "Failed to query entry 0x%x of %s, err: %pe\n", + entry_id, ntmp_table_name(tbl_id), ERR_PTR(err)); + return err; + } + + /* For a few tables, the first field of their response data is not + * entry_id, so directly return success. + */ + if (!compare_eid) + return 0; + + resp = (struct ntmp_cmn_resp_query *)req; + if (unlikely(le32_to_cpu(resp->entry_id) != entry_id)) { + dev_err(user->dev, + "%s: query EID 0x%x doesn't match response EID 0x%x\n", + ntmp_table_name(tbl_id), entry_id, le32_to_cpu(resp->entry_id)); + return -EIO; + } + + return 0; +} + +int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id, + struct maft_entry_data *maft) +{ + struct ntmp_dma_buf data = { + .dev = user->dev, + .size = sizeof(struct maft_req_add), + }; + struct maft_req_add *req; + union netc_cbd cbd; + int err; + + err = ntmp_alloc_data_mem(&data, (void **)&req); + if (err) + return err; + + /* Set mac address filter table request data buffer */ + ntmp_fill_crd_eid(&req->rbe, user->tbl.maft_ver, 0, 0, entry_id); + req->keye = maft->keye; + req->cfge = maft->cfge; + + ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0), + NTMP_MAFT_ID, NTMP_CMD_ADD, NTMP_AM_ENTRY_ID); + err = netc_xmit_ntmp_cmd(user, &cbd); + if (err) + dev_err(user->dev, "Failed to add MAFT entry 0x%x, err: %pe\n", + entry_id, ERR_PTR(err)); + + ntmp_free_data_mem(&data); + + return err; +} +EXPORT_SYMBOL_GPL(ntmp_maft_add_entry); + +int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id, + struct maft_entry_data *maft) +{ + struct ntmp_dma_buf data = { + .dev = user->dev, + .size = sizeof(struct maft_resp_query), + }; + struct maft_resp_query *resp; + struct ntmp_req_by_eid *req; + int err; + + err = ntmp_alloc_data_mem(&data, (void **)&req); + if (err) + return err; + + ntmp_fill_crd_eid(req, user->tbl.maft_ver, 0, 0, entry_id); + err = ntmp_query_entry_by_id(user, NTMP_MAFT_ID, + NTMP_LEN(sizeof(*req), data.size), + req, data.dma, true); + if (err) + goto end; + + resp = (struct maft_resp_query *)req; + maft->keye = resp->keye; + maft->cfge = resp->cfge; + +end: + ntmp_free_data_mem(&data); + + return err; +} +EXPORT_SYMBOL_GPL(ntmp_maft_query_entry); + +int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id) +{ + return ntmp_delete_entry_by_id(user, NTMP_MAFT_ID, user->tbl.maft_ver, + entry_id, NTMP_EID_REQ_LEN, 0); +} +EXPORT_SYMBOL_GPL(ntmp_maft_delete_entry); + +int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table, + int count) +{ + struct ntmp_dma_buf data = {.dev = user->dev}; + struct rsst_req_update *req; + union netc_cbd cbd; + int err, i; + + if (count != 
RSST_ENTRY_NUM) + /* HW only takes in a full 64 entry table */ + return -EINVAL; + + data.size = struct_size(req, groups, count); + err = ntmp_alloc_data_mem(&data, (void **)&req); + if (err) + return err; + + /* Set the request data buffer */ + ntmp_fill_crd_eid(&req->rbe, user->tbl.rsst_ver, 0, + NTMP_GEN_UA_CFGEU | NTMP_GEN_UA_STSEU, 0); + for (i = 0; i < count; i++) + req->groups[i] = (u8)(table[i]); + + ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0), + NTMP_RSST_ID, NTMP_CMD_UPDATE, NTMP_AM_ENTRY_ID); + + err = netc_xmit_ntmp_cmd(user, &cbd); + if (err) + dev_err(user->dev, "Failed to update RSST entry, err: %pe\n", + ERR_PTR(err)); + + ntmp_free_data_mem(&data); + + return err; +} +EXPORT_SYMBOL_GPL(ntmp_rsst_update_entry); + +int ntmp_rsst_query_entry(struct ntmp_user *user, u32 *table, int count) +{ + struct ntmp_dma_buf data = {.dev = user->dev}; + struct ntmp_req_by_eid *req; + union netc_cbd cbd; + int err, i; + u8 *group; + + if (count != RSST_ENTRY_NUM) + /* HW only takes in a full 64 entry table */ + return -EINVAL; + + data.size = NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count) + + RSST_CFGE_DATA_SIZE(count); + err = ntmp_alloc_data_mem(&data, (void **)&req); + if (err) + return err; + + /* Set the request data buffer */ + ntmp_fill_crd_eid(req, user->tbl.rsst_ver, 0, 0, 0); + ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(sizeof(*req), data.size), + NTMP_RSST_ID, NTMP_CMD_QUERY, NTMP_AM_ENTRY_ID); + err = netc_xmit_ntmp_cmd(user, &cbd); + if (err) { + dev_err(user->dev, "Failed to query RSST entry, err: %pe\n", + ERR_PTR(err)); + goto end; + } + + group = (u8 *)req; + group += NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count); + for (i = 0; i < count; i++) + table[i] = group[i]; + +end: + ntmp_free_data_mem(&data); + + return err; +} +EXPORT_SYMBOL_GPL(ntmp_rsst_query_entry); + +MODULE_DESCRIPTION("NXP NETC Library"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/enetc/ntmp_private.h b/drivers/net/ethernet/freescale/enetc/ntmp_private.h new file mode 100644 index 000000000000..34394e40fddd --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/ntmp_private.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * NTMP table request and response data buffer formats + * Copyright 2025 NXP + */ + +#ifndef __NTMP_PRIVATE_H +#define __NTMP_PRIVATE_H + +#include <linux/bitfield.h> +#include <linux/fsl/ntmp.h> + +#define NTMP_EID_REQ_LEN 8 +#define NETC_CBDR_BD_NUM 256 + +union netc_cbd { + struct { + __le64 addr; + __le32 len; +#define NTMP_RESP_LEN GENMASK(19, 0) +#define NTMP_REQ_LEN GENMASK(31, 20) +#define NTMP_LEN(req, resp) (FIELD_PREP(NTMP_REQ_LEN, (req)) | \ + ((resp) & NTMP_RESP_LEN)) + u8 cmd; +#define NTMP_CMD_DELETE BIT(0) +#define NTMP_CMD_UPDATE BIT(1) +#define NTMP_CMD_QUERY BIT(2) +#define NTMP_CMD_ADD BIT(3) +#define NTMP_CMD_QU (NTMP_CMD_QUERY | NTMP_CMD_UPDATE) + u8 access_method; +#define NTMP_ACCESS_METHOD GENMASK(7, 4) +#define NTMP_AM_ENTRY_ID 0 +#define NTMP_AM_EXACT_KEY 1 +#define NTMP_AM_SEARCH 2 +#define NTMP_AM_TERNARY_KEY 3 + u8 table_id; + u8 ver_cci_rr; +#define NTMP_HDR_VERSION GENMASK(5, 0) +#define NTMP_HDR_VER2 2 +#define NTMP_CCI BIT(6) +#define NTMP_RR BIT(7) + __le32 resv[3]; + __le32 npf; +#define NTMP_NPF BIT(15) + } req_hdr; /* NTMP Request Message Header Format */ + + struct { + __le32 resv0[3]; + __le16 num_matched; + __le16 error_rr; +#define NTMP_RESP_ERROR GENMASK(11, 0) +#define NTMP_RESP_RR BIT(15) + __le32 resv1[4]; + } resp_hdr; /* NTMP Response Message 
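The NTMP_LEN() macro above packs both buffer sizes into the single 32-bit len word of the request header: the response length sits in bits 19:0 and the request length in bits 31:20. A small sketch reproducing the packing outside the kernel (the helper name is illustrative; 580 is the RSST query response size, 4 + 64 * 8 + 64 bytes):

#include <stdio.h>

/* Userspace equivalent of NTMP_LEN(): req in bits 31:20, resp in 19:0. */
static unsigned int ntmp_len(unsigned int req, unsigned int resp)
{
	return ((req & 0xfff) << 20) | (resp & 0xfffff);
}

int main(void)
{
	/* Delete by entry ID: 8-byte request, no response data. */
	printf("NTMP_LEN(8, 0)   = 0x%08x\n", ntmp_len(8, 0));
	/* RSST query: 8-byte request, 580-byte response. */
	printf("NTMP_LEN(8, 580) = 0x%08x\n", ntmp_len(8, 580));
	return 0;
}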
Header Format */ +}; + +struct ntmp_dma_buf { + struct device *dev; + size_t size; + void *buf; + dma_addr_t dma; +}; + +struct ntmp_cmn_req_data { + __le16 update_act; + u8 dbg_opt; + u8 tblv_qact; +#define NTMP_QUERY_ACT GENMASK(3, 0) +#define NTMP_TBL_VER GENMASK(7, 4) +#define NTMP_TBLV_QACT(v, a) (FIELD_PREP(NTMP_TBL_VER, (v)) | \ + ((a) & NTMP_QUERY_ACT)) +}; + +struct ntmp_cmn_resp_query { + __le32 entry_id; +}; + +/* Generic structure for request data by entry ID */ +struct ntmp_req_by_eid { + struct ntmp_cmn_req_data crd; + __le32 entry_id; +}; + +/* MAC Address Filter Table Request Data Buffer Format of Add action */ +struct maft_req_add { + struct ntmp_req_by_eid rbe; + struct maft_keye_data keye; + struct maft_cfge_data cfge; +}; + +/* MAC Address Filter Table Response Data Buffer Format of Query action */ +struct maft_resp_query { + __le32 entry_id; + struct maft_keye_data keye; + struct maft_cfge_data cfge; +}; + +/* RSS Table Request Data Buffer Format of Update action */ +struct rsst_req_update { + struct ntmp_req_by_eid rbe; + u8 groups[]; +}; + +#endif diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index a19cb2a786fd..c81f2ea588f2 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -671,8 +671,6 @@ struct fec_enet_private { unsigned int tx_time_itr; unsigned int itr_clk_rate; - /* tx lpi eee mode */ - struct ethtool_keee eee; unsigned int clk_ref_rate; /* ptp clock period in ns*/ @@ -691,10 +689,19 @@ struct fec_enet_private { /* XDP BPF Program */ struct bpf_prog *xdp_prog; + struct { + int pps_enable; + u64 ns_sys, ns_phc; + u32 at_corr; + u8 at_inc_corr; + } ptp_saved_state; + u64 ethtool_stats[]; }; void fec_ptp_init(struct platform_device *pdev, int irq_idx); +void fec_ptp_restore_state(struct fec_enet_private *fep); +void fec_ptp_save_state(struct fec_enet_private *fep); void fec_ptp_stop(struct platform_device *pdev); void fec_ptp_start_cyclecounter(struct net_device *ndev); int fec_ptp_set(struct net_device *ndev, struct kernel_hwtstamp_config *config, diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 8bd213da8fb6..17e9bddb9ddd 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -714,7 +714,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, txq->bd.cur = bdp; /* Trigger transmission start */ - writel(0, txq->bd.reg_desc_active); + if (!(fep->quirks & FEC_QUIRK_ERR007885) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active)) + writel(0, txq->bd.reg_desc_active); return 0; } @@ -840,6 +845,8 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, struct fec_enet_private *fep = netdev_priv(ndev); int hdr_len, total_len, data_left; struct bufdesc *bdp = txq->bd.cur; + struct bufdesc *tmp_bdp; + struct bufdesc_ex *ebdp; struct tso_t tso; unsigned int index = 0; int ret; @@ -913,7 +920,34 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, return 0; err_release: - /* TODO: Release all used data descriptors for TSO */ + /* Release all used data descriptors for TSO */ + tmp_bdp = txq->bd.cur; + + while (tmp_bdp != bdp) { + /* Unmap data buffers */ + if (tmp_bdp->cbd_bufaddr && + !IS_TSO_HEADER(txq, fec32_to_cpu(tmp_bdp->cbd_bufaddr))) + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(tmp_bdp->cbd_bufaddr), + 
fec16_to_cpu(tmp_bdp->cbd_datlen), + DMA_TO_DEVICE); + + /* Clear standard buffer descriptor fields */ + tmp_bdp->cbd_sc = 0; + tmp_bdp->cbd_datlen = 0; + tmp_bdp->cbd_bufaddr = 0; + + /* Handle extended descriptor if enabled */ + if (fep->bufdesc_ex) { + ebdp = (struct bufdesc_ex *)tmp_bdp; + ebdp->cbd_esc = 0; + } + + tmp_bdp = fec_enet_get_nextdesc(tmp_bdp, &txq->bd); + } + + dev_kfree_skb_any(skb); + return ret; } @@ -1064,6 +1098,29 @@ static void fec_enet_enable_ring(struct net_device *ndev) } } +/* Whack a reset. We should wait for this. + * For i.MX6SX SOC, enet use AXI bus, we use disable MAC + * instead of reset MAC itself. + */ +static void fec_ctrl_reset(struct fec_enet_private *fep, bool allow_wol) +{ + u32 val; + + if (!allow_wol || !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { + if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES || + ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) { + writel(0, fep->hwp + FEC_ECNTRL); + } else { + writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL); + udelay(10); + } + } else { + val = readl(fep->hwp + FEC_ECNTRL); + val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); + writel(val, fep->hwp + FEC_ECNTRL); + } +} + /* * This function is called to start or restart the FEC during a link * change, transmit timeout, or to reconfigure the FEC. The network @@ -1077,17 +1134,10 @@ fec_restart(struct net_device *ndev) u32 rcntl = OPT_FRAME_SIZE | 0x04; u32 ecntl = FEC_ECR_ETHEREN; - /* Whack a reset. We should wait for this. - * For i.MX6SX SOC, enet use AXI bus, we use disable MAC - * instead of reset MAC itself. - */ - if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES || - ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) { - writel(0, fep->hwp + FEC_ECNTRL); - } else { - writel(1, fep->hwp + FEC_ECNTRL); - udelay(10); - } + if (fep->bufdesc_ex) + fec_ptp_save_state(fep); + + fec_ctrl_reset(fep, false); /* * enet-mac reset will reset mac address registers too, @@ -1244,8 +1294,10 @@ fec_restart(struct net_device *ndev) writel(ecntl, fep->hwp + FEC_ECNTRL); fec_enet_active_rxring(ndev); - if (fep->bufdesc_ex) + if (fep->bufdesc_ex) { fec_ptp_start_cyclecounter(ndev); + fec_ptp_restore_state(fep); + } /* Enable interrupts we wish to service */ if (fep->link) @@ -1336,22 +1388,10 @@ fec_stop(struct net_device *ndev) netdev_err(ndev, "Graceful transmit stop did not complete!\n"); } - /* Whack a reset. We should wait for this. - * For i.MX6SX SOC, enet use AXI bus, we use disable MAC - * instead of reset MAC itself. 
- */ - if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { - if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { - writel(0, fep->hwp + FEC_ECNTRL); - } else { - writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL); - udelay(10); - } - } else { - val = readl(fep->hwp + FEC_ECNTRL); - val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); - writel(val, fep->hwp + FEC_ECNTRL); - } + if (fep->bufdesc_ex) + fec_ptp_save_state(fep); + + fec_ctrl_reset(fep, true); writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); @@ -1361,6 +1401,15 @@ fec_stop(struct net_device *ndev) writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL); writel(rmii_mode, fep->hwp + FEC_R_CNTRL); } + + if (fep->bufdesc_ex) { + val = readl(fep->hwp + FEC_ECNTRL); + val |= FEC_ECR_EN1588; + writel(val, fep->hwp + FEC_ECNTRL); + + fec_ptp_start_cyclecounter(ndev); + fec_ptp_restore_state(fep); + } } static void @@ -1574,19 +1623,22 @@ static void fec_enet_tx(struct net_device *ndev, int budget) fec_enet_tx_queue(ndev, i, budget); } -static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq, +static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq, struct bufdesc *bdp, int index) { struct page *new_page; dma_addr_t phys_addr; new_page = page_pool_dev_alloc_pages(rxq->page_pool); - WARN_ON(!new_page); - rxq->rx_skb_info[index].page = new_page; + if (unlikely(!new_page)) + return -ENOMEM; + rxq->rx_skb_info[index].page = new_page; rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM; phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM; bdp->cbd_bufaddr = cpu_to_fec32(phys_addr); + + return 0; } static u32 @@ -1681,6 +1733,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) int cpu = smp_processor_id(); struct xdp_buff xdp; struct page *page; + __fec32 cbd_bufaddr; u32 sub_len = 4; #if !defined(CONFIG_M5272) @@ -1749,12 +1802,17 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) index = fec_enet_get_bd_index(bdp, &rxq->bd); page = rxq->rx_skb_info[index].page; + cbd_bufaddr = bdp->cbd_bufaddr; + if (fec_enet_update_cbd(rxq, bdp, index)) { + ndev->stats.rx_dropped++; + goto rx_processing_done; + } + dma_sync_single_for_cpu(&fep->pdev->dev, - fec32_to_cpu(bdp->cbd_bufaddr), + fec32_to_cpu(cbd_bufaddr), pkt_len, DMA_FROM_DEVICE); prefetch(page_address(page)); - fec_enet_update_cbd(rxq, bdp, index); if (xdp_prog) { xdp_buff_clear_frags_flag(&xdp); @@ -2028,14 +2086,14 @@ static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us) return us * (fep->clk_ref_rate / 1000) / 1000; } -static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable) +static int fec_enet_eee_mode_set(struct net_device *ndev, u32 lpi_timer, + bool enable) { struct fec_enet_private *fep = netdev_priv(ndev); - struct ethtool_keee *p = &fep->eee; unsigned int sleep_cycle, wake_cycle; if (enable) { - sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer); + sleep_cycle = fec_enet_us_to_tx_cycle(ndev, lpi_timer); wake_cycle = sleep_cycle; } else { sleep_cycle = 0; @@ -2088,7 +2146,9 @@ static void fec_enet_adjust_link(struct net_device *ndev) napi_enable(&fep->napi); } if (fep->quirks & FEC_QUIRK_HAS_EEE) - fec_enet_eee_mode_set(ndev, phy_dev->enable_tx_lpi); + fec_enet_eee_mode_set(ndev, + phy_dev->eee_cfg.tx_lpi_timer, + phy_dev->enable_tx_lpi); } else { if (fep->link) { netif_stop_queue(ndev); @@ -2762,22 +2822,18 @@ static void fec_enet_get_regs(struct net_device *ndev, } static int fec_enet_get_ts_info(struct net_device *ndev, - struct ethtool_ts_info *info) 
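With the ethtool_keee caching removed above, the EEE sleep/wake budget is now taken straight from phylib's tx_lpi_timer at link-up time; the microseconds-to-cycles conversion in fec_enet_us_to_tx_cycle() is unchanged. A minimal sketch of that conversion (the 50 MHz reference clock and 600 us timer are example values only):

#include <stdio.h>

/* Same conversion as fec_enet_us_to_tx_cycle(); clk_ref_rate is in Hz. */
static int us_to_tx_cycle(unsigned int us, unsigned int clk_ref_rate)
{
	return us * (clk_ref_rate / 1000) / 1000;
}

int main(void)
{
	printf("sleep/wake cycles: %d\n", us_to_tx_cycle(600, 50000000));
	return 0;
}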
+ struct kernel_ethtool_ts_info *info) { struct fec_enet_private *fep = netdev_priv(ndev); if (fep->bufdesc_ex) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; if (fep->ptp_clock) info->phc_index = ptp_clock_index(fep->ptp_clock); - else - info->phc_index = -1; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); @@ -3168,7 +3224,6 @@ static int fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata) { struct fec_enet_private *fep = netdev_priv(ndev); - struct ethtool_keee *p = &fep->eee; if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) return -EOPNOTSUPP; @@ -3176,8 +3231,6 @@ fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata) if (!netif_running(ndev)) return -ENETDOWN; - edata->tx_lpi_timer = p->tx_lpi_timer; - return phy_ethtool_get_eee(ndev->phydev, edata); } @@ -3185,7 +3238,6 @@ static int fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata) { struct fec_enet_private *fep = netdev_priv(ndev); - struct ethtool_keee *p = &fep->eee; if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) return -EOPNOTSUPP; @@ -3193,8 +3245,6 @@ fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata) if (!netif_running(ndev)) return -ENETDOWN; - p->tx_lpi_timer = edata->tx_lpi_timer; - return phy_ethtool_set_eee(ndev->phydev, edata); } @@ -3674,29 +3724,6 @@ fec_set_mac_address(struct net_device *ndev, void *p) return 0; } -#ifdef CONFIG_NET_POLL_CONTROLLER -/** - * fec_poll_controller - FEC Poll controller function - * @dev: The FEC network adapter - * - * Polled functionality used by netconsole and others in non interrupt mode - * - */ -static void fec_poll_controller(struct net_device *dev) -{ - int i; - struct fec_enet_private *fep = netdev_priv(dev); - - for (i = 0; i < FEC_IRQ_NUM; i++) { - if (fep->irq[i] > 0) { - disable_irq(fep->irq[i]); - fec_enet_interrupt(fep->irq[i], dev); - enable_irq(fep->irq[i]); - } - } -} -#endif - static inline void fec_enet_set_netdev_features(struct net_device *netdev, netdev_features_t features) { @@ -4003,9 +4030,6 @@ static const struct net_device_ops fec_netdev_ops = { .ndo_tx_timeout = fec_timeout, .ndo_set_mac_address = fec_set_mac_address, .ndo_eth_ioctl = phy_do_ioctl_running, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = fec_poll_controller, -#endif .ndo_set_features = fec_set_features, .ndo_bpf = fec_enet_bpf, .ndo_xdp_xmit = fec_enet_xdp_xmit, @@ -4156,6 +4180,14 @@ free_queue_mem: return ret; } +static void fec_enet_deinit(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + netif_napi_del(&fep->napi); + fec_enet_free_queue(ndev); +} + #ifdef CONFIG_OF static int fec_reset_phy(struct platform_device *pdev) { @@ -4550,6 +4582,7 @@ failed_register: fec_enet_mii_remove(fep); failed_mii_init: failed_irq: + fec_enet_deinit(ndev); failed_init: fec_ptp_stop(pdev); failed_reset: @@ -4613,10 +4646,11 @@ fec_drv_remove(struct platform_device *pdev) pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); + fec_enet_deinit(ndev); free_netdev(ndev); } -static int __maybe_unused fec_suspend(struct device *dev) +static int fec_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); @@ -4669,7 +4703,7 @@ static int __maybe_unused fec_suspend(struct device *dev) return 0; } -static int __maybe_unused fec_resume(struct device *dev) 
+static int fec_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); @@ -4724,7 +4758,7 @@ failed_clk: return ret; } -static int __maybe_unused fec_runtime_suspend(struct device *dev) +static int fec_runtime_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); @@ -4735,7 +4769,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev) return 0; } -static int __maybe_unused fec_runtime_resume(struct device *dev) +static int fec_runtime_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); @@ -4756,20 +4790,20 @@ failed_clk_ipg: } static const struct dev_pm_ops fec_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) - SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) + RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) }; static struct platform_driver fec_driver = { .driver = { .name = DRIVER_NAME, - .pm = &fec_pm_ops, + .pm = pm_ptr(&fec_pm_ops), .of_match_table = fec_dt_ids, .suppress_bind_attrs = true, }, .id_table = fec_devtype, .probe = fec_probe, - .remove_new = fec_drv_remove, + .remove = fec_drv_remove, }; module_platform_driver(fec_driver); diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index ebae71ec26c6..2bfaf14f65c8 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -1040,7 +1040,7 @@ static struct platform_driver mpc52xx_fec_driver = { .of_match_table = mpc52xx_fec_match, }, .probe = mpc52xx_fec_probe, - .remove_new = mpc52xx_fec_remove, + .remove = mpc52xx_fec_remove, #ifdef CONFIG_PM .suspend = mpc52xx_fec_of_suspend, .resume = mpc52xx_fec_of_resume, diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c index 39689826cc8f..3d073f0fae63 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c @@ -94,7 +94,7 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of) goto out_free; } - snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); + snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res.start); bus->priv = priv; bus->parent = dev; @@ -144,7 +144,7 @@ struct platform_driver mpc52xx_fec_mdio_driver = { .of_match_table = mpc52xx_fec_mdio_match, }, .probe = mpc52xx_fec_mdio_probe, - .remove_new = mpc52xx_fec_mdio_remove, + .remove = mpc52xx_fec_mdio_remove, }; /* let fec driver call it, since this has to be registered before it */ diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 181d9bfbee22..876d90832596 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -30,7 +30,6 @@ #include <linux/phy.h> #include <linux/fec.h> #include <linux/of.h> -#include <linux/of_gpio.h> #include <linux/of_net.h> #include "fec.h" @@ -84,13 +83,36 @@ #define FEC_CC_MULT (1 << 31) #define FEC_COUNTER_PERIOD (1 << 31) #define PPS_OUPUT_RELOAD_PERIOD NSEC_PER_SEC -#define FEC_CHANNLE_0 0 -#define DEFAULT_PPS_CHANNEL FEC_CHANNLE_0 +#define DEFAULT_PPS_CHANNEL 0 #define FEC_PTP_MAX_NSEC_PERIOD 4000000000ULL #define FEC_PTP_MAX_NSEC_COUNTER 0x80000000ULL /** + * fec_ptp_read - read raw cycle counter (to be used by time counter) + * @cc: the cyclecounter 
structure + * + * this function reads the cyclecounter registers and is called by the + * cyclecounter structure used to construct a ns counter from the + * arbitrary fixed point registers + */ +static u64 fec_ptp_read(const struct cyclecounter *cc) +{ + struct fec_enet_private *fep = + container_of(cc, struct fec_enet_private, cc); + u32 tempval; + + tempval = readl(fep->hwp + FEC_ATIME_CTRL); + tempval |= FEC_T_CTRL_CAPTURE; + writel(tempval, fep->hwp + FEC_ATIME_CTRL); + + if (fep->quirks & FEC_QUIRK_BUG_CAPTURE) + udelay(1); + + return readl(fep->hwp + FEC_ATIME); +} + +/** * fec_ptp_enable_pps * @fep: the fec_enet_private structure handle * @enable: enable the channel pps output @@ -104,14 +126,13 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) struct timespec64 ts; u64 ns; - if (fep->pps_enable == enable) - return 0; - - fep->pps_channel = DEFAULT_PPS_CHANNEL; - fep->reload_period = PPS_OUPUT_RELOAD_PERIOD; - spin_lock_irqsave(&fep->tmreg_lock, flags); + if (fep->pps_enable == enable) { + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + return 0; + } + if (enable) { /* clear capture or output compare interrupt status if have. */ @@ -137,7 +158,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) * NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds * to current timer would be next second. */ - tempval = fep->cc.read(&fep->cc); + tempval = fec_ptp_read(&fep->cc); /* Convert the ptp local counter to 1588 timestamp */ ns = timecounter_cyc2time(&fep->tc, tempval); ts = ns_to_timespec64(ns); @@ -212,13 +233,7 @@ static int fec_ptp_pps_perout(struct fec_enet_private *fep) timecounter_read(&fep->tc); /* Get the current ptp hardware time counter */ - temp_val = readl(fep->hwp + FEC_ATIME_CTRL); - temp_val |= FEC_T_CTRL_CAPTURE; - writel(temp_val, fep->hwp + FEC_ATIME_CTRL); - if (fep->quirks & FEC_QUIRK_BUG_CAPTURE) - udelay(1); - - ptp_hc = readl(fep->hwp + FEC_ATIME); + ptp_hc = fec_ptp_read(&fep->cc); /* Convert the ptp local counter to 1588 timestamp */ curr_time = timecounter_cyc2time(&fep->tc, ptp_hc); @@ -273,30 +288,6 @@ static enum hrtimer_restart fec_ptp_pps_perout_handler(struct hrtimer *timer) } /** - * fec_ptp_read - read raw cycle counter (to be used by time counter) - * @cc: the cyclecounter structure - * - * this function reads the cyclecounter registers and is called by the - * cyclecounter structure used to construct a ns counter from the - * arbitrary fixed point registers - */ -static u64 fec_ptp_read(const struct cyclecounter *cc) -{ - struct fec_enet_private *fep = - container_of(cc, struct fec_enet_private, cc); - u32 tempval; - - tempval = readl(fep->hwp + FEC_ATIME_CTRL); - tempval |= FEC_T_CTRL_CAPTURE; - writel(tempval, fep->hwp + FEC_ATIME_CTRL); - - if (fep->quirks & FEC_QUIRK_BUG_CAPTURE) - udelay(1); - - return readl(fep->hwp + FEC_ATIME); -} - -/** * fec_ptp_start_cyclecounter - create the cycle counter from hw * @ndev: network device * @@ -532,6 +523,8 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp, int ret = 0; if (rq->type == PTP_CLK_REQ_PPS) { + fep->reload_period = PPS_OUPUT_RELOAD_PERIOD; + ret = fec_ptp_enable_pps(fep, on); return ret; @@ -540,10 +533,9 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp, if (rq->perout.flags) return -EOPNOTSUPP; - if (rq->perout.index != DEFAULT_PPS_CHANNEL) + if (rq->perout.index != fep->pps_channel) return -EOPNOTSUPP; - fep->pps_channel = DEFAULT_PPS_CHANNEL; period.tv_sec = rq->perout.period.sec; period.tv_nsec = rq->perout.period.nsec; 
period_ns = timespec64_to_ns(&period); @@ -711,12 +703,16 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx) { struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); + struct device_node *np = fep->pdev->dev.of_node; int irq; int ret; fep->ptp_caps.owner = THIS_MODULE; strscpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name)); + fep->pps_channel = DEFAULT_PPS_CHANNEL; + of_property_read_u32(np, "fsl,pps-channel", &fep->pps_channel); + fep->ptp_caps.max_adj = 250000000; fep->ptp_caps.n_alarm = 0; fep->ptp_caps.n_ext_ts = 0; @@ -742,8 +738,8 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx) INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep); - hrtimer_init(&fep->perout_timer, CLOCK_REALTIME, HRTIMER_MODE_REL); - fep->perout_timer.function = fec_ptp_pps_perout_handler; + hrtimer_setup(&fep->perout_timer, fec_ptp_pps_perout_handler, CLOCK_REALTIME, + HRTIMER_MODE_REL); irq = platform_get_irq_byname_optional(pdev, "pps"); if (irq < 0) @@ -768,11 +764,64 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx) schedule_delayed_work(&fep->time_keep, HZ); } +void fec_ptp_save_state(struct fec_enet_private *fep) +{ + unsigned long flags; + u32 atime_inc_corr; + + spin_lock_irqsave(&fep->tmreg_lock, flags); + + fep->ptp_saved_state.pps_enable = fep->pps_enable; + + fep->ptp_saved_state.ns_phc = timecounter_read(&fep->tc); + fep->ptp_saved_state.ns_sys = ktime_get_ns(); + + fep->ptp_saved_state.at_corr = readl(fep->hwp + FEC_ATIME_CORR); + atime_inc_corr = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_CORR_MASK; + fep->ptp_saved_state.at_inc_corr = (u8)(atime_inc_corr >> FEC_T_INC_CORR_OFFSET); + + spin_unlock_irqrestore(&fep->tmreg_lock, flags); +} + +/* Restore PTP functionality after a reset */ +void fec_ptp_restore_state(struct fec_enet_private *fep) +{ + u32 atime_inc = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK; + unsigned long flags; + u32 counter; + u64 ns; + + spin_lock_irqsave(&fep->tmreg_lock, flags); + + /* Reset turned it off, so adjust our status flag */ + fep->pps_enable = 0; + + writel(fep->ptp_saved_state.at_corr, fep->hwp + FEC_ATIME_CORR); + atime_inc |= ((u32)fep->ptp_saved_state.at_inc_corr) << FEC_T_INC_CORR_OFFSET; + writel(atime_inc, fep->hwp + FEC_ATIME_INC); + + ns = ktime_get_ns() - fep->ptp_saved_state.ns_sys + fep->ptp_saved_state.ns_phc; + counter = ns & fep->cc.mask; + writel(counter, fep->hwp + FEC_ATIME); + timecounter_init(&fep->tc, &fep->cc, ns); + + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + + /* Restart PPS if needed */ + if (fep->ptp_saved_state.pps_enable) { + /* Re-enable PPS */ + fec_ptp_enable_pps(fep, 1); + } +} + void fec_ptp_stop(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); + if (fep->pps_enable) + fec_ptp_enable_pps(fep, 0); + cancel_delayed_work_sync(&fep->time_keep); hrtimer_cancel(&fep->perout_timer); if (fep->ptp_clock) diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index d96028f01770..11887458f050 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -24,7 +24,6 @@ /* General defines */ #define FMAN_LIODN_TBL 64 /* size of LIODN table */ -#define MAX_NUM_OF_MACS 10 #define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4 #define BASE_RX_PORTID 0x08 #define BASE_TX_PORTID 0x28 @@ -2691,13 +2690,12 @@ static struct fman *read_dts_node(struct platform_device *of_dev) { 
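fec_ptp_save_state() and fec_ptp_restore_state() above bridge the PHC across a MAC reset: the restored time is the saved PHC timestamp plus however much the monotonic system clock advanced while the block was held in reset. A toy illustration of that arithmetic (all timestamps are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ns_phc = 1000000000ULL;	/* PHC time saved before reset    */
	uint64_t ns_sys = 5000000000ULL;	/* system time saved before reset */
	uint64_t ns_now = 5000300000ULL;	/* system time after the reset    */

	/* Mirrors: ns = ktime_get_ns() - ns_sys + ns_phc */
	uint64_t ns = ns_now - ns_sys + ns_phc;

	printf("restored PHC: %llu ns (+%llu ns across the reset)\n",
	       (unsigned long long)ns, (unsigned long long)(ns - ns_phc));
	return 0;
}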
struct fman *fman; struct device_node *fm_node, *muram_node; + void __iomem *base_addr; struct resource *res; u32 val, range[2]; int err, irq; struct clk *clk; u32 clk_rate; - phys_addr_t phys_base_addr; - resource_size_t mem_size; fman = kzalloc(sizeof(*fman), GFP_KERNEL); if (!fman) @@ -2725,18 +2723,6 @@ static struct fman *read_dts_node(struct platform_device *of_dev) goto fman_node_put; fman->dts_params.err_irq = err; - /* Get the FM address */ - res = platform_get_resource(of_dev, IORESOURCE_MEM, 0); - if (!res) { - err = -EINVAL; - dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n", - __func__); - goto fman_node_put; - } - - phys_base_addr = res->start; - mem_size = resource_size(res); - clk = of_clk_get(fm_node, 0); if (IS_ERR(clk)) { err = PTR_ERR(clk); @@ -2804,24 +2790,16 @@ static struct fman *read_dts_node(struct platform_device *of_dev) } } - fman->dts_params.res = - devm_request_mem_region(&of_dev->dev, phys_base_addr, - mem_size, "fman"); - if (!fman->dts_params.res) { - err = -EBUSY; - dev_err(&of_dev->dev, "%s: request_mem_region() failed\n", - __func__); - goto fman_free; - } - - fman->dts_params.base_addr = - devm_ioremap(&of_dev->dev, phys_base_addr, mem_size); - if (!fman->dts_params.base_addr) { - err = -ENOMEM; + base_addr = devm_platform_get_and_ioremap_resource(of_dev, 0, &res); + if (IS_ERR(base_addr)) { + err = PTR_ERR(base_addr); dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__); goto fman_free; } + fman->dts_params.base_addr = base_addr; + fman->dts_params.res = res; + fman->dev = &of_dev->dev; err = of_platform_populate(fm_node, NULL, NULL, &of_dev->dev); diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h index 2ea575a46675..74eb62eba0d7 100644 --- a/drivers/net/ethernet/freescale/fman/fman.h +++ b/drivers/net/ethernet/freescale/fman/fman.h @@ -74,6 +74,9 @@ #define BM_MAX_NUM_OF_POOLS 64 /* Buffers pools */ #define FMAN_PORT_MAX_EXT_POOLS_NUM 8 /* External BM pools per Rx port */ +/* General defines */ +#define MAX_NUM_OF_MACS 10 + struct fman; /* FMan data */ /* Enum for defining port types */ diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index 3088da7adf0f..51402dff72c5 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -755,12 +755,12 @@ static struct fman_mac *pcs_to_dtsec(struct phylink_pcs *pcs) return container_of(pcs, struct fman_mac, pcs); } -static void dtsec_pcs_get_state(struct phylink_pcs *pcs, +static void dtsec_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode, struct phylink_link_state *state) { struct fman_mac *dtsec = pcs_to_dtsec(pcs); - phylink_mii_c22_pcs_get_state(dtsec->tbidev, state); + phylink_mii_c22_pcs_get_state(dtsec->tbidev, neg_mode, state); } static int dtsec_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, @@ -1415,7 +1415,6 @@ int dtsec_initialization(struct mac_device *mac_dev, mac_dev->set_exception = dtsec_set_exception; mac_dev->set_allmulti = dtsec_set_allmulti; mac_dev->set_tstamp = dtsec_set_tstamp; - mac_dev->set_multi = fman_set_multi; mac_dev->enable = dtsec_enable; mac_dev->disable = dtsec_disable; @@ -1447,7 +1446,6 @@ int dtsec_initialization(struct mac_device *mac_dev, goto _return_fm_mac_free; } dtsec->pcs.ops = &dtsec_pcs_ops; - dtsec->pcs.neg_mode = true; dtsec->pcs.poll = true; supported = mac_dev->phylink_config.supported_interfaces; diff --git 
a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 758535adc9ff..3925441143fa 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -267,7 +267,6 @@ struct memac_cfg { bool reset_on_init; bool pause_ignore; bool promiscuous_mode_enable; - struct fixed_phy_status *fixed_link; u16 max_frame_length; u16 pause_quanta; u32 tx_ipg_length; @@ -1067,7 +1066,6 @@ int memac_initialization(struct mac_device *mac_dev, struct fman_mac_params *params) { int err; - struct device_node *fixed; struct phylink_pcs *pcs; struct fman_mac *memac; unsigned long capabilities; @@ -1089,7 +1087,6 @@ int memac_initialization(struct mac_device *mac_dev, mac_dev->set_exception = memac_set_exception; mac_dev->set_allmulti = memac_set_allmulti; mac_dev->set_tstamp = memac_set_tstamp; - mac_dev->set_multi = fman_set_multi; mac_dev->enable = memac_enable; mac_dev->disable = memac_disable; @@ -1223,18 +1220,15 @@ int memac_initialization(struct mac_device *mac_dev, memac->rgmii_no_half_duplex = true; /* Most boards should use MLO_AN_INBAND, but existing boards don't have - * a managed property. Default to MLO_AN_INBAND if nothing else is - * specified. We need to be careful and not enable this if we have a - * fixed link or if we are using MII or RGMII, since those - * configurations modes don't use in-band autonegotiation. + * a managed property. Default to MLO_AN_INBAND rather than MLO_AN_PHY. + * Phylink will allow this to be overriden by a fixed link. We need to + * be careful and not enable this if we are using MII or RGMII, since + * those configurations modes don't use in-band autonegotiation. */ - fixed = of_get_child_by_name(mac_node, "fixed-link"); - if (!fixed && !of_property_read_bool(mac_node, "fixed-link") && - !of_property_read_bool(mac_node, "managed") && + if (!of_property_read_bool(mac_node, "managed") && mac_dev->phy_if != PHY_INTERFACE_MODE_MII && !phy_interface_mode_is_rgmii(mac_dev->phy_if)) - mac_dev->phylink_config.ovr_an_inband = true; - of_node_put(fixed); + mac_dev->phylink_config.default_an_inband = true; err = memac_init(mac_dev->fman_mac); if (err < 0) diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c index f557d68e5b76..1ed245a2ee01 100644 --- a/drivers/net/ethernet/freescale/fman/fman_muram.c +++ b/drivers/net/ethernet/freescale/fman/fman_muram.c @@ -12,7 +12,6 @@ struct muram_info { struct gen_pool *pool; void __iomem *vbase; - size_t size; phys_addr_t pbase; }; diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index 406e75e9e5ea..e977389f7088 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -987,7 +987,7 @@ static int init_low_level_driver(struct fman_port *port) return -ENODEV; } - /* The code bellow is a trick so the FM will not release the buffer + /* The code below is a trick so the FM will not release the buffer * to BM nor will try to enqueue the frame to QM */ if (port->port_type == FMAN_PORT_TYPE_TX) { @@ -1748,7 +1748,7 @@ static int fman_port_probe(struct platform_device *of_dev) struct resource res; struct resource *dev_res; u32 val; - int err = 0, lenp; + int err = 0; enum fman_port_type port_type; u16 port_speed; u8 port_id; @@ -1795,7 +1795,7 @@ static int fman_port_probe(struct platform_device *of_dev) if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) 
{ port_type = FMAN_PORT_TYPE_TX; port_speed = 1000; - if (of_find_property(port_node, "fsl,fman-10g-port", &lenp)) + if (of_property_read_bool(port_node, "fsl,fman-10g-port")) port_speed = 10000; } else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) { @@ -1808,7 +1808,7 @@ static int fman_port_probe(struct platform_device *of_dev) } else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) { port_type = FMAN_PORT_TYPE_RX; port_speed = 1000; - if (of_find_property(port_node, "fsl,fman-10g-port", &lenp)) + if (of_property_read_bool(port_node, "fsl,fman-10g-port")) port_speed = 10000; } else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) { diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c index c2261d26db5b..fecfca6eba03 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c @@ -771,7 +771,6 @@ int tgec_initialization(struct mac_device *mac_dev, mac_dev->set_exception = tgec_set_exception; mac_dev->set_allmulti = tgec_set_allmulti; mac_dev->set_tstamp = tgec_set_tstamp; - mac_dev->set_multi = fman_set_multi; mac_dev->enable = tgec_enable; mac_dev->disable = tgec_disable; diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 9767586b4eb3..a39fcea6a77a 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -32,8 +32,6 @@ MODULE_DESCRIPTION("FSL FMan MAC API based driver"); struct mac_priv_s { u8 cell_index; struct fman *fman; - /* List of multicast addresses */ - struct list_head mc_addr_list; struct platform_device *eth_dev; u16 speed; }; @@ -57,44 +55,6 @@ static void mac_exception(struct mac_device *mac_dev, __func__, ex); } -int fman_set_multi(struct net_device *net_dev, struct mac_device *mac_dev) -{ - struct mac_priv_s *priv; - struct mac_address *old_addr, *tmp; - struct netdev_hw_addr *ha; - int err; - enet_addr_t *addr; - - priv = mac_dev->priv; - - /* Clear previous address list */ - list_for_each_entry_safe(old_addr, tmp, &priv->mc_addr_list, list) { - addr = (enet_addr_t *)old_addr->addr; - err = mac_dev->remove_hash_mac_addr(mac_dev->fman_mac, addr); - if (err < 0) - return err; - - list_del(&old_addr->list); - kfree(old_addr); - } - - /* Add all the addresses from the new list */ - netdev_for_each_mc_addr(ha, net_dev) { - addr = (enet_addr_t *)ha->addr; - err = mac_dev->add_hash_mac_addr(mac_dev->fman_mac, addr); - if (err < 0) - return err; - - tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC); - if (!tmp) - return -ENOMEM; - - ether_addr_copy(tmp->addr, ha->addr); - list_add(&tmp->list, &priv->mc_addr_list); - } - return 0; -} - static DEFINE_MUTEX(eth_lock); static struct platform_device *dpaa_eth_add_device(int fman_id, @@ -181,8 +141,6 @@ static int mac_probe(struct platform_device *_of_dev) mac_dev->priv = priv; mac_dev->dev = dev; - INIT_LIST_HEAD(&priv->mc_addr_list); - /* Get the FM node */ dev_node = of_get_parent(mac_node); if (!dev_node) { @@ -197,55 +155,72 @@ static int mac_probe(struct platform_device *_of_dev) err = -EINVAL; goto _return_of_node_put; } + mac_dev->fman_dev = &of_dev->dev; /* Get the FMan cell-index */ err = of_property_read_u32(dev_node, "cell-index", &val); if (err) { dev_err(dev, "failed to read cell-index for %pOF\n", dev_node); err = -EINVAL; - goto _return_of_node_put; + goto _return_dev_put; } /* cell-index 0 => FMan id 1 */ fman_id = (u8)(val + 1); - priv->fman = fman_bind(&of_dev->dev); + 
priv->fman = fman_bind(mac_dev->fman_dev); if (!priv->fman) { dev_err(dev, "fman_bind(%pOF) failed\n", dev_node); err = -ENODEV; - goto _return_of_node_put; + goto _return_dev_put; } + /* Two references have been taken in of_find_device_by_node() + * and fman_bind(). Release one of them here. The second one + * will be released in mac_remove(). + */ + put_device(mac_dev->fman_dev); of_node_put(dev_node); + dev_node = NULL; /* Get the address of the memory mapped registers */ mac_dev->res = platform_get_mem_or_io(_of_dev, 0); if (!mac_dev->res) { dev_err(dev, "could not get registers\n"); - return -EINVAL; + err = -EINVAL; + goto _return_dev_put; } err = devm_request_resource(dev, fman_get_mem_region(priv->fman), mac_dev->res); if (err) { dev_err_probe(dev, err, "could not request resource\n"); - return err; + goto _return_dev_put; } mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start, resource_size(mac_dev->res)); if (!mac_dev->vaddr) { dev_err(dev, "devm_ioremap() failed\n"); - return -EIO; + err = -EIO; + goto _return_dev_put; } - if (!of_device_is_available(mac_node)) - return -ENODEV; + if (!of_device_is_available(mac_node)) { + err = -ENODEV; + goto _return_dev_put; + } /* Get the cell-index */ err = of_property_read_u32(mac_node, "cell-index", &val); if (err) { dev_err(dev, "failed to read cell-index for %pOF\n", mac_node); - return -EINVAL; + err = -EINVAL; + goto _return_dev_put; + } + if (val >= MAX_NUM_OF_MACS) { + dev_err(dev, "cell-index value is too big for %pOF\n", mac_node); + err = -EINVAL; + goto _return_dev_put; } priv->cell_index = (u8)val; @@ -259,22 +234,26 @@ static int mac_probe(struct platform_device *_of_dev) if (unlikely(nph < 0)) { dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n", mac_node); - return nph; + err = nph; + goto _return_dev_put; } if (nph != ARRAY_SIZE(mac_dev->port)) { dev_err(dev, "Not supported number of fman-ports handles of mac node %pOF from device tree\n", mac_node); - return -EINVAL; + err = -EINVAL; + goto _return_dev_put; } - for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { + /* PORT_NUM determines the size of the port array */ + for (i = 0; i < PORT_NUM; i++) { /* Find the port node */ dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i); if (!dev_node) { dev_err(dev, "of_parse_phandle(%pOF, fsl,fman-ports) failed\n", mac_node); - return -EINVAL; + err = -EINVAL; + goto _return_dev_arr_put; } of_dev = of_find_device_by_node(dev_node); @@ -282,17 +261,24 @@ static int mac_probe(struct platform_device *_of_dev) dev_err(dev, "of_find_device_by_node(%pOF) failed\n", dev_node); err = -EINVAL; - goto _return_of_node_put; + goto _return_dev_arr_put; } + mac_dev->fman_port_devs[i] = &of_dev->dev; - mac_dev->port[i] = fman_port_bind(&of_dev->dev); + mac_dev->port[i] = fman_port_bind(mac_dev->fman_port_devs[i]); if (!mac_dev->port[i]) { dev_err(dev, "dev_get_drvdata(%pOF) failed\n", dev_node); err = -EINVAL; - goto _return_of_node_put; + goto _return_dev_arr_put; } + /* Two references have been taken in of_find_device_by_node() + * and fman_port_bind(). Release one of them here. The second + * one will be released in mac_remove(). 
+ */ + put_device(mac_dev->fman_port_devs[i]); of_node_put(dev_node); + dev_node = NULL; } /* Get the PHY connection type */ @@ -312,7 +298,7 @@ static int mac_probe(struct platform_device *_of_dev) err = init(mac_dev, mac_node, &params); if (err < 0) - return err; + goto _return_dev_arr_put; if (!is_zero_ether_addr(mac_dev->addr)) dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr); @@ -327,6 +313,12 @@ static int mac_probe(struct platform_device *_of_dev) return err; +_return_dev_arr_put: + /* mac_dev is kzalloc'ed */ + for (i = 0; i < PORT_NUM; i++) + put_device(mac_dev->fman_port_devs[i]); +_return_dev_put: + put_device(mac_dev->fman_dev); _return_of_node_put: of_node_put(dev_node); return err; @@ -335,6 +327,11 @@ _return_of_node_put: static void mac_remove(struct platform_device *pdev) { struct mac_device *mac_dev = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < PORT_NUM; i++) + put_device(mac_dev->fman_port_devs[i]); + put_device(mac_dev->fman_dev); platform_device_unregister(mac_dev->priv->eth_dev); } @@ -345,7 +342,7 @@ static struct platform_driver mac_driver = { .of_match_table = mac_match, }, .probe = mac_probe, - .remove_new = mac_remove, + .remove = mac_remove, }; builtin_platform_driver(mac_driver); diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h index fe747915cc73..955ace338965 100644 --- a/drivers/net/ethernet/freescale/fman/mac.h +++ b/drivers/net/ethernet/freescale/fman/mac.h @@ -19,12 +19,13 @@ struct fman_mac; struct mac_priv_s; +#define PORT_NUM 2 struct mac_device { void __iomem *vaddr; struct device *dev; struct resource *res; u8 addr[ETH_ALEN]; - struct fman_port *port[2]; + struct fman_port *port[PORT_NUM]; struct phylink *phylink; struct phylink_config phylink_config; phy_interface_t phy_if; @@ -39,8 +40,6 @@ struct mac_device { int (*change_addr)(struct fman_mac *mac_dev, const enet_addr_t *enet_addr); int (*set_allmulti)(struct fman_mac *mac_dev, bool enable); int (*set_tstamp)(struct fman_mac *mac_dev, bool enable); - int (*set_multi)(struct net_device *net_dev, - struct mac_device *mac_dev); int (*set_exception)(struct fman_mac *mac_dev, enum fman_mac_exceptions exception, bool enable); int (*add_hash_mac_addr)(struct fman_mac *mac_dev, @@ -52,6 +51,9 @@ struct mac_device { struct fman_mac *fman_mac; struct mac_priv_s *priv; + + struct device *fman_dev; + struct device *fman_port_devs[PORT_NUM]; }; static inline struct mac_device diff --git a/drivers/net/ethernet/freescale/fs_enet/Kconfig b/drivers/net/ethernet/freescale/fs_enet/Kconfig index 7f20840fde07..57013bf14d7c 100644 --- a/drivers/net/ethernet/freescale/fs_enet/Kconfig +++ b/drivers/net/ethernet/freescale/fs_enet/Kconfig @@ -3,7 +3,7 @@ config FS_ENET tristate "Freescale Ethernet Driver" depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x) select MII - select PHYLIB + select PHYLINK config FS_ENET_MPC5121_FEC def_bool y if (FS_ENET && PPC_MPC512x) diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index cf392faa6105..f563692a4a00 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
* @@ -9,10 +10,6 @@ * * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com> * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se> - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. */ #include <linux/module.h> @@ -29,17 +26,18 @@ #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> -#include <linux/mii.h> #include <linux/ethtool.h> #include <linux/bitops.h> #include <linux/fs.h> #include <linux/platform_device.h> #include <linux/phy.h> +#include <linux/phylink.h> #include <linux/property.h> #include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/pgtable.h> +#include <linux/rtnetlink.h> #include <linux/vmalloc.h> #include <asm/irq.h> @@ -72,6 +70,13 @@ static void fs_set_multicast_list(struct net_device *dev) (*fep->ops->set_multicast_list)(dev); } +static int fs_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + return phylink_mii_ioctl(fep->phylink, ifr, cmd); +} + static void skb_align(struct sk_buff *skb, int align) { int off = ((unsigned long)skb->data) & (align - 1); @@ -84,15 +89,14 @@ static void skb_align(struct sk_buff *skb, int align) static int fs_enet_napi(struct napi_struct *napi, int budget) { struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi); - struct net_device *dev = fep->ndev; const struct fs_platform_info *fpi = fep->fpi; - cbd_t __iomem *bdp; + struct net_device *dev = fep->ndev; + int curidx, dirtyidx, received = 0; + int do_wake = 0, do_restart = 0; + int tx_left = TX_RING_SIZE; struct sk_buff *skb, *skbn; - int received = 0; + cbd_t __iomem *bdp; u16 pkt_len, sc; - int curidx; - int dirtyidx, do_wake, do_restart; - int tx_left = TX_RING_SIZE; spin_lock(&fep->tx_lock); bdp = fep->dirty_tx; @@ -100,7 +104,6 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) /* clear status bits for napi*/ (*fep->ops->napi_clear_event)(dev); - do_wake = do_restart = 0; while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0 && tx_left) { dirtyidx = bdp - fep->tx_bd_base; @@ -109,12 +112,9 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) skb = fep->tx_skbuff[dirtyidx]; - /* - * Check for errors. - */ + /* Check for errors. */ if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) { - if (sc & BD_ENET_TX_HB) /* No heartbeat */ dev->stats.tx_heartbeat_errors++; if (sc & BD_ENET_TX_LC) /* Late collision */ @@ -130,16 +130,16 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) dev->stats.tx_errors++; do_restart = 1; } - } else + } else { dev->stats.tx_packets++; + } if (sc & BD_ENET_TX_READY) { dev_warn(fep->dev, "HEY! Enet xmit interrupt and TX_READY.\n"); } - /* - * Deferred means some collisions occurred during transmit, + /* Deferred means some collisions occurred during transmit, * but we eventually sent the packet OK. */ if (sc & BD_ENET_TX_DEF) @@ -153,25 +153,20 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), CBDR_DATLEN(bdp), DMA_TO_DEVICE); - /* - * Free the sk buffer associated with this last transmit. - */ + /* Free the sk buffer associated with this last transmit. */ if (skb) { dev_kfree_skb(skb); fep->tx_skbuff[dirtyidx] = NULL; } - /* - * Update pointer to next buffer descriptor to be transmitted. 
+ /* Update pointer to next buffer descriptor to be transmitted. */ if ((sc & BD_ENET_TX_WRAP) == 0) bdp++; else bdp = fep->tx_bd_base; - /* - * Since we have freed up a buffer, the ring is no longer - * full. + /* Since we have freed up a buffer, the ring is no longer full. */ if (++fep->tx_free == MAX_SKB_FRAGS) do_wake = 1; @@ -188,8 +183,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) if (do_wake) netif_wake_queue(dev); - /* - * First, grab all of the stats for the incoming packet. + /* First, grab all of the stats for the incoming packet. * These get messed up if we get called due to a busy condition. */ bdp = fep->cur_rx; @@ -198,16 +192,13 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) received < budget) { curidx = bdp - fep->rx_bd_base; - /* - * Since we have allocated space to hold a complete frame, + /* Since we have allocated space to hold a complete frame, * the last indicator should be set. */ if ((sc & BD_ENET_RX_LAST) == 0) dev_warn(fep->dev, "rcv is not +last\n"); - /* - * Check for errors. - */ + /* Check for errors. */ if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL | BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { dev->stats.rx_errors++; @@ -228,9 +219,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) } else { skb = fep->rx_skbuff[curidx]; - /* - * Process the incoming frame. - */ + /* Process the incoming frame */ dev->stats.rx_packets++; pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */ dev->stats.rx_bytes += pkt_len + 4; @@ -238,15 +227,15 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) if (pkt_len <= fpi->rx_copybreak) { /* +2 to make IP header L1 cache aligned */ skbn = netdev_alloc_skb(dev, pkt_len + 2); - if (skbn != NULL) { + if (skbn) { skb_reserve(skbn, 2); /* align IP header */ - skb_copy_from_linear_data(skb, - skbn->data, pkt_len); + skb_copy_from_linear_data(skb, skbn->data, + pkt_len); swap(skb, skbn); dma_sync_single_for_cpu(fep->dev, - CBDR_BUFADDR(bdp), - L1_CACHE_ALIGN(pkt_len), - DMA_FROM_DEVICE); + CBDR_BUFADDR(bdp), + L1_CACHE_ALIGN(pkt_len), + DMA_FROM_DEVICE); } } else { skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE); @@ -256,20 +245,18 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) skb_align(skbn, ENET_RX_ALIGN); - dma_unmap_single(fep->dev, - CBDR_BUFADDR(bdp), - L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), - DMA_FROM_DEVICE); + dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE); - dma = dma_map_single(fep->dev, - skbn->data, - L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), - DMA_FROM_DEVICE); + dma = dma_map_single(fep->dev, skbn->data, + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE); CBDW_BUFADDR(bdp, dma); } } - if (skbn != NULL) { + if (skbn) { skb_put(skb, pkt_len); /* Make room */ skb->protocol = eth_type_trans(skb, dev); received++; @@ -284,9 +271,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) CBDW_DATLEN(bdp, 0); CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY); - /* - * Update BD pointer to next entry. - */ + /* Update BD pointer to next entry */ if ((sc & BD_ENET_RX_WRAP) == 0) bdp++; else @@ -308,19 +293,16 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) return budget; } -/* - * The interrupt handler. +/* The interrupt handler. * This is called from the MPC core interrupt. 
*/ static irqreturn_t fs_enet_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; + u32 int_events, int_clr_events; struct fs_enet_private *fep; - u32 int_events; - u32 int_clr_events; - int nr, napi_ok; - int handled; + int nr, napi_ok, handled; fep = netdev_priv(dev); @@ -342,12 +324,12 @@ fs_enet_interrupt(int irq, void *dev_id) (*fep->ops->napi_disable)(dev); (*fep->ops->clear_int_events)(dev, fep->ev_napi); - /* NOTE: it is possible for FCCs in NAPI mode */ - /* to submit a spurious interrupt while in poll */ + /* NOTE: it is possible for FCCs in NAPI mode + * to submit a spurious interrupt while in poll + */ if (napi_ok) __napi_schedule(&fep->napi); } - } handled = nr > 0; @@ -357,45 +339,40 @@ fs_enet_interrupt(int irq, void *dev_id) void fs_init_bds(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - cbd_t __iomem *bdp; struct sk_buff *skb; + cbd_t __iomem *bdp; int i; fs_cleanup_bds(dev); - fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; + fep->dirty_tx = fep->tx_bd_base; + fep->cur_tx = fep->tx_bd_base; fep->tx_free = fep->tx_ring; fep->cur_rx = fep->rx_bd_base; - /* - * Initialize the receive buffer descriptors. - */ + /* Initialize the receive buffer descriptors */ for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE); - if (skb == NULL) + if (!skb) break; skb_align(skb, ENET_RX_ALIGN); fep->rx_skbuff[i] = skb; - CBDW_BUFADDR(bdp, - dma_map_single(fep->dev, skb->data, - L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), - DMA_FROM_DEVICE)); + CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skb->data, + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE)); CBDW_DATLEN(bdp, 0); /* zero */ CBDW_SC(bdp, BD_ENET_RX_EMPTY | ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP)); } - /* - * if we failed, fillup remainder - */ + + /* if we failed, fillup remainder */ for (; i < fep->rx_ring; i++, bdp++) { fep->rx_skbuff[i] = NULL; CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP); } - /* - * ...and the same for transmit. - */ + /* ...and the same for transmit. */ for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { fep->tx_skbuff[i] = NULL; CBDW_BUFADDR(bdp, 0); @@ -411,32 +388,30 @@ void fs_cleanup_bds(struct net_device *dev) cbd_t __iomem *bdp; int i; - /* - * Reset SKB transmit buffers. - */ + /* Reset SKB transmit buffers. */ for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { - if ((skb = fep->tx_skbuff[i]) == NULL) + skb = fep->tx_skbuff[i]; + if (!skb) continue; /* unmap */ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), - skb->len, DMA_TO_DEVICE); + skb->len, DMA_TO_DEVICE); fep->tx_skbuff[i] = NULL; dev_kfree_skb(skb); } - /* - * Reset SKB receive buffers - */ + /* Reset SKB receive buffers */ for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { - if ((skb = fep->rx_skbuff[i]) == NULL) + skb = fep->rx_skbuff[i]; + if (!skb) continue; /* unmap */ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), - L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), - DMA_FROM_DEVICE); + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE); fep->rx_skbuff[i] = NULL; @@ -444,12 +419,8 @@ void fs_cleanup_bds(struct net_device *dev) } } -/**********************************************************************************/ - #ifdef CONFIG_FS_ENET_MPC5121_FEC -/* - * MPC5121 FEC requeries 4-byte alignment for TX data buffer! - */ +/* MPC5121 FEC requires 4-byte alignment for TX data buffer! 
*/ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev, struct sk_buff *skb) { @@ -481,15 +452,12 @@ static netdev_tx_t fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); + int curidx, nr_frags, len; cbd_t __iomem *bdp; - int curidx; - u16 sc; - int nr_frags; skb_frag_t *frag; - int len; + u16 sc; #ifdef CONFIG_FS_ENET_MPC5121_FEC - int is_aligned = 1; - int i; + int i, is_aligned = 1; if (!IS_ALIGNED((unsigned long)skb->data, 4)) { is_aligned = 0; @@ -507,8 +475,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) if (!is_aligned) { skb = tx_skb_align_workaround(dev, skb); if (!skb) { - /* - * We have lost packet due to memory allocation error + /* We have lost packet due to memory allocation error * in tx_skb_align_workaround(). Hopefully original * skb is still valid, so try transmit it later. */ @@ -519,9 +486,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_lock(&fep->tx_lock); - /* - * Fill in a Tx ring entry - */ + /* Fill in a Tx ring entry */ bdp = fep->cur_tx; nr_frags = skb_shinfo(skb)->nr_frags; @@ -529,8 +494,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) netif_stop_queue(dev); spin_unlock(&fep->tx_lock); - /* - * Ooops. All transmit buffers are full. Bail out. + /* Ooops. All transmit buffers are full. Bail out. * This should not happen, since the tx queue should be stopped. */ dev_warn(fep->dev, "tx queue full!.\n"); @@ -543,12 +507,12 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) dev->stats.tx_bytes += len; if (nr_frags) len -= skb->data_len; + fep->tx_free -= nr_frags + 1; - /* - * Push the data cache so the CPM does not get stale memory data. + /* Push the data cache so the CPM does not get stale memory data. */ CBDW_BUFADDR(bdp, dma_map_single(fep->dev, - skb->data, len, DMA_TO_DEVICE)); + skb->data, len, DMA_TO_DEVICE)); CBDW_DATLEN(bdp, len); fep->mapped_as_page[curidx] = 0; @@ -585,9 +549,11 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) /* note that while FEC does not have this bit * it marks it as available for software use - * yay for hw reuse :) */ + * yay for hw reuse :) + */ if (skb->len <= 60) sc |= BD_ENET_TX_PAD; + CBDC_SC(bdp, BD_ENET_TX_STATS); CBDS_SC(bdp, sc); @@ -599,6 +565,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) bdp++; else bdp = fep->tx_bd_base; + fep->cur_tx = bdp; if (fep->tx_free < MAX_SKB_FRAGS) @@ -623,15 +590,21 @@ static void fs_timeout_work(struct work_struct *work) dev->stats.tx_errors++; - spin_lock_irqsave(&fep->lock, flags); + /* In the event a timeout was detected, but the netdev is brought down + * shortly after, it no longer makes sense to try to recover from the + * timeout. netif_running() will return false when called from the + * .ndo_close() callback. Calling the following recovery code while + * called from .ndo_close() could deadlock on rtnl. 
+ */ + if (!netif_running(dev)) + return; - if (dev->flags & IFF_UP) { - phy_stop(dev->phydev); - (*fep->ops->stop)(dev); - (*fep->ops->restart)(dev); - } + rtnl_lock(); + phylink_stop(fep->phylink); + phylink_start(fep->phylink); + rtnl_unlock(); - phy_start(dev->phydev); + spin_lock_irqsave(&fep->lock, flags); wake = fep->tx_free >= MAX_SKB_FRAGS && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY); spin_unlock_irqrestore(&fep->lock, flags); @@ -647,82 +620,37 @@ static void fs_timeout(struct net_device *dev, unsigned int txqueue) schedule_work(&fep->timeout_work); } -/*----------------------------------------------------------------------------- - * generic link-change handler - should be sufficient for most cases - *-----------------------------------------------------------------------------*/ -static void generic_adjust_link(struct net_device *dev) +static void fs_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) { - struct fs_enet_private *fep = netdev_priv(dev); - struct phy_device *phydev = dev->phydev; - int new_state = 0; - - if (phydev->link) { - /* adjust to duplex mode */ - if (phydev->duplex != fep->oldduplex) { - new_state = 1; - fep->oldduplex = phydev->duplex; - } - - if (phydev->speed != fep->oldspeed) { - new_state = 1; - fep->oldspeed = phydev->speed; - } - - if (!fep->oldlink) { - new_state = 1; - fep->oldlink = 1; - } - - if (new_state) - fep->ops->restart(dev); - } else if (fep->oldlink) { - new_state = 1; - fep->oldlink = 0; - fep->oldspeed = 0; - fep->oldduplex = -1; - } + struct net_device *ndev = to_net_dev(config->dev); + struct fs_enet_private *fep = netdev_priv(ndev); + unsigned long flags; - if (new_state && netif_msg_link(fep)) - phy_print_status(phydev); + spin_lock_irqsave(&fep->lock, flags); + fep->ops->restart(ndev, interface, speed, duplex); + spin_unlock_irqrestore(&fep->lock, flags); } - -static void fs_adjust_link(struct net_device *dev) +static void fs_mac_link_down(struct phylink_config *config, + unsigned int mode, phy_interface_t interface) { - struct fs_enet_private *fep = netdev_priv(dev); + struct net_device *ndev = to_net_dev(config->dev); + struct fs_enet_private *fep = netdev_priv(ndev); unsigned long flags; spin_lock_irqsave(&fep->lock, flags); - - if(fep->ops->adjust_link) - fep->ops->adjust_link(dev); - else - generic_adjust_link(dev); - + fep->ops->stop(ndev); spin_unlock_irqrestore(&fep->lock, flags); } -static int fs_init_phy(struct net_device *dev) +static void fs_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) { - struct fs_enet_private *fep = netdev_priv(dev); - struct phy_device *phydev; - phy_interface_t iface; - - fep->oldlink = 0; - fep->oldspeed = 0; - fep->oldduplex = -1; - - iface = fep->fpi->use_rmii ? - PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII; - - phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0, - iface); - if (!phydev) { - dev_err(&dev->dev, "Could not attach to PHY\n"); - return -ENODEV; - } - - return 0; + /* Nothing to do */ } static int fs_enet_open(struct net_device *dev) @@ -731,8 +659,9 @@ static int fs_enet_open(struct net_device *dev) int r; int err; - /* to initialize the fep->cur_rx,... */ - /* not doing this, will cause a crash in fs_enet_napi */ + /* to initialize the fep->cur_rx,... 
+ * not doing this, will cause a crash in fs_enet_napi + */ fs_init_bds(fep->ndev); napi_enable(&fep->napi); @@ -746,13 +675,13 @@ static int fs_enet_open(struct net_device *dev) return -EINVAL; } - err = fs_init_phy(dev); + err = phylink_of_phy_connect(fep->phylink, fep->dev->of_node, 0); if (err) { free_irq(fep->interrupt, dev); napi_disable(&fep->napi); return err; } - phy_start(dev->phydev); + phylink_start(fep->phylink); netif_start_queue(dev); @@ -765,28 +694,25 @@ static int fs_enet_close(struct net_device *dev) unsigned long flags; netif_stop_queue(dev); - netif_carrier_off(dev); napi_disable(&fep->napi); - cancel_work_sync(&fep->timeout_work); - phy_stop(dev->phydev); + cancel_work(&fep->timeout_work); + phylink_stop(fep->phylink); spin_lock_irqsave(&fep->lock, flags); spin_lock(&fep->tx_lock); (*fep->ops->stop)(dev); spin_unlock(&fep->tx_lock); spin_unlock_irqrestore(&fep->lock, flags); + phylink_disconnect_phy(fep->phylink); /* release any irqs */ - phy_disconnect(dev->phydev); free_irq(fep->interrupt, dev); return 0; } -/*************************************************************************/ - static void fs_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) + struct ethtool_drvinfo *info) { strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); } @@ -799,7 +725,7 @@ static int fs_get_regs_len(struct net_device *dev) } static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs, - void *p) + void *p) { struct fs_enet_private *fep = netdev_priv(dev); unsigned long flags; @@ -818,12 +744,14 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs, static u32 fs_get_msglevel(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); + return fep->msg_enable; } static void fs_set_msglevel(struct net_device *dev, u32 value) { struct fs_enet_private *fep = netdev_priv(dev); + fep->msg_enable = value; } @@ -865,6 +793,22 @@ static int fs_set_tunable(struct net_device *dev, return ret; } +static int fs_ethtool_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + return phylink_ethtool_ksettings_set(fep->phylink, cmd); +} + +static int fs_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + return phylink_ethtool_ksettings_get(fep->phylink, cmd); +} + static const struct ethtool_ops fs_ethtool_ops = { .get_drvinfo = fs_get_drvinfo, .get_regs_len = fs_get_regs_len, @@ -874,14 +818,12 @@ static const struct ethtool_ops fs_ethtool_ops = { .set_msglevel = fs_set_msglevel, .get_regs = fs_get_regs, .get_ts_info = ethtool_op_get_ts_info, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_link_ksettings = fs_ethtool_get_link_ksettings, + .set_link_ksettings = fs_ethtool_set_link_ksettings, .get_tunable = fs_get_tunable, .set_tunable = fs_set_tunable, }; -/**************************************************************************************/ - #ifdef CONFIG_FS_ENET_HAS_FEC #define IS_FEC(ops) ((ops) == &fs_fec_ops) #else @@ -894,7 +836,7 @@ static const struct net_device_ops fs_enet_netdev_ops = { .ndo_start_xmit = fs_enet_start_xmit, .ndo_tx_timeout = fs_timeout, .ndo_set_rx_mode = fs_set_multicast_list, - .ndo_eth_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = fs_eth_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, #ifdef 
CONFIG_NET_POLL_CONTROLLER @@ -902,17 +844,23 @@ static const struct net_device_ops fs_enet_netdev_ops = { #endif }; +static const struct phylink_mac_ops fs_enet_phylink_mac_ops = { + .mac_config = fs_mac_config, + .mac_link_down = fs_mac_link_down, + .mac_link_up = fs_mac_link_up, +}; + static int fs_enet_probe(struct platform_device *ofdev) { + int privsize, len, ret = -ENODEV; + struct fs_platform_info *fpi; + struct fs_enet_private *fep; + phy_interface_t phy_mode; const struct fs_ops *ops; struct net_device *ndev; - struct fs_enet_private *fep; - struct fs_platform_info *fpi; + struct phylink *phylink; const u32 *data; struct clk *clk; - int err; - const char *phy_connection_type; - int privsize, len, ret = -ENODEV; ops = device_get_match_data(&ofdev->dev); if (!ops) @@ -930,51 +878,36 @@ static int fs_enet_probe(struct platform_device *ofdev) fpi->cp_command = *data; } + ret = of_get_phy_mode(ofdev->dev.of_node, &phy_mode); + if (ret) { + /* For compatibility, if the mode isn't specified in DT, + * assume MII + */ + phy_mode = PHY_INTERFACE_MODE_MII; + } + fpi->rx_ring = RX_RING_SIZE; fpi->tx_ring = TX_RING_SIZE; fpi->rx_copybreak = 240; fpi->napi_weight = 17; - fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); - if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) { - err = of_phy_register_fixed_link(ofdev->dev.of_node); - if (err) - goto out_free_fpi; - - /* In the case of a fixed PHY, the DT node associated - * to the PHY is the Ethernet MAC DT node. - */ - fpi->phy_node = of_node_get(ofdev->dev.of_node); - } - - if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) { - phy_connection_type = of_get_property(ofdev->dev.of_node, - "phy-connection-type", NULL); - if (phy_connection_type && !strcmp("rmii", phy_connection_type)) - fpi->use_rmii = 1; - } /* make clock lookup non-fatal (the driver is shared among platforms), * but require enable to succeed when a clock was specified/found, * keep a reference to the clock upon successful acquisition */ - clk = devm_clk_get(&ofdev->dev, "per"); - if (!IS_ERR(clk)) { - ret = clk_prepare_enable(clk); - if (ret) - goto out_deregister_fixed_link; - - fpi->clk_per = clk; - } + clk = devm_clk_get_optional_enabled(&ofdev->dev, "per"); + if (IS_ERR(clk)) + goto out_free_fpi; privsize = sizeof(*fep) + - sizeof(struct sk_buff **) * + sizeof(struct sk_buff **) * (fpi->rx_ring + fpi->tx_ring) + sizeof(char) * fpi->tx_ring; ndev = alloc_etherdev(privsize); if (!ndev) { ret = -ENOMEM; - goto out_put; + goto out_free_fpi; } SET_NETDEV_DEV(ndev, &ofdev->dev); @@ -986,9 +919,29 @@ static int fs_enet_probe(struct platform_device *ofdev) fep->fpi = fpi; fep->ops = ops; + fep->phylink_config.dev = &ndev->dev; + fep->phylink_config.type = PHYLINK_NETDEV; + fep->phylink_config.mac_capabilities = MAC_10 | MAC_100; + + __set_bit(PHY_INTERFACE_MODE_MII, + fep->phylink_config.supported_interfaces); + + if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) + __set_bit(PHY_INTERFACE_MODE_RMII, + fep->phylink_config.supported_interfaces); + + phylink = phylink_create(&fep->phylink_config, dev_fwnode(fep->dev), + phy_mode, &fs_enet_phylink_mac_ops); + if (IS_ERR(phylink)) { + ret = PTR_ERR(phylink); + goto out_free_dev; + } + + fep->phylink = phylink; + ret = fep->ops->setup_data(ndev); if (ret) - goto out_free_dev; + goto out_phylink; fep->rx_skbuff = (struct sk_buff **)&fep[1]; fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring; @@ -1018,8 +971,6 @@ static int fs_enet_probe(struct platform_device *ofdev) 
ndev->ethtool_ops = &fs_ethtool_ops; - netif_carrier_off(ndev); - ndev->features |= NETIF_F_SG; ret = register_netdev(ndev); @@ -1034,14 +985,10 @@ out_free_bd: fep->ops->free_bd(ndev); out_cleanup_data: fep->ops->cleanup_data(ndev); +out_phylink: + phylink_destroy(fep->phylink); out_free_dev: free_netdev(ndev); -out_put: - clk_disable_unprepare(fpi->clk_per); -out_deregister_fixed_link: - of_node_put(fpi->phy_node); - if (of_phy_is_fixed_link(ofdev->dev.of_node)) - of_phy_deregister_fixed_link(ofdev->dev.of_node); out_free_fpi: kfree(fpi); return ret; @@ -1057,10 +1004,7 @@ static void fs_enet_remove(struct platform_device *ofdev) fep->ops->free_bd(ndev); fep->ops->cleanup_data(ndev); dev_set_drvdata(fep->dev, NULL); - of_node_put(fep->fpi->phy_node); - clk_disable_unprepare(fep->fpi->clk_per); - if (of_phy_is_fixed_link(ofdev->dev.of_node)) - of_phy_deregister_fixed_link(ofdev->dev.of_node); + phylink_destroy(fep->phylink); free_netdev(ndev); } @@ -1108,15 +1052,15 @@ static struct platform_driver fs_enet_driver = { .of_match_table = fs_enet_match, }, .probe = fs_enet_probe, - .remove_new = fs_enet_remove, + .remove = fs_enet_remove, }; #ifdef CONFIG_NET_POLL_CONTROLLER static void fs_enet_netpoll(struct net_device *dev) { - disable_irq(dev->irq); - fs_enet_interrupt(dev->irq, dev); - enable_irq(dev->irq); + disable_irq(dev->irq); + fs_enet_interrupt(dev->irq, dev); + enable_irq(dev->irq); } #endif diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h index 21c07ac05225..36e4fcc29e36 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h @@ -3,11 +3,11 @@ #define FS_ENET_H #include <linux/clk.h> -#include <linux/mii.h> #include <linux/netdevice.h> #include <linux/types.h> #include <linux/list.h> #include <linux/phy.h> +#include <linux/phylink.h> #include <linux/dma-mapping.h> #ifdef CONFIG_CPM1 @@ -77,8 +77,8 @@ struct fs_ops { void (*free_bd)(struct net_device *dev); void (*cleanup_data)(struct net_device *dev); void (*set_multicast_list)(struct net_device *dev); - void (*adjust_link)(struct net_device *dev); - void (*restart)(struct net_device *dev); + void (*restart)(struct net_device *dev, phy_interface_t interface, + int speed, int duplex); void (*stop)(struct net_device *dev); void (*napi_clear_event)(struct net_device *dev); void (*napi_enable)(struct net_device *dev); @@ -93,14 +93,6 @@ struct fs_ops { void (*tx_restart)(struct net_device *dev); }; -struct phy_info { - unsigned int id; - const char *name; - void (*startup) (struct net_device * dev); - void (*shutdown) (struct net_device * dev); - void (*ack_int) (struct net_device * dev); -}; - /* The FEC stores dest/src/type, data, and checksum for receive packets. 
*/ #define MAX_MTU 1508 /* Allow fullsized pppoe packets over VLAN */ @@ -122,15 +114,9 @@ struct fs_platform_info { u32 dpram_offset; - struct device_node *phy_node; - int rx_ring, tx_ring; /* number of buffers on rx */ int rx_copybreak; /* limit we copy small frames */ int napi_weight; /* NAPI weight */ - - int use_rmii; /* use RMII mode */ - - struct clk *clk_per; /* 'per' clock for register access */ }; struct fs_enet_private { @@ -154,14 +140,11 @@ struct fs_enet_private { cbd_t __iomem *cur_rx; cbd_t __iomem *cur_tx; int tx_free; - const struct phy_info *phy; u32 msg_enable; - struct mii_if_info mii_if; - unsigned int last_mii_status; + struct phylink *phylink; + struct phylink_config phylink_config; int interrupt; - int oldduplex, oldspeed, oldlink; /* current settings */ - /* event masks */ u32 ev_napi; /* mask of NAPI events */ u32 ev; /* event mask */ diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c index e2ffac9eb2ad..be63293511d9 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * FCC driver for Motorola MPC82xx (PQ2). * @@ -6,10 +7,6 @@ * * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. */ #include <linux/module.h> @@ -25,7 +22,6 @@ #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> -#include <linux/mii.h> #include <linux/ethtool.h> #include <linux/bitops.h> #include <linux/fs.h> @@ -239,7 +235,8 @@ static void set_multicast_list(struct net_device *dev) set_promiscuous_mode(dev); } -static void restart(struct net_device *dev) +static void restart(struct net_device *dev, phy_interface_t interface, + int speed, int duplex) { struct fs_enet_private *fep = netdev_priv(dev); const struct fs_platform_info *fpi = fep->fpi; @@ -363,8 +360,8 @@ static void restart(struct net_device *dev) fs_init_bds(dev); /* adjust to speed (for RMII mode) */ - if (fpi->use_rmii) { - if (dev->phydev->speed == 100) + if (interface == PHY_INTERFACE_MODE_RMII) { + if (speed == SPEED_100) C8(fcccp, fcc_gfemr, 0x20); else S8(fcccp, fcc_gfemr, 0x20); @@ -386,11 +383,11 @@ static void restart(struct net_device *dev) W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC); - if (fpi->use_rmii) + if (interface == PHY_INTERFACE_MODE_RMII) S32(fccp, fcc_fpsmr, FCC_PSMR_RMII); /* adjust to duplex mode */ - if (dev->phydev->duplex) + if (duplex == DUPLEX_FULL) S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); else C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index cdc89d83cf07..f2ecd20027cf 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Freescale Ethernet controllers * @@ -6,10 +7,6 @@ * * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. 
*/ #include <linux/module.h> @@ -26,7 +23,6 @@ #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> -#include <linux/mii.h> #include <linux/ethtool.h> #include <linux/bitops.h> #include <linux/fs.h> @@ -224,7 +220,8 @@ static void set_multicast_list(struct net_device *dev) set_promiscuous_mode(dev); } -static void restart(struct net_device *dev) +static void restart(struct net_device *dev, phy_interface_t interface, + int speed, int duplex) { struct fs_enet_private *fep = netdev_priv(dev); struct fec __iomem *fecp = fep->fec.fecp; @@ -306,13 +303,13 @@ static void restart(struct net_device *dev) * Only set MII/RMII mode - do not touch maximum frame length * configured before. */ - FS(fecp, r_cntrl, fpi->use_rmii ? - FEC_RCNTRL_RMII_MODE : FEC_RCNTRL_MII_MODE); + FS(fecp, r_cntrl, interface == PHY_INTERFACE_MODE_RMII ? + FEC_RCNTRL_RMII_MODE : FEC_RCNTRL_MII_MODE); #endif /* * adjust to duplex mode */ - if (dev->phydev->duplex) { + if (duplex == DUPLEX_FULL) { FC(fecp, r_cntrl, FEC_RCNTRL_DRT); FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */ } else { diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c index a64cb6270515..6c97191649de 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx. * @@ -6,10 +7,6 @@ * * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. */ #include <linux/module.h> @@ -25,7 +22,6 @@ #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> -#include <linux/mii.h> #include <linux/ethtool.h> #include <linux/bitops.h> #include <linux/fs.h> @@ -131,15 +127,14 @@ static int setup_data(struct net_device *dev) static int allocate_bd(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - const struct fs_platform_info *fpi = fep->fpi; + struct fs_platform_info *fpi = fep->fpi; - fep->ring_mem_addr = cpm_muram_alloc((fpi->tx_ring + fpi->rx_ring) * - sizeof(cbd_t), 8); - if (IS_ERR_VALUE(fep->ring_mem_addr)) + fpi->dpram_offset = cpm_muram_alloc((fpi->tx_ring + fpi->rx_ring) * + sizeof(cbd_t), 8); + if (IS_ERR_VALUE(fpi->dpram_offset)) return -ENOMEM; - fep->ring_base = (void __iomem __force*) - cpm_muram_addr(fep->ring_mem_addr); + fep->ring_base = cpm_muram_addr(fpi->dpram_offset); return 0; } @@ -147,9 +142,10 @@ static int allocate_bd(struct net_device *dev) static void free_bd(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); + const struct fs_platform_info *fpi = fep->fpi; if (fep->ring_base) - cpm_muram_free(fep->ring_mem_addr); + cpm_muram_free(fpi->dpram_offset); } static void cleanup_data(struct net_device *dev) @@ -230,7 +226,8 @@ static void set_multicast_list(struct net_device *dev) * change. This only happens when switching between half and full * duplex. 
*/ -static void restart(struct net_device *dev) +static void restart(struct net_device *dev, phy_interface_t interface, + int speed, int duplex) { struct fs_enet_private *fep = netdev_priv(dev); scc_t __iomem *sccp = fep->scc.sccp; @@ -247,9 +244,9 @@ static void restart(struct net_device *dev) __fs_out8((u8 __iomem *)ep + i, 0); /* point to bds */ - W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr); + W16(ep, sen_genscc.scc_rbase, fpi->dpram_offset); W16(ep, sen_genscc.scc_tbase, - fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring); + fpi->dpram_offset + sizeof(cbd_t) * fpi->rx_ring); /* Initialize function code registers for big-endian. */ @@ -341,7 +338,7 @@ static void restart(struct net_device *dev) W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22); /* Set full duplex mode if needed */ - if (dev->phydev->duplex) + if (duplex == DUPLEX_FULL) S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE); /* Restore multicast and promiscuous settings */ diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index f965a2329055..66038e2a4ae3 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. * @@ -6,10 +7,6 @@ * * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. */ #include <linux/module.h> @@ -126,7 +123,7 @@ static int fs_mii_bitbang_init(struct mii_bus *bus, struct device_node *np) * we get is an int, and the odds of multiple bitbang mdio buses * is low enough that it's not worth going too crazy. */ - snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); + snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res.start); data = of_get_property(np, "fsl,mdio-pin", &len); if (!data || len != 4) @@ -217,7 +214,7 @@ static struct platform_driver fs_enet_bb_mdio_driver = { .of_match_table = fs_enet_mdio_bb_match, }, .probe = fs_enet_mdio_probe, - .remove_new = fs_enet_mdio_remove, + .remove = fs_enet_mdio_remove, }; module_platform_driver(fs_enet_bb_mdio_driver); diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 7bb69727952a..dec31b638941 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. * @@ -6,10 +7,6 @@ * * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. 
*/ #include <linux/module.h> @@ -215,7 +212,7 @@ static struct platform_driver fs_enet_fec_mdio_driver = { .of_match_table = fs_enet_mdio_fec_match, }, .probe = fs_enet_mdio_probe, - .remove_new = fs_enet_mdio_remove, + .remove = fs_enet_mdio_remove, }; module_platform_driver(fs_enet_fec_mdio_driver); diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 026f7270a54d..56d2f79fb7e3 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -526,7 +526,7 @@ static struct platform_driver fsl_pq_mdio_driver = { .of_match_table = fsl_pq_mdio_match, }, .probe = fsl_pq_mdio_probe, - .remove_new = fsl_pq_mdio_remove, + .remove = fsl_pq_mdio_remove, }; module_platform_driver(fsl_pq_mdio_driver); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index a811238c018d..bcbcad613512 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -754,6 +754,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; err = of_get_ethdev_address(np, dev); + if (err == -EPROBE_DEFER) + goto err_grp_init; if (err) { eth_hw_addr_random(dev); dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr); @@ -1645,20 +1647,11 @@ static void gfar_configure_serdes(struct net_device *dev) */ static int init_phy(struct net_device *dev) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct gfar_private *priv = netdev_priv(dev); phy_interface_t interface = priv->interface; struct phy_device *phydev; struct ethtool_keee edata; - linkmode_set_bit_array(phy_10_100_features_array, - ARRAY_SIZE(phy_10_100_features_array), - mask); - linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask); - linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask); - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask); - priv->oldlink = 0; priv->oldspeed = 0; priv->oldduplex = -1; @@ -1673,9 +1666,8 @@ static int init_phy(struct net_device *dev) if (interface == PHY_INTERFACE_MODE_SGMII) gfar_configure_serdes(dev); - /* Remove any features not supported by the controller */ - linkmode_and(phydev->supported, phydev->supported, mask); - linkmode_copy(phydev->advertising, phydev->supported); + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)) + phy_set_max_speed(phydev, SPEED_100); /* Add support for flow control */ phy_support_asym_pause(phydev); @@ -2026,7 +2018,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu) if (dev->flags & IFF_UP) stop_gfar(dev); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); if (dev->flags & IFF_UP) startup_gfar(dev); @@ -2069,15 +2061,13 @@ static void gfar_timeout(struct net_device *dev, unsigned int txqueue) schedule_work(&priv->reset_task); } -static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) +static int gfar_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { - struct hwtstamp_config config; struct gfar_private *priv = netdev_priv(netdev); - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: priv->hwts_tx_en = 0; break; @@ -2090,7 +2080,7 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) return -ERANGE; } - switch 
(config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: if (priv->hwts_rx_en) { priv->hwts_rx_en = 0; @@ -2104,44 +2094,23 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) priv->hwts_rx_en = 1; reset_gfar(netdev); } - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; break; } - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; + return 0; } -static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) +static int gfar_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) { - struct hwtstamp_config config; struct gfar_private *priv = netdev_priv(netdev); - config.flags = 0; - config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - config.rx_filter = (priv->hwts_rx_en ? - HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); - - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; -} - -static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - struct phy_device *phydev = dev->phydev; - - if (!netif_running(dev)) - return -EINVAL; - - if (cmd == SIOCSHWTSTAMP) - return gfar_hwtstamp_set(dev, rq); - if (cmd == SIOCGHWTSTAMP) - return gfar_hwtstamp_get(dev, rq); + config->tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + config->rx_filter = priv->hwts_rx_en ? HWTSTAMP_FILTER_ALL : + HWTSTAMP_FILTER_NONE; - if (!phydev) - return -ENODEV; - - return phy_mii_ioctl(phydev, rq, cmd); + return 0; } /* Interrupt Handler for Transmit complete */ @@ -2205,8 +2174,9 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) if (unlikely(do_tstamp)) { struct skb_shared_hwtstamps shhwtstamps; - u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) & - ~0x7UL); + __be64 *ns; + + ns = (__be64 *)(((uintptr_t)skb->data + 0x10) & ~0x7UL); memset(&shhwtstamps, 0, sizeof(shhwtstamps)); shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); @@ -2469,7 +2439,7 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) /* Get receive timestamp from the skb */ if (priv->hwts_rx_en) { struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); - u64 *ns = (u64 *) skb->data; + __be64 *ns = (__be64 *)skb->data; memset(shhwtstamps, 0, sizeof(*shhwtstamps)); shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); @@ -3181,7 +3151,7 @@ static const struct net_device_ops gfar_netdev_ops = { .ndo_set_features = gfar_set_features, .ndo_set_rx_mode = gfar_set_multi, .ndo_tx_timeout = gfar_timeout, - .ndo_eth_ioctl = gfar_ioctl, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_get_stats64 = gfar_get_stats64, .ndo_change_carrier = fixed_phy_change_carrier, .ndo_set_mac_address = gfar_set_mac_addr, @@ -3189,6 +3159,8 @@ static const struct net_device_ops gfar_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = gfar_netpoll, #endif + .ndo_hwtstamp_get = gfar_hwtstamp_get, + .ndo_hwtstamp_set = gfar_hwtstamp_set, }; /* Set up the ethernet device structure, private data, @@ -3640,7 +3612,7 @@ static struct platform_driver gfar_driver = { .of_match_table = gfar_match, }, .probe = gfar_probe, - .remove_new = gfar_remove, + .remove = gfar_remove, }; module_platform_driver(gfar_driver); diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 7a15b9245698..781d92e703cb 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -115,12 +115,14 @@ static const 
char stat_gstrings[][ETH_GSTRING_LEN] = { static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf) { struct gfar_private *priv = netdev_priv(dev); + int i; if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) - memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN); + for (i = 0; i < GFAR_STATS_LEN; i++) + ethtool_puts(&buf, stat_gstrings[i]); else - memcpy(buf, stat_gstrings, - GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN); + for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) + ethtool_puts(&buf, stat_gstrings[i]); } /* Fill in an array of 64-bit statistics from various sources. @@ -1448,19 +1450,15 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, } static int gfar_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct gfar_private *priv = netdev_priv(dev); struct platform_device *ptp_dev; struct device_node *ptp_node; struct ptp_qoriq *ptp = NULL; - info->phc_index = -1; - if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) { - info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE; return 0; } @@ -1478,9 +1476,7 @@ static int gfar_get_ts_info(struct net_device *dev, info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; + SOF_TIMESTAMPING_TX_SOFTWARE; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index ab421243a419..affd5a6c44e7 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -26,7 +26,7 @@ #include <linux/dma-mapping.h> #include <linux/mii.h> #include <linux/phy.h> -#include <linux/phy_fixed.h> +#include <linux/phylink.h> #include <linux/workqueue.h> #include <linux/of.h> #include <linux/of_address.h> @@ -34,6 +34,7 @@ #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/platform_device.h> +#include <linux/rtnetlink.h> #include <linux/uaccess.h> #include <asm/irq.h> @@ -132,7 +133,6 @@ static const struct ucc_geth_info ugeth_primary_info = { .transmitFlowControl = 1, .maxGroupAddrInHash = 4, .maxIndAddrInHash = 4, - .prel = 7, .maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */ .minFrameLength = 64, .maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. 
*/ @@ -1205,34 +1205,6 @@ static int init_mac_station_addr_regs(u8 address_byte_0, return 0; } -static int init_check_frame_length_mode(int length_check, - u32 __iomem *maccfg2_register) -{ - u32 value = 0; - - value = in_be32(maccfg2_register); - - if (length_check) - value |= MACCFG2_LC; - else - value &= ~MACCFG2_LC; - - out_be32(maccfg2_register, value); - return 0; -} - -static int init_preamble_length(u8 preamble_length, - u32 __iomem *maccfg2_register) -{ - if ((preamble_length < 3) || (preamble_length > 7)) - return -EINVAL; - - clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK, - preamble_length << MACCFG2_PREL_SHIFT); - - return 0; -} - static int init_rx_parameters(int reject_broadcast, int receive_short_frames, int promiscuous, u32 __iomem *upsmr_register) @@ -1287,94 +1259,11 @@ static int init_min_frame_len(u16 min_frame_length, return 0; } -static int adjust_enet_interface(struct ucc_geth_private *ugeth) +static bool phy_interface_mode_is_reduced(phy_interface_t interface) { - struct ucc_geth_info *ug_info; - struct ucc_geth __iomem *ug_regs; - struct ucc_fast __iomem *uf_regs; - int ret_val; - u32 upsmr, maccfg2; - u16 value; - - ugeth_vdbg("%s: IN", __func__); - - ug_info = ugeth->ug_info; - ug_regs = ugeth->ug_regs; - uf_regs = ugeth->uccf->uf_regs; - - /* Set MACCFG2 */ - maccfg2 = in_be32(&ug_regs->maccfg2); - maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK; - if ((ugeth->max_speed == SPEED_10) || - (ugeth->max_speed == SPEED_100)) - maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; - else if (ugeth->max_speed == SPEED_1000) - maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; - maccfg2 |= ug_info->padAndCrc; - out_be32(&ug_regs->maccfg2, maccfg2); - - /* Set UPSMR */ - upsmr = in_be32(&uf_regs->upsmr); - upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M | - UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM); - if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { - if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII) - upsmr |= UCC_GETH_UPSMR_RPM; - switch (ugeth->max_speed) { - case SPEED_10: - upsmr |= UCC_GETH_UPSMR_R10M; - fallthrough; - case SPEED_100: - if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI) - upsmr |= UCC_GETH_UPSMR_RMM; - } - } - if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { - upsmr |= UCC_GETH_UPSMR_TBIM; - } - if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII) - upsmr |= UCC_GETH_UPSMR_SGMM; - - out_be32(&uf_regs->upsmr, upsmr); - - /* Disable autonegotiation in tbi mode, because by default it - comes up in autonegotiation mode. */ - /* Note that this depends on proper setting in utbipar register. 
*/ - if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { - struct ucc_geth_info *ug_info = ugeth->ug_info; - struct phy_device *tbiphy; - - if (!ug_info->tbi_node) - pr_warn("TBI mode requires that the device tree specify a tbi-handle\n"); - - tbiphy = of_phy_find_device(ug_info->tbi_node); - if (!tbiphy) - pr_warn("Could not get TBI device\n"); - - value = phy_read(tbiphy, ENET_TBI_MII_CR); - value &= ~0x1000; /* Turn off autonegotiation */ - phy_write(tbiphy, ENET_TBI_MII_CR, value); - - put_device(&tbiphy->mdio.dev); - } - - init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); - - ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2); - if (ret_val != 0) { - if (netif_msg_probe(ugeth)) - pr_err("Preamble length must be between 3 and 7 inclusive\n"); - return ret_val; - } - - return 0; + return phy_interface_mode_is_rgmii(interface) || + interface == PHY_INTERFACE_MODE_RMII || + interface == PHY_INTERFACE_MODE_RTBI; } static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth) @@ -1545,108 +1434,7 @@ static void ugeth_activate(struct ucc_geth_private *ugeth) /* allow to xmit again */ netif_tx_wake_all_queues(ugeth->ndev); - __netdev_watchdog_up(ugeth->ndev); -} - -/* Called every time the controller might need to be made - * aware of new link state. The PHY code conveys this - * information through variables in the ugeth structure, and this - * function converts those variables into the appropriate - * register values, and can bring down the device if needed. - */ - -static void adjust_link(struct net_device *dev) -{ - struct ucc_geth_private *ugeth = netdev_priv(dev); - struct ucc_geth __iomem *ug_regs; - struct ucc_fast __iomem *uf_regs; - struct phy_device *phydev = ugeth->phydev; - int new_state = 0; - - ug_regs = ugeth->ug_regs; - uf_regs = ugeth->uccf->uf_regs; - - if (phydev->link) { - u32 tempval = in_be32(&ug_regs->maccfg2); - u32 upsmr = in_be32(&uf_regs->upsmr); - /* Now we make sure that we can be in full duplex mode. - * If not, we operate in half-duplex mode. */ - if (phydev->duplex != ugeth->oldduplex) { - new_state = 1; - if (!(phydev->duplex)) - tempval &= ~(MACCFG2_FDX); - else - tempval |= MACCFG2_FDX; - ugeth->oldduplex = phydev->duplex; - } - - if (phydev->speed != ugeth->oldspeed) { - new_state = 1; - switch (phydev->speed) { - case SPEED_1000: - tempval = ((tempval & - ~(MACCFG2_INTERFACE_MODE_MASK)) | - MACCFG2_INTERFACE_MODE_BYTE); - break; - case SPEED_100: - case SPEED_10: - tempval = ((tempval & - ~(MACCFG2_INTERFACE_MODE_MASK)) | - MACCFG2_INTERFACE_MODE_NIBBLE); - /* if reduced mode, re-set UPSMR.R10M */ - if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { - if (phydev->speed == SPEED_10) - upsmr |= UCC_GETH_UPSMR_R10M; - else - upsmr &= ~UCC_GETH_UPSMR_R10M; - } - break; - default: - if (netif_msg_link(ugeth)) - pr_warn( - "%s: Ack! Speed (%d) is not 10/100/1000!", - dev->name, phydev->speed); - break; - } - ugeth->oldspeed = phydev->speed; - } - - if (!ugeth->oldlink) { - new_state = 1; - ugeth->oldlink = 1; - } - - if (new_state) { - /* - * To change the MAC configuration we need to disable - * the controller. 
To do so, we have to either grab - * ugeth->lock, which is a bad idea since 'graceful - * stop' commands might take quite a while, or we can - * quiesce driver's activity. - */ - ugeth_quiesce(ugeth); - ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); - - out_be32(&ug_regs->maccfg2, tempval); - out_be32(&uf_regs->upsmr, upsmr); - - ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); - ugeth_activate(ugeth); - } - } else if (ugeth->oldlink) { - new_state = 1; - ugeth->oldlink = 0; - ugeth->oldspeed = 0; - ugeth->oldduplex = -1; - } - - if (new_state && netif_msg_link(ugeth)) - phy_print_status(phydev); + netdev_watchdog_up(ugeth->ndev); } /* Initialize TBI PHY interface for communicating with the @@ -1664,8 +1452,7 @@ static void uec_configure_serdes(struct net_device *dev) struct phy_device *tbiphy; if (!ug_info->tbi_node) { - dev_warn(&dev->dev, "SGMII mode requires that the device " - "tree specify a tbi-handle\n"); + dev_warn(&dev->dev, "SGMII mode requires that the device tree specify a tbi-handle\n"); return; } @@ -1696,34 +1483,145 @@ static void uec_configure_serdes(struct net_device *dev) put_device(&tbiphy->mdio.dev); } -/* Configure the PHY for dev. - * returns 0 if success. -1 if failure - */ -static int init_phy(struct net_device *dev) +static void ugeth_mac_link_up(struct phylink_config *config, struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, bool tx_pause, bool rx_pause) { - struct ucc_geth_private *priv = netdev_priv(dev); - struct ucc_geth_info *ug_info = priv->ug_info; - struct phy_device *phydev; + struct net_device *ndev = to_net_dev(config->dev); + struct ucc_geth_private *ugeth = netdev_priv(ndev); + struct ucc_geth_info *ug_info = ugeth->ug_info; + struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; + struct ucc_fast __iomem *uf_regs = ugeth->uccf->uf_regs; + u32 old_maccfg2, maccfg2 = in_be32(&ug_regs->maccfg2); + u32 old_upsmr, upsmr = in_be32(&uf_regs->upsmr); - priv->oldlink = 0; - priv->oldspeed = 0; - priv->oldduplex = -1; + old_maccfg2 = maccfg2; + old_upsmr = upsmr; - phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0, - priv->phy_interface); - if (!phydev) { - dev_err(&dev->dev, "Could not attach to PHY\n"); - return -ENODEV; + /* No length check */ + maccfg2 &= ~MACCFG2_LC; + maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK; + upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M | + UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM); + + if (speed == SPEED_10 || speed == SPEED_100) + maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; + else if (speed == SPEED_1000) + maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; + + maccfg2 |= ug_info->padAndCrc; + + if (phy_interface_mode_is_reduced(interface)) { + + if (interface != PHY_INTERFACE_MODE_RMII) + upsmr |= UCC_GETH_UPSMR_RPM; + + switch (speed) { + case SPEED_10: + upsmr |= UCC_GETH_UPSMR_R10M; + fallthrough; + case SPEED_100: + if (interface != PHY_INTERFACE_MODE_RTBI) + upsmr |= UCC_GETH_UPSMR_RMM; + } } - if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) - uec_configure_serdes(dev); + if (interface == PHY_INTERFACE_MODE_TBI || + interface == PHY_INTERFACE_MODE_RTBI) + upsmr |= UCC_GETH_UPSMR_TBIM; - phy_set_max_speed(phydev, priv->max_speed); + if (interface == PHY_INTERFACE_MODE_SGMII) + upsmr |= UCC_GETH_UPSMR_SGMM; - priv->phydev = phydev; + if (duplex == DUPLEX_HALF) + maccfg2 &= ~(MACCFG2_FDX); + else + maccfg2 |= MACCFG2_FDX; - return 0; + if (maccfg2 != old_maccfg2 || upsmr != old_upsmr) { + /* + * To change the MAC configuration we need to disable + * the controller. 
To do so, we have to either grab + * ugeth->lock, which is a bad idea since 'graceful + * stop' commands might take quite a while, or we can + * quiesce driver's activity. + */ + ugeth_quiesce(ugeth); + ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); + + out_be32(&ug_regs->maccfg2, maccfg2); + out_be32(&uf_regs->upsmr, upsmr); + + ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); + ugeth_activate(ugeth); + } + + if (interface == PHY_INTERFACE_MODE_SGMII) + uec_configure_serdes(ndev); + + if (!phylink_autoneg_inband(mode)) { + ug_info->aufc = 0; + ug_info->receiveFlowControl = rx_pause; + ug_info->transmitFlowControl = tx_pause; + + init_flow_control_params(ug_info->aufc, + ug_info->receiveFlowControl, + ug_info->transmitFlowControl, + ug_info->pausePeriod, + ug_info->extensionField, + &ugeth->uccf->uf_regs->upsmr, + &ugeth->ug_regs->uempr, + &ugeth->ug_regs->maccfg1); + } + + ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); +} + +static void ugeth_mac_link_down(struct phylink_config *config, + unsigned int mode, phy_interface_t interface) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct ucc_geth_private *ugeth = netdev_priv(ndev); + + ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); +} + +static void ugeth_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct ucc_geth_private *ugeth = netdev_priv(ndev); + struct ucc_geth_info *ug_info = ugeth->ug_info; + u16 value; + + if (state->interface == PHY_INTERFACE_MODE_TBI || + state->interface == PHY_INTERFACE_MODE_RTBI) { + struct phy_device *tbiphy; + + if (!ug_info->tbi_node) + pr_warn("TBI mode requires that the device tree specify a tbi-handle\n"); + + tbiphy = of_phy_find_device(ug_info->tbi_node); + if (!tbiphy) + pr_warn("Could not get TBI device\n"); + + value = phy_read(tbiphy, ENET_TBI_MII_CR); + value &= ~0x1000; /* Turn off autonegotiation */ + phy_write(tbiphy, ENET_TBI_MII_CR, value); + + put_device(&tbiphy->mdio.dev); + } + + if (phylink_autoneg_inband(mode)) { + ug_info->aufc = 1; + + init_flow_control_params(ug_info->aufc, 1, 1, + ug_info->pausePeriod, + ug_info->extensionField, + &ugeth->uccf->uf_regs->upsmr, + &ugeth->ug_regs->uempr, + &ugeth->ug_regs->maccfg1); + } } static void ugeth_dump_regs(struct ucc_geth_private *ugeth) @@ -1995,7 +1893,6 @@ static void ucc_geth_set_multi(struct net_device *dev) static void ucc_geth_stop(struct ucc_geth_private *ugeth) { struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; - struct phy_device *phydev = ugeth->phydev; ugeth_vdbg("%s: IN", __func__); @@ -2004,7 +1901,7 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth) * Must be done before disabling the controller * or deadlock may happen. */ - phy_stop(phydev); + phylink_stop(ugeth->phylink); /* Disable the controller */ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); @@ -3246,12 +3143,6 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth) goto err; } - err = adjust_enet_interface(ugeth); - if (err) { - netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n"); - goto err; - } - /* Set MACSTNADDR1, MACSTNADDR2 */ /* For more details see the hardware spec. 
*/ init_mac_station_addr_regs(dev->dev_addr[0], @@ -3263,12 +3154,6 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth) &ugeth->ug_regs->macstnaddr1, &ugeth->ug_regs->macstnaddr2); - err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); - if (err) { - netif_err(ugeth, ifup, dev, "Cannot enable net device, aborting\n"); - goto err; - } - return 0; err: ucc_geth_stop(ugeth); @@ -3291,10 +3176,10 @@ static int ucc_geth_open(struct net_device *dev) return -EINVAL; } - err = init_phy(dev); + err = phylink_of_phy_connect(ugeth->phylink, ugeth->dev->of_node, 0); if (err) { - netif_err(ugeth, ifup, dev, "Cannot initialize PHY, aborting\n"); - return err; + dev_err(&dev->dev, "Could not attach to PHY\n"); + return -ENODEV; } err = ucc_geth_init_mac(ugeth); @@ -3310,13 +3195,13 @@ static int ucc_geth_open(struct net_device *dev) goto err; } - phy_start(ugeth->phydev); + phylink_start(ugeth->phylink); napi_enable(&ugeth->napi); netdev_reset_queue(dev); netif_start_queue(dev); device_set_wakeup_capable(&dev->dev, - qe_alive_during_sleep() || ugeth->phydev->irq); + qe_alive_during_sleep() || dev->phydev->irq); device_set_wakeup_enable(&dev->dev, ugeth->wol_en); return err; @@ -3337,8 +3222,7 @@ static int ucc_geth_close(struct net_device *dev) cancel_work_sync(&ugeth->timeout_work); ucc_geth_stop(ugeth); - phy_disconnect(ugeth->phydev); - ugeth->phydev = NULL; + phylink_disconnect_phy(ugeth->phylink); free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); @@ -3372,7 +3256,7 @@ static void ucc_geth_timeout_work(struct work_struct *work) ucc_geth_stop(ugeth); ucc_geth_init_mac(ugeth); /* Must start PHY here */ - phy_start(ugeth->phydev); + phylink_start(ugeth->phylink); netif_tx_start_all_queues(dev); } @@ -3397,6 +3281,7 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state) { struct net_device *ndev = platform_get_drvdata(ofdev); struct ucc_geth_private *ugeth = netdev_priv(ndev); + bool mac_wol = false; if (!netif_running(ndev)) return 0; @@ -3410,14 +3295,17 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state) */ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); - if (ugeth->wol_en & WAKE_MAGIC) { + if (ugeth->wol_en & WAKE_MAGIC && !ugeth->phy_wol_en) { setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD); setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE); ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX); - } else if (!(ugeth->wol_en & WAKE_PHY)) { - phy_stop(ugeth->phydev); + mac_wol = true; } + rtnl_lock(); + phylink_suspend(ugeth->phylink, mac_wol); + rtnl_unlock(); + return 0; } @@ -3451,12 +3339,9 @@ static int ucc_geth_resume(struct platform_device *ofdev) } } - ugeth->oldlink = 0; - ugeth->oldspeed = 0; - ugeth->oldduplex = -1; - - phy_stop(ugeth->phydev); - phy_start(ugeth->phydev); + rtnl_lock(); + phylink_resume(ugeth->phylink); + rtnl_unlock(); napi_enable(&ugeth->napi); netif_device_attach(ndev); @@ -3469,32 +3354,6 @@ static int ucc_geth_resume(struct platform_device *ofdev) #define ucc_geth_resume NULL #endif -static phy_interface_t to_phy_interface(const char *phy_connection_type) -{ - if (strcasecmp(phy_connection_type, "mii") == 0) - return PHY_INTERFACE_MODE_MII; - if (strcasecmp(phy_connection_type, "gmii") == 0) - return PHY_INTERFACE_MODE_GMII; - if (strcasecmp(phy_connection_type, "tbi") == 0) - return PHY_INTERFACE_MODE_TBI; - if (strcasecmp(phy_connection_type, "rmii") == 0) - return PHY_INTERFACE_MODE_RMII; - if (strcasecmp(phy_connection_type, "rgmii") == 0) - return PHY_INTERFACE_MODE_RGMII; - if 
(strcasecmp(phy_connection_type, "rgmii-id") == 0) - return PHY_INTERFACE_MODE_RGMII_ID; - if (strcasecmp(phy_connection_type, "rgmii-txid") == 0) - return PHY_INTERFACE_MODE_RGMII_TXID; - if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0) - return PHY_INTERFACE_MODE_RGMII_RXID; - if (strcasecmp(phy_connection_type, "rtbi") == 0) - return PHY_INTERFACE_MODE_RTBI; - if (strcasecmp(phy_connection_type, "sgmii") == 0) - return PHY_INTERFACE_MODE_SGMII; - - return PHY_INTERFACE_MODE_MII; -} - static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct ucc_geth_private *ugeth = netdev_priv(dev); @@ -3502,10 +3361,7 @@ static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!netif_running(dev)) return -EINVAL; - if (!ugeth->phydev) - return -ENODEV; - - return phy_mii_ioctl(ugeth->phydev, rq, cmd); + return phylink_mii_ioctl(ugeth->phylink, rq, cmd); } static const struct net_device_ops ucc_geth_netdev_ops = { @@ -3513,7 +3369,6 @@ static const struct net_device_ops ucc_geth_netdev_ops = { .ndo_stop = ucc_geth_close, .ndo_start_xmit = ucc_geth_start_xmit, .ndo_validate_addr = eth_validate_addr, - .ndo_change_carrier = fixed_phy_change_carrier, .ndo_set_mac_address = ucc_geth_set_mac_addr, .ndo_set_rx_mode = ucc_geth_set_multi, .ndo_tx_timeout = ucc_geth_timeout, @@ -3553,6 +3408,12 @@ static int ucc_geth_parse_clock(struct device_node *np, const char *which, return 0; } +static const struct phylink_mac_ops ugeth_mac_ops = { + .mac_link_up = ugeth_mac_link_up, + .mac_link_down = ugeth_mac_link_down, + .mac_config = ugeth_mac_config, +}; + static int ucc_geth_probe(struct platform_device* ofdev) { struct device *device = &ofdev->dev; @@ -3560,23 +3421,12 @@ static int ucc_geth_probe(struct platform_device* ofdev) struct net_device *dev = NULL; struct ucc_geth_private *ugeth = NULL; struct ucc_geth_info *ug_info; + struct device_node *phy_node; + struct phylink *phylink; struct resource res; - int err, ucc_num, max_speed = 0; + int err, ucc_num; const unsigned int *prop; phy_interface_t phy_interface; - static const int enet_to_speed[] = { - SPEED_10, SPEED_10, SPEED_10, - SPEED_100, SPEED_100, SPEED_100, - SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000, - }; - static const phy_interface_t enet_to_phy_interface[] = { - PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII, - PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII, - PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII, - PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII, - PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI, - PHY_INTERFACE_MODE_SGMII, - }; ugeth_vdbg("%s: IN", __func__); @@ -3591,77 +3441,56 @@ static int ucc_geth_probe(struct platform_device* ofdev) if ((ucc_num < 0) || (ucc_num > 7)) return -ENODEV; - ug_info = kmemdup(&ugeth_primary_info, sizeof(*ug_info), GFP_KERNEL); - if (ug_info == NULL) + ug_info = devm_kmemdup(&ofdev->dev, &ugeth_primary_info, + sizeof(*ug_info), GFP_KERNEL); + if (!ug_info) return -ENOMEM; ug_info->uf_info.ucc_num = ucc_num; err = ucc_geth_parse_clock(np, "rx", &ug_info->uf_info.rx_clock); if (err) - goto err_free_info; + return err; err = ucc_geth_parse_clock(np, "tx", &ug_info->uf_info.tx_clock); if (err) - goto err_free_info; + return err; err = of_address_to_resource(np, 0, &res); if (err) - goto err_free_info; + return err; ug_info->uf_info.regs = res.start; ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); - ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0); - if (!ug_info->phy_node && of_phy_is_fixed_link(np)) { - /* - * In the case of 
a fixed PHY, the DT node associated - * to the PHY is the Ethernet MAC DT node. - */ - err = of_phy_register_fixed_link(np); - if (err) - goto err_free_info; - ug_info->phy_node = of_node_get(np); - } - /* Find the TBI PHY node. If it's not there, we don't support SGMII */ ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0); - /* get the phy interface type, or default to MII */ - prop = of_get_property(np, "phy-connection-type", NULL); - if (!prop) { - /* handle interface property present in old trees */ - prop = of_get_property(ug_info->phy_node, "interface", NULL); - if (prop != NULL) { - phy_interface = enet_to_phy_interface[*prop]; - max_speed = enet_to_speed[*prop]; - } else - phy_interface = PHY_INTERFACE_MODE_MII; - } else { - phy_interface = to_phy_interface((const char *)prop); - } - - /* get speed, or derive from PHY interface */ - if (max_speed == 0) - switch (phy_interface) { - case PHY_INTERFACE_MODE_GMII: - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - case PHY_INTERFACE_MODE_TBI: - case PHY_INTERFACE_MODE_RTBI: - case PHY_INTERFACE_MODE_SGMII: - max_speed = SPEED_1000; - break; - default: - max_speed = SPEED_100; - break; + phy_node = of_parse_phandle(np, "phy-handle", 0); + if (phy_node) { + prop = of_get_property(phy_node, "interface", NULL); + if (prop) { + dev_err(&ofdev->dev, + "Device-tree property 'interface' is no longer supported. Please use 'phy-connection-type' instead."); + of_node_put(phy_node); + err = -EINVAL; + goto err_put_tbi; } + of_node_put(phy_node); + } + + err = of_get_phy_mode(np, &phy_interface); + if (err) { + dev_err(&ofdev->dev, "Invalid phy-connection-type"); + goto err_put_tbi; + } - if (max_speed == SPEED_1000) { + if (phy_interface == PHY_INTERFACE_MODE_GMII || + phy_interface_mode_is_rgmii(phy_interface) || + phy_interface == PHY_INTERFACE_MODE_TBI || + phy_interface == PHY_INTERFACE_MODE_RTBI || + phy_interface == PHY_INTERFACE_MODE_SGMII) { unsigned int snums = qe_get_num_of_snums(); - /* configure muram FIFOs for gigabit operation */ ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT; ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT; ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT; @@ -3687,11 +3516,10 @@ static int ucc_geth_probe(struct platform_device* ofdev) ug_info->uf_info.irq); /* Create an ethernet device instance */ - dev = alloc_etherdev(sizeof(*ugeth)); - - if (dev == NULL) { + dev = devm_alloc_etherdev(&ofdev->dev, sizeof(*ugeth)); + if (!dev) { err = -ENOMEM; - goto err_deregister_fixed_link; + goto err_put_tbi; } ugeth = netdev_priv(dev); @@ -3718,21 +3546,50 @@ static int ucc_geth_probe(struct platform_device* ofdev) dev->max_mtu = 1518; ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT); - ugeth->phy_interface = phy_interface; - ugeth->max_speed = max_speed; - /* Carrier starts down, phylib will bring it up */ - netif_carrier_off(dev); + ugeth->phylink_config.dev = &dev->dev; + ugeth->phylink_config.type = PHYLINK_NETDEV; + + ugeth->phylink_config.mac_capabilities = + MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD; + + __set_bit(PHY_INTERFACE_MODE_MII, + ugeth->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_RMII, + ugeth->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_GMII, + ugeth->phylink_config.supported_interfaces); + phy_interface_set_rgmii(ugeth->phylink_config.supported_interfaces); - err = register_netdev(dev); + if (ug_info->tbi_node) { + 
__set_bit(PHY_INTERFACE_MODE_SGMII, + ugeth->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_TBI, + ugeth->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_RTBI, + ugeth->phylink_config.supported_interfaces); + } + + phylink = phylink_create(&ugeth->phylink_config, dev_fwnode(&dev->dev), + phy_interface, &ugeth_mac_ops); + if (IS_ERR(phylink)) { + err = PTR_ERR(phylink); + goto err_put_tbi; + } + + ugeth->phylink = phylink; + + err = devm_register_netdev(&ofdev->dev, dev); if (err) { if (netif_msg_probe(ugeth)) pr_err("%s: Cannot register net device, aborting\n", dev->name); - goto err_free_netdev; + goto err_destroy_phylink; } - of_get_ethdev_address(np, dev); + err = of_get_ethdev_address(np, dev); + if (err == -EPROBE_DEFER) + goto err_destroy_phylink; ugeth->ug_info = ug_info; ugeth->dev = device; @@ -3741,15 +3598,10 @@ static int ucc_geth_probe(struct platform_device* ofdev) return 0; -err_free_netdev: - free_netdev(dev); -err_deregister_fixed_link: - if (of_phy_is_fixed_link(np)) - of_phy_deregister_fixed_link(np); +err_destroy_phylink: + phylink_destroy(phylink); +err_put_tbi: of_node_put(ug_info->tbi_node); - of_node_put(ug_info->phy_node); -err_free_info: - kfree(ug_info); return err; } @@ -3758,16 +3610,10 @@ static void ucc_geth_remove(struct platform_device* ofdev) { struct net_device *dev = platform_get_drvdata(ofdev); struct ucc_geth_private *ugeth = netdev_priv(dev); - struct device_node *np = ofdev->dev.of_node; - unregister_netdev(dev); ucc_geth_memclean(ugeth); - if (of_phy_is_fixed_link(np)) - of_phy_deregister_fixed_link(np); + phylink_destroy(ugeth->phylink); of_node_put(ugeth->ug_info->tbi_node); - of_node_put(ugeth->ug_info->phy_node); - kfree(ugeth->ug_info); - free_netdev(dev); } static const struct of_device_id ucc_geth_match[] = { @@ -3786,7 +3632,7 @@ static struct platform_driver ucc_geth_driver = { .of_match_table = ucc_geth_match, }, .probe = ucc_geth_probe, - .remove_new = ucc_geth_remove, + .remove = ucc_geth_remove, .suspend = ucc_geth_suspend, .resume = ucc_geth_resume, }; diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h index 4294ed096ebb..84f92f6384e7 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.h +++ b/drivers/net/ethernet/freescale/ucc_geth.h @@ -16,6 +16,7 @@ #include <linux/kernel.h> #include <linux/list.h> +#include <linux/phylink.h> #include <linux/if_ether.h> #include <soc/fsl/qe/immap_qe.h> @@ -889,8 +890,6 @@ struct ucc_geth_hardware_statistics { addresses */ #define TX_TIMEOUT (1*HZ) -#define PHY_INIT_TIMEOUT 100000 -#define PHY_CHANGE_TIME 2 /* Fast Ethernet (10/100 Mbps) */ #define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size @@ -921,7 +920,8 @@ struct ucc_geth_hardware_statistics { #define UCC_GETH_UPSMR_INIT UCC_GETH_UPSMR_RES1 #define UCC_GETH_MACCFG1_INIT 0 -#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1) +#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1 | \ + (7 << MACCFG2_PREL_SHIFT)) /* Ethernet Address Type. 
*/ enum enet_addr_type { @@ -1073,6 +1073,9 @@ struct ucc_geth_tad_params { u16 vid; }; +struct phylink; +struct phylink_config; + /* GETH protocol initialization structure */ struct ucc_geth_info { struct ucc_fast_info uf_info; @@ -1088,7 +1091,6 @@ struct ucc_geth_info { u8 miminumInterFrameGapEnforcement; u8 backToBackInterFrameGap; int ipAddressAlignment; - int lengthCheckRx; u32 mblinterval; u16 nortsrbytetime; u8 fracsiz; @@ -1114,7 +1116,6 @@ struct ucc_geth_info { int transmitFlowControl; u8 maxGroupAddrInHash; u8 maxIndAddrInHash; - u8 prel; u16 maxFrameLength; u16 minFrameLength; u16 maxD1Length; @@ -1125,7 +1126,6 @@ struct ucc_geth_info { u32 eventRegMask; u16 pausePeriod; u16 extensionField; - struct device_node *phy_node; struct device_node *tbi_node; u8 weightfactor[NUM_TX_QUEUES]; u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES]; @@ -1210,14 +1210,12 @@ struct ucc_geth_private { u16 skb_dirtytx[NUM_TX_QUEUES]; struct ugeth_mii_info *mii_info; - struct phy_device *phydev; - phy_interface_t phy_interface; - int max_speed; uint32_t msg_enable; - int oldspeed; - int oldduplex; - int oldlink; - int wol_en; + u32 wol_en; + u32 phy_wol_en; + + struct phylink *phylink; + struct phylink_config phylink_config; struct device_node *node; }; diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c index 601beb93d3b3..1fb49e5a414a 100644 --- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c +++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c @@ -104,14 +104,8 @@ static int uec_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct ucc_geth_private *ugeth = netdev_priv(netdev); - struct phy_device *phydev = ugeth->phydev; - if (!phydev) - return -ENODEV; - - phy_ethtool_ksettings_get(phydev, cmd); - - return 0; + return phylink_ethtool_ksettings_get(ugeth->phylink, cmd); } static int @@ -119,12 +113,8 @@ uec_set_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { struct ucc_geth_private *ugeth = netdev_priv(netdev); - struct phy_device *phydev = ugeth->phydev; - - if (!phydev) - return -ENODEV; - return phy_ethtool_ksettings_set(phydev, cmd); + return phylink_ethtool_ksettings_set(ugeth->phylink, cmd); } static void @@ -133,12 +123,7 @@ uec_get_pauseparam(struct net_device *netdev, { struct ucc_geth_private *ugeth = netdev_priv(netdev); - pause->autoneg = ugeth->phydev->autoneg; - - if (ugeth->ug_info->receiveFlowControl) - pause->rx_pause = 1; - if (ugeth->ug_info->transmitFlowControl) - pause->tx_pause = 1; + return phylink_ethtool_get_pauseparam(ugeth->phylink, pause); } static int @@ -146,30 +131,11 @@ uec_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct ucc_geth_private *ugeth = netdev_priv(netdev); - int ret = 0; ugeth->ug_info->receiveFlowControl = pause->rx_pause; ugeth->ug_info->transmitFlowControl = pause->tx_pause; - if (ugeth->phydev->autoneg) { - if (netif_running(netdev)) { - /* FIXME: automatically restart */ - netdev_info(netdev, "Please re-open the interface\n"); - } - } else { - struct ucc_geth_info *ug_info = ugeth->ug_info; - - ret = init_flow_control_params(ug_info->aufc, - ug_info->receiveFlowControl, - ug_info->transmitFlowControl, - ug_info->pausePeriod, - ug_info->extensionField, - &ugeth->uccf->uf_regs->upsmr, - &ugeth->ug_regs->uempr, - &ugeth->ug_regs->maccfg1); - } - - return ret; + return phylink_ethtool_set_pauseparam(ugeth->phylink, pause); } static uint32_t @@ -287,20 +253,17 @@ static void 
uec_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) { struct ucc_geth_private *ugeth = netdev_priv(netdev); u32 stats_mode = ugeth->ug_info->statisticsMode; + int i; - if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) { - memcpy(buf, hw_stat_gstrings, UEC_HW_STATS_LEN * - ETH_GSTRING_LEN); - buf += UEC_HW_STATS_LEN * ETH_GSTRING_LEN; - } - if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { - memcpy(buf, tx_fw_stat_gstrings, UEC_TX_FW_STATS_LEN * - ETH_GSTRING_LEN); - buf += UEC_TX_FW_STATS_LEN * ETH_GSTRING_LEN; - } + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) + for (i = 0; i < UEC_HW_STATS_LEN; i++) + ethtool_puts(&buf, hw_stat_gstrings[i]); + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) + for (i = 0; i < UEC_TX_FW_STATS_LEN; i++) + ethtool_puts(&buf, tx_fw_stat_gstrings[i]); if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) - memcpy(buf, rx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN * - ETH_GSTRING_LEN); + for (i = 0; i < UEC_RX_FW_STATS_LEN; i++) + ethtool_puts(&buf, rx_fw_stat_gstrings[i]); } static void uec_get_ethtool_stats(struct net_device *netdev, @@ -346,28 +309,42 @@ uec_get_drvinfo(struct net_device *netdev, static void uec_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct ucc_geth_private *ugeth = netdev_priv(netdev); - struct phy_device *phydev = ugeth->phydev; - if (phydev && phydev->irq) - wol->supported |= WAKE_PHY; + phylink_ethtool_get_wol(ugeth->phylink, wol); + if (qe_alive_during_sleep()) wol->supported |= WAKE_MAGIC; - wol->wolopts = ugeth->wol_en; + wol->wolopts |= ugeth->wol_en; } static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct ucc_geth_private *ugeth = netdev_priv(netdev); - struct phy_device *phydev = ugeth->phydev; + int ret = 0; - if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) - return -EINVAL; - else if (wol->wolopts & WAKE_PHY && (!phydev || !phydev->irq)) + ret = phylink_ethtool_set_wol(ugeth->phylink, wol); + if (ret == -EOPNOTSUPP) { + ugeth->phy_wol_en = 0; + } else if (ret) { + return ret; + } else { + ugeth->phy_wol_en = wol->wolopts; + goto out; + } + + /* If the PHY isn't handling the WoL and the MAC is asked to more than + * WAKE_MAGIC, error-out + */ + if (!ugeth->phy_wol_en && + wol->wolopts & ~WAKE_MAGIC) return -EINVAL; - else if (wol->wolopts & WAKE_MAGIC && !qe_alive_during_sleep()) + + if (wol->wolopts & WAKE_MAGIC && + !qe_alive_during_sleep()) return -EINVAL; +out: ugeth->wol_en = wol->wolopts; device_set_wakeup_enable(&netdev->dev, ugeth->wol_en); |
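
Note on the gianfar hunks above: they replace the SIOCSHWTSTAMP/SIOCGHWTSTAMP handling in the driver's private ioctl with the dedicated ndo_hwtstamp_get()/ndo_hwtstamp_set() callbacks, which receive an already-validated kernel_hwtstamp_config instead of copying a struct hwtstamp_config to and from user space, while the remaining MII ioctls are handed to phy_do_ioctl_running(). Below is a minimal sketch of that callback shape; the my_* names and the two-flag private struct are hypothetical, not gianfar's actual layout.

#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>

struct my_priv {
	unsigned int hwts_tx_en;
	unsigned int hwts_rx_en;
};

static int my_hwtstamp_get(struct net_device *ndev,
			   struct kernel_hwtstamp_config *config)
{
	struct my_priv *priv = netdev_priv(ndev);

	config->tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config->rx_filter = priv->hwts_rx_en ? HWTSTAMP_FILTER_ALL :
					       HWTSTAMP_FILTER_NONE;
	return 0;
}

static int my_hwtstamp_set(struct net_device *ndev,
			   struct kernel_hwtstamp_config *config,
			   struct netlink_ext_ack *extack)
{
	struct my_priv *priv = netdev_priv(ndev);

	/* no copy_from_user()/copy_to_user(): the core does the translation */
	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
	if (priv->hwts_rx_en)
		config->rx_filter = HWTSTAMP_FILTER_ALL;

	return 0;
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_eth_ioctl	  = phy_do_ioctl_running,	/* plain MII ioctls only */
	.ndo_hwtstamp_get = my_hwtstamp_get,
	.ndo_hwtstamp_set = my_hwtstamp_set,
};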
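
The gfar_gstrings() and uec_get_strings() hunks drop the open-coded memcpy()/ETH_GSTRING_LEN pointer arithmetic in favour of ethtool_puts(), which copies one string into the ethtool stringset buffer and advances the caller's pointer by ETH_GSTRING_LEN per call. A minimal sketch of that pattern, using a hypothetical my_stat_strings[] table:

#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static const char my_stat_strings[][ETH_GSTRING_LEN] = {
	"rx-packets",
	"tx-packets",
};

static void my_get_strings(struct net_device *ndev, u32 stringset, u8 *buf)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	/* each call copies one string and advances buf by ETH_GSTRING_LEN */
	for (i = 0; i < ARRAY_SIZE(my_stat_strings); i++)
		ethtool_puts(&buf, my_stat_strings[i]);
}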
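
The bulk of the ucc_geth changes convert the driver from phylib's adjust_link() callback to phylink: the MAC programming moves into a phylink_mac_ops table, phylink_create() runs at probe time, and phylink_of_phy_connect()/phylink_start() replace of_phy_connect()/phy_start() in ndo_open(). A minimal sketch of that wiring with hypothetical xyz_* names; the register programming is left as comments rather than reproducing the UCC-specific code.

#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/property.h>

static void xyz_mac_config(struct phylink_config *config, unsigned int mode,
			   const struct phylink_link_state *state)
{
	/* interface-mode setup that does not depend on the resolved link
	 * state, e.g. enabling in-band autoneg for (R)TBI/SGMII
	 */
}

static void xyz_mac_link_up(struct phylink_config *config,
			    struct phy_device *phy, unsigned int mode,
			    phy_interface_t interface, int speed, int duplex,
			    bool tx_pause, bool rx_pause)
{
	/* apply speed/duplex/pause to the MAC, then enable RX/TX */
}

static void xyz_mac_link_down(struct phylink_config *config,
			      unsigned int mode, phy_interface_t interface)
{
	/* quiesce the MAC */
}

static const struct phylink_mac_ops xyz_mac_ops = {
	.mac_config	= xyz_mac_config,
	.mac_link_up	= xyz_mac_link_up,
	.mac_link_down	= xyz_mac_link_down,
};

static struct phylink *xyz_create_phylink(struct net_device *ndev,
					  struct phylink_config *plc,
					  phy_interface_t phy_mode)
{
	plc->dev = &ndev->dev;
	plc->type = PHYLINK_NETDEV;
	plc->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
	__set_bit(phy_mode, plc->supported_interfaces);

	/* ndo_open() then calls phylink_of_phy_connect() and phylink_start();
	 * ndo_stop() calls phylink_stop() and phylink_disconnect_phy().
	 */
	return phylink_create(plc, dev_fwnode(&ndev->dev), phy_mode,
			      &xyz_mac_ops);
}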