Diffstat (limited to 'drivers/net/ethernet/freescale/fec_main.c')
-rw-r--r--   drivers/net/ethernet/freescale/fec_main.c | 2769
1 file changed, 1962 insertions(+), 807 deletions(-)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index a6e323f15637..c685a5c0cc51 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) @@ -21,60 +22,69 @@ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. */ -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/pm_runtime.h> -#include <linux/ptrace.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/slab.h> -#include <linux/interrupt.h> +#include <linux/bitops.h> +#include <linux/bpf.h> +#include <linux/bpf_trace.h> +#include <linux/cacheflush.h> +#include <linux/clk.h> +#include <linux/crc32.h> #include <linux/delay.h> -#include <linux/netdevice.h> +#include <linux/errno.h> #include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/in.h> -#include <linux/ip.h> -#include <net/ip.h> -#include <net/tso.h> -#include <linux/tcp.h> -#include <linux/udp.h> +#include <linux/fec.h> +#include <linux/filter.h> +#include <linux/gpio/consumer.h> #include <linux/icmp.h> -#include <linux/spinlock.h> -#include <linux/workqueue.h> -#include <linux/bitops.h> +#include <linux/if_vlan.h> +#include <linux/in.h> +#include <linux/interrupt.h> #include <linux/io.h> +#include <linux/ioport.h> +#include <linux/ip.h> #include <linux/irq.h> -#include <linux/clk.h> -#include <linux/platform_device.h> +#include <linux/kernel.h> #include <linux/mdio.h> -#include <linux/phy.h> -#include <linux/fec.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/netdevice.h> #include <linux/of.h> -#include <linux/of_device.h> -#include <linux/of_gpio.h> #include <linux/of_mdio.h> #include <linux/of_net.h> -#include <linux/regulator/consumer.h> -#include <linux/if_vlan.h> +#include <linux/phy.h> #include <linux/pinctrl/consumer.h> +#include <linux/phy_fixed.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/prefetch.h> +#include <linux/property.h> +#include <linux/ptrace.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include <linux/workqueue.h> +#include <net/ip.h> +#include <net/page_pool/helpers.h> +#include <net/selftests.h> +#include <net/tso.h> #include <soc/imx/cpuidle.h> -#include <asm/cacheflush.h> - #include "fec.h" static void set_multicast_list(struct net_device *ndev); -static void fec_enet_itr_coal_init(struct net_device *ndev); +static void fec_enet_itr_coal_set(struct net_device *ndev); +static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep, + int cpu, struct xdp_buff *xdp, + u32 dma_sync_len); #define DRIVER_NAME "fec" -#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 
2 : 0)) +static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2}; -/* Pause frame feild and FIFO threshold */ -#define FEC_ENET_FCE (1 << 5) #define FEC_ENET_RSEM_V 0x84 #define FEC_ENET_RSFL_V 16 #define FEC_ENET_RAEM_V 0x8 @@ -82,68 +92,116 @@ static void fec_enet_itr_coal_init(struct net_device *ndev); #define FEC_ENET_OPD_V 0xFFF0 #define FEC_MDIO_PM_TIMEOUT 100 /* ms */ +#define FEC_ENET_XDP_PASS 0 +#define FEC_ENET_XDP_CONSUMED BIT(0) +#define FEC_ENET_XDP_TX BIT(1) +#define FEC_ENET_XDP_REDIR BIT(2) + +struct fec_devinfo { + u32 quirks; +}; + +static const struct fec_devinfo fec_imx25_info = { + .quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR | + FEC_QUIRK_HAS_FRREG | FEC_QUIRK_HAS_MDIO_C45, +}; + +static const struct fec_devinfo fec_imx27_info = { + .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG | + FEC_QUIRK_HAS_MDIO_C45, +}; + +static const struct fec_devinfo fec_imx28_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | + FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC | + FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII | + FEC_QUIRK_NO_HARD_RESET | FEC_QUIRK_HAS_MDIO_C45, +}; + +static const struct fec_devinfo fec_imx6q_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 | + FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII | + FEC_QUIRK_HAS_PMQOS | FEC_QUIRK_HAS_MDIO_C45, +}; + +static const struct fec_devinfo fec_mvf600_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC | + FEC_QUIRK_HAS_MDIO_C45, +}; + +static const struct fec_devinfo fec_imx6sx_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | + FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE | + FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES | + FEC_QUIRK_HAS_MDIO_C45, +}; + +static const struct fec_devinfo fec_imx6ul_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 | + FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC | + FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII | + FEC_QUIRK_HAS_MDIO_C45, +}; + +static const struct fec_devinfo fec_imx8mq_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | + FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE | + FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES | + FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2 | + FEC_QUIRK_HAS_MDIO_C45, +}; + +static const struct fec_devinfo fec_imx8qm_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | + FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE | + FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES | + FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45 | + FEC_QUIRK_JUMBO_FRAME, +}; + +static const struct fec_devinfo fec_s32v234_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | + FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | + FEC_QUIRK_HAS_MDIO_C45, +}; + static struct platform_device_id fec_devtype[] = { { /* keep it for coldfire */ 
.name = DRIVER_NAME, .driver_data = 0, }, { - .name = "imx25-fec", - .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR, - }, { - .name = "imx27-fec", - .driver_data = FEC_QUIRK_MIB_CLEAR, - }, { - .name = "imx28-fec", - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | - FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC, - }, { - .name = "imx6q-fec", - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | - FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | - FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 | - FEC_QUIRK_HAS_RACC, - }, { - .name = "mvf600-fec", - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC, - }, { - .name = "imx6sx-fec", - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | - FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | - FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | - FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | - FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE, - }, { - .name = "imx6ul-fec", - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | - FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | - FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 | - FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC | - FEC_QUIRK_HAS_COALESCE, - }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(platform, fec_devtype); -enum imx_fec_type { - IMX25_FEC = 1, /* runs on i.mx25/50/53 */ - IMX27_FEC, /* runs on i.mx27/35/51 */ - IMX28_FEC, - IMX6Q_FEC, - MVF600_FEC, - IMX6SX_FEC, - IMX6UL_FEC, -}; - static const struct of_device_id fec_dt_ids[] = { - { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], }, - { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], }, - { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], }, - { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, - { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, - { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, - { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], }, + { .compatible = "fsl,imx25-fec", .data = &fec_imx25_info, }, + { .compatible = "fsl,imx27-fec", .data = &fec_imx27_info, }, + { .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, }, + { .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, }, + { .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, }, + { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6sx_info, }, + { .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, }, + { .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, }, + { .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, }, + { .compatible = "fsl,s32v234-fec", .data = &fec_s32v234_info, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fec_dt_ids); @@ -173,14 +231,17 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #endif /* CONFIG_M5272 */ /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets. + * + * 2048 byte skbufs are allocated. However, alignment requirements + * varies between FEC variants. Worst case is 64, so round down by 64. 
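As a sanity check of the sizing comment above: a minimal userspace sketch, assuming round_down() has the kernel's power-of-two semantics. It reproduces the 1984-byte value that the PKT_MAXBUF_SIZE define below evaluates to.

#include <stdio.h>

/* Kernel-style round_down() for a power-of-two multiple. */
#define round_down(x, y) ((x) & ~((unsigned long)(y) - 1))

int main(void)
{
	/* 2048-byte buffer minus worst-case 64-byte alignment slack. */
	printf("PKT_MAXBUF_SIZE = %lu\n", round_down(2048UL - 64, 64)); /* 1984 */
	return 0;
}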
*/ -#define PKT_MAXBUF_SIZE 1522 +#define MAX_JUMBO_BUF_SIZE (round_down(16384 - FEC_DRV_RESERVE_SPACE - 64, 64)) +#define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64)) #define PKT_MINBUF_SIZE 64 -#define PKT_MAXBLR_SIZE 1536 /* FEC receive acceleration */ -#define FEC_RACC_IPDIS (1 << 1) -#define FEC_RACC_PRODIS (1 << 2) +#define FEC_RACC_IPDIS BIT(1) +#define FEC_RACC_PRODIS BIT(2) #define FEC_RACC_SHIFT16 BIT(7) #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS) @@ -192,24 +253,44 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); * size bits. Other FEC hardware does not, so we need to take that into * account when setting it. */ -#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) -#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) +#ifndef CONFIG_M5272 +#define OPT_ARCH_HAS_MAX_FL 1 #else -#define OPT_FRAME_SIZE 0 +#define OPT_ARCH_HAS_MAX_FL 0 #endif /* FEC MII MMFR bits definition */ #define FEC_MMFR_ST (1 << 30) +#define FEC_MMFR_ST_C45 (0) #define FEC_MMFR_OP_READ (2 << 28) +#define FEC_MMFR_OP_READ_C45 (3 << 28) #define FEC_MMFR_OP_WRITE (1 << 28) +#define FEC_MMFR_OP_ADDR_WRITE (0) #define FEC_MMFR_PA(v) ((v & 0x1f) << 23) #define FEC_MMFR_RA(v) ((v & 0x1f) << 18) #define FEC_MMFR_TA (2 << 16) #define FEC_MMFR_DATA(v) (v & 0xffff) /* FEC ECR bits definition */ -#define FEC_ECR_MAGICEN (1 << 2) -#define FEC_ECR_SLEEP (1 << 3) +#define FEC_ECR_RESET BIT(0) +#define FEC_ECR_ETHEREN BIT(1) +#define FEC_ECR_MAGICEN BIT(2) +#define FEC_ECR_SLEEP BIT(3) +#define FEC_ECR_EN1588 BIT(4) +#define FEC_ECR_SPEED BIT(5) +#define FEC_ECR_BYTESWP BIT(8) +/* FEC RCR bits definition */ +#define FEC_RCR_LOOP BIT(0) +#define FEC_RCR_DRT BIT(1) +#define FEC_RCR_MII BIT(2) +#define FEC_RCR_PROMISC BIT(3) +#define FEC_RCR_BC_REJ BIT(4) +#define FEC_RCR_FLOWCTL BIT(5) +#define FEC_RCR_RGMII BIT(6) +#define FEC_RCR_RMII BIT(8) +#define FEC_RCR_10BASET BIT(9) +#define FEC_RCR_NLC BIT(30) +/* TX WMARK bits */ +#define FEC_TXWMRK_STRFWD BIT(8) #define FEC_MII_TIMEOUT 30000 /* us */ @@ -222,9 +303,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #define FEC_WOL_FLAG_ENABLE (0x1 << 1) #define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2) -#define COPYBREAK_DEFAULT 256 - -#define TSO_HEADER_SIZE 128 /* Max number of allowed TCP segments for software TSO */ #define FEC_MAX_TSO_SEGS 100 #define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) @@ -274,16 +352,6 @@ static void swap_buffer(void *bufaddr, int len) swab32s(buf); } -static void swap_buffer2(void *dst_buf, void *src_buf, int len) -{ - int i; - unsigned int *src = src_buf; - unsigned int *dst = dst_buf; - - for (i = 0; i < len; i += 4, src++, dst++) - *dst = swab32p(src); -} - static void fec_dump(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); @@ -305,12 +373,76 @@ static void fec_dump(struct net_device *ndev) fec16_to_cpu(bdp->cbd_sc), fec32_to_cpu(bdp->cbd_bufaddr), fec16_to_cpu(bdp->cbd_datlen), - txq->tx_skbuff[index]); + txq->tx_buf[index].buf_p); bdp = fec_enet_get_nextdesc(bdp, &txq->bd); index++; } while (bdp != txq->bd.base); } +/* + * Coldfire does not support DMA coherent allocations, and has historically used + * a band-aid with a manual flush in fec_enet_rx_queue. 
+ */ +#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA) +static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, + gfp_t gfp) +{ + return dma_alloc_noncoherent(dev, size, handle, DMA_BIDIRECTIONAL, gfp); +} + +static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t handle) +{ + dma_free_noncoherent(dev, size, cpu_addr, handle, DMA_BIDIRECTIONAL); +} +#else /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */ +static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, + gfp_t gfp) +{ + return dma_alloc_coherent(dev, size, handle, gfp); +} + +static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t handle) +{ + dma_free_coherent(dev, size, cpu_addr, handle); +} +#endif /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */ + +struct fec_dma_devres { + size_t size; + void *vaddr; + dma_addr_t dma_handle; +}; + +static void fec_dmam_release(struct device *dev, void *res) +{ + struct fec_dma_devres *this = res; + + fec_dma_free(dev, this->size, this->vaddr, this->dma_handle); +} + +static void *fec_dmam_alloc(struct device *dev, size_t size, dma_addr_t *handle, + gfp_t gfp) +{ + struct fec_dma_devres *dr; + void *vaddr; + + dr = devres_alloc(fec_dmam_release, sizeof(*dr), gfp); + if (!dr) + return NULL; + vaddr = fec_dma_alloc(dev, size, handle, gfp); + if (!vaddr) { + devres_free(dr); + return NULL; + } + dr->vaddr = vaddr; + dr->dma_handle = *handle; + dr->size = size; + devres_add(dev, dr); + return vaddr; +} + static inline bool is_ipv4_pkt(struct sk_buff *skb) { return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4; @@ -333,6 +465,49 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) return 0; } +static int +fec_enet_create_page_pool(struct fec_enet_private *fep, + struct fec_enet_priv_rx_q *rxq, int size) +{ + struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog); + struct page_pool_params pp_params = { + .order = fep->pagepool_order, + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .pool_size = size, + .nid = dev_to_node(&fep->pdev->dev), + .dev = &fep->pdev->dev, + .dma_dir = xdp_prog ? 
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, + .offset = FEC_ENET_XDP_HEADROOM, + .max_len = fep->rx_frame_size, + }; + int err; + + rxq->page_pool = page_pool_create(&pp_params); + if (IS_ERR(rxq->page_pool)) { + err = PTR_ERR(rxq->page_pool); + rxq->page_pool = NULL; + return err; + } + + err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0); + if (err < 0) + goto err_free_pp; + + err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, + rxq->page_pool); + if (err) + goto err_unregister_rxq; + + return 0; + +err_unregister_rxq: + xdp_rxq_info_unreg(&rxq->xdp_rxq); +err_free_pp: + page_pool_destroy(rxq->page_pool); + rxq->page_pool = NULL; + return err; +} + static struct bufdesc * fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, @@ -359,7 +534,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, status = fec16_to_cpu(bdp->cbd_sc); status &= ~BD_ENET_TX_STATS; status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); - frag_len = skb_shinfo(skb)->frags[frag].size; + frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]); /* Handle the last BD specially */ if (frag == nr_frags - 1) { @@ -377,11 +552,12 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; + ebdp->cbd_bdu = 0; ebdp->cbd_esc = cpu_to_fec32(estatus); } - bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; + bufaddr = skb_frag_address(this_frag); index = fec_enet_get_bd_index(bdp, &txq->bd); if (((unsigned long) bufaddr) & fep->tx_align || @@ -518,7 +694,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, index = fec_enet_get_bd_index(last_bdp, &txq->bd); /* Save skb pointer */ - txq->tx_skbuff[index] = skb; + txq->tx_buf[index].buf_p = skb; /* Make sure the updates to rest of the descriptor are performed before * transferring ownership. @@ -536,14 +712,17 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, skb_tx_timestamp(skb); - /* Make sure the update to bdp and tx_skbuff are performed before - * txq->bd.cur. - */ + /* Make sure the update to bdp is performed before txq->bd.cur. 
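The two barrier comments in this hunk are the producer half of the usual descriptor-ring handoff: publish every descriptor field, then flip ownership, then advance the software cursor. A condensed sketch of that ordering (hypothetical helper name, same accessors as the driver):

/* Fill the descriptor, then hand it to the device. */
static void fec_bd_publish(struct bufdesc *bdp, dma_addr_t buf, u16 len)
{
	bdp->cbd_bufaddr = cpu_to_fec32(buf);
	bdp->cbd_datlen = cpu_to_fec16(len);

	/* All field updates must be visible before the READY bit. */
	wmb();

	bdp->cbd_sc = cpu_to_fec16(fec16_to_cpu(bdp->cbd_sc) | BD_ENET_TX_READY);
}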
*/ wmb(); txq->bd.cur = bdp; /* Trigger transmission start */ - writel(0, txq->bd.reg_desc_active); + if (!(fep->quirks & FEC_QUIRK_ERR007885) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active)) + writel(0, txq->bd.reg_desc_active); return 0; } @@ -579,7 +758,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, dev_kfree_skb_any(skb); if (net_ratelimit()) netdev_err(ndev, "Tx DMA memory map failed\n"); - return NETDEV_TX_BUSY; + return NETDEV_TX_OK; } bdp->cbd_datlen = cpu_to_fec16(size); @@ -614,7 +793,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, struct bufdesc *bdp, int index) { struct fec_enet_private *fep = netdev_priv(ndev); - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + int hdr_len = skb_tcp_all_headers(skb); struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); void *bufaddr; unsigned long dmabuf; @@ -641,7 +820,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, dev_kfree_skb_any(skb); if (net_ratelimit()) netdev_err(ndev, "Tx DMA memory map failed\n"); - return NETDEV_TX_BUSY; + return NETDEV_TX_OK; } } @@ -667,9 +846,10 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - int total_len, data_left; + int hdr_len, total_len, data_left; struct bufdesc *bdp = txq->bd.cur; + struct bufdesc *tmp_bdp; + struct bufdesc_ex *ebdp; struct tso_t tso; unsigned int index = 0; int ret; @@ -688,7 +868,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, } /* Initialize the TSO handler, and prepare the first payload */ - tso_start(skb, &tso); + hdr_len = tso_start(skb, &tso); total_len = skb->len - hdr_len; while (total_len > 0) { @@ -727,7 +907,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, } /* Save skb pointer */ - txq->tx_skbuff[index] = skb; + txq->tx_buf[index].buf_p = skb; skb_tx_timestamp(skb); txq->bd.cur = bdp; @@ -743,7 +923,34 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, return 0; err_release: - /* TODO: Release all used data descriptors for TSO */ + /* Release all used data descriptors for TSO */ + tmp_bdp = txq->bd.cur; + + while (tmp_bdp != bdp) { + /* Unmap data buffers */ + if (tmp_bdp->cbd_bufaddr && + !IS_TSO_HEADER(txq, fec32_to_cpu(tmp_bdp->cbd_bufaddr))) + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(tmp_bdp->cbd_bufaddr), + fec16_to_cpu(tmp_bdp->cbd_datlen), + DMA_TO_DEVICE); + + /* Clear standard buffer descriptor fields */ + tmp_bdp->cbd_sc = 0; + tmp_bdp->cbd_datlen = 0; + tmp_bdp->cbd_bufaddr = 0; + + /* Handle extended descriptor if enabled */ + if (fep->bufdesc_ex) { + ebdp = (struct bufdesc_ex *)tmp_bdp; + ebdp->cbd_esc = 0; + } + + tmp_bdp = fec_enet_get_nextdesc(tmp_bdp, &txq->bd); + } + + dev_kfree_skb_any(skb); + return ret; } @@ -803,7 +1010,7 @@ static void fec_enet_bd_init(struct net_device *dev) /* Set the last buffer to wrap */ bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); - bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); + bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP); rxq->bd.cur = rxq->bd.base; } @@ -817,17 +1024,43 @@ static void fec_enet_bd_init(struct net_device *dev) for (i = 0; i < txq->bd.ring_size; i++) { /* Initialize the BD for every fragment in the page. 
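The guarded descriptor kick near the top of this hunk implements the workaround for erratum ERR007885, the quirk it is keyed on: under certain timing a write to the transmit-descriptor-active register can be lost while the DMA engine is busy, so the register is sampled up to four times and only rewritten if it reads back idle. A condensed sketch of the same logic (hypothetical helper name):

static void fec_kick_tx(struct fec_enet_private *fep,
			struct fec_enet_priv_tx_q *txq)
{
	int i;

	if (!(fep->quirks & FEC_QUIRK_ERR007885)) {
		writel(0, txq->bd.reg_desc_active);
		return;
	}

	for (i = 0; i < 4; i++) {
		if (!readl(txq->bd.reg_desc_active)) {
			/* Engine idle: the kick is needed and will stick. */
			writel(0, txq->bd.reg_desc_active);
			return;
		}
	}
	/* Still active after four reads: the engine will fetch the new
	 * descriptor on its own, no kick required.
	 */
}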
*/ bdp->cbd_sc = cpu_to_fec16(0); - if (txq->tx_skbuff[i]) { - dev_kfree_skb_any(txq->tx_skbuff[i]); - txq->tx_skbuff[i] = NULL; + if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) { + if (bdp->cbd_bufaddr && + !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), + fec16_to_cpu(bdp->cbd_datlen), + DMA_TO_DEVICE); + if (txq->tx_buf[i].buf_p) + dev_kfree_skb_any(txq->tx_buf[i].buf_p); + } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) { + if (bdp->cbd_bufaddr) + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), + fec16_to_cpu(bdp->cbd_datlen), + DMA_TO_DEVICE); + + if (txq->tx_buf[i].buf_p) + xdp_return_frame(txq->tx_buf[i].buf_p); + } else { + struct page *page = txq->tx_buf[i].buf_p; + + if (page) + page_pool_put_page(pp_page_to_nmdesc(page)->pp, + page, 0, + false); } + + txq->tx_buf[i].buf_p = NULL; + /* restore default tx buffer type: FEC_TXBUF_T_SKB */ + txq->tx_buf[i].type = FEC_TXBUF_T_SKB; bdp->cbd_bufaddr = cpu_to_fec32(0); bdp = fec_enet_get_nextdesc(bdp, &txq->bd); } /* Set the last buffer to wrap */ bdp = fec_enet_get_prevdesc(bdp, &txq->bd); - bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); + bdp->cbd_sc |= cpu_to_fec16(BD_ENET_TX_WRAP); txq->dirty_tx = bdp; } } @@ -851,7 +1084,7 @@ static void fec_enet_enable_ring(struct net_device *ndev) for (i = 0; i < fep->num_rx_queues; i++) { rxq = fep->rx_queue[i]; writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i)); - writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); + writel(fep->max_buf_size, fep->hwp + FEC_R_BUFF_SIZE(i)); /* enable DMA1/2 */ if (i) @@ -870,24 +1103,40 @@ static void fec_enet_enable_ring(struct net_device *ndev) } } -static void fec_enet_reset_skb(struct net_device *ndev) +/* Whack a reset. We should wait for this. + * For i.MX6SX SOC, enet use AXI bus, we use disable MAC + * instead of reset MAC itself. + */ +static void fec_ctrl_reset(struct fec_enet_private *fep, bool allow_wol) { - struct fec_enet_private *fep = netdev_priv(ndev); - struct fec_enet_priv_tx_q *txq; - int i, j; - - for (i = 0; i < fep->num_tx_queues; i++) { - txq = fep->tx_queue[i]; + u32 val; - for (j = 0; j < txq->bd.ring_size; j++) { - if (txq->tx_skbuff[j]) { - dev_kfree_skb_any(txq->tx_skbuff[j]); - txq->tx_skbuff[j] = NULL; - } + if (!allow_wol || !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { + if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES || + ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) { + writel(0, fep->hwp + FEC_ECNTRL); + } else { + writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL); + udelay(10); } + } else { + val = readl(fep->hwp + FEC_ECNTRL); + val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); + writel(val, fep->hwp + FEC_ECNTRL); } } +static void fec_set_hw_mac_addr(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | + (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), + fep->hwp + FEC_ADDR_LOW); + writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), + fep->hwp + FEC_ADDR_HIGH); +} + /* * This function is called to start or restart the FEC during a link * change, transmit timeout, or to reconfigure the FEC. The network @@ -897,49 +1146,37 @@ static void fec_restart(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - u32 val; - u32 temp_mac[2]; - u32 rcntl = OPT_FRAME_SIZE | 0x04; - u32 ecntl = 0x2; /* ETHEREN */ + u32 ecntl = FEC_ECR_ETHEREN; + u32 rcntl = FEC_RCR_MII; - /* Whack a reset. We should wait for this. 
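fec_set_hw_mac_addr() above packs the six address bytes MSB-first into the two address registers. A standalone check of that packing for the example address 00:11:22:33:44:55:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t a[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t lo = a[3] | a[2] << 8 | a[1] << 16 | (uint32_t)a[0] << 24;
	uint32_t hi = a[5] << 16 | (uint32_t)a[4] << 24;

	printf("FEC_ADDR_LOW  = 0x%08x\n", lo); /* 0x00112233 */
	printf("FEC_ADDR_HIGH = 0x%08x\n", hi); /* 0x44550000 */
	return 0;
}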
- * For i.MX6SX SOC, enet use AXI bus, we use disable MAC - * instead of reset MAC itself. - */ - if (fep->quirks & FEC_QUIRK_HAS_AVB) { - writel(0, fep->hwp + FEC_ECNTRL); - } else { - writel(1, fep->hwp + FEC_ECNTRL); - udelay(10); - } + if (OPT_ARCH_HAS_MAX_FL) + rcntl |= (fep->netdev->mtu + ETH_HLEN + ETH_FCS_LEN) << 16; + + if (fep->bufdesc_ex) + fec_ptp_save_state(fep); + + fec_ctrl_reset(fep, false); /* * enet-mac reset will reset mac address registers too, * so need to reconfigure it. */ - memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); - writel((__force u32)cpu_to_be32(temp_mac[0]), - fep->hwp + FEC_ADDR_LOW); - writel((__force u32)cpu_to_be32(temp_mac[1]), - fep->hwp + FEC_ADDR_HIGH); + fec_set_hw_mac_addr(ndev); - /* Clear any outstanding interrupt. */ - writel(0xffffffff, fep->hwp + FEC_IEVENT); + /* Clear any outstanding interrupt, except MDIO. */ + writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT); fec_enet_bd_init(ndev); fec_enet_enable_ring(ndev); - /* Reset tx SKB buffers. */ - fec_enet_reset_skb(ndev); - /* Enable MII mode */ if (fep->full_duplex == DUPLEX_FULL) { /* FD enable */ writel(0x04, fep->hwp + FEC_X_CNTRL); } else { /* No Rcv on Xmit */ - rcntl |= 0x02; + rcntl |= FEC_RCR_DRT; writel(0x0, fep->hwp + FEC_X_CNTRL); } @@ -948,7 +1185,8 @@ fec_restart(struct net_device *ndev) #if !defined(CONFIG_M5272) if (fep->quirks & FEC_QUIRK_HAS_RACC) { - val = readl(fep->hwp + FEC_RACC); + u32 val = readl(fep->hwp + FEC_RACC); + /* align IP header */ val |= FEC_RACC_SHIFT16; if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) @@ -957,7 +1195,7 @@ fec_restart(struct net_device *ndev) else val &= ~FEC_RACC_OPTIONS; writel(val, fep->hwp + FEC_RACC); - writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL); + writel(min(fep->rx_frame_size, fep->max_buf_size), fep->hwp + FEC_FTRL); } #endif @@ -967,27 +1205,24 @@ fec_restart(struct net_device *ndev) */ if (fep->quirks & FEC_QUIRK_ENET_MAC) { /* Enable flow control and length check */ - rcntl |= 0x40000000 | 0x00000020; + rcntl |= FEC_RCR_NLC | FEC_RCR_FLOWCTL; /* RGMII, RMII or MII */ - if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII || - fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || - fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || - fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) - rcntl |= (1 << 6); + if (phy_interface_mode_is_rgmii(fep->phy_interface)) + rcntl |= FEC_RCR_RGMII; else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) - rcntl |= (1 << 8); + rcntl |= FEC_RCR_RMII; else - rcntl &= ~(1 << 8); + rcntl &= ~FEC_RCR_RMII; /* 1G, 100M or 10M */ if (ndev->phydev) { if (ndev->phydev->speed == SPEED_1000) - ecntl |= (1 << 5); + ecntl |= FEC_ECR_SPEED; else if (ndev->phydev->speed == SPEED_100) - rcntl &= ~(1 << 9); + rcntl &= ~FEC_RCR_10BASET; else - rcntl |= (1 << 9); + rcntl |= FEC_RCR_10BASET; } } else { #ifdef FEC_MIIGSK_ENR @@ -1020,7 +1255,7 @@ fec_restart(struct net_device *ndev) if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && ndev->phydev && ndev->phydev->pause)) { - rcntl |= FEC_ENET_FCE; + rcntl |= FEC_RCR_FLOWCTL; /* set FIFO threshold parameter to reduce overrun */ writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); @@ -1031,7 +1266,7 @@ fec_restart(struct net_device *ndev) /* OPD */ writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD); } else { - rcntl &= ~FEC_ENET_FCE; + rcntl &= ~FEC_RCR_FLOWCTL; } #endif /* !defined(CONFIG_M5272) */ @@ -1046,13 +1281,30 @@ fec_restart(struct net_device *ndev) if (fep->quirks & FEC_QUIRK_ENET_MAC) { /* enable ENET endian swap 
*/ - ecntl |= (1 << 8); - /* enable ENET store and forward mode */ - writel(1 << 8, fep->hwp + FEC_X_WMRK); + ecntl |= FEC_ECR_BYTESWP; + + /* When Jumbo Frame is enabled, the FIFO may not be large enough + * to hold an entire frame. In such cases, if the MTU exceeds + * (PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN), configure the interface + * to operate in cut-through mode, triggered by the FIFO threshold. + * Otherwise, enable the ENET store-and-forward mode. + */ + if ((fep->quirks & FEC_QUIRK_JUMBO_FRAME) && + (ndev->mtu > (PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN))) + writel(0xF, fep->hwp + FEC_X_WMRK); + else + writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK); } if (fep->bufdesc_ex) - ecntl |= (1 << 4); + ecntl |= FEC_ECR_EN1588; + + if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && + fep->rgmii_txc_dly) + ecntl |= FEC_ENET_TXC_DLY; + if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && + fep->rgmii_rxc_dly) + ecntl |= FEC_ENET_RXC_DLY; #ifndef CONFIG_M5272 /* Enable the MIB statistic event counters */ @@ -1063,26 +1315,90 @@ fec_restart(struct net_device *ndev) writel(ecntl, fep->hwp + FEC_ECNTRL); fec_enet_active_rxring(ndev); - if (fep->bufdesc_ex) + if (fep->bufdesc_ex) { fec_ptp_start_cyclecounter(ndev); + fec_ptp_restore_state(fep); + } /* Enable interrupts we wish to service */ if (fep->link) writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); else - writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); + writel(0, fep->hwp + FEC_IMASK); /* Init the interrupt coalescing */ - fec_enet_itr_coal_init(ndev); + if (fep->quirks & FEC_QUIRK_HAS_COALESCE) + fec_enet_itr_coal_set(ndev); +} + +static int fec_enet_ipc_handle_init(struct fec_enet_private *fep) +{ + if (!(of_machine_is_compatible("fsl,imx8qm") || + of_machine_is_compatible("fsl,imx8qxp") || + of_machine_is_compatible("fsl,imx8dxl"))) + return 0; + + return imx_scu_get_handle(&fep->ipc_handle); +} + +static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled) +{ + struct device_node *np = fep->pdev->dev.of_node; + u32 rsrc_id, val; + int idx; + + if (!np || !fep->ipc_handle) + return; + + idx = of_alias_get_id(np, "ethernet"); + if (idx < 0) + idx = 0; + rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0; + + val = enabled ? 
1 : 0; + imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val); +} + +static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled) +{ + struct fec_platform_data *pdata = fep->pdev->dev.platform_data; + struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr; + + if (stop_gpr->gpr) { + if (enabled) + regmap_update_bits(stop_gpr->gpr, stop_gpr->reg, + BIT(stop_gpr->bit), + BIT(stop_gpr->bit)); + else + regmap_update_bits(stop_gpr->gpr, stop_gpr->reg, + BIT(stop_gpr->bit), 0); + } else if (pdata && pdata->sleep_mode_enable) { + pdata->sleep_mode_enable(enabled); + } else { + fec_enet_ipg_stop_set(fep, enabled); + } +} + +static void fec_irqs_disable(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + writel(0, fep->hwp + FEC_IMASK); +} + +static void fec_irqs_disable_except_wakeup(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + writel(0, fep->hwp + FEC_IMASK); + writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK); } static void fec_stop(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - struct fec_platform_data *pdata = fep->pdev->dev.platform_data; - u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); + u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & FEC_RCR_RMII; u32 val; /* We cannot expect a graceful transmit stop without link !!! */ @@ -1093,40 +1409,32 @@ fec_stop(struct net_device *ndev) netdev_err(ndev, "Graceful transmit stop did not complete!\n"); } - /* Whack a reset. We should wait for this. - * For i.MX6SX SOC, enet use AXI bus, we use disable MAC - * instead of reset MAC itself. - */ - if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { - if (fep->quirks & FEC_QUIRK_HAS_AVB) { - writel(0, fep->hwp + FEC_ECNTRL); - } else { - writel(1, fep->hwp + FEC_ECNTRL); - udelay(10); - } - writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); - } else { - writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK); - val = readl(fep->hwp + FEC_ECNTRL); - val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); - writel(val, fep->hwp + FEC_ECNTRL); + if (fep->bufdesc_ex) + fec_ptp_save_state(fep); - if (pdata && pdata->sleep_mode_enable) - pdata->sleep_mode_enable(true); - } + fec_ctrl_reset(fep, true); writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); + writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); /* We have to keep ENET enabled to have MII interrupt stay working */ if (fep->quirks & FEC_QUIRK_ENET_MAC && !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { - writel(2, fep->hwp + FEC_ECNTRL); + writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL); writel(rmii_mode, fep->hwp + FEC_R_CNTRL); } -} + if (fep->bufdesc_ex) { + val = readl(fep->hwp + FEC_ECNTRL); + val |= FEC_ECR_EN1588; + writel(val, fep->hwp + FEC_ECNTRL); + + fec_ptp_start_cyclecounter(ndev); + fec_ptp_restore_state(fep); + } +} static void -fec_timeout(struct net_device *ndev) +fec_timeout(struct net_device *ndev, unsigned int txqueue) { struct fec_enet_private *fep = netdev_priv(ndev); @@ -1148,7 +1456,7 @@ static void fec_enet_timeout_work(struct work_struct *work) napi_disable(&fep->napi); netif_tx_lock_bh(ndev); fec_restart(ndev); - netif_wake_queue(ndev); + netif_tx_wake_all_queues(ndev); netif_tx_unlock_bh(ndev); napi_enable(&fep->napi); } @@ -1171,9 +1479,10 @@ fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts, } static void -fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) +fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget) { struct fec_enet_private *fep; + struct xdp_frame *xdpf; struct 
bufdesc *bdp; unsigned short status; struct sk_buff *skb; @@ -1181,11 +1490,11 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) struct netdev_queue *nq; int index = 0; int entries_free; + struct page *page; + int frame_len; fep = netdev_priv(ndev); - queue_id = FEC_ENET_GET_QUQUE(queue_id); - txq = fep->tx_queue[queue_id]; /* get next bdp of dirty_tx */ nq = netdev_get_tx_queue(ndev, queue_id); @@ -1203,16 +1512,45 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) index = fec_enet_get_bd_index(bdp, &txq->bd); - skb = txq->tx_skbuff[index]; - txq->tx_skbuff[index] = NULL; - if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) - dma_unmap_single(&fep->pdev->dev, - fec32_to_cpu(bdp->cbd_bufaddr), - fec16_to_cpu(bdp->cbd_datlen), - DMA_TO_DEVICE); - bdp->cbd_bufaddr = cpu_to_fec32(0); - if (!skb) - goto skb_done; + if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) { + skb = txq->tx_buf[index].buf_p; + if (bdp->cbd_bufaddr && + !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), + fec16_to_cpu(bdp->cbd_datlen), + DMA_TO_DEVICE); + bdp->cbd_bufaddr = cpu_to_fec32(0); + if (!skb) + goto tx_buf_done; + } else { + /* Tx processing cannot call any XDP (or page pool) APIs if + * the "budget" is 0. Because NAPI is called with budget of + * 0 (such as netpoll) indicates we may be in an IRQ context, + * however, we can't use the page pool from IRQ context. + */ + if (unlikely(!budget)) + break; + + if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) { + xdpf = txq->tx_buf[index].buf_p; + if (bdp->cbd_bufaddr) + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), + fec16_to_cpu(bdp->cbd_datlen), + DMA_TO_DEVICE); + } else { + page = txq->tx_buf[index].buf_p; + } + + bdp->cbd_bufaddr = cpu_to_fec32(0); + if (unlikely(!txq->tx_buf[index].buf_p)) { + txq->tx_buf[index].type = FEC_TXBUF_T_SKB; + goto tx_buf_done; + } + + frame_len = fec16_to_cpu(bdp->cbd_datlen); + } /* Check for errors. */ if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | @@ -1231,16 +1569,11 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) ndev->stats.tx_carrier_errors++; } else { ndev->stats.tx_packets++; - ndev->stats.tx_bytes += skb->len; - } - - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) && - fep->bufdesc_ex) { - struct skb_shared_hwtstamps shhwtstamps; - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; - fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps); - skb_tstamp_tx(skb, &shhwtstamps); + if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) + ndev->stats.tx_bytes += skb->len; + else + ndev->stats.tx_bytes += frame_len; } /* Deferred means some collisions occurred during transmit, @@ -1249,10 +1582,36 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) if (status & BD_ENET_TX_DEF) ndev->stats.collisions++; - /* Free the sk buffer associated with this last transmit */ - dev_kfree_skb_any(skb); -skb_done: - /* Make sure the update to bdp and tx_skbuff are performed + if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) { + /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who + * are to time stamp the packet, so we still need to check time + * stamping enabled flag. 
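One detail of the reclaim loop above deserves emphasis: netpoll may call the NAPI handler with a budget of 0 from an IRQ-like context, and page-pool/XDP buffer returns are only legal from a genuine NAPI poll. The loop therefore reduces to this rule (fragment, as in the code above):

/* SKBs can always be freed; XDP frames and page-pool pages must
 * wait for a softirq-context poll with budget > 0.
 */
if (txq->tx_buf[index].type != FEC_TXBUF_T_SKB && unlikely(!budget))
	break;	/* leave the descriptor for the next NAPI poll */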
+ */ + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS && + fep->hwts_tx_en) && fep->bufdesc_ex) { + struct skb_shared_hwtstamps shhwtstamps; + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + + fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps); + skb_tstamp_tx(skb, &shhwtstamps); + } + + /* Free the sk buffer associated with this last transmit */ + napi_consume_skb(skb, budget); + } else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) { + xdp_return_frame_rx_napi(xdpf); + } else { /* recycle pages of XDP_TX frames */ + /* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */ + page_pool_put_page(pp_page_to_nmdesc(page)->pp, page, + 0, true); + } + + txq->tx_buf[index].buf_p = NULL; + /* restore default tx buffer type: FEC_TXBUF_T_SKB */ + txq->tx_buf[index].type = FEC_TXBUF_T_SKB; + +tx_buf_done: + /* Make sure the update to bdp and tx_buf are performed * before dirty_tx */ wmb(); @@ -1263,7 +1622,7 @@ skb_done: /* Since we have freed up a buffer, the ring is no longer full */ - if (netif_queue_stopped(ndev)) { + if (netif_tx_queue_stopped(nq)) { entries_free = fec_enet_get_free_txdesc_num(txq); if (entries_free >= txq->tx_wake_threshold) netif_tx_wake_queue(nq); @@ -1276,63 +1635,112 @@ skb_done: writel(0, txq->bd.reg_desc_active); } -static void -fec_enet_tx(struct net_device *ndev) +static void fec_enet_tx(struct net_device *ndev, int budget) { struct fec_enet_private *fep = netdev_priv(ndev); - u16 queue_id; - /* First process class A queue, then Class B and Best Effort queue */ - for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) { - clear_bit(queue_id, &fep->work_tx); - fec_enet_tx_queue(ndev, queue_id); - } - return; + int i; + + /* Make sure that AVB queues are processed first. */ + for (i = fep->num_tx_queues - 1; i >= 0; i--) + fec_enet_tx_queue(ndev, i, budget); } -static int -fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb) +static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq, + struct bufdesc *bdp, int index) { - struct fec_enet_private *fep = netdev_priv(ndev); - int off; + struct page *new_page; + dma_addr_t phys_addr; - off = ((unsigned long)skb->data) & fep->rx_align; - if (off) - skb_reserve(skb, fep->rx_align + 1 - off); - - bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE)); - if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) { - if (net_ratelimit()) - netdev_err(ndev, "Rx DMA memory map failed\n"); + new_page = page_pool_dev_alloc_pages(rxq->page_pool); + if (unlikely(!new_page)) return -ENOMEM; - } + + rxq->rx_buf[index] = new_page; + phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM; + bdp->cbd_bufaddr = cpu_to_fec32(phys_addr); return 0; } -static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, - struct bufdesc *bdp, u32 length, bool swap) +static u32 +fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog, + struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu) { - struct fec_enet_private *fep = netdev_priv(ndev); - struct sk_buff *new_skb; + unsigned int sync, len = xdp->data_end - xdp->data; + u32 ret = FEC_ENET_XDP_PASS; + struct page *page; + int err; + u32 act; + + act = bpf_prog_run_xdp(prog, xdp); - if (length > fep->rx_copybreak) - return false; + /* Due xdp_adjust_tail and xdp_adjust_head: DMA sync for_device cover + * max len CPU touch + */ + sync = xdp->data_end - xdp->data; + sync = 
max(sync, len); + + switch (act) { + case XDP_PASS: + rxq->stats[RX_XDP_PASS]++; + ret = FEC_ENET_XDP_PASS; + break; - new_skb = netdev_alloc_skb(ndev, length); - if (!new_skb) - return false; + case XDP_REDIRECT: + rxq->stats[RX_XDP_REDIRECT]++; + err = xdp_do_redirect(fep->netdev, xdp, prog); + if (unlikely(err)) + goto xdp_err; - dma_sync_single_for_cpu(&fep->pdev->dev, - fec32_to_cpu(bdp->cbd_bufaddr), - FEC_ENET_RX_FRSIZE - fep->rx_align, - DMA_FROM_DEVICE); - if (!swap) - memcpy(new_skb->data, (*skb)->data, length); - else - swap_buffer2(new_skb->data, (*skb)->data, length); - *skb = new_skb; + ret = FEC_ENET_XDP_REDIR; + break; + + case XDP_TX: + rxq->stats[RX_XDP_TX]++; + err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync); + if (unlikely(err)) { + rxq->stats[RX_XDP_TX_ERRORS]++; + goto xdp_err; + } - return true; + ret = FEC_ENET_XDP_TX; + break; + + default: + bpf_warn_invalid_xdp_action(fep->netdev, prog, act); + fallthrough; + + case XDP_ABORTED: + fallthrough; /* handle aborts by dropping packet */ + + case XDP_DROP: + rxq->stats[RX_XDP_DROP]++; +xdp_err: + ret = FEC_ENET_XDP_CONSUMED; + page = virt_to_head_page(xdp->data); + page_pool_put_page(rxq->page_pool, page, sync, true); + if (act != XDP_DROP) + trace_xdp_exception(fep->netdev, prog, act); + break; + } + + return ret; +} + +static void fec_enet_rx_vlan(const struct net_device *ndev, struct sk_buff *skb) +{ + if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) { + const struct vlan_ethhdr *vlan_header = skb_vlan_eth_hdr(skb); + const u16 vlan_tag = ntohs(vlan_header->h_vlan_TCI); + + /* Push and remove the vlan tag */ + + memmove(skb->data + VLAN_HLEN, skb->data, ETH_ALEN * 2); + skb_pull(skb, VLAN_HLEN); + __vlan_hwaccel_put_tag(skb, + htons(ETH_P_8021Q), + vlan_tag); + } } /* During a receive, the bd_rx.cur points to the current incoming buffer. @@ -1341,34 +1749,49 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, * effectively tossing the packet. */ static int -fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) +fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget) { struct fec_enet_private *fep = netdev_priv(ndev); struct fec_enet_priv_rx_q *rxq; struct bufdesc *bdp; unsigned short status; - struct sk_buff *skb_new = NULL; struct sk_buff *skb; ushort pkt_len; - __u8 *data; int pkt_received = 0; struct bufdesc_ex *ebdp = NULL; - bool vlan_packet_rcvd = false; - u16 vlan_tag; int index = 0; - bool is_copybreak; bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; + struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog); + u32 ret, xdp_result = FEC_ENET_XDP_PASS; + u32 data_start = FEC_ENET_XDP_HEADROOM; + int cpu = smp_processor_id(); + struct xdp_buff xdp; + struct page *page; + __fec32 cbd_bufaddr; + u32 sub_len = 4; + + /*If it has the FEC_QUIRK_HAS_RACC quirk property, the bit of + * FEC_RACC_SHIFT16 is set by default in the probe function. + */ + if (fep->quirks & FEC_QUIRK_HAS_RACC) { + data_start += 2; + sub_len += 2; + } -#ifdef CONFIG_M532x +#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA) + /* + * Hacky flush of all caches instead of using the DMA API for the TSO + * headers. + */ flush_cache_all(); #endif - queue_id = FEC_ENET_GET_QUQUE(queue_id); rxq = fep->rx_queue[queue_id]; /* First, grab all of the stats for the incoming packet. * These get messed up if we get called due to a busy condition. 
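fec_enet_run_xdp() above maps the standard XDP verdicts onto driver actions: recycle the page (DROP/ABORTED/errors), redirect, transmit on the ring, or fall through to the normal stack. For context, a minimal BPF-C program whose two return values exercise the XDP_DROP and XDP_PASS arms of that switch; it is a sketch, not part of this commit:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_min_len(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* Drop runt frames, pass everything else up the stack. */
	if (data + 64 > data_end)
		return XDP_DROP;
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";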
*/ bdp = rxq->bd.cur; + xdp_init_buff(&xdp, PAGE_SIZE << fep->pagepool_order, &rxq->xdp_rxq); while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) { @@ -1376,7 +1799,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) break; pkt_received++; - writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); + writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT); /* Check for errors. */ status ^= BD_ENET_RX_LAST; @@ -1408,39 +1831,58 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) ndev->stats.rx_packets++; pkt_len = fec16_to_cpu(bdp->cbd_datlen); ndev->stats.rx_bytes += pkt_len; + if (fep->quirks & FEC_QUIRK_HAS_RACC) + ndev->stats.rx_bytes -= 2; index = fec_enet_get_bd_index(bdp, &rxq->bd); - skb = rxq->rx_skbuff[index]; + page = rxq->rx_buf[index]; + cbd_bufaddr = bdp->cbd_bufaddr; + if (fec_enet_update_cbd(rxq, bdp, index)) { + ndev->stats.rx_dropped++; + goto rx_processing_done; + } + + dma_sync_single_for_cpu(&fep->pdev->dev, + fec32_to_cpu(cbd_bufaddr), + pkt_len, + DMA_FROM_DEVICE); + prefetch(page_address(page)); + + if (xdp_prog) { + xdp_buff_clear_frags_flag(&xdp); + /* subtract 16bit shift and FCS */ + xdp_prepare_buff(&xdp, page_address(page), + data_start, pkt_len - sub_len, false); + ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu); + xdp_result |= ret; + if (ret != FEC_ENET_XDP_PASS) + goto rx_processing_done; + } /* The packet length includes FCS, but we don't want to * include that when passing upstream as it messes up * bridging applications. */ - is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4, - need_swap); - if (!is_copybreak) { - skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); - if (unlikely(!skb_new)) { - ndev->stats.rx_dropped++; - goto rx_processing_done; - } - dma_unmap_single(&fep->pdev->dev, - fec32_to_cpu(bdp->cbd_bufaddr), - FEC_ENET_RX_FRSIZE - fep->rx_align, - DMA_FROM_DEVICE); + skb = build_skb(page_address(page), + PAGE_SIZE << fep->pagepool_order); + if (unlikely(!skb)) { + page_pool_recycle_direct(rxq->page_pool, page); + ndev->stats.rx_dropped++; + + netdev_err_once(ndev, "build_skb failed!\n"); + goto rx_processing_done; } - prefetch(skb->data - NET_IP_ALIGN); - skb_put(skb, pkt_len - 4); - data = skb->data; + skb_reserve(skb, data_start); + skb_put(skb, pkt_len - sub_len); + skb_mark_for_recycle(skb); - if (!is_copybreak && need_swap) - swap_buffer(data, pkt_len); + if (unlikely(need_swap)) { + u8 *data; -#if !defined(CONFIG_M5272) - if (fep->quirks & FEC_QUIRK_HAS_RACC) - data = skb_pull_inline(skb, 2); -#endif + data = page_address(page) + FEC_ENET_XDP_HEADROOM; + swap_buffer(data, pkt_len); + } /* Extract the enhanced buffer descriptor */ ebdp = NULL; @@ -1448,20 +1890,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) ebdp = (struct bufdesc_ex *)bdp; /* If this is a VLAN packet remove the VLAN Tag */ - vlan_packet_rcvd = false; - if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && - fep->bufdesc_ex && - (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) { - /* Push and remove the vlan tag */ - struct vlan_hdr *vlan_header = - (struct vlan_hdr *) (data + ETH_HLEN); - vlan_tag = ntohs(vlan_header->h_vlan_TCI); - - vlan_packet_rcvd = true; - - memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2); - skb_pull(skb, VLAN_HLEN); - } + if (fep->bufdesc_ex && + (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) + fec_enet_rx_vlan(ndev, skb); skb->protocol = eth_type_trans(skb, ndev); @@ -1480,24 +1911,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) 
} } - /* Handle received VLAN packets */ - if (vlan_packet_rcvd) - __vlan_hwaccel_put_tag(skb, - htons(ETH_P_8021Q), - vlan_tag); - + skb_record_rx_queue(skb, queue_id); napi_gro_receive(&fep->napi, skb); - if (is_copybreak) { - dma_sync_single_for_device(&fep->pdev->dev, - fec32_to_cpu(bdp->cbd_bufaddr), - FEC_ENET_RX_FRSIZE - fep->rx_align, - DMA_FROM_DEVICE); - } else { - rxq->rx_skbuff[index] = skb_new; - fec_enet_new_rxbdp(ndev, bdp, skb_new); - } - rx_processing_done: /* Clear the status flags for this buffer */ status &= ~BD_ENET_RX_STATS; @@ -1528,51 +1944,37 @@ rx_processing_done: writel(0, rxq->bd.reg_desc_active); } rxq->bd.cur = bdp; + + if (xdp_result & FEC_ENET_XDP_REDIR) + xdp_do_flush(); + return pkt_received; } -static int -fec_enet_rx(struct net_device *ndev, int budget) +static int fec_enet_rx(struct net_device *ndev, int budget) { - int pkt_received = 0; - u16 queue_id; struct fec_enet_private *fep = netdev_priv(ndev); + int i, done = 0; - for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) { - int ret; - - ret = fec_enet_rx_queue(ndev, - budget - pkt_received, queue_id); - - if (ret < budget - pkt_received) - clear_bit(queue_id, &fep->work_rx); + /* Make sure that AVB queues are processed first. */ + for (i = fep->num_rx_queues - 1; i >= 0; i--) + done += fec_enet_rx_queue(ndev, i, budget - done); - pkt_received += ret; - } - return pkt_received; + return done; } -static bool -fec_enet_collect_events(struct fec_enet_private *fep, uint int_events) +static bool fec_enet_collect_events(struct fec_enet_private *fep) { - if (int_events == 0) - return false; + uint int_events; - if (int_events & FEC_ENET_RXF) - fep->work_rx |= (1 << 2); - if (int_events & FEC_ENET_RXF_1) - fep->work_rx |= (1 << 0); - if (int_events & FEC_ENET_RXF_2) - fep->work_rx |= (1 << 1); + int_events = readl(fep->hwp + FEC_IEVENT); - if (int_events & FEC_ENET_TXF) - fep->work_tx |= (1 << 2); - if (int_events & FEC_ENET_TXF_1) - fep->work_tx |= (1 << 0); - if (int_events & FEC_ENET_TXF_2) - fep->work_tx |= (1 << 1); + /* Don't clear MDIO events, we poll for those */ + int_events &= ~FEC_ENET_MII; - return true; + writel(int_events, fep->hwp + FEC_IEVENT); + + return int_events != 0; } static irqreturn_t @@ -1580,31 +1982,18 @@ fec_enet_interrupt(int irq, void *dev_id) { struct net_device *ndev = dev_id; struct fec_enet_private *fep = netdev_priv(ndev); - uint int_events; irqreturn_t ret = IRQ_NONE; - int_events = readl(fep->hwp + FEC_IEVENT); - writel(int_events, fep->hwp + FEC_IEVENT); - fec_enet_collect_events(fep, int_events); - - if ((fep->work_tx || fep->work_rx) && fep->link) { + if (fec_enet_collect_events(fep) && fep->link) { ret = IRQ_HANDLED; if (napi_schedule_prep(&fep->napi)) { - /* Disable the NAPI interrupts */ - writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK); + /* Disable interrupts */ + writel(0, fep->hwp + FEC_IMASK); __napi_schedule(&fep->napi); } } - if (int_events & FEC_ENET_MII) { - ret = IRQ_HANDLED; - complete(&fep->mdio_done); - } - - if (fep->ptp_clock) - fec_ptp_check_pps_event(fep); - return ret; } @@ -1612,25 +2001,27 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget) { struct net_device *ndev = napi->dev; struct fec_enet_private *fep = netdev_priv(ndev); - int pkts; - - pkts = fec_enet_rx(ndev, budget); + int done = 0; - fec_enet_tx(ndev); + do { + done += fec_enet_rx(ndev, budget - done); + fec_enet_tx(ndev, budget); + } while ((done < budget) && fec_enet_collect_events(fep)); - if (pkts < budget) { - napi_complete_done(napi, pkts); + if (done < 
budget) { + napi_complete_done(napi, done); writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); } - return pkts; + + return done; } /* ------------------------------------------------------------------------- */ -static void fec_get_mac(struct net_device *ndev) +static int fec_get_mac(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); unsigned char *iap, tmpaddr[ETH_ALEN]; + int ret; /* * try to get mac address in following order: @@ -1646,9 +2037,11 @@ static void fec_get_mac(struct net_device *ndev) if (!is_valid_ether_addr(iap)) { struct device_node *np = fep->pdev->dev.of_node; if (np) { - const char *mac = of_get_mac_address(np); - if (mac) - iap = (unsigned char *) mac; + ret = of_get_mac_address(np, tmpaddr); + if (!ret) + iap = tmpaddr; + else if (ret == -EPROBE_DEFER) + return ret; } } @@ -1660,6 +2053,8 @@ static void fec_get_mac(struct net_device *ndev) if (FEC_FLASHMAC) iap = (unsigned char *)FEC_FLASHMAC; #else + struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); + if (pdata) iap = (unsigned char *)&pdata->mac; #endif @@ -1681,18 +2076,17 @@ static void fec_get_mac(struct net_device *ndev) */ if (!is_valid_ether_addr(iap)) { /* Report it and use a random ethernet address instead */ - netdev_err(ndev, "Invalid MAC address: %pM\n", iap); + dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap); eth_hw_addr_random(ndev); - netdev_info(ndev, "Using random MAC address: %pM\n", - ndev->dev_addr); - return; + dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n", + ndev->dev_addr); + return 0; } - memcpy(ndev->dev_addr, iap, ETH_ALEN); - /* Adjust MAC if using macaddr */ - if (iap == macaddr) - ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id; + eth_hw_addr_gen(ndev, iap, iap == macaddr ? fep->dev_id : 0); + + return 0; } /* ------------------------------------------------------------------------- */ @@ -1700,18 +2094,43 @@ static void fec_get_mac(struct net_device *ndev) /* * Phy section */ + +/* LPI Sleep Ts count base on tx clk (clk_ref). + * The lpi sleep cnt value = X us / (cycle_ns). 
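A quick worked example of the conversion described above, assuming a 125 MHz clk_ref (a typical gigabit ENET reference clock). The helper that follows computes us * (clk_ref_rate / 1000) / 1000, i.e. one microsecond is 125 tx-clock cycles:

#include <stdio.h>

int main(void)
{
	unsigned int clk_ref_rate = 125000000; /* assumed 125 MHz */
	unsigned int lpi_timer_us = 1000;      /* 1 ms */

	printf("%u\n", lpi_timer_us * (clk_ref_rate / 1000) / 1000); /* 125000 */
	return 0;
}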
+ */ +static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + return us * (fep->clk_ref_rate / 1000) / 1000; +} + +static int fec_enet_eee_mode_set(struct net_device *ndev, u32 lpi_timer, + bool enable) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned int sleep_cycle, wake_cycle; + + if (enable) { + sleep_cycle = fec_enet_us_to_tx_cycle(ndev, lpi_timer); + wake_cycle = sleep_cycle; + } else { + sleep_cycle = 0; + wake_cycle = 0; + } + + writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP); + writel(wake_cycle, fep->hwp + FEC_LPI_WAKE); + + return 0; +} + static void fec_enet_adjust_link(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); struct phy_device *phy_dev = ndev->phydev; int status_change = 0; - /* Prevent a state halted on mii error */ - if (fep->mii_timeout && phy_dev->state == PHY_HALTED) { - phy_dev->state = PHY_RESUMING; - return; - } - /* * If the netdev is down, or is going down, we're not interested * in link state events, so just mark our idea of the link as down @@ -1737,15 +2156,21 @@ static void fec_enet_adjust_link(struct net_device *ndev) /* if any of the above changed restart the FEC */ if (status_change) { + netif_stop_queue(ndev); napi_disable(&fep->napi); netif_tx_lock_bh(ndev); fec_restart(ndev); - netif_wake_queue(ndev); + netif_tx_wake_all_queues(ndev); netif_tx_unlock_bh(ndev); napi_enable(&fep->napi); } + if (fep->quirks & FEC_QUIRK_HAS_EEE) + fec_enet_eee_mode_set(ndev, + phy_dev->eee_cfg.tx_lpi_timer, + phy_dev->enable_tx_lpi); } else { if (fep->link) { + netif_stop_queue(ndev); napi_disable(&fep->napi); netif_tx_lock_bh(ndev); fec_stop(ndev); @@ -1760,95 +2185,208 @@ static void fec_enet_adjust_link(struct net_device *ndev) phy_print_status(phy_dev); } -static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +static int fec_enet_mdio_wait(struct fec_enet_private *fep) +{ + uint ievent; + int ret; + + ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent, + ievent & FEC_ENET_MII, 2, 30000); + + if (!ret) + writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); + + return ret; +} + +static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum) { struct fec_enet_private *fep = bus->priv; struct device *dev = &fep->pdev->dev; - unsigned long time_left; - int ret = 0; + int ret = 0, frame_start, frame_addr, frame_op; - ret = pm_runtime_get_sync(dev); + ret = pm_runtime_resume_and_get(dev); if (ret < 0) return ret; - fep->mii_timeout = 0; - reinit_completion(&fep->mdio_done); + /* C22 read */ + frame_op = FEC_MMFR_OP_READ; + frame_start = FEC_MMFR_ST; + frame_addr = regnum; /* start a read op */ - writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | - FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | - FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); + writel(frame_start | frame_op | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | + FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); /* wait for end of transfer */ - time_left = wait_for_completion_timeout(&fep->mdio_done, - usecs_to_jiffies(FEC_MII_TIMEOUT)); - if (time_left == 0) { - fep->mii_timeout = 1; + ret = fec_enet_mdio_wait(fep); + if (ret) { netdev_err(fep->netdev, "MDIO read timeout\n"); - ret = -ETIMEDOUT; goto out; } ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); out: - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return ret; } -static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, - u16 value) +static int fec_enet_mdio_read_c45(struct mii_bus *bus, int 
mii_id, + int devad, int regnum) { struct fec_enet_private *fep = bus->priv; struct device *dev = &fep->pdev->dev; - unsigned long time_left; - int ret; + int ret = 0, frame_start, frame_op; - ret = pm_runtime_get_sync(dev); + ret = pm_runtime_resume_and_get(dev); if (ret < 0) return ret; - else - ret = 0; - fep->mii_timeout = 0; - reinit_completion(&fep->mdio_done); + frame_start = FEC_MMFR_ST_C45; + + /* write address */ + writel(frame_start | FEC_MMFR_OP_ADDR_WRITE | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | + FEC_MMFR_TA | (regnum & 0xFFFF), + fep->hwp + FEC_MII_DATA); + + /* wait for end of transfer */ + ret = fec_enet_mdio_wait(fep); + if (ret) { + netdev_err(fep->netdev, "MDIO address write timeout\n"); + goto out; + } + + frame_op = FEC_MMFR_OP_READ_C45; + + /* start a read op */ + writel(frame_start | frame_op | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | + FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); + + /* wait for end of transfer */ + ret = fec_enet_mdio_wait(fep); + if (ret) { + netdev_err(fep->netdev, "MDIO read timeout\n"); + goto out; + } + + ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); + +out: + pm_runtime_put_autosuspend(dev); + + return ret; +} + +static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum, + u16 value) +{ + struct fec_enet_private *fep = bus->priv; + struct device *dev = &fep->pdev->dev; + int ret, frame_start, frame_addr; + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return ret; + + /* C22 write */ + frame_start = FEC_MMFR_ST; + frame_addr = regnum; /* start a write op */ - writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE | - FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | - FEC_MMFR_TA | FEC_MMFR_DATA(value), - fep->hwp + FEC_MII_DATA); + writel(frame_start | FEC_MMFR_OP_WRITE | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | + FEC_MMFR_TA | FEC_MMFR_DATA(value), + fep->hwp + FEC_MII_DATA); /* wait for end of transfer */ - time_left = wait_for_completion_timeout(&fep->mdio_done, - usecs_to_jiffies(FEC_MII_TIMEOUT)); - if (time_left == 0) { - fep->mii_timeout = 1; + ret = fec_enet_mdio_wait(fep); + if (ret) netdev_err(fep->netdev, "MDIO write timeout\n"); - ret = -ETIMEDOUT; + + pm_runtime_put_autosuspend(dev); + + return ret; +} + +static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id, + int devad, int regnum, u16 value) +{ + struct fec_enet_private *fep = bus->priv; + struct device *dev = &fep->pdev->dev; + int ret, frame_start; + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return ret; + + frame_start = FEC_MMFR_ST_C45; + + /* write address */ + writel(frame_start | FEC_MMFR_OP_ADDR_WRITE | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | + FEC_MMFR_TA | (regnum & 0xFFFF), + fep->hwp + FEC_MII_DATA); + + /* wait for end of transfer */ + ret = fec_enet_mdio_wait(fep); + if (ret) { + netdev_err(fep->netdev, "MDIO address write timeout\n"); + goto out; } - pm_runtime_mark_last_busy(dev); + /* start a write op */ + writel(frame_start | FEC_MMFR_OP_WRITE | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | + FEC_MMFR_TA | FEC_MMFR_DATA(value), + fep->hwp + FEC_MII_DATA); + + /* wait for end of transfer */ + ret = fec_enet_mdio_wait(fep); + if (ret) + netdev_err(fep->netdev, "MDIO write timeout\n"); + +out: pm_runtime_put_autosuspend(dev); return ret; } +static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct phy_device *phy_dev = ndev->phydev; + + if (phy_dev) { + phy_reset_after_clk_enable(phy_dev); + } else if (fep->phy_node) { + /* 
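+ * Some PHYs (for instance parts that sample strap pins on reset)
+ * must be reset again once their reference clock is back on; this
+ * helper makes sure that happens even before a PHY is attached.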
+ * If the PHY still is not bound to the MAC, but there is + * OF PHY node and a matching PHY device instance already, + * use the OF PHY node to obtain the PHY device instance, + * and then use that PHY device instance when triggering + * the PHY reset. + */ + phy_dev = of_phy_find_device(fep->phy_node); + phy_reset_after_clk_enable(phy_dev); + if (phy_dev) + put_device(&phy_dev->mdio.dev); + } +} + static int fec_enet_clk_enable(struct net_device *ndev, bool enable) { struct fec_enet_private *fep = netdev_priv(ndev); int ret; if (enable) { - ret = clk_prepare_enable(fep->clk_ahb); - if (ret) - return ret; - ret = clk_prepare_enable(fep->clk_enet_out); if (ret) - goto failed_clk_enet_out; + return ret; if (fep->clk_ptp) { mutex_lock(&fep->ptp_clk_mutex); @@ -1865,8 +2403,13 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) ret = clk_prepare_enable(fep->clk_ref); if (ret) goto failed_clk_ref; + + ret = clk_prepare_enable(fep->clk_2x_txclk); + if (ret) + goto failed_clk_2x_txclk; + + fec_enet_phy_reset_after_clk_enable(ndev); } else { - clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_enet_out); if (fep->clk_ptp) { mutex_lock(&fep->ptp_clk_mutex); @@ -1875,77 +2418,107 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) mutex_unlock(&fep->ptp_clk_mutex); } clk_disable_unprepare(fep->clk_ref); + clk_disable_unprepare(fep->clk_2x_txclk); } return 0; -failed_clk_ref: +failed_clk_2x_txclk: if (fep->clk_ref) clk_disable_unprepare(fep->clk_ref); +failed_clk_ref: + if (fep->clk_ptp) { + mutex_lock(&fep->ptp_clk_mutex); + clk_disable_unprepare(fep->clk_ptp); + fep->ptp_clk_on = false; + mutex_unlock(&fep->ptp_clk_mutex); + } failed_clk_ptp: - if (fep->clk_enet_out) - clk_disable_unprepare(fep->clk_enet_out); -failed_clk_enet_out: - clk_disable_unprepare(fep->clk_ahb); + clk_disable_unprepare(fep->clk_enet_out); return ret; } +static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep, + struct device_node *np) +{ + u32 rgmii_tx_delay, rgmii_rx_delay; + + /* For rgmii tx internal delay, valid values are 0ps and 2000ps */ + if (!of_property_read_u32(np, "tx-internal-delay-ps", &rgmii_tx_delay)) { + if (rgmii_tx_delay != 0 && rgmii_tx_delay != 2000) { + dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps"); + return -EINVAL; + } else if (rgmii_tx_delay == 2000) { + fep->rgmii_txc_dly = true; + } + } + + /* For rgmii rx internal delay, valid values are 0ps and 2000ps */ + if (!of_property_read_u32(np, "rx-internal-delay-ps", &rgmii_rx_delay)) { + if (rgmii_rx_delay != 0 && rgmii_rx_delay != 2000) { + dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps"); + return -EINVAL; + } else if (rgmii_rx_delay == 2000) { + fep->rgmii_rxc_dly = true; + } + } + + return 0; +} + static int fec_enet_mii_probe(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - struct phy_device *phy_dev = NULL; - char mdio_bus_id[MII_BUS_ID_SIZE]; - char phy_name[MII_BUS_ID_SIZE + 3]; - int phy_id; - int dev_id = fep->dev_id; + struct phy_device *phy_dev; + int ret; if (fep->phy_node) { phy_dev = of_phy_connect(ndev, fep->phy_node, &fec_enet_adjust_link, 0, fep->phy_interface); - if (!phy_dev) + if (!phy_dev) { + netdev_err(ndev, "Unable to connect to phy\n"); return -ENODEV; + } } else { /* check for attached phy */ - for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { - if (!mdiobus_is_registered_device(fep->mii_bus, phy_id)) - continue; - if (dev_id--) - continue; - 
strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); - break; - } + phy_dev = phy_find_first(fep->mii_bus); + if (fep->dev_id && phy_dev) + phy_dev = phy_find_next(fep->mii_bus, phy_dev); - if (phy_id >= PHY_MAX_ADDR) { + if (!phy_dev) { netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); - strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); - phy_id = 0; + phy_dev = fixed_phy_register_100fd(); + if (IS_ERR(phy_dev)) { + netdev_err(ndev, "could not register fixed PHY\n"); + return PTR_ERR(phy_dev); + } } - snprintf(phy_name, sizeof(phy_name), - PHY_ID_FMT, mdio_bus_id, phy_id); - phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, - fep->phy_interface); - } + ret = phy_connect_direct(ndev, phy_dev, &fec_enet_adjust_link, + fep->phy_interface); + if (ret) { + if (phy_is_pseudo_fixed_link(phy_dev)) + fixed_phy_unregister(phy_dev); + netdev_err(ndev, "could not attach to PHY\n"); + return ret; + } - if (IS_ERR(phy_dev)) { - netdev_err(ndev, "could not attach to PHY\n"); - return PTR_ERR(phy_dev); } /* mask with MAC supported features */ if (fep->quirks & FEC_QUIRK_HAS_GBIT) { - phy_dev->supported &= PHY_GBIT_FEATURES; - phy_dev->supported &= ~SUPPORTED_1000baseT_Half; -#if !defined(CONFIG_M5272) - phy_dev->supported |= SUPPORTED_Pause; -#endif + phy_set_max_speed(phy_dev, 1000); + phy_remove_link_mode(phy_dev, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + phy_support_sym_pause(phy_dev); } else - phy_dev->supported &= PHY_BASIC_FEATURES; + phy_set_max_speed(phy_dev, 100); - phy_dev->advertising = phy_dev->supported; + if (fep->quirks & FEC_QUIRK_HAS_EEE) + phy_support_eee(phy_dev); fep->link = 0; fep->full_duplex = 0; @@ -1960,9 +2533,12 @@ static int fec_enet_mii_init(struct platform_device *pdev) static struct mii_bus *fec0_mii_bus; struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); + bool suppress_preamble = false; + struct phy_device *phydev; struct device_node *node; int err = -ENXIO; u32 mii_speed, holdtime; + u32 bus_freq; /* * The i.MX28 dual fec interfaces are not equal. @@ -1990,17 +2566,23 @@ static int fec_enet_mii_init(struct platform_device *pdev) return -ENOENT; } - fep->mii_timeout = 0; + bus_freq = 2500000; /* 2.5MHz by default */ + node = of_get_child_by_name(pdev->dev.of_node, "mdio"); + if (node) { + of_property_read_u32(node, "clock-frequency", &bus_freq); + suppress_preamble = of_property_read_bool(node, + "suppress-preamble"); + } /* - * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed) + * Set MII speed (= clk_get_rate() / 2 * phy_speed) * * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28 * Reference Manual has an error on this, and gets fixed on i.MX6Q * document. */ - mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); + mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2); if (fep->quirks & FEC_QUIRK_ENET_MAC) mii_speed--; if (mii_speed > 63) { @@ -2027,8 +2609,26 @@ static int fec_enet_mii_init(struct platform_device *pdev) fep->phy_speed = mii_speed << 1 | holdtime << 8; + if (suppress_preamble) + fep->phy_speed |= BIT(7); + + if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) { + /* Clear MMFR to avoid to generate MII event by writing MSCR. 
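+ * (writing MSCR while MMFR is non-zero would otherwise generate a
+ * spurious MII event, per the conditions listed below)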
+ * MII event generation condition: + * - writing MSCR: + * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero & + * mscr_reg_data_in[7:0] != 0 + * - writing MMFR: + * - mscr[7:0]_not_zero + */ + writel(0, fep->hwp + FEC_MII_DATA); + } + writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); + /* Clear any pending transaction complete indication */ + writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); + fep->mii_bus = mdiobus_alloc(); if (fep->mii_bus == NULL) { err = -ENOMEM; @@ -2036,23 +2636,25 @@ static int fec_enet_mii_init(struct platform_device *pdev) } fep->mii_bus->name = "fec_enet_mii_bus"; - fep->mii_bus->read = fec_enet_mdio_read; - fep->mii_bus->write = fec_enet_mdio_write; + fep->mii_bus->read = fec_enet_mdio_read_c22; + fep->mii_bus->write = fec_enet_mdio_write_c22; + if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) { + fep->mii_bus->read_c45 = fec_enet_mdio_read_c45; + fep->mii_bus->write_c45 = fec_enet_mdio_write_c45; + } snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, fep->dev_id + 1); fep->mii_bus->priv = fep; fep->mii_bus->parent = &pdev->dev; - node = of_get_child_by_name(pdev->dev.of_node, "mdio"); - if (node) { - err = of_mdiobus_register(fep->mii_bus, node); - of_node_put(node); - } else { - err = mdiobus_register(fep->mii_bus); - } - + err = of_mdiobus_register(fep->mii_bus, node); if (err) goto err_out_free_mdiobus; + of_node_put(node); + + /* find all the PHY devices on the bus and set mac_managed_pm to true */ + mdiobus_for_each_phy(fep->mii_bus, phydev) + phydev->mac_managed_pm = true; mii_cnt++; @@ -2065,6 +2667,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) err_out_free_mdiobus: mdiobus_free(fep->mii_bus); err_out: + of_node_put(node); return err; } @@ -2081,10 +2684,9 @@ static void fec_enet_get_drvinfo(struct net_device *ndev, { struct fec_enet_private *fep = netdev_priv(ndev); - strlcpy(info->driver, fep->pdev->dev.driver->name, + strscpy(info->driver, fep->pdev->dev.driver->name, sizeof(info->driver)); - strlcpy(info->version, "Revision: 1.0", sizeof(info->version)); - strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); + strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); } static int fec_enet_get_regs_len(struct net_device *ndev) @@ -2101,8 +2703,8 @@ static int fec_enet_get_regs_len(struct net_device *ndev) } /* List of registers that can be safety be read to dump them with ethtool */ -#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) +#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST) +static __u32 fec_enet_register_version = 2; static u32 fec_enet_register_offset[] = { FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, @@ -2132,7 +2734,33 @@ static u32 fec_enet_register_offset[] = { IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, IEEE_R_FDXFC, IEEE_R_OCTETS_OK }; +/* for i.MX6ul */ +static u32 fec_enet_register_offset_6ul[] = { + FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, + FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, + FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0, + FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, + FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0, + FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM, + FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, + RMON_T_DROP, 
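/* i.MX6UL has a single queue, hence only the _0/IC0 register instances here */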
RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, + RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, + RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, + RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, + RMON_T_P_GTE2048, RMON_T_OCTETS, + IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF, + IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, + IEEE_T_FDXFC, IEEE_T_OCTETS_OK, + RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN, + RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, + RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, + RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, + RMON_R_P_GTE2048, RMON_R_OCTETS, + IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, + IEEE_R_FDXFC, IEEE_R_OCTETS_OK +}; #else +static __u32 fec_enet_register_version = 1; static u32 fec_enet_register_offset[] = { FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, @@ -2149,36 +2777,57 @@ static u32 fec_enet_register_offset[] = { static void fec_enet_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *regbuf) { + u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset); struct fec_enet_private *fep = netdev_priv(ndev); u32 __iomem *theregs = (u32 __iomem *)fep->hwp; + u32 *reg_list = fec_enet_register_offset; + struct device *dev = &fep->pdev->dev; u32 *buf = (u32 *)regbuf; u32 i, off; + int ret; + +#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST) + if (of_machine_is_compatible("fsl,imx6ul")) { + reg_list = fec_enet_register_offset_6ul; + reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul); + } +#endif + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return; + + regs->version = fec_enet_register_version; memset(buf, 0, regs->len); - for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { - off = fec_enet_register_offset[i] / 4; + for (i = 0; i < reg_cnt; i++) { + off = reg_list[i]; + + if ((off == FEC_R_BOUND || off == FEC_R_FSTART) && + !(fep->quirks & FEC_QUIRK_HAS_FRREG)) + continue; + + off >>= 2; buf[off] = readl(&theregs[off]); } + + pm_runtime_put_autosuspend(dev); } static int fec_enet_get_ts_info(struct net_device *ndev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct fec_enet_private *fep = netdev_priv(ndev); if (fep->bufdesc_ex) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; if (fep->ptp_clock) info->phc_index = ptp_clock_index(fep->ptp_clock); - else - info->phc_index = -1; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); @@ -2223,13 +2872,8 @@ static int fec_enet_set_pauseparam(struct net_device *ndev, fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; fep->pause_flag |= pause->autoneg ? 
FEC_PAUSE_FLAG_AUTONEG : 0; - if (pause->rx_pause || pause->autoneg) { - ndev->phydev->supported |= ADVERTISED_Pause; - ndev->phydev->advertising |= ADVERTISED_Pause; - } else { - ndev->phydev->supported &= ~ADVERTISED_Pause; - ndev->phydev->advertising &= ~ADVERTISED_Pause; - } + phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause, + pause->autoneg); if (pause->autoneg) { if (netif_running(ndev)) @@ -2240,7 +2884,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev, napi_disable(&fep->napi); netif_tx_lock_bh(ndev); fec_restart(ndev); - netif_wake_queue(ndev); + netif_tx_wake_all_queues(ndev); netif_tx_unlock_bh(ndev); napi_enable(&fep->napi); } @@ -2316,6 +2960,16 @@ static const struct fec_stat { #define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64)) +static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = { + "rx_xdp_redirect", /* RX_XDP_REDIRECT = 0, */ + "rx_xdp_pass", /* RX_XDP_PASS, */ + "rx_xdp_drop", /* RX_XDP_DROP, */ + "rx_xdp_tx", /* RX_XDP_TX, */ + "rx_xdp_tx_errors", /* RX_XDP_TX_ERRORS, */ + "tx_xdp_xmit", /* TX_XDP_XMIT, */ + "tx_xdp_xmit_errors", /* TX_XDP_XMIT_ERRORS, */ +}; + static void fec_enet_update_ethtool_stats(struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); @@ -2325,6 +2979,42 @@ static void fec_enet_update_ethtool_stats(struct net_device *dev) fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset); } +static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data) +{ + u64 xdp_stats[XDP_STATS_TOTAL] = { 0 }; + struct fec_enet_priv_rx_q *rxq; + int i, j; + + for (i = fep->num_rx_queues - 1; i >= 0; i--) { + rxq = fep->rx_queue[i]; + + for (j = 0; j < XDP_STATS_TOTAL; j++) + xdp_stats[j] += rxq->stats[j]; + } + + memcpy(data, xdp_stats, sizeof(xdp_stats)); +} + +static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data) +{ +#ifdef CONFIG_PAGE_POOL_STATS + struct page_pool_stats stats = {}; + struct fec_enet_priv_rx_q *rxq; + int i; + + for (i = fep->num_rx_queues - 1; i >= 0; i--) { + rxq = fep->rx_queue[i]; + + if (!rxq->page_pool) + continue; + + page_pool_get_stats(rxq->page_pool, &stats); + } + + page_pool_ethtool_stats_get(data, &stats); +#endif +} + static void fec_enet_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { @@ -2334,6 +3024,12 @@ static void fec_enet_get_ethtool_stats(struct net_device *dev, fec_enet_update_ethtool_stats(dev); memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE); + data += FEC_STATS_SIZE / sizeof(u64); + + fec_enet_get_xdp_stats(fep, data); + data += XDP_STATS_TOTAL; + + fec_enet_page_pool_stats(fep, data); } static void fec_enet_get_strings(struct net_device *netdev, @@ -2342,18 +3038,33 @@ static void fec_enet_get_strings(struct net_device *netdev, int i; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < ARRAY_SIZE(fec_stats); i++) - memcpy(data + i * ETH_GSTRING_LEN, - fec_stats[i].name, ETH_GSTRING_LEN); + for (i = 0; i < ARRAY_SIZE(fec_stats); i++) { + ethtool_puts(&data, fec_stats[i].name); + } + for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) { + ethtool_puts(&data, fec_xdp_stat_strs[i]); + } + page_pool_ethtool_stats_get_strings(data); + + break; + case ETH_SS_TEST: + net_selftest_get_strings(data); break; } } static int fec_enet_get_sset_count(struct net_device *dev, int sset) { + int count; + switch (sset) { case ETH_SS_STATS: - return ARRAY_SIZE(fec_stats); + count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL; + count += page_pool_ethtool_stats_get_count(); + return count; + + case 
ETH_SS_TEST: + return net_selftest_get_count(); default: return -EOPNOTSUPP; } @@ -2362,7 +3073,8 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset) static void fec_enet_clear_ethtool_stats(struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); - int i; + struct fec_enet_priv_rx_q *rxq; + int i, j; /* Disable MIB statistics counters */ writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT); @@ -2370,6 +3082,12 @@ static void fec_enet_clear_ethtool_stats(struct net_device *dev) for (i = 0; i < ARRAY_SIZE(fec_stats); i++) writel(0, fep->hwp + fec_stats[i].offset); + for (i = fep->num_rx_queues - 1; i >= 0; i--) { + rxq = fep->rx_queue[i]; + for (j = 0; j < XDP_STATS_TOTAL; j++) + rxq->stats[j] = 0; + } + /* Don't disable MIB statistics counters */ writel(0, fep->hwp + FEC_MIB_CTRLSTAT); } @@ -2400,31 +3118,29 @@ static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us) static void fec_enet_itr_coal_set(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - int rx_itr, tx_itr; + u32 rx_itr = 0, tx_itr = 0; + int rx_ictt, tx_ictt; - /* Must be greater than zero to avoid unpredictable behavior */ - if (!fep->rx_time_itr || !fep->rx_pkts_itr || - !fep->tx_time_itr || !fep->tx_pkts_itr) - return; + rx_ictt = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); + tx_ictt = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); - /* Select enet system clock as Interrupt Coalescing - * timer Clock Source - */ - rx_itr = FEC_ITR_CLK_SEL; - tx_itr = FEC_ITR_CLK_SEL; - - /* set ICFT and ICTT */ - rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); - rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); - tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); - tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); + if (rx_ictt > 0 && fep->rx_pkts_itr > 1) { + /* Enable with enet system clock as Interrupt Coalescing timer Clock Source */ + rx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL; + rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); + rx_itr |= FEC_ITR_ICTT(rx_ictt); + } - rx_itr |= FEC_ITR_EN; - tx_itr |= FEC_ITR_EN; + if (tx_ictt > 0 && fep->tx_pkts_itr > 1) { + /* Enable with enet system clock as Interrupt Coalescing timer Clock Source */ + tx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL; + tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); + tx_itr |= FEC_ITR_ICTT(tx_ictt); + } writel(tx_itr, fep->hwp + FEC_TXIC0); writel(rx_itr, fep->hwp + FEC_RXIC0); - if (fep->quirks & FEC_QUIRK_HAS_AVB) { + if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { writel(tx_itr, fep->hwp + FEC_TXIC1); writel(rx_itr, fep->hwp + FEC_RXIC1); writel(tx_itr, fep->hwp + FEC_TXIC2); @@ -2432,8 +3148,10 @@ static void fec_enet_itr_coal_set(struct net_device *ndev) } } -static int -fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) +static int fec_enet_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct fec_enet_private *fep = netdev_priv(ndev); @@ -2449,34 +3167,37 @@ fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) return 0; } -static int -fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) +static int fec_enet_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct fec_enet_private *fep = netdev_priv(ndev); + struct device *dev = &fep->pdev->dev; unsigned int cycle; if (!(fep->quirks & 
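/* the ITR coalescing registers exist only on SoCs with this quirk */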
FEC_QUIRK_HAS_COALESCE)) return -EOPNOTSUPP; if (ec->rx_max_coalesced_frames > 255) { - pr_err("Rx coalesced frames exceed hardware limitation\n"); + dev_err(dev, "Rx coalesced frames exceed hardware limitation\n"); return -EINVAL; } if (ec->tx_max_coalesced_frames > 255) { - pr_err("Tx coalesced frame exceed hardware limitation\n"); + dev_err(dev, "Tx coalesced frame exceed hardware limitation\n"); return -EINVAL; } - cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); + cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs); if (cycle > 0xFFFF) { - pr_err("Rx coalesced usec exceed hardware limitation\n"); + dev_err(dev, "Rx coalesced usec exceed hardware limitation\n"); return -EINVAL; } - cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); + cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs); if (cycle > 0xFFFF) { - pr_err("Rx coalesced usec exceed hardware limitation\n"); + dev_err(dev, "Tx coalesced usec exceed hardware limitation\n"); return -EINVAL; } @@ -2491,55 +3212,32 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) return 0; } -static void fec_enet_itr_coal_init(struct net_device *ndev) +static int +fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata) { - struct ethtool_coalesce ec; + struct fec_enet_private *fep = netdev_priv(ndev); - ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; - ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; + if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) + return -EOPNOTSUPP; - ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; - ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; + if (!netif_running(ndev)) + return -ENETDOWN; - fec_enet_set_coalesce(ndev, &ec); + return phy_ethtool_get_eee(ndev->phydev, edata); } -static int fec_enet_get_tunable(struct net_device *netdev, - const struct ethtool_tunable *tuna, - void *data) +static int +fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata) { - struct fec_enet_private *fep = netdev_priv(netdev); - int ret = 0; - - switch (tuna->id) { - case ETHTOOL_RX_COPYBREAK: - *(u32 *)data = fep->rx_copybreak; - break; - default: - ret = -EINVAL; - break; - } - - return ret; -} + struct fec_enet_private *fep = netdev_priv(ndev); -static int fec_enet_set_tunable(struct net_device *netdev, - const struct ethtool_tunable *tuna, - const void *data) -{ - struct fec_enet_private *fep = netdev_priv(netdev); - int ret = 0; + if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) + return -EOPNOTSUPP; - switch (tuna->id) { - case ETHTOOL_RX_COPYBREAK: - fep->rx_copybreak = *(u32 *)data; - break; - default: - ret = -EINVAL; - break; - } + if (!netif_running(ndev)) + return -ENETDOWN; - return ret; + return phy_ethtool_set_eee(ndev->phydev, edata); } static void @@ -2567,20 +3265,17 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) return -EINVAL; device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC); - if (device_may_wakeup(&ndev->dev)) { + if (device_may_wakeup(&ndev->dev)) fep->wol_flag |= FEC_WOL_FLAG_ENABLE; - if (fep->irq[0] > 0) - enable_irq_wake(fep->irq[0]); - } else { + else fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); - if (fep->irq[0] > 0) - disable_irq_wake(fep->irq[0]); - } return 0; } static const struct ethtool_ops fec_enet_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, .get_drvinfo = fec_enet_get_drvinfo, .get_regs_len = fec_enet_get_regs_len, .get_regs = fec_enet_get_regs, @@ -2596,71 +3291,62 @@ static const struct ethtool_ops fec_enet_ethtool_ops = { 
.get_sset_count = fec_enet_get_sset_count, #endif .get_ts_info = fec_enet_get_ts_info, - .get_tunable = fec_enet_get_tunable, - .set_tunable = fec_enet_set_tunable, .get_wol = fec_enet_get_wol, .set_wol = fec_enet_set_wol, + .get_eee = fec_enet_get_eee, + .set_eee = fec_enet_set_eee, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, + .self_test = net_selftest, }; -static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - struct phy_device *phydev = ndev->phydev; - - if (!netif_running(ndev)) - return -EINVAL; - - if (!phydev) - return -ENODEV; - - if (fep->bufdesc_ex) { - if (cmd == SIOCSHWTSTAMP) - return fec_ptp_set(ndev, rq); - if (cmd == SIOCGHWTSTAMP) - return fec_ptp_get(ndev, rq); - } - - return phy_mii_ioctl(phydev, rq, cmd); -} - static void fec_enet_free_buffers(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); unsigned int i; - struct sk_buff *skb; - struct bufdesc *bdp; struct fec_enet_priv_tx_q *txq; struct fec_enet_priv_rx_q *rxq; unsigned int q; for (q = 0; q < fep->num_rx_queues; q++) { rxq = fep->rx_queue[q]; - bdp = rxq->bd.base; - for (i = 0; i < rxq->bd.ring_size; i++) { - skb = rxq->rx_skbuff[i]; - rxq->rx_skbuff[i] = NULL; - if (skb) { - dma_unmap_single(&fep->pdev->dev, - fec32_to_cpu(bdp->cbd_bufaddr), - FEC_ENET_RX_FRSIZE - fep->rx_align, - DMA_FROM_DEVICE); - dev_kfree_skb(skb); - } - bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); - } + for (i = 0; i < rxq->bd.ring_size; i++) + page_pool_put_full_page(rxq->page_pool, rxq->rx_buf[i], + false); + + for (i = 0; i < XDP_STATS_TOTAL; i++) + rxq->stats[i] = 0; + + if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) + xdp_rxq_info_unreg(&rxq->xdp_rxq); + page_pool_destroy(rxq->page_pool); + rxq->page_pool = NULL; } for (q = 0; q < fep->num_tx_queues; q++) { txq = fep->tx_queue[q]; - bdp = txq->bd.base; for (i = 0; i < txq->bd.ring_size; i++) { kfree(txq->tx_bounce[i]); txq->tx_bounce[i] = NULL; - skb = txq->tx_skbuff[i]; - txq->tx_skbuff[i] = NULL; - dev_kfree_skb(skb); + + if (!txq->tx_buf[i].buf_p) { + txq->tx_buf[i].type = FEC_TXBUF_T_SKB; + continue; + } + + if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) { + dev_kfree_skb(txq->tx_buf[i].buf_p); + } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) { + xdp_return_frame(txq->tx_buf[i].buf_p); + } else { + struct page *page = txq->tx_buf[i].buf_p; + + page_pool_put_page(pp_page_to_nmdesc(page)->pp, + page, 0, false); + } + + txq->tx_buf[i].buf_p = NULL; + txq->tx_buf[i].type = FEC_TXBUF_T_SKB; } } } @@ -2674,10 +3360,9 @@ static void fec_enet_free_queue(struct net_device *ndev) for (i = 0; i < fep->num_tx_queues; i++) if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { txq = fep->tx_queue[i]; - dma_free_coherent(&fep->pdev->dev, - txq->bd.ring_size * TSO_HEADER_SIZE, - txq->tso_hdrs, - txq->tso_hdrs_dma); + fec_dma_free(&fep->pdev->dev, + txq->bd.ring_size * TSO_HEADER_SIZE, + txq->tso_hdrs, txq->tso_hdrs_dma); } for (i = 0; i < fep->num_rx_queues; i++) @@ -2705,13 +3390,11 @@ static int fec_enet_alloc_queue(struct net_device *ndev) fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size; txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; - txq->tx_wake_threshold = - (txq->bd.ring_size - txq->tx_stop_threshold) / 2; + txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS; - txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev, + txq->tso_hdrs = fec_dma_alloc(&fep->pdev->dev, txq->bd.ring_size * TSO_HEADER_SIZE, - 
&txq->tso_hdrs_dma, - GFP_KERNEL); + &txq->tso_hdrs_dma, GFP_KERNEL); if (!txq->tso_hdrs) { ret = -ENOMEM; goto alloc_failed; @@ -2740,24 +3423,43 @@ static int fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) { struct fec_enet_private *fep = netdev_priv(ndev); - unsigned int i; - struct sk_buff *skb; - struct bufdesc *bdp; struct fec_enet_priv_rx_q *rxq; + dma_addr_t phys_addr; + struct bufdesc *bdp; + struct page *page; + int i, err; rxq = fep->rx_queue[queue]; bdp = rxq->bd.base; + + err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size); + if (err < 0) { + netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err); + return err; + } + + /* Some platforms require the RX buffer must be 64 bytes alignment. + * Some platforms require 16 bytes alignment. And some platforms + * require 4 bytes alignment. But since the page pool have been + * introduced into the driver, the address of RX buffer is always + * the page address plus FEC_ENET_XDP_HEADROOM, and + * FEC_ENET_XDP_HEADROOM is 256 bytes. Therefore, this address can + * satisfy all platforms. To prevent future modifications to + * FEC_ENET_XDP_HEADROOM from ignoring this hardware limitation, a + * BUILD_BUG_ON() test has been added, which ensures that + * FEC_ENET_XDP_HEADROOM provides the required alignment. + */ + BUILD_BUG_ON(FEC_ENET_XDP_HEADROOM & 0x3f); + for (i = 0; i < rxq->bd.ring_size; i++) { - skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); - if (!skb) + page = page_pool_dev_alloc_pages(rxq->page_pool); + if (!page) goto err_alloc; - if (fec_enet_new_rxbdp(ndev, bdp, skb)) { - dev_kfree_skb(skb); - goto err_alloc; - } + phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM; + bdp->cbd_bufaddr = cpu_to_fec32(phys_addr); - rxq->rx_skbuff[i] = skb; + rxq->rx_buf[i] = page; bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); if (fep->bufdesc_ex) { @@ -2770,7 +3472,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) /* Set the last buffer to wrap. */ bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); - bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); + bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP); return 0; err_alloc: @@ -2806,7 +3508,7 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) /* Set the last buffer to wrap. */ bdp = fec_enet_get_prevdesc(bdp, &txq->bd); - bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); + bdp->cbd_sc |= cpu_to_fec16(BD_ENET_TX_WRAP); return 0; @@ -2835,8 +3537,9 @@ fec_enet_open(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); int ret; + bool reset_again; - ret = pm_runtime_get_sync(&fep->pdev->dev); + ret = pm_runtime_resume_and_get(&fep->pdev->dev); if (ret < 0) return ret; @@ -2845,6 +3548,17 @@ fec_enet_open(struct net_device *ndev) if (ret) goto clk_enable; + /* During the first fec_enet_open call the PHY isn't probed at this + * point. Therefore the phy_reset_after_clk_enable() call within + * fec_enet_clk_enable() fails. As we need this reset in order to be + * sure the PHY is working correctly we check if we need to reset again + * later when the PHY is probed + */ + if (ndev->phydev && ndev->phydev->drv) + reset_again = false; + else + reset_again = true; + /* I should reset the ring buffers here, but I don't yet know * a simple way to do that. 
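 * (in practice fec_restart() below re-initialises the descriptor
 * rings, so the stale-ring concern is handled there)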
*/ @@ -2856,6 +3570,12 @@ fec_enet_open(struct net_device *ndev) /* Init MAC prior to mii bus probe */ fec_restart(ndev); + /* Call phy_reset_after_clk_enable() again if it failed during + * phy_reset_after_clk_enable() before because the PHY wasn't probed. + */ + if (reset_again) + fec_enet_phy_reset_after_clk_enable(ndev); + /* Probe and connect to PHY when open the interface */ ret = fec_enet_mii_probe(ndev); if (ret) @@ -2864,6 +3584,9 @@ fec_enet_open(struct net_device *ndev) if (fep->quirks & FEC_QUIRK_ERR006687) imx6q_cpuidle_fec_irqs_used(); + if (fep->quirks & FEC_QUIRK_HAS_PMQOS) + cpu_latency_qos_add_request(&fep->pm_qos_req, 0); + napi_enable(&fep->napi); phy_start(ndev->phydev); netif_tx_start_all_queues(ndev); @@ -2878,7 +3601,6 @@ err_enet_mii_probe: err_enet_alloc: fec_enet_clk_enable(ndev, false); clk_enable: - pm_runtime_mark_last_busy(&fep->pdev->dev); pm_runtime_put_autosuspend(&fep->pdev->dev); pinctrl_pm_select_sleep_state(&fep->pdev->dev); return ret; @@ -2888,8 +3610,9 @@ static int fec_enet_close(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); + struct phy_device *phy_dev = ndev->phydev; - phy_stop(ndev->phydev); + phy_stop(phy_dev); if (netif_device_present(ndev)) { napi_disable(&fep->napi); @@ -2897,7 +3620,10 @@ fec_enet_close(struct net_device *ndev) fec_stop(ndev); } - phy_disconnect(ndev->phydev); + phy_disconnect(phy_dev); + + if (!fep->phy_node && phy_is_pseudo_fixed_link(phy_dev)) + fixed_phy_unregister(phy_dev); if (fep->quirks & FEC_QUIRK_ERR006687) imx6q_cpuidle_fec_irqs_unused(); @@ -2905,8 +3631,10 @@ fec_enet_close(struct net_device *ndev) fec_enet_update_ethtool_stats(ndev); fec_enet_clk_enable(ndev, false); + if (fep->quirks & FEC_QUIRK_HAS_PMQOS) + cpu_latency_qos_remove_request(&fep->pm_qos_req); + pinctrl_pm_select_sleep_state(&fep->pdev->dev); - pm_runtime_mark_last_busy(&fep->pdev->dev); pm_runtime_put_autosuspend(&fep->pdev->dev); fec_enet_free_buffers(ndev); @@ -2925,13 +3653,12 @@ fec_enet_close(struct net_device *ndev) */ #define FEC_HASH_BITS 6 /* #bits in hash */ -#define CRC32_POLY 0xEDB88320 static void set_multicast_list(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); struct netdev_hw_addr *ha; - unsigned int i, bit, data, crc, tmp; + unsigned int crc, tmp; unsigned char hash; unsigned int hash_high = 0, hash_low = 0; @@ -2959,15 +3686,7 @@ static void set_multicast_list(struct net_device *ndev) /* Add the addresses in hash register */ netdev_for_each_mc_addr(ha, ndev) { /* calculate crc32 value of mac address */ - crc = 0xffffffff; - - for (i = 0; i < ndev->addr_len; i++) { - data = ha->addr[i]; - for (bit = 0; bit < 8; bit++, data >>= 1) { - crc = (crc >> 1) ^ - (((crc ^ data) & 1) ? 
CRC32_POLY : 0); - } - } + crc = ether_crc_le(ndev->addr_len, ha->addr); /* only upper 6 bits (FEC_HASH_BITS) are used * which point to specific bit in the hash registers @@ -2988,13 +3707,12 @@ static void set_multicast_list(struct net_device *ndev) static int fec_set_mac_address(struct net_device *ndev, void *p) { - struct fec_enet_private *fep = netdev_priv(ndev); struct sockaddr *addr = p; if (addr) { if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + eth_hw_addr_set(ndev, addr->sa_data); } /* Add netif status check here to avoid system hang in below case: @@ -3005,36 +3723,10 @@ fec_set_mac_address(struct net_device *ndev, void *p) if (!netif_running(ndev)) return 0; - writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | - (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), - fep->hwp + FEC_ADDR_LOW); - writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), - fep->hwp + FEC_ADDR_HIGH); - return 0; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -/** - * fec_poll_controller - FEC Poll controller function - * @dev: The FEC network adapter - * - * Polled functionality used by netconsole and others in non interrupt mode - * - */ -static void fec_poll_controller(struct net_device *dev) -{ - int i; - struct fec_enet_private *fep = netdev_priv(dev); + fec_set_hw_mac_addr(ndev); - for (i = 0; i < FEC_IRQ_NUM; i++) { - if (fep->irq[i] > 0) { - disable_irq(fep->irq[i]); - fec_enet_interrupt(fep->irq[i], dev); - enable_irq(fep->irq[i]); - } - } + return 0; } -#endif static inline void fec_enet_set_netdev_features(struct net_device *netdev, netdev_features_t features) @@ -3075,19 +3767,296 @@ static int fec_set_features(struct net_device *netdev, return 0; } +static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + u16 vlan_tag = 0; + + if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) + return netdev_pick_tx(ndev, skb, NULL); + + /* VLAN is present in the payload.*/ + if (eth_type_vlan(skb->protocol)) { + struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb); + + vlan_tag = ntohs(vhdr->h_vlan_TCI); + /* VLAN is present in the skb but not yet pushed in the payload.*/ + } else if (skb_vlan_tag_present(skb)) { + vlan_tag = skb->vlan_tci; + } else { + return vlan_tag; + } + + return fec_enet_vlan_pri_to_queue[vlan_tag >> 13]; +} + +static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf) +{ + struct fec_enet_private *fep = netdev_priv(dev); + bool is_run = netif_running(dev); + struct bpf_prog *old_prog; + + switch (bpf->command) { + case XDP_SETUP_PROG: + /* No need to support the SoCs that require to + * do the frame swap because the performance wouldn't be + * better than the skb mode. 
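+ * (the swap has to touch every word of every frame, which costs
+ * roughly as much as the copy that skb mode already performs)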
+ */ + if (fep->quirks & FEC_QUIRK_SWAP_FRAME) + return -EOPNOTSUPP; + + if (!bpf->prog) + xdp_features_clear_redirect_target(dev); + + if (is_run) { + napi_disable(&fep->napi); + netif_tx_disable(dev); + } + + old_prog = xchg(&fep->xdp_prog, bpf->prog); + if (old_prog) + bpf_prog_put(old_prog); + + fec_restart(dev); + + if (is_run) { + napi_enable(&fep->napi); + netif_tx_start_all_queues(dev); + } + + if (bpf->prog) + xdp_features_set_redirect_target(dev, false); + + return 0; + + case XDP_SETUP_XSK_POOL: + return -EOPNOTSUPP; + + default: + return -EOPNOTSUPP; + } +} + +static int +fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index) +{ + if (unlikely(index < 0)) + return 0; + + return (index % fep->num_tx_queues); +} + +static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep, + struct fec_enet_priv_tx_q *txq, + void *frame, u32 dma_sync_len, + bool ndo_xmit) +{ + unsigned int index, status, estatus; + struct bufdesc *bdp; + dma_addr_t dma_addr; + int entries_free; + u16 frame_len; + + entries_free = fec_enet_get_free_txdesc_num(txq); + if (entries_free < MAX_SKB_FRAGS + 1) { + netdev_err_once(fep->netdev, "NOT enough BD for SG!\n"); + return -EBUSY; + } + + /* Fill in a Tx ring entry */ + bdp = txq->bd.cur; + status = fec16_to_cpu(bdp->cbd_sc); + status &= ~BD_ENET_TX_STATS; + + index = fec_enet_get_bd_index(bdp, &txq->bd); + + if (ndo_xmit) { + struct xdp_frame *xdpf = frame; + + dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data, + xdpf->len, DMA_TO_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, dma_addr)) + return -ENOMEM; + + frame_len = xdpf->len; + txq->tx_buf[index].buf_p = xdpf; + txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO; + } else { + struct xdp_buff *xdpb = frame; + struct page *page; + + page = virt_to_page(xdpb->data); + dma_addr = page_pool_get_dma_addr(page) + + (xdpb->data - xdpb->data_hard_start); + dma_sync_single_for_device(&fep->pdev->dev, dma_addr, + dma_sync_len, DMA_BIDIRECTIONAL); + frame_len = xdpb->data_end - xdpb->data; + txq->tx_buf[index].buf_p = page; + txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX; + } + + status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); + if (fep->bufdesc_ex) + estatus = BD_ENET_TX_INT; + + bdp->cbd_bufaddr = cpu_to_fec32(dma_addr); + bdp->cbd_datlen = cpu_to_fec16(frame_len); + + if (fep->bufdesc_ex) { + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + + if (fep->quirks & FEC_QUIRK_HAS_AVB) + estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); + + ebdp->cbd_bdu = 0; + ebdp->cbd_esc = cpu_to_fec32(estatus); + } + + /* Make sure the updates to rest of the descriptor are performed before + * transferring ownership. + */ + dma_wmb(); + + /* Send it on its way. Tell FEC it's ready, interrupt when done, + * it's the last BD of the frame, and to put the CRC on the end. + */ + status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); + bdp->cbd_sc = cpu_to_fec16(status); + + /* If this was the last BD in the ring, start at the beginning again. */ + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); + + /* Make sure the update to bdp are performed before txq->bd.cur. 
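+ * (the descriptor writes must be visible in memory before the ring
+ * pointer advances and the transmit is triggered below)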
*/ + dma_wmb(); + + txq->bd.cur = bdp; + + /* Trigger transmission start */ + writel(0, txq->bd.reg_desc_active); + + return 0; +} + +static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep, + int cpu, struct xdp_buff *xdp, + u32 dma_sync_len) +{ + struct fec_enet_priv_tx_q *txq; + struct netdev_queue *nq; + int queue, ret; + + queue = fec_enet_xdp_get_tx_queue(fep, cpu); + txq = fep->tx_queue[queue]; + nq = netdev_get_tx_queue(fep->netdev, queue); + + __netif_tx_lock(nq, cpu); + + /* Avoid tx timeout as XDP shares the queue with kernel stack */ + txq_trans_cond_update(nq); + ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false); + + __netif_tx_unlock(nq); + + return ret; +} + +static int fec_enet_xdp_xmit(struct net_device *dev, + int num_frames, + struct xdp_frame **frames, + u32 flags) +{ + struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_priv_tx_q *txq; + int cpu = smp_processor_id(); + unsigned int sent_frames = 0; + struct netdev_queue *nq; + unsigned int queue; + int i; + + queue = fec_enet_xdp_get_tx_queue(fep, cpu); + txq = fep->tx_queue[queue]; + nq = netdev_get_tx_queue(fep->netdev, queue); + + __netif_tx_lock(nq, cpu); + + /* Avoid tx timeout as XDP shares the queue with kernel stack */ + txq_trans_cond_update(nq); + for (i = 0; i < num_frames; i++) { + if (fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true) < 0) + break; + sent_frames++; + } + + __netif_tx_unlock(nq); + + return sent_frames; +} + +static int fec_hwtstamp_get(struct net_device *ndev, + struct kernel_hwtstamp_config *config) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + if (!netif_running(ndev)) + return -EINVAL; + + if (!fep->bufdesc_ex) + return -EOPNOTSUPP; + + fec_ptp_get(ndev, config); + + return 0; +} + +static int fec_hwtstamp_set(struct net_device *ndev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + if (!netif_running(ndev)) + return -EINVAL; + + if (!fep->bufdesc_ex) + return -EOPNOTSUPP; + + return fec_ptp_set(ndev, config, extack); +} + +static int fec_change_mtu(struct net_device *ndev, int new_mtu) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int order; + + if (netif_running(ndev)) + return -EBUSY; + + order = get_order(new_mtu + ETH_HLEN + ETH_FCS_LEN + + FEC_DRV_RESERVE_SPACE); + fep->rx_frame_size = (PAGE_SIZE << order) - FEC_DRV_RESERVE_SPACE; + fep->pagepool_order = order; + WRITE_ONCE(ndev->mtu, new_mtu); + + return 0; +} + static const struct net_device_ops fec_netdev_ops = { .ndo_open = fec_enet_open, .ndo_stop = fec_enet_close, .ndo_start_xmit = fec_enet_start_xmit, + .ndo_select_queue = fec_enet_select_queue, .ndo_set_rx_mode = set_multicast_list, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = fec_timeout, .ndo_set_mac_address = fec_set_mac_address, - .ndo_do_ioctl = fec_enet_ioctl, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = fec_poll_controller, -#endif + .ndo_change_mtu = fec_change_mtu, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_features = fec_set_features, + .ndo_bpf = fec_enet_bpf, + .ndo_xdp_xmit = fec_enet_xdp_xmit, + .ndo_hwtstamp_get = fec_hwtstamp_get, + .ndo_hwtstamp_set = fec_hwtstamp_set, }; static const unsigned short offset_des_active_rxq[] = { @@ -3112,33 +4081,44 @@ static int fec_enet_init(struct net_device *ndev) unsigned dsize = fep->bufdesc_ex ? 
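/* extended descriptors add the esc/bdu/timestamp words, hence the larger size */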
sizeof(struct bufdesc_ex) : sizeof(struct bufdesc); unsigned dsize_log2 = __fls(dsize); + int ret; WARN_ON(dsize != (1 << dsize_log2)); -#if defined(CONFIG_ARM) - fep->rx_align = 0xf; +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) fep->tx_align = 0xf; #else - fep->rx_align = 0x3; fep->tx_align = 0x3; #endif + fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT; + fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT; + fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT; + fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT; + + /* Check mask of the streaming and coherent API */ + ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32)); + if (ret < 0) { + dev_warn(&fep->pdev->dev, "No suitable DMA available\n"); + return ret; + } - fec_enet_alloc_queue(ndev); + ret = fec_enet_alloc_queue(ndev); + if (ret) + return ret; bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize; /* Allocate memory for buffer descriptors. */ - cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma, - GFP_KERNEL); + cbd_base = fec_dmam_alloc(&fep->pdev->dev, bd_size, &bd_dma, + GFP_KERNEL); if (!cbd_base) { - return -ENOMEM; + ret = -ENOMEM; + goto free_queue_mem; } - memset(cbd_base, 0, bd_size); - /* Get the Ethernet address */ - fec_get_mac(ndev); - /* make sure MAC we just acquired is programmed into the hw */ - fec_set_mac_address(ndev, NULL); + ret = fec_get_mac(ndev); + if (ret) + goto free_queue_mem; /* Set receive and transmit descriptor base. */ for (i = 0; i < fep->num_rx_queues; i++) { @@ -3180,14 +4160,14 @@ static int fec_enet_init(struct net_device *ndev) ndev->ethtool_ops = &fec_enet_ethtool_ops; writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); - netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); + netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi); if (fep->quirks & FEC_QUIRK_HAS_VLAN) /* enable hw VLAN support */ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; if (fep->quirks & FEC_QUIRK_HAS_CSUM) { - ndev->gso_max_segs = FEC_MAX_TSO_SEGS; + netif_set_tso_max_segs(ndev, FEC_MAX_TSO_SEGS); /* enable hw accelerator */ ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM @@ -3195,13 +4175,15 @@ static int fec_enet_init(struct net_device *ndev) fep->csum_flags |= FLAG_RX_CSUM_ENABLED; } - if (fep->quirks & FEC_QUIRK_HAS_AVB) { + if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) fep->tx_align = 0; - fep->rx_align = 0x3f; - } ndev->hw_features = ndev->features; + if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME)) + ndev->xdp_features = NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT; + fec_restart(ndev); if (fep->quirks & FEC_QUIRK_MIB_CLEAR) @@ -3210,15 +4192,27 @@ static int fec_enet_init(struct net_device *ndev) fec_enet_update_ethtool_stats(ndev); return 0; + +free_queue_mem: + fec_enet_free_queue(ndev); + return ret; +} + +static void fec_enet_deinit(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + netif_napi_del(&fep->napi); + fec_enet_free_queue(ndev); } #ifdef CONFIG_OF static int fec_reset_phy(struct platform_device *pdev) { - int err, phy_reset; - bool active_high = false; + struct gpio_desc *phy_reset; int msec = 1, phy_post_delay = 0; struct device_node *np = pdev->dev.of_node; + int err; if (!np) return 0; @@ -3228,33 +4222,26 @@ static int fec_reset_phy(struct platform_device *pdev) if (!err && msec > 1000) msec = 1; - phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0); - if (phy_reset == -EPROBE_DEFER) - return phy_reset; - else if (!gpio_is_valid(phy_reset)) - return 0; - err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay); /* 
valid reset duration should be less than 1s */ if (!err && phy_post_delay > 1000) return -EINVAL; - active_high = of_property_read_bool(np, "phy-reset-active-high"); + phy_reset = devm_gpiod_get_optional(&pdev->dev, "phy-reset", + GPIOD_OUT_HIGH); + if (IS_ERR(phy_reset)) + return dev_err_probe(&pdev->dev, PTR_ERR(phy_reset), + "failed to get phy-reset-gpios\n"); - err = devm_gpio_request_one(&pdev->dev, phy_reset, - active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW, - "phy-reset"); - if (err) { - dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); - return err; - } + if (!phy_reset) + return 0; if (msec > 20) msleep(msec); else usleep_range(msec * 1000, msec * 1000 + 1000); - gpio_set_value_cansleep(phy_reset, !active_high); + gpiod_set_value_cansleep(phy_reset, 0); if (!phy_post_delay) return 0; @@ -3309,19 +4296,80 @@ fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) } +static int fec_enet_get_irq_cnt(struct platform_device *pdev) +{ + int irq_cnt = platform_irq_count(pdev); + + if (irq_cnt > FEC_IRQ_NUM) + irq_cnt = FEC_IRQ_NUM; /* last for pps */ + else if (irq_cnt == 2) + irq_cnt = 1; /* last for pps */ + else if (irq_cnt <= 0) + irq_cnt = 1; /* At least 1 irq is needed */ + return irq_cnt; +} + +static void fec_enet_get_wakeup_irq(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct fec_enet_private *fep = netdev_priv(ndev); + + if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2) + fep->wake_irq = fep->irq[2]; + else + fep->wake_irq = fep->irq[0]; +} + +static int fec_enet_init_stop_mode(struct fec_enet_private *fep, + struct device_node *np) +{ + struct device_node *gpr_np; + u32 out_val[3]; + int ret = 0; + + gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0); + if (!gpr_np) + return 0; + + ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val, + ARRAY_SIZE(out_val)); + if (ret) { + dev_dbg(&fep->pdev->dev, "no stop mode property\n"); + goto out; + } + + fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np); + if (IS_ERR(fep->stop_gpr.gpr)) { + dev_err(&fep->pdev->dev, "could not find gpr regmap\n"); + ret = PTR_ERR(fep->stop_gpr.gpr); + fep->stop_gpr.gpr = NULL; + goto out; + } + + fep->stop_gpr.reg = out_val[1]; + fep->stop_gpr.bit = out_val[2]; + +out: + of_node_put(gpr_np); + + return ret; +} + static int fec_probe(struct platform_device *pdev) { struct fec_enet_private *fep; struct fec_platform_data *pdata; + phy_interface_t interface; struct net_device *ndev; int i, irq, ret = 0; - struct resource *r; - const struct of_device_id *of_id; static int dev_id; struct device_node *np = pdev->dev.of_node, *phy_node; int num_tx_qs; int num_rx_qs; + char irq_name[8]; + int irq_cnt; + const struct fec_devinfo *dev_info; fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); @@ -3336,26 +4384,24 @@ fec_probe(struct platform_device *pdev) /* setup board info structure */ fep = netdev_priv(ndev); - of_id = of_match_device(fec_dt_ids, &pdev->dev); - if (of_id) - pdev->id_entry = of_id->data; - fep->quirks = pdev->id_entry->driver_data; + dev_info = device_get_match_data(&pdev->dev); + if (!dev_info) + dev_info = (const struct fec_devinfo *)pdev->id_entry->driver_data; + if (dev_info) + fep->quirks = dev_info->quirks; fep->netdev = ndev; fep->num_rx_queues = num_rx_qs; fep->num_tx_queues = num_tx_qs; -#if !defined(CONFIG_M5272) /* default enable pause frame auto negotiation */ if (fep->quirks & FEC_QUIRK_HAS_GBIT) fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; -#endif /* Select default pin state */ 
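/* (the sleep state is selected again on the error paths below, in
 * fec_enet_close() and in the suspend handlers)
 */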
pinctrl_pm_select_default_state(&pdev->dev); - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - fep->hwp = devm_ioremap_resource(&pdev->dev, r); + fep->hwp = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(fep->hwp)) { ret = PTR_ERR(fep->hwp); goto failed_ioremap; @@ -3371,9 +4417,17 @@ fec_probe(struct platform_device *pdev) !of_property_read_bool(np, "fsl,err006687-workaround-present")) fep->quirks |= FEC_QUIRK_ERR006687; - if (of_get_property(np, "fsl,magic-packet", NULL)) + ret = fec_enet_ipc_handle_init(fep); + if (ret) + goto failed_ipc_init; + + if (of_property_read_bool(np, "fsl,magic-packet")) fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; + ret = fec_enet_init_stop_mode(fep, np); + if (ret) + goto failed_stop_mode; + phy_node = of_parse_phandle(np, "phy-handle", 0); if (!phy_node && of_phy_is_fixed_link(np)) { ret = of_phy_register_fixed_link(np); @@ -3386,17 +4440,21 @@ fec_probe(struct platform_device *pdev) } fep->phy_node = phy_node; - ret = of_get_phy_mode(pdev->dev.of_node); - if (ret < 0) { + ret = of_get_phy_mode(pdev->dev.of_node, &interface); + if (ret) { pdata = dev_get_platdata(&pdev->dev); if (pdata) fep->phy_interface = pdata->phy; else fep->phy_interface = PHY_INTERFACE_MODE_MII; } else { - fep->phy_interface = ret; + fep->phy_interface = interface; } + ret = fec_enet_parse_rgmii_delay(fep, np); + if (ret) + goto failed_rgmii_delay; + fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); if (IS_ERR(fep->clk_ipg)) { ret = PTR_ERR(fep->clk_ipg); @@ -3412,17 +4470,29 @@ fec_probe(struct platform_device *pdev) fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); /* enet_out is optional, depends on board */ - fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out"); - if (IS_ERR(fep->clk_enet_out)) - fep->clk_enet_out = NULL; + fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out"); + if (IS_ERR(fep->clk_enet_out)) { + ret = PTR_ERR(fep->clk_enet_out); + goto failed_clk; + } fep->ptp_clk_on = false; mutex_init(&fep->ptp_clk_mutex); /* clk_ref is optional, depends on board */ - fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); - if (IS_ERR(fep->clk_ref)) - fep->clk_ref = NULL; + fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref"); + if (IS_ERR(fep->clk_ref)) { + ret = PTR_ERR(fep->clk_ref); + goto failed_clk; + } + fep->clk_ref_rate = clk_get_rate(fep->clk_ref); + + /* clk_2x_txclk is optional, depends on board */ + if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) { + fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk"); + if (IS_ERR(fep->clk_2x_txclk)) + fep->clk_2x_txclk = NULL; + } fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); @@ -3438,17 +4508,23 @@ fec_probe(struct platform_device *pdev) ret = clk_prepare_enable(fep->clk_ipg); if (ret) goto failed_clk_ipg; + ret = clk_prepare_enable(fep->clk_ahb); + if (ret) + goto failed_clk_ahb; - fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); + fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy"); if (!IS_ERR(fep->reg_phy)) { ret = regulator_enable(fep->reg_phy); if (ret) { dev_err(&pdev->dev, "Failed to enable phy regulator: %d\n", ret); - clk_disable_unprepare(fep->clk_ipg); goto failed_regulator; } } else { + if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto failed_regulator; + } fep->reg_phy = NULL; } @@ -3462,18 +4538,20 @@ fec_probe(struct platform_device *pdev) if (ret) goto failed_reset; + irq_cnt = fec_enet_get_irq_cnt(pdev); if (fep->bufdesc_ex) - fec_ptp_init(pdev); + fec_ptp_init(pdev, 
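/* pass the IRQ count so the PTP code can claim the trailing pps line */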
@@ -3462,18 +4538,20 @@
 	if (ret)
 		goto failed_reset;
 
+	irq_cnt = fec_enet_get_irq_cnt(pdev);
 	if (fep->bufdesc_ex)
-		fec_ptp_init(pdev);
+		fec_ptp_init(pdev, irq_cnt);
 
 	ret = fec_enet_init(ndev);
 	if (ret)
 		goto failed_init;
 
-	for (i = 0; i < FEC_IRQ_NUM; i++) {
-		irq = platform_get_irq(pdev, i);
+	for (i = 0; i < irq_cnt; i++) {
+		snprintf(irq_name, sizeof(irq_name), "int%d", i);
+		irq = platform_get_irq_byname_optional(pdev, irq_name);
+		if (irq < 0)
+			irq = platform_get_irq(pdev, i);
 		if (irq < 0) {
-			if (i)
-				break;
 			ret = irq;
 			goto failed_irq;
 		}
@@ -3485,7 +4563,9 @@
 		fep->irq[i] = irq;
 	}
 
-	init_completion(&fep->mdio_done);
+	/* Decide which interrupt line is wakeup capable */
+	fec_enet_get_wakeup_irq(pdev);
+
 	ret = fec_enet_mii_init(pdev);
 	if (ret)
 		goto failed_mii_init;
@@ -3495,6 +4575,16 @@
 	fec_enet_clk_enable(ndev, false);
 	pinctrl_pm_select_sleep_state(&pdev->dev);
 
+	fep->pagepool_order = 0;
+	fep->rx_frame_size = FEC_ENET_RX_FRSIZE;
+
+	if (fep->quirks & FEC_QUIRK_JUMBO_FRAME)
+		fep->max_buf_size = MAX_JUMBO_BUF_SIZE;
+	else
+		fep->max_buf_size = PKT_MAXBUF_SIZE;
+
+	ndev->max_mtu = fep->max_buf_size - ETH_HLEN - ETH_FCS_LEN;
+
 	ret = register_netdev(ndev);
 	if (ret)
 		goto failed_register;
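
The interrupt loop above first tries named resources ("int0", "int1", ...) via platform_get_irq_byname_optional(), which stays silent when the name is absent, and then falls back to positional lookup; with the fixed FEC_IRQ_NUM loop gone, the count now comes from platform_irq_count(). Roughly, assuming a hypothetical handler and driver:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

/* Sketch: request up to max_irqs lines, preferring named entries
 * ("int0", "int1", ...) and falling back to index-based lookup.
 */
static int example_request_irqs(struct platform_device *pdev, int max_irqs)
{
	char name[8];
	int i, irq, ret;

	for (i = 0; i < max_irqs; i++) {
		snprintf(name, sizeof(name), "int%d", i);
		/* _optional: no error message if the name is missing */
		irq = platform_get_irq_byname_optional(pdev, name);
		if (irq < 0)
			irq = platform_get_irq(pdev, i);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, example_isr,
				       0, dev_name(&pdev->dev), pdev);
		if (ret)
			return ret;
	}
	return 0;
}
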
@@ -3505,10 +4595,8 @@
 	if (fep->bufdesc_ex && fep->ptp_clock)
 		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
 
-	fep->rx_copybreak = COPYBREAK_DEFAULT;
 	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
 
-	pm_runtime_mark_last_busy(&pdev->dev);
 	pm_runtime_put_autosuspend(&pdev->dev);
 
 	return 0;
@@ -3517,33 +4605,48 @@ failed_register:
 	fec_enet_mii_remove(fep);
 failed_mii_init:
 failed_irq:
+	fec_enet_deinit(ndev);
 failed_init:
 	fec_ptp_stop(pdev);
-	if (fep->reg_phy)
-		regulator_disable(fep->reg_phy);
 failed_reset:
-	pm_runtime_put(&pdev->dev);
+	pm_runtime_put_noidle(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
+	if (fep->reg_phy)
+		regulator_disable(fep->reg_phy);
 failed_regulator:
+	clk_disable_unprepare(fep->clk_ahb);
+failed_clk_ahb:
+	clk_disable_unprepare(fep->clk_ipg);
 failed_clk_ipg:
 	fec_enet_clk_enable(ndev, false);
 failed_clk:
+failed_rgmii_delay:
 	if (of_phy_is_fixed_link(np))
 		of_phy_deregister_fixed_link(np);
-failed_phy:
 	of_node_put(phy_node);
+failed_stop_mode:
+failed_ipc_init:
+failed_phy:
+	dev_id--;
 failed_ioremap:
 	free_netdev(ndev);
 
 	return ret;
 }
 
-static int
+static void
 fec_drv_remove(struct platform_device *pdev)
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct device_node *np = pdev->dev.of_node;
+	int ret;
+
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0)
+		dev_err(&pdev->dev,
+			"Failed to resume device in remove callback (%pe)\n",
+			ERR_PTR(ret));
 
 	cancel_work_sync(&fep->tx_timeout_work);
 	fec_ptp_stop(pdev);
@@ -3551,18 +4654,30 @@ fec_drv_remove(struct platform_device *pdev)
 	fec_enet_mii_remove(fep);
 	if (fep->reg_phy)
 		regulator_disable(fep->reg_phy);
+
 	if (of_phy_is_fixed_link(np))
 		of_phy_deregister_fixed_link(np);
 	of_node_put(fep->phy_node);
-	free_netdev(ndev);
 
-	return 0;
+	/* After pm_runtime_get_sync() failed, the clks are still off, so skip
+	 * disabling them again.
+	 */
+	if (ret >= 0) {
+		clk_disable_unprepare(fep->clk_ahb);
+		clk_disable_unprepare(fep->clk_ipg);
+	}
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	fec_enet_deinit(ndev);
+	free_netdev(ndev);
 }
 
-static int __maybe_unused fec_suspend(struct device *dev)
+static int fec_suspend(struct device *dev)
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
+	int ret;
 
 	rtnl_lock();
 	if (netif_running(ndev)) {
@@ -3574,9 +4689,28 @@ static int __maybe_unused fec_suspend(struct device *dev)
 		netif_device_detach(ndev);
 		netif_tx_unlock_bh(ndev);
 		fec_stop(ndev);
-		fec_enet_clk_enable(ndev, false);
-		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
+		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
+			fec_irqs_disable(ndev);
 			pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+		} else {
+			fec_irqs_disable_except_wakeup(ndev);
+			if (fep->wake_irq > 0) {
+				disable_irq(fep->wake_irq);
+				enable_irq_wake(fep->wake_irq);
+			}
+			fec_enet_stop_mode(fep, true);
+		}
+		/* It's safe to disable clocks since interrupts are masked */
+		fec_enet_clk_enable(ndev, false);
+
+		fep->rpm_active = !pm_runtime_status_suspended(dev);
+		if (fep->rpm_active) {
+			ret = pm_runtime_force_suspend(dev);
+			if (ret < 0) {
+				rtnl_unlock();
+				return ret;
+			}
+		}
 	}
 	rtnl_unlock();
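
In the Wake-on-LAN path above, exactly one interrupt line stays armed as a wakeup source: it is first taken out of normal service with disable_irq(), then flagged for wakeup with enable_irq_wake(), and resume undoes both in reverse order. The skeleton of that pairing, assuming a driver-private wake_irq field:

#include <linux/interrupt.h>

/* Sketch: arm a single IRQ line for system wakeup on suspend and
 * disarm it symmetrically on resume.
 */
static void example_arm_wake_irq(int wake_irq)
{
	if (wake_irq > 0) {
		disable_irq(wake_irq);		/* stop normal servicing */
		enable_irq_wake(wake_irq);	/* keep it as a wake source */
	}
}

static void example_disarm_wake_irq(int wake_irq)
{
	if (wake_irq > 0) {
		disable_irq_wake(wake_irq);
		enable_irq(wake_irq);
	}
}
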
@@ -3592,11 +4726,10 @@
 	return 0;
 }
 
-static int __maybe_unused fec_resume(struct device *dev)
+static int fec_resume(struct device *dev)
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
 	int ret;
 	int val;
@@ -3608,14 +4741,21 @@
 
 	rtnl_lock();
 	if (netif_running(ndev)) {
+		if (fep->rpm_active)
+			pm_runtime_force_resume(dev);
+
 		ret = fec_enet_clk_enable(ndev, true);
 		if (ret) {
 			rtnl_unlock();
 			goto failed_clk;
 		}
 		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
-			if (pdata && pdata->sleep_mode_enable)
-				pdata->sleep_mode_enable(false);
+			fec_enet_stop_mode(fep, false);
+			if (fep->wake_irq) {
+				disable_irq_wake(fep->wake_irq);
+				enable_irq(fep->wake_irq);
+			}
+
 			val = readl(fep->hwp + FEC_ECNTRL);
 			val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
 			writel(val, fep->hwp + FEC_ECNTRL);
@@ -3628,6 +4768,7 @@
 		netif_device_attach(ndev);
 		netif_tx_unlock_bh(ndev);
 		napi_enable(&fep->napi);
+		phy_init_hw(ndev->phydev);
 		phy_start(ndev->phydev);
 	}
 	rtnl_unlock();
@@ -3640,41 +4781,55 @@ failed_clk:
 	return ret;
 }
 
-static int __maybe_unused fec_runtime_suspend(struct device *dev)
+static int fec_runtime_suspend(struct device *dev)
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
 
+	clk_disable_unprepare(fep->clk_ahb);
 	clk_disable_unprepare(fep->clk_ipg);
 
 	return 0;
 }
 
-static int __maybe_unused fec_runtime_resume(struct device *dev)
+static int fec_runtime_resume(struct device *dev)
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
+	int ret;
 
-	return clk_prepare_enable(fep->clk_ipg);
+	ret = clk_prepare_enable(fep->clk_ahb);
+	if (ret)
+		return ret;
+	ret = clk_prepare_enable(fep->clk_ipg);
+	if (ret)
+		goto failed_clk_ipg;
+
+	return 0;
+
+failed_clk_ipg:
+	clk_disable_unprepare(fep->clk_ahb);
+	return ret;
 }
 
 static const struct dev_pm_ops fec_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
-	SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
+	SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
+	RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
 };
 
 static struct platform_driver fec_driver = {
 	.driver	= {
 		.name	= DRIVER_NAME,
-		.pm	= &fec_pm_ops,
+		.pm	= pm_ptr(&fec_pm_ops),
 		.of_match_table = fec_dt_ids,
+		.suppress_bind_attrs = true,
 	},
 	.id_table = fec_devtype,
 	.probe	= fec_probe,
-	.remove	= fec_drv_remove,
+	.remove = fec_drv_remove,
 };
 
 module_platform_driver(fec_driver);
 
-MODULE_ALIAS("platform:"DRIVER_NAME);
+MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver");
 MODULE_LICENSE("GPL");
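
Finally, the PM callbacks drop their __maybe_unused annotations: the newer SYSTEM_SLEEP_PM_OPS()/RUNTIME_PM_OPS() macros, combined with pm_ptr() on the ops table, keep the function symbols referenced in the expression so the compiler can discard them when CONFIG_PM is off without unused-function warnings. The shape of that boilerplate, with placeholder callbacks and an invented driver name:

#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int example_suspend(struct device *dev)         { return 0; }
static int example_resume(struct device *dev)          { return 0; }
static int example_runtime_suspend(struct device *dev) { return 0; }
static int example_runtime_resume(struct device *dev)  { return 0; }

/* No __maybe_unused needed: when CONFIG_PM is disabled, pm_ptr()
 * evaluates to NULL and the callbacks are dead-code eliminated.
 */
static const struct dev_pm_ops example_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
	RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume, NULL)
};

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		.pm = pm_ptr(&example_pm_ops),
	},
};
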
