Diffstat (limited to 'drivers/net/ethernet/xilinx')
-rw-r--r--	drivers/net/ethernet/xilinx/ll_temac_main.c	4
-rw-r--r--	drivers/net/ethernet/xilinx/xilinx_axienet.h	147
-rw-r--r--	drivers/net/ethernet/xilinx/xilinx_axienet_main.c	520
-rw-r--r--	drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c	23
-rw-r--r--	drivers/net/ethernet/xilinx/xilinx_emaclite.c	26
5 files changed, 581 insertions, 139 deletions
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9df39cf8b097..edb36ff07a0c 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1443,7 +1443,7 @@ static int temac_probe(struct platform_device *pdev)
 	}
 
 	/* map device registers */
-	lp->regs = devm_platform_ioremap_resource_byname(pdev, 0);
+	lp->regs = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(lp->regs)) {
 		dev_err(&pdev->dev, "could not map TEMAC registers\n");
 		return -ENOMEM;
@@ -1649,7 +1649,7 @@ MODULE_DEVICE_TABLE(of, temac_of_match);
 
 static struct platform_driver temac_driver = {
 	.probe = temac_probe,
-	.remove_new = temac_remove,
+	.remove = temac_remove,
 	.driver = {
 		.name = "xilinx_temac",
 		.of_match_table = temac_of_match,
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 807ead678551..a3f4f3e42587 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -29,26 +29,26 @@
 /* Configuration options */
 
 /* Accept all incoming packets. Default: disabled (cleared) */
-#define XAE_OPTION_PROMISC		(1 << 0)
+#define XAE_OPTION_PROMISC		BIT(0)
 
 /* Jumbo frame support for Tx & Rx. Default: disabled (cleared) */
-#define XAE_OPTION_JUMBO		(1 << 1)
+#define XAE_OPTION_JUMBO		BIT(1)
 
 /* VLAN Rx & Tx frame support. Default: disabled (cleared) */
-#define XAE_OPTION_VLAN			(1 << 2)
+#define XAE_OPTION_VLAN			BIT(2)
 
 /* Enable recognition of flow control frames on Rx. Default: enabled (set) */
-#define XAE_OPTION_FLOW_CONTROL		(1 << 4)
+#define XAE_OPTION_FLOW_CONTROL		BIT(4)
 
 /* Strip FCS and PAD from incoming frames. Note: PAD from VLAN frames is not
  * stripped. Default: disabled (set) */
-#define XAE_OPTION_FCS_STRIP		(1 << 5)
+#define XAE_OPTION_FCS_STRIP		BIT(5)
 
 /* Generate FCS field and add PAD automatically for outgoing frames.
  * Default: enabled (set)
 */
-#define XAE_OPTION_FCS_INSERT		(1 << 6)
+#define XAE_OPTION_FCS_INSERT		BIT(6)
 
 /* Enable Length/Type error checking for incoming frames. When this option is
  * set, the MAC will filter frames that have a mismatched type/length field
@@ -56,13 +56,13 @@
  * types of frames are encountered. When this option is cleared, the MAC will
  * allow these types of frames to be received. Default: enabled (set)
 */
-#define XAE_OPTION_LENTYPE_ERR		(1 << 7)
+#define XAE_OPTION_LENTYPE_ERR		BIT(7)
 
 /* Enable the transmitter. Default: enabled (set) */
-#define XAE_OPTION_TXEN			(1 << 11)
+#define XAE_OPTION_TXEN			BIT(11)
 
 /* Enable the receiver. Default: enabled (set) */
-#define XAE_OPTION_RXEN			(1 << 12)
+#define XAE_OPTION_RXEN			BIT(12)
 
 /* Default options set when device is initialized or reset */
 #define XAE_OPTION_DEFAULTS				\
@@ -120,6 +120,9 @@
 #define XAXIDMA_IRQ_ERROR_MASK		0x00004000 /* Error interrupt */
 #define XAXIDMA_IRQ_ALL_MASK		0x00007000 /* All interrupts */
 
+/* Constant to convert delay counts to microseconds */
+#define XAXIDMA_DELAY_SCALE		(125ULL * USEC_PER_SEC)
+
 /* Default TX/RX Threshold and delay timer values for SGDMA mode */
 #define XAXIDMA_DFT_TX_THRESHOLD	24
 #define XAXIDMA_DFT_TX_USEC		50
@@ -156,22 +159,27 @@
 #define XAE_TPID0_OFFSET	0x00000028 /* VLAN TPID0 register */
 #define XAE_TPID1_OFFSET	0x0000002C /* VLAN TPID1 register */
 #define XAE_PPST_OFFSET		0x00000030 /* PCS PMA Soft Temac Status Reg */
+#define XAE_STATS_OFFSET	0x00000200 /* Statistics counters */
 #define XAE_RCW0_OFFSET		0x00000400 /* Rx Configuration Word 0 */
 #define XAE_RCW1_OFFSET		0x00000404 /* Rx Configuration Word 1 */
 #define XAE_TC_OFFSET		0x00000408 /* Tx Configuration */
 #define XAE_FCC_OFFSET		0x0000040C /* Flow Control Configuration */
-#define XAE_EMMC_OFFSET		0x00000410 /* EMAC mode configuration */
-#define XAE_PHYC_OFFSET		0x00000414 /* RGMII/SGMII configuration */
+#define XAE_EMMC_OFFSET		0x00000410 /* MAC speed configuration */
+#define XAE_PHYC_OFFSET		0x00000414 /* RX Max Frame Configuration */
 #define XAE_ID_OFFSET		0x000004F8 /* Identification register */
-#define XAE_MDIO_MC_OFFSET	0x00000500 /* MII Management Config */
-#define XAE_MDIO_MCR_OFFSET	0x00000504 /* MII Management Control */
-#define XAE_MDIO_MWD_OFFSET	0x00000508 /* MII Management Write Data */
-#define XAE_MDIO_MRD_OFFSET	0x0000050C /* MII Management Read Data */
+#define XAE_ABILITY_OFFSET	0x000004FC /* Ability Register offset */
+#define XAE_MDIO_MC_OFFSET	0x00000500 /* MDIO Setup */
+#define XAE_MDIO_MCR_OFFSET	0x00000504 /* MDIO Control */
+#define XAE_MDIO_MWD_OFFSET	0x00000508 /* MDIO Write Data */
+#define XAE_MDIO_MRD_OFFSET	0x0000050C /* MDIO Read Data */
 #define XAE_UAW0_OFFSET		0x00000700 /* Unicast address word 0 */
 #define XAE_UAW1_OFFSET		0x00000704 /* Unicast address word 1 */
-#define XAE_FMI_OFFSET		0x00000708 /* Filter Mask Index */
+#define XAE_FMI_OFFSET		0x00000708 /* Frame Filter Control */
+#define XAE_FFE_OFFSET		0x0000070C /* Frame Filter Enable */
 #define XAE_AF0_OFFSET		0x00000710 /* Address Filter 0 */
 #define XAE_AF1_OFFSET		0x00000714 /* Address Filter 1 */
+#define XAE_AM0_OFFSET		0x00000750 /* Frame Filter Mask Value Bytes 3-0 */
+#define XAE_AM1_OFFSET		0x00000754 /* Frame Filter Mask Value Bytes 7-4 */
 
 #define XAE_TX_VLAN_DATA_OFFSET 0x00004000 /* TX VLAN data table address */
 #define XAE_RX_VLAN_DATA_OFFSET 0x00008000 /* RX VLAN data table address */
@@ -283,6 +291,16 @@
 #define XAE_PHYC_SGLINKSPD_100		0x40000000 /* SGMII link 100 Mbit */
 #define XAE_PHYC_SGLINKSPD_1000		0x80000000 /* SGMII link 1000 Mbit */
 
+/* Bit masks for Axi Ethernet ability register */
+#define XAE_ABILITY_PFC			BIT(16)
+#define XAE_ABILITY_FRAME_FILTER	BIT(10)
+#define XAE_ABILITY_HALF_DUPLEX		BIT(9)
+#define XAE_ABILITY_STATS		BIT(8)
+#define XAE_ABILITY_2_5G		BIT(3)
+#define XAE_ABILITY_1G			BIT(2)
+#define XAE_ABILITY_100M		BIT(1)
+#define XAE_ABILITY_10M			BIT(0)
+
 /* Bit masks for Axi Ethernet MDIO interface MC register */
 #define XAE_MDIO_MC_MDIOEN_MASK		0x00000040 /* MII management enable */
 #define XAE_MDIO_MC_CLOCK_DIVIDE_MAX	0x3F /* Maximum MDIO divisor */
@@ -308,7 +326,7 @@
 */
 #define XAE_UAW1_UNICASTADDR_MASK	0x0000FFFF
 
-/* Bit masks for Axi Ethernet FMI register */
+/* Bit masks for Axi Ethernet FMC register */
 #define XAE_FMI_PM_MASK			0x80000000 /* Promis. mode enable */
 #define XAE_FMI_IND_MASK		0x00000003 /* Index Mask */
@@ -326,11 +344,12 @@
 #define XAE_MULTICAST_CAM_TABLE_NUM	4
 
 /* Axi Ethernet Synthesis features */
-#define XAE_FEATURE_PARTIAL_RX_CSUM	(1 << 0)
-#define XAE_FEATURE_PARTIAL_TX_CSUM	(1 << 1)
-#define XAE_FEATURE_FULL_RX_CSUM	(1 << 2)
-#define XAE_FEATURE_FULL_TX_CSUM	(1 << 3)
-#define XAE_FEATURE_DMA_64BIT		(1 << 4)
+#define XAE_FEATURE_PARTIAL_RX_CSUM	BIT(0)
+#define XAE_FEATURE_PARTIAL_TX_CSUM	BIT(1)
+#define XAE_FEATURE_FULL_RX_CSUM	BIT(2)
+#define XAE_FEATURE_FULL_TX_CSUM	BIT(3)
+#define XAE_FEATURE_DMA_64BIT		BIT(4)
+#define XAE_FEATURE_STATS		BIT(5)
 
 #define XAE_NO_CSUM_OFFLOAD		0
@@ -344,6 +363,61 @@
 #define XLNX_MII_STD_SELECT_REG		0x11
 #define XLNX_MII_STD_SELECT_SGMII	BIT(0)
 
+/* enum temac_stat - TEMAC statistics counters
+ *
+ * Index of statistics counters within the TEMAC. This must match the
+ * order/offset of hardware registers exactly.
+ */
+enum temac_stat {
+	STAT_RX_BYTES = 0,
+	STAT_TX_BYTES,
+	STAT_UNDERSIZE_FRAMES,
+	STAT_FRAGMENT_FRAMES,
+	STAT_RX_64_BYTE_FRAMES,
+	STAT_RX_65_127_BYTE_FRAMES,
+	STAT_RX_128_255_BYTE_FRAMES,
+	STAT_RX_256_511_BYTE_FRAMES,
+	STAT_RX_512_1023_BYTE_FRAMES,
+	STAT_RX_1024_MAX_BYTE_FRAMES,
+	STAT_RX_OVERSIZE_FRAMES,
+	STAT_TX_64_BYTE_FRAMES,
+	STAT_TX_65_127_BYTE_FRAMES,
+	STAT_TX_128_255_BYTE_FRAMES,
+	STAT_TX_256_511_BYTE_FRAMES,
+	STAT_TX_512_1023_BYTE_FRAMES,
+	STAT_TX_1024_MAX_BYTE_FRAMES,
+	STAT_TX_OVERSIZE_FRAMES,
+	STAT_RX_GOOD_FRAMES,
+	STAT_RX_FCS_ERRORS,
+	STAT_RX_BROADCAST_FRAMES,
+	STAT_RX_MULTICAST_FRAMES,
+	STAT_RX_CONTROL_FRAMES,
+	STAT_RX_LENGTH_ERRORS,
+	STAT_RX_VLAN_FRAMES,
+	STAT_RX_PAUSE_FRAMES,
+	STAT_RX_CONTROL_OPCODE_ERRORS,
+	STAT_TX_GOOD_FRAMES,
+	STAT_TX_BROADCAST_FRAMES,
+	STAT_TX_MULTICAST_FRAMES,
+	STAT_TX_UNDERRUN_ERRORS,
+	STAT_TX_CONTROL_FRAMES,
+	STAT_TX_VLAN_FRAMES,
+	STAT_TX_PAUSE_FRAMES,
+	STAT_TX_SINGLE_COLLISION_FRAMES,
+	STAT_TX_MULTIPLE_COLLISION_FRAMES,
+	STAT_TX_DEFERRED_FRAMES,
+	STAT_TX_LATE_COLLISIONS,
+	STAT_TX_EXCESS_COLLISIONS,
+	STAT_TX_EXCESS_DEFERRAL,
+	STAT_RX_ALIGNMENT_ERRORS,
+	STAT_TX_PFC_FRAMES,
+	STAT_RX_PFC_FRAMES,
+	STAT_USER_DEFINED0,
+	STAT_USER_DEFINED1,
+	STAT_USER_DEFINED2,
+	STAT_COUNT,
+};
+
 /**
  * struct axidma_bd - Axi Dma buffer descriptor layout
  * @next:         MM2S/S2MM Next Descriptor Pointer
@@ -359,6 +433,7 @@
  * @app2:         MM2S/S2MM User Application Field 2.
  * @app3:         MM2S/S2MM User Application Field 3.
  * @app4:         MM2S/S2MM User Application Field 4.
+ * @skb:          Pointer to SKB transferred using DMA
 */
 struct axidma_bd {
 	u32 next;	/* Physical address of next buffer descriptor */
@@ -399,7 +474,6 @@ struct skbuf_dma_descriptor {
 * struct axienet_local - axienet private per device data
 * @ndev:	Pointer for net_device to which it will be attached.
 * @dev:	Pointer to device structure
- * @phy_node:	Pointer to device node structure
 * @phylink:	Pointer to phylink instance
 * @phylink_config: phylink configuration settings
 * @pcs_phy:	Reference to PCS/PMA PHY if used
@@ -434,7 +508,19 @@ struct skbuf_dma_descriptor {
 * @tx_packets: TX packet count for statistics
 * @tx_bytes:	TX byte count for statistics
 * @tx_stat_sync: Synchronization object for TX stats
+ * @hw_stat_base: Base offset for statistics counters. This may be nonzero if
+ *		  the statistics counters were reset or wrapped around.
+ * @hw_last_counter: Last-seen value of each statistic counter
+ * @reset_in_progress: Set while we are performing a reset and statistics
+ *		       counters may be invalid
+ * @hw_stats_seqcount: Sequence counter for @hw_stat_base, @hw_last_counter,
+ *		       and @reset_in_progress.
+ * @stats_lock: Lock for @hw_stats_seqcount
+ * @stats_work: Work for reading the hardware statistics counters often enough
+ *		to catch overflows.
 * @dma_err_task: Work structure to process Axi DMA errors
+ * @stopping:	Set when @dma_err_task shouldn't do anything because we are
+ *		about to stop the device.
 * @tx_irq:	Axidma TX IRQ number
 * @rx_irq:	Axidma RX IRQ number
 * @eth_irq:	Ethernet core IRQ number
@@ -446,8 +532,6 @@ struct skbuf_dma_descriptor {
 *		supported, the maximum frame size would be 9k. Else it is
 *		1522 bytes (assuming support for basic VLAN)
 * @rxmem:	Stores rx memory size for jumbo frame handling.
- * @csum_offload_on_tx_path:	Stores the checksum selection on TX side.
- * @csum_offload_on_rx_path:	Stores the checksum selection on RX side.
 * @coalesce_count_rx:	Store the irq coalesce on RX side.
 * @coalesce_usec_rx:	IRQ coalesce delay for RX
 * @coalesce_count_tx:	Store the irq coalesce on TX side.
@@ -505,7 +589,15 @@ struct axienet_local {
 	u64_stats_t tx_bytes;
 	struct u64_stats_sync tx_stat_sync;
 
+	u64 hw_stat_base[STAT_COUNT];
+	u32 hw_last_counter[STAT_COUNT];
+	seqcount_mutex_t hw_stats_seqcount;
+	struct mutex stats_lock;
+	struct delayed_work stats_work;
+	bool reset_in_progress;
+
 	struct work_struct dma_err_task;
+	bool stopping;
 
 	int tx_irq;
 	int rx_irq;
@@ -518,9 +610,6 @@ struct axienet_local {
 	u32 max_frm_size;
 	u32 rxmem;
 
-	int csum_offload_on_tx_path;
-	int csum_offload_on_rx_path;
-
 	u32 coalesce_count_rx;
 	u32 coalesce_usec_rx;
 	u32 coalesce_count_tx;
@@ -537,7 +626,7 @@ struct axienet_local {
 };
 
 /**
- * struct axiethernet_option - Used to set axi ethernet hardware options
+ * struct axienet_option - Used to set axi ethernet hardware options
 * @opt:	Option to be set.
 * @reg:	Register offset to be written for setting the option
 * @m_or:	Mask to be ORed for setting the option in the register
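The new @hw_stat_base / @hw_last_counter pair widens the core's 32-bit statistics counters to 64 bits by folding the unsigned delta since the previous readout into a 64-bit base. A minimal standalone sketch of that scheme (illustrative only, not part of the patch; the raw parameter stands in for the axienet_ior() MMIO read of XAE_STATS_OFFSET):

#include <linux/types.h>

/* One widened counter: mirrors the hw_stat_base/hw_last_counter pair */
struct stat64 {
	u64 base;	/* accumulated total as of the last readout */
	u32 last;	/* raw hardware value at the last readout */
};

/* Fold the current raw reading into the 64-bit total. The u32
 * subtraction keeps the delta correct across a wrap, provided the
 * hardware counter wraps at most once between readouts.
 */
static u64 stat64_read(struct stat64 *s, u32 raw)
{
	s->base += raw - s->last;
	s->last = raw;
	return s->base;
}

This is also why the stats_work introduced below re-reads every counter periodically: at 2.5 Gbit/s a 32-bit byte counter wraps in roughly 13.7 seconds, so the 13-second rescheduling period in axienet_refresh_stats() keeps the at-most-one-wrap assumption valid.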
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index aaf780fd4f5e..f33178f90c42 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -238,11 +238,8 @@ static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
 	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
 	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
-					 (u64)125000000);
-	if (result > 255)
-		result = 255;
-
-	return result;
+					 XAXIDMA_DELAY_SCALE);
+	return min(result, FIELD_MAX(XAXIDMA_DELAY_MASK));
 }
 
 /**
@@ -415,6 +412,7 @@ static void axienet_set_mac_address(struct net_device *ndev,
 static int netdev_set_mac_address(struct net_device *ndev, void *p)
 {
 	struct sockaddr *addr = p;
+
 	axienet_set_mac_address(ndev, addr->sa_data);
 	return 0;
 }
@@ -432,25 +430,31 @@
 */
 static void axienet_set_multicast_list(struct net_device *ndev)
 {
-	int i;
+	int i = 0;
 	u32 reg, af0reg, af1reg;
 	struct axienet_local *lp = netdev_priv(ndev);
 
-	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
-	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
-		/* We must make the kernel realize we had to move into
-		 * promiscuous mode. If it was a promiscuous mode request
-		 * the flag is already set. If not we set it.
-		 */
-		ndev->flags |= IFF_PROMISC;
-		reg = axienet_ior(lp, XAE_FMI_OFFSET);
+	reg = axienet_ior(lp, XAE_FMI_OFFSET);
+	reg &= ~XAE_FMI_PM_MASK;
+	if (ndev->flags & IFF_PROMISC)
 		reg |= XAE_FMI_PM_MASK;
+	else
+		reg &= ~XAE_FMI_PM_MASK;
+	axienet_iow(lp, XAE_FMI_OFFSET, reg);
+
+	if (ndev->flags & IFF_ALLMULTI ||
+	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
+		reg &= 0xFFFFFF00;
 		axienet_iow(lp, XAE_FMI_OFFSET, reg);
-		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
+		axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
+		axienet_iow(lp, XAE_AF1_OFFSET, 0);
+		axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
+		axienet_iow(lp, XAE_AM1_OFFSET, 0);
+		axienet_iow(lp, XAE_FFE_OFFSET, 1);
+		i = 1;
 	} else if (!netdev_mc_empty(ndev)) {
 		struct netdev_hw_addr *ha;
 
-		i = 0;
 		netdev_for_each_mc_addr(ha, ndev) {
 			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
 				break;
@@ -463,30 +467,24 @@ static void axienet_set_multicast_list(struct net_device *ndev)
 			af1reg = (ha->addr[4]);
 			af1reg |= (ha->addr[5] << 8);
 
-			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
+			reg &= 0xFFFFFF00;
 			reg |= i;
 
 			axienet_iow(lp, XAE_FMI_OFFSET, reg);
 			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
 			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
+			axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
+			axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
+			axienet_iow(lp, XAE_FFE_OFFSET, 1);
 			i++;
 		}
-	} else {
-		reg = axienet_ior(lp, XAE_FMI_OFFSET);
-		reg &= ~XAE_FMI_PM_MASK;
+	}
+	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
+		reg &= 0xFFFFFF00;
+		reg |= i;
 		axienet_iow(lp, XAE_FMI_OFFSET, reg);
-
-		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
-			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
-			reg |= i;
-
-			axienet_iow(lp, XAE_FMI_OFFSET, reg);
-			axienet_iow(lp, XAE_AF0_OFFSET, 0);
-			axienet_iow(lp, XAE_AF1_OFFSET, 0);
-		}
-
-		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
+		axienet_iow(lp, XAE_FFE_OFFSET, 0);
 	}
 }
@@ -518,11 +516,55 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
 	lp->options |= options;
 }
 
+static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
+{
+	u32 counter;
+
+	if (lp->reset_in_progress)
+		return lp->hw_stat_base[stat];
+
+	counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
+	return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
+}
+
+static void axienet_stats_update(struct axienet_local *lp, bool reset)
+{
+	enum temac_stat stat;
+
+	write_seqcount_begin(&lp->hw_stats_seqcount);
+	lp->reset_in_progress = reset;
+	for (stat = 0; stat < STAT_COUNT; stat++) {
+		u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
+
+		lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
+		lp->hw_last_counter[stat] = counter;
+	}
+	write_seqcount_end(&lp->hw_stats_seqcount);
+}
+
+static void axienet_refresh_stats(struct work_struct *work)
+{
+	struct axienet_local *lp = container_of(work, struct axienet_local,
+						stats_work.work);
+
+	mutex_lock(&lp->stats_lock);
+	axienet_stats_update(lp, false);
+	mutex_unlock(&lp->stats_lock);
+
+	/* Just less than 2^32 bytes at 2.5 GBit/s */
+	schedule_delayed_work(&lp->stats_work, 13 * HZ);
+}
+
 static int __axienet_device_reset(struct axienet_local *lp)
 {
 	u32 value;
 	int ret;
 
+	/* Save statistics counters in case they will be reset */
+	mutex_lock(&lp->stats_lock);
+	if (lp->features & XAE_FEATURE_STATS)
+		axienet_stats_update(lp, true);
+
 	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
 	 * process of Axi DMA takes a while to complete as all pending
 	 * commands/transfers will be flushed or completed during this
@@ -537,7 +579,7 @@ static int __axienet_device_reset(struct axienet_local *lp)
 				    XAXIDMA_TX_CR_OFFSET);
 	if (ret) {
 		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
-		return ret;
+		goto out;
 	}
 
 	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
@@ -547,10 +589,29 @@
 				    XAE_IS_OFFSET);
 	if (ret) {
 		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
-		return ret;
+		goto out;
 	}
 
-	return 0;
+	/* Update statistics counters with new values */
+	if (lp->features & XAE_FEATURE_STATS) {
+		enum temac_stat stat;
+
+		write_seqcount_begin(&lp->hw_stats_seqcount);
+		lp->reset_in_progress = false;
+		for (stat = 0; stat < STAT_COUNT; stat++) {
+			u32 counter =
+				axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
+
+			lp->hw_stat_base[stat] +=
+				lp->hw_last_counter[stat] - counter;
+			lp->hw_last_counter[stat] = counter;
+		}
+		write_seqcount_end(&lp->hw_stats_seqcount);
+	}
+
+out:
+	mutex_unlock(&lp->stats_lock);
+	return ret;
 }
 
 /**
@@ -613,8 +674,7 @@ static int axienet_device_reset(struct net_device *ndev)
 		lp->options |= XAE_OPTION_VLAN;
 		lp->options &= (~XAE_OPTION_JUMBO);
 
-	if ((ndev->mtu > XAE_MTU) &&
-	    (ndev->mtu <= XAE_JUMBO_MTU)) {
+	if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
 		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
 				   XAE_TRL_SIZE;
@@ -673,15 +733,15 @@
 *
 * Would either be called after a successful transmit operation, or after
 * there was an error when setting up the chain.
- * Returns the number of descriptors handled.
+ * Returns the number of packets handled.
 */
 static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
 				 int nr_bds, bool force, u32 *sizep,
 				 int budget)
 {
 	struct axidma_bd *cur_p;
 	unsigned int status;
+	int i, packets = 0;
 	dma_addr_t phys;
-	int i;
 
 	for (i = 0; i < nr_bds; i++) {
 		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
@@ -700,8 +760,10 @@
 			       (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
 			       DMA_TO_DEVICE);
 
-		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
+		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
 			napi_consume_skb(cur_p->skb, budget);
+			packets++;
+		}
 
 		cur_p->app0 = 0;
 		cur_p->app1 = 0;
@@ -717,7 +779,13 @@
 			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
 	}
 
-	return i;
+	if (!force) {
+		lp->tx_bd_ci += i;
+		if (lp->tx_bd_ci >= lp->tx_bd_num)
+			lp->tx_bd_ci %= lp->tx_bd_num;
+	}
+
+	return packets;
 }
 
 /**
@@ -853,13 +921,13 @@ axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
 	skbuf_dma->sg_len = sg_len;
 	dma_tx_desc->callback_param = lp;
 	dma_tx_desc->callback_result = axienet_dma_tx_cb;
-	dmaengine_submit(dma_tx_desc);
-	dma_async_issue_pending(lp->tx_chan);
 	txq = skb_get_tx_queue(lp->ndev, skb);
 	netdev_tx_sent_queue(txq, skb->len);
 	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
 			     MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
 
+	dmaengine_submit(dma_tx_desc);
+	dma_async_issue_pending(lp->tx_chan);
 	return NETDEV_TX_OK;
 
 xmit_error_unmap_sg:
@@ -890,13 +958,10 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
 	u32 size = 0;
 	int packets;
 
-	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);
+	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
+					&size, budget);
 
 	if (packets) {
-		lp->tx_bd_ci += packets;
-		if (lp->tx_bd_ci >= lp->tx_bd_num)
-			lp->tx_bd_ci %= lp->tx_bd_num;
-
 		u64_stats_update_begin(&lp->tx_stat_sync);
 		u64_stats_add(&lp->tx_packets, packets);
 		u64_stats_add(&lp->tx_bytes, size);
@@ -983,6 +1048,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		if (net_ratelimit())
 			netdev_err(ndev, "TX DMA mapping error\n");
 		ndev->stats.tx_dropped++;
+		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 	desc_set_phys_addr(lp, phys, cur_p);
@@ -1003,6 +1069,7 @@
 			ndev->stats.tx_dropped++;
 			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
 					      true, NULL, 0);
+			dev_kfree_skb_any(skb);
 			return NETDEV_TX_OK;
 		}
 		desc_set_phys_addr(lp, phys, cur_p);
@@ -1125,9 +1192,7 @@ static int axienet_rx_poll(struct napi_struct *napi, int budget)
 			    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
 			}
-		} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
-			   skb->protocol == htons(ETH_P_IP) &&
-			   skb->len > 64) {
+		} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
 			skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
 			skb->ip_summed = CHECKSUM_COMPLETE;
 		}
@@ -1221,9 +1286,10 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
 		u32 cr = lp->tx_dma_cr;
 
 		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
-		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
-		napi_schedule(&lp->napi_tx);
+		if (napi_schedule_prep(&lp->napi_tx)) {
+			axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+			__napi_schedule(&lp->napi_tx);
+		}
 	}
 
 	return IRQ_HANDLED;
@@ -1265,9 +1331,10 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
 		u32 cr = lp->rx_dma_cr;
 
 		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
-		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
-		napi_schedule(&lp->napi_rx);
+		if (napi_schedule_prep(&lp->napi_rx)) {
+			axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+			__napi_schedule(&lp->napi_rx);
+		}
 	}
 
 	return IRQ_HANDLED;
@@ -1296,7 +1363,7 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
 		ndev->stats.rx_missed_errors++;
 
 	if (pending & XAE_INT_RXRJECT_MASK)
-		ndev->stats.rx_frame_errors++;
+		ndev->stats.rx_dropped++;
 
 	axienet_iow(lp, XAE_IS_OFFSET, pending);
 	return IRQ_HANDLED;
@@ -1459,6 +1526,7 @@ static int axienet_init_legacy_dma(struct net_device *ndev)
 	struct axienet_local *lp = netdev_priv(ndev);
 
 	/* Enable worker thread for Axi DMA error handling */
+	lp->stopping = false;
 	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
 
 	napi_enable(&lp->napi_rx);
@@ -1514,8 +1582,6 @@ static int axienet_open(struct net_device *ndev)
 	int ret;
 	struct axienet_local *lp = netdev_priv(ndev);
 
-	dev_dbg(&ndev->dev, "%s\n", __func__);
-
 	/* When we do an Axi Ethernet reset, it resets the complete core
 	 * including the MDIO. MDIO must be disabled before resetting.
 	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
@@ -1532,6 +1598,9 @@
 	phylink_start(lp->phylink);
 
+	/* Start the statistics refresh work */
+	schedule_delayed_work(&lp->stats_work, 0);
+
 	if (lp->use_dmaengine) {
 		/* Enable interrupts for Axi Ethernet core (if defined) */
 		if (lp->eth_irq > 0) {
@@ -1556,6 +1625,7 @@
 err_free_eth_irq:
 	if (lp->eth_irq > 0)
 		free_irq(lp->eth_irq, ndev);
 err_phy:
+	cancel_delayed_work_sync(&lp->stats_work);
 	phylink_stop(lp->phylink);
 	phylink_disconnect_phy(lp->phylink);
 	return ret;
@@ -1576,13 +1646,16 @@ static int axienet_stop(struct net_device *ndev)
 	struct axienet_local *lp = netdev_priv(ndev);
 	int i;
 
-	dev_dbg(&ndev->dev, "axienet_close()\n");
-
 	if (!lp->use_dmaengine) {
+		WRITE_ONCE(lp->stopping, true);
+		flush_work(&lp->dma_err_task);
+
 		napi_disable(&lp->napi_tx);
 		napi_disable(&lp->napi_rx);
 	}
 
+	cancel_delayed_work_sync(&lp->stats_work);
+
 	phylink_stop(lp->phylink);
 	phylink_disconnect_phy(lp->phylink);
 
@@ -1641,7 +1714,7 @@ static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
 		     XAE_TRL_SIZE) > lp->rxmem)
 		return -EINVAL;
 
-	ndev->mtu = new_mtu;
+	WRITE_ONCE(ndev->mtu, new_mtu);
 
 	return 0;
 }
@@ -1657,6 +1730,7 @@
 static void axienet_poll_controller(struct net_device *ndev)
 {
 	struct axienet_local *lp = netdev_priv(ndev);
+
 	disable_irq(lp->tx_irq);
 	disable_irq(lp->rx_irq);
 	axienet_rx_irq(lp->tx_irq, ndev);
@@ -1695,6 +1769,35 @@ axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 		stats->tx_packets = u64_stats_read(&lp->tx_packets);
 		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
 	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
+
+	if (!(lp->features & XAE_FEATURE_STATS))
+		return;
+
+	do {
+		start = read_seqcount_begin(&lp->hw_stats_seqcount);
+		stats->rx_length_errors =
+			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
+		stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
+		stats->rx_frame_errors =
+			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
+		stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
+				   axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
+				   stats->rx_length_errors +
+				   stats->rx_crc_errors +
+				   stats->rx_frame_errors;
+		stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
+
+		stats->tx_aborted_errors =
+			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
+		stats->tx_fifo_errors =
+			axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
+		stats->tx_window_errors =
+			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
+		stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
+				   stats->tx_aborted_errors +
+				   stats->tx_fifo_errors +
+				   stats->tx_window_errors;
+	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
 }
 
 static const struct net_device_ops axienet_netdev_ops = {
@@ -1945,19 +2048,36 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
 	struct axienet_local *lp = netdev_priv(ndev);
 
 	if (netif_running(ndev)) {
-		netdev_err(ndev,
-			   "Please stop netif before applying configuration\n");
-		return -EFAULT;
+		NL_SET_ERR_MSG(extack,
+			       "Please stop netif before applying configuration");
+		return -EBUSY;
+	}
+
+	if (ecoalesce->rx_max_coalesced_frames > 255 ||
+	    ecoalesce->tx_max_coalesced_frames > 255) {
+		NL_SET_ERR_MSG(extack, "frames must be less than 256");
+		return -EINVAL;
+	}
+
+	if (!ecoalesce->rx_max_coalesced_frames ||
+	    !ecoalesce->tx_max_coalesced_frames) {
+		NL_SET_ERR_MSG(extack, "frames must be non-zero");
+		return -EINVAL;
+	}
+
+	if ((ecoalesce->rx_max_coalesced_frames > 1 &&
+	     !ecoalesce->rx_coalesce_usecs) ||
+	    (ecoalesce->tx_max_coalesced_frames > 1 &&
+	     !ecoalesce->tx_coalesce_usecs)) {
+		NL_SET_ERR_MSG(extack,
+			       "usecs must be non-zero when frames is greater than one");
+		return -EINVAL;
 	}
 
-	if (ecoalesce->rx_max_coalesced_frames)
-		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
-	if (ecoalesce->rx_coalesce_usecs)
-		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
-	if (ecoalesce->tx_max_coalesced_frames)
-		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
-	if (ecoalesce->tx_coalesce_usecs)
-		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
+	lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
+	lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
+	lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
+	lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
 
 	return 0;
 }
@@ -1987,6 +2107,213 @@ static int axienet_ethtools_nway_reset(struct net_device *dev)
 	return phylink_ethtool_nway_reset(lp->phylink);
 }
 
+static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
+					       struct ethtool_stats *stats,
+					       u64 *data)
+{
+	struct axienet_local *lp = netdev_priv(dev);
+	unsigned int start;
+
+	do {
+		start = read_seqcount_begin(&lp->hw_stats_seqcount);
+		data[0] = axienet_stat(lp, STAT_RX_BYTES);
+		data[1] = axienet_stat(lp, STAT_TX_BYTES);
+		data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
+		data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
+		data[6] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
+		data[7] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
+		data[8] = axienet_stat(lp, STAT_USER_DEFINED0);
+		data[9] = axienet_stat(lp, STAT_USER_DEFINED1);
+		data[10] = axienet_stat(lp, STAT_USER_DEFINED2);
+	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
+}
+
+static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
+	"Received bytes",
+	"Transmitted bytes",
+	"RX Good VLAN Tagged Frames",
+	"TX Good VLAN Tagged Frames",
+	"TX Good PFC Frames",
+	"RX Good PFC Frames",
+	"User Defined Counter 0",
+	"User Defined Counter 1",
+	"User Defined Counter 2",
+};
+
+static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(data, axienet_ethtool_stats_strings,
+		       sizeof(axienet_ethtool_stats_strings));
+		break;
+	}
+}
+
+static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
+{
+	struct axienet_local *lp = netdev_priv(dev);
+
+	switch (sset) {
+	case ETH_SS_STATS:
+		if (lp->features & XAE_FEATURE_STATS)
+			return ARRAY_SIZE(axienet_ethtool_stats_strings);
+		fallthrough;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void
+axienet_ethtools_get_pause_stats(struct net_device *dev,
+				 struct ethtool_pause_stats *pause_stats)
+{
+	struct axienet_local *lp = netdev_priv(dev);
+	unsigned int start;
+
+	if (!(lp->features & XAE_FEATURE_STATS))
+		return;
+
+	do {
+		start = read_seqcount_begin(&lp->hw_stats_seqcount);
+		pause_stats->tx_pause_frames =
+			axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
+		pause_stats->rx_pause_frames =
+			axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
+	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
+}
+
+static void
+axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
+				  struct ethtool_eth_mac_stats *mac_stats)
+{
+	struct axienet_local *lp = netdev_priv(dev);
+	unsigned int start;
+
+	if (!(lp->features & XAE_FEATURE_STATS))
+		return;
+
+	do {
+		start = read_seqcount_begin(&lp->hw_stats_seqcount);
+		mac_stats->FramesTransmittedOK =
+			axienet_stat(lp, STAT_TX_GOOD_FRAMES);
+		mac_stats->SingleCollisionFrames =
+			axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
+		mac_stats->MultipleCollisionFrames =
+			axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
+		mac_stats->FramesReceivedOK =
+			axienet_stat(lp, STAT_RX_GOOD_FRAMES);
+		mac_stats->FrameCheckSequenceErrors =
+			axienet_stat(lp, STAT_RX_FCS_ERRORS);
+		mac_stats->AlignmentErrors =
+			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
+		mac_stats->FramesWithDeferredXmissions =
+			axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
+		mac_stats->LateCollisions =
+			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
+		mac_stats->FramesAbortedDueToXSColls =
+			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
+		mac_stats->MulticastFramesXmittedOK =
+			axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
+		mac_stats->BroadcastFramesXmittedOK =
+			axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
+		mac_stats->FramesWithExcessiveDeferral =
+			axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
+		mac_stats->MulticastFramesReceivedOK =
+			axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
+		mac_stats->BroadcastFramesReceivedOK =
+			axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
+		mac_stats->InRangeLengthErrors =
+			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
+	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
+}
+
+static void
+axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
+				   struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+	struct axienet_local *lp = netdev_priv(dev);
+	unsigned int start;
+
+	if (!(lp->features & XAE_FEATURE_STATS))
+		return;
+
+	do {
+		start = read_seqcount_begin(&lp->hw_stats_seqcount);
+		ctrl_stats->MACControlFramesTransmitted =
+			axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
+		ctrl_stats->MACControlFramesReceived =
+			axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
+		ctrl_stats->UnsupportedOpcodesReceived =
+			axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
+	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
+}
+
+static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
+	{   64,    64 },
+	{   65,   127 },
+	{  128,   255 },
+	{  256,   511 },
+	{  512,  1023 },
+	{ 1024,  1518 },
+	{ 1519, 16384 },
+	{ },
+};
+
+static void
+axienet_ethtool_get_rmon_stats(struct net_device *dev,
+			       struct ethtool_rmon_stats *rmon_stats,
+			       const struct ethtool_rmon_hist_range **ranges)
+{
+	struct axienet_local *lp = netdev_priv(dev);
+	unsigned int start;
+
+	if (!(lp->features & XAE_FEATURE_STATS))
+		return;
+
+	do {
+		start = read_seqcount_begin(&lp->hw_stats_seqcount);
+		rmon_stats->undersize_pkts =
+			axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
+		rmon_stats->oversize_pkts =
+			axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
+		rmon_stats->fragments =
+			axienet_stat(lp, STAT_FRAGMENT_FRAMES);
+
+		rmon_stats->hist[0] =
+			axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
+		rmon_stats->hist[1] =
+			axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
+		rmon_stats->hist[2] =
+			axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
+		rmon_stats->hist[3] =
+			axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
+		rmon_stats->hist[4] =
+			axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
+		rmon_stats->hist[5] =
+			axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
+		rmon_stats->hist[6] =
+			rmon_stats->oversize_pkts;
+
+		rmon_stats->hist_tx[0] =
+			axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
+		rmon_stats->hist_tx[1] =
+			axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
+		rmon_stats->hist_tx[2] =
+			axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
+		rmon_stats->hist_tx[3] =
+			axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
+		rmon_stats->hist_tx[4] =
+			axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
+		rmon_stats->hist_tx[5] =
+			axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
+		rmon_stats->hist_tx[6] =
+			axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
+	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
+
+	*ranges = axienet_rmon_ranges;
+}
+
 static const struct ethtool_ops axienet_ethtool_ops = {
 	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
 				     ETHTOOL_COALESCE_USECS,
@@ -2003,6 +2330,13 @@ static const struct ethtool_ops axienet_ethtool_ops = {
 	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
 	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
 	.nway_reset = axienet_ethtools_nway_reset,
+	.get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
+	.get_strings = axienet_ethtools_get_strings,
+	.get_sset_count = axienet_ethtools_get_sset_count,
+	.get_pause_stats = axienet_ethtools_get_pause_stats,
+	.get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
+	.get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
+	.get_rmon_stats = axienet_ethtool_get_rmon_stats,
 };
 
 static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
@@ -2011,11 +2345,12 @@
 }
 
 static void axienet_pcs_get_state(struct phylink_pcs *pcs,
+				  unsigned int neg_mode,
 				  struct phylink_link_state *state)
 {
 	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
 
-	phylink_mii_c22_pcs_get_state(pcs_phy, state);
+	phylink_mii_c22_pcs_get_state(pcs_phy, neg_mode, state);
 }
 
 static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
@@ -2153,6 +2488,10 @@ static void axienet_dma_err_handler(struct work_struct *work)
 						dma_err_task);
 	struct net_device *ndev = lp->ndev;
 
+	/* Don't bother if we are going to stop anyway */
+	if (READ_ONCE(lp->stopping))
+		return;
+
 	napi_disable(&lp->napi_tx);
 	napi_disable(&lp->napi_rx);
 
@@ -2219,9 +2558,9 @@
 			  ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 	axienet_set_mac_address(ndev, NULL);
 	axienet_set_multicast_list(ndev);
-	axienet_setoptions(ndev, lp->options);
 	napi_enable(&lp->napi_rx);
 	napi_enable(&lp->napi_tx);
+	axienet_setoptions(ndev, lp->options);
 }
 
 /**
@@ -2254,7 +2593,6 @@ static int axienet_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, ndev);
 	SET_NETDEV_DEV(ndev, &pdev->dev);
-	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
 	ndev->features = NETIF_F_SG;
 	ndev->ethtool_ops = &axienet_ethtool_ops;
@@ -2272,6 +2610,10 @@ static int axienet_probe(struct platform_device *pdev)
 	u64_stats_init(&lp->rx_stat_sync);
 	u64_stats_init(&lp->tx_stat_sync);
 
+	mutex_init(&lp->stats_lock);
+	seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
+	INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
+
 	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
 	if (!lp->axi_clk) {
 		/* For backward compatibility, if named AXI clock is not present,
@@ -2312,42 +2654,35 @@
 	/* Setup checksum offload, but default to off if not specified */
 	lp->features = 0;
 
+	if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
+		lp->features |= XAE_FEATURE_STATS;
+
 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
 	if (!ret) {
 		switch (value) {
 		case 1:
-			lp->csum_offload_on_tx_path =
-				XAE_FEATURE_PARTIAL_TX_CSUM;
 			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
-			/* Can checksum TCP/UDP over IPv4. */
-			ndev->features |= NETIF_F_IP_CSUM;
+			/* Can checksum any contiguous range */
+			ndev->features |= NETIF_F_HW_CSUM;
 			break;
 		case 2:
-			lp->csum_offload_on_tx_path =
-				XAE_FEATURE_FULL_TX_CSUM;
 			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
 			/* Can checksum TCP/UDP over IPv4. */
 			ndev->features |= NETIF_F_IP_CSUM;
 			break;
-		default:
-			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
 		}
 	}
 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
 	if (!ret) {
 		switch (value) {
 		case 1:
-			lp->csum_offload_on_rx_path =
-				XAE_FEATURE_PARTIAL_RX_CSUM;
 			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
+			ndev->features |= NETIF_F_RXCSUM;
 			break;
 		case 2:
-			lp->csum_offload_on_rx_path =
-				XAE_FEATURE_FULL_RX_CSUM;
 			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
+			ndev->features |= NETIF_F_RXCSUM;
 			break;
-		default:
-			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
 		}
 	}
 	/* For supporting jumbo frames, the Axi Ethernet hardware must have
@@ -2397,7 +2732,7 @@
 		goto cleanup_clk;
 	}
 
-	if (!of_find_property(pdev->dev.of_node, "dmas", NULL)) {
+	if (!of_property_present(pdev->dev.of_node, "dmas")) {
 		/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
 		np = of_parse_phandle(pdev->dev.of_node, "axistream-connected",
 				      0);
@@ -2562,6 +2897,7 @@ static int axienet_probe(struct platform_device *pdev)
 	lp->phylink_config.dev = &ndev->dev;
 	lp->phylink_config.type = PHYLINK_NETDEV;
+	lp->phylink_config.mac_managed_pm = true;
 	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
 		MAC_10FD | MAC_100FD | MAC_1000FD;
@@ -2679,7 +3015,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
 
 static struct platform_driver axienet_driver = {
 	.probe = axienet_probe,
-	.remove_new = axienet_remove,
+	.remove = axienet_remove,
 	.shutdown = axienet_shutdown,
 	.driver = {
 		.name = "xilinx_axienet",
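A note on the reworked axienet_usec_to_timer() above: one DMA timeout interval is 125 SG-clock periods, so the conversion divides by XAXIDMA_DELAY_SCALE (125 * USEC_PER_SEC) and clamps to the width of XAXIDMA_DELAY_MASK. A hedged sketch of the same arithmetic (assuming the delay field is 8 bits wide, which matches the old clamp at 255; usec_to_timer is a hypothetical standalone name):

#include <linux/math64.h>
#include <linux/minmax.h>
#include <linux/time64.h>
#include <linux/types.h>

static u32 usec_to_timer(u32 usec, u64 clk_rate)
{
	/* 1 timeout interval = 125 SG clock periods, so scale by
	 * 125 * USEC_PER_SEC when converting from microseconds.
	 */
	u64 result = DIV64_U64_ROUND_CLOSEST((u64)usec * clk_rate,
					     125ULL * USEC_PER_SEC);

	/* Clamp to the assumed 8-bit timer field */
	return min_t(u64, result, 255);
}

For example, with a 125 MHz SG clock one interval is exactly 1 us, so a 50 us coalesce delay programs a timer value of 50.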
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 2f07fde361aa..9ca2643c921e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -20,7 +20,14 @@
 #define DEFAULT_MDIO_FREQ	2500000 /* 2.5 MHz */
 #define DEFAULT_HOST_CLOCK	150000000 /* 150 MHz */
 
-/* Wait till MDIO interface is ready to accept a new transaction.*/
+/**
+ * axienet_mdio_wait_until_ready - MDIO wait function
+ * @lp:	Pointer to axienet local data structure.
+ *
+ * Return : 0 on success, Negative value on errors
+ *
+ * Wait till MDIO interface is ready to accept a new transaction.
+ */
 static int axienet_mdio_wait_until_ready(struct axienet_local *lp)
 {
 	u32 val;
@@ -30,14 +37,24 @@ static int axienet_mdio_wait_until_ready(struct axienet_local *lp)
 				  1, 20000);
 }
 
-/* Enable the MDIO MDC. Called prior to a read/write operation */
+/**
+ * axienet_mdio_mdc_enable - MDIO MDC enable function
+ * @lp:	Pointer to axienet local data structure.
+ *
+ * Enable the MDIO MDC. Called prior to a read/write operation
+ */
 static void axienet_mdio_mdc_enable(struct axienet_local *lp)
 {
 	axienet_iow(lp, XAE_MDIO_MC_OFFSET,
 		    ((u32)lp->mii_clk_div | XAE_MDIO_MC_MDIOEN_MASK));
 }
 
-/* Disable the MDIO MDC. Called after a read/write operation*/
+/**
+ * axienet_mdio_mdc_disable - MDIO MDC disable function
+ * @lp:	Pointer to axienet local data structure.
+ *
+ * Disable the MDIO MDC. Called after a read/write operation
+ */
 static void axienet_mdio_mdc_disable(struct axienet_local *lp)
 {
 	u32 mc_reg;
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 765aa516aada..ecf47107146d 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -7,6 +7,7 @@
 * Copyright (c) 2007 - 2013 Xilinx, Inc.
 */
 
+#include <linux/clk.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/uaccess.h>
@@ -1091,13 +1092,14 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
 	struct net_device *ndev = NULL;
 	struct net_local *lp = NULL;
 	struct device *dev = &ofdev->dev;
+	struct clk *clkin;
 	int rc = 0;
 
 	dev_info(dev, "Device Tree Probing\n");
 
 	/* Create an ethernet device instance */
-	ndev = alloc_etherdev(sizeof(struct net_local));
+	ndev = devm_alloc_etherdev(dev, sizeof(struct net_local));
 	if (!ndev)
 		return -ENOMEM;
 
@@ -1110,16 +1112,13 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
 	/* Get IRQ for the device */
 	rc = platform_get_irq(ofdev, 0);
 	if (rc < 0)
-		goto error;
+		return rc;
 	ndev->irq = rc;
 
-	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
-	lp->base_addr = devm_ioremap_resource(&ofdev->dev, res);
-	if (IS_ERR(lp->base_addr)) {
-		rc = PTR_ERR(lp->base_addr);
-		goto error;
-	}
+	lp->base_addr = devm_platform_get_and_ioremap_resource(ofdev, 0, &res);
+	if (IS_ERR(lp->base_addr))
+		return PTR_ERR(lp->base_addr);
 
 	ndev->mem_start = res->start;
 	ndev->mem_end = res->end;
@@ -1130,6 +1129,11 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
 	lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong");
 	lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
 
+	clkin = devm_clk_get_optional_enabled(&ofdev->dev, NULL);
+	if (IS_ERR(clkin))
+		return dev_err_probe(&ofdev->dev, PTR_ERR(clkin),
+				     "Failed to get and enable clock from Device Tree\n");
+
 	rc = of_get_ethdev_address(ofdev->dev.of_node, ndev);
 	if (rc) {
 		dev_warn(dev, "No MAC address found, using random\n");
@@ -1168,8 +1172,6 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
 put_node:
 	of_node_put(lp->phy_node);
-error:
-	free_netdev(ndev);
 	return rc;
 }
 
@@ -1198,8 +1200,6 @@ static void xemaclite_of_remove(struct platform_device *of_dev)
 	of_node_put(lp->phy_node);
 	lp->phy_node = NULL;
-
-	free_netdev(ndev);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1258,7 +1258,7 @@ static struct platform_driver xemaclite_of_driver = {
 		.of_match_table = xemaclite_of_match,
 	},
 	.probe = xemaclite_of_probe,
-	.remove_new = xemaclite_of_remove,
+	.remove = xemaclite_of_remove,
 };
 
 module_platform_driver(xemaclite_of_driver);
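The xilinx_emaclite hunks above are a straight conversion to device-managed resources, which is what allows the error: / free_netdev() unwind paths to be deleted. A condensed sketch of that pattern (hypothetical demo_probe(), using only the devm helpers that appear in the diff):

#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct resource *res;
	void __iomem *base;
	struct clk *clk;

	/* Freed automatically when the device is unbound */
	ndev = devm_alloc_etherdev(&pdev->dev, 0);
	if (!ndev)
		return -ENOMEM;

	/* platform_get_resource() + devm_ioremap_resource() in one call */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* Optional clock, enabled now, disabled and put automatically */
	clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "Failed to get and enable clock\n");

	/* Plain returns suffice on error: there are no labels to unwind */
	return 0;
}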