author      David S. Miller <davem@davemloft.net>    2023-11-19 19:46:40 +0000
committer   David S. Miller <davem@davemloft.net>    2023-11-19 19:46:40 +0000
commit      69d5ee8c1291c3e45ef12d531f28907bfbdbd9da (patch)
tree        a17804ae22b65155f935196e42bcdc13b24a903f
parent      ac40916a3f7243efbe6e129ebf495b5c33a3adfe (diff)
parent      ebd7bf60e21c567a7fbe0e2a7bc4be8406ff8093 (diff)
Merge branch 'am65-cpsw-ethtool-mac-stats'
Roger Quadros says:
====================
net: eth: am65-cpsw: add ethtool MAC stats
Gets the 'ethtool -S eth0 --groups eth-mac' command to work.
Also sets the default number of TX channels to the maximum available
and cleans up the error path in am65_cpsw_nuss_common_open().
Changelog:
v2:
- add __iomem to *stats, to prevent sparse warning
- clean up RX descriptors and free up SKB in error handling of
am65_cpsw_nuss_common_open()
- Re-arrange some functions to avoid forward declarations
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--   drivers/net/ethernet/ti/am65-cpsw-ethtool.c |  26
-rw-r--r--   drivers/net/ethernet/ti/am65-cpsw-nuss.c    | 197
2 files changed, 139 insertions, 84 deletions
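
For orientation before the full diff: the am65-cpsw-ethtool.c change amounts to one new callback that copies the per-port hardware counter registers into struct ethtool_eth_mac_stats, plus one new entry in the driver's ethtool_ops. Below is a condensed sketch of that shape, abridged from the full hunk that follows (only two of the fifteen counters are shown):

/* Abridged from the am65-cpsw-ethtool.c hunk below; not a complete listing. */
static void am65_cpsw_get_eth_mac_stats(struct net_device *ndev,
                                        struct ethtool_eth_mac_stats *s)
{
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
        struct am65_cpsw_stats_regs __iomem *stats = port->stat_base;

        /* each field is one read of the matching MMIO counter register */
        s->FramesTransmittedOK = readl_relaxed(&stats->tx_good_frames);
        s->FramesReceivedOK = readl_relaxed(&stats->rx_good_frames);
        /* ... the remaining counters are filled the same way ... */
}

const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
        /* ... existing callbacks ... */
        .get_eth_mac_stats = am65_cpsw_get_eth_mac_stats,
};

Once the callback is registered, the ethtool core reports these counters as the standard 'eth-mac' statistics group, which is what makes 'ethtool -S eth0 --groups eth-mac' start returning data.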
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index c51e2af91f69..b9e1d568604b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -662,6 +662,31 @@ static void am65_cpsw_get_ethtool_stats(struct net_device *ndev,
                                     hw_stats[i].offset);
 }
 
+static void am65_cpsw_get_eth_mac_stats(struct net_device *ndev,
+                                        struct ethtool_eth_mac_stats *s)
+{
+        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+        struct am65_cpsw_stats_regs __iomem *stats;
+
+        stats = port->stat_base;
+
+        s->FramesTransmittedOK = readl_relaxed(&stats->tx_good_frames);
+        s->SingleCollisionFrames = readl_relaxed(&stats->tx_single_coll_frames);
+        s->MultipleCollisionFrames = readl_relaxed(&stats->tx_mult_coll_frames);
+        s->FramesReceivedOK = readl_relaxed(&stats->rx_good_frames);
+        s->FrameCheckSequenceErrors = readl_relaxed(&stats->rx_crc_errors);
+        s->AlignmentErrors = readl_relaxed(&stats->rx_align_code_errors);
+        s->OctetsTransmittedOK = readl_relaxed(&stats->tx_octets);
+        s->FramesWithDeferredXmissions = readl_relaxed(&stats->tx_deferred_frames);
+        s->LateCollisions = readl_relaxed(&stats->tx_late_collisions);
+        s->CarrierSenseErrors = readl_relaxed(&stats->tx_carrier_sense_errors);
+        s->OctetsReceivedOK = readl_relaxed(&stats->rx_octets);
+        s->MulticastFramesXmittedOK = readl_relaxed(&stats->tx_multicast_frames);
+        s->BroadcastFramesXmittedOK = readl_relaxed(&stats->tx_broadcast_frames);
+        s->MulticastFramesReceivedOK = readl_relaxed(&stats->rx_multicast_frames);
+        s->BroadcastFramesReceivedOK = readl_relaxed(&stats->rx_broadcast_frames);
+};
+
 static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
                                          struct ethtool_ts_info *info)
 {
@@ -729,6 +754,7 @@ const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
         .get_sset_count         = am65_cpsw_get_sset_count,
         .get_strings            = am65_cpsw_get_strings,
         .get_ethtool_stats      = am65_cpsw_get_ethtool_stats,
+        .get_eth_mac_stats      = am65_cpsw_get_eth_mac_stats,
         .get_ts_info            = am65_cpsw_get_ethtool_ts_info,
         .get_priv_flags         = am65_cpsw_get_ethtool_priv_flags,
         .set_priv_flags         = am65_cpsw_set_ethtool_priv_flags,
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index ece9f8df98ae..7992a76ed4d8 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -136,6 +136,8 @@
                          NETIF_MSG_IFUP | NETIF_MSG_PROBE | NETIF_MSG_IFDOWN | \
                          NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
 
+#define AM65_CPSW_DEFAULT_TX_CHNS       8
+
 static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
                                       const u8 *dev_addr)
 {
@@ -367,10 +369,81 @@
 static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
 static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
 static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
 
+static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
+{
+        struct am65_cpsw_rx_chn *rx_chn = data;
+        struct cppi5_host_desc_t *desc_rx;
+        struct sk_buff *skb;
+        dma_addr_t buf_dma;
+        u32 buf_dma_len;
+        void **swdata;
+
+        desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+        swdata = cppi5_hdesc_get_swdata(desc_rx);
+        skb = *swdata;
+        cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+        k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+
+        dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+        k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+        dev_kfree_skb_any(skb);
+}
+
+static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
+                                     struct cppi5_host_desc_t *desc)
+{
+        struct cppi5_host_desc_t *first_desc, *next_desc;
+        dma_addr_t buf_dma, next_desc_dma;
+        u32 buf_dma_len;
+
+        first_desc = desc;
+        next_desc = first_desc;
+
+        cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
+        k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
+
+        dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);
+
+        next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
+        k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
+        while (next_desc_dma) {
+                next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+                                                       next_desc_dma);
+                cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
+                k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
+
+                dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
+                               DMA_TO_DEVICE);
+
+                next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
+                k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
+
+                k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
+        }
+
+        k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
+}
+
+static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
+{
+        struct am65_cpsw_tx_chn *tx_chn = data;
+        struct cppi5_host_desc_t *desc_tx;
+        struct sk_buff *skb;
+        void **swdata;
+
+        desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
+        swdata = cppi5_hdesc_get_swdata(desc_tx);
+        skb = *(swdata);
+        am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+
+        dev_kfree_skb_any(skb);
+}
+
 static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
 {
         struct am65_cpsw_host *host_p = am65_common_get_host(common);
-        int port_idx, i, ret;
+        int port_idx, i, ret, tx;
         struct sk_buff *skb;
         u32 val, port_mask;
@@ -437,8 +510,12 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
                                                   AM65_CPSW_MAX_PACKET_SIZE,
                                                   GFP_KERNEL);
                 if (!skb) {
+                        ret = -ENOMEM;
                         dev_err(common->dev, "cannot allocate skb\n");
-                        return -ENOMEM;
+                        if (i)
+                                goto fail_rx;
+
+                        return ret;
                 }
 
                 ret = am65_cpsw_nuss_rx_push(common, skb);
@@ -447,17 +524,28 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
                                 "cannot submit skb to channel rx, error %d\n",
                                 ret);
                         kfree_skb(skb);
+                        if (i)
+                                goto fail_rx;
+
                         return ret;
                 }
-                kmemleak_not_leak(skb);
         }
-        k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
 
-        for (i = 0; i < common->tx_ch_num; i++) {
-                ret = k3_udma_glue_enable_tx_chn(common->tx_chns[i].tx_chn);
-                if (ret)
-                        return ret;
-                napi_enable(&common->tx_chns[i].napi_tx);
+        ret = k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
+        if (ret) {
+                dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
+                goto fail_rx;
+        }
+
+        for (tx = 0; tx < common->tx_ch_num; tx++) {
+                ret = k3_udma_glue_enable_tx_chn(common->tx_chns[tx].tx_chn);
+                if (ret) {
+                        dev_err(common->dev, "couldn't enable tx chn %d: %d\n",
+                                tx, ret);
+                        tx--;
+                        goto fail_tx;
+                }
+                napi_enable(&common->tx_chns[tx].napi_tx);
         }
 
         napi_enable(&common->napi_rx);
@@ -468,10 +556,22 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
         dev_dbg(common->dev, "cpsw_nuss started\n");
         return 0;
-}
 
-static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma);
-static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);
+fail_tx:
+        while (tx >= 0) {
+                napi_disable(&common->tx_chns[tx].napi_tx);
+                k3_udma_glue_disable_tx_chn(common->tx_chns[tx].tx_chn);
+                tx--;
+        }
+
+        k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
+
+fail_rx:
+        k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, 0,
+                                  &common->rx_chns,
+                                  am65_cpsw_nuss_rx_cleanup, 0);
+        return ret;
+}
 
 static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
 {
@@ -646,27 +746,6 @@ runtime_put:
         return ret;
 }
 
-static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
-{
-        struct am65_cpsw_rx_chn *rx_chn = data;
-        struct cppi5_host_desc_t *desc_rx;
-        struct sk_buff *skb;
-        dma_addr_t buf_dma;
-        u32 buf_dma_len;
-        void **swdata;
-
-        desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
-        swdata = cppi5_hdesc_get_swdata(desc_rx);
-        skb = *swdata;
-        cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
-        k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
-
-        dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
-        k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
-        dev_kfree_skb_any(skb);
-}
-
 static void am65_cpsw_nuss_rx_ts(struct sk_buff *skb, u32 *psdata)
 {
         struct skb_shared_hwtstamps *ssh;
@@ -840,56 +919,6 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
         return num_rx;
 }
 
-static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
-                                     struct cppi5_host_desc_t *desc)
-{
-        struct cppi5_host_desc_t *first_desc, *next_desc;
-        dma_addr_t buf_dma, next_desc_dma;
-        u32 buf_dma_len;
-
-        first_desc = desc;
-        next_desc = first_desc;
-
-        cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
-        k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
-
-        dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);
-
-        next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
-        k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
-        while (next_desc_dma) {
-                next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
-                                                       next_desc_dma);
-                cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
-                k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
-
-                dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
-                               DMA_TO_DEVICE);
-
-                next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
-                k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
-
-                k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
-        }
-
-        k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
-}
-
-static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
-{
-        struct am65_cpsw_tx_chn *tx_chn = data;
-        struct cppi5_host_desc_t *desc_tx;
-        struct sk_buff *skb;
-        void **swdata;
-
-        desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
-        swdata = cppi5_hdesc_get_swdata(desc_tx);
-        skb = *(swdata);
-        am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
-
-        dev_kfree_skb_any(skb);
-}
-
 static struct sk_buff *
 am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
                                dma_addr_t desc_dma)
@@ -2897,7 +2926,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
         common->rx_flow_id_base = -1;
         init_completion(&common->tdown_complete);
-        common->tx_ch_num = 1;
+        common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS;
         common->pf_p0_rx_ptype_rrobin = false;
         common->default_vlan = 1;
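
The reworked error path in am65_cpsw_nuss_common_open() above follows the usual goto-based unwind: on failure, the TX channels that did come up are disabled again in reverse order, the RX channel is disabled if it was already enabled, and the RX channel is then reset so that SKBs already pushed are freed through am65_cpsw_nuss_rx_cleanup(). A minimal standalone sketch of that control flow, with hypothetical stubs standing in for the k3_udma_glue_* and NAPI calls (this is not driver code):

#include <stdio.h>

/* Hypothetical stand-ins for k3_udma_glue_enable/disable_*_chn(),
 * napi_enable()/napi_disable() and the RX reset-and-cleanup step.
 */
static int enable_rx(void) { return 0; }
static int enable_tx(int ch) { (void)ch; return 0; }
static void disable_tx(int ch) { (void)ch; }
static void disable_rx(void) { }
static void reset_rx(void) { }  /* frees SKBs already pushed to RX */

static int common_open_sketch(int tx_ch_num)
{
        int ret, tx;

        ret = enable_rx();
        if (ret)
                goto fail_rx;

        for (tx = 0; tx < tx_ch_num; tx++) {
                ret = enable_tx(tx);
                if (ret) {
                        tx--;   /* this channel never came up; unwind the rest */
                        goto fail_tx;
                }
        }
        return 0;

fail_tx:
        while (tx >= 0) {       /* undo in reverse order of setup */
                disable_tx(tx);
                tx--;
        }
        disable_rx();
fail_rx:
        reset_rx();
        return ret;
}

int main(void)
{
        printf("open: %d\n", common_open_sketch(8));
        return 0;
}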