Diffstat (limited to 'drivers/net/ethernet')
190 files changed, 4336 insertions, 2120 deletions
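Several drivers in this batch (aquantia atlantic, bnxt_en) move from irq_set_affinity_hint() to irq_update_affinity_hint(), which records the hint exposed in /proc/irq/*/affinity_hint without also forcing the IRQ's affinity the way the deprecated helper did. A minimal sketch of the request/free pattern those drivers follow; the "example" names and the handler argument are illustrative, not taken from any driver below:

#include <linux/interrupt.h>
#include <linux/pci.h>

static int example_request_vector(struct pci_dev *pdev, int i,
				  const struct cpumask *mask,
				  irq_handler_t handler, void *data)
{
	int irq = pci_irq_vector(pdev, i);
	int err;

	err = request_irq(irq, handler, 0, "example", data);
	if (err)
		return err;

	/* A hint only: irqbalance may honor it, but the kernel does not
	 * enforce it (use irq_set_affinity_and_hint() for that).
	 */
	irq_update_affinity_hint(irq, mask);
	return 0;
}

static void example_free_vector(struct pci_dev *pdev, int i, void *data)
{
	int irq = pci_irq_vector(pdev, i);

	irq_update_affinity_hint(irq, NULL);	/* clear the hint first */
	free_irq(irq, data);
}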
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 43c71f6b314f..08630ee94251 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -162,8 +162,8 @@ int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i, self->msix_entry_mask |= (1 << i); if (pdev->msix_enabled && affinity_mask) - irq_set_affinity_hint(pci_irq_vector(pdev, i), - affinity_mask); + irq_update_affinity_hint(pci_irq_vector(pdev, i), + affinity_mask); } return err; @@ -187,7 +187,7 @@ void aq_pci_func_free_irqs(struct aq_nic_s *self) continue; if (pdev->msix_enabled) - irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL); + irq_update_affinity_hint(pci_irq_vector(pdev, i), NULL); free_irq(pci_irq_vector(pdev, i), irq_data); self->msix_entry_mask &= ~(1U << i); } diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 31ee477dd131..8283aeee35fb 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -111,6 +111,7 @@ static void arc_emac_tx_clean(struct net_device *ndev) { struct arc_emac_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; + struct device *dev = ndev->dev.parent; unsigned int i; for (i = 0; i < TX_BD_NUM; i++) { @@ -140,7 +141,7 @@ static void arc_emac_tx_clean(struct net_device *ndev) stats->tx_bytes += skb->len; } - dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr), + dma_unmap_single(dev, dma_unmap_addr(tx_buff, addr), dma_unmap_len(tx_buff, len), DMA_TO_DEVICE); /* return the sk_buff to system */ @@ -174,6 +175,7 @@ static void arc_emac_tx_clean(struct net_device *ndev) static int arc_emac_rx(struct net_device *ndev, int budget) { struct arc_emac_priv *priv = netdev_priv(ndev); + struct device *dev = ndev->dev.parent; unsigned int work_done; for (work_done = 0; work_done < budget; work_done++) { @@ -223,9 +225,9 @@ static int arc_emac_rx(struct net_device *ndev, int budget) continue; } - addr = dma_map_single(&ndev->dev, (void *)skb->data, + addr = dma_map_single(dev, (void *)skb->data, EMAC_BUFFER_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(&ndev->dev, addr)) { + if (dma_mapping_error(dev, addr)) { if (net_ratelimit()) netdev_err(ndev, "cannot map dma buffer\n"); dev_kfree_skb(skb); @@ -237,7 +239,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget) } /* unmap previosly mapped skb */ - dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr), + dma_unmap_single(dev, dma_unmap_addr(rx_buff, addr), dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE); pktlen = info & LEN_MASK; @@ -423,6 +425,7 @@ static int arc_emac_open(struct net_device *ndev) { struct arc_emac_priv *priv = netdev_priv(ndev); struct phy_device *phy_dev = ndev->phydev; + struct device *dev = ndev->dev.parent; int i; phy_dev->autoneg = AUTONEG_ENABLE; @@ -445,9 +448,9 @@ static int arc_emac_open(struct net_device *ndev) if (unlikely(!rx_buff->skb)) return -ENOMEM; - addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data, + addr = dma_map_single(dev, (void *)rx_buff->skb->data, EMAC_BUFFER_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(&ndev->dev, addr)) { + if (dma_mapping_error(dev, addr)) { netdev_err(ndev, "cannot dma map\n"); dev_kfree_skb(rx_buff->skb); return -ENOMEM; @@ -548,6 +551,7 @@ static void arc_emac_set_rx_mode(struct net_device *ndev) static void arc_free_tx_queue(struct net_device *ndev) { struct arc_emac_priv *priv = netdev_priv(ndev); + 
struct device *dev = ndev->dev.parent; unsigned int i; for (i = 0; i < TX_BD_NUM; i++) { @@ -555,7 +559,7 @@ static void arc_free_tx_queue(struct net_device *ndev) struct buffer_state *tx_buff = &priv->tx_buff[i]; if (tx_buff->skb) { - dma_unmap_single(&ndev->dev, + dma_unmap_single(dev, dma_unmap_addr(tx_buff, addr), dma_unmap_len(tx_buff, len), DMA_TO_DEVICE); @@ -579,6 +583,7 @@ static void arc_free_tx_queue(struct net_device *ndev) static void arc_free_rx_queue(struct net_device *ndev) { struct arc_emac_priv *priv = netdev_priv(ndev); + struct device *dev = ndev->dev.parent; unsigned int i; for (i = 0; i < RX_BD_NUM; i++) { @@ -586,7 +591,7 @@ static void arc_free_rx_queue(struct net_device *ndev) struct buffer_state *rx_buff = &priv->rx_buff[i]; if (rx_buff->skb) { - dma_unmap_single(&ndev->dev, + dma_unmap_single(dev, dma_unmap_addr(rx_buff, addr), dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE); @@ -679,6 +684,7 @@ static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) unsigned int len, *txbd_curr = &priv->txbd_curr; struct net_device_stats *stats = &ndev->stats; __le32 *info = &priv->txbd[*txbd_curr].info; + struct device *dev = ndev->dev.parent; dma_addr_t addr; if (skb_padto(skb, ETH_ZLEN)) @@ -692,10 +698,9 @@ static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_BUSY; } - addr = dma_map_single(&ndev->dev, (void *)skb->data, len, - DMA_TO_DEVICE); + addr = dma_map_single(dev, (void *)skb->data, len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&ndev->dev, addr))) { + if (unlikely(dma_mapping_error(dev, addr))) { stats->tx_dropped++; stats->tx_errors++; dev_kfree_skb_any(skb); diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index 87f40c2ba904..078b1a72c161 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -133,6 +133,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv) struct arc_emac_mdio_bus_data *data = &priv->bus_data; struct device_node *np = priv->dev->of_node; const char *name = "Synopsys MII Bus"; + struct device_node *mdio_node; struct mii_bus *bus; int error; @@ -164,7 +165,13 @@ int arc_mdio_probe(struct arc_emac_priv *priv) snprintf(bus->id, MII_BUS_ID_SIZE, "%s", bus->name); - error = of_mdiobus_register(bus, priv->dev->of_node); + /* Backwards compatibility for EMAC nodes without MDIO subnode. 
*/ + mdio_node = of_get_child_by_name(np, "mdio"); + if (!mdio_node) + mdio_node = of_node_get(np); + + error = of_mdiobus_register(bus, mdio_node); + of_node_put(mdio_node); if (error) { mdiobus_free(bus); return dev_err_probe(priv->dev, error, diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c index 67928b5d8a26..9da5ae29a105 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c @@ -101,14 +101,14 @@ static int bcmasp_get_sset_count(struct net_device *dev, int string_set) static void bcmasp_get_strings(struct net_device *dev, u32 stringset, u8 *data) { + const char *str; unsigned int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < BCMASP_STATS_LEN; i++) { - memcpy(data + i * ETH_GSTRING_LEN, - bcmasp_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); + str = bcmasp_gstrings_stats[i].stat_string; + ethtool_puts(&data, str); } break; default: diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index e5e03aaa49f9..65e3a0656a4c 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1339,14 +1339,14 @@ static int bcm_enet_get_sset_count(struct net_device *netdev, static void bcm_enet_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { + const char *str; int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < BCM_ENET_STATS_LEN; i++) { - memcpy(data + i * ETH_GSTRING_LEN, - bcm_enet_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); + str = bcm_enet_gstrings_stats[i].stat_string; + ethtool_puts(&data, str); } break; } @@ -2503,14 +2503,14 @@ static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = { static void bcm_enetsw_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { + const char *str; int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { - memcpy(data + i * ETH_GSTRING_LEN, - bcm_enetsw_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); + str = bcm_enetsw_gstrings_stats[i].stat_string; + ethtool_puts(&data, str); } break; } diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 031e9e0cca53..42672c63f108 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -346,32 +346,22 @@ static void bcm_sysport_get_strings(struct net_device *dev, { struct bcm_sysport_priv *priv = netdev_priv(dev); const struct bcm_sysport_stats *s; - char buf[128]; - int i, j; + int i; switch (stringset) { case ETH_SS_STATS: - for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { + for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) { s = &bcm_sysport_gstrings_stats[i]; if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type)) continue; - memcpy(data + j * ETH_GSTRING_LEN, s->stat_string, - ETH_GSTRING_LEN); - j++; + ethtool_puts(&data, s->stat_string); } for (i = 0; i < dev->num_tx_queues; i++) { - snprintf(buf, sizeof(buf), "txq%d_packets", i); - memcpy(data + j * ETH_GSTRING_LEN, buf, - ETH_GSTRING_LEN); - j++; - - snprintf(buf, sizeof(buf), "txq%d_bytes", i); - memcpy(data + j * ETH_GSTRING_LEN, buf, - ETH_GSTRING_LEN); - j++; + ethtool_sprintf(&data, "txq%d_packets", i); + ethtool_sprintf(&data, "txq%d_bytes", i); } break; default: diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 6ffdc4229407..a461ec612e95 100644 --- 
a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1367,8 +1367,7 @@ static void bgmac_get_strings(struct net_device *dev, u32 stringset, return; for (i = 0; i < BGMAC_STATS_LEN; i++) - strscpy(data + i * ETH_GSTRING_LEN, - bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN); + ethtool_puts(&data, bgmac_get_strings_stats[i].name); } static void bgmac_get_ethtool_stats(struct net_device *dev, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index adf7b6b94941..44199855ebfb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -39,34 +39,34 @@ static const struct { int size; char string[ETH_GSTRING_LEN]; } bnx2x_q_stats_arr[] = { -/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" }, +/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" }, { Q_STATS_OFFSET32(total_unicast_packets_received_hi), - 8, "[%s]: rx_ucast_packets" }, + 8, "[%d]: rx_ucast_packets" }, { Q_STATS_OFFSET32(total_multicast_packets_received_hi), - 8, "[%s]: rx_mcast_packets" }, + 8, "[%d]: rx_mcast_packets" }, { Q_STATS_OFFSET32(total_broadcast_packets_received_hi), - 8, "[%s]: rx_bcast_packets" }, - { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" }, + 8, "[%d]: rx_bcast_packets" }, + { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" }, { Q_STATS_OFFSET32(rx_err_discard_pkt), - 4, "[%s]: rx_phy_ip_err_discards"}, + 4, "[%d]: rx_phy_ip_err_discards"}, { Q_STATS_OFFSET32(rx_skb_alloc_failed), - 4, "[%s]: rx_skb_alloc_discard" }, - { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" }, - { Q_STATS_OFFSET32(driver_xoff), 4, "[%s]: tx_exhaustion_events" }, - { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" }, + 4, "[%d]: rx_skb_alloc_discard" }, + { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" }, + { Q_STATS_OFFSET32(driver_xoff), 4, "[%d]: tx_exhaustion_events" }, + { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" }, /* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), - 8, "[%s]: tx_ucast_packets" }, + 8, "[%d]: tx_ucast_packets" }, { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), - 8, "[%s]: tx_mcast_packets" }, + 8, "[%d]: tx_mcast_packets" }, { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), - 8, "[%s]: tx_bcast_packets" }, + 8, "[%d]: tx_bcast_packets" }, { Q_STATS_OFFSET32(total_tpa_aggregations_hi), - 8, "[%s]: tpa_aggregations" }, + 8, "[%d]: tpa_aggregations" }, { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi), - 8, "[%s]: tpa_aggregated_frames"}, - { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"}, + 8, "[%d]: tpa_aggregated_frames"}, + { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%d]: tpa_bytes"}, { Q_STATS_OFFSET32(driver_filtered_tx_pkt), - 4, "[%s]: driver_filtered_tx_pkt" } + 4, "[%d]: driver_filtered_tx_pkt" } }; #define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr) @@ -3184,49 +3184,43 @@ static u32 bnx2x_get_private_flags(struct net_device *dev) static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { struct bnx2x *bp = netdev_priv(dev); - int i, j, k, start; - char queue_name[MAX_QUEUE_NAME_LEN+1]; + const char *str; + int i, j, start; switch (stringset) { case ETH_SS_STATS: - k = 0; if (is_multi(bp)) { for_each_eth_queue(bp, i) { - memset(queue_name, 0, sizeof(queue_name)); - snprintf(queue_name, sizeof(queue_name), - 
"%d", i); - for (j = 0; j < BNX2X_NUM_Q_STATS; j++) - snprintf(buf + (k + j)*ETH_GSTRING_LEN, - ETH_GSTRING_LEN, - bnx2x_q_stats_arr[j].string, - queue_name); - k += BNX2X_NUM_Q_STATS; + for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { + str = bnx2x_q_stats_arr[j].string; + ethtool_sprintf(&buf, str, i); + } } } - for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { + for (i = 0; i < BNX2X_NUM_STATS; i++) { if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i)) continue; - strcpy(buf + (k + j)*ETH_GSTRING_LEN, - bnx2x_stats_arr[i].string); - j++; + ethtool_puts(&buf, bnx2x_stats_arr[i].string); } break; case ETH_SS_TEST: + if (IS_VF(bp)) + break; /* First 4 tests cannot be done in MF mode */ if (!IS_MF(bp)) start = 0; else start = 4; - memcpy(buf, bnx2x_tests_str_arr + start, - ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp)); + for (i = start; i < BNX2X_NUM_TESTS_SF; i++) + ethtool_puts(&buf, bnx2x_tests_str_arr[i]); break; case ETH_SS_PRIV_FLAGS: - memcpy(buf, bnx2x_private_arr, - ETH_GSTRING_LEN * BNX2X_PRI_FLAG_LEN); + for (i = 0; i < BNX2X_PRI_FLAG_LEN; i++) + ethtool_puts(&buf, bnx2x_private_arr[i]); break; } } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index ca42b81133d7..4c1302a8f72d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -864,6 +864,11 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) bnapi->events &= ~BNXT_TX_CMP_EVENT; } +static bool bnxt_separate_head_pool(void) +{ + return PAGE_SIZE > BNXT_RX_PAGE_SIZE; +} + static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, struct bnxt_rx_ring_info *rxr, unsigned int *offset, @@ -886,27 +891,19 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, } static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping, + struct bnxt_rx_ring_info *rxr, gfp_t gfp) { - u8 *data; - struct pci_dev *pdev = bp->pdev; + unsigned int offset; + struct page *page; - if (gfp == GFP_ATOMIC) - data = napi_alloc_frag(bp->rx_buf_size); - else - data = netdev_alloc_frag(bp->rx_buf_size); - if (!data) + page = page_pool_alloc_frag(rxr->head_pool, &offset, + bp->rx_buf_size, gfp); + if (!page) return NULL; - *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset, - bp->rx_buf_use_size, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - - if (dma_mapping_error(&pdev->dev, *mapping)) { - skb_free_frag(data); - data = NULL; - } - return data; + *mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset; + return page_address(page) + offset; } int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, @@ -928,7 +925,7 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, rx_buf->data = page; rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset; } else { - u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp); + u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp); if (!data) return -ENOMEM; @@ -1179,13 +1176,14 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, } skb = napi_build_skb(data, bp->rx_buf_size); - dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, - bp->rx_dir, DMA_ATTR_WEAK_ORDERING); + dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, + bp->rx_dir); if (!skb) { - skb_free_frag(data); + page_pool_free_va(rxr->head_pool, data, true); return NULL; } + skb_mark_for_recycle(skb); skb_reserve(skb, bp->rx_offset); skb_put(skb, offset_and_len & 0xffff); return skb; @@ -1840,7 +1838,8 
@@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, u8 *new_data; dma_addr_t new_mapping; - new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC); + new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr, + GFP_ATOMIC); if (!new_data) { bnxt_abort_tpa(cpr, idx, agg_bufs); cpr->sw_stats->rx.rx_oom_discards += 1; @@ -1852,16 +1851,16 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, tpa_info->mapping = new_mapping; skb = napi_build_skb(data, bp->rx_buf_size); - dma_unmap_single_attrs(&bp->pdev->dev, mapping, - bp->rx_buf_use_size, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); + dma_sync_single_for_cpu(&bp->pdev->dev, mapping, + bp->rx_buf_use_size, bp->rx_dir); if (!skb) { - skb_free_frag(data); + page_pool_free_va(rxr->head_pool, data, true); bnxt_abort_tpa(cpr, idx, agg_bufs); cpr->sw_stats->rx.rx_oom_discards += 1; return NULL; } + skb_mark_for_recycle(skb); skb_reserve(skb, bp->rx_offset); skb_put(skb, len); } @@ -2254,11 +2253,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; - unsigned long flags; - spin_lock_irqsave(&ptp->ptp_lock, flags); - ns = timecounter_cyc2time(&ptp->tc, ts); - spin_unlock_irqrestore(&ptp->ptp_lock, flags); + ns = bnxt_timecounter_cyc2time(ptp, ts); memset(skb_hwtstamps(skb), 0, sizeof(*skb_hwtstamps(skb))); skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); @@ -2764,12 +2760,12 @@ static int bnxt_async_event_process(struct bnxt *bp, if (!ptp) goto async_event_process_exit; - spin_lock_irqsave(&ptp->ptp_lock, flags); bnxt_ptp_update_current_time(bp); ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) << BNXT_PHC_BITS) | ptp->current_time); + write_seqlock_irqsave(&ptp->ptp_lock, flags); bnxt_ptp_rtc_timecounter_init(ptp, ns); - spin_unlock_irqrestore(&ptp->ptp_lock, flags); + write_sequnlock_irqrestore(&ptp->ptp_lock, flags); } break; } @@ -3311,28 +3307,22 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) { - struct pci_dev *pdev = bp->pdev; int i, max_idx; max_idx = bp->rx_nr_pages * RX_DESC_CNT; for (i = 0; i < max_idx; i++) { struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; - dma_addr_t mapping = rx_buf->mapping; void *data = rx_buf->data; if (!data) continue; rx_buf->data = NULL; - if (BNXT_RX_PAGE_MODE(bp)) { + if (BNXT_RX_PAGE_MODE(bp)) page_pool_recycle_direct(rxr->page_pool, data); - } else { - dma_unmap_single_attrs(&pdev->dev, mapping, - bp->rx_buf_use_size, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - skb_free_frag(data); - } + else + page_pool_free_va(rxr->head_pool, data, true); } } @@ -3359,7 +3349,6 @@ static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; - struct pci_dev *pdev = bp->pdev; struct bnxt_tpa_idx_map *map; int i; @@ -3373,13 +3362,8 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) if (!data) continue; - dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping, - bp->rx_buf_use_size, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - tpa_info->data = NULL; - - skb_free_frag(data); + page_pool_free_va(rxr->head_pool, data, false); } skip_rx_tpa_free: @@ -3595,7 +3579,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp) xdp_rxq_info_unreg(&rxr->xdp_rxq); page_pool_destroy(rxr->page_pool); - rxr->page_pool = NULL; + if (rxr->page_pool != rxr->head_pool) + 
page_pool_destroy(rxr->head_pool); + rxr->page_pool = rxr->head_pool = NULL; kfree(rxr->rx_agg_bmap); rxr->rx_agg_bmap = NULL; @@ -3613,6 +3599,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp, int numa_node) { struct page_pool_params pp = { 0 }; + struct page_pool *pool; pp.pool_size = bp->rx_agg_ring_size; if (BNXT_RX_PAGE_MODE(bp)) @@ -3625,14 +3612,25 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp, pp.max_len = PAGE_SIZE; pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; - rxr->page_pool = page_pool_create(&pp); - if (IS_ERR(rxr->page_pool)) { - int err = PTR_ERR(rxr->page_pool); + pool = page_pool_create(&pp); + if (IS_ERR(pool)) + return PTR_ERR(pool); + rxr->page_pool = pool; - rxr->page_pool = NULL; - return err; + if (bnxt_separate_head_pool()) { + pp.pool_size = max(bp->rx_ring_size, 1024); + pool = page_pool_create(&pp); + if (IS_ERR(pool)) + goto err_destroy_pp; } + rxr->head_pool = pool; + return 0; + +err_destroy_pp: + page_pool_destroy(rxr->page_pool); + rxr->page_pool = NULL; + return PTR_ERR(pool); } static int bnxt_alloc_rx_rings(struct bnxt *bp) @@ -4183,7 +4181,8 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) u8 *data; for (i = 0; i < bp->max_tpa; i++) { - data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL); + data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, + GFP_KERNEL); if (!data) return -ENOMEM; @@ -10885,7 +10884,7 @@ static void bnxt_free_irq(struct bnxt *bp) irq = &bp->irq_tbl[map_idx]; if (irq->requested) { if (irq->have_cpumask) { - irq_set_affinity_hint(irq->vector, NULL); + irq_update_affinity_hint(irq->vector, NULL); free_cpumask_var(irq->cpu_mask); irq->have_cpumask = 0; } @@ -10940,10 +10939,10 @@ static int bnxt_request_irq(struct bnxt *bp) irq->have_cpumask = 1; cpumask_set_cpu(cpumask_local_spread(i, numa_node), irq->cpu_mask); - rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask); + rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask); if (rc) { netdev_warn(bp->dev, - "Set affinity failed, IRQ = %d\n", + "Update affinity hint failed, IRQ = %d\n", irq->vector); break; } @@ -13496,12 +13495,13 @@ static void bnxt_force_fw_reset(struct bnxt *bp) test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) return; + /* we have to serialize with bnxt_refclk_read()*/ if (ptp) { unsigned long flags; - spin_lock_irqsave(&ptp->ptp_lock, flags); + write_seqlock_irqsave(&ptp->ptp_lock, flags); set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); - spin_unlock_irqrestore(&ptp->ptp_lock, flags); + write_sequnlock_irqrestore(&ptp->ptp_lock, flags); } else { set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); } @@ -13565,12 +13565,13 @@ void bnxt_fw_reset(struct bnxt *bp) struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; int n = 0, tmo; + /* we have to serialize with bnxt_refclk_read()*/ if (ptp) { unsigned long flags; - spin_lock_irqsave(&ptp->ptp_lock, flags); + write_seqlock_irqsave(&ptp->ptp_lock, flags); set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); - spin_unlock_irqrestore(&ptp->ptp_lock, flags); + write_sequnlock_irqrestore(&ptp->ptp_lock, flags); } else { set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 69231e85140b..649955fa3e37 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1105,6 +1105,7 @@ struct bnxt_rx_ring_info { struct bnxt_ring_struct rx_agg_ring_struct; struct xdp_rxq_info xdp_rxq; struct page_pool *page_pool; + struct page_pool *head_pool; }; struct bnxt_rx_sw_stats { diff --git 
a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 6ef06579df53..cfd2c65b1c90 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1124,14 +1124,15 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) fkeys = &fltr->fkeys; fmasks = &fltr->fmasks; if (fkeys->basic.n_proto == htons(ETH_P_IP)) { - if (fkeys->basic.ip_proto == IPPROTO_ICMP || - fkeys->basic.ip_proto == IPPROTO_RAW) { + if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) { fs->flow_type = IP_USER_FLOW; fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; - if (fkeys->basic.ip_proto == IPPROTO_ICMP) - fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP; - else - fs->h_u.usr_ip4_spec.proto = IPPROTO_RAW; + fs->h_u.usr_ip4_spec.proto = BNXT_IP_PROTO_WILDCARD; + fs->m_u.usr_ip4_spec.proto = 0; + } else if (fkeys->basic.ip_proto == IPPROTO_ICMP) { + fs->flow_type = IP_USER_FLOW; + fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP; fs->m_u.usr_ip4_spec.proto = BNXT_IP_PROTO_FULL_MASK; } else if (fkeys->basic.ip_proto == IPPROTO_TCP) { fs->flow_type = TCP_V4_FLOW; @@ -1153,13 +1154,13 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst; } } else { - if (fkeys->basic.ip_proto == IPPROTO_ICMPV6 || - fkeys->basic.ip_proto == IPPROTO_RAW) { + if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) { fs->flow_type = IPV6_USER_FLOW; - if (fkeys->basic.ip_proto == IPPROTO_ICMPV6) - fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6; - else - fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_RAW; + fs->h_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_WILDCARD; + fs->m_u.usr_ip6_spec.l4_proto = 0; + } else if (fkeys->basic.ip_proto == IPPROTO_ICMPV6) { + fs->flow_type = IPV6_USER_FLOW; + fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6; fs->m_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_FULL_MASK; } else if (fkeys->basic.ip_proto == IPPROTO_TCP) { fs->flow_type = TCP_V6_FLOW; @@ -1282,10 +1283,12 @@ static int bnxt_add_l2_cls_rule(struct bnxt *bp, static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec, struct ethtool_usrip4_spec *ip_mask) { + u8 mproto = ip_mask->proto; + u8 sproto = ip_spec->proto; + if (ip_mask->l4_4_bytes || ip_mask->tos || ip_spec->ip_ver != ETH_RX_NFC_IP4 || - ip_mask->proto != BNXT_IP_PROTO_FULL_MASK || - (ip_spec->proto != IPPROTO_RAW && ip_spec->proto != IPPROTO_ICMP)) + (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMP))) return false; return true; } @@ -1293,10 +1296,11 @@ static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec, static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec, struct ethtool_usrip6_spec *ip_mask) { + u8 mproto = ip_mask->l4_proto; + u8 sproto = ip_spec->l4_proto; + if (ip_mask->l4_4_bytes || ip_mask->tclass || - ip_mask->l4_proto != BNXT_IP_PROTO_FULL_MASK || - (ip_spec->l4_proto != IPPROTO_RAW && - ip_spec->l4_proto != IPPROTO_ICMPV6)) + (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMPV6))) return false; return true; } @@ -1350,7 +1354,8 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp, struct ethtool_usrip4_spec *ip_spec = &fs->h_u.usr_ip4_spec; struct ethtool_usrip4_spec *ip_mask = &fs->m_u.usr_ip4_spec; - fkeys->basic.ip_proto = ip_spec->proto; + fkeys->basic.ip_proto = ip_mask->proto ? 
ip_spec->proto + : BNXT_IP_PROTO_WILDCARD; fkeys->basic.n_proto = htons(ETH_P_IP); fkeys->addrs.v4addrs.src = ip_spec->ip4src; fmasks->addrs.v4addrs.src = ip_mask->ip4src; @@ -1381,7 +1386,8 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp, struct ethtool_usrip6_spec *ip_spec = &fs->h_u.usr_ip6_spec; struct ethtool_usrip6_spec *ip_mask = &fs->m_u.usr_ip6_spec; - fkeys->basic.ip_proto = ip_spec->l4_proto; + fkeys->basic.ip_proto = ip_mask->l4_proto ? ip_spec->l4_proto + : BNXT_IP_PROTO_WILDCARD; fkeys->basic.n_proto = htons(ETH_P_IPV6); fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src; fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index e2ee030237d4..33b86ede1ce5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -44,6 +44,7 @@ struct bnxt_led_cfg { #define BNXT_PXP_REG_LEN 0x3110 #define BNXT_IP_PROTO_FULL_MASK 0xFF +#define BNXT_IP_PROTO_WILDCARD 0x0 extern const struct ethtool_ops bnxt_ethtool_ops; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index fa514be87650..075ccd589845 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -67,15 +67,15 @@ static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info, if (BNXT_PTP_USE_RTC(ptp->bp)) return bnxt_ptp_cfg_settime(ptp->bp, ns); - spin_lock_irqsave(&ptp->ptp_lock, flags); + write_seqlock_irqsave(&ptp->ptp_lock, flags); timecounter_init(&ptp->tc, &ptp->cc, ns); - spin_unlock_irqrestore(&ptp->ptp_lock, flags); + write_sequnlock_irqrestore(&ptp->ptp_lock, flags); return 0; } /* Caller holds ptp_lock */ -static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts, - u64 *ns) +static int __bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts, + u64 *ns) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; u32 high_before, high_now, low; @@ -98,17 +98,50 @@ static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts, return 0; } -static void bnxt_ptp_get_current_time(struct bnxt *bp) +static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts, + u64 *ns) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; unsigned long flags; + int rc; + + /* We have to serialize reg access and FW reset */ + read_seqlock_excl_irqsave(&ptp->ptp_lock, flags); + rc = __bnxt_refclk_read(bp, sts, ns); + read_sequnlock_excl_irqrestore(&ptp->ptp_lock, flags); + return rc; +} + +static int bnxt_refclk_read_low(struct bnxt *bp, struct ptp_system_timestamp *sts, + u32 *low) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + unsigned long flags; + + /* We have to serialize reg access and FW reset */ + read_seqlock_excl_irqsave(&ptp->ptp_lock, flags); + + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + read_sequnlock_excl_irqrestore(&ptp->ptp_lock, flags); + return -EIO; + } + + ptp_read_system_prets(sts); + *low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]); + ptp_read_system_postts(sts); + + read_sequnlock_excl_irqrestore(&ptp->ptp_lock, flags); + return 0; +} + +static void bnxt_ptp_get_current_time(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; if (!ptp) return; - spin_lock_irqsave(&ptp->ptp_lock, flags); - WRITE_ONCE(ptp->old_time, ptp->current_time); + WRITE_ONCE(ptp->old_time, ptp->current_time >> BNXT_HI_TIMER_SHIFT); bnxt_refclk_read(bp, NULL, 
&ptp->current_time); - spin_unlock_irqrestore(&ptp->ptp_lock, flags); } static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts, @@ -151,36 +184,32 @@ static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info, { struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, ptp_info); - unsigned long flags; u64 ns, cycles; + u32 low; int rc; - spin_lock_irqsave(&ptp->ptp_lock, flags); - rc = bnxt_refclk_read(ptp->bp, sts, &cycles); - if (rc) { - spin_unlock_irqrestore(&ptp->ptp_lock, flags); + rc = bnxt_refclk_read_low(ptp->bp, sts, &low); + if (rc) return rc; - } - ns = timecounter_cyc2time(&ptp->tc, cycles); - spin_unlock_irqrestore(&ptp->ptp_lock, flags); + + cycles = bnxt_extend_cycles_32b_to_48b(ptp, low); + ns = bnxt_timecounter_cyc2time(ptp, cycles); *ts = ns_to_timespec64(ns); return 0; } -/* Caller holds ptp_lock */ void bnxt_ptp_update_current_time(struct bnxt *bp) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; bnxt_refclk_read(ptp->bp, NULL, &ptp->current_time); - WRITE_ONCE(ptp->old_time, ptp->current_time); + WRITE_ONCE(ptp->old_time, ptp->current_time >> BNXT_HI_TIMER_SHIFT); } static int bnxt_ptp_adjphc(struct bnxt_ptp_cfg *ptp, s64 delta) { struct hwrm_port_mac_cfg_input *req; - unsigned long flags; int rc; rc = hwrm_req_init(ptp->bp, req, HWRM_PORT_MAC_CFG); @@ -194,9 +223,7 @@ static int bnxt_ptp_adjphc(struct bnxt_ptp_cfg *ptp, s64 delta) if (rc) { netdev_err(ptp->bp->dev, "ptp adjphc failed. rc = %x\n", rc); } else { - spin_lock_irqsave(&ptp->ptp_lock, flags); bnxt_ptp_update_current_time(ptp->bp); - spin_unlock_irqrestore(&ptp->ptp_lock, flags); } return rc; @@ -211,9 +238,9 @@ static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) if (BNXT_PTP_USE_RTC(ptp->bp)) return bnxt_ptp_adjphc(ptp, delta); - spin_lock_irqsave(&ptp->ptp_lock, flags); + write_seqlock_irqsave(&ptp->ptp_lock, flags); timecounter_adjtime(&ptp->tc, delta); - spin_unlock_irqrestore(&ptp->ptp_lock, flags); + write_sequnlock_irqrestore(&ptp->ptp_lock, flags); return 0; } @@ -246,10 +273,10 @@ static int bnxt_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) if (!BNXT_MH(bp)) return bnxt_ptp_adjfine_rtc(bp, scaled_ppm); - spin_lock_irqsave(&ptp->ptp_lock, flags); + write_seqlock_irqsave(&ptp->ptp_lock, flags); timecounter_read(&ptp->tc); ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm); - spin_unlock_irqrestore(&ptp->ptp_lock, flags); + write_sequnlock_irqrestore(&ptp->ptp_lock, flags); return 0; } @@ -257,13 +284,10 @@ void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; struct ptp_clock_event event; - unsigned long flags; u64 ns, pps_ts; pps_ts = EVENT_PPS_TS(data2, data1); - spin_lock_irqsave(&ptp->ptp_lock, flags); - ns = timecounter_cyc2time(&ptp->tc, pps_ts); - spin_unlock_irqrestore(&ptp->ptp_lock, flags); + ns = bnxt_timecounter_cyc2time(ptp, pps_ts); switch (EVENT_DATA2_PPS_EVENT_TYPE(data2)) { case ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL: @@ -400,17 +424,13 @@ static int bnxt_get_target_cycles(struct bnxt_ptp_cfg *ptp, u64 target_ns, { u64 cycles_now; u64 nsec_now, nsec_delta; - unsigned long flags; int rc; - spin_lock_irqsave(&ptp->ptp_lock, flags); rc = bnxt_refclk_read(ptp->bp, NULL, &cycles_now); - if (rc) { - spin_unlock_irqrestore(&ptp->ptp_lock, flags); + if (rc) return rc; - } - nsec_now = timecounter_cyc2time(&ptp->tc, cycles_now); - spin_unlock_irqrestore(&ptp->ptp_lock, flags); + + nsec_now = bnxt_timecounter_cyc2time(ptp, cycles_now); nsec_delta = 
target_ns - nsec_now; *cycles_delta = div64_u64(nsec_delta << ptp->cc.shift, ptp->cc.mult); @@ -687,7 +707,7 @@ static u64 bnxt_cc_read(const struct cyclecounter *cc) struct bnxt_ptp_cfg *ptp = container_of(cc, struct bnxt_ptp_cfg, cc); u64 ns = 0; - bnxt_refclk_read(ptp->bp, NULL, &ns); + __bnxt_refclk_read(ptp->bp, NULL, &ns); return ns; } @@ -697,7 +717,6 @@ static int bnxt_stamp_tx_skb(struct bnxt *bp, int slot) struct skb_shared_hwtstamps timestamp; struct bnxt_ptp_tx_req *txts_req; unsigned long now = jiffies; - unsigned long flags; u64 ts = 0, ns = 0; u32 tmo = 0; int rc; @@ -711,9 +730,7 @@ static int bnxt_stamp_tx_skb(struct bnxt *bp, int slot) tmo, slot); if (!rc) { memset(&timestamp, 0, sizeof(timestamp)); - spin_lock_irqsave(&ptp->ptp_lock, flags); - ns = timecounter_cyc2time(&ptp->tc, ts); - spin_unlock_irqrestore(&ptp->ptp_lock, flags); + ns = bnxt_timecounter_cyc2time(ptp, ts); timestamp.hwtstamp = ns_to_ktime(ns); skb_tstamp_tx(txts_req->tx_skb, &timestamp); ptp->stats.ts_pkts++; @@ -767,9 +784,9 @@ next_slot: bnxt_ptp_get_current_time(bp); ptp->next_period = now + HZ; if (time_after_eq(now, ptp->next_overflow_check)) { - spin_lock_irqsave(&ptp->ptp_lock, flags); + write_seqlock_irqsave(&ptp->ptp_lock, flags); timecounter_read(&ptp->tc); - spin_unlock_irqrestore(&ptp->ptp_lock, flags); + write_sequnlock_irqrestore(&ptp->ptp_lock, flags); ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD; } if (rc == -EAGAIN) @@ -808,15 +825,11 @@ void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod) int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; - u64 time; if (!ptp) return -ENODEV; - BNXT_READ_TIME64(ptp, time, ptp->old_time); - *ts = (time & BNXT_HI_TIMER_MASK) | pkt_ts; - if (pkt_ts < (time & BNXT_LO_TIMER_MASK)) - *ts += BNXT_LO_TIMER_MASK + 1; + *ts = bnxt_extend_cycles_32b_to_48b(ptp, pkt_ts); return 0; } @@ -829,7 +842,6 @@ void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi, u32 opaque = tscmp->tx_ts_cmp_opaque; struct bnxt_tx_ring_info *txr; struct bnxt_sw_tx_bd *tx_buf; - unsigned long flags; u64 ts, ns; u16 cons; @@ -844,9 +856,7 @@ void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi, le32_to_cpu(tscmp->tx_ts_cmp_flags_type), le32_to_cpu(tscmp->tx_ts_cmp_errors_v)); } else { - spin_lock_irqsave(&ptp->ptp_lock, flags); - ns = timecounter_cyc2time(&ptp->tc, ts); - spin_unlock_irqrestore(&ptp->ptp_lock, flags); + ns = bnxt_timecounter_cyc2time(ptp, ts); timestamp.hwtstamp = ns_to_ktime(ns); skb_tstamp_tx(tx_buf->skb, &timestamp); } @@ -955,6 +965,7 @@ static bool bnxt_pps_config_ok(struct bnxt *bp) static void bnxt_ptp_timecounter_init(struct bnxt *bp, bool init_tc) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + unsigned long flags; if (!ptp->ptp_clock) { memset(&ptp->cc, 0, sizeof(ptp->cc)); @@ -971,8 +982,11 @@ static void bnxt_ptp_timecounter_init(struct bnxt *bp, bool init_tc) } ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD; } - if (init_tc) + if (init_tc) { + write_seqlock_irqsave(&ptp->ptp_lock, flags); timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real())); + write_sequnlock_irqrestore(&ptp->ptp_lock, flags); + } } /* Caller holds ptp_lock */ @@ -1005,9 +1019,9 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg) if (rc) return rc; } - spin_lock_irqsave(&bp->ptp_cfg->ptp_lock, flags); + write_seqlock_irqsave(&bp->ptp_cfg->ptp_lock, flags); bnxt_ptp_rtc_timecounter_init(bp->ptp_cfg, ns); - spin_unlock_irqrestore(&bp->ptp_cfg->ptp_lock, flags); +
write_sequnlock_irqrestore(&bp->ptp_cfg->ptp_lock, flags); return 0; } @@ -1042,7 +1056,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) bnxt_ptp_free(bp); WRITE_ONCE(ptp->tx_avail, BNXT_MAX_TX_TS); - spin_lock_init(&ptp->ptp_lock); + seqlock_init(&ptp->ptp_lock); spin_lock_init(&ptp->ptp_tx_lock); if (BNXT_PTP_USE_RTC(bp)) { @@ -1075,12 +1089,8 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) atomic64_set(&ptp->stats.ts_err, 0); if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { - unsigned long flags; - - spin_lock_irqsave(&ptp->ptp_lock, flags); bnxt_refclk_read(bp, NULL, &ptp->current_time); - WRITE_ONCE(ptp->old_time, ptp->current_time); - spin_unlock_irqrestore(&ptp->ptp_lock, flags); + WRITE_ONCE(ptp->old_time, ptp->current_time >> BNXT_HI_TIMER_SHIFT); ptp_schedule_worker(ptp->ptp_clock, 0); } ptp->txts_tmo = BNXT_PTP_DFLT_TX_TMO; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h index f322466ecad3..c7851f8c971c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h @@ -21,6 +21,7 @@ #define BNXT_DEVCLK_FREQ 1000000 #define BNXT_LO_TIMER_MASK 0x0000ffffffffUL #define BNXT_HI_TIMER_MASK 0xffff00000000UL +#define BNXT_HI_TIMER_SHIFT 24 #define BNXT_PTP_DFLT_TX_TMO 1000 /* ms */ #define BNXT_PTP_QTS_TIMEOUT 1000 @@ -102,14 +103,15 @@ struct bnxt_ptp_cfg { struct timecounter tc; struct bnxt_pps pps_info; /* serialize timecounter access */ - spinlock_t ptp_lock; + seqlock_t ptp_lock; /* serialize ts tx request queuing */ spinlock_t ptp_tx_lock; u64 current_time; - u64 old_time; unsigned long next_period; unsigned long next_overflow_check; u32 cmult; + /* cache of upper 24 bits of cyclecoutner. 8 bits are used to check for roll-over */ + u32 old_time; /* a 23b shift cyclecounter will overflow in ~36 mins. Check overflow every 18 mins. 
*/ #define BNXT_PHC_OVERFLOW_PERIOD (18 * 60 * HZ) @@ -145,20 +147,6 @@ struct bnxt_ptp_cfg { struct bnxt_ptp_stats stats; }; -#if BITS_PER_LONG == 32 -#define BNXT_READ_TIME64(ptp, dst, src) \ -do { \ - unsigned long flags; \ - \ - spin_lock_irqsave(&(ptp)->ptp_lock, flags); \ - (dst) = (src); \ - spin_unlock_irqrestore(&(ptp)->ptp_lock, flags); \ -} while (0) -#else -#define BNXT_READ_TIME64(ptp, dst, src) \ - ((dst) = READ_ONCE(src)) -#endif - #define BNXT_PTP_INC_TX_AVAIL(ptp) \ do { \ spin_lock_bh(&(ptp)->ptp_tx_lock); \ @@ -182,4 +170,27 @@ void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns); int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg); int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg); void bnxt_ptp_clear(struct bnxt *bp); +static inline u64 bnxt_timecounter_cyc2time(struct bnxt_ptp_cfg *ptp, u64 ts) +{ + unsigned int seq; + u64 ns; + + do { + seq = read_seqbegin(&ptp->ptp_lock); + ns = timecounter_cyc2time(&ptp->tc, ts); + } while (read_seqretry(&ptp->ptp_lock, seq)); + + return ns; +} + +static inline u64 bnxt_extend_cycles_32b_to_48b(struct bnxt_ptp_cfg *ptp, u32 ts) +{ + u64 time, cycles; + + time = (u64)READ_ONCE(ptp->old_time) << BNXT_HI_TIMER_SHIFT; + cycles = (time & BNXT_HI_TIMER_MASK) | ts; + if (ts < (time & BNXT_LO_TIMER_MASK)) + cycles += BNXT_LO_TIMER_MASK + 1; + return cycles; +} #endif diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 53a949eb9180..3e93f957430b 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1144,14 +1144,14 @@ static int bcmgenet_get_sset_count(struct net_device *dev, int string_set) static void bcmgenet_get_strings(struct net_device *dev, u32 stringset, u8 *data) { + const char *str; int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < BCMGENET_STATS_LEN; i++) { - memcpy(data + i * ETH_GSTRING_LEN, - bcmgenet_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); + str = bcmgenet_gstrings_stats[i].stat_string; + ethtool_puts(&data, str); } break; } diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 0cc3644ee855..10b7e02ba4d0 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -23,10 +23,8 @@ #define ENIC_BARS_MAX 6 -#define ENIC_WQ_MAX 8 -#define ENIC_RQ_MAX 8 -#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX) -#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) +#define ENIC_WQ_MAX 256 +#define ENIC_RQ_MAX 256 #define ENIC_WQ_NAPI_BUDGET 256 @@ -162,6 +160,17 @@ struct enic_rq_stats { u64 desc_skip; /* Rx pkt went into later buffer */ }; +struct enic_wq { + spinlock_t lock; /* spinlock for wq */ + struct vnic_wq vwq; + struct enic_wq_stats stats; +} ____cacheline_aligned; + +struct enic_rq { + struct vnic_rq vrq; + struct enic_rq_stats stats; +} ____cacheline_aligned; + /* Per-instance private data structure */ struct enic { struct net_device *netdev; @@ -173,8 +182,8 @@ struct enic { struct work_struct reset; struct work_struct tx_hang_reset; struct work_struct change_mtu_work; - struct msix_entry msix_entry[ENIC_INTR_MAX]; - struct enic_msix_entry msix[ENIC_INTR_MAX]; + struct msix_entry *msix_entry; + struct enic_msix_entry *msix; u32 msg_enable; spinlock_t devcmd_lock; u8 mac_addr[ETH_ALEN]; @@ -193,28 +202,25 @@ struct enic { bool enic_api_busy; struct enic_port_profile *pp; - /* work queue cache line section */ - ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX]; - spinlock_t 
wq_lock[ENIC_WQ_MAX]; - struct enic_wq_stats wq_stats[ENIC_WQ_MAX]; + struct enic_wq *wq; + unsigned int wq_avail; unsigned int wq_count; u16 loop_enable; u16 loop_tag; - /* receive queue cache line section */ - ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX]; - struct enic_rq_stats rq_stats[ENIC_RQ_MAX]; + struct enic_rq *rq; + unsigned int rq_avail; unsigned int rq_count; struct vxlan_offload vxlan; - struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX]; + struct napi_struct *napi; - /* interrupt resource cache line section */ - ____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX]; + struct vnic_intr *intr; + unsigned int intr_avail; unsigned int intr_count; u32 __iomem *legacy_pba; /* memory-mapped */ - /* completion queue cache line section */ - ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX]; + struct vnic_cq *cq; + unsigned int cq_avail; unsigned int cq_count; struct enic_rfs_flw_tbl rfs_h; u32 rx_copybreak; @@ -272,18 +278,28 @@ static inline unsigned int enic_msix_wq_intr(struct enic *enic, return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset; } -static inline unsigned int enic_msix_err_intr(struct enic *enic) -{ - return enic->rq_count + enic->wq_count; -} +/* MSIX interrupts are organized as the error interrupt, then the notify + * interrupt followed by all the I/O interrupts. The error interrupt needs + * to fit in 7 bits due to hardware constraints + */ +#define ENIC_MSIX_RESERVED_INTR 2 +#define ENIC_MSIX_ERR_INTR 0 +#define ENIC_MSIX_NOTIFY_INTR 1 +#define ENIC_MSIX_IO_INTR_BASE ENIC_MSIX_RESERVED_INTR +#define ENIC_MSIX_MIN_INTR (ENIC_MSIX_RESERVED_INTR + 2) #define ENIC_LEGACY_IO_INTR 0 #define ENIC_LEGACY_ERR_INTR 1 #define ENIC_LEGACY_NOTIFY_INTR 2 +static inline unsigned int enic_msix_err_intr(struct enic *enic) +{ + return ENIC_MSIX_ERR_INTR; +} + static inline unsigned int enic_msix_notify_intr(struct enic *enic) { - return enic->rq_count + enic->wq_count + 1; + return ENIC_MSIX_NOTIFY_INTR; } static inline bool enic_is_err_intr(struct enic *enic, int intr) diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 4fe85780a950..95b071153fed 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -337,7 +337,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev, for (i = 0; i < NUM_ENIC_GEN_STATS; i++) *(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index]; for (i = 0; i < enic->rq_count; i++) { - struct enic_rq_stats *rqstats = &enic->rq_stats[i]; + struct enic_rq_stats *rqstats = &enic->rq[i].stats; int index; for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) { @@ -346,7 +346,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev, } } for (i = 0; i < enic->wq_count; i++) { - struct enic_wq_stats *wqstats = &enic->wq_stats[i]; + struct enic_wq_stats *wqstats = &enic->wq[i].stats; int index; for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) { @@ -695,8 +695,8 @@ static void enic_get_channels(struct net_device *netdev, switch (vnic_dev_get_intr_mode(enic->vdev)) { case VNIC_DEV_INTR_MODE_MSIX: - channels->max_rx = ENIC_RQ_MAX; - channels->max_tx = ENIC_WQ_MAX; + channels->max_rx = min(enic->rq_avail, ENIC_RQ_MAX); + channels->max_tx = min(enic->wq_avail, ENIC_WQ_MAX); channels->rx_count = enic->rq_count; channels->tx_count = enic->wq_count; break; diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index ffed14b63d41..9913952ccb42 100644 --- 
a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -342,8 +342,8 @@ static void enic_wq_free_buf(struct vnic_wq *wq, { struct enic *enic = vnic_dev_priv(wq->vdev); - enic->wq_stats[wq->index].cq_work++; - enic->wq_stats[wq->index].cq_bytes += buf->len; + enic->wq[wq->index].stats.cq_work++; + enic->wq[wq->index].stats.cq_bytes += buf->len; enic_free_wq_buf(wq, buf); } @@ -352,20 +352,20 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, { struct enic *enic = vnic_dev_priv(vdev); - spin_lock(&enic->wq_lock[q_number]); + spin_lock(&enic->wq[q_number].lock); - vnic_wq_service(&enic->wq[q_number], cq_desc, + vnic_wq_service(&enic->wq[q_number].vwq, cq_desc, completed_index, enic_wq_free_buf, opaque); if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) && - vnic_wq_desc_avail(&enic->wq[q_number]) >= + vnic_wq_desc_avail(&enic->wq[q_number].vwq) >= (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) { netif_wake_subqueue(enic->netdev, q_number); - enic->wq_stats[q_number].wake++; + enic->wq[q_number].stats.wake++; } - spin_unlock(&enic->wq_lock[q_number]); + spin_unlock(&enic->wq[q_number].lock); return 0; } @@ -377,7 +377,7 @@ static bool enic_log_q_error(struct enic *enic) bool err = false; for (i = 0; i < enic->wq_count; i++) { - error_status = vnic_wq_error_status(&enic->wq[i]); + error_status = vnic_wq_error_status(&enic->wq[i].vwq); err |= error_status; if (error_status) netdev_err(enic->netdev, "WQ[%d] error_status %d\n", @@ -385,7 +385,7 @@ static bool enic_log_q_error(struct enic *enic) } for (i = 0; i < enic->rq_count; i++) { - error_status = vnic_rq_error_status(&enic->rq[i]); + error_status = vnic_rq_error_status(&enic->rq[i].vrq); err |= error_status; if (error_status) netdev_err(enic->netdev, "RQ[%d] error_status %d\n", @@ -598,9 +598,9 @@ static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq, err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); /* The enic_queue_wq_desc() above does not do HW checksum */ - enic->wq_stats[wq->index].csum_none++; - enic->wq_stats[wq->index].packets++; - enic->wq_stats[wq->index].bytes += skb->len; + enic->wq[wq->index].stats.csum_none++; + enic->wq[wq->index].stats.packets++; + enic->wq[wq->index].stats.bytes += skb->len; return err; } @@ -634,9 +634,9 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq, if (!eop) err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); - enic->wq_stats[wq->index].csum_partial++; - enic->wq_stats[wq->index].packets++; - enic->wq_stats[wq->index].bytes += skb->len; + enic->wq[wq->index].stats.csum_partial++; + enic->wq[wq->index].stats.packets++; + enic->wq[wq->index].stats.bytes += skb->len; return err; } @@ -699,11 +699,11 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, if (skb->encapsulation) { hdr_len = skb_inner_tcp_all_headers(skb); enic_preload_tcp_csum_encap(skb); - enic->wq_stats[wq->index].encap_tso++; + enic->wq[wq->index].stats.encap_tso++; } else { hdr_len = skb_tcp_all_headers(skb); enic_preload_tcp_csum(skb); - enic->wq_stats[wq->index].tso++; + enic->wq[wq->index].stats.tso++; } /* Queue WQ_ENET_MAX_DESC_LEN length descriptors @@ -757,8 +757,8 @@ tso_out_stats: pkts = len / mss; if ((len % mss) > 0) pkts++; - enic->wq_stats[wq->index].packets += pkts; - enic->wq_stats[wq->index].bytes += (len + (pkts * hdr_len)); + enic->wq[wq->index].stats.packets += pkts; + enic->wq[wq->index].stats.bytes += (len + (pkts * hdr_len)); return 0; } 
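The enic hunks above and below all follow from one structural change: the parallel arrays wq[], wq_lock[] and wq_stats[] (and likewise for the RQs) are folded into per-queue structures, so each queue's lock, ring and counters live together and the arrays can be sized at probe time. A reduced sketch of the layout from enic.h, with a hypothetical accessor added for illustration:

/* One TX queue's state grouped on its own cache lines. */
struct enic_wq {
	spinlock_t lock;		/* serializes posting vs. completion */
	struct vnic_wq vwq;		/* hardware work queue ring */
	struct enic_wq_stats stats;	/* per-queue counters */
} ____cacheline_aligned;

/* Hypothetical helper: every former enic->wq_stats[i] access becomes
 * enic->wq[i].stats, as in the conversions in this file.
 */
static inline struct enic_wq_stats *example_wq_stats(struct enic *enic, int i)
{
	return &enic->wq[i].stats;
}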
@@ -792,9 +792,9 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq, if (!eop) err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); - enic->wq_stats[wq->index].encap_csum++; - enic->wq_stats[wq->index].packets++; - enic->wq_stats[wq->index].bytes += skb->len; + enic->wq[wq->index].stats.encap_csum++; + enic->wq[wq->index].stats.packets++; + enic->wq[wq->index].stats.bytes += skb->len; return err; } @@ -812,7 +812,7 @@ static inline int enic_queue_wq_skb(struct enic *enic, /* VLAN tag from trunking driver */ vlan_tag_insert = 1; vlan_tag = skb_vlan_tag_get(skb); - enic->wq_stats[wq->index].add_vlan++; + enic->wq[wq->index].stats.add_vlan++; } else if (enic->loop_enable) { vlan_tag = enic->loop_tag; loopback = 1; @@ -859,11 +859,11 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, struct netdev_queue *txq; txq_map = skb_get_queue_mapping(skb) % enic->wq_count; - wq = &enic->wq[txq_map]; + wq = &enic->wq[txq_map].vwq; if (skb->len <= 0) { dev_kfree_skb_any(skb); - enic->wq_stats[wq->index].null_pkt++; + enic->wq[wq->index].stats.null_pkt++; return NETDEV_TX_OK; } @@ -878,19 +878,19 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC && skb_linearize(skb)) { dev_kfree_skb_any(skb); - enic->wq_stats[wq->index].skb_linear_fail++; + enic->wq[wq->index].stats.skb_linear_fail++; return NETDEV_TX_OK; } - spin_lock(&enic->wq_lock[txq_map]); + spin_lock(&enic->wq[txq_map].lock); if (vnic_wq_desc_avail(wq) < skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) { netif_tx_stop_queue(txq); /* This is a hard error, log it */ netdev_err(netdev, "BUG! Tx ring full when queue awake!\n"); - spin_unlock(&enic->wq_lock[txq_map]); - enic->wq_stats[wq->index].desc_full_awake++; + spin_unlock(&enic->wq[txq_map].lock); + enic->wq[wq->index].stats.desc_full_awake++; return NETDEV_TX_BUSY; } @@ -899,14 +899,14 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) { netif_tx_stop_queue(txq); - enic->wq_stats[wq->index].stopped++; + enic->wq[wq->index].stats.stopped++; } skb_tx_timestamp(skb); if (!netdev_xmit_more() || netif_xmit_stopped(txq)) vnic_wq_doorbell(wq); error: - spin_unlock(&enic->wq_lock[txq_map]); + spin_unlock(&enic->wq[txq_map].lock); return NETDEV_TX_OK; } @@ -940,10 +940,10 @@ static void enic_get_stats(struct net_device *netdev, net_stats->rx_errors = stats->rx.rx_errors; net_stats->multicast = stats->rx.rx_multicast_frames_ok; - for (i = 0; i < ENIC_RQ_MAX; i++) { - struct enic_rq_stats *rqs = &enic->rq_stats[i]; + for (i = 0; i < enic->rq_count; i++) { + struct enic_rq_stats *rqs = &enic->rq[i].stats; - if (!enic->rq->ctrl) + if (!enic->rq[i].vrq.ctrl) break; pkt_truncated += rqs->pkt_truncated; bad_fcs += rqs->bad_fcs; @@ -1313,7 +1313,7 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq) } skb = netdev_alloc_skb_ip_align(netdev, len); if (!skb) { - enic->rq_stats[rq->index].no_skb++; + enic->rq[rq->index].stats.no_skb++; return -ENOMEM; } @@ -1366,7 +1366,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, struct net_device *netdev = enic->netdev; struct sk_buff *skb; struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; - struct enic_rq_stats *rqstats = &enic->rq_stats[rq->index]; + struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats; u8 type, color, eop, sop, ingress_port, vlan_stripped; u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; @@ -1512,7 +1512,7 @@ static int 
enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, { struct enic *enic = vnic_dev_priv(vdev); - vnic_rq_service(&enic->rq[q_number], cq_desc, + vnic_rq_service(&enic->rq[q_number].vrq, cq_desc, completed_index, VNIC_RQ_RETURN_DESC, enic_rq_indicate_buf, opaque); @@ -1609,7 +1609,7 @@ static int enic_poll(struct napi_struct *napi, int budget) 0 /* don't unmask intr */, 0 /* don't reset intr timer */); - err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); + err = vnic_rq_fill(&enic->rq[0].vrq, enic_rq_alloc_buf); /* Buffer allocation failed. Stay in polling * mode so we can try to fill the ring again. @@ -1621,7 +1621,7 @@ static int enic_poll(struct napi_struct *napi, int budget) /* Call the function which refreshes the intr coalescing timer * value based on the traffic. */ - enic_calc_int_moderation(enic, &enic->rq[0]); + enic_calc_int_moderation(enic, &enic->rq[0].vrq); if ((rq_work_done < budget) && napi_complete_done(napi, rq_work_done)) { @@ -1630,11 +1630,11 @@ static int enic_poll(struct napi_struct *napi, int budget) */ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) - enic_set_int_moderation(enic, &enic->rq[0]); + enic_set_int_moderation(enic, &enic->rq[0].vrq); vnic_intr_unmask(&enic->intr[intr]); - enic->rq_stats[0].napi_complete++; + enic->rq[0].stats.napi_complete++; } else { - enic->rq_stats[0].napi_repoll++; + enic->rq[0].stats.napi_repoll++; } return rq_work_done; @@ -1683,7 +1683,7 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget) struct net_device *netdev = napi->dev; struct enic *enic = netdev_priv(netdev); unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count; - struct vnic_wq *wq = &enic->wq[wq_index]; + struct vnic_wq *wq = &enic->wq[wq_index].vwq; unsigned int cq; unsigned int intr; unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET; @@ -1737,7 +1737,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) 0 /* don't unmask intr */, 0 /* don't reset intr timer */); - err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf); + err = vnic_rq_fill(&enic->rq[rq].vrq, enic_rq_alloc_buf); /* Buffer allocation failed. Stay in polling mode * so we can try to fill the ring again. @@ -1749,7 +1749,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) /* Call the function which refreshes the intr coalescing timer * value based on the traffic. 
*/ - enic_calc_int_moderation(enic, &enic->rq[rq]); + enic_calc_int_moderation(enic, &enic->rq[rq].vrq); if ((work_done < budget) && napi_complete_done(napi, work_done)) { @@ -1758,11 +1758,11 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) */ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) - enic_set_int_moderation(enic, &enic->rq[rq]); + enic_set_int_moderation(enic, &enic->rq[rq].vrq); vnic_intr_unmask(&enic->intr[intr]); - enic->rq_stats[rq].napi_complete++; + enic->rq[rq].stats.napi_complete++; } else { - enic->rq_stats[rq].napi_repoll++; + enic->rq[rq].stats.napi_repoll++; } return work_done; @@ -1792,7 +1792,7 @@ static void enic_free_intr(struct enic *enic) free_irq(enic->pdev->irq, enic); break; case VNIC_DEV_INTR_MODE_MSIX: - for (i = 0; i < ARRAY_SIZE(enic->msix); i++) + for (i = 0; i < enic->intr_count; i++) if (enic->msix[i].requested) free_irq(enic->msix_entry[i].vector, enic->msix[i].devid); @@ -1859,7 +1859,7 @@ static int enic_request_intr(struct enic *enic) enic->msix[intr].isr = enic_isr_msix_notify; enic->msix[intr].devid = enic; - for (i = 0; i < ARRAY_SIZE(enic->msix); i++) + for (i = 0; i < enic->intr_count; i++) enic->msix[i].requested = 0; for (i = 0; i < enic->intr_count; i++) { @@ -1989,10 +1989,10 @@ static int enic_open(struct net_device *netdev) for (i = 0; i < enic->rq_count; i++) { /* enable rq before updating rq desc */ - vnic_rq_enable(&enic->rq[i]); - vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf); + vnic_rq_enable(&enic->rq[i].vrq); + vnic_rq_fill(&enic->rq[i].vrq, enic_rq_alloc_buf); /* Need at least one buffer on ring to get going */ - if (vnic_rq_desc_used(&enic->rq[i]) == 0) { + if (vnic_rq_desc_used(&enic->rq[i].vrq) == 0) { netdev_err(netdev, "Unable to alloc receive buffers\n"); err = -ENOMEM; goto err_out_free_rq; @@ -2000,7 +2000,7 @@ static int enic_open(struct net_device *netdev) } for (i = 0; i < enic->wq_count; i++) - vnic_wq_enable(&enic->wq[i]); + vnic_wq_enable(&enic->wq[i].vwq); if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) enic_dev_add_station_addr(enic); @@ -2027,9 +2027,9 @@ static int enic_open(struct net_device *netdev) err_out_free_rq: for (i = 0; i < enic->rq_count; i++) { - ret = vnic_rq_disable(&enic->rq[i]); + ret = vnic_rq_disable(&enic->rq[i].vrq); if (!ret) - vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); + vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf); } enic_dev_notify_unset(enic); err_out_free_intr: @@ -2071,12 +2071,12 @@ static int enic_stop(struct net_device *netdev) enic_dev_del_station_addr(enic); for (i = 0; i < enic->wq_count; i++) { - err = vnic_wq_disable(&enic->wq[i]); + err = vnic_wq_disable(&enic->wq[i].vwq); if (err) return err; } for (i = 0; i < enic->rq_count; i++) { - err = vnic_rq_disable(&enic->rq[i]); + err = vnic_rq_disable(&enic->rq[i].vrq); if (err) return err; } @@ -2086,9 +2086,9 @@ static int enic_stop(struct net_device *netdev) enic_free_intr(enic); for (i = 0; i < enic->wq_count; i++) - vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); + vnic_wq_clean(&enic->wq[i].vwq, enic_free_wq_buf); for (i = 0; i < enic->rq_count; i++) - vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); + vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf); for (i = 0; i < enic->cq_count; i++) vnic_cq_clean(&enic->cq[i]); for (i = 0; i < enic->intr_count; i++) @@ -2442,112 +2442,56 @@ static void enic_tx_hang_reset(struct work_struct *work) static int enic_set_intr_mode(struct enic *enic) { - unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX); - unsigned int m = 
min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX); unsigned int i; + int num_intr; /* Set interrupt mode (INTx, MSI, MSI-X) depending * on system capabilities. * * Try MSI-X first - * - * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs - * (the second to last INTR is used for WQ/RQ errors) - * (the last INTR is used for notifications) - */ - - BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2); - for (i = 0; i < n + m + 2; i++) - enic->msix_entry[i].entry = i; - - /* Use multiple RQs if RSS is enabled */ - if (ENIC_SETTING(enic, RSS) && - enic->config.intr_mode < 1 && - enic->rq_count >= n && - enic->wq_count >= m && - enic->cq_count >= n + m && - enic->intr_count >= n + m + 2) { - - if (pci_enable_msix_range(enic->pdev, enic->msix_entry, - n + m + 2, n + m + 2) > 0) { - - enic->rq_count = n; - enic->wq_count = m; - enic->cq_count = n + m; - enic->intr_count = n + m + 2; - - vnic_dev_set_intr_mode(enic->vdev, - VNIC_DEV_INTR_MODE_MSIX); - - return 0; - } - } - if (enic->config.intr_mode < 1 && - enic->rq_count >= 1 && - enic->wq_count >= m && - enic->cq_count >= 1 + m && - enic->intr_count >= 1 + m + 2) { - if (pci_enable_msix_range(enic->pdev, enic->msix_entry, - 1 + m + 2, 1 + m + 2) > 0) { - - enic->rq_count = 1; - enic->wq_count = m; - enic->cq_count = 1 + m; - enic->intr_count = 1 + m + 2; - + enic->intr_avail >= ENIC_MSIX_MIN_INTR) { + for (i = 0; i < enic->intr_avail; i++) + enic->msix_entry[i].entry = i; + + num_intr = pci_enable_msix_range(enic->pdev, enic->msix_entry, + ENIC_MSIX_MIN_INTR, + enic->intr_avail); + if (num_intr > 0) { vnic_dev_set_intr_mode(enic->vdev, - VNIC_DEV_INTR_MODE_MSIX); - + VNIC_DEV_INTR_MODE_MSIX); + enic->intr_avail = num_intr; return 0; } } /* Next try MSI * - * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR + * We need 1 INTR */ if (enic->config.intr_mode < 2 && - enic->rq_count >= 1 && - enic->wq_count >= 1 && - enic->cq_count >= 2 && - enic->intr_count >= 1 && + enic->intr_avail >= 1 && !pci_enable_msi(enic->pdev)) { - - enic->rq_count = 1; - enic->wq_count = 1; - enic->cq_count = 2; - enic->intr_count = 1; - + enic->intr_avail = 1; vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI); - return 0; } /* Next try INTx * - * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs + * We need 3 INTRs * (the first INTR is used for WQ/RQ) * (the second INTR is used for WQ/RQ errors) * (the last INTR is used for notifications) */ if (enic->config.intr_mode < 3 && - enic->rq_count >= 1 && - enic->wq_count >= 1 && - enic->cq_count >= 2 && - enic->intr_count >= 3) { - - enic->rq_count = 1; - enic->wq_count = 1; - enic->cq_count = 2; - enic->intr_count = 3; - + enic->intr_avail >= 3) { + enic->intr_avail = 3; vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX); - return 0; } @@ -2572,11 +2516,81 @@ static void enic_clear_intr_mode(struct enic *enic) vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); } +static int enic_adjust_resources(struct enic *enic) +{ + unsigned int max_queues; + unsigned int rq_default; + unsigned int rq_avail; + unsigned int wq_avail; + + if (enic->rq_avail < 1 || enic->wq_avail < 1 || enic->cq_avail < 2) { + dev_err(enic_get_dev(enic), + "Not enough resources available rq: %d wq: %d cq: %d\n", + enic->rq_avail, enic->wq_avail, + enic->cq_avail); + return -ENOSPC; + } + + if (is_kdump_kernel()) { + dev_info(enic_get_dev(enic), "Running from within kdump kernel. 
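The rewritten enic_set_intr_mode() no longer pins the vector count to n + m + 2 up front; it lets pci_enable_msix_range() grant anything between ENIC_MSIX_MIN_INTR and intr_avail and records the result. A minimal sketch of that fallback ladder (names and the INTx vector count follow the patch; the surrounding bookkeeping is elided):

	#include <linux/pci.h>

	static int demo_enable_interrupts(struct pci_dev *pdev,
					  struct msix_entry *entries,
					  unsigned int min_vecs, unsigned int avail)
	{
		unsigned int i;
		int got;

		for (i = 0; i < avail; i++)
			entries[i].entry = i;

		/* returns a count in [min_vecs, avail], or a negative errno */
		got = pci_enable_msix_range(pdev, entries, min_vecs, avail);
		if (got > 0)
			return got;			/* MSI-X */

		if (!pci_enable_msi(pdev))
			return 1;			/* MSI: a single vector */

		return 3;				/* INTx: I/O, error, notify */
	}

Deciding how many queues fit into whatever was granted is deferred to enic_adjust_resources() below.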
Using minimal resources\n"); + enic->rq_avail = 1; + enic->wq_avail = 1; + enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS; + enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS; + enic->config.mtu = min_t(u16, 1500, enic->config.mtu); + } + + /* if RSS isn't set, then we can only use one RQ */ + if (!ENIC_SETTING(enic, RSS)) + enic->rq_avail = 1; + + switch (vnic_dev_get_intr_mode(enic->vdev)) { + case VNIC_DEV_INTR_MODE_INTX: + case VNIC_DEV_INTR_MODE_MSI: + enic->rq_count = 1; + enic->wq_count = 1; + enic->cq_count = 2; + enic->intr_count = enic->intr_avail; + break; + case VNIC_DEV_INTR_MODE_MSIX: + /* Adjust the number of wqs/rqs/cqs/interrupts that will be + * used based on which resource is the most constrained + */ + wq_avail = min(enic->wq_avail, ENIC_WQ_MAX); + rq_default = netif_get_num_default_rss_queues(); + rq_avail = min3(enic->rq_avail, ENIC_RQ_MAX, rq_default); + max_queues = min(enic->cq_avail, + enic->intr_avail - ENIC_MSIX_RESERVED_INTR); + if (wq_avail + rq_avail <= max_queues) { + enic->rq_count = rq_avail; + enic->wq_count = wq_avail; + } else { + /* recalculate wq/rq count */ + if (rq_avail < wq_avail) { + enic->rq_count = min(rq_avail, max_queues / 2); + enic->wq_count = max_queues - enic->rq_count; + } else { + enic->wq_count = min(wq_avail, max_queues / 2); + enic->rq_count = max_queues - enic->wq_count; + } + } + enic->cq_count = enic->rq_count + enic->wq_count; + enic->intr_count = enic->cq_count + ENIC_MSIX_RESERVED_INTR; + + break; + default: + dev_err(enic_get_dev(enic), "Unknown interrupt mode\n"); + return -EINVAL; + } + + return 0; +} + static void enic_get_queue_stats_rx(struct net_device *dev, int idx, struct netdev_queue_stats_rx *rxs) { struct enic *enic = netdev_priv(dev); - struct enic_rq_stats *rqstats = &enic->rq_stats[idx]; + struct enic_rq_stats *rqstats = &enic->rq[idx].stats; rxs->bytes = rqstats->bytes; rxs->packets = rqstats->packets; @@ -2590,7 +2604,7 @@ static void enic_get_queue_stats_tx(struct net_device *dev, int idx, struct netdev_queue_stats_tx *txs) { struct enic *enic = netdev_priv(dev); - struct enic_wq_stats *wqstats = &enic->wq_stats[idx]; + struct enic_wq_stats *wqstats = &enic->wq[idx].stats; txs->bytes = wqstats->bytes; txs->packets = wqstats->packets; @@ -2674,6 +2688,71 @@ static const struct netdev_stat_ops enic_netdev_stat_ops = { .get_base_stats = enic_get_base_stats, }; +static void enic_free_enic_resources(struct enic *enic) +{ + kfree(enic->wq); + enic->wq = NULL; + + kfree(enic->rq); + enic->rq = NULL; + + kfree(enic->cq); + enic->cq = NULL; + + kfree(enic->napi); + enic->napi = NULL; + + kfree(enic->msix_entry); + enic->msix_entry = NULL; + + kfree(enic->msix); + enic->msix = NULL; + + kfree(enic->intr); + enic->intr = NULL; +} + +static int enic_alloc_enic_resources(struct enic *enic) +{ + enic->wq = kcalloc(enic->wq_avail, sizeof(struct enic_wq), GFP_KERNEL); + if (!enic->wq) + goto free_queues; + + enic->rq = kcalloc(enic->rq_avail, sizeof(struct enic_rq), GFP_KERNEL); + if (!enic->rq) + goto free_queues; + + enic->cq = kcalloc(enic->cq_avail, sizeof(struct vnic_cq), GFP_KERNEL); + if (!enic->cq) + goto free_queues; + + enic->napi = kcalloc(enic->wq_avail + enic->rq_avail, + sizeof(struct napi_struct), GFP_KERNEL); + if (!enic->napi) + goto free_queues; + + enic->msix_entry = kcalloc(enic->intr_avail, sizeof(struct msix_entry), + GFP_KERNEL); + if (!enic->msix_entry) + goto free_queues; + + enic->msix = kcalloc(enic->intr_avail, sizeof(struct enic_msix_entry), + GFP_KERNEL); + if (!enic->msix) + goto free_queues; + + 
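To make the MSI-X balancing above concrete, take hypothetical counts wq_avail = 8, rq_avail = 8 (after clamping by ENIC_RQ_MAX and netif_get_num_default_rss_queues()), cq_avail = 16 and intr_avail = 12, and assume ENIC_MSIX_RESERVED_INTR is 2 (the error and notify vectors). Then max_queues = min(16, 12 - 2) = 10; since 8 + 8 exceeds 10 and rq_avail is not smaller than wq_avail, the else branch yields wq_count = min(8, 10 / 2) = 5 and rq_count = 10 - 5 = 5, giving cq_count = 10 and intr_count = 12, which exactly matches the granted vector budget.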
enic->intr = kcalloc(enic->intr_avail, sizeof(struct vnic_intr), + GFP_KERNEL); + if (!enic->intr) + goto free_queues; + + return 0; + +free_queues: + enic_free_enic_resources(enic); + return -ENOMEM; +} + static void enic_dev_deinit(struct enic *enic) { unsigned int i; @@ -2691,18 +2770,7 @@ static void enic_dev_deinit(struct enic *enic) enic_free_vnic_resources(enic); enic_clear_intr_mode(enic); enic_free_affinity_hint(enic); -} - -static void enic_kdump_kernel_config(struct enic *enic) -{ - if (is_kdump_kernel()) { - dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n"); - enic->rq_count = 1; - enic->wq_count = 1; - enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS; - enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS; - enic->config.mtu = min_t(u16, 1500, enic->config.mtu); - } + enic_free_enic_resources(enic); } static int enic_dev_init(struct enic *enic) @@ -2734,19 +2802,26 @@ static int enic_dev_init(struct enic *enic) enic_get_res_counts(enic); - /* modify resource count if we are in kdump_kernel - */ - enic_kdump_kernel_config(enic); + err = enic_alloc_enic_resources(enic); + if (err) { + dev_err(dev, "Failed to allocate enic resources\n"); + return err; + } - /* Set interrupt mode based on resource counts and system - * capabilities - */ + /* Set interrupt mode based on system capabilities */ err = enic_set_intr_mode(enic); if (err) { dev_err(dev, "Failed to set intr mode based on resource " "counts and system capabilities, aborting\n"); - return err; + goto err_out_free_vnic_resources; + } + + /* Adjust resource counts based on most constrained resources */ + err = enic_adjust_resources(enic); + if (err) { + dev_err(dev, "Failed to adjust resources\n"); + goto err_out_free_vnic_resources; } /* Allocate and configure vNIC resources @@ -2788,6 +2863,7 @@ err_out_free_vnic_resources: enic_free_affinity_hint(enic); enic_clear_intr_mode(enic); enic_free_vnic_resources(enic); + enic_free_enic_resources(enic); return err; } @@ -2993,7 +3069,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work); for (i = 0; i < enic->wq_count; i++) - spin_lock_init(&enic->wq_lock[i]); + spin_lock_init(&enic->wq[i].lock); /* Register net device */ diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c index 1c48aebdbab0..126125199833 100644 --- a/drivers/net/ethernet/cisco/enic/enic_res.c +++ b/drivers/net/ethernet/cisco/enic/enic_res.c @@ -176,9 +176,9 @@ void enic_free_vnic_resources(struct enic *enic) unsigned int i; for (i = 0; i < enic->wq_count; i++) - vnic_wq_free(&enic->wq[i]); + vnic_wq_free(&enic->wq[i].vwq); for (i = 0; i < enic->rq_count; i++) - vnic_rq_free(&enic->rq[i]); + vnic_rq_free(&enic->rq[i].vrq); for (i = 0; i < enic->cq_count; i++) vnic_cq_free(&enic->cq[i]); for (i = 0; i < enic->intr_count; i++) @@ -187,16 +187,21 @@ void enic_free_vnic_resources(struct enic *enic) void enic_get_res_counts(struct enic *enic) { - enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ); - enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ); - enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ); - enic->intr_count = vnic_dev_get_res_count(enic->vdev, - RES_TYPE_INTR_CTRL); + enic->wq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ); + enic->rq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ); + enic->cq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ); + enic->intr_avail = 
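enic_alloc_enic_resources()/enic_free_enic_resources() use the standard kernel unwind idiom: because kfree(NULL) is a no-op and every pointer is NULLed after freeing, a single teardown helper is safe to call from any failure point. Reduced to two members, with throwaway names:

	#include <linux/slab.h>

	struct demo_res {
		int *a;
		long *b;
	};

	static void demo_free_res(struct demo_res *r)
	{
		kfree(r->a);
		r->a = NULL;
		kfree(r->b);
		r->b = NULL;
	}

	static int demo_alloc_res(struct demo_res *r, unsigned int n)
	{
		r->a = kcalloc(n, sizeof(*r->a), GFP_KERNEL);
		if (!r->a)
			goto free_all;

		r->b = kcalloc(n, sizeof(*r->b), GFP_KERNEL);
		if (!r->b)
			goto free_all;

		return 0;

	free_all:
		demo_free_res(r);	/* partial allocation is fine: kfree(NULL) is a no-op */
		return -ENOMEM;
	}

NULLing the pointers as they are freed also makes repeated calls harmless, which suits a helper invoked from both the deinit path and the init error path.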
vnic_dev_get_res_count(enic->vdev, + RES_TYPE_INTR_CTRL); + + enic->wq_count = enic->wq_avail; + enic->rq_count = enic->rq_avail; + enic->cq_count = enic->cq_avail; + enic->intr_count = enic->intr_avail; dev_info(enic_get_dev(enic), "vNIC resources avail: wq %d rq %d cq %d intr %d\n", - enic->wq_count, enic->rq_count, - enic->cq_count, enic->intr_count); + enic->wq_avail, enic->rq_avail, + enic->cq_avail, enic->intr_avail); } void enic_init_vnic_resources(struct enic *enic) @@ -221,9 +226,12 @@ void enic_init_vnic_resources(struct enic *enic) switch (intr_mode) { case VNIC_DEV_INTR_MODE_INTX: + error_interrupt_enable = 1; + error_interrupt_offset = ENIC_LEGACY_ERR_INTR; + break; case VNIC_DEV_INTR_MODE_MSIX: error_interrupt_enable = 1; - error_interrupt_offset = enic->intr_count - 2; + error_interrupt_offset = enic_msix_err_intr(enic); break; default: error_interrupt_enable = 0; @@ -233,7 +241,7 @@ void enic_init_vnic_resources(struct enic *enic) for (i = 0; i < enic->rq_count; i++) { cq_index = i; - vnic_rq_init(&enic->rq[i], + vnic_rq_init(&enic->rq[i].vrq, cq_index, error_interrupt_enable, error_interrupt_offset); @@ -241,7 +249,7 @@ void enic_init_vnic_resources(struct enic *enic) for (i = 0; i < enic->wq_count; i++) { cq_index = enic->rq_count + i; - vnic_wq_init(&enic->wq[i], + vnic_wq_init(&enic->wq[i].vwq, cq_index, error_interrupt_enable, error_interrupt_offset); @@ -249,15 +257,15 @@ void enic_init_vnic_resources(struct enic *enic) /* Init CQ resources * - * CQ[0 - n+m-1] point to INTR[0] for INTx, MSI - * CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X + * All CQs point to INTR[0] for INTx, MSI + * CQ[i] point to INTR[ENIC_MSIX_IO_INTR_BASE + i] for MSI-X */ for (i = 0; i < enic->cq_count; i++) { switch (intr_mode) { case VNIC_DEV_INTR_MODE_MSIX: - interrupt_offset = i; + interrupt_offset = ENIC_MSIX_IO_INTR_BASE + i; break; default: interrupt_offset = 0; @@ -322,7 +330,7 @@ int enic_alloc_vnic_resources(struct enic *enic) */ for (i = 0; i < enic->wq_count; i++) { - err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i, + err = vnic_wq_alloc(enic->vdev, &enic->wq[i].vwq, i, enic->config.wq_desc_count, sizeof(struct wq_enet_desc)); if (err) @@ -330,7 +338,7 @@ int enic_alloc_vnic_resources(struct enic *enic) } for (i = 0; i < enic->rq_count; i++) { - err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i, + err = vnic_rq_alloc(enic->vdev, &enic->rq[i].vrq, i, enic->config.rq_desc_count, sizeof(struct rq_enet_desc)); if (err) diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h index 6f0e58a2a58a..9e1d44ae92cc 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h @@ -56,7 +56,7 @@ DECLARE_EVENT_CLASS(dpaa_eth_fd, __entry->fd_format = qm_fd_get_format(fd); __entry->fd_offset = qm_fd_get_offset(fd); __entry->fd_length = qm_fd_get_length(fd); - __entry->fd_status = fd->status; + __entry->fd_status = __be32_to_cpu(fd->status); __assign_str(name); ), diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c index 31dbe89dd3a9..fc41078c4f5d 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c @@ -632,7 +632,7 @@ static int enetc4_pf_netdev_create(struct enetc_si *si) priv = netdev_priv(ndev); priv->ref_clk = devm_clk_get_optional(dev, "ref"); if (IS_ERR(priv->ref_clk)) { - dev_err(dev, "Get referencce clock failed\n"); + dev_err(dev, "Get 
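enic_get_res_counts() now records what the hardware advertises in the *_avail fields and seeds the working *_count values from them, so enic_set_intr_mode() and enic_adjust_resources() can shrink the counts without losing the original capacities. The enic_msix_err_intr() helper and ENIC_MSIX_IO_INTR_BASE used above come from enic.h, which this excerpt doesn't show; given that intr_count = cq_count + ENIC_MSIX_RESERVED_INTR, a plausible reconstruction is:

	/* hypothetical reconstruction; the real definitions are in enic.h */
	#define ENIC_MSIX_RESERVED_INTR	2	/* one error + one notify vector */
	#define ENIC_MSIX_IO_INTR_BASE	0	/* CQ i maps to vector base + i */

	static inline unsigned int enic_msix_err_intr(struct enic *enic)
	{
		return enic->intr_count - 2;	/* second-to-last vector */
	}

	static inline unsigned int enic_msix_notify_intr(struct enic *enic)
	{
		return enic->intr_count - 1;	/* last vector */
	}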
reference clock failed\n"); err = PTR_ERR(priv->ref_clk); goto err_clk_get; } diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index a76ce41eb197..c47b4a743d93 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -610,19 +610,11 @@ static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs) if (!num_vfs) { enetc_msg_psi_free(pf); - kfree(pf->vf_state); pf->num_vfs = 0; pci_disable_sriov(pdev); } else { pf->num_vfs = num_vfs; - pf->vf_state = kcalloc(num_vfs, sizeof(struct enetc_vf_state), - GFP_KERNEL); - if (!pf->vf_state) { - pf->num_vfs = 0; - return -ENOMEM; - } - err = enetc_msg_psi_init(pf); if (err) { dev_err(&pdev->dev, "enetc_msg_psi_init (%d)\n", err); @@ -641,7 +633,6 @@ static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs) err_en_sriov: enetc_msg_psi_free(pf); err_msg_psi: - kfree(pf->vf_state); pf->num_vfs = 0; return err; @@ -1023,9 +1014,16 @@ static int enetc_pf_probe(struct pci_dev *pdev, pf = enetc_si_priv(si); pf->si = si; - pf->total_vfs = pci_sriov_get_totalvfs(pdev); pf->ops = &enetc_pf_ops; + pf->total_vfs = pci_sriov_get_totalvfs(pdev); + if (pf->total_vfs) { + pf->vf_state = kcalloc(pf->total_vfs, sizeof(struct enetc_vf_state), + GFP_KERNEL); + if (!pf->vf_state) + goto err_alloc_vf_state; + } + err = enetc_setup_mac_addresses(node, pf); if (err) goto err_setup_mac_addresses; @@ -1102,6 +1100,8 @@ err_alloc_si_res: free_netdev(ndev); err_alloc_netdev: err_setup_mac_addresses: + kfree(pf->vf_state); +err_alloc_vf_state: enetc_psi_destroy(pdev); err_psi_create: return err; @@ -1128,6 +1128,7 @@ static void enetc_pf_remove(struct pci_dev *pdev) enetc_free_si_resources(priv); free_netdev(si->ndev); + kfree(pf->vf_state); enetc_psi_destroy(pdev); } diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c index 31e630638090..a5f8ce576b6e 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -78,11 +78,18 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr) { struct enetc_ndev_priv *priv = netdev_priv(ndev); struct sockaddr *saddr = addr; + int err; if (!is_valid_ether_addr(saddr->sa_data)) return -EADDRNOTAVAIL; - return enetc_msg_vsi_set_primary_mac_addr(priv, saddr); + err = enetc_msg_vsi_set_primary_mac_addr(priv, saddr); + if (err) + return err; + + eth_hw_addr_set(ndev, saddr->sa_data); + + return 0; } static int enetc_vf_set_features(struct net_device *ndev, @@ -167,9 +174,11 @@ static int enetc_vf_probe(struct pci_dev *pdev, si = pci_get_drvdata(pdev); si->revision = ENETC_REV_1_0; err = enetc_get_driver_data(si); - if (err) - return dev_err_probe(&pdev->dev, err, - "Could not get VF driver data\n"); + if (err) { + dev_err_probe(&pdev->dev, err, + "Could not get VF driver data\n"); + goto err_alloc_netdev; + } enetc_get_si_caps(si); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index d3ddca22d6b0..6663c1768089 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3591,22 +3591,23 @@ static int ucc_geth_probe(struct platform_device* ofdev) if ((ucc_num < 0) || (ucc_num > 7)) return -ENODEV; - ug_info = kmemdup(&ugeth_primary_info, sizeof(*ug_info), GFP_KERNEL); - if (ug_info == NULL) + ug_info = devm_kmemdup(&ofdev->dev, &ugeth_primary_info, + sizeof(*ug_info), GFP_KERNEL); + if 
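The enetc change moves the vf_state allocation out of the SR-IOV enable/disable path and into probe, sized once from pci_sriov_get_totalvfs(); the state then survives VF disable instead of being freed and reallocated on every cycle (presumably the motivation, the commit message isn't part of this excerpt). The resulting shape, with throwaway names:

	#include <linux/pci.h>
	#include <linux/slab.h>

	struct demo_vf_state {
		u8 flags;	/* stand-in for per-VF bookkeeping */
	};

	struct demo_pf {
		int total_vfs;
		struct demo_vf_state *vf_state;
	};

	static int demo_pf_alloc_vf_state(struct pci_dev *pdev, struct demo_pf *pf)
	{
		pf->total_vfs = pci_sriov_get_totalvfs(pdev);
		if (!pf->total_vfs)
			return 0;	/* device exposes no VFs to track */

		pf->vf_state = kcalloc(pf->total_vfs,
				       sizeof(*pf->vf_state), GFP_KERNEL);
		if (!pf->vf_state)
			return -ENOMEM;

		return 0;
	}

The matching kfree() correspondingly moves from enetc_sriov_configure() to enetc_pf_remove().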
(!ug_info) return -ENOMEM; ug_info->uf_info.ucc_num = ucc_num; err = ucc_geth_parse_clock(np, "rx", &ug_info->uf_info.rx_clock); if (err) - goto err_free_info; + return err; err = ucc_geth_parse_clock(np, "tx", &ug_info->uf_info.tx_clock); if (err) - goto err_free_info; + return err; err = of_address_to_resource(np, 0, &res); if (err) - goto err_free_info; + return err; ug_info->uf_info.regs = res.start; ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); @@ -3619,7 +3620,7 @@ static int ucc_geth_probe(struct platform_device* ofdev) */ err = of_phy_register_fixed_link(np); if (err) - goto err_free_info; + return err; ug_info->phy_node = of_node_get(np); } @@ -3687,9 +3688,8 @@ static int ucc_geth_probe(struct platform_device* ofdev) ug_info->uf_info.irq); /* Create an ethernet device instance */ - dev = alloc_etherdev(sizeof(*ugeth)); - - if (dev == NULL) { + dev = devm_alloc_etherdev(&ofdev->dev, sizeof(*ugeth)); + if (!dev) { err = -ENOMEM; goto err_deregister_fixed_link; } @@ -3724,15 +3724,17 @@ static int ucc_geth_probe(struct platform_device* ofdev) /* Carrier starts down, phylib will bring it up */ netif_carrier_off(dev); - err = register_netdev(dev); + err = devm_register_netdev(&ofdev->dev, dev); if (err) { if (netif_msg_probe(ugeth)) pr_err("%s: Cannot register net device, aborting\n", dev->name); - goto err_free_netdev; + goto err_deregister_fixed_link; } - of_get_ethdev_address(np, dev); + err = of_get_ethdev_address(np, dev); + if (err == -EPROBE_DEFER) + goto err_deregister_fixed_link; ugeth->ug_info = ug_info; ugeth->dev = device; @@ -3741,16 +3743,11 @@ static int ucc_geth_probe(struct platform_device* ofdev) return 0; -err_free_netdev: - free_netdev(dev); err_deregister_fixed_link: if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); of_node_put(ug_info->tbi_node); of_node_put(ug_info->phy_node); -err_free_info: - kfree(ug_info); - return err; } @@ -3760,14 +3757,11 @@ static void ucc_geth_remove(struct platform_device* ofdev) struct ucc_geth_private *ugeth = netdev_priv(dev); struct device_node *np = ofdev->dev.of_node; - unregister_netdev(dev); ucc_geth_memclean(ugeth); if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); of_node_put(ugeth->ug_info->tbi_node); of_node_put(ugeth->ug_info->phy_node); - kfree(ugeth->ug_info); - free_netdev(dev); } static const struct of_device_id ucc_geth_match[] = { diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index d72657444ef3..2ae34d01fd36 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -512,7 +512,7 @@ struct hnae_ae_ops { struct net_device_stats *net_stats); void (*get_stats)(struct hnae_handle *handle, u64 *data); void (*get_strings)(struct hnae_handle *handle, - u32 stringset, u8 *data); + u32 stringset, u8 **data); int (*get_sset_count)(struct hnae_handle *handle, int stringset); void (*update_led_status)(struct hnae_handle *handle); int (*set_led_id)(struct hnae_handle *handle, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index bc3e406f0139..8ce910f8d0cc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -730,15 +730,14 @@ static void hns_ae_get_stats(struct hnae_handle *handle, u64 *data) hns_dsaf_get_stats(vf_cb->dsaf_dev, p, vf_cb->port_index); } -static void hns_ae_get_strings(struct hnae_handle *handle, - u32 stringset, u8 *data) +static void 
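The ucc_geth conversion is a textbook devm cleanup: once the info copy, the net_device and its registration are all device-managed, most probe error labels collapse into plain returns and remove() shrinks accordingly. A minimal skeleton of that style, with hypothetical names:

	#include <linux/etherdevice.h>
	#include <linux/platform_device.h>

	struct demo_info {
		int clock;		/* stand-in for board parameters */
	};

	static const struct demo_info demo_default_info;

	struct demo_priv {
		struct demo_info *info;
	};

	static int demo_probe(struct platform_device *ofdev)
	{
		struct net_device *ndev;
		struct demo_priv *priv;

		ndev = devm_alloc_etherdev(&ofdev->dev, sizeof(*priv));
		if (!ndev)
			return -ENOMEM;	/* devm unwinds anything acquired earlier */

		priv = netdev_priv(ndev);
		priv->info = devm_kmemdup(&ofdev->dev, &demo_default_info,
					  sizeof(*priv->info), GFP_KERNEL);
		if (!priv->info)
			return -ENOMEM;

		/* unregistered and freed automatically on driver detach */
		return devm_register_netdev(&ofdev->dev, ndev);
	}

Note the one label the real patch keeps: fixed-link registration still needs explicit deregistration, since it has no devm variant here.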
hns_ae_get_strings(struct hnae_handle *handle, u32 stringset, + u8 **data) { int port; int idx; struct hns_mac_cb *mac_cb; struct hns_ppe_cb *ppe_cb; struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev); - u8 *p = data; struct hnae_vf_cb *vf_cb; assert(handle); @@ -748,19 +747,14 @@ static void hns_ae_get_strings(struct hnae_handle *handle, mac_cb = hns_get_mac_cb(handle); ppe_cb = hns_get_ppe_cb(handle); - for (idx = 0; idx < handle->q_num; idx++) { - hns_rcb_get_strings(stringset, p, idx); - p += ETH_GSTRING_LEN * hns_rcb_get_ring_sset_count(stringset); - } - - hns_ppe_get_strings(ppe_cb, stringset, p); - p += ETH_GSTRING_LEN * hns_ppe_get_sset_count(stringset); + for (idx = 0; idx < handle->q_num; idx++) + hns_rcb_get_strings(stringset, data, idx); - hns_mac_get_strings(mac_cb, stringset, p); - p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset); + hns_ppe_get_strings(ppe_cb, stringset, data); + hns_mac_get_strings(mac_cb, stringset, data); if (mac_cb->mac_type == HNAE_PORT_SERVICE) - hns_dsaf_get_strings(stringset, p, port, dsaf_dev); + hns_dsaf_get_strings(stringset, data, port, dsaf_dev); } static int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index bdb7afaabdd0..400933ca1a29 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -669,16 +669,15 @@ static void hns_gmac_get_stats(void *mac_drv, u64 *data) } } -static void hns_gmac_get_strings(u32 stringset, u8 *data) +static void hns_gmac_get_strings(u32 stringset, u8 **data) { - u8 *buff = data; u32 i; if (stringset != ETH_SS_STATS) return; for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) - ethtool_puts(&buff, g_gmac_stats_string[i].desc); + ethtool_puts(data, g_gmac_stats_string[i].desc); } static int hns_gmac_get_sset_count(int stringset) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 5fa9b2eeb929..bc6b269be299 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -1190,8 +1190,7 @@ void hns_mac_get_stats(struct hns_mac_cb *mac_cb, u64 *data) mac_ctrl_drv->get_ethtool_stats(mac_ctrl_drv, data); } -void hns_mac_get_strings(struct hns_mac_cb *mac_cb, - int stringset, u8 *data) +void hns_mac_get_strings(struct hns_mac_cb *mac_cb, int stringset, u8 **data) { struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index edf0bcf76ac9..630f01cf7a71 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -378,7 +378,7 @@ struct mac_driver { void (*get_regs)(void *mac_drv, void *data); int (*get_regs_count)(void); /* get strings name for ethtool statistic */ - void (*get_strings)(u32 stringset, u8 *data); + void (*get_strings)(u32 stringset, u8 **data); /* get the number of strings*/ int (*get_sset_count)(int stringset); @@ -445,7 +445,7 @@ int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb, enum hnae_loop loop, int en); void hns_mac_update_stats(struct hns_mac_cb *mac_cb); void hns_mac_get_stats(struct hns_mac_cb *mac_cb, u64 *data); -void hns_mac_get_strings(struct hns_mac_cb *mac_cb, int stringset, u8 *data); +void hns_mac_get_strings(struct hns_mac_cb *mac_cb, int stringset, u8 
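Every string hunk in the hns/hns3 part of this series is the same mechanical conversion: instead of each layer returning the advanced buffer position, or callers doing p += ETH_GSTRING_LEN bookkeeping by hand, the functions take a u8 **data cursor and let the ethtool helpers advance it one ETH_GSTRING_LEN slot per string. A minimal sketch of the pattern:

	#include <linux/ethtool.h>

	static const char * const demo_stat_names[] = { "rx_pkts", "tx_pkts" };

	static void demo_get_strings(u32 stringset, u8 **data)
	{
		u32 i;

		if (stringset != ETH_SS_STATS)
			return;

		for (i = 0; i < ARRAY_SIZE(demo_stat_names); i++)
			ethtool_puts(data, demo_stat_names[i]);	/* advances *data */

		ethtool_sprintf(data, "ring%u_bytes", 0);	/* formatted variant */
	}

Because the cursor is shared down the call chain, composing callees (RCB, then PPE, then MAC, then DSAF strings) no longer requires each caller to know how many entries the callee wrote.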
**data); int hns_mac_get_sset_count(struct hns_mac_cb *mac_cb, int stringset); void hns_mac_get_regs(struct hns_mac_cb *mac_cb, void *data); int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index eb60f45a3460..851490346261 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -2590,55 +2590,34 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data) p[i] = 0xdddddddd; } -static char *hns_dsaf_get_node_stats_strings(char *data, int node, - struct dsaf_device *dsaf_dev) +static void hns_dsaf_get_node_stats_strings(u8 **data, int node, + struct dsaf_device *dsaf_dev) { - char *buff = data; - int i; bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver); + int i; - snprintf(buff, ETH_GSTRING_LEN, "innod%d_pad_drop_pkts", node); - buff += ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "innod%d_manage_pkts", node); - buff += ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkts", node); - buff += ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkt_id", node); - buff += ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pause_frame", node); - buff += ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "innod%d_release_buf_num", node); - buff += ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "innod%d_sbm_drop_pkts", node); - buff += ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "innod%d_crc_false_pkts", node); - buff += ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "innod%d_bp_drop_pkts", node); - buff += ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "innod%d_lookup_rslt_drop_pkts", node); - buff += ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "innod%d_local_rslt_fail_pkts", node); - buff += ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "innod%d_vlan_drop_pkts", node); - buff += ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "innod%d_stp_drop_pkts", node); - buff += ETH_GSTRING_LEN; + ethtool_sprintf(data, "innod%d_pad_drop_pkts", node); + ethtool_sprintf(data, "innod%d_manage_pkts", node); + ethtool_sprintf(data, "innod%d_rx_pkts", node); + ethtool_sprintf(data, "innod%d_rx_pkt_id", node); + ethtool_sprintf(data, "innod%d_rx_pause_frame", node); + ethtool_sprintf(data, "innod%d_release_buf_num", node); + ethtool_sprintf(data, "innod%d_sbm_drop_pkts", node); + ethtool_sprintf(data, "innod%d_crc_false_pkts", node); + ethtool_sprintf(data, "innod%d_bp_drop_pkts", node); + ethtool_sprintf(data, "innod%d_lookup_rslt_drop_pkts", node); + ethtool_sprintf(data, "innod%d_local_rslt_fail_pkts", node); + ethtool_sprintf(data, "innod%d_vlan_drop_pkts", node); + ethtool_sprintf(data, "innod%d_stp_drop_pkts", node); if (node < DSAF_SERVICE_NW_NUM && !is_ver1) { for (i = 0; i < DSAF_PRIO_NR; i++) { - snprintf(buff + 0 * ETH_GSTRING_LEN * DSAF_PRIO_NR, - ETH_GSTRING_LEN, "inod%d_pfc_prio%d_pkts", - node, i); - snprintf(buff + 1 * ETH_GSTRING_LEN * DSAF_PRIO_NR, - ETH_GSTRING_LEN, "onod%d_pfc_prio%d_pkts", - node, i); - buff += ETH_GSTRING_LEN; + ethtool_sprintf(data, "inod%d_pfc_prio%d_pkts", node, + i); + ethtool_sprintf(data, "onod%d_pfc_prio%d_pkts", node, + i); } - buff += 1 * DSAF_PRIO_NR * ETH_GSTRING_LEN; } - snprintf(buff, ETH_GSTRING_LEN, "onnod%d_tx_pkts", node); - buff += ETH_GSTRING_LEN; - - return buff; + ethtool_sprintf(data, "onnod%d_tx_pkts", node); } static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data, 
@@ -2720,21 +2699,20 @@ int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset) *@port:port index *@dsaf_dev: dsaf device */ -void hns_dsaf_get_strings(int stringset, u8 *data, int port, +void hns_dsaf_get_strings(int stringset, u8 **data, int port, struct dsaf_device *dsaf_dev) { - char *buff = (char *)data; int node = port; if (stringset != ETH_SS_STATS) return; /* for ge/xge node info */ - buff = hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev); + hns_dsaf_get_node_stats_strings(data, node, dsaf_dev); /* for ppe node info */ node = port + DSAF_PPE_INODE_BASE; - (void)hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev); + hns_dsaf_get_node_stats_strings(data, node, dsaf_dev); } /** diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 5526a10caac5..0eb03dff1a8b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -442,7 +442,7 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 inode_num); int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset); void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port); -void hns_dsaf_get_strings(int stringset, u8 *data, int port, +void hns_dsaf_get_strings(int stringset, u8 **data, int port, struct dsaf_device *dsaf_dev); void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index a08d1f0a5a16..5013beb4d282 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -457,24 +457,23 @@ int hns_ppe_get_regs_count(void) * @stringset: string set type * @data: output string */ -void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data) +void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 **data) { int index = ppe_cb->index; - u8 *buff = data; - - ethtool_sprintf(&buff, "ppe%d_rx_sw_pkt", index); - ethtool_sprintf(&buff, "ppe%d_rx_pkt_ok", index); - ethtool_sprintf(&buff, "ppe%d_rx_drop_pkt_no_bd", index); - ethtool_sprintf(&buff, "ppe%d_rx_alloc_buf_fail", index); - ethtool_sprintf(&buff, "ppe%d_rx_alloc_buf_wait", index); - ethtool_sprintf(&buff, "ppe%d_rx_pkt_drop_no_buf", index); - ethtool_sprintf(&buff, "ppe%d_rx_pkt_err_fifo_full", index); - - ethtool_sprintf(&buff, "ppe%d_tx_bd", index); - ethtool_sprintf(&buff, "ppe%d_tx_pkt", index); - ethtool_sprintf(&buff, "ppe%d_tx_pkt_ok", index); - ethtool_sprintf(&buff, "ppe%d_tx_pkt_err_fifo_empty", index); - ethtool_sprintf(&buff, "ppe%d_tx_pkt_err_csum_fail", index); + + ethtool_sprintf(data, "ppe%d_rx_sw_pkt", index); + ethtool_sprintf(data, "ppe%d_rx_pkt_ok", index); + ethtool_sprintf(data, "ppe%d_rx_drop_pkt_no_bd", index); + ethtool_sprintf(data, "ppe%d_rx_alloc_buf_fail", index); + ethtool_sprintf(data, "ppe%d_rx_alloc_buf_wait", index); + ethtool_sprintf(data, "ppe%d_rx_pkt_drop_no_buf", index); + ethtool_sprintf(data, "ppe%d_rx_pkt_err_fifo_full", index); + + ethtool_sprintf(data, "ppe%d_tx_bd", index); + ethtool_sprintf(data, "ppe%d_tx_pkt", index); + ethtool_sprintf(data, "ppe%d_tx_pkt_ok", index); + ethtool_sprintf(data, "ppe%d_tx_pkt_err_fifo_empty", index); + ethtool_sprintf(data, "ppe%d_tx_pkt_err_csum_fail", index); } void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h 
b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h index 7e00231c1acf..602c8e971fe4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h @@ -109,7 +109,7 @@ int hns_ppe_get_sset_count(int stringset); int hns_ppe_get_regs_count(void); void hns_ppe_get_regs(struct hns_ppe_cb *ppe_cb, void *data); -void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data); +void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 **data); void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data); void hns_ppe_set_tso_enable(struct hns_ppe_cb *ppe_cb, u32 value); void hns_ppe_set_rss_key(struct hns_ppe_cb *ppe_cb, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 93344563a259..46af467aa596 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -923,44 +923,42 @@ int hns_rcb_get_ring_regs_count(void) *@data:strings name value *@index:queue index */ -void hns_rcb_get_strings(int stringset, u8 *data, int index) +void hns_rcb_get_strings(int stringset, u8 **data, int index) { - u8 *buff = data; - if (stringset != ETH_SS_STATS) return; - ethtool_sprintf(&buff, "tx_ring%d_rcb_pkt_num", index); - ethtool_sprintf(&buff, "tx_ring%d_ppe_tx_pkt_num", index); - ethtool_sprintf(&buff, "tx_ring%d_ppe_drop_pkt_num", index); - ethtool_sprintf(&buff, "tx_ring%d_fbd_num", index); - - ethtool_sprintf(&buff, "tx_ring%d_pkt_num", index); - ethtool_sprintf(&buff, "tx_ring%d_bytes", index); - ethtool_sprintf(&buff, "tx_ring%d_err_cnt", index); - ethtool_sprintf(&buff, "tx_ring%d_io_err", index); - ethtool_sprintf(&buff, "tx_ring%d_sw_err", index); - ethtool_sprintf(&buff, "tx_ring%d_seg_pkt", index); - ethtool_sprintf(&buff, "tx_ring%d_restart_queue", index); - ethtool_sprintf(&buff, "tx_ring%d_tx_busy", index); - - ethtool_sprintf(&buff, "rx_ring%d_rcb_pkt_num", index); - ethtool_sprintf(&buff, "rx_ring%d_ppe_pkt_num", index); - ethtool_sprintf(&buff, "rx_ring%d_ppe_drop_pkt_num", index); - ethtool_sprintf(&buff, "rx_ring%d_fbd_num", index); - - ethtool_sprintf(&buff, "rx_ring%d_pkt_num", index); - ethtool_sprintf(&buff, "rx_ring%d_bytes", index); - ethtool_sprintf(&buff, "rx_ring%d_err_cnt", index); - ethtool_sprintf(&buff, "rx_ring%d_io_err", index); - ethtool_sprintf(&buff, "rx_ring%d_sw_err", index); - ethtool_sprintf(&buff, "rx_ring%d_seg_pkt", index); - ethtool_sprintf(&buff, "rx_ring%d_reuse_pg", index); - ethtool_sprintf(&buff, "rx_ring%d_len_err", index); - ethtool_sprintf(&buff, "rx_ring%d_non_vld_desc_err", index); - ethtool_sprintf(&buff, "rx_ring%d_bd_num_err", index); - ethtool_sprintf(&buff, "rx_ring%d_l2_err", index); - ethtool_sprintf(&buff, "rx_ring%d_l3l4csum_err", index); + ethtool_sprintf(data, "tx_ring%d_rcb_pkt_num", index); + ethtool_sprintf(data, "tx_ring%d_ppe_tx_pkt_num", index); + ethtool_sprintf(data, "tx_ring%d_ppe_drop_pkt_num", index); + ethtool_sprintf(data, "tx_ring%d_fbd_num", index); + + ethtool_sprintf(data, "tx_ring%d_pkt_num", index); + ethtool_sprintf(data, "tx_ring%d_bytes", index); + ethtool_sprintf(data, "tx_ring%d_err_cnt", index); + ethtool_sprintf(data, "tx_ring%d_io_err", index); + ethtool_sprintf(data, "tx_ring%d_sw_err", index); + ethtool_sprintf(data, "tx_ring%d_seg_pkt", index); + ethtool_sprintf(data, "tx_ring%d_restart_queue", index); + ethtool_sprintf(data, "tx_ring%d_tx_busy", index); + + ethtool_sprintf(data, "rx_ring%d_rcb_pkt_num", index); 
+ ethtool_sprintf(data, "rx_ring%d_ppe_pkt_num", index); + ethtool_sprintf(data, "rx_ring%d_ppe_drop_pkt_num", index); + ethtool_sprintf(data, "rx_ring%d_fbd_num", index); + + ethtool_sprintf(data, "rx_ring%d_pkt_num", index); + ethtool_sprintf(data, "rx_ring%d_bytes", index); + ethtool_sprintf(data, "rx_ring%d_err_cnt", index); + ethtool_sprintf(data, "rx_ring%d_io_err", index); + ethtool_sprintf(data, "rx_ring%d_sw_err", index); + ethtool_sprintf(data, "rx_ring%d_seg_pkt", index); + ethtool_sprintf(data, "rx_ring%d_reuse_pg", index); + ethtool_sprintf(data, "rx_ring%d_len_err", index); + ethtool_sprintf(data, "rx_ring%d_non_vld_desc_err", index); + ethtool_sprintf(data, "rx_ring%d_bd_num_err", index); + ethtool_sprintf(data, "rx_ring%d_l2_err", index); + ethtool_sprintf(data, "rx_ring%d_l3l4csum_err", index); } void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h index c1e9b6997853..0f4cc184ef39 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h @@ -157,7 +157,7 @@ int hns_rcb_get_ring_regs_count(void); void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data); -void hns_rcb_get_strings(int stringset, u8 *data, int index); +void hns_rcb_get_strings(int stringset, u8 **data, int index); void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size); void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index c58833eb4830..dbc44c2c26c2 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c @@ -743,16 +743,15 @@ static void hns_xgmac_get_stats(void *mac_drv, u64 *data) *@stringset: type of values in data *@data:data for value of string name */ -static void hns_xgmac_get_strings(u32 stringset, u8 *data) +static void hns_xgmac_get_strings(u32 stringset, u8 **data) { - u8 *buff = data; u32 i; if (stringset != ETH_SS_STATS) return; for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) - ethtool_puts(&buff, g_xgmac_stats_string[i].desc); + ethtool_puts(data, g_xgmac_stats_string[i].desc); } /** diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index a5bb306b2cf1..6c458f037262 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -903,7 +903,6 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct hns_nic_priv *priv = netdev_priv(netdev); struct hnae_handle *h = priv->ae_handle; - u8 *buff = data; if (!h->dev->ops->get_strings) { netdev_err(netdev, "h->dev->ops->get_strings is null!\n"); @@ -912,43 +911,43 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data) if (stringset == ETH_SS_TEST) { if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII) - ethtool_puts(&buff, + ethtool_puts(&data, hns_nic_test_strs[MAC_INTERNALLOOP_MAC]); - ethtool_puts(&buff, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES]); + ethtool_puts(&data, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES]); if ((netdev->phydev) && (!netdev->phydev->is_c45)) - ethtool_puts(&buff, + ethtool_puts(&data, hns_nic_test_strs[MAC_INTERNALLOOP_PHY]); } else { - ethtool_puts(&buff, "rx_packets"); - ethtool_puts(&buff, "tx_packets"); - ethtool_puts(&buff, 
"rx_bytes"); - ethtool_puts(&buff, "tx_bytes"); - ethtool_puts(&buff, "rx_errors"); - ethtool_puts(&buff, "tx_errors"); - ethtool_puts(&buff, "rx_dropped"); - ethtool_puts(&buff, "tx_dropped"); - ethtool_puts(&buff, "multicast"); - ethtool_puts(&buff, "collisions"); - ethtool_puts(&buff, "rx_over_errors"); - ethtool_puts(&buff, "rx_crc_errors"); - ethtool_puts(&buff, "rx_frame_errors"); - ethtool_puts(&buff, "rx_fifo_errors"); - ethtool_puts(&buff, "rx_missed_errors"); - ethtool_puts(&buff, "tx_aborted_errors"); - ethtool_puts(&buff, "tx_carrier_errors"); - ethtool_puts(&buff, "tx_fifo_errors"); - ethtool_puts(&buff, "tx_heartbeat_errors"); - ethtool_puts(&buff, "rx_length_errors"); - ethtool_puts(&buff, "tx_window_errors"); - ethtool_puts(&buff, "rx_compressed"); - ethtool_puts(&buff, "tx_compressed"); - ethtool_puts(&buff, "netdev_rx_dropped"); - ethtool_puts(&buff, "netdev_tx_dropped"); - - ethtool_puts(&buff, "netdev_tx_timeout"); - - h->dev->ops->get_strings(h, stringset, buff); + ethtool_puts(&data, "rx_packets"); + ethtool_puts(&data, "tx_packets"); + ethtool_puts(&data, "rx_bytes"); + ethtool_puts(&data, "tx_bytes"); + ethtool_puts(&data, "rx_errors"); + ethtool_puts(&data, "tx_errors"); + ethtool_puts(&data, "rx_dropped"); + ethtool_puts(&data, "tx_dropped"); + ethtool_puts(&data, "multicast"); + ethtool_puts(&data, "collisions"); + ethtool_puts(&data, "rx_over_errors"); + ethtool_puts(&data, "rx_crc_errors"); + ethtool_puts(&data, "rx_frame_errors"); + ethtool_puts(&data, "rx_fifo_errors"); + ethtool_puts(&data, "rx_missed_errors"); + ethtool_puts(&data, "tx_aborted_errors"); + ethtool_puts(&data, "tx_carrier_errors"); + ethtool_puts(&data, "tx_fifo_errors"); + ethtool_puts(&data, "tx_heartbeat_errors"); + ethtool_puts(&data, "rx_length_errors"); + ethtool_puts(&data, "tx_window_errors"); + ethtool_puts(&data, "rx_compressed"); + ethtool_puts(&data, "tx_compressed"); + ethtool_puts(&data, "netdev_rx_dropped"); + ethtool_puts(&data, "netdev_tx_dropped"); + + ethtool_puts(&data, "netdev_tx_timeout"); + + h->dev->ops->get_strings(h, stringset, &data); } } @@ -970,7 +969,7 @@ static int hns_get_sset_count(struct net_device *netdev, int stringset) return -EOPNOTSUPP; } if (stringset == ETH_SS_TEST) { - u32 cnt = (sizeof(hns_nic_test_strs) / ETH_GSTRING_LEN); + u32 cnt = ARRAY_SIZE(hns_nic_test_strs); if (priv->ae_handle->phy_if == PHY_INTERFACE_MODE_XGMII) cnt--; diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 67b0bf310daa..9a63fbc69408 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -25,8 +25,11 @@ void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo) pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); if (!pci_id) continue; - if (IS_ENABLED(CONFIG_PCI_IOV)) + if (IS_ENABLED(CONFIG_PCI_IOV)) { + device_lock(&ae_dev->pdev->dev); pci_disable_sriov(ae_dev->pdev); + device_unlock(&ae_dev->pdev->dev); + } } } EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 27dbe367f3d3..710a8f9f2248 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -677,7 +677,7 @@ struct hnae3_ae_ops { void (*get_mac_stats)(struct hnae3_handle *handle, struct hns3_mac_stats *mac_stats); void (*get_strings)(struct hnae3_handle *handle, - u32 stringset, u8 *data); + u32 stringset, u8 **data); int 
(*get_sset_count)(struct hnae3_handle *handle, int stringset); void (*get_regs)(struct hnae3_handle *handle, u32 *version, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c index 2b31188ff555..f9a3d6fc4416 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c @@ -36,27 +36,22 @@ int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle) } EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_sset_count); -u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data) +void hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 **data) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; - u8 *buff = data; u16 i; for (i = 0; i < kinfo->num_tqps; i++) { struct hclge_comm_tqp *tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd", tqp->index); - buff += ETH_GSTRING_LEN; + ethtool_sprintf(data, "txq%u_pktnum_rcd", tqp->index); } for (i = 0; i < kinfo->num_tqps; i++) { struct hclge_comm_tqp *tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd", tqp->index); - buff += ETH_GSTRING_LEN; + ethtool_sprintf(data, "rxq%u_pktnum_rcd", tqp->index); } - - return buff; } EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_strings); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h index a46350162ee8..b9ff424c0bc2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h @@ -32,7 +32,7 @@ struct hclge_comm_tqp { u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data); int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle); -u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data); +void hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 **data); void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle); int hclge_comm_tqps_update_stats(struct hnae3_handle *handle, struct hclge_comm_hw *hw); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 841e5af7b2be..807eb3bbb11c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -1293,10 +1293,8 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer, /* save the buffer addr until the last read operation */ *save_buf = read_buf; - } - /* get data ready for the first time to read */ - if (!*ppos) { + /* get data ready for the first time to read */ ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd, read_buf, hns3_dbg_cmd[index].buf_len); if (ret) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index fbfd3ee5648f..43377a7b2426 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -11,7 +11,6 @@ #include <linux/irq.h> #include <linux/ip.h> #include <linux/ipv6.h> -#include <linux/iommu.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/skbuff.h> @@ -381,24 +380,6 @@ static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = { #define HNS3_INVALID_PTYPE \ ARRAY_SIZE(hns3_rx_ptype_tbl) -static 
void hns3_dma_map_sync(struct device *dev, unsigned long iova) -{ - struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct iommu_iotlb_gather iotlb_gather; - size_t granule; - - if (!domain || !iommu_is_dma_domain(domain)) - return; - - granule = 1 << __ffs(domain->pgsize_bitmap); - iova = ALIGN_DOWN(iova, granule); - iotlb_gather.start = iova; - iotlb_gather.end = iova + granule - 1; - iotlb_gather.pgsize = granule; - - iommu_iotlb_sync(domain, &iotlb_gather); -} - static irqreturn_t hns3_irq_handle(int irq, void *vector) { struct hns3_enet_tqp_vector *tqp_vector = vector; @@ -1051,8 +1032,6 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring, static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring) { u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size; - struct net_device *netdev = ring_to_netdev(ring); - struct hns3_nic_priv *priv = netdev_priv(netdev); struct hns3_tx_spare *tx_spare; struct page *page; dma_addr_t dma; @@ -1094,7 +1073,6 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring) tx_spare->buf = page_address(page); tx_spare->len = PAGE_SIZE << order; ring->tx_spare = tx_spare; - ring->tx_copybreak = priv->tx_copybreak; return; dma_mapping_error: @@ -1746,9 +1724,7 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv, unsigned int type) { struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; - struct hnae3_handle *handle = ring->tqp->handle; struct device *dev = ring_to_dev(ring); - struct hnae3_ae_dev *ae_dev; unsigned int size; dma_addr_t dma; @@ -1780,13 +1756,6 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv, return -ENOMEM; } - /* Add a SYNC command to sync io-pgtale to avoid errors in pgtable - * prefetch - */ - ae_dev = hns3_get_ae_dev(handle); - if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) - hns3_dma_map_sync(dev, dma); - desc_cb->priv = priv; desc_cb->length = size; desc_cb->dma = dma; @@ -2483,6 +2452,7 @@ static int hns3_nic_set_features(struct net_device *netdev, return ret; } + netdev->features = features; return 0; } @@ -4898,30 +4868,6 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) devm_kfree(&pdev->dev, priv->tqp_vector); } -static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv) -{ -#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024) -#define HNS3_MAX_PACKET_SIZE (64 * 1024) - - struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev); - struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle); - struct hnae3_handle *handle = priv->ae_handle; - - if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) - return; - - if (!(domain && iommu_is_dma_domain(domain))) - return; - - priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE; - priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE; - - if (priv->tx_copybreak < priv->min_tx_copybreak) - priv->tx_copybreak = priv->min_tx_copybreak; - if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size) - handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size; -} - static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, unsigned int ring_type) { @@ -5155,7 +5101,6 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv) int i, j; int ret; - hns3_update_tx_spare_buf_config(priv); for (i = 0; i < ring_num; i++) { ret = hns3_alloc_ring_memory(&priv->ring[i]); if (ret) { @@ -5360,8 +5305,6 @@ static int hns3_client_init(struct hnae3_handle *handle) priv->ae_handle = handle; priv->tx_timeout_count = 0; 
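One standalone fix rides along in the hns3_enet.c hunks: hns3_nic_set_features() returned success without recording the accepted set in netdev->features, so code reading that field before the core committed it could see stale flags (the commit message isn't part of this excerpt, so take the rationale as inferred). The usual ndo shape, with a hypothetical hardware hook:

	#include <linux/netdevice.h>

	/* stand-in for the driver's VLAN/TSO/checksum programming */
	static int demo_program_offloads(struct net_device *netdev,
					 netdev_features_t features)
	{
		return 0;
	}

	static int demo_set_features(struct net_device *netdev,
				     netdev_features_t features)
	{
		int ret;

		ret = demo_program_offloads(netdev, features);
		if (ret)
			return ret;

		netdev->features = features;	/* commit what the hw now runs */
		return 0;
	}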
priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num; - priv->min_tx_copybreak = 0; - priv->min_tx_spare_buf_size = 0; set_bit(HNS3_NIC_STATE_DOWN, &priv->state); handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index caf7a4df8585..d36c4ed16d8d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -596,8 +596,6 @@ struct hns3_nic_priv { struct hns3_enet_coalesce rx_coal; u32 tx_copybreak; u32 rx_copybreak; - u32 min_tx_copybreak; - u32 min_tx_spare_buf_size; }; union l3_hdr_info { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 97eaeec1952b..b771a2daba43 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -509,54 +509,37 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset) } } -static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, - u32 stat_count, u32 num_tqps, const char *prefix) +static void hns3_update_strings(u8 **data, const struct hns3_stats *stats, + u32 stat_count, u32 num_tqps, + const char *prefix) { -#define MAX_PREFIX_SIZE (6 + 4) - u32 size_left; u32 i, j; - u32 n1; - for (i = 0; i < num_tqps; i++) { - for (j = 0; j < stat_count; j++) { - data[ETH_GSTRING_LEN - 1] = '\0'; - - /* first, prepend the prefix string */ - n1 = scnprintf(data, MAX_PREFIX_SIZE, "%s%u_", - prefix, i); - size_left = (ETH_GSTRING_LEN - 1) - n1; - - /* now, concatenate the stats string to it */ - strncat(data, stats[j].stats_string, size_left); - data += ETH_GSTRING_LEN; - } - } - - return data; + for (i = 0; i < num_tqps; i++) + for (j = 0; j < stat_count; j++) + ethtool_sprintf(data, "%s%u_%s", prefix, i, + stats[j].stats_string); } -static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data) +static void hns3_get_strings_tqps(struct hnae3_handle *handle, u8 **data) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; const char tx_prefix[] = "txq"; const char rx_prefix[] = "rxq"; /* get strings for Tx */ - data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT, - kinfo->num_tqps, tx_prefix); + hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT, + kinfo->num_tqps, tx_prefix); /* get strings for Rx */ - data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT, - kinfo->num_tqps, rx_prefix); - - return data; + hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT, + kinfo->num_tqps, rx_prefix); } static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct hnae3_handle *h = hns3_get_handle(netdev); const struct hnae3_ae_ops *ops = h->ae_algo->ops; - char *buff = (char *)data; int i; if (!ops->get_strings) @@ -564,18 +547,15 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data) switch (stringset) { case ETH_SS_STATS: - buff = hns3_get_strings_tqps(h, buff); - ops->get_strings(h, stringset, (u8 *)buff); + hns3_get_strings_tqps(h, &data); + ops->get_strings(h, stringset, &data); break; case ETH_SS_TEST: - ops->get_strings(h, stringset, data); + ops->get_strings(h, stringset, &data); break; case ETH_SS_PRIV_FLAGS: - for (i = 0; i < HNS3_PRIV_FLAGS_LEN; i++) { - snprintf(buff, ETH_GSTRING_LEN, "%s", - hns3_priv_flags[i].name); - buff += ETH_GSTRING_LEN; - } + for (i = 0; i < HNS3_PRIV_FLAGS_LEN; i++) + 
ethtool_puts(&data, hns3_priv_flags[i].name); break; default: break; @@ -1933,31 +1913,6 @@ static int hns3_set_tx_spare_buf_size(struct net_device *netdev, return ret; } -static int hns3_check_tx_copybreak(struct net_device *netdev, u32 copybreak) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - - if (copybreak < priv->min_tx_copybreak) { - netdev_err(netdev, "tx copybreak %u should be no less than %u!\n", - copybreak, priv->min_tx_copybreak); - return -EINVAL; - } - return 0; -} - -static int hns3_check_tx_spare_buf_size(struct net_device *netdev, u32 buf_size) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - - if (buf_size < priv->min_tx_spare_buf_size) { - netdev_err(netdev, - "tx spare buf size %u should be no less than %u!\n", - buf_size, priv->min_tx_spare_buf_size); - return -EINVAL; - } - return 0; -} - static int hns3_set_tunable(struct net_device *netdev, const struct ethtool_tunable *tuna, const void *data) @@ -1974,10 +1929,6 @@ static int hns3_set_tunable(struct net_device *netdev, switch (tuna->id) { case ETHTOOL_TX_COPYBREAK: - ret = hns3_check_tx_copybreak(netdev, *(u32 *)data); - if (ret) - return ret; - priv->tx_copybreak = *(u32 *)data; for (i = 0; i < h->kinfo.num_tqps; i++) @@ -1992,10 +1943,6 @@ static int hns3_set_tunable(struct net_device *netdev, break; case ETHTOOL_TX_COPYBREAK_BUF_SIZE: - ret = hns3_check_tx_spare_buf_size(netdev, *(u32 *)data); - if (ret) - return ret; - old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size; new_tx_spare_buf_size = *(u32 *)data; netdev_info(netdev, "request to set tx spare buf size from %u to %u\n", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 728f4777e51f..05942fa78b11 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -6,7 +6,6 @@ #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/interrupt.h> -#include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> @@ -595,25 +594,21 @@ static u64 *hclge_comm_get_stats(struct hclge_dev *hdev, return buf; } -static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset, - const struct hclge_comm_stats_str strs[], - int size, u8 *data) +static void hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset, + const struct hclge_comm_stats_str strs[], + int size, u8 **data) { - char *buff = (char *)data; u32 i; if (stringset != ETH_SS_STATS) - return buff; + return; for (i = 0; i < size; i++) { if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) continue; - snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc); - buff = buff + ETH_GSTRING_LEN; + ethtool_puts(data, strs[i].desc); } - - return (u8 *)buff; } static void hclge_update_stats_for_all(struct hclge_dev *hdev) @@ -718,44 +713,38 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) } static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset, - u8 *data) + u8 **data) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - u8 *p = (char *)data; + const char *str; int size; if (stringset == ETH_SS_STATS) { size = ARRAY_SIZE(g_mac_stats_string); - p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string, - size, p); - p = hclge_comm_tqps_get_strings(handle, p); + hclge_comm_get_strings(hdev, stringset, g_mac_stats_string, + size, data); + hclge_comm_tqps_get_strings(handle, data); } else if 
(stringset == ETH_SS_TEST) { if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) { - memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL], - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; + str = hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL]; + ethtool_puts(data, str); } if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) { - memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP], - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; + str = hns3_nic_test_strs[HNAE3_LOOP_APP]; + ethtool_puts(data, str); } if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) { - memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES], - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; + str = hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES]; + ethtool_puts(data, str); } if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) { - memcpy(p, - hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES], - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; + str = hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES]; + ethtool_puts(data, str); } if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { - memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY], - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; + str = hns3_nic_test_strs[HNAE3_LOOP_PHY]; + ethtool_puts(data, str); } } } @@ -3585,17 +3574,6 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf, return ret; } -static void hclge_set_reset_pending(struct hclge_dev *hdev, - enum hnae3_reset_type reset_type) -{ - /* When an incorrect reset type is executed, the get_reset_level - * function generates the HNAE3_NONE_RESET flag. As a result, this - * type do not need to pending. - */ - if (reset_type != HNAE3_NONE_RESET) - set_bit(reset_type, &hdev->reset_pending); -} - static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) { u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg; @@ -3616,7 +3594,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) */ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) { dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); - hclge_set_reset_pending(hdev, HNAE3_IMP_RESET); + set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); hdev->rst_stats.imp_rst_cnt++; @@ -3626,7 +3604,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) { dev_info(&hdev->pdev->dev, "global reset interrupt\n"); set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); - hclge_set_reset_pending(hdev, HNAE3_GLOBAL_RESET); + set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); hdev->rst_stats.global_rst_cnt++; return HCLGE_VECTOR0_EVENT_RST; @@ -3781,7 +3759,7 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev) snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", HCLGE_NAME, pci_name(hdev->pdev)); ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, - IRQ_NOAUTOEN, hdev->misc_vector.name, hdev); + 0, hdev->misc_vector.name, hdev); if (ret) { hclge_free_vector(hdev, 0); dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", @@ -4074,7 +4052,7 @@ static void hclge_do_reset(struct hclge_dev *hdev) case HNAE3_FUNC_RESET: dev_info(&pdev->dev, "PF reset requested\n"); /* schedule again to check later */ - hclge_set_reset_pending(hdev, HNAE3_FUNC_RESET); + set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); hclge_reset_task_schedule(hdev); break; default: @@ -4108,8 +4086,6 @@ static enum hnae3_reset_type 
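The hclge uninit hunk returns to masking vector0 at the device and then draining it with synchronize_irq() rather than disable_irq() (part of the larger revert of the IRQ_NOAUTOEN scheme visible in hclge_misc_irq_init() above). The ordering is the important part: mask first so no new interrupt can fire, then wait out any handler still running, then free. As a sketch with hypothetical names:

	#include <linux/interrupt.h>

	struct demo_vec {
		int vector_irq;
	};

	static void demo_mask_vector(struct demo_vec *v)
	{
		/* stand-in for writing the device's interrupt-mask register */
	}

	static void demo_teardown_vector(struct demo_vec *v, void *dev_id)
	{
		demo_mask_vector(v);		/* no new interrupts after this */
		synchronize_irq(v->vector_irq);	/* wait for in-flight handlers */
		free_irq(v->vector_irq, dev_id);
	}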
hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, clear_bit(HNAE3_FLR_RESET, addr); } - clear_bit(HNAE3_NONE_RESET, addr); - if (hdev->reset_type != HNAE3_NONE_RESET && rst_level < hdev->reset_type) return HNAE3_NONE_RESET; @@ -4251,7 +4227,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev) return false; } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) { hdev->rst_stats.reset_fail_cnt++; - hclge_set_reset_pending(hdev, hdev->reset_type); + set_bit(hdev->reset_type, &hdev->reset_pending); dev_info(&hdev->pdev->dev, "re-schedule reset task(%u)\n", hdev->rst_stats.reset_fail_cnt); @@ -4494,20 +4470,8 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev, enum hnae3_reset_type rst_type) { -#define HCLGE_SUPPORT_RESET_TYPE \ - (BIT(HNAE3_FLR_RESET) | BIT(HNAE3_FUNC_RESET) | \ - BIT(HNAE3_GLOBAL_RESET) | BIT(HNAE3_IMP_RESET)) - struct hclge_dev *hdev = ae_dev->priv; - if (!(BIT(rst_type) & HCLGE_SUPPORT_RESET_TYPE)) { - /* To prevent reset triggered by hclge_reset_event */ - set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request); - dev_warn(&hdev->pdev->dev, "unsupported reset type %d\n", - rst_type); - return; - } - set_bit(rst_type, &hdev->default_reset_request); } @@ -11917,6 +11881,9 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_init_rxd_adv_layout(hdev); + /* Enable MISC vector(vector0) */ + hclge_enable_vector(&hdev->misc_vector, true); + ret = hclge_init_wol(hdev); if (ret) dev_warn(&pdev->dev, @@ -11929,10 +11896,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_state_init(hdev); hdev->last_reset_time = jiffies; - /* Enable MISC vector(vector0) */ - enable_irq(hdev->misc_vector.vector_irq); - hclge_enable_vector(&hdev->misc_vector, true); - dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n", HCLGE_DRIVER_NAME); @@ -12338,7 +12301,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) /* Disable MISC vector(vector0) */ hclge_enable_vector(&hdev->misc_vector, false); - disable_irq(hdev->misc_vector.vector_irq); + synchronize_irq(hdev->misc_vector.vector_irq); /* Disable all hw interrupts */ hclge_config_mac_tnl_int(hdev, false); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c index bab16c2191b2..5505caea88e9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c @@ -58,9 +58,6 @@ bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb) struct hclge_dev *hdev = vport->back; struct hclge_ptp *ptp = hdev->ptp; - if (!ptp) - return false; - if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) || test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) { ptp->tx_skipped++; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c index 8c057192aae6..43c1c18fa81f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c @@ -510,9 +510,9 @@ out: static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data, struct hnae3_knic_private_info *kinfo) { +#define HCLGE_RING_REG_OFFSET 0x200 #define HCLGE_RING_INT_REG_OFFSET 0x4 - struct hnae3_queue *tqp; int i, j, reg_num; int data_num_sum; u32 *reg = data; @@ -533,11 +533,10 @@ static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data, reg_num = 
ARRAY_SIZE(ring_reg_addr_list); for (j = 0; j < kinfo->num_tqps; j++) { reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg); - tqp = kinfo->tqp[j]; for (i = 0; i < reg_num; i++) - *reg++ = readl_relaxed(tqp->io_base - - HCLGE_TQP_REG_OFFSET + - ring_reg_addr_list[i]); + *reg++ = hclge_read_dev(&hdev->hw, + ring_reg_addr_list[i] + + HCLGE_RING_REG_OFFSET * j); } data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 896f1eb172d3..2f6ffb88e700 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -130,12 +130,10 @@ static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) } static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, - u8 *data) + u8 **data) { - u8 *p = (char *)data; - if (strset == ETH_SS_STATS) - p = hclge_comm_tqps_get_strings(handle, p); + hclge_comm_tqps_get_strings(handle, data); } static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) @@ -1395,17 +1393,6 @@ static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev, return ret; } -static void hclgevf_set_reset_pending(struct hclgevf_dev *hdev, - enum hnae3_reset_type reset_type) -{ - /* When an incorrect reset type is executed, the get_reset_level - * function generates the HNAE3_NONE_RESET flag. As a result, this - * type do not need to pending. - */ - if (reset_type != HNAE3_NONE_RESET) - set_bit(reset_type, &hdev->reset_pending); -} - static int hclgevf_reset_wait(struct hclgevf_dev *hdev) { #define HCLGEVF_RESET_WAIT_US 20000 @@ -1555,7 +1542,7 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) hdev->rst_stats.rst_fail_cnt); if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) - hclgevf_set_reset_pending(hdev, hdev->reset_type); + set_bit(hdev->reset_type, &hdev->reset_pending); if (hclgevf_is_reset_pending(hdev)) { set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); @@ -1675,8 +1662,6 @@ static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr) clear_bit(HNAE3_FLR_RESET, addr); } - clear_bit(HNAE3_NONE_RESET, addr); - return rst_level; } @@ -1686,15 +1671,14 @@ static void hclgevf_reset_event(struct pci_dev *pdev, struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); struct hclgevf_dev *hdev = ae_dev->priv; + dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); + if (hdev->default_reset_request) hdev->reset_level = hclgevf_get_reset_level(&hdev->default_reset_request); else hdev->reset_level = HNAE3_VF_FUNC_RESET; - dev_info(&hdev->pdev->dev, "received reset request from VF enet, reset level is %d\n", - hdev->reset_level); - /* reset of this VF requested */ set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); hclgevf_reset_task_schedule(hdev); @@ -1705,20 +1689,8 @@ static void hclgevf_reset_event(struct pci_dev *pdev, static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, enum hnae3_reset_type rst_type) { -#define HCLGEVF_SUPPORT_RESET_TYPE \ - (BIT(HNAE3_VF_RESET) | BIT(HNAE3_VF_FUNC_RESET) | \ - BIT(HNAE3_VF_PF_FUNC_RESET) | BIT(HNAE3_VF_FULL_RESET) | \ - BIT(HNAE3_FLR_RESET) | BIT(HNAE3_VF_EXP_RESET)) - struct hclgevf_dev *hdev = ae_dev->priv; - if (!(BIT(rst_type) & HCLGEVF_SUPPORT_RESET_TYPE)) { - /* To prevent reset triggered by hclge_reset_event */ - set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request); - dev_info(&hdev->pdev->dev, "unsupported 
reset type %d\n", - rst_type); - return; - } set_bit(rst_type, &hdev->default_reset_request); } @@ -1875,14 +1847,14 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev) */ if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { /* prepare for full reset of stack + pcie interface */ - hclgevf_set_reset_pending(hdev, HNAE3_VF_FULL_RESET); + set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); /* "defer" schedule the reset task again */ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); } else { hdev->reset_attempts++; - hclgevf_set_reset_pending(hdev, hdev->reset_level); + set_bit(hdev->reset_level, &hdev->reset_pending); set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); } hclgevf_reset_task_schedule(hdev); @@ -2005,7 +1977,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); dev_info(&hdev->pdev->dev, "receive reset interrupt 0x%x!\n", rst_ing_reg); - hclgevf_set_reset_pending(hdev, HNAE3_VF_RESET); + set_bit(HNAE3_VF_RESET, &hdev->reset_pending); set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B); @@ -2315,7 +2287,6 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev) clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); - timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0); mutex_init(&hdev->mbx_resp.mbx_mutex); sema_init(&hdev->reset_sem, 1); @@ -3015,6 +2986,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) HCLGEVF_DRIVER_NAME); hclgevf_task_schedule(hdev, round_jiffies_relative(HZ)); + timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0); return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c index 7d9d9dbc7560..6db415d8b917 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c @@ -123,10 +123,10 @@ int hclgevf_get_regs_len(struct hnae3_handle *handle) void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, void *data) { +#define HCLGEVF_RING_REG_OFFSET 0x200 #define HCLGEVF_RING_INT_REG_OFFSET 0x4 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hnae3_queue *tqp; int i, j, reg_um; u32 *reg = data; @@ -147,11 +147,10 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, reg_um = ARRAY_SIZE(ring_reg_addr_list); for (j = 0; j < hdev->num_tqps; j++) { reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg); - tqp = &hdev->htqp[j].q; for (i = 0; i < reg_um; i++) - *reg++ = readl_relaxed(tqp->io_base - - HCLGEVF_TQP_REG_OFFSET + - ring_reg_addr_list[i]); + *reg++ = hclgevf_read_dev(&hdev->hw, + ring_reg_addr_list[i] + + HCLGEVF_RING_REG_OFFSET * j); } reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index ce227b56cf72..2f9655cf5dd9 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1205,12 +1205,10 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) if (ret_val) goto out; - if (hw->mac.type != e1000_pch_mtp) { - ret_val = e1000e_force_smbus(hw); - if (ret_val) { - e_dbg("Failed to force SMBUS: %d\n", ret_val); - goto release; - } + ret_val = e1000e_force_smbus(hw); + if (ret_val) { + e_dbg("Failed to force SMBUS: 
%d\n", ret_val); + goto release; } /* Si workaround for ULP entry flow on i127/rev6 h/w. Enable @@ -1273,13 +1271,6 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) } release: - if (hw->mac.type == e1000_pch_mtp) { - ret_val = e1000e_force_smbus(hw); - if (ret_val) - e_dbg("Failed to force SMBUS over MTL system: %d\n", - ret_val); - } - hw->phy.ops.release(hw); out: if (ret_val) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 2089a0e172bf..d4255c2706fa 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -755,6 +755,7 @@ enum i40e_filter_state { I40E_FILTER_ACTIVE, /* Added to switch by FW */ I40E_FILTER_FAILED, /* Rejected by FW */ I40E_FILTER_REMOVE, /* To be removed */ + I40E_FILTER_NEW_SYNC, /* New, not sent yet, is in i40e_sync_vsi_filters() */ /* There is no 'removed' state; the filter struct is freed */ }; struct i40e_mac_filter { diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index abf624d770e6..208c2f0857b6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -89,6 +89,7 @@ static char *i40e_filter_state_string[] = { "ACTIVE", "FAILED", "REMOVE", + "NEW_SYNC", }; /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 25295ae370b2..ab5febf83ec3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1255,6 +1255,7 @@ int i40e_count_filters(struct i40e_vsi *vsi) hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { if (f->state == I40E_FILTER_NEW || + f->state == I40E_FILTER_NEW_SYNC || f->state == I40E_FILTER_ACTIVE) ++cnt; } @@ -1441,6 +1442,8 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi, new->f = add_head; new->state = add_head->state; + if (add_head->state == I40E_FILTER_NEW) + add_head->state = I40E_FILTER_NEW_SYNC; /* Add the new filter to the tmp list */ hlist_add_head(&new->hlist, tmp_add_list); @@ -1550,6 +1553,8 @@ static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi, return -ENOMEM; new_mac->f = add_head; new_mac->state = add_head->state; + if (add_head->state == I40E_FILTER_NEW) + add_head->state = I40E_FILTER_NEW_SYNC; /* Add the new filter to the tmp list */ hlist_add_head(&new_mac->hlist, tmp_add_list); @@ -2437,7 +2442,8 @@ static int i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, struct i40e_mac_filter *f) { - bool enable = f->state == I40E_FILTER_NEW; + bool enable = f->state == I40E_FILTER_NEW || + f->state == I40E_FILTER_NEW_SYNC; struct i40e_hw *hw = &vsi->back->hw; int aq_ret; @@ -2611,6 +2617,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) /* Add it to the hash list */ hlist_add_head(&new->hlist, &tmp_add_list); + f->state = I40E_FILTER_NEW_SYNC; } /* Count the number of active (current and new) VLAN @@ -2762,7 +2769,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) spin_lock_bh(&vsi->mac_filter_hash_lock); hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) { /* Only update the state if we're still NEW */ - if (new->f->state == I40E_FILTER_NEW) + if (new->f->state == I40E_FILTER_NEW || + new->f->state == I40E_FILTER_NEW_SYNC) new->f->state = new->state; hlist_del(&new->hlist); netdev_hw_addr_refcnt(new->f, vsi->netdev, -1); @@ -13087,12 +13095,13 @@ static int i40e_get_phys_port_id(struct net_device *netdev, * @addr: the MAC address entry 
being added * @vid: VLAN ID * @flags: instructions from stack about fdb operation + * @notified: whether notification was emitted * @extack: netlink extended ack, unused currently */ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, - u16 flags, + u16 flags, bool *notified, struct netlink_ext_ack *extack) { struct i40e_netdev_priv *np = netdev_priv(dev); diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index c0b3e70a7ea3..fb527434b58b 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -552,13 +552,14 @@ int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf) static void ice_eswitch_detach(struct ice_pf *pf, struct ice_repr *repr) { ice_eswitch_stop_reprs(pf); + repr->ops.rem(repr); + xa_erase(&pf->eswitch.reprs, repr->id); if (xa_empty(&pf->eswitch.reprs)) ice_eswitch_disable_switchdev(pf); ice_eswitch_release_repr(pf, repr); - repr->ops.rem(repr); ice_repr_destroy(repr); if (xa_empty(&pf->eswitch.reprs)) { diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index 5412eff8ef23..ee9862ddfe15 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -1830,11 +1830,12 @@ static int ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp, struct ice_fdir_fltr *input) { - u16 dest_vsi, q_index = 0; + s16 q_index = ICE_FDIR_NO_QUEUE_IDX; u16 orig_q_index = 0; struct ice_pf *pf; struct ice_hw *hw; int flow_type; + u16 dest_vsi; u8 dest_ctl; if (!vsi || !fsp || !input) diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h index ab5b118daa2d..820023c0271f 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.h +++ b/drivers/net/ethernet/intel/ice/ice_fdir.h @@ -53,6 +53,8 @@ */ #define ICE_FDIR_IPV4_PKT_FLAG_MF 0x20 +#define ICE_FDIR_NO_QUEUE_IDX -1 + enum ice_fltr_prgm_desc_dest { ICE_FLTR_PRGM_DESC_DEST_DROP_PKT, ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX, @@ -186,7 +188,7 @@ struct ice_fdir_fltr { u16 flex_fltr; /* filter control */ - u16 q_index; + s16 q_index; u16 orig_q_index; u16 dest_vsi; u8 dest_ctl; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index cc8d2d6e2b4d..1eaa4428fd24 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -6156,12 +6156,14 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) * @addr: the MAC address entry being added * @vid: VLAN ID * @flags: instructions from stack about fdb operation + * @notified: whether notification was emitted * @extack: netlink extended ack */ static int ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, - u16 flags, struct netlink_ext_ack __always_unused *extack) + u16 flags, bool *notified, + struct netlink_ext_ack __always_unused *extack) { int err; @@ -6195,12 +6197,14 @@ ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], * @dev: the net device pointer * @addr: the MAC address entry being added * @vid: VLAN ID + * @notified: whether notification was emitted * @extack: netlink extended ack */ static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, 
- __always_unused u16 vid, struct netlink_ext_ack *extack) + __always_unused u16 vid, bool *notified, + struct netlink_ext_ack *extack) { int err; diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index 2c31ad87587a..66544faab710 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -141,6 +141,7 @@ enum idpf_vport_state { * @adapter: Adapter back pointer * @vport: Vport back pointer * @vport_id: Vport identifier + * @link_speed_mbps: Link speed in mbps * @vport_idx: Relative vport index * @state: See enum idpf_vport_state * @netstats: Packet and byte stats @@ -150,6 +151,7 @@ struct idpf_netdev_priv { struct idpf_adapter *adapter; struct idpf_vport *vport; u32 vport_id; + u32 link_speed_mbps; u16 vport_idx; enum idpf_vport_state state; struct rtnl_link_stats64 netstats; @@ -287,7 +289,6 @@ struct idpf_port_stats { * @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation * @port_stats: per port csum, header split, and other offload stats * @link_up: True if link is up - * @link_speed_mbps: Link speed in mbps * @sw_marker_wq: workqueue for marker packets */ struct idpf_vport { @@ -331,7 +332,6 @@ struct idpf_vport { struct idpf_port_stats port_stats; bool link_up; - u32 link_speed_mbps; wait_queue_head_t sw_marker_wq; }; diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c index 3806ddd3ce4a..59b1a1a09996 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c @@ -1296,24 +1296,19 @@ static void idpf_set_msglevel(struct net_device *netdev, u32 data) static int idpf_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { - struct idpf_vport *vport; - - idpf_vport_ctrl_lock(netdev); - vport = idpf_netdev_to_vport(netdev); + struct idpf_netdev_priv *np = netdev_priv(netdev); ethtool_link_ksettings_zero_link_mode(cmd, supported); cmd->base.autoneg = AUTONEG_DISABLE; cmd->base.port = PORT_NONE; - if (vport->link_up) { + if (netif_carrier_ok(netdev)) { cmd->base.duplex = DUPLEX_FULL; - cmd->base.speed = vport->link_speed_mbps; + cmd->base.speed = np->link_speed_mbps; } else { cmd->base.duplex = DUPLEX_UNKNOWN; cmd->base.speed = SPEED_UNKNOWN; } - idpf_vport_ctrl_unlock(netdev); - return 0; } diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 4f20343e49a9..b4fbb99bfad2 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -1786,6 +1786,7 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter) */ err = idpf_vc_core_init(adapter); if (err) { + cancel_delayed_work_sync(&adapter->mbx_task); idpf_deinit_dflt_mbx(adapter); goto unlock_mutex; } @@ -1860,7 +1861,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, * mess with. Nothing below should use those variables from new_vport * and should instead always refer to them in vport if they need to. */ - memcpy(new_vport, vport, offsetof(struct idpf_vport, link_speed_mbps)); + memcpy(new_vport, vport, offsetof(struct idpf_vport, link_up)); /* Adjust resource parameters prior to reallocating resources */ switch (reset_cause) { @@ -1906,7 +1907,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, /* Same comment as above regarding avoiding copying the wait_queues and * mutexes applies here. We do not want to mess with those if possible. 
*/ - memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps)); + memcpy(vport, new_vport, offsetof(struct idpf_vport, link_up)); if (reset_cause == IDPF_SR_Q_CHANGE) idpf_vport_alloc_vec_indexes(vport); diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index 15c00a01f1c0..d46c95f91b0d 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -141,7 +141,7 @@ static void idpf_handle_event_link(struct idpf_adapter *adapter, } np = netdev_priv(vport->netdev); - vport->link_speed_mbps = le32_to_cpu(v2e->link_speed); + np->link_speed_mbps = le32_to_cpu(v2e->link_speed); if (vport->link_up == v2e->link_status) return; @@ -3063,7 +3063,6 @@ init_failed: adapter->state = __IDPF_VER_CHECK; if (adapter->vcxn_mngr) idpf_vc_xn_shutdown(adapter->vcxn_mngr); - idpf_deinit_dflt_mbx(adapter); set_bit(IDPF_HR_DRV_LOAD, adapter->flags); queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task, msecs_to_jiffies(task_delay)); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 37b674f8cbcd..08578980b651 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -907,7 +907,7 @@ static int igb_request_msix(struct igb_adapter *adapter) int i, err = 0, vector = 0, free_vector = 0; err = request_irq(adapter->msix_entries[vector].vector, - igb_msix_other, IRQF_NO_THREAD, netdev->name, adapter); + igb_msix_other, 0, netdev->name, adapter); if (err) goto err_out; @@ -2486,7 +2486,7 @@ static int igb_set_features(struct net_device *netdev, static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, - u16 flags, + u16 flags, bool *notified, struct netlink_ext_ack *extack) { /* guarantee we can provide a unique filter for the unicast address */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index c229a26fbbb7..2e38e8f6fac1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9955,7 +9955,7 @@ static int ixgbe_set_features(struct net_device *netdev, static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, - u16 flags, + u16 flags, bool *notified, struct netlink_ext_ack *extack) { /* guarantee we can provide a unique filter for the unicast address */ diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig index a32d85d6f599..35c4f5f64f58 100644 --- a/drivers/net/ethernet/marvell/octeontx2/Kconfig +++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig @@ -46,3 +46,11 @@ config OCTEONTX2_VF depends on OCTEONTX2_PF help This driver supports Marvell's OcteonTX2 NIC virtual function. + +config RVU_ESWITCH + tristate "Marvell RVU E-Switch support" + depends on OCTEONTX2_PF + default m + help + This driver supports Marvell's RVU E-Switch that + provides internal SRIOV packet steering and switching. 
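A note on the new tristate symbol above: code outside the new rvu_rep objects would typically compile-time guard representor-only paths on CONFIG_RVU_ESWITCH. A minimal hypothetical sketch under that assumption follows; the rvu_rep_supported() helper is illustrative only and is not part of this series.
	#include <linux/kconfig.h>
	/* IS_ENABLED(CONFIG_RVU_ESWITCH) evaluates to 1 when the option is
	 * built-in (=y) or modular (=m) and to 0 when it is disabled, so a
	 * caller gated this way has the representor path compiled out
	 * entirely on kernels built without E-Switch support.
	 */
	static bool rvu_rep_supported(void)
	{
		return IS_ENABLED(CONFIG_RVU_ESWITCH);
	}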
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile index 3cf4c8285c90..ccea37847df8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -11,4 +11,5 @@ rvu_mbox-y := mbox.o rvu_trace.o rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \ - rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o + rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o \ + rvu_rep.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index 2436c1ff9ba4..5d84386ed22d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -156,6 +156,7 @@ enum nix_scheduler { #define NIC_HW_MIN_FRS 40 #define NIC_HW_MAX_FRS 9212 #define SDP_HW_MAX_FRS 65535 +#define SDP_HW_MIN_FRS 16 #define CN10K_LMAC_LINK_MAX_FRS 16380 /* 16k - FCS */ #define CN10K_LBK_LINK_MAX_FRS 65535 /* 64k */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 6ea2f3071fe8..62c07407eb94 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -144,6 +144,9 @@ M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \ msg_rsp) \ M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \ M(PTP_GET_CAP, 0x00c, ptp_get_cap, msg_req, ptp_get_cap_rsp) \ +M(GET_REP_CNT, 0x00d, get_rep_cnt, msg_req, get_rep_cnt_rsp) \ +M(ESW_CFG, 0x00e, esw_cfg, esw_cfg_req, msg_rsp) \ +M(REP_EVENT_NOTIFY, 0x00f, rep_event_notify, rep_event, msg_rsp) \ /* CGX mbox IDs (range 0x200 - 0x3FF) */ \ M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \ M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \ @@ -319,6 +322,7 @@ M(NIX_MCAST_GRP_DESTROY, 0x802c, nix_mcast_grp_destroy, nix_mcast_grp_destroy_re M(NIX_MCAST_GRP_UPDATE, 0x802d, nix_mcast_grp_update, \ nix_mcast_grp_update_req, \ nix_mcast_grp_update_rsp) \ +M(NIX_LF_STATS, 0x802e, nix_lf_stats, nix_stats_req, nix_stats_rsp) \ /* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \ M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \ mcs_alloc_rsrc_rsp) \ @@ -380,12 +384,16 @@ M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp) #define MBOX_UP_MCS_MESSAGES \ M(MCS_INTR_NOTIFY, 0xE00, mcs_intr_notify, mcs_intr_info, msg_rsp) +#define MBOX_UP_REP_MESSAGES \ +M(REP_EVENT_UP_NOTIFY, 0xEF0, rep_event_up_notify, rep_event, msg_rsp) \ + enum { #define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id, MBOX_MESSAGES MBOX_UP_CGX_MESSAGES MBOX_UP_CPT_MESSAGES MBOX_UP_MCS_MESSAGES +MBOX_UP_REP_MESSAGES #undef M }; @@ -1364,6 +1372,37 @@ struct nix_bandprof_get_hwinfo_rsp { u32 policer_timeunit; }; +struct nix_stats_req { + struct mbox_msghdr hdr; + u8 reset; + u16 pcifunc; + u64 rsvd; +}; + +struct nix_stats_rsp { + struct mbox_msghdr hdr; + u16 pcifunc; + struct { + u64 octs; + u64 ucast; + u64 bcast; + u64 mcast; + u64 drop; + u64 drop_octs; + u64 drop_mcast; + u64 drop_bcast; + u64 err; + u64 rsvd[5]; + } rx; + struct { + u64 ucast; + u64 bcast; + u64 mcast; + u64 drop; + u64 octs; + } tx; +}; + /* NPC mbox message structs */ #define NPC_MCAM_ENTRY_INVALID 0xFFFF @@ -1525,6 +1564,41 @@ struct ptp_get_cap_rsp { u64 cap; }; +struct get_rep_cnt_rsp { + struct mbox_msghdr hdr; + 
u16 rep_cnt; + u16 rep_pf_map[64]; + u64 rsvd; +}; + +struct esw_cfg_req { + struct mbox_msghdr hdr; + u8 ena; + u64 rsvd; +}; + +struct rep_evt_data { + u8 port_state; + u8 vf_state; + u16 rx_mode; + u16 rx_flags; + u16 mtu; + u8 mac[ETH_ALEN]; + u64 rsvd[5]; +}; + +struct rep_event { + struct mbox_msghdr hdr; + u16 pcifunc; +#define RVU_EVENT_PORT_STATE BIT_ULL(0) +#define RVU_EVENT_PFVF_STATE BIT_ULL(1) +#define RVU_EVENT_MTU_CHANGE BIT_ULL(2) +#define RVU_EVENT_RX_MODE_CHANGE BIT_ULL(3) +#define RVU_EVENT_MAC_ADDR_CHANGE BIT_ULL(4) + u16 event; + struct rep_evt_data evt_data; +}; + struct flow_msg { unsigned char dmac[6]; unsigned char smac[6]; @@ -1563,6 +1637,7 @@ struct flow_msg { u8 icmp_type; u8 icmp_code; __be16 tcp_flags; + u16 sq_id; }; struct npc_install_flow_req { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 5016ba82e142..b897845e25fd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -513,6 +513,11 @@ struct rvu_switch { u16 start_entry; }; +struct rep_evtq_ent { + struct list_head node; + struct rep_event event; +}; + struct rvu { void __iomem *afreg_base; void __iomem *pfreg_base; @@ -525,6 +530,7 @@ struct rvu { struct mutex alias_lock; /* Serialize bar2 alias access */ int vfs; /* Number of VFs attached to RVU */ u16 vf_devid; /* VF devices id */ + bool def_rule_cntr_en; int nix_blkaddr[MAX_NIX_BLKS]; /* Mbox */ @@ -594,6 +600,15 @@ struct rvu { spinlock_t cpt_intr_lock; struct mutex mbox_lock; /* Serialize mbox up and down msgs */ + u16 rep_pcifunc; + int rep_cnt; + u16 *rep2pfvf_map; + u8 rep_mode; + struct work_struct rep_evt_work; + struct workqueue_struct *rep_evt_wq; + struct list_head rep_evtq_head; + /* Representor event lock */ + spinlock_t rep_evtq_lock; }; static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) @@ -852,6 +867,14 @@ bool is_sdp_pfvf(u16 pcifunc); bool is_sdp_pf(u16 pcifunc); bool is_sdp_vf(struct rvu *rvu, u16 pcifunc); +static inline bool is_rep_dev(struct rvu *rvu, u16 pcifunc) +{ + if (rvu->rep_pcifunc && rvu->rep_pcifunc == pcifunc) + return true; + + return false; +} + /* CGX APIs */ static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf) { @@ -960,7 +983,11 @@ void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, int group, int alg_idx, int mcam_index); - +void __rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc, + struct rvu_npc_mcam_rule *rule); +void __rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc, + struct rvu_npc_mcam_rule *rule, + struct npc_install_flow_rsp *rsp); void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc, int blkaddr, int *alloc_cnt, int *enable_cnt); @@ -985,6 +1012,7 @@ void npc_set_mcam_action(struct rvu *rvu, struct npc_mcam *mcam, void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 src, struct mcam_entry *entry, u8 *intf, u8 *ena); +int npc_config_cntr_default_entries(struct rvu *rvu, bool enable); bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc); bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature); u32 rvu_cgx_get_fifolen(struct rvu *rvu); @@ -1045,7 +1073,8 @@ int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr); /* RVU Switch */ void rvu_switch_enable(struct rvu *rvu); 
void rvu_switch_disable(struct rvu *rvu); -void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc); +void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc, bool ena); +void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool ena); int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir, u64 pkind, u8 var_len_off, u8 var_len_off_mask, @@ -1058,4 +1087,9 @@ int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc); void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena); void rvu_mcs_exit(struct rvu *rvu); +/* Representor APIs */ +int rvu_rep_pf_init(struct rvu *rvu); +int rvu_rep_install_mcam_rules(struct rvu *rvu); +void rvu_rep_update_rules(struct rvu *rvu, u16 pcifunc, bool ena); +int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable); #endif /* RVU_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 8c700ee4a82b..148144f5b61d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -45,33 +45,6 @@ enum { CGX_STAT18, }; -/* NIX TX stats */ -enum nix_stat_lf_tx { - TX_UCAST = 0x0, - TX_BCAST = 0x1, - TX_MCAST = 0x2, - TX_DROP = 0x3, - TX_OCTS = 0x4, - TX_STATS_ENUM_LAST, -}; - -/* NIX RX stats */ -enum nix_stat_lf_rx { - RX_OCTS = 0x0, - RX_UCAST = 0x1, - RX_BCAST = 0x2, - RX_MCAST = 0x3, - RX_DROP = 0x4, - RX_DROP_OCTS = 0x5, - RX_FCS = 0x6, - RX_ERR = 0x7, - RX_DRP_BCAST = 0x8, - RX_DRP_MCAST = 0x9, - RX_DRP_L3BCAST = 0xa, - RX_DRP_L3MCAST = 0xb, - RX_STATS_ENUM_LAST, -}; - static char *cgx_rx_stats_fields[] = { [CGX_STAT0] = "Received packets", [CGX_STAT1] = "Octets of received packets", diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index 7498ab429963..dab4deca893f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -1238,6 +1238,7 @@ enum rvu_af_dl_param_id { RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU, RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT, RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE, + RVU_AF_DEVLINK_PARAM_ID_NPC_DEF_RULE_CNTR_ENABLE, RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF, }; @@ -1358,6 +1359,32 @@ static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink return 0; } +static int rvu_af_dl_npc_def_rule_cntr_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct rvu_devlink *rvu_dl = devlink_priv(devlink); + struct rvu *rvu = rvu_dl->rvu; + + ctx->val.vbool = rvu->def_rule_cntr_en; + + return 0; +} + +static int rvu_af_dl_npc_def_rule_cntr_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) +{ + struct rvu_devlink *rvu_dl = devlink_priv(devlink); + struct rvu *rvu = rvu_dl->rvu; + int err; + + err = npc_config_cntr_default_entries(rvu, ctx->val.vbool); + if (!err) + rvu->def_rule_cntr_en = ctx->val.vbool; + + return err; +} + static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id, struct devlink_param_gset_ctx *ctx) { @@ -1444,6 +1471,11 @@ static const struct devlink_param rvu_af_dl_params[] = { rvu_af_dl_npc_mcam_high_zone_percent_get, rvu_af_dl_npc_mcam_high_zone_percent_set, rvu_af_dl_npc_mcam_high_zone_percent_validate), + DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_DEF_RULE_CNTR_ENABLE, + "npc_def_rule_cntr", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + 
rvu_af_dl_npc_def_rule_cntr_get, + rvu_af_dl_npc_def_rule_cntr_set, NULL), DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF, "nix_maxlf", DEVLINK_PARAM_TYPE_U16, BIT(DEVLINK_PARAM_CMODE_RUNTIME), @@ -1468,6 +1500,9 @@ static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) struct rvu *rvu = rvu_dl->rvu; struct rvu_switch *rswitch; + if (rvu->rep_mode) + return -EOPNOTSUPP; + rswitch = &rvu->rswitch; *mode = rswitch->mode; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index da69350c6f76..5d5a01dbbca1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -31,6 +31,7 @@ static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc); static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, u32 leaf_prof); static const char *nix_get_ctx_name(int ctype); +static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc); enum mc_tbl_sz { MC_TBL_SZ_256, @@ -312,7 +313,9 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr, /* TLs aggegating traffic are shared across PF and VFs */ if (lvl >= hw->cap.nix_tx_aggr_lvl) { - if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc)) + if ((nix_get_tx_link(rvu, map_func) != + nix_get_tx_link(rvu, pcifunc)) && + (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))) return false; else return true; @@ -360,7 +363,6 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf, cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind); rvu_npc_set_pkind(rvu, pkind, pfvf); - break; case NIX_INTF_TYPE_LBK: vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; @@ -584,6 +586,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) return 0; + if (is_sdp_pfvf(pcifunc)) + type = NIX_INTF_TYPE_SDP; + pfvf = rvu_get_pfvf(rvu, pcifunc); err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); if (err) @@ -1614,6 +1619,12 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, cfg = NPC_TX_DEF_PKIND; rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg); + if (is_rep_dev(rvu, pcifunc)) { + pfvf->tx_chan_base = RVU_SWITCH_LBK_CHAN; + pfvf->tx_chan_cnt = 1; + goto exit; + } + intf = is_lbk_vf(rvu, pcifunc) ? 
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; if (is_sdp_pfvf(pcifunc)) intf = NIX_INTF_TYPE_SDP; @@ -1684,6 +1695,9 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req, if (nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; + if (is_rep_dev(rvu, pcifunc)) + goto free_lf; + if (req->flags & NIX_LF_DISABLE_FLOWS) rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); else @@ -1695,6 +1709,7 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req, nix_interface_deinit(rvu, pcifunc, nixlf); +free_lf: /* Reset this NIX LF */ err = rvu_lf_reset(rvu, block, nixlf); if (err) { @@ -2007,7 +2022,8 @@ static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc, struct rvu_hwinfo *hw = rvu->hw; int pf = rvu_get_pf(pcifunc); - if (is_lbk_vf(rvu, pcifunc)) { /* LBK links */ + /* LBK links */ + if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) { *start = hw->cap.nix_txsch_per_cgx_lmac * link; *end = *start + hw->cap.nix_txsch_per_lbk_lmac; } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */ @@ -2760,7 +2776,7 @@ void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc, int schq; u64 cfg; - if (!is_pf_cgxmapped(rvu, pf)) + if (!is_pf_cgxmapped(rvu, pf) && !is_rep_dev(rvu, pcifunc)) return; cfg = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0; @@ -4393,8 +4409,6 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf) ether_addr_copy(pfvf->default_mac, req->mac_addr); - rvu_switch_update_rules(rvu, pcifunc); - return 0; } @@ -4555,7 +4569,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; - if (is_lbk_vf(rvu, pcifunc)) + if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) rvu_get_lbk_link_max_frs(rvu, &max_mtu); else rvu_get_lmac_link_max_frs(rvu, &max_mtu); @@ -4583,6 +4597,8 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, /* For VFs of PF0 ingress is LBK port, so config LBK link */ pfvf = rvu_get_pfvf(rvu, pcifunc); link = hw->cgx_links + pfvf->lbkid; + } else if (is_rep_dev(rvu, pcifunc)) { + link = hw->cgx_links + 0; } if (link < 0) @@ -4674,7 +4690,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr, if (hw->sdp_links) { link = hw->cgx_links + hw->lbk_links; rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), - SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); + SDP_HW_MAX_FRS << 16 | SDP_HW_MIN_FRS); } /* Get MCS external bypass status for CN10K-B */ @@ -5166,7 +5182,7 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, { u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; - int nixlf, err; + int nixlf, err, pf; err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); if (err) @@ -5182,7 +5198,11 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, pfvf = rvu_get_pfvf(rvu, pcifunc); set_bit(NIXLF_INITIALIZED, &pfvf->flags); - rvu_switch_update_rules(rvu, pcifunc); + rvu_switch_update_rules(rvu, pcifunc, true); + + pf = rvu_get_pf(pcifunc); + if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) + rvu_rep_notify_pfvf_state(rvu, pcifunc, true); return rvu_cgx_start_stop_io(rvu, pcifunc, true); } @@ -5192,7 +5212,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, { u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; - int nixlf, err; + int nixlf, err, pf; err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); if (err) @@ -5210,8 +5230,12 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req 
*req, if (err) return err; + rvu_switch_update_rules(rvu, pcifunc, false); rvu_cgx_tx_enable(rvu, pcifunc, true); + pf = rvu_get_pf(pcifunc); + if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) + rvu_rep_notify_pfvf_state(rvu, pcifunc, false); return 0; } @@ -5239,6 +5263,9 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) clear_bit(NIXLF_INITIALIZED, &pfvf->flags); + if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) + rvu_rep_notify_pfvf_state(rvu, pcifunc, false); + rvu_cgx_start_stop_io(rvu, pcifunc, false); if (pfvf->sq_ctx) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 97722ce8c4cb..821fe242f821 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -2691,6 +2691,49 @@ void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx) npc_mcam_set_bit(mcam, entry_idx); } +int npc_config_cntr_default_entries(struct rvu *rvu, bool enable) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct npc_install_flow_rsp rsp; + struct rvu_npc_mcam_rule *rule; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return -EINVAL; + + mutex_lock(&mcam->lock); + list_for_each_entry(rule, &mcam->mcam_rules, list) { + if (!is_mcam_entry_enabled(rvu, mcam, blkaddr, rule->entry)) + continue; + if (!rule->default_rule) + continue; + if (enable && !rule->has_cntr) { /* Alloc and map new counter */ + __rvu_mcam_add_counter_to_rule(rvu, rule->owner, + rule, &rsp); + if (rsp.counter < 0) { + dev_err(rvu->dev, + "%s: Failed to allocate cntr for default rule (err=%d)\n", + __func__, rsp.counter); + break; + } + npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, + rule->entry, rsp.counter); + /* Reset counter before use */ + rvu_write64(rvu, blkaddr, + NPC_AF_MATCH_STATX(rule->cntr), 0x0); + } + + /* Free and unmap counter */ + if (!enable && rule->has_cntr) + __rvu_mcam_remove_counter_from_rule(rvu, rule->owner, + rule); + } + mutex_unlock(&mcam->lock); + + return 0; +} + int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu, struct npc_mcam_alloc_entry_req *req, struct npc_mcam_alloc_entry_rsp *rsp) @@ -2975,9 +3018,9 @@ int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu, return rc; } -int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu, - struct npc_mcam_alloc_counter_req *req, - struct npc_mcam_alloc_counter_rsp *rsp) +static int __npc_mcam_alloc_counter(struct rvu *rvu, + struct npc_mcam_alloc_counter_req *req, + struct npc_mcam_alloc_counter_rsp *rsp) { struct npc_mcam *mcam = &rvu->hw->mcam; u16 pcifunc = req->hdr.pcifunc; @@ -2998,11 +3041,9 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu, if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS) return NPC_MCAM_INVALID_REQ; - mutex_lock(&mcam->lock); /* Check if unused counters are available or not */ if (!rvu_rsrc_free_count(&mcam->counters)) { - mutex_unlock(&mcam->lock); return NPC_MCAM_ALLOC_FAILED; } @@ -3035,12 +3076,27 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu, } } - mutex_unlock(&mcam->lock); return 0; } -int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu, - struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp) +int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu, + struct npc_mcam_alloc_counter_req *req, + struct npc_mcam_alloc_counter_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int err; + + mutex_lock(&mcam->lock); + + err = 
__npc_mcam_alloc_counter(rvu, req, rsp); + + mutex_unlock(&mcam->lock); + return err; +} + +static int __npc_mcam_free_counter(struct rvu *rvu, + struct npc_mcam_oper_counter_req *req, + struct msg_rsp *rsp) { struct npc_mcam *mcam = &rvu->hw->mcam; u16 index, entry = 0; @@ -3050,10 +3106,8 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu, if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; - mutex_lock(&mcam->lock); err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); if (err) { - mutex_unlock(&mcam->lock); return err; } @@ -3077,10 +3131,66 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu, index, req->cntr); } - mutex_unlock(&mcam->lock); return 0; } +int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu, + struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int err; + + mutex_lock(&mcam->lock); + + err = __npc_mcam_free_counter(rvu, req, rsp); + + mutex_unlock(&mcam->lock); + + return err; +} + +void __rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc, + struct rvu_npc_mcam_rule *rule) +{ + struct npc_mcam_oper_counter_req free_req = { 0 }; + struct msg_rsp free_rsp; + + if (!rule->has_cntr) + return; + + free_req.hdr.pcifunc = pcifunc; + free_req.cntr = rule->cntr; + + __npc_mcam_free_counter(rvu, &free_req, &free_rsp); + rule->has_cntr = false; +} + +void __rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc, + struct rvu_npc_mcam_rule *rule, + struct npc_install_flow_rsp *rsp) +{ + struct npc_mcam_alloc_counter_req cntr_req = { 0 }; + struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 }; + int err; + + cntr_req.hdr.pcifunc = pcifunc; + cntr_req.contig = true; + cntr_req.count = 1; + + /* we try to allocate a counter to track the stats of this + * rule. If a counter could not be allocated then proceed + * without one because counters are scarcer than entries. 
+ */ + err = __npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp); + if (!err && cntr_rsp.count) { + rule->cntr = cntr_rsp.cntr; + rule->has_cntr = true; + rsp->counter = rule->cntr; + } else { + rsp->counter = err; + } +} + int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu, struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index 150635de2bd5..da69e454662a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -1081,44 +1081,26 @@ static void rvu_mcam_add_rule(struct npc_mcam *mcam, static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc, struct rvu_npc_mcam_rule *rule) { - struct npc_mcam_oper_counter_req free_req = { 0 }; - struct msg_rsp free_rsp; + struct npc_mcam *mcam = &rvu->hw->mcam; - if (!rule->has_cntr) - return; + mutex_lock(&mcam->lock); - free_req.hdr.pcifunc = pcifunc; - free_req.cntr = rule->cntr; + __rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule); - rvu_mbox_handler_npc_mcam_free_counter(rvu, &free_req, &free_rsp); - rule->has_cntr = false; + mutex_unlock(&mcam->lock); } static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc, struct rvu_npc_mcam_rule *rule, struct npc_install_flow_rsp *rsp) { - struct npc_mcam_alloc_counter_req cntr_req = { 0 }; - struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 }; - int err; + struct npc_mcam *mcam = &rvu->hw->mcam; - cntr_req.hdr.pcifunc = pcifunc; - cntr_req.contig = true; - cntr_req.count = 1; + mutex_lock(&mcam->lock); - /* we try to allocate a counter to track the stats of this - * rule. If counter could not be allocated then proceed - * without counter because counters are limited than entries. 
- */ - err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, - &cntr_rsp); - if (!err && cntr_rsp.count) { - rule->cntr = cntr_rsp.cntr; - rule->has_cntr = true; - rsp->counter = rule->cntr; - } else { - rsp->counter = err; - } + __rvu_mcam_add_counter_to_rule(rvu, pcifunc, rule, rsp); + + mutex_unlock(&mcam->lock); } static int npc_mcast_update_action_index(struct rvu *rvu, struct npc_install_flow_req *req, @@ -1416,6 +1398,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, struct npc_install_flow_rsp *rsp) { bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK); + bool from_rep_dev = !!is_rep_dev(rvu, req->hdr.pcifunc); struct rvu_switch *rswitch = &rvu->rswitch; int blkaddr, nixlf, err; struct rvu_pfvf *pfvf; @@ -1472,14 +1455,19 @@ process_flow: /* AF installing for a PF/VF */ if (!req->hdr.pcifunc) target = req->vf; + /* PF installing for its VF */ - else if (!from_vf && req->vf) { + if (!from_vf && req->vf && !from_rep_dev) { target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf; pf_set_vfs_mac = req->default_rule && (req->features & BIT_ULL(NPC_DMAC)); } - /* msg received from PF/VF */ + + /* Representor device installing for a representee */ + if (from_rep_dev && req->vf) + target = req->vf; else + /* msg received from PF/VF */ target = req->hdr.pcifunc; /* ignore chan_mask in case pf func is not AF, revisit later */ @@ -1492,8 +1480,10 @@ process_flow: pfvf = rvu_get_pfvf(rvu, target); + if (from_rep_dev) + req->channel = pfvf->rx_chan_base; /* PF installing for its VF */ - if (req->hdr.pcifunc && !from_vf && req->vf) + if (req->hdr.pcifunc && !from_vf && req->vf && !from_rep_dev) set_bit(PF_SET_VF_CFG, &pfvf->flags); /* update req destination mac addr */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 2b299fa85159..62cdc714ba57 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -445,6 +445,7 @@ #define NIX_CONST_MAX_BPIDS GENMASK_ULL(23, 12) #define NIX_CONST_SDP_CHANS GENMASK_ULL(11, 0) +#define NIX_VLAN_ETYPE_MASK GENMASK_ULL(63, 48) #define NIX_AF_MDQ_PARENT_MASK GENMASK_ULL(24, 16) #define NIX_AF_TL4_PARENT_MASK GENMASK_ULL(23, 16) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c new file mode 100644 index 000000000000..052ae5923e3a --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c @@ -0,0 +1,468 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. 
+ * + */ + +#include <linux/bitfield.h> +#include <linux/types.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "rvu.h" +#include "rvu_reg.h" + +#define M(_name, _id, _fn_name, _req_type, _rsp_type) \ +static struct _req_type __maybe_unused \ +*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \ +{ \ + struct _req_type *req; \ + \ + req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \ + &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \ + sizeof(struct _rsp_type)); \ + if (!req) \ + return NULL; \ + req->hdr.sig = OTX2_MBOX_REQ_SIG; \ + req->hdr.id = _id; \ + return req; \ +} + +MBOX_UP_REP_MESSAGES +#undef M + +static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, event->pcifunc); + struct rep_event *msg; + int pf; + + pf = rvu_get_pf(event->pcifunc); + + if (event->event & RVU_EVENT_MAC_ADDR_CHANGE) + ether_addr_copy(pfvf->mac_addr, event->evt_data.mac); + + mutex_lock(&rvu->mbox_lock); + msg = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf); + if (!msg) { + mutex_unlock(&rvu->mbox_lock); + return -ENOMEM; + } + + msg->hdr.pcifunc = event->pcifunc; + msg->event = event->event; + + memcpy(&msg->evt_data, &event->evt_data, sizeof(struct rep_evt_data)); + + otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf); + + otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf); + + mutex_unlock(&rvu->mbox_lock); + return 0; +} + +static void rvu_rep_wq_handler(struct work_struct *work) +{ + struct rvu *rvu = container_of(work, struct rvu, rep_evt_work); + struct rep_evtq_ent *qentry; + struct rep_event *event; + unsigned long flags; + + do { + spin_lock_irqsave(&rvu->rep_evtq_lock, flags); + qentry = list_first_entry_or_null(&rvu->rep_evtq_head, + struct rep_evtq_ent, + node); + if (qentry) + list_del(&qentry->node); + + spin_unlock_irqrestore(&rvu->rep_evtq_lock, flags); + if (!qentry) + break; /* nothing more to process */ + + event = &qentry->event; + + rvu_rep_up_notify(rvu, event); + kfree(qentry); + } while (1); +} + +int rvu_mbox_handler_rep_event_notify(struct rvu *rvu, struct rep_event *req, + struct msg_rsp *rsp) +{ + struct rep_evtq_ent *qentry; + + qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC); + if (!qentry) + return -ENOMEM; + + qentry->event = *req; + spin_lock(&rvu->rep_evtq_lock); + list_add_tail(&qentry->node, &rvu->rep_evtq_head); + spin_unlock(&rvu->rep_evtq_lock); + queue_work(rvu->rep_evt_wq, &rvu->rep_evt_work); + return 0; +} + +int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable) +{ + struct rep_event *req; + int pf; + + if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) + return 0; + + pf = rvu_get_pf(rvu->rep_pcifunc); + + mutex_lock(&rvu->mbox_lock); + req = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf); + if (!req) { + mutex_unlock(&rvu->mbox_lock); + return -ENOMEM; + } + + req->hdr.pcifunc = rvu->rep_pcifunc; + req->event |= RVU_EVENT_PFVF_STATE; + req->pcifunc = pcifunc; + req->evt_data.vf_state = enable; + + otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf); + otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf); + + mutex_unlock(&rvu->mbox_lock); + return 0; +} + +#define RVU_LF_RX_STATS(reg) \ + rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, reg)) + +#define RVU_LF_TX_STATS(reg) \ + rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, reg)) + +int rvu_mbox_handler_nix_lf_stats(struct rvu *rvu, + struct nix_stats_req *req, + struct nix_stats_rsp *rsp) +{ + u16 pcifunc = req->pcifunc; + int nixlf, 
blkaddr, err; + struct msg_req rst_req; + struct msg_rsp rst_rsp; + + err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (err) + return 0; + + if (req->reset) { + rst_req.hdr.pcifunc = pcifunc; + return rvu_mbox_handler_nix_stats_rst(rvu, &rst_req, &rst_rsp); + } + rsp->rx.octs = RVU_LF_RX_STATS(RX_OCTS); + rsp->rx.ucast = RVU_LF_RX_STATS(RX_UCAST); + rsp->rx.bcast = RVU_LF_RX_STATS(RX_BCAST); + rsp->rx.mcast = RVU_LF_RX_STATS(RX_MCAST); + rsp->rx.drop = RVU_LF_RX_STATS(RX_DROP); + rsp->rx.err = RVU_LF_RX_STATS(RX_ERR); + rsp->rx.drop_octs = RVU_LF_RX_STATS(RX_DROP_OCTS); + rsp->rx.drop_mcast = RVU_LF_RX_STATS(RX_DRP_MCAST); + rsp->rx.drop_bcast = RVU_LF_RX_STATS(RX_DRP_BCAST); + + rsp->tx.octs = RVU_LF_TX_STATS(TX_OCTS); + rsp->tx.ucast = RVU_LF_TX_STATS(TX_UCAST); + rsp->tx.bcast = RVU_LF_TX_STATS(TX_BCAST); + rsp->tx.mcast = RVU_LF_TX_STATS(TX_MCAST); + rsp->tx.drop = RVU_LF_TX_STATS(TX_DROP); + + rsp->pcifunc = req->pcifunc; + return 0; +} + +static u16 rvu_rep_get_vlan_id(struct rvu *rvu, u16 pcifunc) +{ + int id; + + for (id = 0; id < rvu->rep_cnt; id++) + if (rvu->rep2pfvf_map[id] == pcifunc) + return id; + return 0; +} + +static int rvu_rep_tx_vlan_cfg(struct rvu *rvu, u16 pcifunc, + u16 vlan_tci, int *vidx) +{ + struct nix_vtag_config_rsp rsp = {}; + struct nix_vtag_config req = {}; + u64 etype = ETH_P_8021Q; + int err; + + /* Insert vlan tag */ + req.hdr.pcifunc = pcifunc; + req.vtag_size = VTAGSIZE_T4; + req.cfg_type = 0; /* tx vlan cfg */ + req.tx.cfg_vtag0 = true; + req.tx.vtag0 = FIELD_PREP(NIX_VLAN_ETYPE_MASK, etype) | vlan_tci; + + err = rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp); + if (err) { + dev_err(rvu->dev, "Tx vlan config failed\n"); + return err; + } + *vidx = rsp.vtag0_idx; + return 0; +} + +static int rvu_rep_rx_vlan_cfg(struct rvu *rvu, u16 pcifunc) +{ + struct nix_vtag_config req = {}; + struct nix_vtag_config_rsp rsp; + + /* config strip, capture and size */ + req.hdr.pcifunc = pcifunc; + req.vtag_size = VTAGSIZE_T4; + req.cfg_type = 1; /* rx vlan cfg */ + req.rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0; + req.rx.strip_vtag = true; + req.rx.capture_vtag = false; + + return rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp); +} + +static int rvu_rep_install_rx_rule(struct rvu *rvu, u16 pcifunc, + u16 entry, bool rte) +{ + struct npc_install_flow_req req = {}; + struct npc_install_flow_rsp rsp = {}; + struct rvu_pfvf *pfvf; + u16 vlan_tci, rep_id; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + + /* To steer the traffic from Representee to Representor */ + rep_id = rvu_rep_get_vlan_id(rvu, pcifunc); + if (rte) { + vlan_tci = rep_id | BIT_ULL(8); + req.vf = rvu->rep_pcifunc; + req.op = NIX_RX_ACTIONOP_UCAST; + req.index = rep_id; + } else { + vlan_tci = rep_id; + req.vf = pcifunc; + req.op = NIX_RX_ACTION_DEFAULT; + } + + rvu_rep_rx_vlan_cfg(rvu, req.vf); + req.entry = entry; + req.hdr.pcifunc = 0; /* AF is requester */ + req.features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_VLAN_ETYPE_CTAG); + req.vtag0_valid = true; + req.vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0; + req.packet.vlan_etype = cpu_to_be16(ETH_P_8021Q); + req.mask.vlan_etype = cpu_to_be16(ETH_P_8021Q); + req.packet.vlan_tci = cpu_to_be16(vlan_tci); + req.mask.vlan_tci = cpu_to_be16(0xffff); + + req.channel = RVU_SWITCH_LBK_CHAN; + req.chan_mask = 0xffff; + req.intf = pfvf->nix_rx_intf; + + return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); +} + +static int rvu_rep_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry, + bool rte) +{ + struct npc_install_flow_req req = {}; + struct 
npc_install_flow_rsp rsp = {};
+	struct rvu_pfvf *pfvf;
+	int vidx, err;
+	u16 vlan_tci;
+	u8 lbkid;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	vlan_tci = rvu_rep_get_vlan_id(rvu, pcifunc);
+	if (rte)
+		vlan_tci |= BIT_ULL(8);
+
+	err = rvu_rep_tx_vlan_cfg(rvu, pcifunc, vlan_tci, &vidx);
+	if (err)
+		return err;
+
+	lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
+	req.hdr.pcifunc = 0; /* AF is requester */
+	if (rte) {
+		req.vf = pcifunc;
+	} else {
+		req.vf = rvu->rep_pcifunc;
+		req.packet.sq_id = vlan_tci;
+		req.mask.sq_id = 0xffff;
+	}
+
+	req.entry = entry;
+	req.intf = pfvf->nix_tx_intf;
+	req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
+	req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
+	req.set_cntr = 1;
+	req.vtag0_def = vidx;
+	req.vtag0_op = 1;
+	return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+int rvu_rep_install_mcam_rules(struct rvu *rvu)
+{
+	struct rvu_switch *rswitch = &rvu->rswitch;
+	u16 start = rswitch->start_entry;
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc, entry = 0;
+	int pf, vf, numvfs;
+	int err, nixlf, i;
+	u8 rep;
+
+	for (pf = 1; pf < hw->total_pfs; pf++) {
+		if (!is_pf_cgxmapped(rvu, pf))
+			continue;
+
+		pcifunc = pf << RVU_PFVF_PF_SHIFT;
+		rvu_get_nix_blkaddr(rvu, pcifunc);
+		rep = true;
+		for (i = 0; i < 2; i++) {
+			err = rvu_rep_install_rx_rule(rvu, pcifunc,
+						      start + entry, rep);
+			if (err)
+				return err;
+			rswitch->entry2pcifunc[entry++] = pcifunc;
+
+			err = rvu_rep_install_tx_rule(rvu, pcifunc,
+						      start + entry, rep);
+			if (err)
+				return err;
+			rswitch->entry2pcifunc[entry++] = pcifunc;
+			rep = false;
+		}
+
+		rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
+		for (vf = 0; vf < numvfs; vf++) {
+			pcifunc = pf << RVU_PFVF_PF_SHIFT |
+				  ((vf + 1) & RVU_PFVF_FUNC_MASK);
+			rvu_get_nix_blkaddr(rvu, pcifunc);
+
+			/* Skip installing rules if nixlf is not attached */
+			err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
+			if (err)
+				continue;
+			rep = true;
+			for (i = 0; i < 2; i++) {
+				err = rvu_rep_install_rx_rule(rvu, pcifunc,
+							      start + entry,
+							      rep);
+				if (err)
+					return err;
+				rswitch->entry2pcifunc[entry++] = pcifunc;
+
+				err = rvu_rep_install_tx_rule(rvu, pcifunc,
+							      start + entry,
+							      rep);
+				if (err)
+					return err;
+				rswitch->entry2pcifunc[entry++] = pcifunc;
+				rep = false;
+			}
+		}
+	}
+
+	/* Initialize the wq for handling REP events */
+	spin_lock_init(&rvu->rep_evtq_lock);
+	INIT_LIST_HEAD(&rvu->rep_evtq_head);
+	INIT_WORK(&rvu->rep_evt_work, rvu_rep_wq_handler);
+	rvu->rep_evt_wq = alloc_workqueue("rep_evt_wq", 0, 0);
+	if (!rvu->rep_evt_wq) {
+		dev_err(rvu->dev, "REP workqueue allocation failed\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+void rvu_rep_update_rules(struct rvu *rvu, u16 pcifunc, bool ena)
+{
+	struct rvu_switch *rswitch = &rvu->rswitch;
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u32 max = rswitch->used_entries;
+	int blkaddr;
+	u16 entry;
+
+	if (!rswitch->used_entries)
+		return;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+	if (blkaddr < 0)
+		return;
+
+	rvu_switch_enable_lbk_link(rvu, pcifunc, ena);
+	mutex_lock(&mcam->lock);
+	for (entry = 0; entry < max; entry++) {
+		if (rswitch->entry2pcifunc[entry] == pcifunc)
+			npc_enable_mcam_entry(rvu, mcam, blkaddr, entry, ena);
+	}
+	mutex_unlock(&mcam->lock);
+}
+
+int rvu_rep_pf_init(struct rvu *rvu)
+{
+	u16 pcifunc = rvu->rep_pcifunc;
+	struct rvu_pfvf *pfvf;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	set_bit(NIXLF_INITIALIZED, &pfvf->flags);
+	rvu_switch_enable_lbk_link(rvu, pcifunc, true);
+	rvu_rep_rx_vlan_cfg(rvu, pcifunc);
+	return 0;
+}
+
+int rvu_mbox_handler_esw_cfg(struct rvu
*rvu, struct esw_cfg_req *req, + struct msg_rsp *rsp) +{ + if (req->hdr.pcifunc != rvu->rep_pcifunc) + return 0; + + rvu->rep_mode = req->ena; + + if (!rvu->rep_mode) + rvu_npc_free_mcam_entries(rvu, req->hdr.pcifunc, -1); + + return 0; +} + +int rvu_mbox_handler_get_rep_cnt(struct rvu *rvu, struct msg_req *req, + struct get_rep_cnt_rsp *rsp) +{ + int pf, vf, numvfs, hwvf, rep = 0; + u16 pcifunc; + + rvu->rep_pcifunc = req->hdr.pcifunc; + rsp->rep_cnt = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs; + rvu->rep_cnt = rsp->rep_cnt; + + rvu->rep2pfvf_map = devm_kzalloc(rvu->dev, rvu->rep_cnt * + sizeof(u16), GFP_KERNEL); + if (!rvu->rep2pfvf_map) + return -ENOMEM; + + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { + if (!is_pf_cgxmapped(rvu, pf)) + continue; + pcifunc = pf << RVU_PFVF_PF_SHIFT; + rvu->rep2pfvf_map[rep] = pcifunc; + rsp->rep_pf_map[rep] = pcifunc; + rep++; + rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); + for (vf = 0; vf < numvfs; vf++) { + rvu->rep2pfvf_map[rep] = pcifunc | + ((vf + 1) & RVU_PFVF_FUNC_MASK); + rsp->rep_pf_map[rep] = rvu->rep2pfvf_map[rep]; + rep++; + } + } + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index fc8da2090657..77ac94cb2ec4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -823,4 +823,30 @@ enum nix_tx_vtag_op { #define VTAG_STRIP BIT_ULL(4) #define VTAG_CAPTURE BIT_ULL(5) +/* NIX TX stats */ +enum nix_stat_lf_tx { + TX_UCAST = 0x0, + TX_BCAST = 0x1, + TX_MCAST = 0x2, + TX_DROP = 0x3, + TX_OCTS = 0x4, + TX_STATS_ENUM_LAST, +}; + +/* NIX RX stats */ +enum nix_stat_lf_rx { + RX_OCTS = 0x0, + RX_UCAST = 0x1, + RX_BCAST = 0x2, + RX_MCAST = 0x3, + RX_DROP = 0x4, + RX_DROP_OCTS = 0x5, + RX_FCS = 0x6, + RX_ERR = 0x7, + RX_DRP_BCAST = 0x8, + RX_DRP_MCAST = 0x9, + RX_DRP_L3BCAST = 0xa, + RX_DRP_L3MCAST = 0xb, + RX_STATS_ENUM_LAST, +}; #endif /* RVU_STRUCT_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c index 854045ed3b06..268efb7c1c15 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c @@ -8,7 +8,7 @@ #include <linux/bitfield.h> #include "rvu.h" -static void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool enable) +void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool enable) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct nix_hw *nix_hw; @@ -166,6 +166,8 @@ void rvu_switch_enable(struct rvu *rvu) alloc_req.contig = true; alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs; + if (rvu->rep_mode) + alloc_req.count = alloc_req.count * 4; ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req, &alloc_rsp); if (ret) { @@ -189,7 +191,12 @@ void rvu_switch_enable(struct rvu *rvu) rswitch->used_entries = alloc_rsp.count; rswitch->start_entry = alloc_rsp.entry; - ret = rvu_switch_install_rules(rvu); + if (rvu->rep_mode) { + rvu_rep_pf_init(rvu); + ret = rvu_rep_install_mcam_rules(rvu); + } else { + ret = rvu_switch_install_rules(rvu); + } if (ret) goto uninstall_rules; @@ -222,6 +229,9 @@ void rvu_switch_disable(struct rvu *rvu) if (!rswitch->used_entries) return; + if (rvu->rep_mode) + goto free_ents; + for (pf = 1; pf < hw->total_pfs; pf++) { if (!is_pf_cgxmapped(rvu, pf)) continue; @@ -249,6 +259,7 @@ void rvu_switch_disable(struct rvu *rvu) } } +free_ents: uninstall_req.start = 
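/* Entry accounting behind the "alloc_req.count * 4" above: for each
 * CGX-mapped PF/VF, rvu_rep_install_mcam_rules() runs its i < 2 loop
 * installing an RX and a TX rule per iteration, once per direction, i.e.
 * four MCAM entries per function. Worked example (numbers illustrative):
 *
 *	count = (rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs) * 4;
 *	e.g. 2 PFs + 6 VFs  ->  32 MCAM entries
 */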
rswitch->start_entry; uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1; free_req.all = 1; @@ -258,12 +269,15 @@ void rvu_switch_disable(struct rvu *rvu) kfree(rswitch->entry2pcifunc); } -void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc) +void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc, bool ena) { struct rvu_switch *rswitch = &rvu->rswitch; u32 max = rswitch->used_entries; u16 entry; + if (rvu->rep_mode) + return rvu_rep_update_rules(rvu, pcifunc, ena); + if (!rswitch->used_entries) return; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index 64a97a0a10ed..dbc971266865 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -5,11 +5,13 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o otx2_ptp.o obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o +obj-$(CONFIG_RVU_ESWITCH) += rvu_rep.o rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \ otx2_devlink.o qos_sq.o qos.o rvu_nicvf-y := otx2_vf.o +rvu_rep-y := rep.o rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index 7417087b6db5..a15cc86635d6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -72,7 +72,7 @@ int cn10k_lmtst_init(struct otx2_nic *pfvf) } EXPORT_SYMBOL(cn10k_lmtst_init); -int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) +int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura) { struct nix_cn10k_aq_enq_req *aq; struct otx2_nic *pfvf = dev; @@ -88,7 +88,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) aq->sq.ena = 1; aq->sq.smq = otx2_get_smq_idx(pfvf, qidx); aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); - aq->sq.default_chan = pfvf->hw.tx_chan_base; + aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset; aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ aq->sq.sqb_aura = sqb_aura; aq->sq.sq_int_ena = NIX_SQINT_BITS; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h index c1861f7de254..e3f0bce9908f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h @@ -26,7 +26,7 @@ static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu) int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); -int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); +int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura); int cn10k_lmtst_init(struct otx2_nic *pfvf); int cn10k_free_all_ipolicers(struct otx2_nic *pfvf); int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 6e0183f0d5a1..523ecb798a7a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -83,6 +83,7 @@ int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx) otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx); return 1; } +EXPORT_SYMBOL(otx2_update_rq_stats); int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx) 
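/* Usage sketch for the chan_offset parameter added above: otx2_sq_init()
 * (later in this patch) spreads send queues round-robin across the
 * function's transmit channels, and the SQ context then targets the
 * offset channel:
 *
 *	chan_offset = qidx % pfvf->hw.tx_chan_cnt;
 *	aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset;
 *
 * With tx_chan_cnt == 4 (illustrative), SQ0..SQ3 map to channels
 * base+0..base+3 and SQ4 wraps back to base+0.
 */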
{ @@ -99,6 +100,7 @@ int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx) otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx); return 1; } +EXPORT_SYMBOL(otx2_update_sq_stats); void otx2_get_dev_stats(struct otx2_nic *pfvf) { @@ -246,13 +248,14 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu) mutex_unlock(&pfvf->mbox.lock); return err; } +EXPORT_SYMBOL(otx2_hw_set_mtu); int otx2_config_pause_frm(struct otx2_nic *pfvf) { struct cgx_pause_frm_cfg *req; int err; - if (is_otx2_lbkvf(pfvf->pdev)) + if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) return 0; mutex_lock(&pfvf->mbox.lock); @@ -646,12 +649,22 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq); req->regval[2] = dwrr_val; } else if (lvl == NIX_TXSCH_LVL_TL4) { + int sdp_chan = hw->tx_chan_base + prio; + + if (is_otx2_sdp_rep(pfvf->pdev)) + prio = 0; parent = schq_list[NIX_TXSCH_LVL_TL3][prio]; req->reg[0] = NIX_AF_TL4X_PARENT(schq); req->regval[0] = (u64)parent << 16; req->num_regs++; req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq); req->regval[1] = dwrr_val; + if (is_otx2_sdp_rep(pfvf->pdev)) { + req->num_regs++; + req->reg[2] = NIX_AF_TL4X_SDP_LINK_CFG(schq); + req->regval[2] = BIT_ULL(12) | BIT_ULL(13) | + (sdp_chan & 0xff); + } } else if (lvl == NIX_TXSCH_LVL_TL3) { parent = schq_list[NIX_TXSCH_LVL_TL2][prio]; req->reg[0] = NIX_AF_TL3X_PARENT(schq); @@ -659,7 +672,8 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for req->num_regs++; req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq); req->regval[1] = dwrr_val; - if (lvl == hw->txschq_link_cfg_lvl) { + if (lvl == hw->txschq_link_cfg_lvl && + !is_otx2_sdp_rep(pfvf->pdev)) { req->num_regs++; req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link); /* Enable this queue and backpressure @@ -676,7 +690,8 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq); req->regval[1] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 | dwrr_val; - if (lvl == hw->txschq_link_cfg_lvl) { + if (lvl == hw->txschq_link_cfg_lvl && + !is_otx2_sdp_rep(pfvf->pdev)) { req->num_regs++; req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link); /* Enable this queue and backpressure @@ -735,6 +750,7 @@ EXPORT_SYMBOL(otx2_smq_flush); int otx2_txsch_alloc(struct otx2_nic *pfvf) { + int chan_cnt = pfvf->hw.tx_chan_cnt; struct nix_txsch_alloc_req *req; struct nix_txsch_alloc_rsp *rsp; int lvl, schq, rc; @@ -747,6 +763,12 @@ int otx2_txsch_alloc(struct otx2_nic *pfvf) /* Request one schq per level */ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) req->schq[lvl] = 1; + + if (is_otx2_sdp_rep(pfvf->pdev) && chan_cnt > 1) { + req->schq[NIX_TXSCH_LVL_SMQ] = chan_cnt; + req->schq[NIX_TXSCH_LVL_TL4] = chan_cnt; + } + rc = otx2_sync_mbox_msg(&pfvf->mbox); if (rc) return rc; @@ -757,10 +779,12 @@ int otx2_txsch_alloc(struct otx2_nic *pfvf) return PTR_ERR(rsp); /* Setup transmit scheduler list */ - for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + pfvf->hw.txschq_cnt[lvl] = rsp->schq[lvl]; for (schq = 0; schq < rsp->schq[lvl]; schq++) pfvf->hw.txschq_list[lvl][schq] = rsp->schq_list[lvl][schq]; + } pfvf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl; pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio; @@ -798,12 +822,15 @@ EXPORT_SYMBOL(otx2_txschq_free_one); void otx2_txschq_stop(struct otx2_nic *pfvf) { - int lvl, schq; + int lvl, schq, idx; /* free non QOS TLx nodes */ - for (lvl = 0; lvl < 
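/* Shape of the scheduler tree requested above for an SDP representor PF,
 * assuming tx_chan_cnt == 4 (illustrative numbers): a single node at
 * TL1/TL2/TL3, but one SMQ and one TL4 per channel so that every channel
 * gets its own leaf queue:
 *
 *	req->schq[NIX_TXSCH_LVL_SMQ] = 4;
 *	req->schq[NIX_TXSCH_LVL_TL4] = 4;
 *	req->schq[NIX_TXSCH_LVL_TL3] = 1;	(TL2/TL1 likewise)
 */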
NIX_TXSCH_LVL_CNT; lvl++) - otx2_txschq_free_one(pfvf, lvl, - pfvf->hw.txschq_list[lvl][0]); + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + for (idx = 0; idx < pfvf->hw.txschq_cnt[lvl]; idx++) { + otx2_txschq_free_one(pfvf, lvl, + pfvf->hw.txschq_list[lvl][idx]); + } + } /* Clear the txschq list */ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { @@ -883,7 +910,7 @@ static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura) return otx2_sync_mbox_msg(&pfvf->mbox); } -int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) +int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura) { struct otx2_nic *pfvf = dev; struct otx2_snd_queue *sq; @@ -902,7 +929,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) aq->sq.ena = 1; aq->sq.smq = otx2_get_smq_idx(pfvf, qidx); aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); - aq->sq.default_chan = pfvf->hw.tx_chan_base; + aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset; aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ aq->sq.sqb_aura = sqb_aura; aq->sq.sq_int_ena = NIX_SQINT_BITS; @@ -925,6 +952,7 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) struct otx2_qset *qset = &pfvf->qset; struct otx2_snd_queue *sq; struct otx2_pool *pool; + u8 chan_offset; int err; pool = &pfvf->qset.pool[sqb_aura]; @@ -971,7 +999,8 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) sq->stats.bytes = 0; sq->stats.pkts = 0; - err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura); + chan_offset = qidx % pfvf->hw.tx_chan_cnt; + err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, chan_offset, sqb_aura); if (err) { kfree(sq->sg); sq->sg = NULL; @@ -1738,6 +1767,8 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf, pfvf->hw.sqb_size = rsp->sqb_size; pfvf->hw.rx_chan_base = rsp->rx_chan_base; pfvf->hw.tx_chan_base = rsp->tx_chan_base; + pfvf->hw.rx_chan_cnt = rsp->rx_chan_cnt; + pfvf->hw.tx_chan_cnt = rsp->tx_chan_cnt; pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx; pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx; pfvf->hw.cgx_links = rsp->cgx_links; @@ -1782,6 +1813,7 @@ void otx2_free_cints(struct otx2_nic *pfvf, int n) free_irq(vector, &qset->napi[qidx]); } } +EXPORT_SYMBOL(otx2_free_cints); void otx2_set_cints_affinity(struct otx2_nic *pfvf) { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 327254e578d5..566848663fea 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -29,6 +29,7 @@ #include "otx2_devlink.h" #include <rvu_trace.h> #include "qos.h" +#include "rep.h" /* IPv4 flag more fragment bit */ #define IPV4_FLAG_MORE 0x20 @@ -41,6 +42,8 @@ #define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200 #define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00 +#define PCI_DEVID_OCTEONTX2_SDP_REP 0xA0F7 + /* PCI BAR nos */ #define PCI_CFG_REG_BAR_NUM 2 #define PCI_MBOX_BAR_NUM 4 @@ -120,33 +123,6 @@ enum otx2_errcodes_re { ERRCODE_IL4_CSUM = 0x22, }; -/* NIX TX stats */ -enum nix_stat_lf_tx { - TX_UCAST = 0x0, - TX_BCAST = 0x1, - TX_MCAST = 0x2, - TX_DROP = 0x3, - TX_OCTS = 0x4, - TX_STATS_ENUM_LAST, -}; - -/* NIX RX stats */ -enum nix_stat_lf_rx { - RX_OCTS = 0x0, - RX_UCAST = 0x1, - RX_BCAST = 0x2, - RX_MCAST = 0x3, - RX_DROP = 0x4, - RX_DROP_OCTS = 0x5, - RX_FCS = 0x6, - RX_ERR = 0x7, - RX_DRP_BCAST = 0x8, - RX_DRP_MCAST = 0x9, - RX_DRP_L3BCAST = 0xa, - RX_DRP_L3MCAST = 0xb, - RX_STATS_ENUM_LAST, -}; - struct otx2_dev_stats { u64 rx_bytes; u64 
rx_frames; @@ -224,6 +200,7 @@ struct otx2_hw { /* NIX */ u8 txschq_link_cfg_lvl; + u8 txschq_cnt[NIX_TXSCH_LVL_CNT]; u8 txschq_aggr_lvl_rr_prio; u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; u16 matchall_ipolicer; @@ -234,6 +211,8 @@ struct otx2_hw { /* HW settings, coalescing etc */ u16 rx_chan_base; u16 tx_chan_base; + u8 rx_chan_cnt; + u8 tx_chan_cnt; u16 cq_qcount_wait; u16 cq_ecount_wait; u16 rq_skid; @@ -368,7 +347,8 @@ struct otx2_flow_config { }; struct dev_hw_ops { - int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura); + int (*sq_aq_init)(void *dev, u16 qidx, u8 chan_offset, + u16 sqb_aura); void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq, int size, int qidx); int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq); @@ -466,6 +446,8 @@ struct otx2_nic { #define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15) #define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16) #define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17) +#define OTX2_FLAG_REP_MODE_ENABLED BIT_ULL(18) +#define OTX2_FLAG_PORT_UP BIT_ULL(19) u64 flags; u64 *cq_op_addr; @@ -533,11 +515,19 @@ struct otx2_nic { #if IS_ENABLED(CONFIG_MACSEC) struct cn10k_mcs_cfg *macsec_cfg; #endif + +#if IS_ENABLED(CONFIG_RVU_ESWITCH) + struct rep_dev **reps; + int rep_cnt; + u16 rep_pf_map[RVU_MAX_REP]; + u16 esw_mode; +#endif }; static inline bool is_otx2_lbkvf(struct pci_dev *pdev) { - return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF; + return (pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF) || + (pdev->device == PCI_DEVID_RVU_REP); } static inline bool is_96xx_A0(struct pci_dev *pdev) @@ -552,6 +542,11 @@ static inline bool is_96xx_B0(struct pci_dev *pdev) (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF); } +static inline bool is_otx2_sdp_rep(struct pci_dev *pdev) +{ + return pdev->device == PCI_DEVID_OCTEONTX2_SDP_REP; +} + /* REVID for PCIe devices. 
* Bits 0..1: minor pass, bit 3..2: major pass * bits 7..4: midr id @@ -914,15 +909,19 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf, static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx) { u16 smq; + int idx; + #ifdef CONFIG_DCB if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx]) return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx]; #endif /* check if qidx falls under QOS queues */ - if (qidx >= pfvf->hw.non_qos_queues) + if (qidx >= pfvf->hw.non_qos_queues) { smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues]; - else - smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; + } else { + idx = qidx % pfvf->hw.txschq_cnt[NIX_TXSCH_LVL_SMQ]; + smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][idx]; + } return smq; } @@ -989,8 +988,8 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable); void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx); void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq); int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura); -int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); -int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); +int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura); +int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura); int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, dma_addr_t *dma); int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, @@ -1142,4 +1141,12 @@ u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb, int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid); void otx2_qos_config_txschq(struct otx2_nic *pfvf); void otx2_clean_qos_queues(struct otx2_nic *pfvf); +int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info); +int otx2_setup_tc_cls_flower(struct otx2_nic *nic, + struct flow_cls_offload *cls_flower); + +static inline int mcam_entry_cmp(const void *a, const void *b) +{ + return *(u16 *)a - *(u16 *)b; +} #endif /* OTX2_COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c index 53f14aa944bd..33ec9a7f7c03 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c @@ -141,7 +141,56 @@ static const struct devlink_param otx2_dl_params[] = { otx2_dl_ucast_flt_cnt_validate), }; +#ifdef CONFIG_RVU_ESWITCH +static int otx2_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + + if (!otx2_rep_dev(pfvf->pdev)) + return -EOPNOTSUPP; + + *mode = pfvf->esw_mode; + + return 0; +} + +static int otx2_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + int ret = 0; + + if (!otx2_rep_dev(pfvf->pdev)) + return -EOPNOTSUPP; + + if (pfvf->esw_mode == mode) + return 0; + + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + rvu_rep_destroy(pfvf); + break; + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + ret = rvu_rep_create(pfvf, extack); + break; + default: + return -EINVAL; + } + + if (!ret) + pfvf->esw_mode = mode; + + return ret; +} +#endif + static const struct devlink_ops otx2_devlink_ops = { +#ifdef CONFIG_RVU_ESWITCH + .eswitch_mode_get = otx2_devlink_eswitch_mode_get, + .eswitch_mode_set = otx2_devlink_eswitch_mode_set, +#endif 
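/* With these ops registered, the eswitch mode is driven from userspace
 * with the standard devlink tool (PCI address illustrative):
 *
 *	devlink dev eswitch set pci/0002:1c:00.0 mode switchdev
 *	devlink dev eswitch show pci/0002:1c:00.0
 */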
}; int otx2_register_dl(struct otx2_nic *pfvf) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 58720a161ee2..47bfd1fb37d4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -64,11 +64,6 @@ static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf) return 0; } -static int mcam_entry_cmp(const void *a, const void *b) -{ - return *(u16 *)a - *(u16 *)b; -} - int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index e6b03bad2dba..e310f99b1736 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -519,6 +519,7 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work) switch (msg->id) { case MBOX_MSG_CGX_LINK_EVENT: + case MBOX_MSG_REP_EVENT_UP_NOTIFY: break; default: if (msg->rc) @@ -832,6 +833,9 @@ static void otx2_handle_link_event(struct otx2_nic *pf) struct cgx_link_user_info *linfo = &pf->linfo; struct net_device *netdev = pf->netdev; + if (pf->flags & OTX2_FLAG_PORT_UP) + return; + pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name, linfo->link_up ? "UP" : "DOWN", linfo->speed, linfo->full_duplex ? "Full" : "Half"); @@ -844,6 +848,35 @@ static void otx2_handle_link_event(struct otx2_nic *pf) } } +static int otx2_mbox_up_handler_rep_event_up_notify(struct otx2_nic *pf, + struct rep_event *info, + struct msg_rsp *rsp) +{ + struct net_device *netdev = pf->netdev; + + if (info->event == RVU_EVENT_MTU_CHANGE) { + netdev->mtu = info->evt_data.mtu; + return 0; + } + + if (info->event == RVU_EVENT_PORT_STATE) { + if (info->evt_data.port_state) { + pf->flags |= OTX2_FLAG_PORT_UP; + netif_carrier_on(netdev); + netif_tx_start_all_queues(netdev); + } else { + pf->flags &= ~OTX2_FLAG_PORT_UP; + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + } + return 0; + } +#ifdef CONFIG_RVU_ESWITCH + rvu_event_up_notify(pf, info); +#endif + return 0; +} + int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf, struct mcs_intr_info *event, struct msg_rsp *rsp) @@ -913,6 +946,7 @@ static int otx2_process_mbox_msg_up(struct otx2_nic *pf, } MBOX_UP_CGX_MESSAGES MBOX_UP_MCS_MESSAGES +MBOX_UP_REP_MESSAGES #undef M break; default: @@ -1016,6 +1050,7 @@ void otx2_disable_mbox_intr(struct otx2_nic *pf) otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0)); free_irq(vector, pf); } +EXPORT_SYMBOL(otx2_disable_mbox_intr); int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af) { @@ -1076,6 +1111,7 @@ void otx2_pfaf_mbox_destroy(struct otx2_nic *pf) otx2_mbox_destroy(&mbox->mbox); otx2_mbox_destroy(&mbox->mbox_up); } +EXPORT_SYMBOL(otx2_pfaf_mbox_destroy); int otx2_pfaf_mbox_init(struct otx2_nic *pf) { @@ -1398,6 +1434,7 @@ irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq) return IRQ_HANDLED; } +EXPORT_SYMBOL(otx2_cq_intr_handler); void otx2_disable_napi(struct otx2_nic *pf) { @@ -1415,6 +1452,7 @@ void otx2_disable_napi(struct otx2_nic *pf) netif_napi_del(&cq_poll->napi); } } +EXPORT_SYMBOL(otx2_disable_napi); static void otx2_free_cq_res(struct otx2_nic *pf) { @@ -1496,10 +1534,11 @@ int otx2_init_hw_resources(struct otx2_nic *pf) hw->sqpool_cnt = otx2_get_total_tx_queues(pf); hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt; - /* Maximum hardware 
supported transmit length */ - pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN; - - pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu); + if (!otx2_rep_dev(pf->pdev)) { + /* Maximum hardware supported transmit length */ + pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN; + pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu); + } mutex_lock(&mbox->lock); /* NPA init */ @@ -1552,10 +1591,15 @@ int otx2_init_hw_resources(struct otx2_nic *pf) } for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { - err = otx2_txschq_config(pf, lvl, 0, false); - if (err) { - mutex_unlock(&mbox->lock); - goto err_free_nix_queues; + int idx; + + for (idx = 0; idx < pf->hw.txschq_cnt[lvl]; idx++) { + err = otx2_txschq_config(pf, lvl, idx, false); + if (err) { + dev_err(pf->dev, "Failed to config TXSCH\n"); + mutex_unlock(&mbox->lock); + goto err_free_nix_queues; + } } } @@ -1604,6 +1648,7 @@ exit: mutex_unlock(&mbox->lock); return err; } +EXPORT_SYMBOL(otx2_init_hw_resources); void otx2_free_hw_resources(struct otx2_nic *pf) { @@ -1627,11 +1672,12 @@ void otx2_free_hw_resources(struct otx2_nic *pf) otx2_pfc_txschq_stop(pf); #endif - otx2_clean_qos_queues(pf); + if (!otx2_rep_dev(pf->pdev)) + otx2_clean_qos_queues(pf); mutex_lock(&mbox->lock); /* Disable backpressure */ - if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK)) + if (!is_otx2_lbkvf(pf->pdev)) otx2_nix_config_bp(pf, false); mutex_unlock(&mbox->lock); @@ -1663,7 +1709,8 @@ void otx2_free_hw_resources(struct otx2_nic *pf) otx2_free_cq_res(pf); /* Free all ingress bandwidth profiles allocated */ - cn10k_free_all_ipolicers(pf); + if (!otx2_rep_dev(pf->pdev)) + cn10k_free_all_ipolicers(pf); mutex_lock(&mbox->lock); /* Reset NIX LF */ @@ -1691,6 +1738,7 @@ void otx2_free_hw_resources(struct otx2_nic *pf) } mutex_unlock(&mbox->lock); } +EXPORT_SYMBOL(otx2_free_hw_resources); static bool otx2_promisc_use_mce_list(struct otx2_nic *pfvf) { @@ -1784,6 +1832,7 @@ void otx2_free_queue_mem(struct otx2_qset *qset) kfree(qset->napi); qset->napi = NULL; } +EXPORT_SYMBOL(otx2_free_queue_mem); int otx2_alloc_queue_mem(struct otx2_nic *pf) { @@ -1830,6 +1879,7 @@ err_free_mem: otx2_free_queue_mem(qset); return -ENOMEM; } +EXPORT_SYMBOL(otx2_alloc_queue_mem); int otx2_open(struct net_device *netdev) { @@ -1963,6 +2013,7 @@ int otx2_open(struct net_device *netdev) } pf->flags &= ~OTX2_FLAG_INTF_DOWN; + pf->flags &= ~OTX2_FLAG_PORT_UP; /* 'intf_down' may be checked on any cpu */ smp_wmb(); @@ -2105,7 +2156,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev) sq = &pf->qset.sq[sq_idx]; txq = netdev_get_tx_queue(netdev, qidx); - if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) { + if (!otx2_sq_append_skb(pf, txq, sq, skb, qidx)) { netif_tx_stop_queue(txq); /* Check again, incase SQBs got freed up */ @@ -2861,6 +2912,7 @@ int otx2_realloc_msix_vectors(struct otx2_nic *pf) return otx2_register_mbox_intr(pf, false); } +EXPORT_SYMBOL(otx2_realloc_msix_vectors); static int otx2_sriov_vfcfg_init(struct otx2_nic *pf) { @@ -2976,6 +3028,7 @@ err_free_irq_vectors: return err; } +EXPORT_SYMBOL(otx2_init_rsrc); static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index e63cc1eb6d89..9a226ca74425 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -443,6 +443,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic, struct flow_action_entry 
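/* Round trip behind the rep_event_up_notify handler added to otx2_pf.c
 * above (sketch): a representor ndo (e.g. ndo_change_mtu) sends
 * REP_EVENT_NOTIFY down to the AF, the AF's event workqueue relays it as
 * REP_EVENT_UP_NOTIFY to the PF/VF owning the representee, and the
 * handler applies it: an MTU update for RVU_EVENT_MTU_CHANGE, or
 * carrier and TX queue state for RVU_EVENT_PORT_STATE.
 */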
*act; struct net_device *target; struct otx2_nic *priv; + struct rep_dev *rdev; u32 burst, mark = 0; u8 nr_police = 0; u8 num_intf = 1; @@ -464,14 +465,18 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic, return 0; case FLOW_ACTION_REDIRECT_INGRESS: target = act->dev; - priv = netdev_priv(target); - /* npc_install_flow_req doesn't support passing a target pcifunc */ - if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) { - NL_SET_ERR_MSG_MOD(extack, - "can't redirect to other pf/vf"); - return -EOPNOTSUPP; + if (target->dev.parent) { + priv = netdev_priv(target); + if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) { + NL_SET_ERR_MSG_MOD(extack, + "can't redirect to other pf/vf"); + return -EOPNOTSUPP; + } + req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK; + } else { + rdev = netdev_priv(target); + req->vf = rdev->pcifunc & RVU_PFVF_FUNC_MASK; } - req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK; /* if op is already set; avoid overwriting the same */ if (!req->op) @@ -1300,6 +1305,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, req->channel = nic->hw.rx_chan_base; req->entry = flow_cfg->flow_ent[mcam_idx]; req->intf = NIX_INTF_RX; + req->vf = nic->pcifunc; req->set_cntr = 1; new_node->entry = req->entry; @@ -1400,8 +1406,8 @@ static int otx2_tc_get_flow_stats(struct otx2_nic *nic, return 0; } -static int otx2_setup_tc_cls_flower(struct otx2_nic *nic, - struct flow_cls_offload *cls_flower) +int otx2_setup_tc_cls_flower(struct otx2_nic *nic, + struct flow_cls_offload *cls_flower) { switch (cls_flower->command) { case FLOW_CLS_REPLACE: @@ -1414,6 +1420,7 @@ static int otx2_setup_tc_cls_flower(struct otx2_nic *nic, return -EOPNOTSUPP; } } +EXPORT_SYMBOL(otx2_setup_tc_cls_flower); static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic, struct tc_cls_matchall_offload *cls) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 7aaf32e9aa95..04bc06a80e23 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -376,9 +376,11 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, } otx2_set_rxhash(pfvf, cqe, skb); - skb_record_rx_queue(skb, cq->cq_idx); - if (pfvf->netdev->features & NETIF_F_RXCSUM) - skb->ip_summed = CHECKSUM_UNNECESSARY; + if (!(pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED)) { + skb_record_rx_queue(skb, cq->cq_idx); + if (pfvf->netdev->features & NETIF_F_RXCSUM) + skb->ip_summed = CHECKSUM_UNNECESSARY; + } if (pfvf->flags & OTX2_FLAG_TC_MARK_ENABLED) skb->mark = parse->match_id; @@ -453,6 +455,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf, int tx_pkts = 0, tx_bytes = 0, qidx; struct otx2_snd_queue *sq; struct nix_cqe_tx_s *cqe; + struct net_device *ndev; int processed_cqe = 0; if (cq->pend_cqe >= budget) @@ -493,6 +496,13 @@ process_cqe: otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, ((u64)cq->cq_idx << 32) | processed_cqe); +#if IS_ENABLED(CONFIG_RVU_ESWITCH) + if (pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED) + ndev = pfvf->reps[qidx]->netdev; + else +#endif + ndev = pfvf->netdev; + if (likely(tx_pkts)) { struct netdev_queue *txq; @@ -500,12 +510,14 @@ process_cqe: if (qidx >= pfvf->hw.tx_queues) qidx -= pfvf->hw.xdp_queues; - txq = netdev_get_tx_queue(pfvf->netdev, qidx); + if (pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED) + qidx = 0; + txq = netdev_get_tx_queue(ndev, qidx); netdev_tx_completed_queue(txq, tx_pkts, tx_bytes); /* Check if queue was stopped earlier due to ring full */ smp_mb(); if 
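/* In representor mode the qidx-to-netdev mapping used in the TX
 * completion path above is: each SQ/CQ pair belongs to one representor,
 * and representors are single-queue netdevs, so the lookup reduces to
 * (sketch):
 *
 *	ndev = pfvf->reps[qidx]->netdev;
 *	txq  = netdev_get_tx_queue(ndev, 0);
 */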
(netif_tx_queue_stopped(txq) && - netif_carrier_ok(pfvf->netdev)) + netif_carrier_ok(ndev)) netif_tx_wake_queue(txq); } return 0; @@ -594,6 +606,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) } return workdone; } +EXPORT_SYMBOL(otx2_napi_handler); void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx) @@ -1141,13 +1154,13 @@ static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb, } } -bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, +bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq, + struct otx2_snd_queue *sq, struct sk_buff *skb, u16 qidx) { - struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx); - struct otx2_nic *pfvf = netdev_priv(netdev); int offset, num_segs, free_desc; struct nix_sqe_hdr_s *sqe_hdr; + struct otx2_nic *pfvf = dev; /* Check if there is enough room between producer * and consumer index. diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index 3f1d2655ff77..e1db5f961877 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -167,7 +167,8 @@ static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr) } int otx2_napi_handler(struct napi_struct *napi, int budget); -bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, +bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq, + struct otx2_snd_queue *sq, struct sk_buff *skb, u16 qidx); void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index c4e6c78a8deb..839fc77c11b2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -21,6 +21,7 @@ static const struct pci_device_id otx2_vf_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) }, { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) }, + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_SDP_REP) }, { } }; @@ -371,7 +372,7 @@ static int otx2vf_open(struct net_device *netdev) /* LBKs do not receive link events so tell everyone we are up here */ vf = netdev_priv(netdev); - if (is_otx2_lbkvf(vf->pdev)) { + if (is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev)) { pr_info("%s NIC Link is UP\n", netdev->name); netif_carrier_on(netdev); netif_tx_start_all_queues(netdev); @@ -395,7 +396,7 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev) sq = &vf->qset.sq[qidx]; txq = netdev_get_tx_queue(netdev, qidx); - if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) { + if (!otx2_sq_append_skb(vf, txq, sq, skb, qidx)) { netif_tx_stop_queue(txq); /* Check again, incase SQBs got freed up */ @@ -683,6 +684,15 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n); } + if (is_otx2_sdp_rep(vf->pdev)) { + int n; + + n = vf->pcifunc & RVU_PFVF_FUNC_MASK; + n -= 1; + snprintf(netdev->name, sizeof(netdev->name), "sdp%d-%d", + pdev->bus->number, n); + } + err = register_netdev(netdev); if (err) { dev_err(dev, "Failed to register netdevice\n"); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c new file mode 100644 index 000000000000..232b10740c13 --- /dev/null +++ 
b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c @@ -0,0 +1,864 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU representor driver + * + * Copyright (C) 2024 Marvell. + * + */ + +#include <linux/etherdevice.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/net_tstamp.h> +#include <linux/sort.h> + +#include "otx2_common.h" +#include "cn10k.h" +#include "otx2_reg.h" +#include "rep.h" + +#define DRV_NAME "rvu_rep" +#define DRV_STRING "Marvell RVU Representor Driver" + +static const struct pci_device_id rvu_rep_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_RVU_REP) }, + { } +}; + +MODULE_AUTHOR("Marvell International Ltd."); +MODULE_DESCRIPTION(DRV_STRING); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, rvu_rep_id_table); + +static int rvu_rep_notify_pfvf(struct otx2_nic *priv, u16 event, + struct rep_event *data); + +static int rvu_rep_mcam_flow_init(struct rep_dev *rep) +{ + struct npc_mcam_alloc_entry_req *req; + struct npc_mcam_alloc_entry_rsp *rsp; + struct otx2_nic *priv = rep->mdev; + int ent, allocated = 0; + int count; + + rep->flow_cfg = kcalloc(1, sizeof(struct otx2_flow_config), GFP_KERNEL); + + if (!rep->flow_cfg) + return -ENOMEM; + + count = OTX2_DEFAULT_FLOWCOUNT; + + rep->flow_cfg->flow_ent = kcalloc(count, sizeof(u16), GFP_KERNEL); + if (!rep->flow_cfg->flow_ent) + return -ENOMEM; + + while (allocated < count) { + req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&priv->mbox); + if (!req) + goto exit; + + req->hdr.pcifunc = rep->pcifunc; + req->contig = false; + req->ref_entry = 0; + req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ? + NPC_MAX_NONCONTIG_ENTRIES : count - allocated; + + if (otx2_sync_mbox_msg(&priv->mbox)) + goto exit; + + rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp + (&priv->mbox.mbox, 0, &req->hdr); + + for (ent = 0; ent < rsp->count; ent++) + rep->flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent]; + + allocated += rsp->count; + + if (rsp->count != req->count) + break; + } +exit: + /* Multiple MCAM entry alloc requests could result in non-sequential + * MCAM entries in the flow_ent[] array. Sort them in an ascending + * order, otherwise user installed ntuple filter index and MCAM entry + * index will not be in sync. 
+ */ + if (allocated) + sort(&rep->flow_cfg->flow_ent[0], allocated, + sizeof(rep->flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL); + + mutex_unlock(&priv->mbox.lock); + + rep->flow_cfg->max_flows = allocated; + + if (allocated) { + rep->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC; + rep->flags |= OTX2_FLAG_NTUPLE_SUPPORT; + rep->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT; + } + + INIT_LIST_HEAD(&rep->flow_cfg->flow_list); + INIT_LIST_HEAD(&rep->flow_cfg->flow_list_tc); + return 0; +} + +static int rvu_rep_setup_tc_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct rep_dev *rep = cb_priv; + struct otx2_nic *priv = rep->mdev; + + if (!(rep->flags & RVU_REP_VF_INITIALIZED)) + return -EINVAL; + + if (!(rep->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) + rvu_rep_mcam_flow_init(rep); + + priv->netdev = rep->netdev; + priv->flags = rep->flags; + priv->pcifunc = rep->pcifunc; + priv->flow_cfg = rep->flow_cfg; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return otx2_setup_tc_cls_flower(priv, type_data); + default: + return -EOPNOTSUPP; + } +} + +static LIST_HEAD(rvu_rep_block_cb_list); +static int rvu_rep_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) +{ + struct rvu_rep *rep = netdev_priv(netdev); + + switch (type) { + case TC_SETUP_BLOCK: + return flow_block_cb_setup_simple(type_data, + &rvu_rep_block_cb_list, + rvu_rep_setup_tc_cb, + rep, rep, true); + default: + return -EOPNOTSUPP; + } +} + +static int +rvu_rep_sp_stats64(const struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct rep_dev *rep = netdev_priv(dev); + struct otx2_nic *priv = rep->mdev; + struct otx2_rcv_queue *rq; + struct otx2_snd_queue *sq; + u16 qidx = rep->rep_id; + + otx2_update_rq_stats(priv, qidx); + rq = &priv->qset.rq[qidx]; + + otx2_update_sq_stats(priv, qidx); + sq = &priv->qset.sq[qidx]; + + stats->tx_bytes = sq->stats.bytes; + stats->tx_packets = sq->stats.pkts; + stats->rx_bytes = rq->stats.bytes; + stats->rx_packets = rq->stats.pkts; + return 0; +} + +static bool +rvu_rep_has_offload_stats(const struct net_device *dev, int attr_id) +{ + return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT; +} + +static int +rvu_rep_get_offload_stats(int attr_id, const struct net_device *dev, + void *sp) +{ + if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT) + return rvu_rep_sp_stats64(dev, (struct rtnl_link_stats64 *)sp); + + return -EINVAL; +} + +static int rvu_rep_dl_port_fn_hw_addr_get(struct devlink_port *port, + u8 *hw_addr, int *hw_addr_len, + struct netlink_ext_ack *extack) +{ + struct rep_dev *rep = container_of(port, struct rep_dev, dl_port); + + ether_addr_copy(hw_addr, rep->mac); + *hw_addr_len = ETH_ALEN; + return 0; +} + +static int rvu_rep_dl_port_fn_hw_addr_set(struct devlink_port *port, + const u8 *hw_addr, int hw_addr_len, + struct netlink_ext_ack *extack) +{ + struct rep_dev *rep = container_of(port, struct rep_dev, dl_port); + struct otx2_nic *priv = rep->mdev; + struct rep_event evt = {0}; + + eth_hw_addr_set(rep->netdev, hw_addr); + ether_addr_copy(rep->mac, hw_addr); + + ether_addr_copy(evt.evt_data.mac, hw_addr); + evt.pcifunc = rep->pcifunc; + rvu_rep_notify_pfvf(priv, RVU_EVENT_MAC_ADDR_CHANGE, &evt); + return 0; +} + +static const struct devlink_port_ops rvu_rep_dl_port_ops = { + .port_fn_hw_addr_get = rvu_rep_dl_port_fn_hw_addr_get, + .port_fn_hw_addr_set = rvu_rep_dl_port_fn_hw_addr_set, +}; + +static void +rvu_rep_devlink_set_switch_id(struct otx2_nic *priv, + struct netdev_phys_item_id *ppid) +{ + struct pci_dev *pdev = priv->pdev; + u64 id; + + id = 
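/* All representor devlink ports report one phys_switch_id, derived just
 * below from the parent PCI device's Device Serial Number; userspace
 * tooling (e.g. "ip -d link show", which prints it as switchid) relies
 * on the shared id to recognise the ports as one switch.
 */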
pci_get_dsn(pdev); + + ppid->id_len = sizeof(id); + put_unaligned_be64(id, &ppid->id); +} + +static void rvu_rep_devlink_port_unregister(struct rep_dev *rep) +{ + devlink_port_unregister(&rep->dl_port); +} + +static int rvu_rep_devlink_port_register(struct rep_dev *rep) +{ + struct devlink_port_attrs attrs = {}; + struct otx2_nic *priv = rep->mdev; + struct devlink *dl = priv->dl->dl; + int err; + + if (!(rep->pcifunc & RVU_PFVF_FUNC_MASK)) { + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = rvu_get_pf(rep->pcifunc); + } else { + attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF; + attrs.pci_vf.pf = rvu_get_pf(rep->pcifunc); + attrs.pci_vf.vf = rep->pcifunc & RVU_PFVF_FUNC_MASK; + } + + rvu_rep_devlink_set_switch_id(priv, &attrs.switch_id); + devlink_port_attrs_set(&rep->dl_port, &attrs); + + err = devl_port_register_with_ops(dl, &rep->dl_port, rep->rep_id, + &rvu_rep_dl_port_ops); + if (err) { + dev_err(rep->mdev->dev, "devlink_port_register failed: %d\n", + err); + return err; + } + return 0; +} + +static int rvu_rep_get_repid(struct otx2_nic *priv, u16 pcifunc) +{ + int rep_id; + + for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++) + if (priv->rep_pf_map[rep_id] == pcifunc) + return rep_id; + return -EINVAL; +} + +static int rvu_rep_notify_pfvf(struct otx2_nic *priv, u16 event, + struct rep_event *data) +{ + struct rep_event *req; + + mutex_lock(&priv->mbox.lock); + req = otx2_mbox_alloc_msg_rep_event_notify(&priv->mbox); + if (!req) { + mutex_unlock(&priv->mbox.lock); + return -ENOMEM; + } + req->event = event; + req->pcifunc = data->pcifunc; + + memcpy(&req->evt_data, &data->evt_data, sizeof(struct rep_evt_data)); + otx2_sync_mbox_msg(&priv->mbox); + mutex_unlock(&priv->mbox.lock); + return 0; +} + +static void rvu_rep_state_evt_handler(struct otx2_nic *priv, + struct rep_event *info) +{ + struct rep_dev *rep; + int rep_id; + + rep_id = rvu_rep_get_repid(priv, info->pcifunc); + rep = priv->reps[rep_id]; + if (info->evt_data.vf_state) + rep->flags |= RVU_REP_VF_INITIALIZED; + else + rep->flags &= ~RVU_REP_VF_INITIALIZED; +} + +int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info) +{ + if (info->event & RVU_EVENT_PFVF_STATE) + rvu_rep_state_evt_handler(pf, info); + return 0; +} + +static int rvu_rep_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rep_dev *rep = netdev_priv(dev); + struct otx2_nic *priv = rep->mdev; + struct rep_event evt = {0}; + + netdev_info(dev, "Changing MTU from %d to %d\n", + dev->mtu, new_mtu); + dev->mtu = new_mtu; + + evt.evt_data.mtu = new_mtu; + evt.pcifunc = rep->pcifunc; + rvu_rep_notify_pfvf(priv, RVU_EVENT_MTU_CHANGE, &evt); + return 0; +} + +static void rvu_rep_get_stats(struct work_struct *work) +{ + struct delayed_work *del_work = to_delayed_work(work); + struct nix_stats_req *req; + struct nix_stats_rsp *rsp; + struct rep_stats *stats; + struct otx2_nic *priv; + struct rep_dev *rep; + int err; + + rep = container_of(del_work, struct rep_dev, stats_wrk); + priv = rep->mdev; + + mutex_lock(&priv->mbox.lock); + req = otx2_mbox_alloc_msg_nix_lf_stats(&priv->mbox); + if (!req) { + mutex_unlock(&priv->mbox.lock); + return; + } + req->pcifunc = rep->pcifunc; + err = otx2_sync_mbox_msg_busy_poll(&priv->mbox); + if (err) + goto exit; + + rsp = (struct nix_stats_rsp *) + otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr); + + if (IS_ERR(rsp)) { + err = PTR_ERR(rsp); + goto exit; + } + + stats = &rep->stats; + stats->rx_bytes = rsp->rx.octs; + stats->rx_frames = rsp->rx.ucast + rsp->rx.bcast + + rsp->rx.mcast; + 
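/* Deferred-stats pattern: ndo_get_stats64 must not block on the AF
 * mailbox, so rvu_rep_get_stats64() below returns the values cached here
 * and reschedules this worker (100 ms delay, as scheduled below) to
 * refresh rep->stats via the nix_lf_stats mailbox request.
 */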
stats->rx_drops = rsp->rx.drop; + stats->rx_mcast_frames = rsp->rx.mcast; + stats->tx_bytes = rsp->tx.octs; + stats->tx_frames = rsp->tx.ucast + rsp->tx.bcast + rsp->tx.mcast; + stats->tx_drops = rsp->tx.drop; +exit: + mutex_unlock(&priv->mbox.lock); +} + +static void rvu_rep_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct rep_dev *rep = netdev_priv(dev); + + if (!(rep->flags & RVU_REP_VF_INITIALIZED)) + return; + + stats->rx_packets = rep->stats.rx_frames; + stats->rx_bytes = rep->stats.rx_bytes; + stats->rx_dropped = rep->stats.rx_drops; + stats->multicast = rep->stats.rx_mcast_frames; + + stats->tx_packets = rep->stats.tx_frames; + stats->tx_bytes = rep->stats.tx_bytes; + stats->tx_dropped = rep->stats.tx_drops; + + schedule_delayed_work(&rep->stats_wrk, msecs_to_jiffies(100)); +} + +static int rvu_eswitch_config(struct otx2_nic *priv, u8 ena) +{ + struct esw_cfg_req *req; + + mutex_lock(&priv->mbox.lock); + req = otx2_mbox_alloc_msg_esw_cfg(&priv->mbox); + if (!req) { + mutex_unlock(&priv->mbox.lock); + return -ENOMEM; + } + req->ena = ena; + otx2_sync_mbox_msg(&priv->mbox); + mutex_unlock(&priv->mbox.lock); + return 0; +} + +static netdev_tx_t rvu_rep_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct rep_dev *rep = netdev_priv(dev); + struct otx2_nic *pf = rep->mdev; + struct otx2_snd_queue *sq; + struct netdev_queue *txq; + + sq = &pf->qset.sq[rep->rep_id]; + txq = netdev_get_tx_queue(dev, 0); + + if (!otx2_sq_append_skb(pf, txq, sq, skb, rep->rep_id)) { + netif_tx_stop_queue(txq); + + /* Check again, in case SQBs got freed up */ + smp_mb(); + if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb) + > sq->sqe_thresh) + netif_tx_wake_queue(txq); + + return NETDEV_TX_BUSY; + } + return NETDEV_TX_OK; +} + +static int rvu_rep_open(struct net_device *dev) +{ + struct rep_dev *rep = netdev_priv(dev); + struct otx2_nic *priv = rep->mdev; + struct rep_event evt = {0}; + + if (!(rep->flags & RVU_REP_VF_INITIALIZED)) + return 0; + + netif_carrier_on(dev); + netif_tx_start_all_queues(dev); + + evt.event = RVU_EVENT_PORT_STATE; + evt.evt_data.port_state = 1; + evt.pcifunc = rep->pcifunc; + rvu_rep_notify_pfvf(priv, RVU_EVENT_PORT_STATE, &evt); + return 0; +} + +static int rvu_rep_stop(struct net_device *dev) +{ + struct rep_dev *rep = netdev_priv(dev); + struct otx2_nic *priv = rep->mdev; + struct rep_event evt = {0}; + + if (!(rep->flags & RVU_REP_VF_INITIALIZED)) + return 0; + + netif_carrier_off(dev); + netif_tx_disable(dev); + + evt.event = RVU_EVENT_PORT_STATE; + evt.pcifunc = rep->pcifunc; + rvu_rep_notify_pfvf(priv, RVU_EVENT_PORT_STATE, &evt); + return 0; +} + +static const struct net_device_ops rvu_rep_netdev_ops = { + .ndo_open = rvu_rep_open, + .ndo_stop = rvu_rep_stop, + .ndo_start_xmit = rvu_rep_xmit, + .ndo_get_stats64 = rvu_rep_get_stats64, + .ndo_change_mtu = rvu_rep_change_mtu, + .ndo_has_offload_stats = rvu_rep_has_offload_stats, + .ndo_get_offload_stats = rvu_rep_get_offload_stats, + .ndo_setup_tc = rvu_rep_setup_tc, +}; + +static int rvu_rep_napi_init(struct otx2_nic *priv, + struct netlink_ext_ack *extack) +{ + struct otx2_qset *qset = &priv->qset; + struct otx2_cq_poll *cq_poll = NULL; + struct otx2_hw *hw = &priv->hw; + int err = 0, qidx, vec; + char *irq_name; + + qset->napi = kcalloc(hw->cint_cnt, sizeof(*cq_poll), GFP_KERNEL); + if (!qset->napi) + return -ENOMEM; + + /* Register NAPI handler */ + for (qidx = 0; qidx < hw->cint_cnt; qidx++) { + cq_poll = &qset->napi[qidx]; + cq_poll->cint_idx = qidx; + 
cq_poll->cq_ids[CQ_RX] = + (qidx < hw->rx_queues) ? qidx : CINT_INVALID_CQ; + cq_poll->cq_ids[CQ_TX] = (qidx < hw->tx_queues) ? + qidx + hw->rx_queues : + CINT_INVALID_CQ; + cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ; + cq_poll->cq_ids[CQ_QOS] = CINT_INVALID_CQ; + + cq_poll->dev = (void *)priv; + netif_napi_add(priv->reps[qidx]->netdev, &cq_poll->napi, + otx2_napi_handler); + napi_enable(&cq_poll->napi); + } + /* Register CQ IRQ handlers */ + vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START; + for (qidx = 0; qidx < hw->cint_cnt; qidx++) { + irq_name = &hw->irq_name[vec * NAME_SIZE]; + + snprintf(irq_name, NAME_SIZE, "rep%d-rxtx-%d", qidx, qidx); + + err = request_irq(pci_irq_vector(priv->pdev, vec), + otx2_cq_intr_handler, 0, irq_name, + &qset->napi[qidx]); + if (err) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "RVU REP IRQ registration failed for CQ%d", + qidx); + goto err_free_cints; + } + vec++; + + /* Enable CQ IRQ */ + otx2_write64(priv, NIX_LF_CINTX_INT(qidx), BIT_ULL(0)); + otx2_write64(priv, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0)); + } + priv->flags &= ~OTX2_FLAG_INTF_DOWN; + return 0; + +err_free_cints: + otx2_free_cints(priv, qidx); + otx2_disable_napi(priv); + return err; +} + +static void rvu_rep_free_cq_rsrc(struct otx2_nic *priv) +{ + struct otx2_qset *qset = &priv->qset; + struct otx2_cq_poll *cq_poll = NULL; + int qidx, vec; + + /* Cleanup CQ NAPI and IRQ */ + vec = priv->hw.nix_msixoff + NIX_LF_CINT_VEC_START; + for (qidx = 0; qidx < priv->hw.cint_cnt; qidx++) { + /* Disable interrupt */ + otx2_write64(priv, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0)); + + synchronize_irq(pci_irq_vector(priv->pdev, vec)); + + cq_poll = &qset->napi[qidx]; + napi_synchronize(&cq_poll->napi); + vec++; + } + otx2_free_cints(priv, priv->hw.cint_cnt); + otx2_disable_napi(priv); +} + +static void rvu_rep_rsrc_free(struct otx2_nic *priv) +{ + struct otx2_qset *qset = &priv->qset; + struct delayed_work *work; + int wrk; + + for (wrk = 0; wrk < priv->qset.cq_cnt; wrk++) { + work = &priv->refill_wrk[wrk].pool_refill_work; + cancel_delayed_work_sync(work); + } + devm_kfree(priv->dev, priv->refill_wrk); + + otx2_free_hw_resources(priv); + otx2_free_queue_mem(qset); +} + +static int rvu_rep_rsrc_init(struct otx2_nic *priv) +{ + struct otx2_qset *qset = &priv->qset; + int err; + + err = otx2_alloc_queue_mem(priv); + if (err) + return err; + + priv->hw.max_mtu = otx2_get_max_mtu(priv); + priv->tx_max_pktlen = priv->hw.max_mtu + OTX2_ETH_HLEN; + priv->rbsize = ALIGN(priv->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM; + + err = otx2_init_hw_resources(priv); + if (err) + goto err_free_rsrc; + + /* Set maximum frame size allowed in HW */ + err = otx2_hw_set_mtu(priv, priv->hw.max_mtu); + if (err) { + dev_err(priv->dev, "Failed to set HW MTU\n"); + goto err_free_rsrc; + } + return 0; + +err_free_rsrc: + otx2_free_hw_resources(priv); + otx2_free_queue_mem(qset); + return err; +} + +void rvu_rep_destroy(struct otx2_nic *priv) +{ + struct rep_dev *rep; + int rep_id; + + rvu_eswitch_config(priv, false); + priv->flags |= OTX2_FLAG_INTF_DOWN; + rvu_rep_free_cq_rsrc(priv); + for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++) { + rep = priv->reps[rep_id]; + unregister_netdev(rep->netdev); + rvu_rep_devlink_port_unregister(rep); + free_netdev(rep->netdev); + kfree(rep->flow_cfg); + } + kfree(priv->reps); + rvu_rep_rsrc_free(priv); +} + +int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack) +{ + int rep_cnt = priv->rep_cnt; + struct net_device *ndev; + struct rep_dev *rep; + int rep_id, err; + u16 pcifunc; + + err = 
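/* Netdev naming used below, "Rpf%dvf%d": the function part of pcifunc is
 * 0 for the PF itself and vf_index + 1 for its VFs (see the rep2pfvf_map
 * setup in rvu_rep.c above), so PF1 with two VFs gets representors named
 * Rpf1vf0 (the PF), Rpf1vf1 and Rpf1vf2 (illustrative example).
 */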
rvu_rep_rsrc_init(priv); + if (err) + return -ENOMEM; + + priv->reps = kcalloc(rep_cnt, sizeof(struct rep_dev *), GFP_KERNEL); + if (!priv->reps) + return -ENOMEM; + + for (rep_id = 0; rep_id < rep_cnt; rep_id++) { + ndev = alloc_etherdev(sizeof(*rep)); + if (!ndev) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "PFVF representor:%d creation failed", + rep_id); + err = -ENOMEM; + goto exit; + } + + rep = netdev_priv(ndev); + priv->reps[rep_id] = rep; + rep->mdev = priv; + rep->netdev = ndev; + rep->rep_id = rep_id; + + ndev->min_mtu = OTX2_MIN_MTU; + ndev->max_mtu = priv->hw.max_mtu; + ndev->netdev_ops = &rvu_rep_netdev_ops; + pcifunc = priv->rep_pf_map[rep_id]; + rep->pcifunc = pcifunc; + + snprintf(ndev->name, sizeof(ndev->name), "Rpf%dvf%d", + rvu_get_pf(pcifunc), (pcifunc & RVU_PFVF_FUNC_MASK)); + + ndev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | NETIF_F_RXHASH | + NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6); + + ndev->hw_features |= NETIF_F_HW_TC; + ndev->features |= ndev->hw_features; + eth_hw_addr_random(ndev); + err = rvu_rep_devlink_port_register(rep); + if (err) + goto exit; + + SET_NETDEV_DEVLINK_PORT(ndev, &rep->dl_port); + err = register_netdev(ndev); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "PFVF representor registration failed"); + free_netdev(ndev); + goto exit; + } + + INIT_DELAYED_WORK(&rep->stats_wrk, rvu_rep_get_stats); + } + err = rvu_rep_napi_init(priv, extack); + if (err) + goto exit; + + rvu_eswitch_config(priv, true); + return 0; +exit: + while (--rep_id >= 0) { + rep = priv->reps[rep_id]; + unregister_netdev(rep->netdev); + rvu_rep_devlink_port_unregister(rep); + free_netdev(rep->netdev); + } + kfree(priv->reps); + rvu_rep_rsrc_free(priv); + return err; +} + +static int rvu_get_rep_cnt(struct otx2_nic *priv) +{ + struct get_rep_cnt_rsp *rsp; + struct mbox_msghdr *msghdr; + struct msg_req *req; + int err, rep; + + mutex_lock(&priv->mbox.lock); + req = otx2_mbox_alloc_msg_get_rep_cnt(&priv->mbox); + if (!req) { + mutex_unlock(&priv->mbox.lock); + return -ENOMEM; + } + err = otx2_sync_mbox_msg(&priv->mbox); + if (err) + goto exit; + + msghdr = otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr); + if (IS_ERR(msghdr)) { + err = PTR_ERR(msghdr); + goto exit; + } + + rsp = (struct get_rep_cnt_rsp *)msghdr; + priv->hw.tx_queues = rsp->rep_cnt; + priv->hw.rx_queues = rsp->rep_cnt; + priv->rep_cnt = rsp->rep_cnt; + for (rep = 0; rep < priv->rep_cnt; rep++) + priv->rep_pf_map[rep] = rsp->rep_pf_map[rep]; + +exit: + mutex_unlock(&priv->mbox.lock); + return err; +} + +static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct otx2_nic *priv; + struct otx2_hw *hw; + int err; + + err = pcim_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + return err; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + return err; + } + + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "DMA mask config failed, abort\n"); + goto err_release_regions; + } + + pci_set_master(pdev); + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + err = -ENOMEM; + goto err_release_regions; + } + + pci_set_drvdata(pdev, priv); + priv->pdev = pdev; + priv->dev = dev; + priv->flags |= OTX2_FLAG_INTF_DOWN; + priv->flags |= OTX2_FLAG_REP_MODE_ENABLED; + + hw = &priv->hw; + hw->pdev = pdev; + hw->max_queues = OTX2_MAX_CQ_CNT; + hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN; + 
hw->xqe_size = 128; + + err = otx2_init_rsrc(pdev, priv); + if (err) + goto err_release_regions; + + priv->iommu_domain = iommu_get_domain_for_dev(dev); + + err = rvu_get_rep_cnt(priv); + if (err) + goto err_detach_rsrc; + + err = otx2_register_dl(priv); + if (err) + goto err_detach_rsrc; + + return 0; + +err_detach_rsrc: + if (priv->hw.lmt_info) + free_percpu(priv->hw.lmt_info); + if (test_bit(CN10K_LMTST, &priv->hw.cap_flag)) + qmem_free(priv->dev, priv->dync_lmt); + otx2_detach_resources(&priv->mbox); + otx2_disable_mbox_intr(priv); + otx2_pfaf_mbox_destroy(priv); + pci_free_irq_vectors(pdev); +err_release_regions: + pci_set_drvdata(pdev, NULL); + pci_release_regions(pdev); + return err; +} + +static void rvu_rep_remove(struct pci_dev *pdev) +{ + struct otx2_nic *priv = pci_get_drvdata(pdev); + + otx2_unregister_dl(priv); + if (!(priv->flags & OTX2_FLAG_INTF_DOWN)) + rvu_rep_destroy(priv); + otx2_detach_resources(&priv->mbox); + if (priv->hw.lmt_info) + free_percpu(priv->hw.lmt_info); + if (test_bit(CN10K_LMTST, &priv->hw.cap_flag)) + qmem_free(priv->dev, priv->dync_lmt); + otx2_disable_mbox_intr(priv); + otx2_pfaf_mbox_destroy(priv); + pci_free_irq_vectors(priv->pdev); + pci_set_drvdata(pdev, NULL); + pci_release_regions(pdev); +} + +static struct pci_driver rvu_rep_driver = { + .name = DRV_NAME, + .id_table = rvu_rep_id_table, + .probe = rvu_rep_probe, + .remove = rvu_rep_remove, + .shutdown = rvu_rep_remove, +}; + +static int __init rvu_rep_init_module(void) +{ + return pci_register_driver(&rvu_rep_driver); +} + +static void __exit rvu_rep_cleanup_module(void) +{ + pci_unregister_driver(&rvu_rep_driver); +} + +module_init(rvu_rep_init_module); +module_exit(rvu_rep_cleanup_module); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.h b/drivers/net/ethernet/marvell/octeontx2/nic/rep.h new file mode 100644 index 000000000000..38446b3e4f13 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU REPRESENTOR driver + * + * Copyright (C) 2024 Marvell. 
+ * + */ + +#ifndef REP_H +#define REP_H + +#include <linux/pci.h> + +#include "otx2_reg.h" +#include "otx2_txrx.h" +#include "otx2_common.h" + +#define PCI_DEVID_RVU_REP 0xA0E0 + +#define RVU_MAX_REP OTX2_MAX_CQ_CNT + +struct rep_stats { + u64 rx_bytes; + u64 rx_frames; + u64 rx_drops; + u64 rx_mcast_frames; + + u64 tx_bytes; + u64 tx_frames; + u64 tx_drops; +}; + +struct rep_dev { + struct otx2_nic *mdev; + struct net_device *netdev; + struct rep_stats stats; + struct delayed_work stats_wrk; + struct devlink_port dl_port; + struct otx2_flow_config *flow_cfg; +#define RVU_REP_VF_INITIALIZED BIT_ULL(0) + u64 flags; + u16 rep_id; + u16 pcifunc; + u8 mac[ETH_ALEN]; +}; + +static inline bool otx2_rep_dev(struct pci_dev *pdev) +{ + return pdev->device == PCI_DEVID_RVU_REP; +} + +int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack); +void rvu_rep_destroy(struct otx2_nic *priv); +int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info); +#endif /* REP_H */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 4caa1b6f40ba..1fd403713baf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -71,6 +71,7 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq, { unsigned long flags; struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv; + bool schedule_tasklet = false; spin_lock_irqsave(&tasklet_ctx->lock, flags); /* When migrating CQs between EQs will be implemented, please note @@ -80,9 +81,19 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq, */ if (list_empty_careful(&cq->tasklet_ctx.list)) { mlx5_cq_hold(cq); + /* If the tasklet CQ work list isn't empty, mlx5_cq_tasklet_cb() + * is scheduled/running and hasn't processed the list yet, so it + * will see this added CQ when it runs. If the list is empty, + * the tasklet needs to be scheduled to pick up the CQ. The + * spinlock avoids any race with the tasklet accessing the list. 
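The rule the new comment describes is easy to model outside the kernel: only the CQ whose insertion makes the work list non-empty needs to schedule the tasklet, because an already-pending run will consume everything added before it executes. A self-contained C sketch, with list_empty()/tasklet_schedule() reduced to a NULL check and a printf (stand-ins, not the mlx5 API):

#include <stdbool.h>
#include <stdio.h>

struct cq { struct cq *next; int cqn; };

static struct cq *work_list;	/* stand-in for the EQ tasklet work list */

static void tasklet_schedule_stub(void)
{
	puts("tasklet scheduled");
}

/* Mirror of the rule in mlx5_add_cq_to_tasklet(): only the CQ that
 * makes the list non-empty schedules the tasklet; later CQs are
 * picked up by the run that is already pending. */
static void add_cq(struct cq *cq)
{
	bool schedule = (work_list == NULL);	/* list_empty() */

	cq->next = work_list;
	work_list = cq;
	if (schedule)
		tasklet_schedule_stub();
}

int main(void)
{
	struct cq a = { .cqn = 1 }, b = { .cqn = 2 };

	add_cq(&a);	/* prints: tasklet scheduled */
	add_cq(&b);	/* prints nothing, a run is already pending */
	return 0;
}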
+ */ + schedule_tasklet = list_empty(&tasklet_ctx->list); list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list); } spin_unlock_irqrestore(&tasklet_ctx->lock, flags); + + if (schedule_tasklet) + tasklet_schedule(&tasklet_ctx->task); } /* Callers must verify outbox status in case of err */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 58f3df784ded..979fc56205e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -83,6 +83,7 @@ struct page_pool; #define MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE (8) #define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9) #define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE) +#define MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE (PAGE_SHIFT - MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE) #define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64) #define MLX5E_SHAMPO_WQ_RESRV_SIZE (64 * 1024) #define MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE (4096) @@ -624,16 +625,14 @@ struct mlx5e_dma_info { struct mlx5e_shampo_hd { u32 mkey; - struct mlx5e_dma_info *info; struct mlx5e_frag_page *pages; - u16 curr_page_index; u32 hd_per_wq; u16 hd_per_wqe; + u16 pages_per_wq; unsigned long *bitmap; u16 pi; u16 ci; __be32 key; - u64 last_addr; }; struct mlx5e_hw_gro_data { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 4877a9d86807..a84ebac2f011 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -866,7 +866,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, return 0; err_rule: - mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh); + mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, attr, zone_rule->mh); mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id); err_mod_hdr: kfree(attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index d61be26a4df1..3db31cc10719 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -660,7 +660,7 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, while (remaining > 0) { skb_frag_t *frag = &record->frags[i]; - get_page(skb_frag_page(frag)); + page_ref_inc(skb_frag_page(frag)); remaining -= skb_frag_size(frag); info->frags[i++] = *frag; } @@ -763,7 +763,7 @@ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq, stats = sq->stats; mlx5e_tx_dma_unmap(sq->pdev, dma); - put_page(wi->resync_dump_frag_page); + page_ref_dec(wi->resync_dump_frag_page); stats->tls_dump_packets++; stats->tls_dump_bytes += wi->num_bytes; } @@ -816,12 +816,12 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, err_out: for (; i < info.nr_frags; i++) - /* The put_page() here undoes the page ref obtained in tx_sync_info_get(). + /* The page_ref_dec() here undoes the page ref obtained in tx_sync_info_get(). * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be * released only upon their completions (or in mlx5e_free_txqsq_descs, * if channel closes). 
*/ - put_page(skb_frag_page(&info.frags[i])); + page_ref_dec(skb_frag_page(&info.frags[i])); return MLX5E_KTLS_SYNC_FAIL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 59d7a0e28f24..d0b80b520397 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -350,19 +350,15 @@ static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node) shampo->bitmap = bitmap_zalloc_node(shampo->hd_per_wq, GFP_KERNEL, node); - shampo->info = kvzalloc_node(array_size(shampo->hd_per_wq, - sizeof(*shampo->info)), - GFP_KERNEL, node); shampo->pages = kvzalloc_node(array_size(shampo->hd_per_wq, sizeof(*shampo->pages)), GFP_KERNEL, node); - if (!shampo->bitmap || !shampo->info || !shampo->pages) + if (!shampo->bitmap || !shampo->pages) goto err_nomem; return 0; err_nomem: - kvfree(shampo->info); kvfree(shampo->bitmap); kvfree(shampo->pages); @@ -372,7 +368,6 @@ err_nomem: static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq) { kvfree(rq->mpwqe.shampo->bitmap); - kvfree(rq->mpwqe.shampo->info); kvfree(rq->mpwqe.shampo->pages); } @@ -767,8 +762,6 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev, u32 *pool_size, int node) { - void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq); - int wq_size; int err; if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) @@ -793,9 +786,9 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev, cpu_to_be32(rq->mpwqe.shampo->mkey); rq->mpwqe.shampo->hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rqp); - wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz)); - *pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) / - MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; + rq->mpwqe.shampo->pages_per_wq = + rq->mpwqe.shampo->hd_per_wq / MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; + *pool_size += rq->mpwqe.shampo->pages_per_wq; return 0; err_hw_gro_data: @@ -4302,7 +4295,8 @@ void mlx5e_set_xdp_feature(struct net_device *netdev) struct mlx5e_params *params = &priv->channels.params; xdp_features_t val; - if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) { + if (!netdev->netdev_ops->ndo_bpf || + params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) { xdp_clear_features_flag(netdev); return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index d81083f4f316..1963bc5adb18 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -643,83 +643,82 @@ static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe, umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); } +static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq, int header_index) +{ + BUILD_BUG_ON(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE > PAGE_SHIFT); + + return &rq->mpwqe.shampo->pages[header_index >> MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE]; +} + +static u64 mlx5e_shampo_hd_offset(int header_index) +{ + return (header_index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << + MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; +} + +static void mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index); + static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, struct mlx5e_icosq *sq, u16 ksm_entries, u16 index) { struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; - u16 entries, pi, header_offset, err, wqe_bbs, new_entries; + u16 pi, header_offset, err, wqe_bbs; u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; - u16 page_index = 
shampo->curr_page_index; - struct mlx5e_frag_page *frag_page; - u64 addr = shampo->last_addr; - struct mlx5e_dma_info *dma_info; struct mlx5e_umr_wqe *umr_wqe; - int headroom, i; + int headroom, i = 0; headroom = rq->buff.headroom; - new_entries = ksm_entries - (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1)); - entries = ALIGN(ksm_entries, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT); - wqe_bbs = MLX5E_KSM_UMR_WQEBBS(entries); + wqe_bbs = MLX5E_KSM_UMR_WQEBBS(ksm_entries); pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs); umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi); - build_ksm_umr(sq, umr_wqe, shampo->key, index, entries); + build_ksm_umr(sq, umr_wqe, shampo->key, index, ksm_entries); - frag_page = &shampo->pages[page_index]; + WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)); + while (i < ksm_entries) { + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index); + u64 addr; - for (i = 0; i < entries; i++, index++) { - dma_info = &shampo->info[index]; - if (i >= ksm_entries || (index < shampo->pi && shampo->pi - index < - MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT)) - goto update_ksm; - header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << - MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; - if (!(header_offset & (PAGE_SIZE - 1))) { - page_index = (page_index + 1) & (shampo->hd_per_wq - 1); - frag_page = &shampo->pages[page_index]; + err = mlx5e_page_alloc_fragmented(rq, frag_page); + if (unlikely(err)) + goto err_unmap; - err = mlx5e_page_alloc_fragmented(rq, frag_page); - if (unlikely(err)) - goto err_unmap; - addr = page_pool_get_dma_addr(frag_page->page); + addr = page_pool_get_dma_addr(frag_page->page); - dma_info->addr = addr; - dma_info->frag_page = frag_page; - } else { - dma_info->addr = addr + header_offset; - dma_info->frag_page = frag_page; - } + for (int j = 0; j < MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; j++) { + header_offset = mlx5e_shampo_hd_offset(index++); -update_ksm: - umr_wqe->inline_ksms[i] = (struct mlx5_ksm) { - .key = cpu_to_be32(lkey), - .va = cpu_to_be64(dma_info->addr + headroom), - }; + umr_wqe->inline_ksms[i++] = (struct mlx5_ksm) { + .key = cpu_to_be32(lkey), + .va = cpu_to_be64(addr + header_offset + headroom), + }; + } } sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { .wqe_type = MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR, .num_wqebbs = wqe_bbs, - .shampo.len = new_entries, + .shampo.len = ksm_entries, }; - shampo->pi = (shampo->pi + new_entries) & (shampo->hd_per_wq - 1); - shampo->curr_page_index = page_index; - shampo->last_addr = addr; + shampo->pi = (shampo->pi + ksm_entries) & (shampo->hd_per_wq - 1); sq->pc += wqe_bbs; sq->doorbell_cseg = &umr_wqe->ctrl; return 0; err_unmap: - while (--i >= 0) { - dma_info = &shampo->info[--index]; - if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) { - dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE); - mlx5e_page_release_fragmented(rq, dma_info->frag_page); + while (--i) { + --index; + header_offset = mlx5e_shampo_hd_offset(index); + if (!header_offset) { + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index); + + mlx5e_page_release_fragmented(rq, frag_page); } } + rq->stats->buff_alloc_err++; return err; } @@ -731,7 +730,8 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq) struct mlx5e_icosq *sq = rq->icosq; int i, err, max_ksm_entries, len; - max_ksm_entries = MLX5E_MAX_KSM_PER_WQE(rq->mdev); + max_ksm_entries = ALIGN_DOWN(MLX5E_MAX_KSM_PER_WQE(rq->mdev), + MLX5E_SHAMPO_WQ_HEADER_PER_PAGE); ksm_entries = bitmap_find_window(shampo->bitmap, shampo->hd_per_wqe, 
shampo->hd_per_wq, shampo->pi); @@ -739,8 +739,8 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq) if (!ksm_entries) return 0; - ksm_entries += (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1)); - index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT); + /* pi is aligned to MLX5E_SHAMPO_WQ_HEADER_PER_PAGE */ + index = shampo->pi; entries_before = shampo->hd_per_wq - index; if (unlikely(entries_before < ksm_entries)) @@ -851,13 +851,11 @@ static void mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index) { struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; - u64 addr = shampo->info[header_index].addr; if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) { - struct mlx5e_dma_info *dma_info = &shampo->info[header_index]; + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index); - dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE); - mlx5e_page_release_fragmented(rq, dma_info->frag_page); + mlx5e_page_release_fragmented(rq, frag_page); } clear_bit(header_index, shampo->bitmap); } @@ -1211,10 +1209,10 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index) { - struct mlx5e_dma_info *last_head = &rq->mpwqe.shampo->info[header_index]; - u16 head_offset = (last_head->addr & (PAGE_SIZE - 1)) + rq->buff.headroom; + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index); + u16 head_offset = mlx5e_shampo_hd_offset(header_index) + rq->buff.headroom; - return page_address(last_head->frag_page->page) + head_offset; + return page_address(frag_page->page) + head_offset; } static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4) @@ -2185,29 +2183,30 @@ static struct sk_buff * mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct mlx5_cqe64 *cqe, u16 header_index) { - struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index]; - u16 head_offset = head->addr & (PAGE_SIZE - 1); + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index); + dma_addr_t page_dma_addr = page_pool_get_dma_addr(frag_page->page); + u16 head_offset = mlx5e_shampo_hd_offset(header_index); + dma_addr_t dma_addr = page_dma_addr + head_offset; u16 head_size = cqe->shampo.header_size; u16 rx_headroom = rq->buff.headroom; struct sk_buff *skb = NULL; void *hdr, *data; u32 frag_size; - hdr = page_address(head->frag_page->page) + head_offset; + hdr = page_address(frag_page->page) + head_offset; data = hdr + rx_headroom; frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size); if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) { /* build SKB around header */ - dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir); + dma_sync_single_range_for_cpu(rq->pdev, dma_addr, 0, frag_size, rq->buff.map_dir); net_prefetchw(hdr); net_prefetch(data); skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0); - if (unlikely(!skb)) return NULL; - head->frag_page->frags++; + frag_page->frags++; } else { /* allocate SKB and copy header for large header */ rq->stats->gro_large_hds++; @@ -2219,7 +2218,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, } net_prefetchw(skb->data); - mlx5e_copy_skb_header(rq, skb, head->frag_page->page, head->addr, + mlx5e_copy_skb_header(rq, skb, frag_page->page, dma_addr, head_offset + rx_headroom, rx_headroom, head_size); /* skb linear part 
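The two helpers introduced above reduce the old per-header dma_info bookkeeping to pure index arithmetic: the high bits of a header index select the backing frag page, the low bits (scaled by the 512-byte maximum header entry size) give the offset inside it. A standalone check of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12) and the log sizes defined in en.h:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT			12
#define LOG_MAX_HEADER_ENTRY_SIZE	9	/* 512 B per header slot */
#define LOG_HEADER_PER_PAGE		(PAGE_SHIFT - LOG_MAX_HEADER_ENTRY_SIZE)
#define HEADER_PER_PAGE			(1 << LOG_HEADER_PER_PAGE)	/* 8 */

static unsigned int hd_page_index(unsigned int header_index)
{
	return header_index >> LOG_HEADER_PER_PAGE;
}

static unsigned int hd_offset(unsigned int header_index)
{
	return (header_index & (HEADER_PER_PAGE - 1)) << LOG_MAX_HEADER_ENTRY_SIZE;
}

int main(void)
{
	/* headers 0..7 share page 0; header 8 starts page 1 at offset 0 */
	assert(hd_page_index(7) == 0 && hd_offset(7) == 7 * 512);
	assert(hd_page_index(8) == 1 && hd_offset(8) == 0);
	printf("hdr 13 -> page %u offset %u\n", hd_page_index(13), hd_offset(13));
	return 0;
}

With these shifts, MLX5E_SHAMPO_WQ_HEADER_PER_PAGE headers (eight on 4 KiB pages) pack into each page, which is why ksm_entries is aligned down to that multiple before the UMR is built.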
was allocated with headlen and aligned to long */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 5bf8318cc48b..1d60465cc2ca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -36,6 +36,7 @@ #include "en.h" #include "en/port.h" #include "eswitch.h" +#include "lib/mlx5.h" static int mlx5e_test_health_info(struct mlx5e_priv *priv) { @@ -247,6 +248,9 @@ static int mlx5e_cond_loopback(struct mlx5e_priv *priv) if (is_mdev_switchdev_mode(priv->mdev)) return -EOPNOTSUPP; + if (mlx5_get_sd(priv->mdev)) + return -EOPNOTSUPP; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 859dcf09b770..2b229b6226c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -114,10 +114,10 @@ static int mlx5_eq_comp_int(struct notifier_block *nb, struct mlx5_eq *eq = &eq_comp->core; struct mlx5_eqe *eqe; int num_eqes = 0; - u32 cqn = -1; while ((eqe = next_eqe_sw(eq))) { struct mlx5_core_cq *cq; + u32 cqn; /* Make sure we read EQ entry contents after we've * checked the ownership bit. @@ -144,9 +144,6 @@ static int mlx5_eq_comp_int(struct notifier_block *nb, eq_update_ci(eq, 1); - if (cqn != -1) - tasklet_schedule(&eq_comp->tasklet_ctx.task); - return 0; } @@ -804,15 +801,8 @@ EXPORT_SYMBOL(mlx5_eq_get_eqe); void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm) { - __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2); - u32 val; - eq->cons_index += cc; - val = (eq->cons_index & 0xffffff) | (eq->eqn << 24); - - __raw_writel((__force u32)cpu_to_be32(val), addr); - /* We still want ordering, just not swabbing, so add a barrier */ - wmb(); + eq_update_ci(eq, arm); } EXPORT_SYMBOL(mlx5_eq_update_ci); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index d0f38818363f..982fe3714683 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -195,7 +195,7 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_vport *vport) return; dl_port = vport->dl_port; - mlx5_esw_qos_vport_update_node(vport, NULL, NULL); + mlx5_esw_qos_vport_update_parent(vport, NULL, NULL); devl_rate_leaf_destroy(&dl_port->dl_port); devl_port_unregister(&dl_port->dl_port); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 940e1c2d1e39..8b7c843446e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -101,6 +101,12 @@ esw_qos_node_set_parent(struct mlx5_esw_sched_node *node, struct mlx5_esw_sched_ node->esw = parent->esw; } +void mlx5_esw_qos_vport_qos_free(struct mlx5_vport *vport) +{ + kfree(vport->qos.sched_node); + memset(&vport->qos, 0, sizeof(vport->qos)); +} + u32 mlx5_esw_qos_vport_get_sched_elem_ix(const struct mlx5_vport *vport) { if (!vport->qos.sched_node) @@ -118,18 +124,49 @@ mlx5_esw_qos_vport_get_parent(const struct mlx5_vport *vport) return vport->qos.sched_node->parent; } -static void esw_qos_sched_elem_config_warn(struct mlx5_esw_sched_node *node, int err) +static void esw_qos_sched_elem_warn(struct mlx5_esw_sched_node *node, int err, const char *op) { if (node->vport) { esw_warn(node->esw->dev, - "E-Switch modify %s scheduling element 
failed (vport=%d,err=%d)\n", - sched_node_type_str[node->type], node->vport->vport, err); + "E-Switch %s %s scheduling element failed (vport=%d,err=%d)\n", + op, sched_node_type_str[node->type], node->vport->vport, err); return; } esw_warn(node->esw->dev, - "E-Switch modify %s scheduling element failed (err=%d)\n", - sched_node_type_str[node->type], err); + "E-Switch %s %s scheduling element failed (err=%d)\n", + op, sched_node_type_str[node->type], err); +} + +static int esw_qos_node_create_sched_element(struct mlx5_esw_sched_node *node, void *ctx, + struct netlink_ext_ack *extack) +{ + int err; + + err = mlx5_create_scheduling_element_cmd(node->esw->dev, SCHEDULING_HIERARCHY_E_SWITCH, ctx, + &node->ix); + if (err) { + esw_qos_sched_elem_warn(node, err, "create"); + NL_SET_ERR_MSG_MOD(extack, "E-Switch create scheduling element failed"); + } + + return err; +} + +static int esw_qos_node_destroy_sched_element(struct mlx5_esw_sched_node *node, + struct netlink_ext_ack *extack) +{ + int err; + + err = mlx5_destroy_scheduling_element_cmd(node->esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + node->ix); + if (err) { + esw_qos_sched_elem_warn(node, err, "destroy"); + NL_SET_ERR_MSG_MOD(extack, "E-Switch destroying scheduling element failed."); + } + + return err; } static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_rate, u32 bw_share, @@ -143,10 +180,21 @@ static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_r if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) return -EOPNOTSUPP; - MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); - MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); - bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; - bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; + if (bw_share && (!MLX5_CAP_QOS(dev, esw_bw_share) || + MLX5_CAP_QOS(dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE)) + return -EOPNOTSUPP; + + if (node->max_rate == max_rate && node->bw_share == bw_share) + return 0; + + if (node->max_rate != max_rate) { + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; + } + if (node->bw_share != bw_share) { + MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; + } err = mlx5_modify_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, @@ -154,12 +202,14 @@ static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_r node->ix, bitmask); if (err) { - esw_qos_sched_elem_config_warn(node, err); + esw_qos_sched_elem_warn(node, err, "modify"); NL_SET_ERR_MSG_MOD(extack, "E-Switch modify scheduling element failed"); return err; } + node->max_rate = max_rate; + node->bw_share = bw_share; if (node->type == SCHED_NODE_TYPE_VPORTS_TSAR) trace_mlx5_esw_node_qos_config(dev, node, node->ix, bw_share, max_rate); else if (node->type == SCHED_NODE_TYPE_VPORT) @@ -208,166 +258,51 @@ static u32 esw_qos_calc_bw_share(u32 min_rate, u32 divider, u32 fw_max) return min_t(u32, max_t(u32, DIV_ROUND_UP(min_rate, divider), MLX5_MIN_BW_SHARE), fw_max); } -static int esw_qos_update_sched_node_bw_share(struct mlx5_esw_sched_node *node, - u32 divider, - struct netlink_ext_ack *extack) +static void esw_qos_update_sched_node_bw_share(struct mlx5_esw_sched_node *node, + u32 divider, + struct netlink_ext_ack *extack) { u32 fw_max_bw_share = MLX5_CAP_QOS(node->esw->dev, max_tsar_bw_share); u32 
bw_share; - int err; bw_share = esw_qos_calc_bw_share(node->min_rate, divider, fw_max_bw_share); - if (bw_share == node->bw_share) - return 0; - - err = esw_qos_sched_elem_config(node, node->max_rate, bw_share, extack); - if (err) - return err; - - node->bw_share = bw_share; - - return err; + esw_qos_sched_elem_config(node, node->max_rate, bw_share, extack); } -static int esw_qos_normalize_min_rate(struct mlx5_eswitch *esw, - struct mlx5_esw_sched_node *parent, - struct netlink_ext_ack *extack) +static void esw_qos_normalize_min_rate(struct mlx5_eswitch *esw, + struct mlx5_esw_sched_node *parent, + struct netlink_ext_ack *extack) { struct list_head *nodes = parent ? &parent->children : &esw->qos.domain->nodes; u32 divider = esw_qos_calculate_min_rate_divider(esw, parent); struct mlx5_esw_sched_node *node; list_for_each_entry(node, nodes, entry) { - int err; - if (node->esw != esw || node->ix == esw->qos.root_tsar_ix) continue; - err = esw_qos_update_sched_node_bw_share(node, divider, extack); - if (err) - return err; + esw_qos_update_sched_node_bw_share(node, divider, extack); if (list_empty(&node->children)) continue; - err = esw_qos_normalize_min_rate(node->esw, node, extack); - if (err) - return err; + esw_qos_normalize_min_rate(node->esw, node, extack); } - - return 0; -} - -static int esw_qos_set_vport_min_rate(struct mlx5_vport *vport, - u32 min_rate, struct netlink_ext_ack *extack) -{ - struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - u32 fw_max_bw_share, previous_min_rate; - bool min_rate_supported; - int err; - - esw_assert_qos_lock_held(vport_node->esw); - fw_max_bw_share = MLX5_CAP_QOS(vport->dev, max_tsar_bw_share); - min_rate_supported = MLX5_CAP_QOS(vport->dev, esw_bw_share) && - fw_max_bw_share >= MLX5_MIN_BW_SHARE; - if (min_rate && !min_rate_supported) - return -EOPNOTSUPP; - if (min_rate == vport_node->min_rate) - return 0; - - previous_min_rate = vport_node->min_rate; - vport_node->min_rate = min_rate; - err = esw_qos_normalize_min_rate(vport_node->parent->esw, vport_node->parent, extack); - if (err) - vport_node->min_rate = previous_min_rate; - - return err; -} - -static int esw_qos_set_vport_max_rate(struct mlx5_vport *vport, - u32 max_rate, struct netlink_ext_ack *extack) -{ - struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - u32 act_max_rate = max_rate; - bool max_rate_supported; - int err; - - esw_assert_qos_lock_held(vport_node->esw); - max_rate_supported = MLX5_CAP_QOS(vport->dev, esw_rate_limit); - - if (max_rate && !max_rate_supported) - return -EOPNOTSUPP; - if (max_rate == vport_node->max_rate) - return 0; - - /* Use parent node limit if new max rate is 0. 
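esw_qos_calc_bw_share(), shown in context above, is a straight scale-and-clamp: divide the node's min_rate by the divider (computed elsewhere from the configured guarantees), then bound the result to [MLX5_MIN_BW_SHARE, fw_max]. A self-contained model with example numbers (MLX5_MIN_BW_SHARE assumed to be 1, fw_max 100):

#include <stdio.h>

#define MLX5_MIN_BW_SHARE 1

static unsigned int div_round_up(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;
}

/* Same shape as esw_qos_calc_bw_share(): scale min_rate by the divider,
 * then clamp into [MLX5_MIN_BW_SHARE, fw_max]. */
static unsigned int calc_bw_share(unsigned int min_rate, unsigned int divider,
				  unsigned int fw_max)
{
	unsigned int share = div_round_up(min_rate, divider);

	if (share < MLX5_MIN_BW_SHARE)
		share = MLX5_MIN_BW_SHARE;
	return share < fw_max ? share : fw_max;
}

int main(void)
{
	/* min_rates 1000 and 4000 with divider 40: shares 25 and 100 */
	printf("%u %u\n", calc_bw_share(1000, 40, 100),
	       calc_bw_share(4000, 40, 100));
	/* a zero guarantee still gets the minimum share */
	printf("%u\n", calc_bw_share(0, 40, 100));
	return 0;
}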
*/ - if (!max_rate) - act_max_rate = vport_node->parent->max_rate; - - err = esw_qos_sched_elem_config(vport_node, act_max_rate, vport_node->bw_share, extack); - if (!err) - vport_node->max_rate = max_rate; - - return err; } static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node, u32 min_rate, struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = node->esw; - u32 previous_min_rate; - int err; - - if (!MLX5_CAP_QOS(esw->dev, esw_bw_share) || - MLX5_CAP_QOS(esw->dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE) - return -EOPNOTSUPP; if (min_rate == node->min_rate) return 0; - previous_min_rate = node->min_rate; node->min_rate = min_rate; - err = esw_qos_normalize_min_rate(esw, NULL, extack); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "E-Switch node min rate setting failed"); - - /* Attempt restoring previous configuration */ - node->min_rate = previous_min_rate; - if (esw_qos_normalize_min_rate(esw, NULL, extack)) - NL_SET_ERR_MSG_MOD(extack, "E-Switch BW share restore failed"); - } - - return err; -} - -static int esw_qos_set_node_max_rate(struct mlx5_esw_sched_node *node, - u32 max_rate, struct netlink_ext_ack *extack) -{ - struct mlx5_esw_sched_node *vport_node; - int err; - - if (node->max_rate == max_rate) - return 0; - - err = esw_qos_sched_elem_config(node, max_rate, node->bw_share, extack); - if (err) - return err; - - node->max_rate = max_rate; - - /* Any unlimited vports in the node should be set with the value of the node. */ - list_for_each_entry(vport_node, &node->children, entry) { - if (vport_node->max_rate) - continue; + esw_qos_normalize_min_rate(esw, node->parent, extack); - err = esw_qos_sched_elem_config(vport_node, max_rate, vport_node->bw_share, extack); - if (err) - NL_SET_ERR_MSG_MOD(extack, - "E-Switch vport implicit rate limit setting failed"); - } - - return err; + return 0; } static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id, @@ -397,14 +332,12 @@ static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_ tsar_ix); } -static int -esw_qos_vport_create_sched_element(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, - u32 max_rate, u32 bw_share, u32 *sched_elem_ix) +static int esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node, + struct netlink_ext_ack *extack) { u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; - struct mlx5_core_dev *dev = parent->esw->dev; + struct mlx5_core_dev *dev = vport_node->esw->dev; void *attr; - int err; if (!mlx5_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT, @@ -414,93 +347,11 @@ esw_qos_vport_create_sched_element(struct mlx5_vport *vport, struct mlx5_esw_sch MLX5_SET(scheduling_context, sched_ctx, element_type, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes); - MLX5_SET(vport_element, attr, vport_number, vport->vport); - MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent->ix); - MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); - MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); - - err = mlx5_create_scheduling_element_cmd(dev, - SCHEDULING_HIERARCHY_E_SWITCH, - sched_ctx, - sched_elem_ix); - if (err) { - esw_warn(dev, - "E-Switch create vport scheduling element failed (vport=%d,err=%d)\n", - vport->vport, err); - return err; - } - - return 0; -} - -static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport, - struct mlx5_esw_sched_node *curr_node, - struct 
mlx5_esw_sched_node *new_node, - struct netlink_ext_ack *extack) -{ - struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - u32 max_rate; - int err; - - err = mlx5_destroy_scheduling_element_cmd(curr_node->esw->dev, - SCHEDULING_HIERARCHY_E_SWITCH, - vport_node->ix); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy vport scheduling element failed"); - return err; - } - - /* Use new node max rate if vport max rate is unlimited. */ - max_rate = vport_node->max_rate ? vport_node->max_rate : new_node->max_rate; - err = esw_qos_vport_create_sched_element(vport, new_node, max_rate, - vport_node->bw_share, - &vport_node->ix); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "E-Switch vport node set failed."); - goto err_sched; - } - - esw_qos_node_set_parent(vport->qos.sched_node, new_node); - - return 0; + MLX5_SET(vport_element, attr, vport_number, vport_node->vport->vport); + MLX5_SET(scheduling_context, sched_ctx, parent_element_id, vport_node->parent->ix); + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, vport_node->max_rate); -err_sched: - max_rate = vport_node->max_rate ? vport_node->max_rate : curr_node->max_rate; - if (esw_qos_vport_create_sched_element(vport, curr_node, max_rate, - vport_node->bw_share, - &vport_node->ix)) - esw_warn(curr_node->esw->dev, "E-Switch vport node restore failed (vport=%d)\n", - vport->vport); - - return err; -} - -static int esw_qos_vport_update_node(struct mlx5_vport *vport, - struct mlx5_esw_sched_node *node, - struct netlink_ext_ack *extack) -{ - struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - struct mlx5_eswitch *esw = vport->dev->priv.eswitch; - struct mlx5_esw_sched_node *new_node, *curr_node; - int err; - - esw_assert_qos_lock_held(esw); - curr_node = vport_node->parent; - new_node = node ?: esw->qos.node0; - if (curr_node == new_node) - return 0; - - err = esw_qos_update_node_scheduling_element(vport, curr_node, new_node, extack); - if (err) - return err; - - /* Recalculate bw share weights of old and new nodes */ - if (vport_node->bw_share || new_node->bw_share) { - esw_qos_normalize_min_rate(curr_node->esw, curr_node, extack); - esw_qos_normalize_min_rate(new_node->esw, new_node, extack); - } - - return 0; + return esw_qos_node_create_sched_element(vport_node, sched_ctx, extack); } static struct mlx5_esw_sched_node * @@ -531,6 +382,12 @@ static void __esw_qos_free_node(struct mlx5_esw_sched_node *node) kfree(node); } +static void esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack *extack) +{ + esw_qos_node_destroy_sched_element(node, extack); + __esw_qos_free_node(node); +} + static struct mlx5_esw_sched_node * __esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sched_node *parent, struct netlink_ext_ack *extack) @@ -552,17 +409,11 @@ __esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sch goto err_alloc_node; } - err = esw_qos_normalize_min_rate(esw, NULL, extack); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "E-Switch nodes normalization failed"); - goto err_min_rate; - } + esw_qos_normalize_min_rate(esw, NULL, extack); trace_mlx5_esw_node_qos_create(esw->dev, node, node->ix); return node; -err_min_rate: - __esw_qos_free_node(node); err_alloc_node: if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH, @@ -595,26 +446,13 @@ esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct netlink_ext_ac return node; } -static int __esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct 
netlink_ext_ack *extack) +static void __esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = node->esw; - int err; trace_mlx5_esw_node_qos_destroy(esw->dev, node, node->ix); - - err = mlx5_destroy_scheduling_element_cmd(esw->dev, - SCHEDULING_HIERARCHY_E_SWITCH, - node->ix); - if (err) - NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR_ID failed"); - __esw_qos_free_node(node); - - err = esw_qos_normalize_min_rate(esw, NULL, extack); - if (err) - NL_SET_ERR_MSG_MOD(extack, "E-Switch nodes normalization failed"); - - - return err; + esw_qos_destroy_node(node, extack); + esw_qos_normalize_min_rate(esw, NULL, extack); } static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) @@ -699,45 +537,62 @@ static void esw_qos_put(struct mlx5_eswitch *esw) esw_qos_destroy(esw); } -static int esw_qos_vport_enable(struct mlx5_vport *vport, - u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack) +static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_ack *extack) +{ + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; + struct mlx5_esw_sched_node *parent = vport_node->parent; + + esw_qos_node_destroy_sched_element(vport_node, extack); + + vport_node->bw_share = 0; + list_del_init(&vport_node->entry); + esw_qos_normalize_min_rate(parent->esw, parent, extack); + + trace_mlx5_esw_vport_qos_destroy(vport_node->esw->dev, vport); +} + +static int esw_qos_vport_enable(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, + struct netlink_ext_ack *extack) { - struct mlx5_eswitch *esw = vport->dev->priv.eswitch; - u32 sched_elem_ix; int err; - esw_assert_qos_lock_held(esw); - if (vport->qos.sched_node) - return 0; + esw_assert_qos_lock_held(vport->dev->priv.eswitch); - err = esw_qos_get(esw, extack); + esw_qos_node_set_parent(vport->qos.sched_node, parent); + err = esw_qos_vport_create_sched_element(vport->qos.sched_node, extack); if (err) return err; - err = esw_qos_vport_create_sched_element(vport, esw->qos.node0, max_rate, bw_share, - &sched_elem_ix); - if (err) - goto err_out; + esw_qos_normalize_min_rate(parent->esw, parent, extack); - vport->qos.sched_node = __esw_qos_alloc_node(esw, sched_elem_ix, SCHED_NODE_TYPE_VPORT, - esw->qos.node0); - if (!vport->qos.sched_node) { - err = -ENOMEM; - goto err_alloc; - } + return 0; +} - vport->qos.sched_node->vport = vport; +static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_type type, + struct mlx5_esw_sched_node *parent, u32 max_rate, + u32 min_rate, struct netlink_ext_ack *extack) +{ + struct mlx5_eswitch *esw = vport->dev->priv.eswitch; + struct mlx5_esw_sched_node *sched_node; + int err; - trace_mlx5_esw_vport_qos_create(vport->dev, vport, bw_share, max_rate); + esw_assert_qos_lock_held(esw); + err = esw_qos_get(esw, extack); + if (err) + return err; - return 0; + parent = parent ?: esw->qos.node0; + sched_node = __esw_qos_alloc_node(parent->esw, 0, type, parent); + if (!sched_node) + return -ENOMEM; -err_alloc: - if (mlx5_destroy_scheduling_element_cmd(esw->dev, - SCHEDULING_HIERARCHY_E_SWITCH, sched_elem_ix)) - esw_warn(esw->dev, "E-Switch destroy vport scheduling element failed.\n"); -err_out: - esw_qos_put(esw); + sched_node->max_rate = max_rate; + sched_node->min_rate = min_rate; + sched_node->vport = vport; + vport->qos.sched_node = sched_node; + err = esw_qos_vport_enable(vport, parent, extack); + if (err) + esw_qos_put(esw); return err; } @@ -745,51 +600,61 @@ err_out: void 
mlx5_esw_qos_vport_disable(struct mlx5_vport *vport) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; - struct mlx5_esw_sched_node *vport_node; - struct mlx5_core_dev *dev; - int err; + struct mlx5_esw_sched_node *parent; lockdep_assert_held(&esw->state_lock); esw_qos_lock(esw); - vport_node = vport->qos.sched_node; - if (!vport_node) + if (!vport->qos.sched_node) goto unlock; - WARN(vport_node->parent != esw->qos.node0, - "Disabling QoS on port before detaching it from node"); - - dev = vport_node->esw->dev; - trace_mlx5_esw_vport_qos_destroy(dev, vport); - - err = mlx5_destroy_scheduling_element_cmd(dev, - SCHEDULING_HIERARCHY_E_SWITCH, - vport_node->ix); - if (err) - esw_warn(dev, - "E-Switch destroy vport scheduling element failed (vport=%d,err=%d)\n", - vport->vport, err); - __esw_qos_free_node(vport_node); - memset(&vport->qos, 0, sizeof(vport->qos)); + parent = vport->qos.sched_node->parent; + WARN(parent != esw->qos.node0, "Disabling QoS on port before detaching it from node"); + esw_qos_vport_disable(vport, NULL); + mlx5_esw_qos_vport_qos_free(vport); esw_qos_put(esw); unlock: esw_qos_unlock(esw); } +static int mlx5_esw_qos_set_vport_max_rate(struct mlx5_vport *vport, u32 max_rate, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; + + esw_assert_qos_lock_held(vport->dev->priv.eswitch); + + if (!vport_node) + return mlx5_esw_qos_vport_enable(vport, SCHED_NODE_TYPE_VPORT, NULL, max_rate, 0, + extack); + else + return esw_qos_sched_elem_config(vport_node, max_rate, vport_node->bw_share, + extack); +} + +static int mlx5_esw_qos_set_vport_min_rate(struct mlx5_vport *vport, u32 min_rate, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; + + esw_assert_qos_lock_held(vport->dev->priv.eswitch); + + if (!vport_node) + return mlx5_esw_qos_vport_enable(vport, SCHED_NODE_TYPE_VPORT, NULL, 0, min_rate, + extack); + else + return esw_qos_set_node_min_rate(vport_node, min_rate, extack); +} + int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *vport, u32 max_rate, u32 min_rate) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; int err; esw_qos_lock(esw); - err = esw_qos_vport_enable(vport, 0, 0, NULL); - if (err) - goto unlock; - - err = esw_qos_set_vport_min_rate(vport, min_rate, NULL); + err = mlx5_esw_qos_set_vport_min_rate(vport, min_rate, NULL); if (!err) - err = esw_qos_set_vport_max_rate(vport, max_rate, NULL); -unlock: + err = mlx5_esw_qos_set_vport_max_rate(vport, max_rate, NULL); esw_qos_unlock(esw); return err; } @@ -809,6 +674,31 @@ bool mlx5_esw_qos_get_vport_rate(struct mlx5_vport *vport, u32 *max_rate, u32 *m return enabled; } +static int esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, + struct netlink_ext_ack *extack) +{ + struct mlx5_eswitch *esw = vport->dev->priv.eswitch; + struct mlx5_esw_sched_node *curr_parent; + int err; + + esw_assert_qos_lock_held(esw); + curr_parent = vport->qos.sched_node->parent; + parent = parent ?: esw->qos.node0; + if (curr_parent == parent) + return 0; + + esw_qos_vport_disable(vport, extack); + + err = esw_qos_vport_enable(vport, parent, extack); + if (err) { + if (esw_qos_vport_enable(vport, curr_parent, NULL)) + esw_warn(parent->esw->dev, "vport restore QoS failed (vport=%d)\n", + vport->vport); + } + + return err; +} + static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev) { struct ethtool_link_ksettings lksettings; @@ -875,10 +765,8 @@ static int 
mlx5_esw_qos_link_speed_verify(struct mlx5_core_dev *mdev, int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps) { - u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; struct mlx5_vport *vport; u32 link_speed_max; - u32 bitmask; int err; vport = mlx5_eswitch_get_vport(esw, vport_num); @@ -897,20 +785,7 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 } esw_qos_lock(esw); - if (!vport->qos.sched_node) { - /* Eswitch QoS wasn't enabled yet. Enable it and vport QoS. */ - err = esw_qos_vport_enable(vport, rate_mbps, 0, NULL); - } else { - struct mlx5_core_dev *dev = vport->qos.sched_node->parent->esw->dev; - - MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps); - bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; - err = mlx5_modify_scheduling_element_cmd(dev, - SCHEDULING_HIERARCHY_E_SWITCH, - ctx, - vport->qos.sched_node->ix, - bitmask); - } + err = mlx5_esw_qos_set_vport_max_rate(vport, rate_mbps, NULL); esw_qos_unlock(esw); return err; @@ -981,12 +856,7 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void return err; esw_qos_lock(esw); - err = esw_qos_vport_enable(vport, 0, 0, extack); - if (err) - goto unlock; - - err = esw_qos_set_vport_min_rate(vport, tx_share, extack); -unlock: + err = mlx5_esw_qos_set_vport_min_rate(vport, tx_share, extack); esw_qos_unlock(esw); return err; } @@ -1007,12 +877,7 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void * return err; esw_qos_lock(esw); - err = esw_qos_vport_enable(vport, 0, 0, extack); - if (err) - goto unlock; - - err = esw_qos_set_vport_max_rate(vport, tx_max, extack); -unlock: + err = mlx5_esw_qos_set_vport_max_rate(vport, tx_max, extack); esw_qos_unlock(esw); return err; } @@ -1046,7 +911,7 @@ int mlx5_esw_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void * return err; esw_qos_lock(esw); - err = esw_qos_set_node_max_rate(node, tx_max, extack); + err = esw_qos_sched_elem_config(node, tx_max, node->bw_share, extack); esw_qos_unlock(esw); return err; } @@ -1087,35 +952,30 @@ int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv, { struct mlx5_esw_sched_node *node = priv; struct mlx5_eswitch *esw = node->esw; - int err; esw_qos_lock(esw); - err = __esw_qos_destroy_node(node, extack); + __esw_qos_destroy_node(node, extack); esw_qos_put(esw); esw_qos_unlock(esw); - return err; + return 0; } -int mlx5_esw_qos_vport_update_node(struct mlx5_vport *vport, - struct mlx5_esw_sched_node *node, - struct netlink_ext_ack *extack) +int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, + struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; int err = 0; - if (node && node->esw != esw) { + if (parent && parent->esw != esw) { NL_SET_ERR_MSG_MOD(extack, "Cross E-Switch scheduling is not supported"); return -EOPNOTSUPP; } esw_qos_lock(esw); - if (!vport->qos.sched_node && !node) - goto unlock; - - err = esw_qos_vport_enable(vport, 0, 0, extack); - if (!err) - err = esw_qos_vport_update_node(vport, node, extack); -unlock: + if (!vport->qos.sched_node && parent) + err = mlx5_esw_qos_vport_enable(vport, SCHED_NODE_TYPE_VPORT, parent, 0, 0, extack); + else if (vport->qos.sched_node) + err = esw_qos_vport_update_parent(vport, parent, extack); esw_qos_unlock(esw); return err; } @@ -1129,8 +989,8 @@ int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate, struct mlx5_vport *vport 
= priv; if (!parent) - return mlx5_esw_qos_vport_update_node(vport, NULL, extack); + return mlx5_esw_qos_vport_update_parent(vport, NULL, extack); node = parent_priv; - return mlx5_esw_qos_vport_update_node(vport, node, extack); + return mlx5_esw_qos_vport_update_parent(vport, node, extack); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h index 61a6fdd5c267..6eb8f6a648c8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h @@ -13,6 +13,7 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *evport, u32 max_rate, u32 min bool mlx5_esw_qos_get_vport_rate(struct mlx5_vport *vport, u32 *max_rate, u32 *min_rate); void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport); +void mlx5_esw_qos_vport_qos_free(struct mlx5_vport *vport); u32 mlx5_esw_qos_vport_get_sched_elem_ix(const struct mlx5_vport *vport); struct mlx5_esw_sched_node *mlx5_esw_qos_vport_get_parent(const struct mlx5_vport *vport); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index cead41ddbc38..7fb8a3381f84 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1061,8 +1061,7 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw) unsigned long i; mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) { - kfree(vport->qos.sched_node); - memset(&vport->qos, 0, sizeof(vport->qos)); + mlx5_esw_qos_vport_qos_free(vport); memset(&vport->info, 0, sizeof(vport->info)); vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; } @@ -1074,8 +1073,7 @@ static void mlx5_eswitch_clear_ec_vf_vports_info(struct mlx5_eswitch *esw) unsigned long i; mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) { - kfree(vport->qos.sched_node); - memset(&vport->qos, 0, sizeof(vport->qos)); + mlx5_esw_qos_vport_qos_free(vport); memset(&vport->info, 0, sizeof(vport->info)); vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; } @@ -1490,7 +1488,6 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs) if (esw->mode == MLX5_ESWITCH_LEGACY) { err = esw_legacy_enable(esw); } else { - mlx5_rescan_drivers(esw->dev); err = esw_offloads_enable(esw); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 14dd42d44e6f..a83d41121db6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -427,9 +427,8 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, u16 vport_num, bool setting); int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, u32 max_rate, u32 min_rate); -int mlx5_esw_qos_vport_update_node(struct mlx5_vport *vport, - struct mlx5_esw_sched_node *node, - struct netlink_ext_ack *extack); +int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *node, + struct netlink_ext_ack *extack); int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting); int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting); int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index fd34f43d18d5..d6ff2dc4c19e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2332,18 +2332,35 @@ out_free: return err; } +static void esw_mode_change(struct mlx5_eswitch *esw, u16 mode) +{ + mlx5_devcom_comp_lock(esw->dev->priv.hca_devcom_comp); + + if (esw->dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV) { + esw->mode = mode; + mlx5_devcom_comp_unlock(esw->dev->priv.hca_devcom_comp); + return; + } + + esw->dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; + mlx5_rescan_drivers_locked(esw->dev); + esw->mode = mode; + esw->dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; + mlx5_rescan_drivers_locked(esw->dev); + mlx5_devcom_comp_unlock(esw->dev->priv.hca_devcom_comp); +} + static int esw_offloads_start(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) { int err; - esw->mode = MLX5_ESWITCH_OFFLOADS; + esw_mode_change(esw, MLX5_ESWITCH_OFFLOADS); err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to offloads"); - esw->mode = MLX5_ESWITCH_LEGACY; - mlx5_rescan_drivers(esw->dev); + esw_mode_change(esw, MLX5_ESWITCH_LEGACY); return err; } if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { @@ -2527,8 +2544,11 @@ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep, u8 rep_type) { if (atomic_cmpxchg(&rep->rep_data[rep_type].state, - REP_LOADED, REP_REGISTERED) == REP_LOADED) + REP_LOADED, REP_REGISTERED) == REP_LOADED) { + if (rep_type == REP_ETH) + __esw_offloads_unload_rep(esw, rep, REP_IB); esw->offloads.rep_ops[rep_type]->unload(rep); + } } static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type) @@ -3584,7 +3604,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, { int err; - esw->mode = MLX5_ESWITCH_LEGACY; + esw_mode_change(esw, MLX5_ESWITCH_LEGACY); /* If changing from switchdev to legacy mode without sriov enabled, * no need to create legacy fdb. 
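The rep unload above is guarded by an atomic state transition, so racing callers are safe: only whoever wins the LOADED -> REGISTERED compare-exchange runs ->unload(), and an ETH rep now first drains the IB rep stacked on top of it. A minimal C11 model of that guard (stub states and a printf in place of the real unload hook, not the mlx5 API):

#include <stdatomic.h>
#include <stdio.h>

enum rep_state { REP_UNREGISTERED, REP_REGISTERED, REP_LOADED };

struct rep {
	_Atomic int state;
	const char *name;
};

static void unload_rep(struct rep *rep)
{
	int expected = REP_LOADED;

	/* Only one caller wins the LOADED -> REGISTERED transition and
	 * performs the actual unload; everyone else sees a no-op. */
	if (atomic_compare_exchange_strong(&rep->state, &expected,
					   REP_REGISTERED))
		printf("unloading %s\n", rep->name);
}

int main(void)
{
	struct rep ib  = { .state = REP_LOADED, .name = "ib"  };
	struct rep eth = { .state = REP_LOADED, .name = "eth" };

	unload_rep(&ib);	/* IB rep goes first, as in the hunk above */
	unload_rep(&eth);
	unload_rep(&eth);	/* state already REGISTERED: no-op */
	return 0;
}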
@@ -3770,7 +3790,6 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, err = esw_offloads_start(esw, extack); } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) { err = esw_offloads_stop(esw, extack); - mlx5_rescan_drivers(esw->dev); } else { err = -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index c2db0a1c132b..2eabfcc247c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -2105,13 +2105,22 @@ lookup_fte_locked(struct mlx5_flow_group *g, fte_tmp = NULL; goto out; } + + nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); + if (!fte_tmp->node.active) { + up_write_ref_node(&fte_tmp->node, false); + + if (take_write) + up_write_ref_node(&g->node, false); + else + up_read_ref_node(&g->node); + tree_put_node(&fte_tmp->node, false); - fte_tmp = NULL; - goto out; + + return NULL; } - nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); out: if (take_write) up_write_ref_node(&g->node, false); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index b306ae79bf97..4822d01123b4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -402,9 +402,7 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, struct ptp_system_timestamp *sts) { struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); - struct mlx5_timer *timer = &clock->timer; struct mlx5_core_dev *mdev; - unsigned long flags; u64 cycles, ns; mdev = container_of(clock, struct mlx5_core_dev, clock); @@ -413,10 +411,8 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, goto out; } - write_seqlock_irqsave(&clock->lock, flags); cycles = mlx5_read_time(mdev, sts, false); - ns = timecounter_cyc2time(&timer->tc, cycles); - write_sequnlock_irqrestore(&clock->lock, flags); + ns = mlx5_timecounter_cyc2time(clock, cycles); *ts = ns_to_timespec64(ns); out: return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h index 4b7f7131c560..b1edc71ffc6d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h @@ -72,7 +72,7 @@ static inline void eq_update_ci(struct mlx5_eq *eq, int arm) __raw_writel((__force u32)cpu_to_be32(val), addr); /* We still want ordering, just not swabbing, so add a barrier */ - mb(); + wmb(); } int mlx5_eq_table_init(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 81a9232a03e1..7db9cab9bedf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -593,9 +593,11 @@ static void irq_pool_free(struct mlx5_irq_pool *pool) kvfree(pool); } -static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec) +static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec, + bool dynamic_vec) { struct mlx5_irq_table *table = dev->priv.irq_table; + int sf_vec_available = sf_vec; int num_sf_ctrl; int err; @@ -616,6 +618,13 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec) num_sf_ctrl = DIV_ROUND_UP(mlx5_sf_max_functions(dev), MLX5_SFS_PER_CTRL_IRQ); num_sf_ctrl = min_t(int, MLX5_IRQ_CTRL_SF_MAX, num_sf_ctrl); + if 
(!dynamic_vec && (num_sf_ctrl + 1) > sf_vec_available) { + mlx5_core_dbg(dev, + "Not enough IRQs for SFs control and completion pool, required=%d avail=%d\n", + num_sf_ctrl + 1, sf_vec_available); + return 0; + } + table->sf_ctrl_pool = irq_pool_alloc(dev, pcif_vec, num_sf_ctrl, "mlx5_sf_ctrl", MLX5_EQ_SHARE_IRQ_MIN_CTRL, @@ -624,9 +633,11 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec) err = PTR_ERR(table->sf_ctrl_pool); goto err_pf; } - /* init sf_comp_pool */ + sf_vec_available -= num_sf_ctrl; + + /* init sf_comp_pool, remaining vectors are for the SF completions */ table->sf_comp_pool = irq_pool_alloc(dev, pcif_vec + num_sf_ctrl, - sf_vec - num_sf_ctrl, "mlx5_sf_comp", + sf_vec_available, "mlx5_sf_comp", MLX5_EQ_SHARE_IRQ_MIN_COMP, MLX5_EQ_SHARE_IRQ_MAX_COMP); if (IS_ERR(table->sf_comp_pool)) { @@ -715,6 +726,7 @@ int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table) int mlx5_irq_table_create(struct mlx5_core_dev *dev) { int num_eqs = mlx5_max_eq_cap_get(dev); + bool dynamic_vec; int total_vec; int pcif_vec; int req_vec; @@ -724,21 +736,31 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev) if (mlx5_core_is_sf(dev)) return 0; + /* PCI PF vectors usage is limited by online cpus, device EQs and + * PCI MSI-X capability. + */ pcif_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 1; pcif_vec = min_t(int, pcif_vec, num_eqs); + pcif_vec = min_t(int, pcif_vec, pci_msix_vec_count(dev->pdev)); total_vec = pcif_vec; if (mlx5_sf_max_functions(dev)) total_vec += MLX5_MAX_MSIX_PER_SF * mlx5_sf_max_functions(dev); total_vec = min_t(int, total_vec, pci_msix_vec_count(dev->pdev)); - pcif_vec = min_t(int, pcif_vec, pci_msix_vec_count(dev->pdev)); req_vec = pci_msix_can_alloc_dyn(dev->pdev) ? 1 : total_vec; n = pci_alloc_irq_vectors(dev->pdev, 1, req_vec, PCI_IRQ_MSIX); if (n < 0) return n; - err = irq_pools_init(dev, total_vec - pcif_vec, pcif_vec); + /* Further limit vectors of the pools based on platform for non dynamic case */ + dynamic_vec = pci_msix_can_alloc_dyn(dev->pdev); + if (!dynamic_vec) { + pcif_vec = min_t(int, n, pcif_vec); + total_vec = min_t(int, n, total_vec); + } + + err = irq_pools_init(dev, total_vec - pcif_vec, pcif_vec, dynamic_vec); if (err) pci_free_irq_vectors(dev->pdev); diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile index cadd4dac6620..425e8b801265 100644 --- a/drivers/net/ethernet/meta/fbnic/Makefile +++ b/drivers/net/ethernet/meta/fbnic/Makefile @@ -7,7 +7,8 @@ obj-$(CONFIG_FBNIC) += fbnic.o -fbnic-y := fbnic_devlink.o \ +fbnic-y := fbnic_csr.o \ + fbnic_devlink.o \ fbnic_ethtool.o \ fbnic_fw.o \ fbnic_hw_stats.o \ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h index fec567c8fe4a..98870cb2b689 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic.h @@ -48,6 +48,7 @@ struct fbnic_dev { struct fbnic_act_tcam act_tcam[FBNIC_RPC_TCAM_ACT_NUM_ENTRIES]; struct fbnic_mac_addr mac_addr[FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES]; u8 mac_addr_boundary; + u8 tce_tcam_last; /* Number of TCQs/RCQs available on hardware */ u16 max_num_queues; @@ -155,6 +156,9 @@ int fbnic_alloc_irqs(struct fbnic_dev *fbd); void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version, const size_t str_sz); +void fbnic_csr_get_regs(struct fbnic_dev *fbd, u32 *data, u32 *regs_version); +int fbnic_csr_regs_len(struct fbnic_dev *fbd); + enum fbnic_boards { fbnic_board_asic }; diff --git 
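The surrounding pci_irq.c changes budget MSI-X vectors in three steps: clamp the PF's share by online CPUs, device EQs and the MSI-X capability, cap the grand total by what the platform actually granted, and only then carve SF control/completion pools out of the remainder when vectors cannot be allocated dynamically. A standalone model of that arithmetic with made-up platform numbers (msix_per_sf here is illustrative, not the real MLX5_MAX_MSIX_PER_SF):

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int num_ports = 1, online_cpus = 16, num_eqs = 24, msix_cap = 64;
	int max_sfs = 100, msix_per_sf = 2;	/* hypothetical platform */

	/* PF budget: ports * cpus + 1 async, capped by EQs and MSI-X */
	int pcif_vec = min_int(min_int(num_ports * online_cpus + 1, num_eqs),
			       msix_cap);
	/* total capped by the MSI-X table; the rest goes to the SF pools */
	int total_vec = min_int(pcif_vec + msix_per_sf * max_sfs, msix_cap);
	int sf_vec = total_vec - pcif_vec;

	printf("pcif=%d total=%d sf=%d\n", pcif_vec, total_vec, sf_vec);
	return 0;
}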
a/drivers/net/ethernet/meta/fbnic/fbnic_csr.c b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c new file mode 100644 index 000000000000..2118901b25e9 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#include "fbnic.h" + +#define FBNIC_BOUNDS(section) { \ + .start = FBNIC_CSR_START_##section, \ + .end = FBNIC_CSR_END_##section + 1, \ +} + +struct fbnic_csr_bounds { + u32 start; + u32 end; +}; + +static const struct fbnic_csr_bounds fbnic_csr_sects[] = { + FBNIC_BOUNDS(INTR), + FBNIC_BOUNDS(INTR_CQ), + FBNIC_BOUNDS(QM_TX), + FBNIC_BOUNDS(QM_RX), + FBNIC_BOUNDS(TCE), + FBNIC_BOUNDS(TCE_RAM), + FBNIC_BOUNDS(TMI), + FBNIC_BOUNDS(PTP), + FBNIC_BOUNDS(RXB), + FBNIC_BOUNDS(RPC), + FBNIC_BOUNDS(FAB), + FBNIC_BOUNDS(MASTER), + FBNIC_BOUNDS(PCS), + FBNIC_BOUNDS(RSFEC), + FBNIC_BOUNDS(MAC_MAC), + FBNIC_BOUNDS(SIG), + FBNIC_BOUNDS(PUL_USER), + FBNIC_BOUNDS(QUEUE), + FBNIC_BOUNDS(RPC_RAM), +}; + +#define FBNIC_RPC_TCAM_ACT_DW_PER_ENTRY 14 +#define FBNIC_RPC_TCAM_ACT_NUM_ENTRIES 64 + +#define FBNIC_RPC_TCAM_MACDA_DW_PER_ENTRY 4 +#define FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES 32 + +#define FBNIC_RPC_TCAM_OUTER_IPSRC_DW_PER_ENTRY 9 +#define FBNIC_RPC_TCAM_OUTER_IPSRC_NUM_ENTRIES 8 + +#define FBNIC_RPC_TCAM_OUTER_IPDST_DW_PER_ENTRY 9 +#define FBNIC_RPC_TCAM_OUTER_IPDST_NUM_ENTRIES 8 + +#define FBNIC_RPC_TCAM_IPSRC_DW_PER_ENTRY 9 +#define FBNIC_RPC_TCAM_IPSRC_NUM_ENTRIES 8 + +#define FBNIC_RPC_TCAM_IPDST_DW_PER_ENTRY 9 +#define FBNIC_RPC_TCAM_IPDST_NUM_ENTRIES 8 + +#define FBNIC_RPC_RSS_TBL_DW_PER_ENTRY 2 +#define FBNIC_RPC_RSS_TBL_NUM_ENTRIES 256 + +static void fbnic_csr_get_regs_rpc_ram(struct fbnic_dev *fbd, u32 **data_p) +{ + u32 start = FBNIC_CSR_START_RPC_RAM; + u32 end = FBNIC_CSR_END_RPC_RAM; + u32 *data = *data_p; + u32 i, j; + + *(data++) = start; + *(data++) = end - 1; + + /* FBNIC_RPC_TCAM_ACT */ + for (i = 0; i < FBNIC_RPC_TCAM_ACT_NUM_ENTRIES; i++) { + for (j = 0; j < FBNIC_RPC_TCAM_ACT_DW_PER_ENTRY; j++) + *(data++) = rd32(fbd, FBNIC_RPC_TCAM_ACT(i, j)); + } + + /* FBNIC_RPC_TCAM_MACDA */ + for (i = 0; i < FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES; i++) { + for (j = 0; j < FBNIC_RPC_TCAM_MACDA_DW_PER_ENTRY; j++) + *(data++) = rd32(fbd, FBNIC_RPC_TCAM_MACDA(i, j)); + } + + /* FBNIC_RPC_TCAM_OUTER_IPSRC */ + for (i = 0; i < FBNIC_RPC_TCAM_OUTER_IPSRC_NUM_ENTRIES; i++) { + for (j = 0; j < FBNIC_RPC_TCAM_OUTER_IPSRC_DW_PER_ENTRY; j++) + *(data++) = rd32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(i, j)); + } + + /* FBNIC_RPC_TCAM_OUTER_IPDST */ + for (i = 0; i < FBNIC_RPC_TCAM_OUTER_IPDST_NUM_ENTRIES; i++) { + for (j = 0; j < FBNIC_RPC_TCAM_OUTER_IPDST_DW_PER_ENTRY; j++) + *(data++) = rd32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(i, j)); + } + + /* FBNIC_RPC_TCAM_IPSRC */ + for (i = 0; i < FBNIC_RPC_TCAM_IPSRC_NUM_ENTRIES; i++) { + for (j = 0; j < FBNIC_RPC_TCAM_IPSRC_DW_PER_ENTRY; j++) + *(data++) = rd32(fbd, FBNIC_RPC_TCAM_IPSRC(i, j)); + } + + /* FBNIC_RPC_TCAM_IPDST */ + for (i = 0; i < FBNIC_RPC_TCAM_IPDST_NUM_ENTRIES; i++) { + for (j = 0; j < FBNIC_RPC_TCAM_IPDST_DW_PER_ENTRY; j++) + *(data++) = rd32(fbd, FBNIC_RPC_TCAM_IPDST(i, j)); + } + + /* FBNIC_RPC_RSS_TBL */ + for (i = 0; i < FBNIC_RPC_RSS_TBL_NUM_ENTRIES; i++) { + for (j = 0; j < FBNIC_RPC_RSS_TBL_DW_PER_ENTRY; j++) + *(data++) = rd32(fbd, FBNIC_RPC_RSS_TBL(i, j)); + } + + *data_p = data; +} + +void fbnic_csr_get_regs(struct fbnic_dev *fbd, u32 *data, u32 *regs_version) +{ + const struct fbnic_csr_bounds *bound; + u32 *start = data; + int i, j; 
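+
+	/* Layout note: each CSR section is dumped as a two-word header
+	 * followed by the raw register values --
+	 *   word 0: section start offset
+	 *   word 1: section end offset (inclusive, hence "end - 1" below)
+	 *   word 2..: one u32 per register in [start, end)
+	 * fbnic_csr_regs_len() mirrors this by adding 2 words per section,
+	 * and the WARN_ON at the end of this function cross-checks the
+	 * produced length against it.
+	 */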
+ + *regs_version = 1u; + + /* Skip RPC_RAM section which cannot be dumped linearly */ + for (i = 0, bound = fbnic_csr_sects; + i < ARRAY_SIZE(fbnic_csr_sects) - 1; i++, ++bound) { + *(data++) = bound->start; + *(data++) = bound->end - 1; + for (j = bound->start; j < bound->end; j++) + *(data++) = rd32(fbd, j); + } + + /* Dump the RPC_RAM as special case registers */ + fbnic_csr_get_regs_rpc_ram(fbd, &data); + + WARN_ON(data - start != fbnic_csr_regs_len(fbd)); +} + +int fbnic_csr_regs_len(struct fbnic_dev *fbd) +{ + int i, len = 0; + + /* Dump includes start and end information of each section + * which results in an offset of 2 + */ + for (i = 0; i < ARRAY_SIZE(fbnic_csr_sects); i++) + len += fbnic_csr_sects[i].end - fbnic_csr_sects[i].start + 2; + + return len; +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h index 79cdd231d327..f9a531ce9e17 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h @@ -397,6 +397,14 @@ enum { #define FBNIC_TCE_DROP_CTRL_TTI_FRM_DROP_EN CSR_BIT(1) #define FBNIC_TCE_DROP_CTRL_TTI_TBI_DROP_EN CSR_BIT(2) +#define FBNIC_TCE_TCAM_IDX2DEST_MAP 0x0404A /* 0x10128 */ +#define FBNIC_TCE_TCAM_IDX2DEST_MAP_DEST_ID_0 CSR_GENMASK(3, 0) +enum { + FBNIC_TCE_TCAM_DEST_MAC = 1, + FBNIC_TCE_TCAM_DEST_BMC = 2, + FBNIC_TCE_TCAM_DEST_FW = 4, +}; + #define FBNIC_TCE_TXB_TX_BMC_Q_CTRL 0x0404B /* 0x1012c */ #define FBNIC_TCE_TXB_BMC_DWRR_CTRL 0x0404C /* 0x10130 */ #define FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM0 CSR_GENMASK(7, 0) @@ -407,6 +415,18 @@ enum { #define FBNIC_TCE_TXB_BMC_DWRR_CTRL_EXT 0x0404F /* 0x1013c */ #define FBNIC_CSR_END_TCE 0x04050 /* CSR section delimiter */ +/* TCE RAM registers */ +#define FBNIC_CSR_START_TCE_RAM 0x04200 /* CSR section delimiter */ +#define FBNIC_TCE_RAM_TCAM(m, n) \ + (0x04200 + 0x8 * (n) + (m)) /* 0x10800 + 32*n + 4*m */ +#define FBNIC_TCE_RAM_TCAM_MASK CSR_GENMASK(15, 0) +#define FBNIC_TCE_RAM_TCAM_VALUE CSR_GENMASK(31, 16) +#define FBNIC_TCE_RAM_TCAM3(n) (0x04218 + (n)) /* 0x010860 + 4*n */ +#define FBNIC_TCE_RAM_TCAM3_DEST_MASK CSR_GENMASK(5, 3) +#define FBNIC_TCE_RAM_TCAM3_MCQ_MASK CSR_BIT(7) +#define FBNIC_TCE_RAM_TCAM3_VALIDATE CSR_BIT(31) +#define FBNIC_CSR_END_TCE_RAM 0x0421F /* CSR section delimiter */ + /* TMI registers */ #define FBNIC_CSR_START_TMI 0x04400 /* CSR section delimiter */ #define FBNIC_TMI_SOP_PROT_CTRL 0x04400 /* 0x11000 */ @@ -645,6 +665,15 @@ enum { #define FBNIC_RPC_TCAM_MACDA_VALUE CSR_GENMASK(15, 0) #define FBNIC_RPC_TCAM_MACDA_MASK CSR_GENMASK(31, 16) +#define FBNIC_RPC_TCAM_OUTER_IPSRC(m, n)\ + (0x08c00 + 0x08 * (n) + (m)) /* 0x023000 + 32*n + 4*m */ +#define FBNIC_RPC_TCAM_OUTER_IPDST(m, n)\ + (0x08c48 + 0x08 * (n) + (m)) /* 0x023120 + 32*n + 4*m */ +#define FBNIC_RPC_TCAM_IPSRC(m, n)\ + (0x08c90 + 0x08 * (n) + (m)) /* 0x023240 + 32*n + 4*m */ +#define FBNIC_RPC_TCAM_IPDST(m, n)\ + (0x08cd8 + 0x08 * (n) + (m)) /* 0x023360 + 32*n + 4*m */ + #define FBNIC_RPC_RSS_TBL(n, m) \ (0x08d20 + 0x100 * (n) + (m)) /* 0x023480 + 1024*n + 4*m */ #define FBNIC_RPC_RSS_TBL_COUNT 2 @@ -663,6 +692,13 @@ enum { #define FBNIC_MASTER_SPARE_0 0x0C41B /* 0x3106c */ #define FBNIC_CSR_END_MASTER 0x0C452 /* CSR section delimiter */ +/* MAC PCS registers */ +#define FBNIC_CSR_START_PCS 0x10000 /* CSR section delimiter */ +#define FBNIC_CSR_END_PCS 0x10668 /* CSR section delimiter */ + +#define FBNIC_CSR_START_RSFEC 0x10800 /* CSR section delimiter */ +#define FBNIC_CSR_END_RSFEC 0x108c8 /* CSR section delimiter */ + /* MAC MAC 
registers (ASIC only) */ #define FBNIC_CSR_START_MAC_MAC 0x11000 /* CSR section delimiter */ #define FBNIC_MAC_COMMAND_CONFIG 0x11002 /* 0x44008 */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c index 1117d5a32867..354b5397815f 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c @@ -116,8 +116,25 @@ static void fbnic_get_ts_stats(struct net_device *netdev, } } +static void fbnic_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *data) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + + fbnic_csr_get_regs(fbn->fbd, data, &regs->version); +} + +static int fbnic_get_regs_len(struct net_device *netdev) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + + return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32); +} + static const struct ethtool_ops fbnic_ethtool_ops = { .get_drvinfo = fbnic_get_drvinfo, + .get_regs_len = fbnic_get_regs_len, + .get_regs = fbnic_get_regs, .get_ts_info = fbnic_get_ts_info, .get_ts_stats = fbnic_get_ts_stats, .get_eth_mac_stats = fbnic_get_eth_mac_stats, diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c index c08798fad203..fc7d80db5fa6 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c @@ -273,6 +273,7 @@ void __fbnic_set_rx_mode(struct net_device *netdev) /* Write updates to hardware */ fbnic_write_rules(fbd); fbnic_write_macda(fbd); + fbnic_write_tce_tcam(fbd); } static void fbnic_set_rx_mode(struct net_device *netdev) diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c index 337b8b3aef2f..908c098cd59e 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c @@ -587,6 +587,116 @@ static void fbnic_clear_act_tcam(struct fbnic_dev *fbd, unsigned int idx) wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i), 0); } +static void fbnic_clear_tce_tcam_entry(struct fbnic_dev *fbd, unsigned int idx) +{ + int i; + + /* Invalidate entry and clear addr state info */ + for (i = 0; i <= FBNIC_TCE_TCAM_WORD_LEN; i++) + wr32(fbd, FBNIC_TCE_RAM_TCAM(idx, i), 0); +} + +static void fbnic_write_tce_tcam_dest(struct fbnic_dev *fbd, unsigned int idx, + struct fbnic_mac_addr *mac_addr) +{ + u32 dest = FBNIC_TCE_TCAM_DEST_BMC; + u32 idx2dest_map; + + if (is_multicast_ether_addr(mac_addr->value.addr8)) + dest |= FBNIC_TCE_TCAM_DEST_MAC; + + idx2dest_map = rd32(fbd, FBNIC_TCE_TCAM_IDX2DEST_MAP); + idx2dest_map &= ~(FBNIC_TCE_TCAM_IDX2DEST_MAP_DEST_ID_0 << (4 * idx)); + idx2dest_map |= dest << (4 * idx); + + wr32(fbd, FBNIC_TCE_TCAM_IDX2DEST_MAP, idx2dest_map); +} + +static void fbnic_write_tce_tcam_entry(struct fbnic_dev *fbd, unsigned int idx, + struct fbnic_mac_addr *mac_addr) +{ + __be16 *mask, *value; + int i; + + mask = &mac_addr->mask.addr16[FBNIC_TCE_TCAM_WORD_LEN - 1]; + value = &mac_addr->value.addr16[FBNIC_TCE_TCAM_WORD_LEN - 1]; + + for (i = 0; i < FBNIC_TCE_TCAM_WORD_LEN; i++) + wr32(fbd, FBNIC_TCE_RAM_TCAM(idx, i), + FIELD_PREP(FBNIC_TCE_RAM_TCAM_MASK, ntohs(*mask--)) | + FIELD_PREP(FBNIC_TCE_RAM_TCAM_VALUE, ntohs(*value--))); + + wrfl(fbd); + + wr32(fbd, FBNIC_TCE_RAM_TCAM3(idx), FBNIC_TCE_RAM_TCAM3_MCQ_MASK | + FBNIC_TCE_RAM_TCAM3_DEST_MASK | + FBNIC_TCE_RAM_TCAM3_VALIDATE); +} + +static void __fbnic_write_tce_tcam_rev(struct fbnic_dev *fbd) +{ + int tcam_idx = FBNIC_TCE_TCAM_NUM_ENTRIES; + int mac_idx; + + for (mac_idx =
ARRAY_SIZE(fbd->mac_addr); mac_idx--;) { + struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[mac_idx]; + + /* Verify BMC bit is set */ + if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) + continue; + + if (!tcam_idx) { + dev_err(fbd->dev, "TCE TCAM overflow\n"); + return; + } + + tcam_idx--; + fbnic_write_tce_tcam_dest(fbd, tcam_idx, mac_addr); + fbnic_write_tce_tcam_entry(fbd, tcam_idx, mac_addr); + } + + while (tcam_idx) + fbnic_clear_tce_tcam_entry(fbd, --tcam_idx); + + fbd->tce_tcam_last = tcam_idx; +} + +static void __fbnic_write_tce_tcam(struct fbnic_dev *fbd) +{ + int tcam_idx = 0; + int mac_idx; + + for (mac_idx = 0; mac_idx < ARRAY_SIZE(fbd->mac_addr); mac_idx++) { + struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[mac_idx]; + + /* Verify BMC bit is set */ + if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) + continue; + + if (tcam_idx == FBNIC_TCE_TCAM_NUM_ENTRIES) { + dev_err(fbd->dev, "TCE TCAM overflow\n"); + return; + } + + fbnic_write_tce_tcam_dest(fbd, tcam_idx, mac_addr); + fbnic_write_tce_tcam_entry(fbd, tcam_idx, mac_addr); + tcam_idx++; + } + + while (tcam_idx < FBNIC_TCE_TCAM_NUM_ENTRIES) + fbnic_clear_tce_tcam_entry(fbd, tcam_idx++); + + fbd->tce_tcam_last = tcam_idx; +} + +void fbnic_write_tce_tcam(struct fbnic_dev *fbd) +{ + if (fbd->tce_tcam_last) + __fbnic_write_tce_tcam_rev(fbd); + else + __fbnic_write_tce_tcam(fbd); +} + void fbnic_clear_rules(struct fbnic_dev *fbd) { u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK, diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h index d62935f722a2..0d8285fa5b45 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h @@ -35,6 +35,9 @@ enum { #define FBNIC_RPC_TCAM_ACT_WORD_LEN 11 #define FBNIC_RPC_TCAM_ACT_NUM_ENTRIES 64 +#define FBNIC_TCE_TCAM_WORD_LEN 3 +#define FBNIC_TCE_TCAM_NUM_ENTRIES 8 + struct fbnic_mac_addr { union { unsigned char addr8[ETH_ALEN]; @@ -186,4 +189,5 @@ static inline int __fbnic_mc_unsync(struct fbnic_mac_addr *mac_addr) void fbnic_clear_rules(struct fbnic_dev *fbd); void fbnic_write_rules(struct fbnic_dev *fbd); +void fbnic_write_tce_tcam(struct fbnic_dev *fbd); #endif /* _FBNIC_RPC_H_ */ diff --git a/drivers/net/ethernet/microchip/lan969x/Kconfig b/drivers/net/ethernet/microchip/lan969x/Kconfig index 728180d3fa33..c5c6122ae2ec 100644 --- a/drivers/net/ethernet/microchip/lan969x/Kconfig +++ b/drivers/net/ethernet/microchip/lan969x/Kconfig @@ -1,5 +1,5 @@ config LAN969X_SWITCH - tristate "Lan969x switch driver" + bool "Lan969x switch driver" depends on SPARX5_SWITCH help This driver supports the lan969x family of network switch devices. diff --git a/drivers/net/ethernet/microchip/lan969x/Makefile b/drivers/net/ethernet/microchip/lan969x/Makefile index 9a2351b4f111..316405cbbc71 100644 --- a/drivers/net/ethernet/microchip/lan969x/Makefile +++ b/drivers/net/ethernet/microchip/lan969x/Makefile @@ -3,7 +3,7 @@ # Makefile for the Microchip lan969x network device drivers. 
# -obj-$(CONFIG_LAN969X_SWITCH) += lan969x-switch.o +obj-$(CONFIG_SPARX5_SWITCH) += lan969x-switch.o lan969x-switch-y := lan969x_regs.o lan969x.o lan969x_calendar.o \ lan969x_vcap_ag_api.o lan969x_vcap_impl.o diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 7c9540a71725..558e03301aa8 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -730,7 +730,7 @@ static void ocelot_get_stats64(struct net_device *dev, static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, - u16 vid, u16 flags, + u16 vid, u16 flags, bool *notified, struct netlink_ext_ack *extack) { struct ocelot_port_private *priv = netdev_priv(dev); @@ -744,7 +744,7 @@ static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, - struct netlink_ext_ack *extack) + bool *notified, struct netlink_ext_ack *extack) { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 6e0929af0f72..98e098c09c03 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -829,7 +829,7 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, return err; } - irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask); + irq_update_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask); nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector, r_vec->irq_entry); @@ -840,7 +840,7 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, static void nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec) { - irq_set_affinity_hint(r_vec->irq_vector, NULL); + irq_update_affinity_hint(r_vec->irq_vector, NULL); nfp_net_napi_del(&nn->dp, r_vec); free_irq(r_vec->irq_vector, r_vec); } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c index b93791d6b593..f5dc876eb500 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c @@ -394,6 +394,7 @@ err_out_free_irqs: err_out_pci: ionic_dev_teardown(ionic); ionic_clear_pci(ionic); + ionic_debugfs_del_dev(ionic); err_out: mutex_destroy(&ionic->dev_cmd_lock); ionic_devlink_free(ionic); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index b3588a1ebc25..eb69121df726 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -367,7 +367,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p) static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *netdev, - const unsigned char *addr, u16 vid, + const unsigned char *addr, u16 vid, bool *notified, struct netlink_ext_ack *extack) { struct qlcnic_adapter *adapter = netdev_priv(netdev); @@ -394,7 +394,7 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *netdev, const unsigned char *addr, u16 vid, u16 flags, - struct netlink_ext_ack *extack) + bool *notified, struct netlink_ext_ack 
*extack) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int err = 0; diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index b84587841e31..739707a7b40f 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -347,6 +347,8 @@ enum rtl8125_registers { TxPoll_8125 = 0x90, LEDSEL3 = 0x96, MAC0_BKP = 0x19e0, + RSS_CTRL_8125 = 0x4500, + Q_NUM_CTRL_8125 = 0x4800, EEE_TXIDLE_TIMER_8125 = 0x6048, }; @@ -660,13 +662,9 @@ struct rtl8169_private { struct work_struct work; } wk; - raw_spinlock_t config25_lock; raw_spinlock_t mac_ocp_lock; struct mutex led_lock; /* serialize LED ctrl RMW access */ - raw_spinlock_t cfg9346_usage_lock; - int cfg9346_usage_count; - unsigned supports_gmii:1; unsigned aspm_manageable:1; unsigned dash_enabled:1; @@ -720,22 +718,12 @@ static inline struct device *tp_to_dev(struct rtl8169_private *tp) static void rtl_lock_config_regs(struct rtl8169_private *tp) { - unsigned long flags; - - raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags); - if (!--tp->cfg9346_usage_count) - RTL_W8(tp, Cfg9346, Cfg9346_Lock); - raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags); + RTL_W8(tp, Cfg9346, Cfg9346_Lock); } static void rtl_unlock_config_regs(struct rtl8169_private *tp) { - unsigned long flags; - - raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags); - if (!tp->cfg9346_usage_count++) - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags); + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); } static void rtl_pci_commit(struct rtl8169_private *tp) @@ -746,24 +734,32 @@ static void rtl_pci_commit(struct rtl8169_private *tp) static void rtl_mod_config2(struct rtl8169_private *tp, u8 clear, u8 set) { - unsigned long flags; u8 val; - raw_spin_lock_irqsave(&tp->config25_lock, flags); val = RTL_R8(tp, Config2); RTL_W8(tp, Config2, (val & ~clear) | set); - raw_spin_unlock_irqrestore(&tp->config25_lock, flags); } static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set) { - unsigned long flags; u8 val; - raw_spin_lock_irqsave(&tp->config25_lock, flags); val = RTL_R8(tp, Config5); RTL_W8(tp, Config5, (val & ~clear) | set); - raw_spin_unlock_irqrestore(&tp->config25_lock, flags); +} + +static void r8169_mod_reg8_cond(struct rtl8169_private *tp, int reg, + u8 bits, bool cond) +{ + u8 val, old_val; + + old_val = RTL_R8(tp, reg); + if (cond) + val = old_val | bits; + else + val = old_val & ~bits; + if (val != old_val) + RTL_W8(tp, reg, val); } static bool rtl_is_8125(struct rtl8169_private *tp) @@ -1435,19 +1431,11 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp) static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable) { - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26: - case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30: - case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_39 ... 
RTL_GIGA_MAC_VER_66: - if (enable) - RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN); - else - RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | D3_NO_PLL_DOWN); - break; - default: - break; - } + if (tp->mac_version >= RTL_GIGA_MAC_VER_25 && + tp->mac_version != RTL_GIGA_MAC_VER_28 && + tp->mac_version != RTL_GIGA_MAC_VER_31 && + tp->mac_version != RTL_GIGA_MAC_VER_38) + r8169_mod_reg8_cond(tp, PMCH, D3_NO_PLL_DOWN, !enable); } static void rtl_reset_packet_filter(struct rtl8169_private *tp) @@ -1556,61 +1544,40 @@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) { - static const struct { - u32 opt; - u16 reg; - u8 mask; - } cfg[] = { - { WAKE_PHY, Config3, LinkUp }, - { WAKE_UCAST, Config5, UWF }, - { WAKE_BCAST, Config5, BWF }, - { WAKE_MCAST, Config5, MWF }, - { WAKE_ANY, Config5, LanWake }, - { WAKE_MAGIC, Config3, MagicPacket } - }; - unsigned int i, tmp = ARRAY_SIZE(cfg); - unsigned long flags; - u8 options; - rtl_unlock_config_regs(tp); if (rtl_is_8168evl_up(tp)) { - tmp--; if (wolopts & WAKE_MAGIC) rtl_eri_set_bits(tp, 0x0dc, MagicPacket_v2); else rtl_eri_clear_bits(tp, 0x0dc, MagicPacket_v2); } else if (rtl_is_8125(tp)) { - tmp--; if (wolopts & WAKE_MAGIC) r8168_mac_ocp_modify(tp, 0xc0b6, 0, BIT(0)); else r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0); + } else { + r8169_mod_reg8_cond(tp, Config3, MagicPacket, + wolopts & WAKE_MAGIC); } - raw_spin_lock_irqsave(&tp->config25_lock, flags); - for (i = 0; i < tmp; i++) { - options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask; - if (wolopts & cfg[i].opt) - options |= cfg[i].mask; - RTL_W8(tp, cfg[i].reg, options); - } - raw_spin_unlock_irqrestore(&tp->config25_lock, flags); + r8169_mod_reg8_cond(tp, Config3, LinkUp, wolopts & WAKE_PHY); + if (rtl_is_8125(tp)) + r8168_mac_ocp_modify(tp, 0xe0c6, 0x3f, + wolopts & WAKE_PHY ? 0x13 : 0); + r8169_mod_reg8_cond(tp, Config5, UWF, wolopts & WAKE_UCAST); + r8169_mod_reg8_cond(tp, Config5, BWF, wolopts & WAKE_BCAST); + r8169_mod_reg8_cond(tp, Config5, MWF, wolopts & WAKE_MCAST); + r8169_mod_reg8_cond(tp, Config5, LanWake, wolopts); switch (tp->mac_version) { case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: - options = RTL_R8(tp, Config1) & ~PMEnable; - if (wolopts) - options |= PMEnable; - RTL_W8(tp, Config1, options); + r8169_mod_reg8_cond(tp, Config1, PMEnable, wolopts); break; case RTL_GIGA_MAC_VER_34: case RTL_GIGA_MAC_VER_37: case RTL_GIGA_MAC_VER_39 ... 
RTL_GIGA_MAC_VER_66: - if (wolopts) - rtl_mod_config2(tp, 0, PME_SIGNAL); - else - rtl_mod_config2(tp, PME_SIGNAL, 0); + r8169_mod_reg8_cond(tp, Config2, PME_SIGNAL, wolopts); break; default: break; @@ -2576,86 +2543,31 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0; } -static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) -{ - RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); - RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); -} - -static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) -{ - RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); - RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); -} - -static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) -{ - RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); -} - -static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) -{ - RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); -} - -static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) -{ - RTL_W8(tp, MaxTxPacketSize, 0x24); - RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); - RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01); -} - -static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) -{ - RTL_W8(tp, MaxTxPacketSize, 0x3f); - RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); - RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01); -} - -static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) -{ - RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0)); -} - -static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp) -{ - RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); -} - static void rtl_jumbo_config(struct rtl8169_private *tp) { bool jumbo = tp->dev->mtu > ETH_DATA_LEN; int readrq = 4096; + if (jumbo && tp->mac_version >= RTL_GIGA_MAC_VER_17 && + tp->mac_version <= RTL_GIGA_MAC_VER_26) + readrq = 512; + rtl_unlock_config_regs(tp); switch (tp->mac_version) { case RTL_GIGA_MAC_VER_17: - if (jumbo) { - readrq = 512; - r8168b_1_hw_jumbo_enable(tp); - } else { - r8168b_1_hw_jumbo_disable(tp); - } + r8169_mod_reg8_cond(tp, Config4, BIT(0), jumbo); break; case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26: - if (jumbo) { - readrq = 512; - r8168c_hw_jumbo_enable(tp); - } else { - r8168c_hw_jumbo_disable(tp); - } + r8169_mod_reg8_cond(tp, Config3, Jumbo_En0, jumbo); + r8169_mod_reg8_cond(tp, Config4, Jumbo_En1, jumbo); break; case RTL_GIGA_MAC_VER_28: - if (jumbo) - r8168dp_hw_jumbo_enable(tp); - else - r8168dp_hw_jumbo_disable(tp); + r8169_mod_reg8_cond(tp, Config3, Jumbo_En0, jumbo); break; case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33: - if (jumbo) - r8168e_hw_jumbo_enable(tp); - else - r8168e_hw_jumbo_disable(tp); + RTL_W8(tp, MaxTxPacketSize, jumbo ? 
0x24 : 0x3f); + r8169_mod_reg8_cond(tp, Config3, Jumbo_En0, jumbo); + r8169_mod_reg8_cond(tp, Config4, BIT(0), jumbo); break; default: break; @@ -3766,8 +3678,8 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp) rtl_pcie_state_l2l3_disable(tp); RTL_W16(tp, 0x382, 0x221b); - RTL_W8(tp, 0x4500, 0); - RTL_W16(tp, 0x4800, 0); + RTL_W32(tp, RSS_CTRL_8125, 0); + RTL_W16(tp, Q_NUM_CTRL_8125, 0); /* disable UPS */ r8168_mac_ocp_modify(tp, 0xd40a, 0x0010, 0x0000); @@ -5316,6 +5228,12 @@ static int r8169_mdio_register(struct rtl8169_private *tp) phy_support_eee(tp->phydev); phy_support_asym_pause(tp->phydev); + /* mimic behavior of r8125/r8126 vendor drivers */ + if (tp->mac_version == RTL_GIGA_MAC_VER_61) + phy_set_eee_broken(tp->phydev, + ETHTOOL_LINK_MODE_2500baseT_Full_BIT); + phy_set_eee_broken(tp->phydev, ETHTOOL_LINK_MODE_5000baseT_Full_BIT); + /* PHY will be woken up in rtl_open() */ phy_suspend(tp->phydev); @@ -5487,8 +5405,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1; tp->ocp_base = OCP_STD_PHY_BASE; - raw_spin_lock_init(&tp->cfg9346_usage_lock); - raw_spin_lock_init(&tp->config25_lock); raw_spin_lock_init(&tp->mac_ocp_lock); mutex_init(&tp->led_lock); diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c index 1d5b33f6c4b5..5307c6ff4e25 100644 --- a/drivers/net/ethernet/realtek/r8169_phy_config.c +++ b/drivers/net/ethernet/realtek/r8169_phy_config.c @@ -96,15 +96,7 @@ static void rtl8125_common_config_eee_phy(struct phy_device *phydev) phy_modify_paged(phydev, 0xa4a, 0x11, 0x0200, 0x0000); } -static void rtl8125a_config_eee_phy(struct phy_device *phydev) -{ - rtl8168g_config_eee_phy(phydev); - /* disable EEE at 2.5Gbps */ - phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000); - rtl8125_common_config_eee_phy(phydev); -} - -static void rtl8125b_config_eee_phy(struct phy_device *phydev) +static void rtl8125_config_eee_phy(struct phy_device *phydev) { rtl8168g_config_eee_phy(phydev); rtl8125_common_config_eee_phy(phydev); @@ -1066,7 +1058,7 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp, rtl8168g_enable_gphy_10m(phydev); rtl8168g_disable_aldps(phydev); - rtl8125a_config_eee_phy(phydev); + rtl8125_config_eee_phy(phydev); } static void rtl8125b_hw_phy_config(struct rtl8169_private *tp, @@ -1106,7 +1098,7 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp, rtl8125_legacy_force_mode(phydev); rtl8168g_disable_aldps(phydev); - rtl8125b_config_eee_phy(phydev); + rtl8125_config_eee_phy(phydev); } static void rtl8125d_hw_phy_config(struct rtl8169_private *tp, @@ -1116,7 +1108,7 @@ static void rtl8125d_hw_phy_config(struct rtl8169_private *tp, rtl8168g_enable_gphy_10m(phydev); rtl8125_legacy_force_mode(phydev); rtl8168g_disable_aldps(phydev); - rtl8125b_config_eee_phy(phydev); + rtl8125_config_eee_phy(phydev); } static void rtl8126a_hw_phy_config(struct rtl8169_private *tp, diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h index 583c33930f88..942f1e531a85 100644 --- a/drivers/net/ethernet/realtek/rtase/rtase.h +++ b/drivers/net/ethernet/realtek/rtase/rtase.h @@ -170,7 +170,7 @@ enum rtase_registers { RTASE_INT_MITI_TX = 0x0A00, RTASE_INT_MITI_RX = 0x0A80, - RTASE_VLAN_ENTRY_0 = 0xAC80, + RTASE_VLAN_ENTRY_0 = 0xAC80, }; enum rtase_desc_status_bit { diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c 
b/drivers/net/ethernet/realtek/rtase/rtase_main.c index f8777b7663d3..874994d9ceb9 100644 --- a/drivers/net/ethernet/realtek/rtase/rtase_main.c +++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c @@ -2115,7 +2115,7 @@ static int rtase_init_one(struct pci_dev *pdev, ret = rtase_alloc_interrupt(pdev, tp); if (ret < 0) { dev_err(&pdev->dev, "unable to alloc MSIX/MSI\n"); - goto err_out_1; + goto err_out_del_napi; } rtase_init_netdev_ops(dev); @@ -2148,7 +2148,7 @@ static int rtase_init_one(struct pci_dev *pdev, GFP_KERNEL); if (!tp->tally_vaddr) { ret = -ENOMEM; - goto err_out; + goto err_out_free_dma; } rtase_tally_counter_clear(tp); @@ -2159,13 +2159,13 @@ static int rtase_init_one(struct pci_dev *pdev, ret = register_netdev(dev); if (ret != 0) - goto err_out; + goto err_out_free_dma; netdev_dbg(dev, "%pM, IRQ %d\n", dev->dev_addr, dev->irq); return 0; -err_out: +err_out_free_dma: if (tp->tally_vaddr) { dma_free_coherent(&pdev->dev, sizeof(*tp->tally_vaddr), @@ -2175,7 +2175,7 @@ err_out: tp->tally_vaddr = NULL; } -err_out_1: +err_out_del_napi: for (i = 0; i < tp->int_nums; i++) { ivec = &tp->int_vector[i]; netif_napi_del(&ivec->napi); diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index de131fc5fa0b..452009ed7a43 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1751,7 +1751,7 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) #endif } -static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) +static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 **names) { DECLARE_BITMAP(mask, EF10_STAT_COUNT); diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c index 5c2551369812..6c3b74000d3b 100644 --- a/drivers/net/ethernet/sfc/ef100_ethtool.c +++ b/drivers/net/ethernet/sfc/ef100_ethtool.c @@ -59,6 +59,7 @@ const struct ethtool_ops ef100_ethtool_ops = { .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size, .rxfh_per_ctx_key = true, + .cap_rss_rxnfc_adds = true, .rxfh_priv_size = sizeof(struct efx_rss_context_priv), .get_rxfh = efx_ethtool_get_rxfh, .set_rxfh = efx_ethtool_set_rxfh, diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 6da06931187d..62e674d6ff60 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -583,7 +583,7 @@ static const struct efx_hw_stat_desc ef100_stat_desc[EF100_STAT_COUNT] = { EFX_GENERIC_SW_STAT(rx_noskb_drops), }; -static size_t ef100_describe_stats(struct efx_nic *efx, u8 *names) +static size_t ef100_describe_stats(struct efx_nic *efx, u8 **names) { DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {}; diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 90bb7db15519..650136dfc642 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -418,14 +418,6 @@ unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs) return usecs * 1000 / efx->timer_quantum_ns; } -unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks) -{ - /* We must round up when converting ticks to microseconds - * because we round down when converting the other way. 
- */ - return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000); -} - /* Set interrupt moderation parameters */ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, unsigned int rx_usecs, bool rx_adaptive, diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 7a6cab883d66..45e191686625 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -168,7 +168,6 @@ extern const struct ethtool_ops efx_ethtool_ops; /* Global */ unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs); -unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks); int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, unsigned int rx_usecs, bool rx_adaptive, bool rx_may_override_tx); diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index 13cf647051af..c88ec3e24836 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -635,22 +635,6 @@ int __efx_reconfigure_port(struct efx_nic *efx) return rc; } -/* Reinitialise the MAC to pick up new PHY settings, even if the port is - * disabled. - */ -int efx_reconfigure_port(struct efx_nic *efx) -{ - int rc; - - EFX_ASSERT_RESET_SERIALISED(efx); - - mutex_lock(&efx->mac_lock); - rc = __efx_reconfigure_port(efx); - mutex_unlock(&efx->mac_lock); - - return rc; -} - /************************************************************************** * * Device reset and suspend diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h index 2c54dac3e662..19a8ca530969 100644 --- a/drivers/net/ethernet/sfc/efx_common.h +++ b/drivers/net/ethernet/sfc/efx_common.h @@ -40,7 +40,6 @@ void efx_destroy_reset_workqueue(void); void efx_start_monitor(struct efx_nic *efx); int __efx_reconfigure_port(struct efx_nic *efx); -int efx_reconfigure_port(struct efx_nic *efx); #define EFX_ASSERT_RESET_SERIALISED(efx) \ do { \ diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index bb1930818beb..83d715544f7f 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -263,6 +263,7 @@ const struct ethtool_ops efx_ethtool_ops = { .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size, .rxfh_per_ctx_key = true, + .cap_rss_rxnfc_adds = true, .rxfh_priv_size = sizeof(struct efx_rss_context_priv), .get_rxfh = efx_ethtool_get_rxfh, .set_rxfh = efx_ethtool_set_rxfh, diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c index ae32e08540fa..2d734496733f 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.c +++ b/drivers/net/ethernet/sfc/ethtool_common.c @@ -395,7 +395,7 @@ int efx_ethtool_fill_self_tests(struct efx_nic *efx, return n; } -static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings) +static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 **strings) { size_t n_stats = 0; struct efx_channel *channel; @@ -403,24 +403,22 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings) efx_for_each_channel(channel, efx) { if (efx_channel_has_tx_queues(channel)) { n_stats++; - if (strings != NULL) { - snprintf(strings, ETH_GSTRING_LEN, - "tx-%u.tx_packets", - channel->tx_queue[0].queue / - EFX_MAX_TXQ_PER_CHANNEL); + if (!strings) + continue; - strings += ETH_GSTRING_LEN; - } + ethtool_sprintf(strings, "tx-%u.tx_packets", + channel->tx_queue[0].queue / + 
EFX_MAX_TXQ_PER_CHANNEL); } } efx_for_each_channel(channel, efx) { if (efx_channel_has_rx_queue(channel)) { n_stats++; - if (strings != NULL) { - snprintf(strings, ETH_GSTRING_LEN, - "rx-%d.rx_packets", channel->channel); - strings += ETH_GSTRING_LEN; - } + if (!strings) + continue; + + ethtool_sprintf(strings, "rx-%d.rx_packets", + channel->channel); } } if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) { @@ -428,11 +426,11 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings) for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) { n_stats++; - if (strings) { - snprintf(strings, ETH_GSTRING_LEN, - "tx-xdp-cpu-%hu.tx_packets", xdp); - strings += ETH_GSTRING_LEN; - } + if (!strings) + continue; + + ethtool_sprintf(strings, "tx-xdp-cpu-%hu.tx_packets", + xdp); } } @@ -464,15 +462,11 @@ void efx_ethtool_get_strings(struct net_device *net_dev, switch (string_set) { case ETH_SS_STATS: - strings += (efx->type->describe_stats(efx, strings) * - ETH_GSTRING_LEN); + efx->type->describe_stats(efx, &strings); for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) - strscpy(strings + i * ETH_GSTRING_LEN, - efx_sw_stat_desc[i].name, ETH_GSTRING_LEN); - strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN; - strings += (efx_describe_per_queue_stats(efx, strings) * - ETH_GSTRING_LEN); - efx_ptp_describe_stats(efx, strings); + ethtool_puts(&strings, efx_sw_stat_desc[i].name); + efx_describe_per_queue_stats(efx, &strings); + efx_ptp_describe_stats(efx, &strings); break; case ETH_SS_TEST: efx_ethtool_fill_self_tests(efx, NULL, strings, NULL); diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 8925745f1c17..b07f7e4e2877 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -1886,14 +1886,6 @@ unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs) return usecs * 1000 / efx->timer_quantum_ns; } -unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks) -{ - /* We must round up when converting ticks to microseconds - * because we round down when converting the other way. 
- */ - return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000); -} - /* Set interrupt moderation parameters */ int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs, unsigned int rx_usecs, bool rx_adaptive, diff --git a/drivers/net/ethernet/sfc/falcon/efx.h b/drivers/net/ethernet/sfc/falcon/efx.h index d3b4646545fa..52508f2c8cb2 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.h +++ b/drivers/net/ethernet/sfc/falcon/efx.h @@ -198,7 +198,6 @@ int ef4_try_recovery(struct ef4_nic *efx); /* Global */ void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type); unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs); -unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks); int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs, unsigned int rx_usecs, bool rx_adaptive, bool rx_may_override_tx); diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c index f4db683b80f7..04766448a545 100644 --- a/drivers/net/ethernet/sfc/falcon/ethtool.c +++ b/drivers/net/ethernet/sfc/falcon/ethtool.c @@ -353,7 +353,7 @@ static int ef4_ethtool_fill_self_tests(struct ef4_nic *efx, return n; } -static size_t ef4_describe_per_queue_stats(struct ef4_nic *efx, u8 *strings) +static size_t ef4_describe_per_queue_stats(struct ef4_nic *efx, u8 **strings) { size_t n_stats = 0; struct ef4_channel *channel; @@ -361,24 +361,22 @@ static size_t ef4_describe_per_queue_stats(struct ef4_nic *efx, u8 *strings) ef4_for_each_channel(channel, efx) { if (ef4_channel_has_tx_queues(channel)) { n_stats++; - if (strings != NULL) { - snprintf(strings, ETH_GSTRING_LEN, - "tx-%u.tx_packets", - channel->tx_queue[0].queue / - EF4_TXQ_TYPES); + if (!strings) + continue; - strings += ETH_GSTRING_LEN; - } + ethtool_sprintf(strings, "tx-%u.tx_packets", + channel->tx_queue[0].queue / + EF4_TXQ_TYPES); } } ef4_for_each_channel(channel, efx) { if (ef4_channel_has_rx_queue(channel)) { n_stats++; - if (strings != NULL) { - snprintf(strings, ETH_GSTRING_LEN, - "rx-%d.rx_packets", channel->channel); - strings += ETH_GSTRING_LEN; - } + if (!strings) + continue; + + ethtool_sprintf(strings, "rx-%d.rx_packets", + channel->channel); } } return n_stats; @@ -409,14 +407,10 @@ static void ef4_ethtool_get_strings(struct net_device *net_dev, switch (string_set) { case ETH_SS_STATS: - strings += (efx->type->describe_stats(efx, strings) * - ETH_GSTRING_LEN); + efx->type->describe_stats(efx, &strings); for (i = 0; i < EF4_ETHTOOL_SW_STAT_COUNT; i++) - strscpy(strings + i * ETH_GSTRING_LEN, - ef4_sw_stat_desc[i].name, ETH_GSTRING_LEN); - strings += EF4_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN; - strings += (ef4_describe_per_queue_stats(efx, strings) * - ETH_GSTRING_LEN); + ethtool_puts(&strings, ef4_sw_stat_desc[i].name); + ef4_describe_per_queue_stats(efx, &strings); break; case ETH_SS_TEST: ef4_ethtool_fill_self_tests(efx, NULL, strings, NULL); diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c index 36114ce88034..4af56333ea49 100644 --- a/drivers/net/ethernet/sfc/falcon/falcon.c +++ b/drivers/net/ethernet/sfc/falcon/falcon.c @@ -2564,7 +2564,7 @@ static void falcon_remove_nic(struct ef4_nic *efx) efx->nic_data = NULL; } -static size_t falcon_describe_nic_stats(struct ef4_nic *efx, u8 *names) +static size_t falcon_describe_nic_stats(struct ef4_nic *efx, u8 **names) { return ef4_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT, falcon_stat_mask, names); diff --git 
a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c index c64623c2e80c..01017c41338e 100644 --- a/drivers/net/ethernet/sfc/falcon/farch.c +++ b/drivers/net/ethernet/sfc/falcon/farch.c @@ -1631,28 +1631,6 @@ void ef4_farch_rx_push_indir_table(struct ef4_nic *efx) } } -/* Looks at available SRAM resources and works out how many queues we - * can support, and where things like descriptor caches should live. - * - * SRAM is split up as follows: - * 0 buftbl entries for channels - * efx->vf_buftbl_base buftbl entries for SR-IOV - * efx->rx_dc_base RX descriptor caches - * efx->tx_dc_base TX descriptor caches - */ -void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw) -{ - unsigned vi_count; - - /* Account for the buffer table entries backing the datapath channels - * and the descriptor caches for those channels. - */ - vi_count = max(efx->n_channels, efx->n_tx_channels * EF4_TXQ_TYPES); - - efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES; - efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES; -} - u32 ef4_farch_fpga_ver(struct ef4_nic *efx) { ef4_oword_t altera_build; diff --git a/drivers/net/ethernet/sfc/falcon/net_driver.h b/drivers/net/ethernet/sfc/falcon/net_driver.h index a2c7139f2b32..7ab0db44720d 100644 --- a/drivers/net/ethernet/sfc/falcon/net_driver.h +++ b/drivers/net/ethernet/sfc/falcon/net_driver.h @@ -1057,7 +1057,7 @@ struct ef4_nic_type { void (*finish_flush)(struct ef4_nic *efx); void (*prepare_flr)(struct ef4_nic *efx); void (*finish_flr)(struct ef4_nic *efx); - size_t (*describe_stats)(struct ef4_nic *efx, u8 *names); + size_t (*describe_stats)(struct ef4_nic *efx, u8 **names); size_t (*update_stats)(struct ef4_nic *efx, u64 *full_stats, struct rtnl_link_stats64 *core_stats); void (*start_stats)(struct ef4_nic *efx); diff --git a/drivers/net/ethernet/sfc/falcon/nic.c b/drivers/net/ethernet/sfc/falcon/nic.c index 78c851b5a56f..a6304686bc90 100644 --- a/drivers/net/ethernet/sfc/falcon/nic.c +++ b/drivers/net/ethernet/sfc/falcon/nic.c @@ -444,18 +444,15 @@ void ef4_nic_get_regs(struct ef4_nic *efx, void *buf) * bits in the first @count bits of @mask for which a name is defined. 
*/ size_t ef4_nic_describe_stats(const struct ef4_hw_stat_desc *desc, size_t count, - const unsigned long *mask, u8 *names) + const unsigned long *mask, u8 **names) { size_t visible = 0; size_t index; for_each_set_bit(index, mask, count) { if (desc[index].name) { - if (names) { - strscpy(names, desc[index].name, - ETH_GSTRING_LEN); - names += ETH_GSTRING_LEN; - } + if (names) + ethtool_puts(names, desc[index].name); ++visible; } } @@ -511,14 +508,3 @@ void ef4_nic_update_stats(const struct ef4_hw_stat_desc *desc, size_t count, } } } - -void ef4_nic_fix_nodesc_drop_stat(struct ef4_nic *efx, u64 *rx_nodesc_drops) -{ - /* if down, or this is the first update after coming up */ - if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state) - efx->rx_nodesc_drops_while_down += - *rx_nodesc_drops - efx->rx_nodesc_drops_total; - efx->rx_nodesc_drops_total = *rx_nodesc_drops; - efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP); - *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down; -} diff --git a/drivers/net/ethernet/sfc/falcon/nic.h b/drivers/net/ethernet/sfc/falcon/nic.h index ada6e036fd97..bc6ef937d0fe 100644 --- a/drivers/net/ethernet/sfc/falcon/nic.h +++ b/drivers/net/ethernet/sfc/falcon/nic.h @@ -477,7 +477,6 @@ void ef4_farch_finish_flr(struct ef4_nic *efx); void falcon_start_nic_stats(struct ef4_nic *efx); void falcon_stop_nic_stats(struct ef4_nic *efx); int falcon_reset_xaui(struct ef4_nic *efx); -void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw); void ef4_farch_init_common(struct ef4_nic *efx); void ef4_farch_rx_push_indir_table(struct ef4_nic *efx); @@ -498,14 +497,10 @@ size_t ef4_nic_get_regs_len(struct ef4_nic *efx); void ef4_nic_get_regs(struct ef4_nic *efx, void *buf); size_t ef4_nic_describe_stats(const struct ef4_hw_stat_desc *desc, size_t count, - const unsigned long *mask, u8 *names); + const unsigned long *mask, u8 **names); void ef4_nic_update_stats(const struct ef4_hw_stat_desc *desc, size_t count, const unsigned long *mask, u64 *stats, const void *dma_buf, bool accumulate); -void ef4_nic_fix_nodesc_drop_stat(struct ef4_nic *efx, u64 *stat); - -#define EF4_MAX_FLUSH_TIME 5000 - void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq, ef4_qword_t *event); diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c index b9369483758c..e6e80b039ca2 100644 --- a/drivers/net/ethernet/sfc/falcon/tx.c +++ b/drivers/net/ethernet/sfc/falcon/tx.c @@ -40,14 +40,6 @@ static inline u8 *ef4_tx_get_copy_buffer(struct ef4_tx_queue *tx_queue, return (u8 *)page_buf->addr + offset; } -u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue, - struct ef4_tx_buffer *buffer, size_t len) -{ - if (len > EF4_TX_CB_SIZE) - return NULL; - return ef4_tx_get_copy_buffer(tx_queue, buffer); -} - static void ef4_dequeue_buffer(struct ef4_tx_queue *tx_queue, struct ef4_tx_buffer *buffer, unsigned int *pkts_compl, diff --git a/drivers/net/ethernet/sfc/falcon/tx.h b/drivers/net/ethernet/sfc/falcon/tx.h index 2a88c59cbbbe..868ed8a861ab 100644 --- a/drivers/net/ethernet/sfc/falcon/tx.h +++ b/drivers/net/ethernet/sfc/falcon/tx.h @@ -15,9 +15,6 @@ unsigned int ef4_tx_limit_len(struct ef4_tx_queue *tx_queue, dma_addr_t dma_addr, unsigned int len); -u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue, - struct ef4_tx_buffer *buffer, size_t len); - int ef4_enqueue_skb_tso(struct ef4_tx_queue *tx_queue, struct sk_buff *skb, bool *data_mapped); diff --git a/drivers/net/ethernet/sfc/mae.c 
b/drivers/net/ethernet/sfc/mae.c index 10709d828a63..50f097487b14 100644 --- a/drivers/net/ethernet/sfc/mae.c +++ b/drivers/net/ethernet/sfc/mae.c @@ -76,17 +76,6 @@ void efx_mae_mport_uplink(struct efx_nic *efx __always_unused, u32 *out) *out = EFX_DWORD_VAL(mport); } -void efx_mae_mport_vf(struct efx_nic *efx __always_unused, u32 vf_id, u32 *out) -{ - efx_dword_t mport; - - EFX_POPULATE_DWORD_3(mport, - MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC, - MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER, - MAE_MPORT_SELECTOR_FUNC_VF_ID, vf_id); - *out = EFX_DWORD_VAL(mport); -} - /* Constructs an mport selector from an mport ID, because they're not the same */ void efx_mae_mport_mport(struct efx_nic *efx __always_unused, u32 mport_id, u32 *out) { diff --git a/drivers/net/ethernet/sfc/mae.h b/drivers/net/ethernet/sfc/mae.h index 8df30bc4f3ba..db79912c86d8 100644 --- a/drivers/net/ethernet/sfc/mae.h +++ b/drivers/net/ethernet/sfc/mae.h @@ -23,7 +23,6 @@ int efx_mae_free_mport(struct efx_nic *efx, u32 id); void efx_mae_mport_wire(struct efx_nic *efx, u32 *out); void efx_mae_mport_uplink(struct efx_nic *efx, u32 *out); -void efx_mae_mport_vf(struct efx_nic *efx, u32 vf_id, u32 *out); void efx_mae_mport_mport(struct efx_nic *efx, u32 mport_id, u32 *out); int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id); diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 76578502226e..d461b1a6ce81 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -1051,15 +1051,6 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, cookie, false); } -int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd, - const efx_dword_t *inbuf, size_t inlen, - size_t outlen, efx_mcdi_async_completer *complete, - unsigned long cookie) -{ - return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete, - cookie, true); -} - int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, efx_dword_t *outbuf, size_t outlen, size_t *outlen_actual) @@ -1068,14 +1059,6 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, outlen_actual, false, NULL, NULL); } -int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen, - efx_dword_t *outbuf, size_t outlen, - size_t *outlen_actual) -{ - return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, - outlen_actual, true, NULL, NULL); -} - void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd, size_t inlen, efx_dword_t *outbuf, size_t outlen, int rc) @@ -1982,33 +1965,6 @@ efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out) } -int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) -{ - MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN); - size_t outlen; - int rc; - - rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0, - outbuf, sizeof(outbuf), &outlen); - if (rc) - goto fail; - - if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) { - rc = -EIO; - goto fail; - } - - *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID); - - return 0; - -fail: - *id_out = -1; - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); - return rc; -} - - int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) { MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN); @@ -2021,38 +1977,6 @@ int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) return rc; } -int efx_mcdi_flush_rxqs(struct efx_nic *efx) -{ - struct efx_channel *channel; - struct 
efx_rx_queue *rx_queue; - MCDI_DECLARE_BUF(inbuf, - MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS)); - int rc, count; - - BUILD_BUG_ON(EFX_MAX_CHANNELS > - MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM); - - count = 0; - efx_for_each_channel(channel, efx) { - efx_for_each_channel_rx_queue(rx_queue, channel) { - if (rx_queue->flush_pending) { - rx_queue->flush_pending = false; - atomic_dec(&efx->rxq_flush_pending); - MCDI_SET_ARRAY_DWORD( - inbuf, FLUSH_RX_QUEUES_IN_QID_OFST, - count, efx_rx_queue_index(rx_queue)); - count++; - } - } - } - - rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf, - MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL); - WARN_ON(rc < 0); - - return rc; -} - int efx_mcdi_wol_filter_reset(struct efx_nic *efx) { int rc; diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index ea612c619874..cdb17d7c147f 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -155,9 +155,6 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, efx_dword_t *outbuf, size_t outlen, size_t *outlen_actual); -int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, - size_t inlen, efx_dword_t *outbuf, - size_t outlen, size_t *outlen_actual); typedef void efx_mcdi_async_completer(struct efx_nic *efx, unsigned long cookie, int rc, @@ -167,11 +164,6 @@ int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, const efx_dword_t *inbuf, size_t inlen, size_t outlen, efx_mcdi_async_completer *complete, unsigned long cookie); -int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd, - const efx_dword_t *inbuf, size_t inlen, - size_t outlen, - efx_mcdi_async_completer *complete, - unsigned long cookie); void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd, size_t inlen, efx_dword_t *outbuf, @@ -410,10 +402,8 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx); int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out); -int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id); int efx_mcdi_wol_filter_reset(struct efx_nic *efx); -int efx_mcdi_flush_rxqs(struct efx_nic *efx); void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev); void efx_mcdi_mac_start_stats(struct efx_nic *efx); void efx_mcdi_mac_stop_stats(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index b54662d32f55..620ba6ef3514 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1408,7 +1408,7 @@ struct efx_nic_type { int (*fini_dmaq)(struct efx_nic *efx); void (*prepare_flr)(struct efx_nic *efx); void (*finish_flr)(struct efx_nic *efx); - size_t (*describe_stats)(struct efx_nic *efx, u8 *names); + size_t (*describe_stats)(struct efx_nic *efx, u8 **names); size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats, struct rtnl_link_stats64 *core_stats); size_t (*update_stats_atomic)(struct efx_nic *efx, u64 *full_stats, diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index a33ed473cc8a..80aa5e9c732a 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -299,18 +299,15 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf) * bits in the first @count bits of @mask for which a name is defined. 
*/ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, - const unsigned long *mask, u8 *names) + const unsigned long *mask, u8 **names) { size_t visible = 0; size_t index; for_each_set_bit(index, mask, count) { if (desc[index].name) { - if (names) { - strscpy(names, desc[index].name, - ETH_GSTRING_LEN); - names += ETH_GSTRING_LEN; - } + if (names) + ethtool_puts(names, desc[index].name); ++visible; } } diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h index 7ec4ac7b7ff5..821d91efda19 100644 --- a/drivers/net/ethernet/sfc/nic_common.h +++ b/drivers/net/ethernet/sfc/nic_common.h @@ -241,7 +241,7 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf); #define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1)) size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, - const unsigned long *mask, u8 *names); + const unsigned long *mask, u8 **names); int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest); void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, const unsigned long *mask, u64 *stats, diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index aaacdcfa54ae..4c7222bf26be 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -399,7 +399,7 @@ static const unsigned long efx_ptp_stat_mask[] = { [0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL, }; -size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings) +size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 **strings) { if (!efx->ptp_data) return 0; @@ -1800,11 +1800,6 @@ int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) return NETDEV_TX_OK; } -int efx_ptp_get_mode(struct efx_nic *efx) -{ - return efx->ptp_data->mode; -} - int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, unsigned int new_mode) { diff --git a/drivers/net/ethernet/sfc/ptp.h b/drivers/net/ethernet/sfc/ptp.h index 6946203499ef..cb9b077921e8 100644 --- a/drivers/net/ethernet/sfc/ptp.h +++ b/drivers/net/ethernet/sfc/ptp.h @@ -26,12 +26,11 @@ int efx_ptp_get_ts_config(struct efx_nic *efx, void efx_ptp_get_ts_info(struct efx_nic *efx, struct kernel_ethtool_ts_info *ts_info); bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); -int efx_ptp_get_mode(struct efx_nic *efx); int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, unsigned int new_mode); int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); -size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings); +size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 **strings); size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats); void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev); void __efx_rx_skb_attach_timestamp(struct efx_channel *channel, diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c index 075fef64de68..eeee676fdca7 100644 --- a/drivers/net/ethernet/sfc/siena/ethtool_common.c +++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c @@ -395,7 +395,7 @@ fail: test->flags |= ETH_TEST_FL_FAILED; } -static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings) +static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 **strings) { size_t n_stats = 0; struct efx_channel *channel; @@ -403,24 +403,22 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings) efx_for_each_channel(channel, efx) { if 
(efx_channel_has_tx_queues(channel)) { n_stats++; - if (strings != NULL) { - snprintf(strings, ETH_GSTRING_LEN, - "tx-%u.tx_packets", - channel->tx_queue[0].queue / - EFX_MAX_TXQ_PER_CHANNEL); + if (!strings) + continue; - strings += ETH_GSTRING_LEN; - } + ethtool_sprintf(strings, "tx-%u.tx_packets", + channel->tx_queue[0].queue / + EFX_MAX_TXQ_PER_CHANNEL); } } efx_for_each_channel(channel, efx) { if (efx_channel_has_rx_queue(channel)) { n_stats++; - if (strings != NULL) { - snprintf(strings, ETH_GSTRING_LEN, - "rx-%d.rx_packets", channel->channel); - strings += ETH_GSTRING_LEN; - } + if (!strings) + continue; + + ethtool_sprintf(strings, "rx-%d.rx_packets", + channel->channel); } } if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) { @@ -428,11 +426,11 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings) for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) { n_stats++; - if (strings) { - snprintf(strings, ETH_GSTRING_LEN, - "tx-xdp-cpu-%hu.tx_packets", xdp); - strings += ETH_GSTRING_LEN; - } + if (!strings) + continue; + + ethtool_sprintf(strings, "tx-xdp-cpu-%hu.tx_packets", + xdp); } } @@ -464,15 +462,11 @@ void efx_siena_ethtool_get_strings(struct net_device *net_dev, switch (string_set) { case ETH_SS_STATS: - strings += (efx->type->describe_stats(efx, strings) * - ETH_GSTRING_LEN); + efx->type->describe_stats(efx, &strings); for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) - strscpy(strings + i * ETH_GSTRING_LEN, - efx_sw_stat_desc[i].name, ETH_GSTRING_LEN); - strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN; - strings += (efx_describe_per_queue_stats(efx, strings) * - ETH_GSTRING_LEN); - efx_siena_ptp_describe_stats(efx, strings); + ethtool_puts(&strings, efx_sw_stat_desc[i].name); + efx_describe_per_queue_stats(efx, &strings); + efx_siena_ptp_describe_stats(efx, &strings); break; case ETH_SS_TEST: efx_ethtool_fill_self_tests(efx, NULL, strings, NULL); diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h index 3fa7c652ae9b..9785eff10607 100644 --- a/drivers/net/ethernet/sfc/siena/net_driver.h +++ b/drivers/net/ethernet/sfc/siena/net_driver.h @@ -1307,7 +1307,7 @@ struct efx_nic_type { void (*finish_flush)(struct efx_nic *efx); void (*prepare_flr)(struct efx_nic *efx); void (*finish_flr)(struct efx_nic *efx); - size_t (*describe_stats)(struct efx_nic *efx, u8 *names); + size_t (*describe_stats)(struct efx_nic *efx, u8 **names); size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats, struct rtnl_link_stats64 *core_stats); size_t (*update_stats_atomic)(struct efx_nic *efx, u64 *full_stats, diff --git a/drivers/net/ethernet/sfc/siena/nic.c b/drivers/net/ethernet/sfc/siena/nic.c index 0ea0433a6230..32fce70085e3 100644 --- a/drivers/net/ethernet/sfc/siena/nic.c +++ b/drivers/net/ethernet/sfc/siena/nic.c @@ -449,20 +449,20 @@ void efx_siena_get_regs(struct efx_nic *efx, void *buf) * Returns the number of visible statistics, i.e. the number of set * bits in the first @count bits of @mask for which a name is defined. 
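Note the converted helpers keep their dual use: ethtool's get_sset_count() path calls them with a NULL strings argument to count only, while get_strings() passes a live cursor. A sketch of that pattern, with a hypothetical stat name:

/* Hypothetical describe helper: counts always, emits names only
 * when a cursor is supplied (the count-only pass passes NULL). */
static size_t example_describe(u8 **strings)
{
	size_t n_stats = 1;	/* one statistic in this sketch */

	if (strings)
		ethtool_puts(strings, "example.stat");
	return n_stats;
}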
*/ -size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, - const unsigned long *mask, u8 *names) +size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, + size_t count, const unsigned long *mask, + u8 **names) { size_t visible = 0; size_t index; for_each_set_bit(index, mask, count) { if (desc[index].name) { - if (names) { - strscpy(names, desc[index].name, - ETH_GSTRING_LEN); - names += ETH_GSTRING_LEN; - } ++visible; + if (!names) + continue; + + ethtool_puts(names, desc[index].name); } } diff --git a/drivers/net/ethernet/sfc/siena/nic_common.h b/drivers/net/ethernet/sfc/siena/nic_common.h index 3af0405eeaa4..b7fbe198008d 100644 --- a/drivers/net/ethernet/sfc/siena/nic_common.h +++ b/drivers/net/ethernet/sfc/siena/nic_common.h @@ -239,8 +239,9 @@ void efx_siena_get_regs(struct efx_nic *efx, void *buf); #define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1)) -size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, - const unsigned long *mask, u8 *names); +size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, + size_t count, const unsigned long *mask, + u8 **names); void efx_siena_update_stats(const struct efx_hw_stat_desc *desc, size_t count, const unsigned long *mask, u64 *stats, const void *dma_buf, bool accumulate); diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c index 85005196b4c5..062c77c92077 100644 --- a/drivers/net/ethernet/sfc/siena/ptp.c +++ b/drivers/net/ethernet/sfc/siena/ptp.c @@ -393,7 +393,7 @@ static const unsigned long efx_ptp_stat_mask[] = { [0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL, }; -size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 *strings) +size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 **strings) { if (!efx->ptp_data) return 0; diff --git a/drivers/net/ethernet/sfc/siena/ptp.h b/drivers/net/ethernet/sfc/siena/ptp.h index b6133e7c5608..54840036ab67 100644 --- a/drivers/net/ethernet/sfc/siena/ptp.h +++ b/drivers/net/ethernet/sfc/siena/ptp.h @@ -28,7 +28,7 @@ int efx_siena_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, unsigned int new_mode); int efx_siena_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); void efx_siena_ptp_event(struct efx_nic *efx, efx_qword_t *ev); -size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 *strings); +size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 **strings); size_t efx_siena_ptp_update_stats(struct efx_nic *efx, u64 *stats); void efx_siena_time_sync_event(struct efx_channel *channel, efx_qword_t *ev); void __efx_siena_rx_skb_attach_timestamp(struct efx_channel *channel, diff --git a/drivers/net/ethernet/sfc/siena/siena.c b/drivers/net/ethernet/sfc/siena/siena.c index ca33dc08e555..49f0c8a1a90a 100644 --- a/drivers/net/ethernet/sfc/siena/siena.c +++ b/drivers/net/ethernet/sfc/siena/siena.c @@ -545,7 +545,7 @@ static const unsigned long siena_stat_mask[] = { [0 ... 
BITS_TO_LONGS(SIENA_STAT_COUNT) - 1] = ~0UL, }; -static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 *names) +static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 **names) { return efx_siena_describe_stats(siena_stat_desc, SIENA_STAT_COUNT, siena_stat_mask, names); diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 822ec6564b2d..4dff19b6ef17 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -49,14 +49,6 @@ static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue, return (u8 *)page_buf->addr + offset; } -u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue, - struct efx_tx_buffer *buffer, size_t len) -{ - if (len > EFX_TX_CB_SIZE) - return NULL; - return efx_tx_get_copy_buffer(tx_queue, buffer); -} - static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1) { /* We need to consider all queues that the net core sees as one */ diff --git a/drivers/net/ethernet/sfc/tx.h b/drivers/net/ethernet/sfc/tx.h index f2c4d2f89919..f882749af8c3 100644 --- a/drivers/net/ethernet/sfc/tx.h +++ b/drivers/net/ethernet/sfc/tx.h @@ -15,9 +15,6 @@ unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue, dma_addr_t dma_addr, unsigned int len); -u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue, - struct efx_tx_buffer *buffer, size_t len); - /* What TXQ type will satisfy the checksum offloads required for this skb? */ static inline unsigned int efx_tx_csum_type_skb(struct sk_buff *skb) { diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 05cc07b8f48c..6658536a4e17 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -228,6 +228,16 @@ config DWMAC_SUN8I stmmac device driver. This driver is used for H3/A83T/A64 EMAC ethernet controller. +config DWMAC_THEAD + tristate "T-HEAD dwmac support" + depends on OF && (ARCH_THEAD || COMPILE_TEST) + help + Support for ethernet controllers on T-HEAD RISC-V SoCs + + This selects the T-HEAD platform specific glue layer support for + the stmmac device driver. This driver is used for T-HEAD TH1520 + ethernet controller. 
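"Glue layer" above means a small platform driver that fills in plat_stmmacenet_data hooks and leaves the MAC itself to the stmmac core. A condensed sketch of that skeleton, using only APIs that appear in this diff (illustrative; the complete dwmac-thead.c follows below):

#include <linux/err.h>
#include <linux/platform_device.h>

#include "stmmac_platform.h"

/* Minimal stmmac glue probe: parse DT, install SoC hooks, defer
 * everything else to the common platform core. */
static int example_glue_probe(struct platform_device *pdev)
{
	struct plat_stmmacenet_data *plat;
	struct stmmac_resources res;
	int ret;

	ret = stmmac_get_platform_resources(pdev, &res);
	if (ret)
		return ret;

	plat = devm_stmmac_probe_config_dt(pdev, res.mac);
	if (IS_ERR(plat))
		return PTR_ERR(plat);

	plat->bsp_priv = NULL;		/* SoC-private state would go here */
	plat->init = NULL;		/* e.g. phy-interface mux setup */
	plat->fix_mac_speed = NULL;	/* e.g. per-speed clock dividers */

	return devm_stmmac_pltfr_probe(pdev, plat, &res);
}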
+ config DWMAC_IMX8 tristate "NXP IMX8 DWMAC support" default ARCH_MXC diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 7e46dca90628..2389fd261344 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o +obj-$(CONFIG_DWMAC_THEAD) += dwmac-thead.o obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o obj-$(CONFIG_DWMAC_INTEL_PLAT) += dwmac-intel-plat.o obj-$(CONFIG_DWMAC_LOONGSON1) += dwmac-loongson1.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 684489156dce..1367fa5c9b8e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -549,8 +549,12 @@ extern const struct stmmac_desc_ops ndesc_ops; struct mac_device_info; extern const struct stmmac_hwtimestamp stmmac_ptp; +extern const struct stmmac_hwtimestamp dwmac1000_ptp; extern const struct stmmac_mode_ops dwmac4_ring_mode_ops; +extern const struct ptp_clock_info stmmac_ptp_clock_ops; +extern const struct ptp_clock_info dwmac1000_ptp_clock_ops; + struct mac_link { u32 caps; u32 speed_mask; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c index 598eff926815..b9218c07eb6b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c @@ -56,6 +56,7 @@ static const struct of_device_id dwmac_generic_match[] = { { .compatible = "snps,dwmac-3.610"}, { .compatible = "snps,dwmac-3.70a"}, { .compatible = "snps,dwmac-3.710"}, + { .compatible = "snps,dwmac-3.72a"}, { .compatible = "snps,dwmac-4.00"}, { .compatible = "snps,dwmac-4.10a"}, { .compatible = "snps,dwmac"}, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c index 230e79658c54..a433526dcbe8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c @@ -108,7 +108,12 @@ static int intel_eth_plat_probe(struct platform_device *pdev) if (IS_ERR(dwmac->tx_clk)) return PTR_ERR(dwmac->tx_clk); - clk_prepare_enable(dwmac->tx_clk); + ret = clk_prepare_enable(dwmac->tx_clk); + if (ret) { + dev_err(&pdev->dev, + "Failed to enable tx_clk\n"); + return ret; + } /* Check and configure TX clock rate */ rate = clk_get_rate(dwmac->tx_clk); @@ -119,7 +124,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Failed to set tx_clk\n"); - return ret; + goto err_tx_clk_disable; } } } @@ -133,7 +138,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Failed to set clk_ptp_ref\n"); - return ret; + goto err_tx_clk_disable; } } } @@ -149,12 +154,15 @@ static int intel_eth_plat_probe(struct platform_device *pdev) } ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (ret) { - clk_disable_unprepare(dwmac->tx_clk); - return ret; - } + if (ret) + goto err_tx_clk_disable; return 0; + +err_tx_clk_disable: + if (dwmac->data->tx_clk_en) + clk_disable_unprepare(dwmac->tx_clk); + return ret; } static void intel_eth_plat_remove(struct platform_device *pdev) @@ -162,7 +170,8 @@ static void intel_eth_plat_remove(struct platform_device *pdev) 
struct intel_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev); stmmac_pltfr_remove(pdev); - clk_disable_unprepare(dwmac->tx_clk); + if (dwmac->data->tx_clk_en) + clk_disable_unprepare(dwmac->tx_clk); } static struct platform_driver intel_eth_plat_driver = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c index f8ca81675407..c9636832a570 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c @@ -589,9 +589,9 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev, plat->mac_interface = priv_plat->phy_mode; if (priv_plat->mac_wol) - plat->flags |= STMMAC_FLAG_USE_PHY_WOL; - else plat->flags &= ~STMMAC_FLAG_USE_PHY_WOL; + else + plat->flags |= STMMAC_FLAG_USE_PHY_WOL; plat->riwt_off = 1; plat->maxmtu = ETH_DATA_LEN; plat->host_dma_width = priv_plat->variant->dma_bit_mask; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 0745117d5872..248b30d7b864 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -485,6 +485,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) plat_dat->pcs_init = socfpga_dwmac_pcs_init; plat_dat->pcs_exit = socfpga_dwmac_pcs_exit; plat_dat->select_pcs = socfpga_dwmac_select_pcs; + plat_dat->has_gmac = true; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c new file mode 100644 index 000000000000..dce84ed184e9 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * T-HEAD DWMAC platform driver + * + * Copyright (C) 2021 Alibaba Group Holding Limited. 
+ * Copyright (C) 2023 Jisheng Zhang <jszhang@kernel.org> + * + */ + +#include <linux/bitfield.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_net.h> +#include <linux/platform_device.h> + +#include "stmmac_platform.h" + +#define GMAC_CLK_EN 0x00 +#define GMAC_TX_CLK_EN BIT(1) +#define GMAC_TX_CLK_N_EN BIT(2) +#define GMAC_TX_CLK_OUT_EN BIT(3) +#define GMAC_RX_CLK_EN BIT(4) +#define GMAC_RX_CLK_N_EN BIT(5) +#define GMAC_EPHY_REF_CLK_EN BIT(6) +#define GMAC_RXCLK_DELAY_CTRL 0x04 +#define GMAC_RXCLK_BYPASS BIT(15) +#define GMAC_RXCLK_INVERT BIT(14) +#define GMAC_RXCLK_DELAY GENMASK(4, 0) +#define GMAC_TXCLK_DELAY_CTRL 0x08 +#define GMAC_TXCLK_BYPASS BIT(15) +#define GMAC_TXCLK_INVERT BIT(14) +#define GMAC_TXCLK_DELAY GENMASK(4, 0) +#define GMAC_PLLCLK_DIV 0x0c +#define GMAC_PLLCLK_DIV_EN BIT(31) +#define GMAC_PLLCLK_DIV_NUM GENMASK(7, 0) +#define GMAC_GTXCLK_SEL 0x18 +#define GMAC_GTXCLK_SEL_PLL BIT(0) +#define GMAC_INTF_CTRL 0x1c +#define PHY_INTF_MASK BIT(0) +#define PHY_INTF_RGMII FIELD_PREP(PHY_INTF_MASK, 1) +#define PHY_INTF_MII_GMII FIELD_PREP(PHY_INTF_MASK, 0) +#define GMAC_TXCLK_OEN 0x20 +#define TXCLK_DIR_MASK BIT(0) +#define TXCLK_DIR_OUTPUT FIELD_PREP(TXCLK_DIR_MASK, 0) +#define TXCLK_DIR_INPUT FIELD_PREP(TXCLK_DIR_MASK, 1) + +#define GMAC_GMII_RGMII_RATE 125000000 +#define GMAC_MII_RATE 25000000 + +struct thead_dwmac { + struct plat_stmmacenet_data *plat; + void __iomem *apb_base; + struct device *dev; +}; + +static int thead_dwmac_set_phy_if(struct plat_stmmacenet_data *plat) +{ + struct thead_dwmac *dwmac = plat->bsp_priv; + u32 phyif; + + switch (plat->mac_interface) { + case PHY_INTERFACE_MODE_MII: + phyif = PHY_INTF_MII_GMII; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: + phyif = PHY_INTF_RGMII; + break; + default: + dev_err(dwmac->dev, "unsupported phy interface %d\n", + plat->mac_interface); + return -EINVAL; + } + + writel(phyif, dwmac->apb_base + GMAC_INTF_CTRL); + return 0; +} + +static int thead_dwmac_set_txclk_dir(struct plat_stmmacenet_data *plat) +{ + struct thead_dwmac *dwmac = plat->bsp_priv; + u32 txclk_dir; + + switch (plat->mac_interface) { + case PHY_INTERFACE_MODE_MII: + txclk_dir = TXCLK_DIR_INPUT; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: + txclk_dir = TXCLK_DIR_OUTPUT; + break; + default: + dev_err(dwmac->dev, "unsupported phy interface %d\n", + plat->mac_interface); + return -EINVAL; + } + + writel(txclk_dir, dwmac->apb_base + GMAC_TXCLK_OEN); + return 0; +} + +static void thead_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode) +{ + struct plat_stmmacenet_data *plat; + struct thead_dwmac *dwmac = priv; + unsigned long rate; + u32 div, reg; + + plat = dwmac->plat; + + switch (plat->mac_interface) { + /* For MII, rxc/txc is provided by phy */ + case PHY_INTERFACE_MODE_MII: + return; + + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + rate = clk_get_rate(plat->stmmac_clk); + if (!rate || rate % GMAC_GMII_RGMII_RATE != 0 || + rate % GMAC_MII_RATE != 0) { + dev_err(dwmac->dev, "invalid gmac rate %ld\n", rate); + return; + } + + writel(0, dwmac->apb_base + GMAC_PLLCLK_DIV); + + switch (speed) { + case SPEED_1000: + div = rate / GMAC_GMII_RGMII_RATE; + break; + case SPEED_100: + 
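			/* Standard RGMII TXC rates: 125 MHz at 1G (above),
			 * 25 MHz at 100M (here) and 2.5 MHz at 10M (below,
			 * hence rate * 10 / GMAC_MII_RATE). */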
div = rate / GMAC_MII_RATE; + break; + case SPEED_10: + div = rate * 10 / GMAC_MII_RATE; + break; + default: + dev_err(dwmac->dev, "invalid speed %u\n", speed); + return; + } + + reg = FIELD_PREP(GMAC_PLLCLK_DIV_EN, 1) | + FIELD_PREP(GMAC_PLLCLK_DIV_NUM, div); + writel(reg, dwmac->apb_base + GMAC_PLLCLK_DIV); + break; + default: + dev_err(dwmac->dev, "unsupported phy interface %d\n", + plat->mac_interface); + return; + } +} + +static int thead_dwmac_enable_clk(struct plat_stmmacenet_data *plat) +{ + struct thead_dwmac *dwmac = plat->bsp_priv; + u32 reg; + + switch (plat->mac_interface) { + case PHY_INTERFACE_MODE_MII: + reg = GMAC_RX_CLK_EN | GMAC_TX_CLK_EN; + break; + + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + /* use pll */ + writel(GMAC_GTXCLK_SEL_PLL, dwmac->apb_base + GMAC_GTXCLK_SEL); + reg = GMAC_TX_CLK_EN | GMAC_TX_CLK_N_EN | GMAC_TX_CLK_OUT_EN | + GMAC_RX_CLK_EN | GMAC_RX_CLK_N_EN; + break; + + default: + dev_err(dwmac->dev, "unsupported phy interface %d\n", + plat->mac_interface); + return -EINVAL; + } + + writel(reg, dwmac->apb_base + GMAC_CLK_EN); + return 0; +} + +static int thead_dwmac_init(struct platform_device *pdev, void *priv) +{ + struct thead_dwmac *dwmac = priv; + unsigned int reg; + int ret; + + ret = thead_dwmac_set_phy_if(dwmac->plat); + if (ret) + return ret; + + ret = thead_dwmac_set_txclk_dir(dwmac->plat); + if (ret) + return ret; + + reg = readl(dwmac->apb_base + GMAC_RXCLK_DELAY_CTRL); + reg &= ~(GMAC_RXCLK_DELAY); + reg |= FIELD_PREP(GMAC_RXCLK_DELAY, 0); + writel(reg, dwmac->apb_base + GMAC_RXCLK_DELAY_CTRL); + + reg = readl(dwmac->apb_base + GMAC_TXCLK_DELAY_CTRL); + reg &= ~(GMAC_TXCLK_DELAY); + reg |= FIELD_PREP(GMAC_TXCLK_DELAY, 0); + writel(reg, dwmac->apb_base + GMAC_TXCLK_DELAY_CTRL); + + return thead_dwmac_enable_clk(dwmac->plat); +} + +static int thead_dwmac_probe(struct platform_device *pdev) +{ + struct stmmac_resources stmmac_res; + struct plat_stmmacenet_data *plat; + struct thead_dwmac *dwmac; + void __iomem *apb; + int ret; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "failed to get resources\n"); + + plat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat)) + return dev_err_probe(&pdev->dev, PTR_ERR(plat), + "dt configuration failed\n"); + + dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); + if (!dwmac) + return -ENOMEM; + + apb = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(apb)) + return dev_err_probe(&pdev->dev, PTR_ERR(apb), + "failed to remap gmac apb registers\n"); + + dwmac->dev = &pdev->dev; + dwmac->plat = plat; + dwmac->apb_base = apb; + plat->bsp_priv = dwmac; + plat->fix_mac_speed = thead_dwmac_fix_speed; + plat->init = thead_dwmac_init; + + return devm_stmmac_pltfr_probe(pdev, plat, &stmmac_res); +} + +static const struct of_device_id thead_dwmac_match[] = { + { .compatible = "thead,th1520-gmac" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, thead_dwmac_match); + +static struct platform_driver thead_dwmac_driver = { + .probe = thead_dwmac_probe, + .driver = { + .name = "thead-dwmac", + .pm = &stmmac_pltfr_pm_ops, + .of_match_table = thead_dwmac_match, + }, +}; +module_platform_driver(thead_dwmac_driver); + +MODULE_AUTHOR("Jisheng Zhang <jszhang@kernel.org>"); +MODULE_AUTHOR("Drew Fustini <drew@pdp7.com>"); +MODULE_DESCRIPTION("T-HEAD DWMAC platform driver"); +MODULE_LICENSE("GPL"); diff --git 
a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index 4296ddda8aaa..600fea8f712f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -329,5 +329,17 @@ enum rtc_control { #define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 #define GMAC_EXTHASH_BASE 0x500 +/* PTP and timestamping registers */ + +#define GMAC3_X_ATSNS GENMASK(19, 16) +#define GMAC3_X_ATSNS_SHIFT 16 + +#define GMAC_PTP_TCR_ATSFC BIT(24) +#define GMAC_PTP_TCR_ATSEN0 BIT(25) + +#define GMAC3_X_TIMESTAMP_STATUS 0x28 +#define GMAC_PTP_ATNR 0x30 +#define GMAC_PTP_ATSR 0x34 + extern const struct stmmac_dma_ops dwmac1000_dma_ops; #endif /* __DWMAC1000_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index d413d76a8936..96bcda0856ec 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -18,6 +18,7 @@ #include <linux/io.h> #include "stmmac.h" #include "stmmac_pcs.h" +#include "stmmac_ptp.h" #include "dwmac1000.h" static void dwmac1000_core_init(struct mac_device_info *hw, @@ -551,3 +552,103 @@ int dwmac1000_setup(struct stmmac_priv *priv) return 0; } + +/* DWMAC 1000 HW Timestamping ops */ + +void dwmac1000_get_ptptime(void __iomem *ptpaddr, u64 *ptp_time) +{ + u64 ns; + + ns = readl(ptpaddr + GMAC_PTP_ATNR); + ns += readl(ptpaddr + GMAC_PTP_ATSR) * NSEC_PER_SEC; + + *ptp_time = ns; +} + +void dwmac1000_timestamp_interrupt(struct stmmac_priv *priv) +{ + struct ptp_clock_event event; + u32 ts_status, num_snapshot; + unsigned long flags; + u64 ptp_time; + int i; + + /* Clears the timestamp interrupt */ + ts_status = readl(priv->ptpaddr + GMAC3_X_TIMESTAMP_STATUS); + + if (!(priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN)) + return; + + num_snapshot = (ts_status & GMAC3_X_ATSNS) >> GMAC3_X_ATSNS_SHIFT; + + for (i = 0; i < num_snapshot; i++) { + read_lock_irqsave(&priv->ptp_lock, flags); + stmmac_get_ptptime(priv, priv->ptpaddr, &ptp_time); + read_unlock_irqrestore(&priv->ptp_lock, flags); + + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = ptp_time; + ptp_clock_event(priv->ptp_clock, &event); + } +} + +/* DWMAC 1000 ptp_clock_info ops */ + +static void dwmac1000_timestamp_interrupt_cfg(struct stmmac_priv *priv, bool en) +{ + void __iomem *ioaddr = priv->ioaddr; + + u32 intr_mask = readl(ioaddr + GMAC_INT_MASK); + + if (en) + intr_mask &= ~GMAC_INT_DISABLE_TIMESTAMP; + else + intr_mask |= GMAC_INT_DISABLE_TIMESTAMP; + + writel(intr_mask, ioaddr + GMAC_INT_MASK); +} + +int dwmac1000_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + void __iomem *ptpaddr = priv->ptpaddr; + int ret = -EOPNOTSUPP; + u32 tcr_val; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + mutex_lock(&priv->aux_ts_lock); + tcr_val = readl(ptpaddr + PTP_TCR); + + if (on) { + tcr_val |= GMAC_PTP_TCR_ATSEN0; + tcr_val |= GMAC_PTP_TCR_ATSFC; + priv->plat->flags |= STMMAC_FLAG_EXT_SNAPSHOT_EN; + } else { + tcr_val &= ~GMAC_PTP_TCR_ATSEN0; + priv->plat->flags &= ~STMMAC_FLAG_EXT_SNAPSHOT_EN; + } + + netdev_dbg(priv->dev, "Auxiliary Snapshot %s.\n", + on ?
"enabled" : "disabled"); + writel(tcr_val, ptpaddr + PTP_TCR); + + /* wait for auxts fifo clear to finish */ + ret = readl_poll_timeout(ptpaddr + PTP_TCR, tcr_val, + !(tcr_val & GMAC_PTP_TCR_ATSFC), + 10, 10000); + + mutex_unlock(&priv->aux_ts_lock); + + dwmac1000_timestamp_interrupt_cfg(priv, on); + break; + + default: + break; + } + + return ret; +} diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 0c050324997a..184d41a306af 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -393,8 +393,8 @@ static inline u32 mtl_chanx_base_addr(const struct dwmac4_addrs *addrs, #define MTL_OP_MODE_EHFC BIT(7) -#define MTL_OP_MODE_RTC_MASK 0x18 -#define MTL_OP_MODE_RTC_SHIFT 3 +#define MTL_OP_MODE_RTC_MASK GENMASK(1, 0) +#define MTL_OP_MODE_RTC_SHIFT 0 #define MTL_OP_MODE_RTC_32 (1 << MTL_OP_MODE_RTC_SHIFT) #define MTL_OP_MODE_RTC_64 0 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index 22a044d93e17..0cb84a0041a4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -274,7 +274,7 @@ static void dwmac4_dma_rx_chan_op_mode(struct stmmac_priv *priv, } else { pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode); mtl_rx_op &= ~MTL_OP_MODE_RSF; - mtl_rx_op &= MTL_OP_MODE_RTC_MASK; + mtl_rx_op &= ~MTL_OP_MODE_RTC_MASK; if (mode <= 32) mtl_rx_op |= MTL_OP_MODE_RTC_32; else if (mode <= 64) @@ -343,7 +343,7 @@ static void dwmac4_dma_tx_chan_op_mode(struct stmmac_priv *priv, } else { pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode); mtl_tx_op &= ~MTL_OP_MODE_TSF; - mtl_tx_op &= MTL_OP_MODE_TTC_MASK; + mtl_tx_op &= ~MTL_OP_MODE_TTC_MASK; /* Set the transmit threshold */ if (mode <= 32) mtl_tx_op |= MTL_OP_MODE_TTC_32; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c index 0d185e54eb7e..57c03d491774 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -185,8 +185,6 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, x->rx_buf_unav_irq++; if (unlikely(intr_status & DMA_CHAN_STATUS_RPS)) x->rx_process_stopped_irq++; - if (unlikely(intr_status & DMA_CHAN_STATUS_RWT)) - x->rx_watchdog_irq++; if (unlikely(intr_status & DMA_CHAN_STATUS_ETI)) x->tx_early_irq++; if (unlikely(intr_status & DMA_CHAN_STATUS_TPS)) { @@ -198,6 +196,10 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, ret = tx_hard_error; } } + + if (unlikely(intr_status & DMA_CHAN_STATUS_RWT)) + x->rx_watchdog_irq++; + /* TX/RX NORMAL interrupts */ if (likely(intr_status & DMA_CHAN_STATUS_RI)) { u64_stats_update_begin(&stats->syncp); diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index cfc50289aed6..a72d336a8350 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -113,6 +113,7 @@ static const struct stmmac_hwif_entry { const void *dma; const void *mac; const void *hwtimestamp; + const void *ptp; const void *mode; const void *tc; const void *mmc; @@ -133,7 +134,8 @@ static const struct stmmac_hwif_entry { .desc = NULL, .dma = &dwmac100_dma_ops, .mac = &dwmac100_ops, - .hwtimestamp = &stmmac_ptp, + .hwtimestamp = &dwmac1000_ptp, + .ptp = &dwmac1000_ptp_clock_ops, .mode = NULL, .tc = NULL, .mmc = &dwmac_mmc_ops, @@ 
-151,7 +153,8 @@ static const struct stmmac_hwif_entry { .desc = NULL, .dma = &dwmac1000_dma_ops, .mac = &dwmac1000_ops, - .hwtimestamp = &stmmac_ptp, + .hwtimestamp = &dwmac1000_ptp, + .ptp = &dwmac1000_ptp_clock_ops, .mode = NULL, .tc = NULL, .mmc = &dwmac_mmc_ops, @@ -171,6 +174,7 @@ static const struct stmmac_hwif_entry { .dma = &dwmac4_dma_ops, .mac = &dwmac4_ops, .hwtimestamp = &stmmac_ptp, + .ptp = &stmmac_ptp_clock_ops, .mode = NULL, .tc = &dwmac4_tc_ops, .mmc = &dwmac_mmc_ops, @@ -192,6 +196,7 @@ static const struct stmmac_hwif_entry { .dma = &dwmac4_dma_ops, .mac = &dwmac410_ops, .hwtimestamp = &stmmac_ptp, + .ptp = &stmmac_ptp_clock_ops, .mode = &dwmac4_ring_mode_ops, .tc = &dwmac510_tc_ops, .mmc = &dwmac_mmc_ops, @@ -213,6 +218,7 @@ static const struct stmmac_hwif_entry { .dma = &dwmac410_dma_ops, .mac = &dwmac410_ops, .hwtimestamp = &stmmac_ptp, + .ptp = &stmmac_ptp_clock_ops, .mode = &dwmac4_ring_mode_ops, .tc = &dwmac510_tc_ops, .mmc = &dwmac_mmc_ops, @@ -234,6 +240,7 @@ static const struct stmmac_hwif_entry { .dma = &dwmac410_dma_ops, .mac = &dwmac510_ops, .hwtimestamp = &stmmac_ptp, + .ptp = &stmmac_ptp_clock_ops, .mode = &dwmac4_ring_mode_ops, .tc = &dwmac510_tc_ops, .mmc = &dwmac_mmc_ops, @@ -256,6 +263,7 @@ static const struct stmmac_hwif_entry { .dma = &dwxgmac210_dma_ops, .mac = &dwxgmac210_ops, .hwtimestamp = &stmmac_ptp, + .ptp = &stmmac_ptp_clock_ops, .mode = NULL, .tc = &dwxgmac_tc_ops, .mmc = &dwxgmac_mmc_ops, @@ -278,6 +286,7 @@ static const struct stmmac_hwif_entry { .dma = &dwxgmac210_dma_ops, .mac = &dwxlgmac2_ops, .hwtimestamp = &stmmac_ptp, + .ptp = &stmmac_ptp_clock_ops, .mode = NULL, .tc = &dwxgmac_tc_ops, .mmc = &dwxgmac_mmc_ops, @@ -362,6 +371,8 @@ int stmmac_hwif_init(struct stmmac_priv *priv) priv->fpe_cfg.reg = entry->regs.fpe_reg; priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off; priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off; + memcpy(&priv->ptp_clock_ops, entry->ptp, + sizeof(struct ptp_clock_info)); if (entry->est) priv->estaddr = priv->ioaddr + entry->regs.est_off; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 5ef52ef2698f..0f59aa982604 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -18,9 +18,22 @@ #include "dwmac4.h" #include "stmmac.h" +#define STMMAC_HWTS_CFG_MASK (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \ + PTP_TCR_TSINIT | PTP_TCR_TSUPDT | \ + PTP_TCR_TSCTRLSSR | PTP_TCR_SNAPTYPSEL_1 | \ + PTP_TCR_TSIPV4ENA | PTP_TCR_TSIPV6ENA | \ + PTP_TCR_TSEVNTENA | PTP_TCR_TSMSTRENA | \ + PTP_TCR_TSVER2ENA | PTP_TCR_TSIPENA | \ + PTP_TCR_TSTRIG | PTP_TCR_TSENALL) + static void config_hw_tstamping(void __iomem *ioaddr, u32 data) { - writel(data, ioaddr + PTP_TCR); + u32 regval = readl(ioaddr + PTP_TCR); + + regval &= ~STMMAC_HWTS_CFG_MASK; + regval |= data; + + writel(regval, ioaddr + PTP_TCR); } static void config_sub_second_increment(void __iomem *ioaddr, @@ -269,3 +282,14 @@ const struct stmmac_hwtimestamp stmmac_ptp = { .timestamp_interrupt = timestamp_interrupt, .hwtstamp_correct_latency = hwtstamp_correct_latency, }; + +const struct stmmac_hwtimestamp dwmac1000_ptp = { + .config_hw_tstamping = config_hw_tstamping, + .init_systime = init_systime, + .config_sub_second_increment = config_sub_second_increment, + .config_addend = config_addend, + .adjust_systime = adjust_systime, + .get_systime = get_systime, + .get_ptptime = dwmac1000_get_ptptime, + .timestamp_interrupt = 
dwmac1000_timestamp_interrupt, +}; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 12f0db0e8830..3cdc3910f3a0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3752,6 +3752,7 @@ static int stmmac_request_irq_single(struct net_device *dev) /* Request the Wake IRQ in case of another line * is used for WoL */ + priv->wol_irq_disabled = true; if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { ret = request_irq(priv->wol_irq, stmmac_interrupt, IRQF_SHARED, dev->name, dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index ad868e8d195d..3ac32444e492 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -522,6 +522,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) if (of_device_is_compatible(np, "st,spear600-gmac") || of_device_is_compatible(np, "snps,dwmac-3.50a") || of_device_is_compatible(np, "snps,dwmac-3.70a") || + of_device_is_compatible(np, "snps,dwmac-3.72a") || of_device_is_compatible(np, "snps,dwmac")) { /* Note that the max-frame-size parameter as defined in the * ePAPR v1.1 spec is defined as max-frame-size, it's diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index a6b1de9a251d..429b2d357813 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -9,7 +9,6 @@ *******************************************************************************/ #include "stmmac.h" #include "stmmac_ptp.h" -#include "dwmac4.h" /** * stmmac_adjust_freq @@ -265,7 +264,7 @@ static int stmmac_getcrosststamp(struct ptp_clock_info *ptp, } /* structure describing a PTP hardware clock */ -static struct ptp_clock_info stmmac_ptp_clock_ops = { +const struct ptp_clock_info stmmac_ptp_clock_ops = { .owner = THIS_MODULE, .name = "stmmac ptp", .max_adj = 62500000, @@ -282,6 +281,24 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = { .getcrosststamp = stmmac_getcrosststamp, }; +/* structure describing a PTP hardware clock */ +const struct ptp_clock_info dwmac1000_ptp_clock_ops = { + .owner = THIS_MODULE, + .name = "stmmac ptp", + .max_adj = 62500000, + .n_alarm = 0, + .n_ext_ts = 1, + .n_per_out = 0, + .n_pins = 0, + .pps = 0, + .adjfine = stmmac_adjust_freq, + .adjtime = stmmac_adjust_time, + .gettime64 = stmmac_get_time, + .settime64 = stmmac_set_time, + .enable = dwmac1000_ptp_enable, + .getcrosststamp = stmmac_getcrosststamp, +}; + /** * stmmac_ptp_register * @priv: driver private structure @@ -298,20 +315,25 @@ void stmmac_ptp_register(struct stmmac_priv *priv) priv->pps[i].available = true; } - if (priv->plat->ptp_max_adj) - stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj; - /* Calculate the clock domain crossing (CDC) error if necessary */ priv->plat->cdc_error_adj = 0; if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) priv->plat->cdc_error_adj = (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate; - stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num; - stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n; + /* Update the ptp clock parameters based on feature discovery, when + * available + */ + if (priv->dma_cap.pps_out_num) + priv->ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num; + + if (priv->dma_cap.aux_snapshot_n) + 
priv->ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n; + + if (priv->plat->ptp_max_adj) + priv->ptp_clock_ops.max_adj = priv->plat->ptp_max_adj; rwlock_init(&priv->ptp_lock); mutex_init(&priv->aux_ts_lock); - priv->ptp_clock_ops = stmmac_ptp_clock_ops; priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops, priv->device); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index fce3fba2ffd2..4cc70480ce0f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -94,4 +94,14 @@ enum aux_snapshot { AUX_SNAPSHOT3 = 0x80, }; +struct ptp_clock_info; +struct ptp_clock_request; +struct stmmac_priv; + +int dwmac1000_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on); + +void dwmac1000_get_ptptime(void __iomem *ptpaddr, u64 *ptp_time); +void dwmac1000_timestamp_interrupt(struct stmmac_priv *priv); + #endif /* __STMMAC_PTP_H__ */ diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 6201a09fa5f0..47fb5af16796 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -337,9 +337,9 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common, struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; struct cppi5_host_desc_t *desc_rx; struct device *dev = common->dev; + struct am65_cpsw_swdata *swdata; dma_addr_t desc_dma; dma_addr_t buf_dma; - void *swdata; desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool); if (!desc_rx) { @@ -363,7 +363,8 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common, cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE, buf_dma, AM65_CPSW_MAX_PACKET_SIZE); swdata = cppi5_hdesc_get_swdata(desc_rx); - *((void **)swdata) = page_address(page); + swdata->page = page; + swdata->flow_id = flow_idx; return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, flow_idx, desc_rx, desc_dma); @@ -519,36 +520,31 @@ static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_ch static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow, struct page *page, - bool allow_direct, - int desc_idx) + bool allow_direct) { page_pool_put_full_page(flow->page_pool, page, allow_direct); - flow->pages[desc_idx] = NULL; } static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma) { - struct am65_cpsw_rx_flow *flow = data; + struct am65_cpsw_rx_chn *rx_chn = data; struct cppi5_host_desc_t *desc_rx; - struct am65_cpsw_rx_chn *rx_chn; + struct am65_cpsw_swdata *swdata; dma_addr_t buf_dma; + struct page *page; u32 buf_dma_len; - void *page_addr; - void **swdata; - int desc_idx; + u32 flow_id; - rx_chn = &flow->common->rx_chns; desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); swdata = cppi5_hdesc_get_swdata(desc_rx); - page_addr = *swdata; + page = swdata->page; + flow_id = swdata->flow_id; cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma); dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE); k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); - desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx, - rx_chn->dsize_log2); - am65_cpsw_put_page(flow, virt_to_page(page_addr), false, desc_idx); + am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false); } static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn, @@ -703,14 +699,13 @@ static int am65_cpsw_nuss_common_open(struct 
am65_cpsw_common *common) ret = -ENOMEM; goto fail_rx; } - flow->pages[i] = page; ret = am65_cpsw_nuss_rx_push(common, page, flow_idx); if (ret < 0) { dev_err(common->dev, "cannot submit page to rx channel flow %d, error %d\n", flow_idx, ret); - am65_cpsw_put_page(flow, page, false, i); + am65_cpsw_put_page(flow, page, false); goto fail_rx; } } @@ -764,8 +759,8 @@ fail_tx: fail_rx: for (i = 0; i < common->rx_ch_num_flows; i++) - k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i], - am65_cpsw_nuss_rx_cleanup, 0); + k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn, + am65_cpsw_nuss_rx_cleanup, !!i); am65_cpsw_destroy_xdp_rxqs(common); @@ -817,11 +812,11 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common) dev_err(common->dev, "rx teardown timeout\n"); } - for (i = 0; i < common->rx_ch_num_flows; i++) { + for (i = common->rx_ch_num_flows - 1; i >= 0; i--) { napi_disable(&rx_chn->flows[i].napi_rx); hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer); - k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i], - am65_cpsw_nuss_rx_cleanup, 0); + k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn, + am65_cpsw_nuss_rx_cleanup, !!i); } k3_udma_glue_disable_rx_chn(rx_chn->rx_chn); @@ -1028,7 +1023,7 @@ pool_free: static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow, struct am65_cpsw_port *port, struct xdp_buff *xdp, - int desc_idx, int cpu, int *len) + int cpu, int *len) { struct am65_cpsw_common *common = flow->common; struct net_device *ndev = port->ndev; @@ -1090,7 +1085,7 @@ drop: } page = virt_to_head_page(xdp->data); - am65_cpsw_put_page(flow, page, true, desc_idx); + am65_cpsw_put_page(flow, page, true); out: return ret; @@ -1138,16 +1133,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, struct am65_cpsw_ndev_priv *ndev_priv; struct cppi5_host_desc_t *desc_rx; struct device *dev = common->dev; + struct am65_cpsw_swdata *swdata; struct page *page, *new_page; dma_addr_t desc_dma, buf_dma; struct am65_cpsw_port *port; - int headroom, desc_idx, ret; struct net_device *ndev; u32 flow_idx = flow->id; struct sk_buff *skb; struct xdp_buff xdp; + int headroom, ret; void *page_addr; - void **swdata; u32 *psdata; *xdp_state = AM65_CPSW_XDP_PASS; @@ -1170,8 +1165,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, __func__, flow_idx, &desc_dma); swdata = cppi5_hdesc_get_swdata(desc_rx); - page_addr = *swdata; - page = virt_to_page(page_addr); + page = swdata->page; + page_addr = page_address(page); cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma); pkt_len = cppi5_hdesc_get_pktlen(desc_rx); @@ -1187,9 +1182,6 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); - desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx, - rx_chn->dsize_log2); - skb = am65_cpsw_build_skb(page_addr, ndev, AM65_CPSW_MAX_PACKET_SIZE); if (unlikely(!skb)) { @@ -1201,7 +1193,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]); xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM, pkt_len, false); - *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, desc_idx, + *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, cpu, &pkt_len); if (*xdp_state != AM65_CPSW_XDP_PASS) goto allocate; @@ -1230,10 +1222,8 @@ allocate: return -ENOMEM; } - flow->pages[desc_idx] = new_page; - if (netif_dormant(ndev)) { - am65_cpsw_put_page(flow, 
new_page, true, desc_idx); + am65_cpsw_put_page(flow, new_page, true); ndev->stats.rx_dropped++; return 0; } @@ -1241,7 +1231,7 @@ allocate: requeue: ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx); if (WARN_ON(ret < 0)) { - am65_cpsw_put_page(flow, new_page, true, desc_idx); + am65_cpsw_put_page(flow, new_page, true); ndev->stats.rx_errors++; ndev->stats.rx_dropped++; } @@ -2343,10 +2333,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) for (i = 0; i < common->rx_ch_num_flows; i++) { flow = &rx_chn->flows[i]; flow->page_pool = NULL; - flow->pages = devm_kcalloc(dev, AM65_CPSW_MAX_RX_DESC, - sizeof(*flow->pages), GFP_KERNEL); - if (!flow->pages) - return -ENOMEM; } rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg); @@ -2396,10 +2382,12 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) flow = &rx_chn->flows[i]; flow->id = i; flow->common = common; + flow->irq = -EINVAL; rx_flow_cfg.ring_rxfdq0_id = fdqring_id; rx_flow_cfg.rx_cfg.size = max_desc_num; - rx_flow_cfg.rxfdq_cfg.size = max_desc_num; + /* share same FDQ for all flows */ + rx_flow_cfg.rxfdq_cfg.size = max_desc_num * rx_cfg.flow_id_num; rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode; ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn, @@ -2437,6 +2425,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) if (ret) { dev_err(dev, "failure requesting rx %d irq %u, %d\n", i, flow->irq, ret); + flow->irq = -EINVAL; goto err; } } @@ -3274,8 +3263,8 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common) for (i = 0; i < common->rx_ch_num_flows; i++) k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, - &rx_chan->flows[i], - am65_cpsw_nuss_rx_cleanup, 0); + rx_chan, + am65_cpsw_nuss_rx_cleanup, !!i); k3_udma_glue_disable_rx_chn(rx_chan->rx_chn); diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h index 3f3e353dfe88..e7832a5cf3cc 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h @@ -101,10 +101,14 @@ struct am65_cpsw_rx_flow { struct hrtimer rx_hrtimer; unsigned long rx_pace_timeout; struct page_pool *page_pool; - struct page **pages; char name[32]; }; +struct am65_cpsw_swdata { + u32 flow_id; + struct page *page; +}; + struct am65_cpsw_rx_chn { struct device *dev; struct device *dma_dev; diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index 0556910938fa..c568c84a032b 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -16,6 +16,7 @@ #include <linux/if_hsr.h> #include <linux/if_vlan.h> #include <linux/interrupt.h> +#include <linux/io-64-nonatomic-hi-lo.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> #include <linux/module.h> @@ -411,6 +412,8 @@ static int prueth_perout_enable(void *clockops_data, struct prueth_emac *emac = clockops_data; u32 reduction_factor = 0, offset = 0; struct timespec64 ts; + u64 current_cycle; + u64 start_offset; u64 ns_period; if (!on) @@ -449,8 +452,14 @@ static int prueth_perout_enable(void *clockops_data, writel(reduction_factor, emac->prueth->shram.va + TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET); - writel(0, emac->prueth->shram.va + - TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET); + current_cycle = icssg_read_time(emac->prueth->shram.va + + TIMESYNC_FW_WC_CYCLECOUNT_OFFSET); + + /* Rounding of current_cycle count to next second */ + start_offset = 
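		/* assumption in this note: the firmware cycle counter ticks
		 * once per millisecond, so rounding up to a multiple of
		 * MSEC_PER_SEC starts the sync output on the next whole
		 * second instead of at an already-elapsed count of zero */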
roundup(current_cycle, MSEC_PER_SEC); + + hi_lo_writeq(start_offset, emac->prueth->shram.va + + TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET); return 0; } @@ -808,6 +817,47 @@ static netdev_features_t emac_ndo_fix_features(struct net_device *ndev, return features; } +static int emac_ndo_vlan_rx_add_vid(struct net_device *ndev, + __be16 proto, u16 vid) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct prueth *prueth = emac->prueth; + int untag_mask = 0; + int port_mask; + + if (prueth->is_hsr_offload_mode) { + port_mask = BIT(PRUETH_PORT_HOST) | BIT(emac->port_id); + untag_mask = 0; + + netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n", + vid, port_mask, untag_mask); + + icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true); + icssg_set_pvid(emac->prueth, vid, emac->port_id); + } + return 0; +} + +static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev, + __be16 proto, u16 vid) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct prueth *prueth = emac->prueth; + int untag_mask = 0; + int port_mask; + + if (prueth->is_hsr_offload_mode) { + port_mask = BIT(PRUETH_PORT_HOST); + untag_mask = 0; + + netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask %X\n", + vid, port_mask, untag_mask); + + icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false); + } + return 0; +} + static const struct net_device_ops emac_netdev_ops = { .ndo_open = emac_ndo_open, .ndo_stop = emac_ndo_stop, @@ -820,6 +870,8 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_get_stats64 = icssg_ndo_get_stats64, .ndo_get_phys_port_name = icssg_ndo_get_phys_port_name, .ndo_fix_features = emac_ndo_fix_features, + .ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid, }; static int prueth_netdev_init(struct prueth *prueth, @@ -947,7 +999,7 @@ static int prueth_netdev_init(struct prueth *prueth, ndev->netdev_ops = &emac_netdev_ops; ndev->ethtool_ops = &icssg_ethtool_ops; ndev->hw_features = NETIF_F_SG; - ndev->features = ndev->hw_features; + ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES; netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll); diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h index 8722bb4a268a..f5c1d473e9f9 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h @@ -330,6 +330,18 @@ static inline int prueth_emac_slice(struct prueth_emac *emac) extern const struct ethtool_ops icssg_ethtool_ops; extern const struct dev_pm_ops prueth_dev_pm_ops; +static inline u64 icssg_read_time(const void __iomem *addr) +{ + u32 low, high; + + do { + high = readl(addr + 4); + low = readl(addr); + } while (high != readl(addr + 4)); + + return low + ((u64)high << 32); +} + /* Classifier helpers */ void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac); void icssg_class_set_host_mac_addr(struct regmap *miig_rt, const u8 *mac); diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c index a04d4073def9..89dc4c401a8d 100644 --- a/drivers/net/ethernet/vertexcom/mse102x.c +++ b/drivers/net/ethernet/vertexcom/mse102x.c @@ -222,7 +222,7 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp, struct mse102x_net_spi *mses = to_mse102x_spi(mse); struct spi_transfer *xfer = &mses->spi_xfer; struct spi_message *msg = &mses->spi_msg; - struct sk_buff *tskb; + 
struct sk_buff *tskb = NULL; int ret; netif_dbg(mse, tx_queued, mse->ndev, "%s: skb %p, %d@%p\n", @@ -235,7 +235,6 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp, if (!tskb) return -ENOMEM; - dev_kfree_skb(txp); txp = tskb; } @@ -257,6 +256,8 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp, mse->stats.xfer_err++; } + dev_kfree_skb(tskb); + return ret; } @@ -436,13 +437,15 @@ static void mse102x_tx_work(struct work_struct *work) mse = &mses->mse102x; while ((txb = skb_dequeue(&mse->txq))) { + unsigned int len = max_t(unsigned int, txb->len, ETH_ZLEN); + mutex_lock(&mses->lock); ret = mse102x_tx_pkt_spi(mse, txb, work_timeout); mutex_unlock(&mses->lock); if (ret) { mse->ndev->stats.tx_dropped++; } else { - mse->ndev->stats.tx_bytes += txb->len; + mse->ndev->stats.tx_bytes += len; mse->ndev->stats.tx_packets++; } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 273ec5f70005..0f4b02fe6f85 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -924,13 +924,13 @@ axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev) skbuf_dma->sg_len = sg_len; dma_tx_desc->callback_param = lp; dma_tx_desc->callback_result = axienet_dma_tx_cb; - dmaengine_submit(dma_tx_desc); - dma_async_issue_pending(lp->tx_chan); txq = skb_get_tx_queue(lp->ndev, skb); netdev_tx_sent_queue(txq, skb->len); netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX), MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS); + dmaengine_submit(dma_tx_desc); + dma_async_issue_pending(lp->tx_chan); return NETDEV_TX_OK; xmit_error_unmap_sg: |
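The final axienet hunk closes a race: once dmaengine_submit() publishes the descriptor, the Tx completion callback may run immediately and perform BQL completion accounting, so the bytes must be recorded as sent beforehand. A sketch of the safe ordering, with illustrative parameter names:

#include <linux/dmaengine.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* BQL accounting must precede handing the descriptor to the
 * dmaengine: the completion callback (which ends in
 * netdev_tx_completed_queue()) may fire as soon as the descriptor
 * is submitted and issued. */
static netdev_tx_t example_xmit_tail(struct netdev_queue *txq,
				     struct sk_buff *skb,
				     struct dma_async_tx_descriptor *desc,
				     struct dma_chan *chan)
{
	netdev_tx_sent_queue(txq, skb->len);

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return NETDEV_TX_OK;
}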