Diffstat (limited to 'drivers/net/ethernet/marvell')
68 files changed, 2573 insertions, 764 deletions
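Several of the mv643xx_eth hunks below convert the old kernel timer helpers (from_timer(), del_timer(), del_timer_sync()) to their current names (timer_container_of(), timer_delete(), timer_delete_sync()). A minimal sketch of the converted pattern follows; the foo_* names and the 30-second period are illustrative stand-ins, not the driver's own symbols.

#include <linux/timer.h>
#include <linux/jiffies.h>

struct foo_priv {
	struct timer_list stats_timer;
};

/* Callback: recover the enclosing struct from the timer pointer.
 * timer_container_of() is the renamed from_timer(); same arguments.
 */
static void foo_stats_timer_fn(struct timer_list *t)
{
	struct foo_priv *priv = timer_container_of(priv, t, stats_timer);

	/* ... periodic work ..., then re-arm */
	mod_timer(&priv->stats_timer, jiffies + 30 * HZ);
}

static void foo_start(struct foo_priv *priv)
{
	timer_setup(&priv->stats_timer, foo_stats_timer_fn, 0);
	mod_timer(&priv->stats_timer, jiffies + 30 * HZ);
}

static void foo_stop(struct foo_priv *priv)
{
	/* timer_delete_sync() (formerly del_timer_sync()) waits for a
	 * concurrently running callback to finish before returning.
	 */
	timer_delete_sync(&priv->stats_timer);
}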
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index 837295fecd17..50f7c59e8a04 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -34,7 +34,6 @@ config MV643XX_ETH config MVMDIO tristate "Marvell MDIO interface support" depends on HAS_IOMEM - select MDIO_DEVRES select PHYLIB help This driver supports the MDIO interface found in the network diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 67a6ff07c83d..0ab52c57c648 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1333,7 +1333,8 @@ static void mib_counters_update(struct mv643xx_eth_private *mp) static void mib_counters_timer_wrapper(struct timer_list *t) { - struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer); + struct mv643xx_eth_private *mp = timer_container_of(mp, t, + mib_counters_timer); mib_counters_update(mp); mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); } @@ -2247,7 +2248,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget) if (unlikely(mp->oom)) { mp->oom = 0; - del_timer(&mp->rx_oom); + timer_delete(&mp->rx_oom); } work_done = 0; @@ -2306,7 +2307,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget) static inline void oom_timer_wrapper(struct timer_list *t) { - struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom); + struct mv643xx_eth_private *mp = timer_container_of(mp, t, rx_oom); napi_schedule(&mp->napi); } @@ -2521,7 +2522,7 @@ static int mv643xx_eth_stop(struct net_device *dev) napi_disable(&mp->napi); - del_timer_sync(&mp->rx_oom); + timer_delete_sync(&mp->rx_oom); netif_carrier_off(dev); if (dev->phydev) @@ -2531,7 +2532,7 @@ static int mv643xx_eth_stop(struct net_device *dev) port_reset(mp); mv643xx_eth_get_stats(dev); mib_counters_update(mp); - del_timer_sync(&mp->mib_counters_timer); + timer_delete_sync(&mp->mib_counters_timer); for (i = 0; i < mp->rxq_count; i++) rxq_deinit(mp->rxq + i); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 147571fdada3..476e73e502fe 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -4610,7 +4610,7 @@ static int mvneta_stop(struct net_device *dev) /* Inform that we are stopping so we don't want to setup the * driver for new CPUs in the notifiers. The code of the * notifier for CPU online is protected by the same spinlock, - * so when we get the lock, the notifer work is done. + * so when we get the lock, the notifier work is done. 
*/ spin_lock(&pp->lock); pp->is_stopped = true; @@ -5014,8 +5014,6 @@ static int mvneta_ethtool_get_rxnfc(struct net_device *dev, case ETHTOOL_GRXRINGS: info->data = rxq_number; return 0; - case ETHTOOL_GRXFH: - return -EOPNOTSUPP; default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h index e47783ce77e0..57ac039df6f7 100644 --- a/drivers/net/ethernet/marvell/mvneta_bm.h +++ b/drivers/net/ethernet/marvell/mvneta_bm.h @@ -115,7 +115,7 @@ struct mvneta_bm_pool { /* Packet size */ int pkt_size; - /* Size of the buffer acces through DMA*/ + /* Size of the buffer access through DMA */ u32 buf_size; /* BPPE virtual base address */ diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 44fe9b68d1c2..061fcd444d50 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -1113,6 +1113,9 @@ struct mvpp2 { /* Spinlocks for CM3 shared memory configuration */ spinlock_t mss_spinlock; + + /* Spinlock for shared PRS parser memory and shadow table */ + spinlock_t prs_spinlock; }; struct mvpp2_pcpu_stats { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 8ed83fb98862..44b201817d94 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -1618,7 +1618,8 @@ int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 port_ctx, return 0; } -int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info) +int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, + const struct ethtool_rxfh_fields *info) { u16 hash_opts = 0; u32 flow_type; @@ -1656,7 +1657,8 @@ int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info) return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts); } -int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info) +int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, + struct ethtool_rxfh_fields *info) { unsigned long hash_opts; u32 flow_type; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h index 85c9c6e80678..caadf3aea95d 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h @@ -272,8 +272,10 @@ int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 rss_ctx, int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 rss_ctx, u32 *indir); -int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info); -int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info); +int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, + struct ethtool_rxfh_fields *info); +int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, + const struct ethtool_rxfh_fields *info); void mvpp2_cls_init(struct mvpp2 *priv); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 566c12c89520..8ebb985d2573 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -5173,38 +5173,40 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_dropped = dev->stats.tx_dropped; } -static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr) +static int mvpp2_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct 
netlink_ext_ack *extack) { - struct hwtstamp_config config; + struct mvpp2_port *port = netdev_priv(dev); void __iomem *ptp; u32 gcr, int_mask; - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; + if (!port->hwtstamp) + return -EOPNOTSUPP; - if (config.tx_type != HWTSTAMP_TX_OFF && - config.tx_type != HWTSTAMP_TX_ON) + if (config->tx_type != HWTSTAMP_TX_OFF && + config->tx_type != HWTSTAMP_TX_ON) return -ERANGE; ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); int_mask = gcr = 0; - if (config.tx_type != HWTSTAMP_TX_OFF) { + if (config->tx_type != HWTSTAMP_TX_OFF) { gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET; int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 | MVPP22_PTP_INT_MASK_QUEUE0; } /* It seems we must also release the TX reset when enabling the TSU */ - if (config.rx_filter != HWTSTAMP_FILTER_NONE) + if (config->rx_filter != HWTSTAMP_FILTER_NONE) gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET | MVPP22_PTP_GCR_TX_RESET; if (gcr & MVPP22_PTP_GCR_TSU_ENABLE) mvpp22_tai_start(port->priv->tai); - if (config.rx_filter != HWTSTAMP_FILTER_NONE) { - config.rx_filter = HWTSTAMP_FILTER_ALL; + if (config->rx_filter != HWTSTAMP_FILTER_NONE) { + config->rx_filter = HWTSTAMP_FILTER_ALL; mvpp2_modify(ptp + MVPP22_PTP_GCR, MVPP22_PTP_GCR_RX_RESET | MVPP22_PTP_GCR_TX_RESET | @@ -5225,26 +5227,22 @@ static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr) if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE)) mvpp22_tai_stop(port->priv->tai); - port->tx_hwtstamp_type = config.tx_type; - - if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) - return -EFAULT; + port->tx_hwtstamp_type = config->tx_type; return 0; } -static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr) +static int mvpp2_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *config) { - struct hwtstamp_config config; - - memset(&config, 0, sizeof(config)); + struct mvpp2_port *port = netdev_priv(dev); - config.tx_type = port->tx_hwtstamp_type; - config.rx_filter = port->rx_hwtstamp ? - HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; + if (!port->hwtstamp) + return -EOPNOTSUPP; - if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) - return -EFAULT; + config->tx_type = port->tx_hwtstamp_type; + config->rx_filter = port->rx_hwtstamp ? 
HWTSTAMP_FILTER_ALL : + HWTSTAMP_FILTER_NONE; return 0; } @@ -5274,18 +5272,6 @@ static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mvpp2_port *port = netdev_priv(dev); - switch (cmd) { - case SIOCSHWTSTAMP: - if (port->hwtstamp) - return mvpp2_set_ts_config(port, ifr); - break; - - case SIOCGHWTSTAMP: - if (port->hwtstamp) - return mvpp2_get_ts_config(port, ifr); - break; - } - if (!port->phylink) return -ENOTSUPP; @@ -5602,9 +5588,6 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, return -EOPNOTSUPP; switch (info->cmd) { - case ETHTOOL_GRXFH: - ret = mvpp2_ethtool_rxfh_get(port, info); - break; case ETHTOOL_GRXRINGS: info->data = port->nrxqs; break; @@ -5642,9 +5625,6 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev, return -EOPNOTSUPP; switch (info->cmd) { - case ETHTOOL_SRXFH: - ret = mvpp2_ethtool_rxfh_set(port, info); - break; case ETHTOOL_SRXCLSRLINS: ret = mvpp2_ethtool_cls_rule_ins(port, info); break; @@ -5761,6 +5741,29 @@ static int mvpp2_ethtool_set_rxfh(struct net_device *dev, return mvpp2_modify_rxfh_context(dev, NULL, rxfh, extack); } +static int mvpp2_ethtool_get_rxfh_fields(struct net_device *dev, + struct ethtool_rxfh_fields *info) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!mvpp22_rss_is_supported(port)) + return -EOPNOTSUPP; + + return mvpp2_ethtool_rxfh_get(port, info); +} + +static int mvpp2_ethtool_set_rxfh_fields(struct net_device *dev, + const struct ethtool_rxfh_fields *info, + struct netlink_ext_ack *extack) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!mvpp22_rss_is_supported(port)) + return -EOPNOTSUPP; + + return mvpp2_ethtool_rxfh_set(port, info); +} + static int mvpp2_ethtool_get_eee(struct net_device *dev, struct ethtool_keee *eee) { @@ -5799,6 +5802,8 @@ static const struct net_device_ops mvpp2_netdev_ops = { .ndo_set_features = mvpp2_set_features, .ndo_bpf = mvpp2_xdp, .ndo_xdp_xmit = mvpp2_xdp_xmit, + .ndo_hwtstamp_get = mvpp2_hwtstamp_get, + .ndo_hwtstamp_set = mvpp2_hwtstamp_set, }; static const struct ethtool_ops mvpp2_eth_tool_ops = { @@ -5825,6 +5830,8 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = { .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size, .get_rxfh = mvpp2_ethtool_get_rxfh, .set_rxfh = mvpp2_ethtool_set_rxfh, + .get_rxfh_fields = mvpp2_ethtool_get_rxfh_fields, + .set_rxfh_fields = mvpp2_ethtool_set_rxfh_fields, .create_rxfh_context = mvpp2_create_rxfh_context, .modify_rxfh_context = mvpp2_modify_rxfh_context, .remove_rxfh_context = mvpp2_remove_rxfh_context, @@ -7723,8 +7730,9 @@ static int mvpp2_probe(struct platform_device *pdev) if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23) priv->hw_version = MVPP23; - /* Init mss lock */ + /* Init locks for shared packet processor resources */ spin_lock_init(&priv->mss_spinlock); + spin_lock_init(&priv->prs_spinlock); /* Initialize network controller */ err = mvpp2_init(pdev, priv); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 9af22f497a40..93e978bdf303 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -23,6 +23,8 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) { int i; + lockdep_assert_held(&priv->prs_spinlock); + if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) return -EINVAL; @@ -43,11 +45,13 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) } /* Initialize tcam entry from hw */ -int 
mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, - int tid) +static int __mvpp2_prs_init_from_hw(struct mvpp2 *priv, + struct mvpp2_prs_entry *pe, int tid) { int i; + lockdep_assert_held(&priv->prs_spinlock); + if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1) return -EINVAL; @@ -73,6 +77,18 @@ int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, return 0; } +int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, + int tid) +{ + int err; + + spin_lock_bh(&priv->prs_spinlock); + err = __mvpp2_prs_init_from_hw(priv, pe, tid); + spin_unlock_bh(&priv->prs_spinlock); + + return err; +} + /* Invalidate tcam hw entry */ static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index) { @@ -374,7 +390,7 @@ static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); bits = mvpp2_prs_sram_ai_get(&pe); /* Sram store classification lookup ID in AI bits [5:0] */ @@ -441,7 +457,7 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); + __mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -469,14 +485,17 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) } /* Set port to unicast or multicast promiscuous mode */ -void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, - enum mvpp2_prs_l2_cast l2_cast, bool add) +static void __mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, + bool add) { struct mvpp2_prs_entry pe; unsigned char cast_match; unsigned int ri; int tid; + lockdep_assert_held(&priv->prs_spinlock); + if (l2_cast == MVPP2_PRS_L2_UNI_CAST) { cast_match = MVPP2_PRS_UCAST_VAL; tid = MVPP2_PE_MAC_UC_PROMISCUOUS; @@ -489,7 +508,7 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, /* promiscuous mode - Accept unknown unicast or multicast packets */ if (priv->prs_shadow[tid].valid) { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } else { memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); @@ -522,6 +541,14 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, mvpp2_prs_hw_write(priv, &pe); } +void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, bool add) +{ + spin_lock_bh(&priv->prs_spinlock); + __mvpp2_prs_mac_promisc_set(priv, port, l2_cast, add); + spin_unlock_bh(&priv->prs_spinlock); +} + /* Set entry for dsa packets */ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, bool tagged, bool extend) @@ -539,7 +566,7 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -610,7 +637,7 @@ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); 
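The mvpp2_prs.c hunks in this region all follow one shape: the exported entry point takes the new prs_spinlock with bottom halves disabled, and the actual work moves into a double-underscore variant that only asserts the lock, so internal callers that already hold it use the __ form directly. A sketch of that pattern, assuming hypothetical foo_prs_write() names (only prs_spinlock is the real field):

/* Internal form: caller must already hold prs_spinlock. */
static int __foo_prs_write(struct mvpp2 *priv, int tid)
{
	lockdep_assert_held(&priv->prs_spinlock);

	/* ... program the shared PRS TCAM/SRAM and shadow table ... */
	return 0;
}

/* Exported form: takes the lock BH-safe, since the parser tables
 * are reachable from both process context and softirq paths.
 */
int foo_prs_write(struct mvpp2 *priv, int tid)
{
	int err;

	spin_lock_bh(&priv->prs_spinlock);
	err = __foo_prs_write(priv, tid);
	spin_unlock_bh(&priv->prs_spinlock);

	return err;
}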
@@ -673,7 +700,7 @@ static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai) priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid); if (!match) continue; @@ -726,7 +753,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + __mvpp2_prs_init_from_hw(priv, &pe, tid_aux); ri_bits = mvpp2_prs_sram_ri_get(&pe); if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) == MVPP2_PRS_RI_VLAN_DOUBLE) @@ -760,7 +787,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Update ports' mask */ mvpp2_prs_tcam_port_map_set(&pe, port_map); @@ -800,7 +827,7 @@ static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1, priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) && mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2); @@ -849,7 +876,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + __mvpp2_prs_init_from_hw(priv, &pe, tid_aux); ri_bits = mvpp2_prs_sram_ri_get(&pe); ri_bits &= MVPP2_PRS_RI_VLAN_MASK; if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || @@ -880,7 +907,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Update ports' mask */ @@ -1213,8 +1240,8 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv) /* Create dummy entries for drop all and promiscuous modes */ mvpp2_prs_drop_fc(priv); mvpp2_prs_mac_drop_all_set(priv, 0, false); - mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); - mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); + __mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); + __mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); } /* Set default entries for various types of dsa packets */ @@ -1533,12 +1560,6 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) struct mvpp2_prs_entry pe; int err; - priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), - MVPP2_PRS_DBL_VLANS_MAX, - GFP_KERNEL); - if (!priv->prs_double_vlans) - return -ENOMEM; - /* Double VLAN: 0x88A8, 0x8100 */ err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q, MVPP2_PRS_PORT_MASK); @@ -1941,7 +1962,7 @@ static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask) port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) continue; - mvpp2_prs_init_from_hw(port->priv, &pe, tid); + __mvpp2_prs_init_from_hw(port->priv, &pe, tid); mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); @@ -1970,6 +1991,8 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) memset(&pe, 0, sizeof(pe)); + spin_lock_bh(&priv->prs_spinlock); + /* Scan TCAM and see if entry with this <vid,port> already exist */ tid = 
mvpp2_prs_vid_range_find(port, vid, mask); @@ -1988,8 +2011,10 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) MVPP2_PRS_VLAN_FILT_MAX_ENTRY); /* There isn't room for a new VID filter */ - if (tid < 0) + if (tid < 0) { + spin_unlock_bh(&priv->prs_spinlock); return tid; + } mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); pe.index = tid; @@ -1997,7 +2022,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Enable the current port */ @@ -2019,6 +2044,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); mvpp2_prs_hw_write(priv, &pe); + spin_unlock_bh(&priv->prs_spinlock); return 0; } @@ -2028,15 +2054,16 @@ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid) struct mvpp2 *priv = port->priv; int tid; - /* Scan TCAM and see if entry with this <vid,port> already exist */ - tid = mvpp2_prs_vid_range_find(port, vid, 0xfff); + spin_lock_bh(&priv->prs_spinlock); - /* No such entry */ - if (tid < 0) - return; + /* Invalidate TCAM entry with this <vid,port>, if it exists */ + tid = mvpp2_prs_vid_range_find(port, vid, 0xfff); + if (tid >= 0) { + mvpp2_prs_hw_inv(priv, tid); + priv->prs_shadow[tid].valid = false; + } - mvpp2_prs_hw_inv(priv, tid); - priv->prs_shadow[tid].valid = false; + spin_unlock_bh(&priv->prs_spinlock); } /* Remove all existing VID filters on this port */ @@ -2045,6 +2072,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) struct mvpp2 *priv = port->priv; int tid; + spin_lock_bh(&priv->prs_spinlock); + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { if (priv->prs_shadow[tid].valid) { @@ -2052,6 +2081,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) priv->prs_shadow[tid].valid = false; } } + + spin_unlock_bh(&priv->prs_spinlock); } /* Remove VID filering entry for this port */ @@ -2060,10 +2091,14 @@ void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port) unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); struct mvpp2 *priv = port->priv; + spin_lock_bh(&priv->prs_spinlock); + /* Invalidate the guard entry */ mvpp2_prs_hw_inv(priv, tid); priv->prs_shadow[tid].valid = false; + + spin_unlock_bh(&priv->prs_spinlock); } /* Add guard entry that drops packets when no VID is matched on this port */ @@ -2079,6 +2114,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) memset(&pe, 0, sizeof(pe)); + spin_lock_bh(&priv->prs_spinlock); + pe.index = tid; reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); @@ -2111,6 +2148,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) /* Update shadow table */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); mvpp2_prs_hw_write(priv, &pe); + + spin_unlock_bh(&priv->prs_spinlock); } /* Parser default initialization */ @@ -2118,6 +2157,20 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) { int err, index, i; + priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, + sizeof(*priv->prs_shadow), + GFP_KERNEL); + if (!priv->prs_shadow) + return -ENOMEM; + + priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), + MVPP2_PRS_DBL_VLANS_MAX, + GFP_KERNEL); + if (!priv->prs_double_vlans) + return -ENOMEM; + + spin_lock_bh(&priv->prs_spinlock); + /* Enable tcam table */ mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); @@ 
-2136,12 +2189,6 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) mvpp2_prs_hw_inv(priv, index); - priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, - sizeof(*priv->prs_shadow), - GFP_KERNEL); - if (!priv->prs_shadow) - return -ENOMEM; - /* Always start from lookup = 0 */ for (index = 0; index < MVPP2_MAX_PORTS; index++) mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, @@ -2158,26 +2205,13 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) mvpp2_prs_vid_init(priv); err = mvpp2_prs_etype_init(priv); - if (err) - return err; - - err = mvpp2_prs_vlan_init(pdev, priv); - if (err) - return err; - - err = mvpp2_prs_pppoe_init(priv); - if (err) - return err; - - err = mvpp2_prs_ip6_init(priv); - if (err) - return err; - - err = mvpp2_prs_ip4_init(priv); - if (err) - return err; + err = err ? : mvpp2_prs_vlan_init(pdev, priv); + err = err ? : mvpp2_prs_pppoe_init(priv); + err = err ? : mvpp2_prs_ip6_init(priv); + err = err ? : mvpp2_prs_ip4_init(priv); - return 0; + spin_unlock_bh(&priv->prs_spinlock); + return err; } /* Compare MAC DA with tcam entry data */ @@ -2217,7 +2251,7 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, (priv->prs_shadow[tid].udf != udf_type)) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); entry_pmap = mvpp2_prs_tcam_port_map_get(&pe); if (mvpp2_prs_mac_range_equals(&pe, da, mask) && @@ -2229,7 +2263,8 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, } /* Update parser's mac da entry */ -int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) +static int __mvpp2_prs_mac_da_accept(struct mvpp2_port *port, + const u8 *da, bool add) { unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; struct mvpp2 *priv = port->priv; @@ -2261,7 +2296,7 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); @@ -2317,6 +2352,17 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) return 0; } +int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) +{ + int err; + + spin_lock_bh(&port->priv->prs_spinlock); + err = __mvpp2_prs_mac_da_accept(port, da, add); + spin_unlock_bh(&port->priv->prs_spinlock); + + return err; +} + int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) { struct mvpp2_port *port = netdev_priv(dev); @@ -2345,6 +2391,8 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port) unsigned long pmap; int index, tid; + spin_lock_bh(&priv->prs_spinlock); + for (tid = MVPP2_PE_MAC_RANGE_START; tid <= MVPP2_PE_MAC_RANGE_END; tid++) { unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; @@ -2354,7 +2402,7 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port) (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); pmap = mvpp2_prs_tcam_port_map_get(&pe); @@ -2375,14 +2423,17 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port) continue; /* Remove entry from TCAM */ - mvpp2_prs_mac_da_accept(port, da, false); + __mvpp2_prs_mac_da_accept(port, da, false); } + + spin_unlock_bh(&priv->prs_spinlock); } int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, 
int port, int type) { switch (type) { case MVPP2_TAG_TYPE_EDSA: + spin_lock_bh(&priv->prs_spinlock); /* Add port to EDSA entries */ mvpp2_prs_dsa_tag_set(priv, port, true, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); @@ -2393,9 +2444,11 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + spin_unlock_bh(&priv->prs_spinlock); break; case MVPP2_TAG_TYPE_DSA: + spin_lock_bh(&priv->prs_spinlock); /* Add port to DSA entries */ mvpp2_prs_dsa_tag_set(priv, port, true, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); @@ -2406,10 +2459,12 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + spin_unlock_bh(&priv->prs_spinlock); break; case MVPP2_TAG_TYPE_MH: case MVPP2_TAG_TYPE_NONE: + spin_lock_bh(&priv->prs_spinlock); /* Remove port form EDSA and DSA entries */ mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); @@ -2419,6 +2474,7 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + spin_unlock_bh(&priv->prs_spinlock); break; default: @@ -2437,11 +2493,15 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask) memset(&pe, 0, sizeof(pe)); + spin_lock_bh(&priv->prs_spinlock); + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID); - if (tid < 0) + if (tid < 0) { + spin_unlock_bh(&priv->prs_spinlock); return tid; + } pe.index = tid; @@ -2461,6 +2521,7 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask) mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); mvpp2_prs_hw_write(priv, &pe); + spin_unlock_bh(&priv->prs_spinlock); return 0; } @@ -2472,6 +2533,8 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) memset(&pe, 0, sizeof(pe)); + spin_lock_bh(&port->priv->prs_spinlock); + tid = mvpp2_prs_flow_find(port->priv, port->id); /* Such entry not exist */ @@ -2480,8 +2543,10 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) tid = mvpp2_prs_tcam_first_free(port->priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID); - if (tid < 0) + if (tid < 0) { + spin_unlock_bh(&port->priv->prs_spinlock); return tid; + } pe.index = tid; @@ -2492,13 +2557,14 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) /* Update shadow table */ mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS); } else { - mvpp2_prs_init_from_hw(port->priv, &pe, tid); + __mvpp2_prs_init_from_hw(port->priv, &pe, tid); } mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id)); mvpp2_prs_hw_write(port->priv, &pe); + spin_unlock_bh(&port->priv->prs_spinlock); return 0; } @@ -2509,11 +2575,14 @@ int mvpp2_prs_hits(struct mvpp2 *priv, int index) if (index > MVPP2_PRS_TCAM_SRAM_SIZE) return -EINVAL; + spin_lock_bh(&priv->prs_spinlock); + mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index); val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG); val &= MVPP2_PRS_TCAM_HIT_CNT_MASK; + spin_unlock_bh(&priv->prs_spinlock); return val; } diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 0a679e95196f..24499bb36c00 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -1223,7 +1223,7 @@ static void 
octep_hb_timeout_task(struct work_struct *work) miss_cnt); rtnl_lock(); if (netif_running(oct->netdev)) - octep_stop(oct->netdev); + dev_close(oct->netdev); rtnl_unlock(); } diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c index 18c922dd5fc6..420c3f4cf741 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c @@ -18,8 +18,6 @@ #include "octep_vf_config.h" #include "octep_vf_main.h" -struct workqueue_struct *octep_vf_wq; - /* Supported Devices */ static const struct pci_device_id octep_vf_pci_id_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_VF)}, @@ -835,7 +833,9 @@ static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue) struct octep_vf_device *oct = netdev_priv(netdev); netdev_hold(netdev, NULL, GFP_ATOMIC); - schedule_work(&oct->tx_timeout_task); + if (!schedule_work(&oct->tx_timeout_task)) + netdev_put(netdev, NULL); + } static int octep_vf_set_mac(struct net_device *netdev, void *p) diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h index 1a352f41f823..b9f13506f462 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h @@ -320,8 +320,6 @@ static inline u16 OCTEP_VF_MINOR_REV(struct octep_vf_device *oct) #define octep_vf_read_csr64(octep_vf_dev, reg_off) \ readq((octep_vf_dev)->mmio.hw_addr + (reg_off)) -extern struct workqueue_struct *octep_vf_wq; - int octep_vf_device_setup(struct octep_vf_device *oct); int octep_vf_setup_iqs(struct octep_vf_device *oct); void octep_vf_free_iqs(struct octep_vf_device *oct); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile index ccea37847df8..532813d8d028 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -12,4 +12,4 @@ rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \ rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o \ - rvu_rep.o + rvu_rep.o cn20k/mbox_init.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 0b27a695008b..4ff19a04b23e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -717,6 +717,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat) if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; + + /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */ + if (idx >= CGX_RX_STAT_GLOBAL_INDEX) + lmac_id = 0; + *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8)); return 0; } @@ -1177,17 +1182,25 @@ static int cgx_link_usertable_index_map(int speed) static void set_mod_args(struct cgx_set_link_mode_args *args, u32 speed, u8 duplex, u8 autoneg, u64 mode) { - /* Fill default values incase of user did not pass - * valid parameters + int mode_baseidx; + u8 cgx_mode; + + if (args->multimode) { + args->mode |= mode; + return; + } + + /* Derive mode_base_idx and mode fields based + * on cgx_mode value */ - if (args->duplex == DUPLEX_UNKNOWN) - args->duplex = duplex; - if (args->speed == SPEED_UNKNOWN) - args->speed = speed; - if (args->an == AUTONEG_UNKNOWN) - args->an = 
autoneg; + cgx_mode = find_first_bit((unsigned long *)&mode, + CGX_MODE_MAX); args->mode = mode; - args->ports = 0; + mode_baseidx = cgx_mode - 41; + if (mode_baseidx > 0) { + args->mode_baseidx = 1; + args->mode = BIT_ULL(mode_baseidx); + } } static void otx2_map_ethtool_link_modes(u64 bitmask, @@ -1195,16 +1208,16 @@ static void otx2_map_ethtool_link_modes(u64 bitmask, { switch (bitmask) { case ETHTOOL_LINK_MODE_10baseT_Half_BIT: - set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII)); + set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII_10M_BIT)); break; case ETHTOOL_LINK_MODE_10baseT_Full_BIT: - set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII)); + set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII_10M_BIT)); break; case ETHTOOL_LINK_MODE_100baseT_Half_BIT: - set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII)); + set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII_100M_BIT)); break; case ETHTOOL_LINK_MODE_100baseT_Full_BIT: - set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII)); + set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII_100M_BIT)); break; case ETHTOOL_LINK_MODE_1000baseT_Half_BIT: set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII)); @@ -1476,25 +1489,36 @@ int cgx_get_fwdata_base(u64 *base) } int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args, + struct cgx_lmac_fwdata_s *linkmodes, int cgx_id, int lmac_id) { struct cgx *cgx = cgxd; u64 req = 0, resp; + u8 bit; if (!cgx) return -ENODEV; - if (args.mode) - otx2_map_ethtool_link_modes(args.mode, &args); - if (!args.speed && args.duplex && !args.an) - return -EINVAL; + for_each_set_bit(bit, args.advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS) + otx2_map_ethtool_link_modes(bit, &args); + + if (args.multimode) { + if (linkmodes->advertised_link_modes_own != CGX_CMD_OWN_NS) + return -EBUSY; + + linkmodes->advertised_link_modes = args.mode; + /* Update ownership */ + linkmodes->advertised_link_modes_own = CGX_CMD_OWN_FIRMWARE; + args.mode = GENMASK_ULL(41, 0); + } req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req); req = FIELD_SET(CMDMODECHANGE_SPEED, cgx_link_usertable_index_map(args.speed), req); req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req); req = FIELD_SET(CMDMODECHANGE_AN, args.an, req); - req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req); + req = FIELD_SET(CMDMODECHANGE_MODE_BASEIDX, args.mode_baseidx, req); req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req); return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id); @@ -1680,9 +1704,11 @@ unsigned long cgx_get_lmac_bmap(void *cgxd) static int cgx_lmac_init(struct cgx *cgx) { + u8 max_dmac_filters; struct lmac *lmac; + int err, filter; + unsigned int i; u64 lmac_list; - int i, err; /* lmac_list specifies which lmacs are enabled * when bit n is set to 1, LMAC[n] is enabled @@ -1708,7 +1734,7 @@ static int cgx_lmac_init(struct cgx *cgx) err = -ENOMEM; goto err_lmac_free; } - sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i); + sprintf(lmac->name, "cgx_fwi_%u_%u", cgx->cgx_id, i); if (cgx->mac_ops->non_contiguous_serdes_lane) { lmac->lmac_id = __ffs64(lmac_list); lmac_list &= ~BIT_ULL(lmac->lmac_id); @@ -1721,6 +1747,8 @@ static int cgx_lmac_init(struct cgx *cgx) cgx->mac_ops->dmac_filter_count / cgx->lmac_count; + max_dmac_filters = lmac->mac_to_index_bmap.max; + err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap); if (err) goto err_name_free; @@ -1750,6 +1778,15 @@ static int cgx_lmac_init(struct cgx *cgx) set_bit(lmac->lmac_id, &cgx->lmac_bmap); cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true); lmac->lmac_type = 
cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id); + + /* Disable stale DMAC filters for sane state */ + for (filter = 0; filter < max_dmac_filters; filter++) + cgx_lmac_addr_del(cgx->cgx_id, lmac->lmac_id, filter); + + /* As cgx_lmac_addr_del does not clear entry for index 0 + * so it needs to be done explicitly + */ + cgx_lmac_addr_reset(cgx->cgx_id, lmac->lmac_id); } /* Start X2P reset on given MAC block */ @@ -1927,6 +1964,12 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_disable_device; } + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "DMA mask config failed, abort\n"); + goto err_release_regions; + } + /* MAP configuration registers */ cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); if (!cgx->reg_base) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index 1cf12e5c7da8..950231e7ea71 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -171,6 +171,7 @@ int cgx_set_fec(u64 fec, int cgx_id, int lmac_id); int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp); int cgx_get_phy_fec_stats(void *cgxd, int lmac_id); int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args, + struct cgx_lmac_fwdata_s *linkmodes, int cgx_id, int lmac_id); u64 cgx_features_get(void *cgxd); struct mac_ops *get_mac_ops(void *cgxd); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h index d4a27c882a5b..39352d451cc3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h @@ -95,7 +95,31 @@ enum CGX_MODE_ { CGX_MODE_100G_C2M, CGX_MODE_100G_CR4, CGX_MODE_100G_KR4, - CGX_MODE_MAX /* = 29 */ + CGX_MODE_LAUI_2_C2C_BIT, + CGX_MODE_LAUI_2_C2M_BIT, + CGX_MODE_50GBASE_CR2_C_BIT, + CGX_MODE_50GBASE_KR2_C_BIT, /* = 30 */ + CGX_MODE_100GAUI_2_C2C_BIT, + CGX_MODE_100GAUI_2_C2M_BIT, + CGX_MODE_100GBASE_CR2_BIT, + CGX_MODE_100GBASE_KR2_BIT, + CGX_MODE_SFI_1G_BIT, + CGX_MODE_25GBASE_CR_C_BIT, + CGX_MODE_25GBASE_KR_C_BIT, + CGX_MODE_SGMII_10M_BIT, + CGX_MODE_SGMII_100M_BIT, /* = 39 */ + CGX_MODE_2500_BASEX_BIT = 42, /* Mode group 1 */ + CGX_MODE_5000_BASEX_BIT, + CGX_MODE_O_USGMII_BIT, + CGX_MODE_Q_USGMII_BIT, + CGX_MODE_2_5G_USXGMII_BIT, + CGX_MODE_5G_USXGMII_BIT, + CGX_MODE_10G_SXGMII_BIT, + CGX_MODE_10G_DXGMII_BIT, + CGX_MODE_10G_QXGMII_BIT, + CGX_MODE_TP_BIT, + CGX_MODE_FIBER_BIT, + CGX_MODE_MAX /* = 53 */ }; /* REQUEST ID types. Input to firmware */ enum cgx_cmd_id { @@ -258,7 +282,12 @@ struct cgx_lnk_sts { #define CMDMODECHANGE_SPEED GENMASK_ULL(11, 8) #define CMDMODECHANGE_DUPLEX GENMASK_ULL(12, 12) #define CMDMODECHANGE_AN GENMASK_ULL(13, 13) -#define CMDMODECHANGE_PORT GENMASK_ULL(21, 14) +/* this field categorize the mode ID(FLAGS) range to accommodate + * more modes. + * To specify mode ID range of 0 - 41, this field will be 0. + * To specify mode ID range of 42 - 83, this field will be 1. 
+ */ +#define CMDMODECHANGE_MODE_BASEIDX GENMASK_ULL(21, 20) #define CMDMODECHANGE_FLAGS GENMASK_ULL(63, 22) /* LINK_BRING_UP command timeout */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h new file mode 100644 index 000000000000..4285b5d6a6a2 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. + * + */ + +#ifndef CN20K_API_H +#define CN20K_API_H + +#include "../rvu.h" + +struct ng_rvu { + struct mbox_ops *rvu_mbox_ops; + struct qmem *pf_mbox_addr; + struct qmem *vf_mbox_addr; +}; + +/* Mbox related APIs */ +int cn20k_rvu_mbox_init(struct rvu *rvu, int type, int num); +int cn20k_rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, + int num, int type, unsigned long *pf_bmap); +void cn20k_free_mbox_memory(struct rvu *rvu); +int cn20k_register_afpf_mbox_intr(struct rvu *rvu); +int cn20k_register_afvf_mbox_intr(struct rvu *rvu, int pf_vec_start); +void cn20k_rvu_enable_mbox_intr(struct rvu *rvu); +void cn20k_rvu_unregister_interrupts(struct rvu *rvu); +int cn20k_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev, + void *reg_base, int direction, int ndevs); +void cn20k_rvu_enable_afvf_intr(struct rvu *rvu, int vfs); +void cn20k_rvu_disable_afvf_intr(struct rvu *rvu, int vfs); +#endif /* CN20K_API_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c new file mode 100644 index 000000000000..bd3aab7770dd --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c @@ -0,0 +1,424 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. 
+ * + */ + +#include <linux/interrupt.h> +#include <linux/irq.h> + +#include "rvu_trace.h" +#include "mbox.h" +#include "reg.h" +#include "api.h" + +static irqreturn_t cn20k_afvf_mbox_intr_handler(int irq, void *rvu_irq) +{ + struct rvu_irq_data *rvu_irq_data = rvu_irq; + struct rvu *rvu = rvu_irq_data->rvu; + u64 intr; + + /* Sync with mbox memory region */ + rmb(); + + /* Clear interrupts */ + intr = rvupf_read64(rvu, rvu_irq_data->intr_status); + rvupf_write64(rvu, rvu_irq_data->intr_status, intr); + + if (intr) + trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr); + + rvu_irq_data->afvf_queue_work_hdlr(&rvu->afvf_wq_info, rvu_irq_data->start, + rvu_irq_data->mdevs, intr); + + return IRQ_HANDLED; +} + +int cn20k_register_afvf_mbox_intr(struct rvu *rvu, int pf_vec_start) +{ + struct rvu_irq_data *irq_data; + int intr_vec, offset, vec = 0; + int err; + + /* irq data for 4 VFPF intr vectors */ + irq_data = devm_kcalloc(rvu->dev, 4, + sizeof(struct rvu_irq_data), GFP_KERNEL); + if (!irq_data) + return -ENOMEM; + + for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <= + RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; + intr_vec++, vec++) { + switch (intr_vec) { + case RVU_MBOX_PF_INT_VEC_VFPF_MBOX0: + irq_data[vec].intr_status = + RVU_MBOX_PF_VFPF_INTX(0); + irq_data[vec].start = 0; + irq_data[vec].mdevs = 64; + break; + case RVU_MBOX_PF_INT_VEC_VFPF_MBOX1: + irq_data[vec].intr_status = + RVU_MBOX_PF_VFPF_INTX(1); + irq_data[vec].start = 64; + irq_data[vec].mdevs = 64; + break; + case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0: + irq_data[vec].intr_status = + RVU_MBOX_PF_VFPF1_INTX(0); + irq_data[vec].start = 0; + irq_data[vec].mdevs = 64; + break; + case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1: + irq_data[vec].intr_status = RVU_MBOX_PF_VFPF1_INTX(1); + irq_data[vec].start = 64; + irq_data[vec].mdevs = 64; + break; + } + irq_data[vec].afvf_queue_work_hdlr = + rvu_queue_work; + offset = pf_vec_start + intr_vec; + irq_data[vec].vec_num = offset; + irq_data[vec].rvu = rvu; + + sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAF VFAF%d Mbox%d", + vec / 2, vec % 2); + err = request_irq(pci_irq_vector(rvu->pdev, offset), + rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0, + &rvu->irq_name[offset * NAME_SIZE], + &irq_data[vec]); + if (err) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for AFVF mbox irq\n"); + return err; + } + rvu->irq_allocated[offset] = true; + } + + return 0; +} + +/* CN20K mbox PFx => AF irq handler */ +static irqreturn_t cn20k_mbox_pf_common_intr_handler(int irq, void *rvu_irq) +{ + struct rvu_irq_data *rvu_irq_data = rvu_irq; + struct rvu *rvu = rvu_irq_data->rvu; + u64 intr; + + /* Clear interrupts */ + intr = rvu_read64(rvu, BLKADDR_RVUM, rvu_irq_data->intr_status); + rvu_write64(rvu, BLKADDR_RVUM, rvu_irq_data->intr_status, intr); + + if (intr) + trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr); + + /* Sync with mbox memory region */ + rmb(); + + rvu_irq_data->rvu_queue_work_hdlr(&rvu->afpf_wq_info, + rvu_irq_data->start, + rvu_irq_data->mdevs, intr); + + return IRQ_HANDLED; +} + +void cn20k_rvu_enable_mbox_intr(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + + /* Clear spurious irqs, if any */ + rvu_write64(rvu, BLKADDR_RVUM, + RVU_MBOX_AF_PFAF_INT(0), INTR_MASK(hw->total_pfs)); + + rvu_write64(rvu, BLKADDR_RVUM, + RVU_MBOX_AF_PFAF_INT(1), INTR_MASK(hw->total_pfs - 64)); + + rvu_write64(rvu, BLKADDR_RVUM, + RVU_MBOX_AF_PFAF1_INT(0), INTR_MASK(hw->total_pfs)); + + rvu_write64(rvu, BLKADDR_RVUM, + RVU_MBOX_AF_PFAF1_INT(1), INTR_MASK(hw->total_pfs - 64)); + + /* Enable 
mailbox interrupt for all PFs except PF0 i.e AF itself */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1S(0), + INTR_MASK(hw->total_pfs) & ~1ULL); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1S(1), + INTR_MASK(hw->total_pfs - 64)); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1S(0), + INTR_MASK(hw->total_pfs) & ~1ULL); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1S(1), + INTR_MASK(hw->total_pfs - 64)); +} + +void cn20k_rvu_unregister_interrupts(struct rvu *rvu) +{ + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1C(0), + INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1C(1), + INTR_MASK(rvu->hw->total_pfs - 64)); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1C(0), + INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1C(1), + INTR_MASK(rvu->hw->total_pfs - 64)); +} + +int cn20k_register_afpf_mbox_intr(struct rvu *rvu) +{ + struct rvu_irq_data *irq_data; + int intr_vec, ret, vec = 0; + + /* irq data for 4 PF intr vectors */ + irq_data = devm_kcalloc(rvu->dev, 4, + sizeof(struct rvu_irq_data), GFP_KERNEL); + if (!irq_data) + return -ENOMEM; + + for (intr_vec = RVU_AF_CN20K_INT_VEC_PFAF_MBOX0; intr_vec <= + RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1; intr_vec++, + vec++) { + switch (intr_vec) { + case RVU_AF_CN20K_INT_VEC_PFAF_MBOX0: + irq_data[vec].intr_status = + RVU_MBOX_AF_PFAF_INT(0); + irq_data[vec].start = 0; + irq_data[vec].mdevs = 64; + break; + case RVU_AF_CN20K_INT_VEC_PFAF_MBOX1: + irq_data[vec].intr_status = + RVU_MBOX_AF_PFAF_INT(1); + irq_data[vec].start = 64; + irq_data[vec].mdevs = 96; + break; + case RVU_AF_CN20K_INT_VEC_PFAF1_MBOX0: + irq_data[vec].intr_status = + RVU_MBOX_AF_PFAF1_INT(0); + irq_data[vec].start = 0; + irq_data[vec].mdevs = 64; + break; + case RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1: + irq_data[vec].intr_status = + RVU_MBOX_AF_PFAF1_INT(1); + irq_data[vec].start = 64; + irq_data[vec].mdevs = 96; + break; + } + irq_data[vec].rvu_queue_work_hdlr = rvu_queue_work; + irq_data[vec].vec_num = intr_vec; + irq_data[vec].rvu = rvu; + + /* Register mailbox interrupt handler */ + sprintf(&rvu->irq_name[intr_vec * NAME_SIZE], + "RVUAF PFAF%d Mbox%d", + vec / 2, vec % 2); + ret = request_irq(pci_irq_vector(rvu->pdev, intr_vec), + rvu->ng_rvu->rvu_mbox_ops->pf_intr_handler, 0, + &rvu->irq_name[intr_vec * NAME_SIZE], + &irq_data[vec]); + if (ret) + return ret; + + rvu->irq_allocated[intr_vec] = true; + } + + return 0; +} + +int cn20k_rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, + int num, int type, unsigned long *pf_bmap) +{ + int region; + u64 bar; + + if (type == TYPE_AFVF) { + for (region = 0; region < num; region++) { + if (!test_bit(region, pf_bmap)) + continue; + + bar = (u64)phys_to_virt((u64)rvu->ng_rvu->vf_mbox_addr->base); + bar += region * MBOX_SIZE; + mbox_addr[region] = (void *)bar; + + if (!mbox_addr[region]) + return -ENOMEM; + } + return 0; + } + + for (region = 0; region < num; region++) { + if (!test_bit(region, pf_bmap)) + continue; + + bar = (u64)phys_to_virt((u64)rvu->ng_rvu->pf_mbox_addr->base); + bar += region * MBOX_SIZE; + + mbox_addr[region] = (void *)bar; + + if (!mbox_addr[region]) + return -ENOMEM; + } + return 0; +} + +static int rvu_alloc_mbox_memory(struct rvu *rvu, int type, + int ndevs, int mbox_size) +{ + struct qmem *mbox_addr; + dma_addr_t iova; + int pf, err; + + /* Allocate contiguous memory for mailbox communication. 
+ * eg: AF <=> PFx mbox memory + * This allocated memory is split into chunks of MBOX_SIZE + * and setup into each of the RVU PFs. In HW this memory will + * get aliased to an offset within BAR2 of those PFs. + * + * AF will access mbox memory using direct physical addresses + * and PFs will access the same shared memory from BAR2. + * + * PF <=> VF mbox memory also works in the same fashion. + * AFPF, PFVF requires IOVA to be used to maintain the mailbox msgs + */ + + err = qmem_alloc(rvu->dev, &mbox_addr, ndevs, mbox_size); + if (err) + return -ENOMEM; + + switch (type) { + case TYPE_AFPF: + rvu->ng_rvu->pf_mbox_addr = mbox_addr; + iova = (u64)mbox_addr->iova; + for (pf = 0; pf < ndevs; pf++) { + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFX_ADDR(pf), + (u64)iova); + iova += mbox_size; + } + break; + case TYPE_AFVF: + rvu->ng_rvu->vf_mbox_addr = mbox_addr; + rvupf_write64(rvu, RVU_PF_VF_MBOX_ADDR, (u64)mbox_addr->iova); + break; + default: + return 0; + } + + return 0; +} + +static struct mbox_ops cn20k_mbox_ops = { + .pf_intr_handler = cn20k_mbox_pf_common_intr_handler, + .afvf_intr_handler = cn20k_afvf_mbox_intr_handler, +}; + +int cn20k_rvu_mbox_init(struct rvu *rvu, int type, int ndevs) +{ + int dev; + + if (!is_cn20k(rvu->pdev)) + return 0; + + rvu->ng_rvu->rvu_mbox_ops = &cn20k_mbox_ops; + + if (type == TYPE_AFVF) { + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_PF_VF_CFG, ilog2(MBOX_SIZE)); + } else { + for (dev = 0; dev < ndevs; dev++) + rvu_write64(rvu, BLKADDR_RVUM, + RVU_MBOX_AF_PFX_CFG(dev), ilog2(MBOX_SIZE)); + } + + return rvu_alloc_mbox_memory(rvu, type, ndevs, MBOX_SIZE); +} + +void cn20k_free_mbox_memory(struct rvu *rvu) +{ + if (!is_cn20k(rvu->pdev)) + return; + + qmem_free(rvu->dev, rvu->ng_rvu->pf_mbox_addr); + qmem_free(rvu->dev, rvu->ng_rvu->vf_mbox_addr); +} + +void cn20k_rvu_disable_afvf_intr(struct rvu *rvu, int vfs) +{ + rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs)); + + if (vfs <= 64) + return; + + rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); +} + +void cn20k_rvu_enable_afvf_intr(struct rvu *rvu, int vfs) +{ + /* Clear any pending interrupts and enable AF VF interrupts for + * the first 64 VFs. + */ + rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INTX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INTX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(0), INTR_MASK(vfs)); + + /* FLR */ + rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs)); + + /* Same for remaining VFs, if any. 
*/ + if (vfs <= 64) + return; + + rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INTX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INTX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); + + rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); +} + +int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf *pfvf, + int blkaddr, int nixlf) +{ + int qints, hwctx_size, err; + u64 cfg, ctx_cfg; + + if (is_rvu_otx2(rvu) || is_cn20k(rvu->pdev)) + return 0; + + ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3); + /* Alloc memory for CQINT's HW contexts */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); + qints = (cfg >> 24) & 0xFFF; + hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size); + if (err) + return -ENOMEM; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf), + (u64)pfvf->cq_ints_ctx->iova); + + /* Alloc memory for QINT's HW contexts */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); + qints = (cfg >> 12) & 0xFFF; + hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size); + if (err) + return -ENOMEM; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf), + (u64)pfvf->nix_qints_ctx->iova); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h new file mode 100644 index 000000000000..affb39803120 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. 
+ * + */ + +#ifndef RVU_MBOX_REG_H +#define RVU_MBOX_REG_H +#include "../rvu.h" +#include "../rvu_reg.h" + +/* RVUM block registers */ +#define RVU_PF_DISC (0x0) +#define RVU_PRIV_PFX_DISC(a) (0x8000208 | (a) << 16) +#define RVU_PRIV_HWVFX_DISC(a) (0xD000000 | (a) << 12) + +/* Mbox Registers */ +/* RVU AF BAR0 Mbox registers for AF => PFx */ +#define RVU_MBOX_AF_PFX_ADDR(a) (0x5000 | (a) << 4) +#define RVU_MBOX_AF_PFX_CFG(a) (0x6000 | (a) << 4) +#define RVU_MBOX_AF_AFPFX_TRIGX(a) (0x9000 | (a) << 3) +#define RVU_MBOX_AF_PFAF_INT(a) (0x2980 | (a) << 6) +#define RVU_MBOX_AF_PFAF_INT_W1S(a) (0x2988 | (a) << 6) +#define RVU_MBOX_AF_PFAF_INT_ENA_W1S(a) (0x2990 | (a) << 6) +#define RVU_MBOX_AF_PFAF_INT_ENA_W1C(a) (0x2998 | (a) << 6) +#define RVU_MBOX_AF_PFAF1_INT(a) (0x29A0 | (a) << 6) +#define RVU_MBOX_AF_PFAF1_INT_W1S(a) (0x29A8 | (a) << 6) +#define RVU_MBOX_AF_PFAF1_INT_ENA_W1S(a) (0x29B0 | (a) << 6) +#define RVU_MBOX_AF_PFAF1_INT_ENA_W1C(a) (0x29B8 | (a) << 6) + +/* RVU PF => AF mbox registers */ +#define RVU_MBOX_PF_PFAF_TRIGX(a) (0xC00 | (a) << 3) +#define RVU_MBOX_PF_INT (0xC20) +#define RVU_MBOX_PF_INT_W1S (0xC28) +#define RVU_MBOX_PF_INT_ENA_W1S (0xC30) +#define RVU_MBOX_PF_INT_ENA_W1C (0xC38) + +#define RVU_AF_BAR2_SEL (0x9000000) +#define RVU_AF_BAR2_PFID (0x16400) +#define NIX_CINTX_INT_W1S(a) (0xd30 | (a) << 12) +#define NIX_QINTX_CNT(a) (0xc00 | (a) << 12) + +#define RVU_MBOX_AF_VFAF_INT(a) (0x3000 | (a) << 6) +#define RVU_MBOX_AF_VFAF_INT_W1S(a) (0x3008 | (a) << 6) +#define RVU_MBOX_AF_VFAF_INT_ENA_W1S(a) (0x3010 | (a) << 6) +#define RVU_MBOX_AF_VFAF_INT_ENA_W1C(a) (0x3018 | (a) << 6) +#define RVU_MBOX_AF_VFAF_INT_ENA_W1C(a) (0x3018 | (a) << 6) +#define RVU_MBOX_AF_VFAF1_INT(a) (0x3020 | (a) << 6) +#define RVU_MBOX_AF_VFAF1_INT_W1S(a) (0x3028 | (a) << 6) +#define RVU_MBOX_AF_VFAF1_IN_ENA_W1S(a) (0x3030 | (a) << 6) +#define RVU_MBOX_AF_VFAF1_IN_ENA_W1C(a) (0x3038 | (a) << 6) + +#define RVU_MBOX_AF_AFVFX_TRIG(a, b) (0x10000 | (a) << 4 | (b) << 3) +#define RVU_MBOX_AF_VFX_ADDR(a) (0x20000 | (a) << 4) +#define RVU_MBOX_AF_VFX_CFG(a) (0x28000 | (a) << 4) + +#define RVU_MBOX_PF_VFX_PFVF_TRIGX(a) (0x2000 | (a) << 3) + +#define RVU_MBOX_PF_VFPF_INTX(a) (0x1000 | (a) << 3) +#define RVU_MBOX_PF_VFPF_INT_W1SX(a) (0x1020 | (a) << 3) +#define RVU_MBOX_PF_VFPF_INT_ENA_W1SX(a) (0x1040 | (a) << 3) +#define RVU_MBOX_PF_VFPF_INT_ENA_W1CX(a) (0x1060 | (a) << 3) + +#define RVU_MBOX_PF_VFPF1_INTX(a) (0x1080 | (a) << 3) +#define RVU_MBOX_PF_VFPF1_INT_W1SX(a) (0x10a0 | (a) << 3) +#define RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(a) (0x10c0 | (a) << 3) +#define RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(a) (0x10e0 | (a) << 3) + +#define RVU_MBOX_PF_VF_ADDR (0xC40) +#define RVU_MBOX_PF_LMTLINE_ADDR (0xC48) +#define RVU_MBOX_PF_VF_CFG (0xC60) + +#define RVU_MBOX_VF_VFPF_TRIGX(a) (0x3000 | (a) << 3) +#define RVU_MBOX_VF_INT (0x20) +#define RVU_MBOX_VF_INT_W1S (0x28) +#define RVU_MBOX_VF_INT_ENA_W1S (0x30) +#define RVU_MBOX_VF_INT_ENA_W1C (0x38) + +#define RVU_MBOX_VF_VFAF_TRIGX(a) (0x2000 | (a) << 3) +#endif /* RVU_MBOX_REG_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h new file mode 100644 index 000000000000..76ce3ec6da9c --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. 
+ * + */ + +#ifndef STRUCT_H +#define STRUCT_H + +/* + * CN20k RVU PF MBOX Interrupt Vector Enumeration + * + * Vectors 0 - 3 are compatible with pre cn20k and hence + * existing macros are being reused. + */ +enum rvu_mbox_pf_int_vec_e { + RVU_MBOX_PF_INT_VEC_VFPF_MBOX0 = 0x4, + RVU_MBOX_PF_INT_VEC_VFPF_MBOX1 = 0x5, + RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0 = 0x6, + RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1 = 0x7, + RVU_MBOX_PF_INT_VEC_AFPF_MBOX = 0x8, + RVU_MBOX_PF_INT_VEC_CNT = 0x9, +}; + +/* RVU Admin function Interrupt Vector Enumeration */ +enum rvu_af_cn20k_int_vec_e { + RVU_AF_CN20K_INT_VEC_POISON = 0x0, + RVU_AF_CN20K_INT_VEC_PFFLR0 = 0x1, + RVU_AF_CN20K_INT_VEC_PFFLR1 = 0x2, + RVU_AF_CN20K_INT_VEC_PFME0 = 0x3, + RVU_AF_CN20K_INT_VEC_PFME1 = 0x4, + RVU_AF_CN20K_INT_VEC_GEN = 0x5, + RVU_AF_CN20K_INT_VEC_PFAF_MBOX0 = 0x6, + RVU_AF_CN20K_INT_VEC_PFAF_MBOX1 = 0x7, + RVU_AF_CN20K_INT_VEC_PFAF1_MBOX0 = 0x8, + RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1 = 0x9, + RVU_AF_CN20K_INT_VEC_CNT = 0xa, +}; +#endif diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index 406c59100a35..8a08bebf08c2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -39,7 +39,7 @@ struct qmem { void *base; dma_addr_t iova; int alloc_sz; - u16 entry_sz; + u32 entry_sz; u8 align; u32 qsize; }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c index 1e5aa5397504..75872d257eca 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c @@ -10,8 +10,11 @@ #include <linux/pci.h> #include "rvu_reg.h" +#include "cn20k/reg.h" +#include "cn20k/api.h" #include "mbox.h" #include "rvu_trace.h" +#include "rvu.h" static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); @@ -28,8 +31,10 @@ void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid) mdev->rsp_size = 0; tx_hdr->num_msgs = 0; tx_hdr->msg_size = 0; + tx_hdr->sig = 0; rx_hdr->num_msgs = 0; rx_hdr->msg_size = 0; + rx_hdr->sig = 0; } EXPORT_SYMBOL(__otx2_mbox_reset); @@ -53,9 +58,98 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox) } EXPORT_SYMBOL(otx2_mbox_destroy); +int cn20k_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev, + void *reg_base, int direction, int ndevs) +{ + switch (direction) { + case MBOX_DIR_AFPF: + case MBOX_DIR_PFVF: + mbox->tx_start = MBOX_DOWN_TX_START; + mbox->rx_start = MBOX_DOWN_RX_START; + mbox->tx_size = MBOX_DOWN_TX_SIZE; + mbox->rx_size = MBOX_DOWN_RX_SIZE; + break; + case MBOX_DIR_PFAF: + case MBOX_DIR_VFPF: + mbox->tx_start = MBOX_DOWN_RX_START; + mbox->rx_start = MBOX_DOWN_TX_START; + mbox->tx_size = MBOX_DOWN_RX_SIZE; + mbox->rx_size = MBOX_DOWN_TX_SIZE; + break; + case MBOX_DIR_AFPF_UP: + case MBOX_DIR_PFVF_UP: + mbox->tx_start = MBOX_UP_TX_START; + mbox->rx_start = MBOX_UP_RX_START; + mbox->tx_size = MBOX_UP_TX_SIZE; + mbox->rx_size = MBOX_UP_RX_SIZE; + break; + case MBOX_DIR_PFAF_UP: + case MBOX_DIR_VFPF_UP: + mbox->tx_start = MBOX_UP_RX_START; + mbox->rx_start = MBOX_UP_TX_START; + mbox->tx_size = MBOX_UP_RX_SIZE; + mbox->rx_size = MBOX_UP_TX_SIZE; + break; + default: + return -ENODEV; + } + + switch (direction) { + case MBOX_DIR_AFPF: + mbox->trigger = RVU_MBOX_AF_AFPFX_TRIGX(1); + mbox->tr_shift = 4; + break; + case MBOX_DIR_AFPF_UP: + mbox->trigger = RVU_MBOX_AF_AFPFX_TRIGX(0); + mbox->tr_shift = 4; + break; + case MBOX_DIR_PFAF: + mbox->trigger = RVU_MBOX_PF_PFAF_TRIGX(0); + 
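/* Illustration (assumed from the common otx2 mbox send path, not part of
 * this patch): the sender rings the peer's doorbell with
 *	writeq(data, mbox->reg_base + (mbox->trigger | (devid << mbox->tr_shift)));
 * so tr_shift == 4 where one side addresses many peers through a 16-byte
 * register stride (AF => PF, PF => VF); e.g. PF3 in MBOX_DIR_AFPF would be
 * signalled at RVU_MBOX_AF_AFPFX_TRIGX(1) | (3 << 4). Directions with a
 * single target (PF => AF, VF => PF) use tr_shift == 0.
 */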
mbox->tr_shift = 0; + break; + case MBOX_DIR_PFAF_UP: + mbox->trigger = RVU_MBOX_PF_PFAF_TRIGX(1); + mbox->tr_shift = 0; + break; + case MBOX_DIR_PFVF: + mbox->trigger = RVU_MBOX_PF_VFX_PFVF_TRIGX(1); + mbox->tr_shift = 4; + break; + case MBOX_DIR_PFVF_UP: + mbox->trigger = RVU_MBOX_PF_VFX_PFVF_TRIGX(0); + mbox->tr_shift = 4; + break; + case MBOX_DIR_VFPF: + mbox->trigger = RVU_MBOX_VF_VFPF_TRIGX(0); + mbox->tr_shift = 0; + break; + case MBOX_DIR_VFPF_UP: + mbox->trigger = RVU_MBOX_VF_VFPF_TRIGX(1); + mbox->tr_shift = 0; + break; + default: + return -ENODEV; + } + mbox->reg_base = reg_base; + mbox->pdev = pdev; + + mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL); + if (!mbox->dev) { + otx2_mbox_destroy(mbox); + return -ENOMEM; + } + mbox->ndevs = ndevs; + + return 0; +} + static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev, void *reg_base, int direction, int ndevs) { + if (is_cn20k(pdev)) + return cn20k_mbox_setup(mbox, pdev, reg_base, + direction, ndevs); + switch (direction) { case MBOX_DIR_AFPF: case MBOX_DIR_PFVF: @@ -188,14 +282,13 @@ int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid) { unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT); struct otx2_mbox_dev *mdev = &mbox->dev[devid]; - struct device *sender = &mbox->pdev->dev; while (!time_after(jiffies, timeout)) { if (mdev->num_msgs == mdev->msgs_acked) return 0; usleep_range(800, 1000); } - dev_dbg(sender, "timed out while waiting for rsp\n"); + trace_otx2_msg_wait_rsp(mbox->pdev); return -EIO; } EXPORT_SYMBOL(otx2_mbox_wait_for_rsp); @@ -219,6 +312,7 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data) struct otx2_mbox_dev *mdev = &mbox->dev[devid]; struct mbox_hdr *tx_hdr, *rx_hdr; void *hw_mbase = mdev->hwbase; + struct mbox_msghdr *msg; u64 intr_val; tx_hdr = hw_mbase + mbox->tx_start; @@ -234,7 +328,10 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data) spin_lock(&mdev->mbox_lock); - tx_hdr->msg_size = mdev->msg_size; + if (!tx_hdr->sig) { + tx_hdr->msg_size = mdev->msg_size; + tx_hdr->num_msgs = mdev->num_msgs; + } /* Reset header for next messages */ mdev->msg_size = 0; @@ -248,10 +345,12 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data) * messages. So this should be written after writing all the messages * to the shared memory. 
*/ - tx_hdr->num_msgs = mdev->num_msgs; rx_hdr->num_msgs = 0; - trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size); + msg = (struct mbox_msghdr *)(hw_mbase + mbox->tx_start + msgs_offset); + + trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size, + msg->id, msg->pcifunc); spin_unlock(&mdev->mbox_lock); @@ -306,6 +405,7 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, { struct otx2_mbox_dev *mdev = &mbox->dev[devid]; struct mbox_msghdr *msghdr = NULL; + struct mbox_hdr *mboxhdr = NULL; spin_lock(&mdev->mbox_lock); size = ALIGN(size, MBOX_MSG_ALIGN); @@ -329,6 +429,11 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, mdev->msg_size += size; mdev->rsp_size += size_rsp; msghdr->next_msgoff = mdev->msg_size + msgs_offset; + + mboxhdr = mdev->mbase + mbox->tx_start; + /* Clear the msg header region */ + memset(mboxhdr, 0, msgs_offset); + exit: spin_unlock(&mdev->mbox_lock); @@ -445,6 +550,14 @@ const char *otx2_mbox_id2name(u16 id) #define M(_name, _id, _1, _2, _3) case _id: return # _name; MBOX_MESSAGES #undef M + +#define M(_name, _id, _1, _2, _3) case _id: return # _name; + MBOX_UP_CGX_MESSAGES +#undef M + +#define M(_name, _id, _1, _2, _3) case _id: return # _name; + MBOX_UP_CPT_MESSAGES +#undef M default: return "INVALID ID"; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 005ca8a056c0..933073cd2280 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -10,9 +10,11 @@ #include <linux/etherdevice.h> #include <linux/sizes.h> +#include <linux/ethtool.h> #include "rvu_struct.h" #include "common.h" +#include "cn20k/struct.h" #define MBOX_SIZE SZ_64K @@ -50,6 +52,11 @@ #define MBOX_DIR_PFVF_UP 6 /* PF sends messages to VF */ #define MBOX_DIR_VFPF_UP 7 /* VF replies to PF */ +enum { + TYPE_AFVF, + TYPE_AFPF, +}; + struct otx2_mbox_dev { void *mbase; /* This dev's mbox region */ void *hwbase; @@ -78,6 +85,8 @@ struct otx2_mbox { struct mbox_hdr { u64 msg_size; /* Total msgs size embedded */ u16 num_msgs; /* No of msgs embedded */ + u16 opt_msg; + u8 sig; }; /* Header which precedes every msg and is also part of it */ @@ -524,6 +533,8 @@ struct get_hw_cap_rsp { u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */ u8 nix_shaping; /* Is shaping and coloring supported */ u8 npc_hash_extract; /* Is hash extract supported */ +#define HW_CAP_MACSEC BIT_ULL(1) + u64 hw_caps; }; /* CGX mbox message formats */ @@ -648,11 +659,17 @@ struct cgx_lmac_fwdata_s { u64 supported_link_modes; /* only applicable if AN is supported */ u64 advertised_fec; - u64 advertised_link_modes; + u64 advertised_link_modes_own:1; /* CGX_CMD_OWN */ + u64 advertised_link_modes:63; /* Only applicable if SFP/QSFP slot is present */ struct sfp_eeprom_s sfp_eeprom; struct phy_s phy; -#define LMAC_FWDATA_RESERVED_MEM 1021 + u32 lmac_type; + u32 portm_idx; + u64 mgmt_port:1; + u64 advertised_an:1; + u64 port; +#define LMAC_FWDATA_RESERVED_MEM 1018 u64 reserved[LMAC_FWDATA_RESERVED_MEM]; }; @@ -665,12 +682,13 @@ struct cgx_set_link_mode_args { u32 speed; u8 duplex; u8 an; - u8 ports; + u8 mode_baseidx; + u8 multimode; u64 mode; + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); }; struct cgx_set_link_mode_req { -#define AUTONEG_UNKNOWN 0xff struct mbox_msghdr hdr; struct cgx_set_link_mode_args args; }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c 
b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c index 655dd4726d36..d7030dfa5dad 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c @@ -97,7 +97,7 @@ int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event) if (pcifunc & RVU_PFVF_FUNC_MASK) pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)]; else - pfvf = &mcs->pf[rvu_get_pf(pcifunc)]; + pfvf = &mcs->pf[rvu_get_pf(rvu->pdev, pcifunc)]; event->intr_mask &= pfvf->intr_mask; @@ -123,7 +123,7 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu) struct mcs_intr_info *req; int pf; - pf = rvu_get_pf(event->pcifunc); + pf = rvu_get_pf(rvu->pdev, event->pcifunc); mutex_lock(&rvu->mbox_lock); @@ -143,6 +143,8 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu) otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf); + otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf); + mutex_unlock(&rvu->mbox_lock); return 0; @@ -191,7 +193,7 @@ int rvu_mbox_handler_mcs_intr_cfg(struct rvu *rvu, if (pcifunc & RVU_PFVF_FUNC_MASK) pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)]; else - pfvf = &mcs->pf[rvu_get_pf(pcifunc)]; + pfvf = &mcs->pf[rvu_get_pf(rvu->pdev, pcifunc)]; mcs->pf_map[0] = pcifunc; pfvf->intr_mask = req->intr_mask; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 6575c422635b..c6bb3aaa8e0d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -20,6 +20,8 @@ #include "rvu_trace.h" #include "rvu_npc_hash.h" +#include "cn20k/reg.h" +#include "cn20k/api.h" #define DRV_NAME "rvu_af" #define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver" @@ -34,10 +36,8 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, int type, int num, void (mbox_handler)(struct work_struct *), void (mbox_up_handler)(struct work_struct *)); -enum { - TYPE_AFVF, - TYPE_AFPF, -}; +static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq); +static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq); /* Supported devices */ static const struct pci_device_id rvu_id_table[] = { @@ -294,7 +294,7 @@ int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc) devnum = rvu_get_hwvf(rvu, pcifunc); } else { is_pf = true; - devnum = rvu_get_pf(pcifunc); + devnum = rvu_get_pf(rvu->pdev, pcifunc); } /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or @@ -359,7 +359,7 @@ static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf, devnum = rvu_get_hwvf(rvu, pcifunc); } else { is_pf = true; - devnum = rvu_get_pf(pcifunc); + devnum = rvu_get_pf(rvu->pdev, pcifunc); } block->fn_map[lf] = attach ? 
pcifunc : 0; @@ -400,11 +400,6 @@ static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf, rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs); } -inline int rvu_get_pf(u16 pcifunc) -{ - return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; -} - void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf) { u64 cfg; @@ -422,7 +417,7 @@ int rvu_get_hwvf(struct rvu *rvu, int pcifunc) int pf, func; u64 cfg; - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); func = pcifunc & RVU_PFVF_FUNC_MASK; /* Get first HWVF attached to this PF */ @@ -437,7 +432,7 @@ struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc) if (pcifunc & RVU_PFVF_FUNC_MASK) return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)]; else - return &rvu->pf[rvu_get_pf(pcifunc)]; + return &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)]; } static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc) @@ -445,7 +440,7 @@ static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc) int pf, vf, nvfs; u64 cfg; - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); if (pf >= rvu->hw->total_pfs) return false; @@ -760,6 +755,11 @@ static void rvu_free_hw_resources(struct rvu *rvu) rvu_reset_msix(rvu); mutex_destroy(&rvu->rsrc_lock); + + /* Free the QINT/CINT memory */ + pfvf = &rvu->pf[RVU_AFPF]; + qmem_free(rvu->dev, pfvf->nix_qints_ctx); + qmem_free(rvu->dev, pfvf->cq_ints_ctx); } static void rvu_setup_pfvf_macaddress(struct rvu *rvu) @@ -1393,8 +1393,6 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) if (blkaddr < 0) return; - if (blktype == BLKTYPE_NIX) - rvu_nix_reset_mac(pfvf, pcifunc); block = &hw->block[blkaddr]; @@ -1407,6 +1405,10 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) if (lf < 0) /* This should never happen */ continue; + if (blktype == BLKTYPE_NIX) { + rvu_nix_reset_mac(pfvf, pcifunc); + rvu_npc_clear_ucast_entry(rvu, pcifunc, lf); + } /* Disable the LF */ rvu_write64(rvu, blkaddr, block->lfcfg_reg | (lf << block->lfshift), 0x00ULL); @@ -1485,7 +1487,7 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); /* All CGX mapped PFs are set with assigned NIX block during init */ - if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { + if (is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) { blkaddr = pf->nix_blkaddr; } else if (is_lbk_vf(rvu, pcifunc)) { vf = pcifunc - 1; @@ -1499,7 +1501,7 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) } /* if SDP1 then the blkaddr is NIX1 */ - if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1) + if (is_sdp_pfvf(rvu, pcifunc) && pf->sdp_info->node_id == 1) blkaddr = BLKADDR_NIX1; switch (blkaddr) { @@ -2004,7 +2006,7 @@ int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req, vf = pcifunc & RVU_PFVF_FUNC_MASK; cfg = rvu_read64(rvu, BLKADDR_RVUM, - RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc))); + RVU_PRIV_PFX_CFG(rvu_get_pf(rvu->pdev, pcifunc))); numvfs = (cfg >> 12) & 0xFF; if (vf && vf <= numvfs) @@ -2031,6 +2033,9 @@ int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req, rsp->nix_shaping = hw->cap.nix_shaping; rsp->npc_hash_extract = hw->cap.npc_hash_extract; + if (rvu->mcs_blk_cnt) + rsp->hw_caps = HW_CAP_MACSEC; + return 0; } @@ -2173,7 +2178,7 @@ static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid, if (rsp && err) \ rsp->hdr.rc = err; \ \ - trace_otx2_msg_process(mbox->pdev, _id, err); \ + trace_otx2_msg_process(mbox->pdev, _id, err, req->pcifunc); \ return rsp ? 
err : -ENOMEM; \ } MBOX_MESSAGES @@ -2218,15 +2223,30 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll) offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN); + if (req_hdr->sig && !(is_rvu_otx2(rvu) || is_cn20k(rvu->pdev))) { + req_hdr->opt_msg = mw->mbox_wrk[devid].num_msgs; + rvu_write64(rvu, BLKADDR_NIX0, RVU_AF_BAR2_SEL, + RVU_AF_BAR2_PFID); + if (type == TYPE_AFPF) + rvu_write64(rvu, BLKADDR_NIX0, + AF_BAR2_ALIASX(0, NIX_CINTX_INT_W1S(devid)), + 0x1); + else + rvu_write64(rvu, BLKADDR_NIX0, + AF_BAR2_ALIASX(0, NIX_QINTX_CNT(devid)), + 0x1); + usleep_range(5000, 6000); + goto done; + } + for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) { msg = mdev->mbase + offset; /* Set which PF/VF sent this message based on mbox IRQ */ switch (type) { case TYPE_AFPF: - msg->pcifunc &= - ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT); - msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT); + msg->pcifunc &= rvu_pcifunc_pf_mask(rvu->pdev); + msg->pcifunc |= rvu_make_pcifunc(rvu->pdev, devid, 0); break; case TYPE_AFVF: msg->pcifunc &= @@ -2244,16 +2264,17 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll) if (msg->pcifunc & RVU_PFVF_FUNC_MASK) dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n", err, otx2_mbox_id2name(msg->id), - msg->id, rvu_get_pf(msg->pcifunc), + msg->id, rvu_get_pf(rvu->pdev, msg->pcifunc), (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1); else dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n", err, otx2_mbox_id2name(msg->id), msg->id, devid); } +done: mw->mbox_wrk[devid].num_msgs = 0; - if (poll) + if (!is_cn20k(mbox->pdev) && poll) otx2_mbox_wait_for_zero(mbox, devid); /* Send mbox responses to VF/PF */ @@ -2359,13 +2380,21 @@ static inline void rvu_afvf_mbox_up_handler(struct work_struct *work) __rvu_mbox_up_handler(mwork, TYPE_AFVF); } -static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, +static int rvu_get_mbox_regions(struct rvu *rvu, void __iomem **mbox_addr, int num, int type, unsigned long *pf_bmap) { struct rvu_hwinfo *hw = rvu->hw; int region; u64 bar4; + /* For cn20k platform AF mailbox region is allocated by software + * and the corresponding IOVA is programmed in hardware unlike earlier + * silicons where software uses the hardware region after ioremap. + */ + if (is_cn20k(rvu->pdev)) + return cn20k_rvu_get_mbox_regions(rvu, (void *)mbox_addr, + num, type, pf_bmap); + /* For cn10k platform VF mailbox regions of a PF follows after the * PF <-> AF mailbox region. Whereas for Octeontx2 it is read from * RVU_PF_VF_BAR4_ADDR register. 
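As a contrast to the ioremap path below, a minimal sketch of the cn20k scheme described in the comment above: software allocates the region and publishes its IOVA to hardware. This is only an illustration; the pairing of dma_alloc_coherent() with the new RVU_MBOX_AF_PFX_ADDR register is an assumption here, and the real logic lives in cn20k_rvu_get_mbox_regions(), whose body is not part of this hunk.

	void *va;
	dma_addr_t iova;

	/* AF allocates the PF mailbox region itself (sketch; the driver
	 * may use its own allocator rather than dma_alloc_coherent())...
	 */
	va = dma_alloc_coherent(rvu->dev, MBOX_SIZE, &iova, GFP_KERNEL);
	if (!va)
		return -ENOMEM;
	/* ...and tells the hardware where it lives; pre-cn20k silicon
	 * instead exposes a fixed region that software ioremap_wc()s.
	 */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFX_ADDR(pf), (u64)iova);
	mbox_addr[pf] = va;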
@@ -2384,7 +2413,7 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR); bar4 += region * MBOX_SIZE; } - mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE); + mbox_addr[region] = ioremap_wc(bar4, MBOX_SIZE); if (!mbox_addr[region]) goto error; } @@ -2407,7 +2436,7 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, RVU_AF_PF_BAR4_ADDR); bar4 += region * MBOX_SIZE; } - mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE); + mbox_addr[region] = ioremap_wc(bar4, MBOX_SIZE); if (!mbox_addr[region]) goto error; } @@ -2415,20 +2444,26 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, error: while (region--) - iounmap((void __iomem *)mbox_addr[region]); + iounmap(mbox_addr[region]); return -ENOMEM; } +static struct mbox_ops rvu_mbox_ops = { + .pf_intr_handler = rvu_mbox_pf_intr_handler, + .afvf_intr_handler = rvu_mbox_intr_handler, +}; + static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, int type, int num, void (mbox_handler)(struct work_struct *), void (mbox_up_handler)(struct work_struct *)) { - int err = -EINVAL, i, dir, dir_up; + void __iomem **mbox_regions; + struct ng_rvu *ng_rvu_mbox; + int err, i, dir, dir_up; void __iomem *reg_base; struct rvu_work *mwork; unsigned long *pf_bmap; - void **mbox_regions; const char *name; u64 cfg; @@ -2436,6 +2471,12 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, if (!pf_bmap) return -ENOMEM; + ng_rvu_mbox = kzalloc(sizeof(*ng_rvu_mbox), GFP_KERNEL); + if (!ng_rvu_mbox) { + err = -ENOMEM; + goto free_bitmap; + } + /* RVU VFs */ if (type == TYPE_AFVF) bitmap_set(pf_bmap, 0, num); @@ -2449,12 +2490,20 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, } } + rvu->ng_rvu = ng_rvu_mbox; + + rvu->ng_rvu->rvu_mbox_ops = &rvu_mbox_ops; + + err = cn20k_rvu_mbox_init(rvu, type, num); + if (err) + goto free_mem; + mutex_init(&rvu->mbox_lock); - mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL); + mbox_regions = kcalloc(num, sizeof(void __iomem *), GFP_KERNEL); if (!mbox_regions) { err = -ENOMEM; - goto free_bitmap; + goto free_qmem; } switch (type) { @@ -2477,11 +2526,12 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, goto free_regions; break; default: + err = -EINVAL; goto free_regions; } mw->mbox_wq = alloc_workqueue("%s", - WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, + WQ_HIGHPRI | WQ_MEM_RECLAIM, num, name); if (!mw->mbox_wq) { err = -ENOMEM; @@ -2524,7 +2574,11 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, mwork->rvu = rvu; INIT_WORK(&mwork->work, mbox_up_handler); } - goto free_regions; + + kfree(mbox_regions); + bitmap_free(pf_bmap); + + return 0; exit: destroy_workqueue(mw->mbox_wq); @@ -2533,6 +2587,10 @@ unmap_regions: iounmap((void __iomem *)mbox_regions[num]); free_regions: kfree(mbox_regions); +free_qmem: + cn20k_free_mbox_memory(rvu); +free_mem: + kfree(rvu->ng_rvu); free_bitmap: bitmap_free(pf_bmap); return err; @@ -2559,8 +2617,8 @@ static void rvu_mbox_destroy(struct mbox_wq_info *mw) otx2_mbox_destroy(&mw->mbox_up); } -static void rvu_queue_work(struct mbox_wq_info *mw, int first, - int mdevs, u64 intr) +void rvu_queue_work(struct mbox_wq_info *mw, int first, + int mdevs, u64 intr) { struct otx2_mbox_dev *mdev; struct otx2_mbox *mbox; @@ -2651,6 +2709,11 @@ static void rvu_enable_mbox_intr(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; + if (is_cn20k(rvu->pdev)) { + cn20k_rvu_enable_mbox_intr(rvu); + return; + } + /* Clear spurious 
irqs, if any */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs)); @@ -2768,7 +2831,7 @@ static void rvu_flr_handler(struct work_struct *work) cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); numvfs = (cfg >> 12) & 0xFF; - pcifunc = pf << RVU_PFVF_PF_SHIFT; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); for (vf = 0; vf < numvfs; vf++) __rvu_flr_handler(rvu, (pcifunc | (vf + 1))); @@ -2904,9 +2967,12 @@ static void rvu_unregister_interrupts(struct rvu *rvu) rvu_cpt_unregister_interrupts(rvu); - /* Disable the Mbox interrupt */ - rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C, - INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + if (!is_cn20k(rvu->pdev)) + /* Disable the Mbox interrupt */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C, + INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + else + cn20k_rvu_unregister_interrupts(rvu); /* Disable the PF FLR interrupt */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C, @@ -2939,6 +3005,10 @@ static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu) * VF interrupts can be handled. Offset equal to zero means * that PF vectors are not configured and overlapping AF vectors. */ + if (is_cn20k(rvu->pdev)) + return (pfvf->msix.max >= RVU_AF_CN20K_INT_VEC_CNT + + RVU_MBOX_PF_INT_VEC_CNT) && offset; + return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) && offset; } @@ -2969,18 +3039,30 @@ static int rvu_register_interrupts(struct rvu *rvu) return ret; } - /* Register mailbox interrupt handler */ - sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox"); - ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX), - rvu_mbox_pf_intr_handler, 0, - &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu); - if (ret) { - dev_err(rvu->dev, - "RVUAF: IRQ registration failed for mbox irq\n"); - goto fail; - } + if (!is_cn20k(rvu->pdev)) { + /* Register mailbox interrupt handler */ + sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], + "RVUAF Mbox"); + ret = request_irq(pci_irq_vector + (rvu->pdev, RVU_AF_INT_VEC_MBOX), + rvu->ng_rvu->rvu_mbox_ops->pf_intr_handler, 0, + &rvu->irq_name[RVU_AF_INT_VEC_MBOX * + NAME_SIZE], rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for mbox\n"); + goto fail; + } - rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true; + rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true; + } else { + ret = cn20k_register_afpf_mbox_intr(rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for mbox\n"); + goto fail; + } + } /* Enable mailbox interrupts from all PFs */ rvu_enable_mbox_intr(rvu); @@ -3035,34 +3117,40 @@ static int rvu_register_interrupts(struct rvu *rvu) /* Get PF MSIX vectors offset. */ pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff; + if (!is_cn20k(rvu->pdev)) { + /* Register MBOX0 interrupt. */ + offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0; + sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0"); + ret = request_irq(pci_irq_vector(rvu->pdev, offset), + rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0, + &rvu->irq_name[offset * NAME_SIZE], + rvu); + if (ret) + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for Mbox0\n"); - /* Register MBOX0 interrupt. 
*/ - offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0; - sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0"); - ret = request_irq(pci_irq_vector(rvu->pdev, offset), - rvu_mbox_intr_handler, 0, - &rvu->irq_name[offset * NAME_SIZE], - rvu); - if (ret) - dev_err(rvu->dev, - "RVUAF: IRQ registration failed for Mbox0\n"); - - rvu->irq_allocated[offset] = true; + rvu->irq_allocated[offset] = true; - /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so - * simply increment current offset by 1. - */ - offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1; - sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1"); - ret = request_irq(pci_irq_vector(rvu->pdev, offset), - rvu_mbox_intr_handler, 0, - &rvu->irq_name[offset * NAME_SIZE], - rvu); - if (ret) - dev_err(rvu->dev, - "RVUAF: IRQ registration failed for Mbox1\n"); + /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so + * simply increment current offset by 1. + */ + offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1; + sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1"); + ret = request_irq(pci_irq_vector(rvu->pdev, offset), + rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0, + &rvu->irq_name[offset * NAME_SIZE], + rvu); + if (ret) + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for Mbox1\n"); - rvu->irq_allocated[offset] = true; + rvu->irq_allocated[offset] = true; + } else { + ret = cn20k_register_afvf_mbox_intr(rvu, pf_vec_start); + if (ret) + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for Mbox\n"); + } /* Register FLR interrupt handler for AF's VFs */ offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0; @@ -3173,6 +3261,9 @@ static void rvu_disable_afvf_intr(struct rvu *rvu) { int vfs = rvu->vfs; + if (is_cn20k(rvu->pdev)) + return cn20k_rvu_disable_afvf_intr(rvu, vfs); + rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs)); rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs)); rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs)); @@ -3189,6 +3280,9 @@ static void rvu_enable_afvf_intr(struct rvu *rvu) { int vfs = rvu->vfs; + if (is_cn20k(rvu->pdev)) + return cn20k_rvu_enable_afvf_intr(rvu, vfs); + /* Clear any pending interrupts and enable AF VF interrupts for * the first 64 VFs. 
*/ @@ -3433,6 +3527,9 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) ptp_start(rvu, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate, rvu->fwdata->ptp_ext_tstamp); + /* Alloc CINT and QINT memory */ + rvu_alloc_cint_qint_mem(rvu, &rvu->pf[RVU_AFPF], BLKADDR_NIX0, + (rvu->hw->block[BLKADDR_NIX0].lf.max)); return 0; err_dl: rvu_unregister_dl(rvu); @@ -3484,6 +3581,9 @@ static void rvu_remove(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); devm_kfree(&pdev->dev, rvu->hw); + if (is_cn20k(rvu->pdev)) + cn20k_free_mbox_memory(rvu); + kfree(rvu->ng_rvu); devm_kfree(&pdev->dev, rvu); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 60f085b00a8c..7ee1fdeb5295 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -10,6 +10,7 @@ #include <linux/pci.h> #include <net/devlink.h> +#include <linux/soc/marvell/silicons.h> #include "rvu_struct.h" #include "rvu_devlink.h" @@ -43,12 +44,39 @@ #define MAX_CPT_BLKS 2 /* PF_FUNC */ -#define RVU_PFVF_PF_SHIFT 10 -#define RVU_PFVF_PF_MASK 0x3F -#define RVU_PFVF_FUNC_SHIFT 0 -#define RVU_PFVF_FUNC_MASK 0x3FF +#define RVU_OTX2_PFVF_PF_SHIFT 10 +#define RVU_OTX2_PFVF_PF_MASK 0x3F +#define RVU_PFVF_FUNC_SHIFT 0 +#define RVU_PFVF_FUNC_MASK 0x3FF +#define RVU_CN20K_PFVF_PF_SHIFT 9 +#define RVU_CN20K_PFVF_PF_MASK 0x7F + +static inline u16 rvu_make_pcifunc(struct pci_dev *pdev, int pf, int func) +{ + if (is_cn20k(pdev)) + return ((pf & RVU_CN20K_PFVF_PF_MASK) << + RVU_CN20K_PFVF_PF_SHIFT) | + ((func & RVU_PFVF_FUNC_MASK) << + RVU_PFVF_FUNC_SHIFT); + else + return ((pf & RVU_OTX2_PFVF_PF_MASK) << + RVU_OTX2_PFVF_PF_SHIFT) | + ((func & RVU_PFVF_FUNC_MASK) << + RVU_PFVF_FUNC_SHIFT); +} + +static inline int rvu_pcifunc_pf_mask(struct pci_dev *pdev) +{ + if (is_cn20k(pdev)) + return ~(RVU_CN20K_PFVF_PF_MASK << RVU_CN20K_PFVF_PF_SHIFT); + else + return ~(RVU_OTX2_PFVF_PF_MASK << RVU_OTX2_PFVF_PF_SHIFT); +} + +#define RVU_AFPF 25 #ifdef CONFIG_DEBUG_FS + struct dump_ctx { int lf; int id; @@ -446,6 +474,23 @@ struct mbox_wq_info { struct workqueue_struct *mbox_wq; }; +struct rvu_irq_data { + u64 intr_status; + void (*rvu_queue_work_hdlr)(struct mbox_wq_info *mw, int first, + int mdevs, u64 intr); + void (*afvf_queue_work_hdlr)(struct mbox_wq_info *mw, int first, + int mdevs, u64 intr); + struct rvu *rvu; + int vec_num; + int start; + int mdevs; +}; + +struct mbox_ops { + irqreturn_t (*pf_intr_handler)(int irq, void *rvu_irq); + irqreturn_t (*afvf_intr_handler)(int irq, void *rvu_irq); +}; + struct channel_fwdata { struct sdp_node_info info; u8 valid; @@ -611,6 +656,8 @@ struct rvu { struct list_head rep_evtq_head; /* Representor event lock */ spinlock_t rep_evtq_lock; + + struct ng_rvu *ng_rvu; }; static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) @@ -836,7 +883,6 @@ int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc); void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start); bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc); u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr); -int rvu_get_pf(u16 pcifunc); struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc); void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf); bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr); @@ -865,8 +911,8 @@ void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq); /* SDP APIs */ int rvu_sdp_init(struct rvu *rvu); -bool 
is_sdp_pfvf(u16 pcifunc); -bool is_sdp_pf(u16 pcifunc); +bool is_sdp_pfvf(struct rvu *rvu, u16 pcifunc); +bool is_sdp_pf(struct rvu *rvu, u16 pcifunc); bool is_sdp_vf(struct rvu *rvu, u16 pcifunc); static inline bool is_rep_dev(struct rvu *rvu, u16 pcifunc) @@ -877,11 +923,21 @@ static inline bool is_rep_dev(struct rvu *rvu, u16 pcifunc) return false; } +static inline int rvu_get_pf(struct pci_dev *pdev, u16 pcifunc) +{ + if (is_cn20k(pdev)) + return (pcifunc >> RVU_CN20K_PFVF_PF_SHIFT) & + RVU_CN20K_PFVF_PF_MASK; + else + return (pcifunc >> RVU_OTX2_PFVF_PF_SHIFT) & + RVU_OTX2_PFVF_PF_MASK; +} + /* CGX APIs */ static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf) { return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs) && - !is_sdp_pf(pf << RVU_PFVF_PF_SHIFT); + !is_sdp_pf(rvu, rvu_make_pcifunc(rvu->pdev, pf, 0)); } static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id) @@ -893,7 +949,7 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id) static inline bool is_cgx_vf(struct rvu *rvu, u16 pcifunc) { return ((pcifunc & RVU_PFVF_FUNC_MASK) && - is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))); + is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))); } #define M(_name, _id, fn_name, req, rsp) \ @@ -901,6 +957,10 @@ int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *); MBOX_MESSAGES #undef M +/* Mbox APIs */ +void rvu_queue_work(struct mbox_wq_info *mw, int first, + int mdevs, u64 intr); + int rvu_cgx_init(struct rvu *rvu); int rvu_cgx_exit(struct rvu *rvu); void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu); @@ -955,7 +1015,8 @@ int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx, u16 mcam_index); void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc); - +int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf *pfvf, + int blkaddr, int nixlf); /* NPC APIs */ void rvu_npc_freemem(struct rvu *rvu); int rvu_npc_get_pkind(struct rvu *rvu, u16 pf); @@ -969,8 +1030,6 @@ void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf, bool enable); void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan); -void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf, - bool enable); void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan); void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, @@ -996,6 +1055,8 @@ void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc, void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc, int blkaddr, int *alloc_cnt, int *enable_cnt); +void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf); + bool is_npc_intf_tx(u8 intf); bool is_npc_intf_rx(u8 intf); bool is_npc_interface_valid(struct rvu *rvu, u8 intf); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 992fa0b82e8d..3303c475414a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -34,7 +34,7 @@ static struct _req_type __maybe_unused \ return NULL; \ req->hdr.sig = OTX2_MBOX_REQ_SIG; \ req->hdr.id = _id; \ - trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req)); \ + trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req), 0); \ return req; \ } @@ -272,6 +272,8 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu) 
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid); + otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid); + mutex_unlock(&rvu->mbox_lock); } while (pfmap); } @@ -455,7 +457,7 @@ int rvu_cgx_exit(struct rvu *rvu) inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc) { if ((pcifunc & RVU_PFVF_FUNC_MASK) || - !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) + !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) return false; return true; } @@ -482,7 +484,7 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable) int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; @@ -499,7 +501,7 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; @@ -524,7 +526,7 @@ int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable) void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); int i = 0, lmac_count = 0; struct mac_ops *mac_ops; u8 max_dmac_filters; @@ -575,7 +577,7 @@ int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req, static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req, void *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct mac_ops *mac_ops; int stat = 0, err = 0; u64 tx_stat, rx_stat; @@ -631,7 +633,7 @@ int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req, int rvu_mbox_handler_cgx_stats_rst(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct rvu_pfvf *parent_pf; struct mac_ops *mac_ops; u8 cgx_idx, lmac; @@ -661,7 +663,7 @@ int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu, struct msg_req *req, struct cgx_fec_stats_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct mac_ops *mac_ops; u8 cgx_idx, lmac; void *cgxd; @@ -679,17 +681,20 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); + struct rvu_pfvf *pfvf; u8 cgx_id, lmac_id; - if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) - return -EPERM; + if (!is_pf_cgxmapped(rvu, pf)) + return LMAC_AF_ERR_PF_NOT_MAPPED; if (rvu_npc_exact_has_match_table(rvu)) return rvu_npc_exact_mac_addr_set(rvu, req, rsp); rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + pfvf = &rvu->pf[pf]; + ether_addr_copy(pfvf->mac_addr, req->mac_addr); cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr); return 0; @@ -699,7 +704,7 @@ int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu, struct cgx_mac_addr_add_req *req, struct cgx_mac_addr_add_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; int rc = 0; @@ -723,7 +728,7 @@ int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu, struct cgx_mac_addr_del_req *req, struct msg_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, 
req->hdr.pcifunc)) @@ -741,7 +746,7 @@ int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu, struct cgx_max_dmac_entries_get_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; /* If msg is received from PFs(which are not mapped to CGX LMACs) @@ -767,20 +772,12 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); - u8 cgx_id, lmac_id; - int rc = 0; - u64 cfg; + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); - if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) - return -EPERM; - - rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, req->hdr.pcifunc))) + return LMAC_AF_ERR_PF_NOT_MAPPED; - rsp->hdr.rc = rc; - cfg = cgx_lmac_addr_get(cgx_id, lmac_id); - /* copy 48 bit mac address to req->mac_addr */ - u64_to_ether_addr(cfg, rsp->mac_addr); + ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); return 0; } @@ -788,7 +785,7 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) @@ -807,7 +804,7 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req, int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) @@ -826,7 +823,7 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req, static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; @@ -862,7 +859,7 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { - if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc))) + if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, req->hdr.pcifunc))) return -EPERM; return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true); @@ -876,7 +873,7 @@ int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req, static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, pcifunc)) @@ -915,7 +912,7 @@ int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req, u8 cgx_id, lmac_id; int pf, err; - pf = rvu_get_pf(req->hdr.pcifunc); + pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); if (!is_pf_cgxmapped(rvu, pf)) return -ENODEV; @@ -931,7 +928,7 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu, struct msg_req *req, struct cgx_features_info_msg *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_idx, lmac; void *cgxd; @@ -973,7 +970,7 @@ u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac) static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en) { - int pf = rvu_get_pf(pcifunc); + int pf = 
rvu_get_pf(rvu->pdev, pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; @@ -1003,7 +1000,7 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req, int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 rx_pfc = 0, tx_pfc = 0; struct mac_ops *mac_ops; u8 cgx_id, lmac_id; @@ -1044,7 +1041,7 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu, struct cgx_pause_frm_cfg *req, struct cgx_pause_frm_cfg *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; int err = 0; @@ -1071,7 +1068,7 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu, int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_pf_cgxmapped(rvu, pf)) @@ -1104,7 +1101,7 @@ int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, /* Assumes LF of a PF and all of its VF belongs to the same * NIX block */ - pcifunc = pf << RVU_PFVF_PF_SHIFT; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) return 0; @@ -1131,10 +1128,10 @@ int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start) struct rvu_pfvf *parent_pf, *pfvf; int cgx_users, err = 0; - if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) + if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) return 0; - parent_pf = &rvu->pf[rvu_get_pf(pcifunc)]; + parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)]; pfvf = rvu_get_pfvf(rvu, pcifunc); mutex_lock(&rvu->cgx_cfg_lock); @@ -1177,7 +1174,7 @@ int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu, struct fec_mode *req, struct fec_mode *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_pf_cgxmapped(rvu, pf)) @@ -1193,7 +1190,7 @@ int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu, int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req, struct cgx_fw_data *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!rvu->fwdata) @@ -1220,7 +1217,8 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu, struct cgx_set_link_mode_req *req, struct cgx_set_link_mode_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); + struct cgx_lmac_fwdata_s *linkmodes; u8 cgx_idx, lmac; void *cgxd; @@ -1229,14 +1227,20 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu, rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); cgxd = rvu_cgx_pdata(cgx_idx, rvu); - rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac); + if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX) + linkmodes = &rvu->fwdata->cgx_fw_data_usx[cgx_idx][lmac]; + else + linkmodes = &rvu->fwdata->cgx_fw_data[cgx_idx][lmac]; + + rsp->status = cgx_set_link_mode(cgxd, req->args, linkmodes, + cgx_idx, lmac); return 0; } int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req, struct msg_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) @@ -1254,7 +1258,7 @@ int 
rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu, struct cgx_mac_addr_update_req *req, struct cgx_mac_addr_update_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) @@ -1270,7 +1274,7 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu, int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause, u16 pfc_en) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 rx_8023 = 0, tx_8023 = 0; struct mac_ops *mac_ops; u8 cgx_id, lmac_id; @@ -1308,7 +1312,7 @@ int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, struct cgx_pfc_cfg *req, struct cgx_pfc_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; @@ -1333,7 +1337,7 @@ int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, void rvu_mac_reset(struct rvu *rvu, u16 pcifunc) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); struct mac_ops *mac_ops; struct cgx *cgxd; u8 cgx, lmac; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c index 7fa98aeb3663..d2163da28d18 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -13,19 +13,26 @@ /* RVU LMTST */ #define LMT_TBL_OP_READ 0 #define LMT_TBL_OP_WRITE 1 -#define LMT_MAP_TABLE_SIZE (128 * 1024) #define LMT_MAPTBL_ENTRY_SIZE 16 +#define LMT_MAX_VFS 256 + +#define LMT_MAP_ENTRY_ENA BIT_ULL(20) +#define LMT_MAP_ENTRY_LINES GENMASK_ULL(18, 16) /* Function to perform operations (read/write) on lmtst map table */ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, int lmt_tbl_op) { void __iomem *lmt_map_base; - u64 tbl_base; + u64 tbl_base, cfg; + int pfs, vfs; tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + cfg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG); + vfs = 1 << (cfg & 0xF); + pfs = 1 << ((cfg >> 4) & 0x7); - lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE); + lmt_map_base = ioremap_wc(tbl_base, pfs * vfs * LMT_MAPTBL_ENTRY_SIZE); if (!lmt_map_base) { dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); return -ENOMEM; @@ -35,6 +42,13 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, *val = readq(lmt_map_base + index); } else { writeq((*val), (lmt_map_base + index)); + + cfg = FIELD_PREP(LMT_MAP_ENTRY_ENA, 0x1); + /* 2048 LMTLINES */ + cfg |= FIELD_PREP(LMT_MAP_ENTRY_LINES, 0x6); + + writeq(cfg, (lmt_map_base + (index + 8))); + /* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S * changes effective. Write 1 for flush and read is being used as a * barrier and sets up a data dependency. 
Write to 0 after a write @@ -52,7 +66,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, #define LMT_MAP_TBL_W1_OFF 8 static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) { - return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) + + return ((rvu_get_pf(rvu->pdev, pcifunc) * LMT_MAX_VFS) + (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE; } @@ -69,7 +83,7 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc, mutex_lock(&rvu->rsrc_lock); rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova); - pf = rvu_get_pf(pcifunc) & 0x1F; + pf = rvu_get_pf(rvu->pdev, pcifunc) & RVU_OTX2_PFVF_PF_MASK; val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 | ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF); rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val); @@ -141,7 +155,7 @@ int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu, int err = 0; u64 val; - /* Check if PF_FUNC wants to use it's own local memory as LMTLINE + /* Check if PF_FUNC wants to use its own local memory as LMTLINE * region, if so, convert that IOVA to physical address and * populate LMT table with that address */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c index 3c5bbaf12e59..f404117bf6c8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c @@ -410,7 +410,7 @@ static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc) { int cpt_pf_num = rvu->cpt_pf_num; - if (rvu_get_pf(pcifunc) != cpt_pf_num) + if (rvu_get_pf(rvu->pdev, pcifunc) != cpt_pf_num) return false; if (pcifunc & RVU_PFVF_FUNC_MASK) return false; @@ -422,7 +422,7 @@ static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc) { int cpt_pf_num = rvu->cpt_pf_num; - if (rvu_get_pf(pcifunc) != cpt_pf_num) + if (rvu_get_pf(rvu->pdev, pcifunc) != cpt_pf_num) return false; if (!(pcifunc & RVU_PFVF_FUNC_MASK)) return false; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index a1f9ec03c2ce..8375f18c8e07 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -553,6 +553,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, u64 lmt_addr, val, tbl_base; int pf, vf, num_vfs, hw_vfs; void __iomem *lmt_map_base; + int apr_pfs, apr_vfs; int buf_size = 10240; size_t off = 0; int index = 0; @@ -568,8 +569,12 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, return -ENOMEM; tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + val = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG); + apr_vfs = 1 << (val & 0xF); + apr_pfs = 1 << ((val >> 4) & 0x7); - lmt_map_base = ioremap_wc(tbl_base, 128 * 1024); + lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs * + LMT_MAPTBL_ENTRY_SIZE); if (!lmt_map_base) { dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); kfree(buf); @@ -591,7 +596,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t", pf); - index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE; + index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE; off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t", (tbl_base + index)); lmt_addr = readq(lmt_map_base + index); @@ -604,7 +609,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, /* Reading num of VFs per PF */ rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs); for (vf = 
0; vf < num_vfs; vf++) { - index = (pf * rvu->hw->total_vfs * 16) + + index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) + ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE); off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d:VF%d \t\t", pf, vf); @@ -683,7 +688,7 @@ static int get_max_column_width(struct rvu *rvu) for (pf = 0; pf < rvu->hw->total_pfs; pf++) { for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { - pcifunc = pf << 10 | vf; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf); if (!pcifunc) continue; @@ -754,7 +759,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { off = 0; flag = 0; - pcifunc = pf << 10 | vf; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf); if (!pcifunc) continue; @@ -837,7 +842,7 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) cgx[0] = 0; lmac[0] = 0; - pcifunc = pf << 10; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); pfvf = rvu_get_pfvf(rvu, pcifunc); if (pfvf->nix_blkaddr == BLKADDR_NIX0) @@ -862,6 +867,71 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL); +static int rvu_dbg_rvu_fwdata_display(struct seq_file *s, void *unused) +{ + struct rvu *rvu = s->private; + struct rvu_fwdata *fwdata; + u8 mac[ETH_ALEN]; + int count = 0, i; + + if (!rvu->fwdata) + return -EAGAIN; + + fwdata = rvu->fwdata; + seq_puts(s, "\nRVU Firmware Data:\n"); + seq_puts(s, "\n\t\tPTP INFORMATION\n"); + seq_puts(s, "\t\t===============\n"); + seq_printf(s, "\t\texternal clockrate \t :%x\n", + fwdata->ptp_ext_clk_rate); + seq_printf(s, "\t\texternal timestamp \t :%x\n", + fwdata->ptp_ext_tstamp); + seq_puts(s, "\n"); + + seq_puts(s, "\n\t\tSDP CHANNEL INFORMATION\n"); + seq_puts(s, "\t\t=======================\n"); + seq_printf(s, "\t\tValid \t\t\t :%x\n", fwdata->channel_data.valid); + seq_printf(s, "\t\tNode ID \t\t :%x\n", + fwdata->channel_data.info.node_id); + seq_printf(s, "\t\tNumber of VFs \t\t :%x\n", + fwdata->channel_data.info.max_vfs); + seq_printf(s, "\t\tNumber of PF-Rings \t :%x\n", + fwdata->channel_data.info.num_pf_rings); + seq_printf(s, "\t\tPF SRN \t\t\t :%x\n", + fwdata->channel_data.info.pf_srn); + seq_puts(s, "\n"); + + seq_puts(s, "\n\t\tPF-INDEX MACADDRESS\n"); + seq_puts(s, "\t\t====================\n"); + for (i = 0; i < PF_MACNUM_MAX; i++) { + u64_to_ether_addr(fwdata->pf_macs[i], mac); + if (!is_zero_ether_addr(mac)) { + seq_printf(s, "\t\t %d %pM\n", i, mac); + count++; + } + } + + if (!count) + seq_puts(s, "\t\tNo valid address found\n"); + + seq_puts(s, "\n\t\tVF-INDEX MACADDRESS\n"); + seq_puts(s, "\t\t====================\n"); + count = 0; + for (i = 0; i < VF_MACNUM_MAX; i++) { + u64_to_ether_addr(fwdata->vf_macs[i], mac); + if (!is_zero_ether_addr(mac)) { + seq_printf(s, "\t\t %d %pM\n", i, mac); + count++; + } + } + + if (!count) + seq_puts(s, "\t\tNo valid address found\n"); + + return 0; +} + +RVU_DEBUG_SEQ_FOPS(rvu_fwdata, rvu_fwdata_display, NULL); + static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf, u16 *pcifunc) { @@ -2618,10 +2688,10 @@ static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused) pcifunc = ipolicer->pfvf_map[idx]; if (!(pcifunc & RVU_PFVF_FUNC_MASK)) seq_printf(m, "Allocated to :: PF %d\n", - rvu_get_pf(pcifunc)); + rvu_get_pf(rvu->pdev, pcifunc)); else seq_printf(m, "Allocated to :: PF %d VF %d\n", - rvu_get_pf(pcifunc), + rvu_get_pf(rvu->pdev, pcifunc), (pcifunc & RVU_PFVF_FUNC_MASK) - 1); print_band_prof_ctx(m, 
&aq_rsp.prof); } @@ -2918,6 +2988,97 @@ static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *s, void *unused) RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL); +static int cgx_print_fwdata(struct seq_file *s, int lmac_id) +{ + struct cgx_lmac_fwdata_s *fwdata; + void *cgxd = s->private; + struct phy_s *phy; + struct rvu *rvu; + int cgx_id, i; + + rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_OCTEONTX2_RVU_AF, NULL)); + if (!rvu) + return -ENODEV; + + if (!rvu->fwdata) + return -EAGAIN; + + cgx_id = cgx_get_cgxid(cgxd); + + if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX) + fwdata = &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id]; + else + fwdata = &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id]; + + seq_puts(s, "\nFIRMWARE SHARED:\n"); + seq_puts(s, "\t\tSUPPORTED LINK INFORMATION\t\t\n"); + seq_puts(s, "\t\t==========================\n"); + seq_printf(s, "\t\t Link modes \t\t :%llx\n", + fwdata->supported_link_modes); + seq_printf(s, "\t\t Autoneg \t\t :%llx\n", fwdata->supported_an); + seq_printf(s, "\t\t FEC \t\t\t :%llx\n", fwdata->supported_fec); + seq_puts(s, "\n"); + + seq_puts(s, "\t\tADVERTISED LINK INFORMATION\t\t\n"); + seq_puts(s, "\t\t==========================\n"); + seq_printf(s, "\t\t Link modes \t\t :%llx\n", + (u64)fwdata->advertised_link_modes); + seq_printf(s, "\t\t Autoneg \t\t :%x\n", fwdata->advertised_an); + seq_printf(s, "\t\t FEC \t\t\t :%llx\n", fwdata->advertised_fec); + seq_puts(s, "\n"); + + seq_puts(s, "\t\tLMAC CONFIG\t\t\n"); + seq_puts(s, "\t\t============\n"); + seq_printf(s, "\t\t rw_valid \t\t :%x\n", fwdata->rw_valid); + seq_printf(s, "\t\t lmac_type \t\t :%x\n", fwdata->lmac_type); + seq_printf(s, "\t\t portm_idx \t\t :%x\n", fwdata->portm_idx); + seq_printf(s, "\t\t mgmt_port \t\t :%x\n", fwdata->mgmt_port); + seq_printf(s, "\t\t Link modes own \t :%llx\n", + (u64)fwdata->advertised_link_modes_own); + seq_puts(s, "\n"); + + seq_puts(s, "\n\t\tEEPROM DATA\n"); + seq_puts(s, "\t\t===========\n"); + seq_printf(s, "\t\t sff_id \t\t :%x\n", fwdata->sfp_eeprom.sff_id); + seq_puts(s, "\t\t data \t\t\t :\n"); + seq_puts(s, "\t\t"); + for (i = 0; i < SFP_EEPROM_SIZE; i++) { + seq_printf(s, "%x", fwdata->sfp_eeprom.buf[i]); + if ((i + 1) % 16 == 0) { + seq_puts(s, "\n"); + seq_puts(s, "\t\t"); + } + } + seq_puts(s, "\n"); + + phy = &fwdata->phy; + seq_puts(s, "\n\t\tPHY INFORMATION\n"); + seq_puts(s, "\t\t===============\n"); + seq_printf(s, "\t\t Mod type configurable \t\t :%x\n", + phy->misc.can_change_mod_type); + seq_printf(s, "\t\t Mod type \t\t\t :%x\n", phy->misc.mod_type); + seq_printf(s, "\t\t Support FEC \t\t\t :%x\n", phy->misc.has_fec_stats); + seq_printf(s, "\t\t RSFEC corrected words \t\t :%x\n", + phy->fec_stats.rsfec_corr_cws); + seq_printf(s, "\t\t RSFEC uncorrected words \t :%x\n", + phy->fec_stats.rsfec_uncorr_cws); + seq_printf(s, "\t\t BRFEC corrected words \t\t :%x\n", + phy->fec_stats.brfec_corr_blks); + seq_printf(s, "\t\t BRFEC uncorrected words \t :%x\n", + phy->fec_stats.brfec_uncorr_blks); + seq_puts(s, "\n"); + + return 0; +} + +static int rvu_dbg_cgx_fwdata_display(struct seq_file *s, void *unused) +{ + return cgx_print_fwdata(s, rvu_dbg_derive_lmacid(s)); +} + +RVU_DEBUG_SEQ_FOPS(cgx_fwdata, cgx_fwdata_display, NULL); + static void rvu_dbg_cgx_init(struct rvu *rvu) { struct mac_ops *mac_ops; @@ -2957,6 +3118,9 @@ static void rvu_dbg_cgx_init(struct rvu *rvu) debugfs_create_file_aux_num("mac_filter", 0600, rvu->rvu_dbg.lmac, cgx, lmac_id, &rvu_dbg_cgx_dmac_flt_fops); + debugfs_create_file("fwdata", 
0600, + rvu->rvu_dbg.lmac, cgx, + &rvu_dbg_cgx_fwdata_fops); } } } @@ -2978,10 +3142,10 @@ static void rvu_print_npc_mcam_info(struct seq_file *s, if (!(pcifunc & RVU_PFVF_FUNC_MASK)) seq_printf(s, "\n\t\t Device \t\t: PF%d\n", - rvu_get_pf(pcifunc)); + rvu_get_pf(rvu->pdev, pcifunc)); else seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n", - rvu_get_pf(pcifunc), + rvu_get_pf(rvu->pdev, pcifunc), (pcifunc & RVU_PFVF_FUNC_MASK) - 1); if (entry_acnt) { @@ -3044,13 +3208,13 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued) seq_puts(filp, "\n\t\t Current allocation\n"); seq_puts(filp, "\t\t====================\n"); for (pf = 0; pf < rvu->hw->total_pfs; pf++) { - pcifunc = (pf << RVU_PFVF_PF_SHIFT); + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); rvu_print_npc_mcam_info(filp, pcifunc, blkaddr); cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); numvfs = (cfg >> 12) & 0xFF; for (vf = 0; vf < numvfs; vf++) { - pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1); + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1)); rvu_print_npc_mcam_info(filp, pcifunc, blkaddr); } } @@ -3321,7 +3485,7 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused) mutex_lock(&mcam->lock); list_for_each_entry(iter, &mcam->mcam_rules, list) { - pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; + pf = rvu_get_pf(rvu->pdev, iter->owner); seq_printf(s, "\n\tInstalled by: PF%d ", pf); if (iter->owner & RVU_PFVF_FUNC_MASK) { @@ -3339,7 +3503,7 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused) rvu_dbg_npc_mcam_show_flows(s, iter); if (is_npc_intf_rx(iter->intf)) { target = iter->rx_action.pf_func; - pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; + pf = rvu_get_pf(rvu->pdev, target); seq_printf(s, "\tForward to: PF%d ", pf); if (target & RVU_PFVF_FUNC_MASK) { @@ -3803,6 +3967,9 @@ void rvu_dbg_init(struct rvu *rvu) debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root, rvu, &rvu_dbg_lmtst_map_table_fops); + debugfs_create_file("rvu_fwdata", 0444, rvu->rvu_dbg.root, rvu, + &rvu_dbg_rvu_fwdata_fops); + if (!cgx_get_cgxcnt_max()) goto create; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 613655fcd34f..60db1f616cc8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -315,7 +315,8 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr, if (lvl >= hw->cap.nix_tx_aggr_lvl) { if ((nix_get_tx_link(rvu, map_func) != nix_get_tx_link(rvu, pcifunc)) && - (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))) + (rvu_get_pf(rvu->pdev, map_func) != + rvu_get_pf(rvu->pdev, pcifunc))) return false; else return true; @@ -339,7 +340,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf, bool from_vf; int err; - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && type != NIX_INTF_TYPE_SDP) return 0; @@ -416,7 +417,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf, break; case NIX_INTF_TYPE_SDP: from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); - parent_pf = &rvu->pf[rvu_get_pf(pcifunc)]; + parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)]; sdp_info = parent_pf->sdp_info; if (!sdp_info) { dev_err(rvu->dev, "Invalid sdp_info pointer\n"); @@ -590,12 +591,12 @@ static int nix_bp_disable(struct rvu *rvu, u16 chan_v; u64 cfg; - pf = rvu_get_pf(pcifunc); + pf = 
rvu_get_pf(rvu->pdev, pcifunc); type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) return 0; - if (is_sdp_pfvf(pcifunc)) + if (is_sdp_pfvf(rvu, pcifunc)) type = NIX_INTF_TYPE_SDP; if (cpt_link && !rvu->hw->cpt_links) @@ -736,9 +737,9 @@ static int nix_bp_enable(struct rvu *rvu, u16 chan_v; u64 cfg; - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; - if (is_sdp_pfvf(pcifunc)) + if (is_sdp_pfvf(rvu, pcifunc)) type = NIX_INTF_TYPE_SDP; /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */ @@ -1674,7 +1675,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, } intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; - if (is_sdp_pfvf(pcifunc)) + if (is_sdp_pfvf(rvu, pcifunc)) intf = NIX_INTF_TYPE_SDP; err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp, @@ -1798,7 +1799,8 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg); if (rc < 0) { dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)", - rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); + rvu_get_pf(rvu->pdev, pcifunc), + pcifunc & RVU_PFVF_FUNC_MASK); return NIX_AF_ERR_MARK_CFG_FAIL; } @@ -2050,7 +2052,7 @@ static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr, static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) { struct rvu_hwinfo *hw = rvu->hw; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 cgx_id = 0, lmac_id = 0; if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */ @@ -2068,7 +2070,7 @@ static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc, int link, int *start, int *end) { struct rvu_hwinfo *hw = rvu->hw; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); /* LBK links */ if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) { @@ -2426,7 +2428,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr, { struct nix_smq_flush_ctx *smq_flush_ctx; int err, restore_tx_en = 0, i; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 cgx_id = 0, lmac_id = 0; u16 tl2_tl3_link_schq; u8 link, link_level; @@ -2820,7 +2822,7 @@ void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc, { struct rvu_hwinfo *hw = rvu->hw; int lbk_link_start, lbk_links; - u8 pf = rvu_get_pf(pcifunc); + u8 pf = rvu_get_pf(rvu->pdev, pcifunc); int schq; u64 cfg; @@ -3190,7 +3192,8 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL); if (err) { dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", - rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); + rvu_get_pf(rvu->pdev, pcifunc), + pcifunc & RVU_PFVF_FUNC_MASK); return err; } return 0; @@ -3458,7 +3461,7 @@ int nix_update_mce_list(struct rvu *rvu, u16 pcifunc, dev_err(rvu->dev, "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", __func__, idx, mce_list->max, - pcifunc >> RVU_PFVF_PF_SHIFT); + rvu_get_pf(rvu->pdev, pcifunc)); return -EINVAL; } @@ -3510,7 +3513,8 @@ void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type, struct rvu_pfvf *pfvf; if (!hw->cap.nix_rx_multicast || - !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) { + !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, + pcifunc & ~RVU_PFVF_FUNC_MASK))) { *mce_list = NULL; *mce_idx = 0; return; @@ -3544,13 +3548,13 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 
pcifunc, int pf; /* skip multicast pkt replication for AF's VFs & SDP links */ - if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(pcifunc)) + if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(rvu, pcifunc)) return 0; if (!hw->cap.nix_rx_multicast) return 0; - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); if (!is_pf_cgxmapped(rvu, pf)) return 0; @@ -3619,7 +3623,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) for (idx = 0; idx < (numvfs + 1); idx++) { /* idx-0 is for PF, followed by VFs */ - pcifunc = (pf << RVU_PFVF_PF_SHIFT); + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); pcifunc |= idx; /* Add dummy entries now, so that we don't have to check * for whether AQ_OP should be INIT/WRITE later on. @@ -4554,7 +4558,7 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, static void nix_find_link_frs(struct rvu *rvu, struct nix_frs_cfg *req, u16 pcifunc) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); struct rvu_pfvf *pfvf; int maxlen, minlen; int numvfs, hwvf; @@ -4601,7 +4605,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); int blkaddr, link = -1; struct nix_hw *nix_hw; struct rvu_pfvf *pfvf; @@ -5046,7 +5050,7 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) (ltdefs->rx_apad1.ltype_match << 4) | ltdefs->rx_apad1.ltype_mask); - /* Receive ethertype defination register defines layer + /* Receive ethertype definition register defines layer * information in NPC_RESULT_S to identify the Ethertype * location in L2 header. Used for Ethertype overwriting * in inline IPsec flow. 
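The bulk of the churn in this file, and in the rvu_debugfs.c, rvu_rep.c, rvu_switch.c and rvu_npc_hash.c hunks elsewhere in this diff, is one mechanical conversion: every open-coded "pf << 10 | vf" and every rvu_get_pf(pcifunc) call becomes rvu_make_pcifunc(rvu->pdev, pf, vf) or rvu_get_pf(rvu->pdev, pcifunc), so PF/VF encode/decode now consults the PCI device instead of hard-wired shifts. Below is a minimal userspace sketch of the legacy fixed layout only, built from the RVU_PFVF_* constants that a later hunk in this diff deletes from otx2_common.h; that the new pdev argument exists so the helpers can pick a different bit layout per silicon family is an assumption, not something this diff states.

#include <stdint.h>
#include <stdio.h>

/* Legacy RVU pcifunc layout, copied from the RVU_PFVF_* macros this
 * series deletes from otx2_common.h: PF in bits 15:10, function in
 * bits 9:0.  func == 0 addresses the PF itself; VF n is func n + 1.
 */
#define RVU_PFVF_PF_SHIFT	10
#define RVU_PFVF_PF_MASK	0x3F
#define RVU_PFVF_FUNC_SHIFT	0
#define RVU_PFVF_FUNC_MASK	0x3FF

/* What the old open-coded "pf << 10 | vf" call sites computed */
static uint16_t legacy_make_pcifunc(unsigned int pf, unsigned int func)
{
	return ((pf & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) |
	       ((func & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT);
}

/* What rvu_get_pf(pcifunc) decoded before it grew the pdev argument */
static unsigned int legacy_get_pf(uint16_t pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}

int main(void)
{
	uint16_t pcifunc = legacy_make_pcifunc(2, 4 + 1);	/* PF2, VF4 */

	/* Prints: pcifunc=0x0805 pf=2 vf=4 */
	printf("pcifunc=0x%04x pf=%u vf=%u\n", (unsigned int)pcifunc,
	       legacy_get_pf(pcifunc),
	       (unsigned int)((pcifunc & RVU_PFVF_FUNC_MASK) - 1));
	return 0;
}

The "(pcifunc & RVU_PFVF_FUNC_MASK) - 1" step is the same VF-index arithmetic the debugfs printers earlier in this diff use when rendering "PF %d VF %d".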
@@ -5251,7 +5255,7 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, rvu_switch_update_rules(rvu, pcifunc, true); - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) rvu_rep_notify_pfvf_state(rvu, pcifunc, true); @@ -5284,7 +5288,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, rvu_switch_update_rules(rvu, pcifunc, false); rvu_cgx_tx_enable(rvu, pcifunc, true); - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) rvu_rep_notify_pfvf_state(rvu, pcifunc, false); return 0; @@ -5296,7 +5300,7 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct hwctx_disable_req ctx_req; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; u64 sa_base; @@ -5385,7 +5389,7 @@ static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) int nixlf; u64 cfg; - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) return 0; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 821fe242f821..c7c70429eb6c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -147,7 +147,9 @@ static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf, int type) { - int pf = rvu_get_pf(pcifunc); + struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam); + struct rvu *rvu = hw->rvu; + int pf = rvu_get_pf(rvu->pdev, pcifunc); int index; /* Check if this is for a PF */ @@ -698,7 +700,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, /* RX_ACTION set to MCAST for CGX PF's */ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list && - is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { + is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) { *(u64 *)&action = 0; action.op = NIX_RX_ACTIONOP_MCAST; pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); @@ -820,24 +822,6 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } -void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf, - bool enable) -{ - struct npc_mcam *mcam = &rvu->hw->mcam; - int blkaddr, index; - - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); - if (blkaddr < 0) - return; - - /* Get 'pcifunc' of PF device */ - pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; - - index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, - NIXLF_BCAST_ENTRY); - npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); -} - void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan) { @@ -1125,6 +1109,7 @@ void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc, static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf, bool enable) { + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct npc_mcam *mcam = &rvu->hw->mcam; int index, blkaddr; @@ -1133,9 +1118,12 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc, return; /* Ucast MCAM match entry of this PF/VF */ - index = npc_get_nixlf_mcam_index(mcam, pcifunc, - nixlf, NIXLF_UCAST_ENTRY); - npc_enable_mcam_entry(rvu, mcam, 
blkaddr, index, enable); + if (npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), + pfvf->nix_rx_intf)) { + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); + npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); + } /* Nothing to do for VFs, on platforms where pkt replication * is not supported @@ -3448,7 +3436,7 @@ int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir, { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); int blkaddr, nixlf, rc, intf_mode; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u64 rxpkind, txpkind; u8 cgx_id, lmac_id; @@ -3588,3 +3576,33 @@ int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu, return 0; } + +void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct rvu_npc_mcam_rule *rule; + int ucast_idx, blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); + + npc_enable_mcam_entry(rvu, mcam, blkaddr, ucast_idx, false); + + npc_set_mcam_action(rvu, mcam, blkaddr, ucast_idx, 0); + + npc_clear_mcam_entry(rvu, mcam, blkaddr, ucast_idx); + + mutex_lock(&mcam->lock); + list_for_each_entry(rule, &mcam->mcam_rules, list) { + if (rule->entry == ucast_idx) { + list_del(&rule->list); + kfree(rule); + break; + } + } + mutex_unlock(&mcam->lock); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c index d2661e7fabdb..999f6d93c7fe 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c @@ -1465,7 +1465,7 @@ static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc) { struct npc_exact_table *table; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 cgx_id, lmac_id; u32 drop_mcam_idx; bool *promisc; @@ -1512,7 +1512,7 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc) int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc) { struct npc_exact_table *table; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 cgx_id, lmac_id; u32 drop_mcam_idx; bool *promisc; @@ -1560,7 +1560,7 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc) int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req, struct msg_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u32 seq_id = req->index; struct rvu_pfvf *pfvf; u8 cgx_id, lmac_id; @@ -1593,7 +1593,7 @@ int rvu_npc_exact_mac_addr_update(struct rvu *rvu, struct cgx_mac_addr_update_req *req, struct cgx_mac_addr_update_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct npc_exact_table_entry *entry; struct npc_exact_table *table; struct rvu_pfvf *pfvf; @@ -1675,7 +1675,7 @@ int rvu_npc_exact_mac_addr_add(struct rvu *rvu, struct cgx_mac_addr_add_req *req, struct cgx_mac_addr_add_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct rvu_pfvf *pfvf; u8 cgx_id, lmac_id; int rc = 0; @@ -1711,7 +1711,7 @@ int rvu_npc_exact_mac_addr_del(struct rvu *rvu, struct cgx_mac_addr_del_req *req, struct msg_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = 
rvu_get_pf(rvu->pdev, req->hdr.pcifunc); int rc; rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index); @@ -1736,7 +1736,7 @@ int rvu_npc_exact_mac_addr_del(struct rvu *rvu, int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u32 seq_id = req->index; struct rvu_pfvf *pfvf; u8 cgx_id, lmac_id; @@ -2001,7 +2001,7 @@ int rvu_npc_exact_init(struct rvu *rvu) } /* Filter rules are only for PF */ - pcifunc = RVU_PFFUNC(i, 0); + pcifunc = RVU_PFFUNC(rvu->pdev, i, 0); dev_dbg(rvu->dev, "%s:Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx\n", diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h index 57a09328d46b..cb25cf478f1f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h @@ -139,9 +139,7 @@ static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = { #define NPC_MCAM_DROP_RULE_MAX 30 #define NPC_MCAM_SDP_DROP_RULE_IDX 0 -#define RVU_PFFUNC(pf, func) \ - ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \ - (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT)) +#define RVU_PFFUNC(pdev, pf, func) rvu_make_pcifunc(pdev, pf, func) enum npc_exact_opc_type { NPC_EXACT_OPC_MEM, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c index 052ae5923e3a..03099bc570bd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c @@ -39,7 +39,7 @@ static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event) struct rep_event *msg; int pf; - pf = rvu_get_pf(event->pcifunc); + pf = rvu_get_pf(rvu->pdev, event->pcifunc); if (event->event & RVU_EVENT_MAC_ADDR_CHANGE) ether_addr_copy(pfvf->mac_addr, event->evt_data.mac); @@ -60,6 +60,8 @@ static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event) otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf); + otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf); + mutex_unlock(&rvu->mbox_lock); return 0; } @@ -112,10 +114,10 @@ int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable) struct rep_event *req; int pf; - if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) + if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) return 0; - pf = rvu_get_pf(rvu->rep_pcifunc); + pf = rvu_get_pf(rvu->pdev, rvu->rep_pcifunc); mutex_lock(&rvu->mbox_lock); req = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf); @@ -323,7 +325,7 @@ int rvu_rep_install_mcam_rules(struct rvu *rvu) if (!is_pf_cgxmapped(rvu, pf)) continue; - pcifunc = pf << RVU_PFVF_PF_SHIFT; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); rvu_get_nix_blkaddr(rvu, pcifunc); rep = true; for (i = 0; i < 2; i++) { @@ -343,8 +345,7 @@ int rvu_rep_install_mcam_rules(struct rvu *rvu) rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL); for (vf = 0; vf < numvfs; vf++) { - pcifunc = pf << RVU_PFVF_PF_SHIFT | - ((vf + 1) & RVU_PFVF_FUNC_MASK); + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf + 1); rvu_get_nix_blkaddr(rvu, pcifunc); /* Skip installimg rules if nixlf is not attached */ @@ -452,7 +453,7 @@ int rvu_mbox_handler_get_rep_cnt(struct rvu *rvu, struct msg_req *req, for (pf = 0; pf < rvu->hw->total_pfs; pf++) { if (!is_pf_cgxmapped(rvu, pf)) continue; - pcifunc = pf << RVU_PFVF_PF_SHIFT; + pcifunc = rvu_make_pcifunc(rvu->pdev, 
pf, 0); rvu->rep2pfvf_map[rep] = pcifunc; rsp->rep_pf_map[rep] = pcifunc; rep++; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c index 38cfe148f4b7..e4a5f9fa6fd4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c @@ -17,9 +17,9 @@ /* SDP PF number */ static int sdp_pf_num[MAX_SDP] = {-1, -1}; -bool is_sdp_pfvf(u16 pcifunc) +bool is_sdp_pfvf(struct rvu *rvu, u16 pcifunc) { - u16 pf = rvu_get_pf(pcifunc); + u16 pf = rvu_get_pf(rvu->pdev, pcifunc); u32 found = 0, i = 0; while (i < MAX_SDP) { @@ -34,9 +34,9 @@ bool is_sdp_pfvf(u16 pcifunc) return true; } -bool is_sdp_pf(u16 pcifunc) +bool is_sdp_pf(struct rvu *rvu, u16 pcifunc) { - return (is_sdp_pfvf(pcifunc) && + return (is_sdp_pfvf(rvu, pcifunc) && !(pcifunc & RVU_PFVF_FUNC_MASK)); } @@ -46,7 +46,7 @@ bool is_sdp_vf(struct rvu *rvu, u16 pcifunc) if (!(pcifunc & ~RVU_PFVF_FUNC_MASK)) return (rvu->vf_devid == RVU_SDP_VF_DEVID); - return (is_sdp_pfvf(pcifunc) && + return (is_sdp_pfvf(rvu, pcifunc) && !!(pcifunc & RVU_PFVF_FUNC_MASK)); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index 77ac94cb2ec4..0596a3ac4c12 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -33,7 +33,8 @@ enum rvu_block_addr_e { BLKADDR_NDC_NIX1_RX = 0x10ULL, BLKADDR_NDC_NIX1_TX = 0x11ULL, BLKADDR_APR = 0x16ULL, - BLK_COUNT = 0x17ULL, + BLKADDR_MBOX = 0x1bULL, + BLK_COUNT = 0x1cULL, }; /* RVU Block Type Enumeration */ @@ -49,7 +50,8 @@ enum rvu_block_type_e { BLKTYPE_TIM = 0x8, BLKTYPE_CPT = 0x9, BLKTYPE_NDC = 0xa, - BLKTYPE_MAX = 0xa, + BLKTYPE_MBOX = 0x13, + BLKTYPE_MAX = 0x13, }; /* RVU Admin function Interrupt Vector Enumeration */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c index 268efb7c1c15..49ce38685a7e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c @@ -93,7 +93,7 @@ static int rvu_switch_install_rules(struct rvu *rvu) if (!is_pf_cgxmapped(rvu, pf)) continue; - pcifunc = pf << 10; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); /* rvu_get_nix_blkaddr sets up the corresponding NIX block * address and NIX RX and TX interfaces for a pcifunc. 
* Generally it is called during attach call of a pcifunc but it @@ -126,7 +126,7 @@ static int rvu_switch_install_rules(struct rvu *rvu) rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL); for (vf = 0; vf < numvfs; vf++) { - pcifunc = pf << 10 | ((vf + 1) & 0x3FF); + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1)); rvu_get_nix_blkaddr(rvu, pcifunc); err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0); @@ -236,7 +236,7 @@ void rvu_switch_disable(struct rvu *rvu) if (!is_pf_cgxmapped(rvu, pf)) continue; - pcifunc = pf << 10; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF); if (err) dev_err(rvu->dev, @@ -248,7 +248,7 @@ void rvu_switch_disable(struct rvu *rvu) rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL); for (vf = 0; vf < numvfs; vf++) { - pcifunc = pf << 10 | ((vf + 1) & 0x3FF); + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1)); err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF); if (err) dev_err(rvu->dev, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c index 775fd4c35794..19e0d16b12f6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c @@ -11,3 +11,5 @@ EXPORT_TRACEPOINT_SYMBOL(otx2_msg_alloc); EXPORT_TRACEPOINT_SYMBOL(otx2_msg_interrupt); EXPORT_TRACEPOINT_SYMBOL(otx2_msg_process); +EXPORT_TRACEPOINT_SYMBOL(otx2_msg_status); +EXPORT_TRACEPOINT_SYMBOL(otx2_parse_dump); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h index 5704520f9b02..4cd0fc4b0d20 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h @@ -18,33 +18,42 @@ #include "mbox.h" TRACE_EVENT(otx2_msg_alloc, - TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size), - TP_ARGS(pdev, id, size), + TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size, u16 pcifunc), + TP_ARGS(pdev, id, size, pcifunc), TP_STRUCT__entry(__string(dev, pci_name(pdev)) __field(u16, id) __field(u64, size) + __field(u16, pcifunc) ), TP_fast_assign(__assign_str(dev); __entry->id = id; __entry->size = size; + __entry->pcifunc = pcifunc; ), - TP_printk("[%s] msg:(%s) size:%lld\n", __get_str(dev), - otx2_mbox_id2name(__entry->id), __entry->size) + TP_printk("[%s] msg:(%s) size:%lld pcifunc:0x%x\n", __get_str(dev), + otx2_mbox_id2name(__entry->id), __entry->size, + __entry->pcifunc) ); TRACE_EVENT(otx2_msg_send, - TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size), - TP_ARGS(pdev, num_msgs, msg_size), + TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size, + u16 id, u16 pcifunc), + TP_ARGS(pdev, num_msgs, msg_size, id, pcifunc), TP_STRUCT__entry(__string(dev, pci_name(pdev)) __field(u16, num_msgs) __field(u64, msg_size) + __field(u16, id) + __field(u16, pcifunc) ), TP_fast_assign(__assign_str(dev); __entry->num_msgs = num_msgs; __entry->msg_size = msg_size; + __entry->id = id; + __entry->pcifunc = pcifunc; ), - TP_printk("[%s] sent %d msg(s) of size:%lld\n", __get_str(dev), - __entry->num_msgs, __entry->msg_size) + TP_printk("[%s] sent %d msg(s) of size:%lld msg:(%s) pcifunc:0x%x\n", + __get_str(dev), __entry->num_msgs, __entry->msg_size, + otx2_mbox_id2name(__entry->id), __entry->pcifunc) ); TRACE_EVENT(otx2_msg_check, @@ -81,18 +90,73 @@ TRACE_EVENT(otx2_msg_interrupt, ); TRACE_EVENT(otx2_msg_process, - TP_PROTO(const struct pci_dev *pdev, u16 id, int err), - TP_ARGS(pdev, id, err), + 
TP_PROTO(const struct pci_dev *pdev, u16 id, int err, u16 pcifunc), + TP_ARGS(pdev, id, err, pcifunc), TP_STRUCT__entry(__string(dev, pci_name(pdev)) __field(u16, id) __field(int, err) + __field(u16, pcifunc) ), TP_fast_assign(__assign_str(dev); __entry->id = id; __entry->err = err; + __entry->pcifunc = pcifunc; + ), + TP_printk("[%s] msg:(%s) error:%d pcifunc:0x%x\n", __get_str(dev), + otx2_mbox_id2name(__entry->id), + __entry->err, __entry->pcifunc) +); + +TRACE_EVENT(otx2_msg_wait_rsp, + TP_PROTO(const struct pci_dev *pdev), + TP_ARGS(pdev), + TP_STRUCT__entry(__string(dev, pci_name(pdev)) + ), + TP_fast_assign(__assign_str(dev) + ), + TP_printk("[%s] timed out while waiting for response\n", + __get_str(dev)) +); + +TRACE_EVENT(otx2_msg_status, + TP_PROTO(const struct pci_dev *pdev, const char *msg, u16 num_msgs), + TP_ARGS(pdev, msg, num_msgs), + TP_STRUCT__entry(__string(dev, pci_name(pdev)) + __string(str, msg) + __field(u16, num_msgs) + ), + TP_fast_assign(__assign_str(dev); + __assign_str(str); + __entry->num_msgs = num_msgs; + ), + TP_printk("[%s] %s num_msgs:%d\n", __get_str(dev), + __get_str(str), __entry->num_msgs) +); + +TRACE_EVENT(otx2_parse_dump, + TP_PROTO(const struct pci_dev *pdev, char *msg, u64 *word), + TP_ARGS(pdev, msg, word), + TP_STRUCT__entry(__string(dev, pci_name(pdev)) + __string(str, msg) + __field(u64, w0) + __field(u64, w1) + __field(u64, w2) + __field(u64, w3) + __field(u64, w4) + __field(u64, w5) + ), + TP_fast_assign(__assign_str(dev); + __assign_str(str); + __entry->w0 = *(word + 0); + __entry->w1 = *(word + 1); + __entry->w2 = *(word + 2); + __entry->w3 = *(word + 3); + __entry->w4 = *(word + 4); + __entry->w5 = *(word + 5); ), - TP_printk("[%s] msg:(%s) error:%d\n", __get_str(dev), - otx2_mbox_id2name(__entry->id), __entry->err) + TP_printk("[%s] nix parse %s W0:%#llx W1:%#llx W2:%#llx W3:%#llx W4:%#llx W5:%#llx\n", + __get_str(dev), __get_str(str), __entry->w0, __entry->w1, __entry->w2, + __entry->w3, __entry->w4, __entry->w5) ); #endif /* __RVU_TRACE_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index 69e0778f9ac1..883e9f4d601c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -8,7 +8,7 @@ obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o obj-$(CONFIG_RVU_ESWITCH) += rvu_rep.o rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ - otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \ + otx2_flows.o otx2_tc.o cn10k.o cn20k.o otx2_dmac_flt.o \ otx2_devlink.o qos_sq.o qos.o otx2_xsk.o rvu_nicvf-y := otx2_vf.o rvu_rep-y := rep.o diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index c3b6e0f60a79..bec7d5b4d7cc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -14,6 +14,7 @@ static struct dev_hw_ops otx2_hw_ops = { .sqe_flush = otx2_sqe_flush, .aura_freeptr = otx2_aura_freeptr, .refill_pool_ptrs = otx2_refill_pool_ptrs, + .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler, }; static struct dev_hw_ops cn10k_hw_ops = { @@ -21,8 +22,20 @@ static struct dev_hw_ops cn10k_hw_ops = { .sqe_flush = cn10k_sqe_flush, .aura_freeptr = cn10k_aura_freeptr, .refill_pool_ptrs = cn10k_refill_pool_ptrs, + .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler, }; +void otx2_init_hw_ops(struct otx2_nic *pfvf) +{ + if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) { + 
pfvf->hw_ops = &otx2_hw_ops; + return; + } + + pfvf->hw_ops = &cn10k_hw_ops; +} +EXPORT_SYMBOL(otx2_init_hw_ops); + int cn10k_lmtst_init(struct otx2_nic *pfvf) { @@ -30,12 +43,9 @@ int cn10k_lmtst_init(struct otx2_nic *pfvf) struct otx2_lmt_info *lmt_info; int err, cpu; - if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) { - pfvf->hw_ops = &otx2_hw_ops; + if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) return 0; - } - pfvf->hw_ops = &cn10k_hw_ops; /* Total LMTLINES = num_online_cpus() * 32 (For Burst flush).*/ pfvf->tot_lmt_lines = (num_online_cpus() * LMT_BURST_SIZE); pfvf->hw.lmt_info = alloc_percpu(struct otx2_lmt_info); @@ -357,9 +367,12 @@ int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf) mutex_lock(&pfvf->mbox.lock); /* Remove RQ's policer mapping */ - for (qidx = 0; qidx < hw->rx_queues; qidx++) - cn10k_map_unmap_rq_policer(pfvf, qidx, - hw->matchall_ipolicer, false); + for (qidx = 0; qidx < hw->rx_queues; qidx++) { + rc = cn10k_map_unmap_rq_policer(pfvf, qidx, hw->matchall_ipolicer, false); + if (rc) + dev_warn(pfvf->dev, "Failed to unmap RQ %d's policer (error %d).", + qidx, rc); + } rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h index e3f0bce9908f..945ab10bd4ed 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h @@ -39,4 +39,5 @@ int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf); int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile, u32 burst, u64 rate, bool pps); int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf); +void otx2_init_hw_ops(struct otx2_nic *pfvf); #endif /* CN10K_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c index fc59e50bafce..c691f0722154 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c @@ -481,7 +481,7 @@ static int cn10k_outb_write_sa(struct otx2_nic *pf, struct qmem *sa_info) goto set_available; /* Trigger CTX flush to write dirty data back to DRAM */ - reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH, sa_iova >> 7); + reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH_CPTR, sa_iova >> 7); otx2_write64(pf, CN10K_CPT_LF_CTX_FLUSH, reg_val); set_available: @@ -663,10 +663,10 @@ static int cn10k_ipsec_inb_add_state(struct xfrm_state *x, return -EOPNOTSUPP; } -static int cn10k_ipsec_outb_add_state(struct xfrm_state *x, +static int cn10k_ipsec_outb_add_state(struct net_device *dev, + struct xfrm_state *x, struct netlink_ext_ack *extack) { - struct net_device *netdev = x->xso.dev; struct cn10k_tx_sa_s *sa_entry; struct qmem *sa_info; struct otx2_nic *pf; @@ -676,7 +676,7 @@ static int cn10k_ipsec_outb_add_state(struct xfrm_state *x, if (err) return err; - pf = netdev_priv(netdev); + pf = netdev_priv(dev); err = qmem_alloc(pf->dev, &sa_info, pf->ipsec.sa_size, OTX2_ALIGN); if (err) @@ -700,18 +700,18 @@ static int cn10k_ipsec_outb_add_state(struct xfrm_state *x, return 0; } -static int cn10k_ipsec_add_state(struct xfrm_state *x, +static int cn10k_ipsec_add_state(struct net_device *dev, + struct xfrm_state *x, struct netlink_ext_ack *extack) { if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) return cn10k_ipsec_inb_add_state(x, extack); else - return cn10k_ipsec_outb_add_state(x, extack); + return cn10k_ipsec_outb_add_state(dev, x, extack); } -static void cn10k_ipsec_del_state(struct xfrm_state *x) 
+static void cn10k_ipsec_del_state(struct net_device *dev, struct xfrm_state *x) { - struct net_device *netdev = x->xso.dev; struct cn10k_tx_sa_s *sa_entry; struct qmem *sa_info; struct otx2_nic *pf; @@ -720,7 +720,7 @@ static void cn10k_ipsec_del_state(struct xfrm_state *x) if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) return; - pf = netdev_priv(netdev); + pf = netdev_priv(dev); sa_info = (struct qmem *)x->xso.offload_handle; sa_entry = (struct cn10k_tx_sa_s *)sa_info->base; @@ -732,7 +732,7 @@ static void cn10k_ipsec_del_state(struct xfrm_state *x) err = cn10k_outb_write_sa(pf, sa_info); if (err) - netdev_err(netdev, "Error (%d) deleting SA\n", err); + netdev_err(dev, "Error (%d) deleting SA\n", err); x->xso.offload_handle = 0; qmem_free(pf->dev, sa_info); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h index 9965df0faa3e..43fbce0d6039 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h @@ -220,7 +220,7 @@ struct cpt_sg_s { #define CPT_LF_Q_SIZE_DIV40 GENMASK_ULL(14, 0) /* CPT LF CTX Flush Register */ -#define CPT_LF_CTX_FLUSH GENMASK_ULL(45, 0) +#define CPT_LF_CTX_FLUSH_CPTR GENMASK_ULL(45, 0) #ifdef CONFIG_XFRM_OFFLOAD int cn10k_ipsec_init(struct net_device *netdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c index f3b9daffaec3..4c7e0f345cb5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c @@ -531,7 +531,8 @@ static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf, if (sw_tx_sc->encrypt) sectag_tci |= (MCS_TCI_E | MCS_TCI_C); - policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu); + policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, + pfvf->netdev->mtu + OTX2_ETH_HLEN); /* Write SecTag excluding AN bits(1..0) */ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2); policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c new file mode 100644 index 000000000000..ec8cde98076d --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2024 Marvell. 
+ * + */ + +#include "otx2_common.h" +#include "otx2_reg.h" +#include "otx2_struct.h" +#include "cn10k.h" + +static struct dev_hw_ops cn20k_hw_ops = { + .pfaf_mbox_intr_handler = cn20k_pfaf_mbox_intr_handler, + .vfaf_mbox_intr_handler = cn20k_vfaf_mbox_intr_handler, + .pfvf_mbox_intr_handler = cn20k_pfvf_mbox_intr_handler, +}; + +void cn20k_init(struct otx2_nic *pfvf) +{ + pfvf->hw_ops = &cn20k_hw_ops; +} +EXPORT_SYMBOL(cn20k_init); +/* CN20K mbox AF => PFx irq handler */ +irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq) +{ + struct otx2_nic *pf = pf_irq; + struct mbox *mw = &pf->mbox; + struct otx2_mbox_dev *mdev; + struct otx2_mbox *mbox; + struct mbox_hdr *hdr; + u64 pf_trig_val; + + pf_trig_val = otx2_read64(pf, RVU_PF_INT) & 0x3ULL; + + /* Clear the IRQ */ + otx2_write64(pf, RVU_PF_INT, pf_trig_val); + + if (pf_trig_val & BIT_ULL(0)) { + mbox = &mw->mbox_up; + mdev = &mbox->dev[0]; + otx2_sync_mbox_bbuf(mbox, 0); + + hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (hdr->num_msgs) + queue_work(pf->mbox_wq, &mw->mbox_up_wrk); + + trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF", + BIT_ULL(0)); + } + + if (pf_trig_val & BIT_ULL(1)) { + mbox = &mw->mbox; + mdev = &mbox->dev[0]; + otx2_sync_mbox_bbuf(mbox, 0); + + hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (hdr->num_msgs) + queue_work(pf->mbox_wq, &mw->mbox_wrk); + trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF", + BIT_ULL(1)); + } + + return IRQ_HANDLED; +} + +irqreturn_t cn20k_vfaf_mbox_intr_handler(int irq, void *vf_irq) +{ + struct otx2_nic *vf = vf_irq; + struct otx2_mbox_dev *mdev; + struct otx2_mbox *mbox; + struct mbox_hdr *hdr; + u64 vf_trig_val; + + vf_trig_val = otx2_read64(vf, RVU_VF_INT) & 0x3ULL; + /* Clear the IRQ */ + otx2_write64(vf, RVU_VF_INT, vf_trig_val); + + /* Read latest mbox data */ + smp_rmb(); + + if (vf_trig_val & BIT_ULL(1)) { + /* Check for PF => VF response messages */ + mbox = &vf->mbox.mbox; + mdev = &mbox->dev[0]; + otx2_sync_mbox_bbuf(mbox, 0); + + hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (hdr->num_msgs) + queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk); + + trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF0 to VF", + BIT_ULL(1)); + } + + if (vf_trig_val & BIT_ULL(0)) { + /* Check for PF => VF notification messages */ + mbox = &vf->mbox.mbox_up; + mdev = &mbox->dev[0]; + otx2_sync_mbox_bbuf(mbox, 0); + + hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (hdr->num_msgs) + queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk); + + trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF0 to VF", + BIT_ULL(0)); + } + + return IRQ_HANDLED; +} + +void cn20k_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) +{ + /* Clear PF <=> VF mailbox IRQ */ + otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(0), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(1), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(0), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(1), ~0ull); + + /* Enable PF <=> VF mailbox IRQ */ + otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(0), INTR_MASK(numvfs)); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(0), INTR_MASK(numvfs)); + if (numvfs > 64) { + numvfs -= 64; + otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(1), + INTR_MASK(numvfs)); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(1), + INTR_MASK(numvfs)); + } +} + +void cn20k_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) +{ + int vector, intr_vec, vec = 0; + + /* Disable PF <=> VF mailbox IRQ */ + otx2_write64(pf, 
RVU_MBOX_PF_VFPF_INT_ENA_W1CX(0), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(1), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(0), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(1), ~0ull); + + otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(0), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(0), ~0ull); + + if (numvfs > 64) { + otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(1), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(1), ~0ull); + } + + for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <= + RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; intr_vec++, vec++) { + vector = pci_irq_vector(pf->pdev, intr_vec); + free_irq(vector, pf->hw.pfvf_irq_devid[vec]); + } +} + +irqreturn_t cn20k_pfvf_mbox_intr_handler(int irq, void *pf_irq) +{ + struct pf_irq_data *irq_data = pf_irq; + struct otx2_nic *pf = irq_data->pf; + struct mbox *mbox; + u64 intr; + + /* Sync with mbox memory region */ + rmb(); + + /* Clear interrupts */ + intr = otx2_read64(pf, irq_data->intr_status); + otx2_write64(pf, irq_data->intr_status, intr); + mbox = pf->mbox_pfvf; + + if (intr) + trace_otx2_msg_interrupt(pf->pdev, "VF(s) to PF", intr); + + irq_data->pf_queue_work_hdlr(mbox, pf->mbox_pfvf_wq, irq_data->start, + irq_data->mdevs, intr); + + return IRQ_HANDLED; +} + +int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) +{ + struct otx2_hw *hw = &pf->hw; + struct pf_irq_data *irq_data; + int intr_vec, ret, vec = 0; + char *irq_name; + + /* irq data for 4 PF intr vectors */ + irq_data = devm_kcalloc(pf->dev, 4, + sizeof(struct pf_irq_data), GFP_KERNEL); + if (!irq_data) + return -ENOMEM; + + for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <= + RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; intr_vec++, vec++) { + switch (intr_vec) { + case RVU_MBOX_PF_INT_VEC_VFPF_MBOX0: + irq_data[vec].intr_status = + RVU_MBOX_PF_VFPF_INTX(0); + irq_data[vec].start = 0; + irq_data[vec].mdevs = 64; + break; + case RVU_MBOX_PF_INT_VEC_VFPF_MBOX1: + irq_data[vec].intr_status = + RVU_MBOX_PF_VFPF_INTX(1); + irq_data[vec].start = 64; + irq_data[vec].mdevs = 96; + break; + case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0: + irq_data[vec].intr_status = + RVU_MBOX_PF_VFPF1_INTX(0); + irq_data[vec].start = 0; + irq_data[vec].mdevs = 64; + break; + case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1: + irq_data[vec].intr_status = + RVU_MBOX_PF_VFPF1_INTX(1); + irq_data[vec].start = 64; + irq_data[vec].mdevs = 96; + break; + } + irq_data[vec].pf_queue_work_hdlr = otx2_queue_vf_work; + irq_data[vec].vec_num = intr_vec; + irq_data[vec].pf = pf; + + /* Register mailbox interrupt handler */ + irq_name = &hw->irq_name[intr_vec * NAME_SIZE]; + if (pf->pcifunc) + snprintf(irq_name, NAME_SIZE, + "RVUPF%d_VF%d Mbox%d", rvu_get_pf(pf->pdev, + pf->pcifunc), vec / 2, vec % 2); + else + snprintf(irq_name, NAME_SIZE, "RVUPF_VF%d Mbox%d", + vec / 2, vec % 2); + + hw->pfvf_irq_devid[vec] = &irq_data[vec]; + ret = request_irq(pci_irq_vector(pf->pdev, intr_vec), + pf->hw_ops->pfvf_mbox_intr_handler, 0, + irq_name, + &irq_data[vec]); + if (ret) { + dev_err(pf->dev, + "RVUPF: IRQ registration failed for PFVF mbox0 irq\n"); + return ret; + } + } + + cn20k_enable_pfvf_mbox_intr(pf, numvfs); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h new file mode 100644 index 000000000000..832adaf8c57f --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2024 Marvell. 
+ * + */ + +#ifndef CN20K_H +#define CN20K_H + +#include "otx2_common.h" + +void cn20k_init(struct otx2_nic *pfvf); +int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs); +void cn20k_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs); +void cn20k_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs); +#endif /* CN20K_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 84cd029a85aa..f674729124e6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -28,12 +28,12 @@ static void otx2_nix_rq_op_stats(struct queue_stats *stats, struct otx2_nic *pfvf, int qidx) { u64 incr = (u64)qidx << 32; - u64 *ptr; + void __iomem *ptr; - ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS); + ptr = otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS); stats->bytes = otx2_atomic64_add(incr, ptr); - ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS); + ptr = otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS); stats->pkts = otx2_atomic64_add(incr, ptr); } @@ -41,12 +41,12 @@ static void otx2_nix_sq_op_stats(struct queue_stats *stats, struct otx2_nic *pfvf, int qidx) { u64 incr = (u64)qidx << 32; - u64 *ptr; + void __iomem *ptr; - ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS); + ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS); stats->bytes = otx2_atomic64_add(incr, ptr); - ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS); + ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS); stats->pkts = otx2_atomic64_add(incr, ptr); } @@ -318,21 +318,20 @@ fail: return err; } -int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id) +int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id, const u32 *ind_tbl) { struct otx2_rss_info *rss = &pfvf->hw.rss_info; const int index = rss->rss_size * ctx_id; struct mbox *mbox = &pfvf->mbox; - struct otx2_rss_ctx *rss_ctx; struct nix_aq_enq_req *aq; int idx, err; mutex_lock(&mbox->lock); - rss_ctx = rss->rss_ctx[ctx_id]; + ind_tbl = ind_tbl ?: rss->ind_tbl; /* Get memory to put this msg */ for (idx = 0; idx < rss->rss_size; idx++) { /* Ignore the queue if AF_XDP zero copy is enabled */ - if (test_bit(rss_ctx->ind_tbl[idx], pfvf->af_xdp_zc_qidx)) + if (test_bit(ind_tbl[idx], pfvf->af_xdp_zc_qidx)) continue; aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); @@ -352,7 +351,7 @@ int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id) } } - aq->rss.rq = rss_ctx->ind_tbl[idx]; + aq->rss.rq = ind_tbl[idx]; /* Fill AQ info */ aq->qidx = index + idx; @@ -390,30 +389,22 @@ void otx2_set_rss_key(struct otx2_nic *pfvf) int otx2_rss_init(struct otx2_nic *pfvf) { struct otx2_rss_info *rss = &pfvf->hw.rss_info; - struct otx2_rss_ctx *rss_ctx; int idx, ret = 0; - rss->rss_size = sizeof(*rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]); + rss->rss_size = sizeof(*rss->ind_tbl); /* Init RSS key if it is not setup already */ if (!rss->enable) netdev_rss_key_fill(rss->key, sizeof(rss->key)); otx2_set_rss_key(pfvf); - if (!netif_is_rxfh_configured(pfvf->netdev)) { - /* Set RSS group 0 as default indirection table */ - rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP] = kzalloc(rss->rss_size, - GFP_KERNEL); - if (!rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]) - return -ENOMEM; - - rss_ctx = rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]; + if (!netif_is_rxfh_configured(pfvf->netdev)) for (idx = 0; idx < rss->rss_size; idx++) - rss_ctx->ind_tbl[idx] = + rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx, pfvf->hw.rx_queues); - } - ret = 
otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP); + + ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP, NULL); if (ret) return ret; @@ -860,9 +851,10 @@ void otx2_sqb_flush(struct otx2_nic *pfvf) { int qidx, sqe_tail, sqe_head; struct otx2_snd_queue *sq; - u64 incr, *ptr, val; + void __iomem *ptr; + u64 incr, val; - ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); + ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) { sq = &pfvf->qset.sq[qidx]; if (!sq->sqb_ptrs) @@ -1822,7 +1814,7 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable) req->chan_cnt = IEEE_8021QAZ_MAX_TCS; req->bpid_per_chan = 1; } else { - req->chan_cnt = 1; + req->chan_cnt = pfvf->hw.rx_chan_cnt; req->bpid_per_chan = 0; } @@ -1847,7 +1839,7 @@ int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable) req->chan_cnt = IEEE_8021QAZ_MAX_TCS; req->bpid_per_chan = 1; } else { - req->chan_cnt = 1; + req->chan_cnt = pfvf->hw.rx_chan_cnt; req->bpid_per_chan = 0; } @@ -2055,6 +2047,43 @@ int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t } EXPORT_SYMBOL(otx2_handle_ntuple_tc_features); +int otx2_set_hw_capabilities(struct otx2_nic *pfvf) +{ + struct mbox *mbox = &pfvf->mbox; + struct otx2_hw *hw = &pfvf->hw; + struct get_hw_cap_rsp *rsp; + struct msg_req *req; + int ret = -ENOMEM; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_get_hw_cap(mbox); + if (!req) + goto fail; + + ret = otx2_sync_mbox_msg(mbox); + if (ret) + goto fail; + + rsp = (struct get_hw_cap_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, + 0, &req->hdr); + if (IS_ERR(rsp)) { + ret = -EINVAL; + goto fail; + } + + if (rsp->hw_caps & HW_CAP_MACSEC) + __set_bit(CN10K_HW_MACSEC, &hw->cap_flag); + + mutex_unlock(&mbox->lock); + + return 0; +fail: + dev_err(pfvf->dev, "Cannot get MACSEC capability from AF\n"); + mutex_unlock(&mbox->lock); + return ret; +} + #define M(_name, _id, _fn_name, _req_type, _rsp_type) \ int __weak \ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 1e88422825be..e3765b73c434 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -28,10 +28,12 @@ #include "otx2_reg.h" #include "otx2_txrx.h" #include "otx2_devlink.h" +#include <rvu.h> #include <rvu_trace.h> #include "qos.h" #include "rep.h" #include "cn10k_ipsec.h" +#include "cn20k.h" /* IPv4 flag more fragment bit */ #define IPV4_FLAG_MORE 0x20 @@ -61,6 +63,12 @@ /* Number of segments per SG structure */ #define MAX_SEGS_PER_SG 3 +irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq); +irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq); +irqreturn_t cn20k_vfaf_mbox_intr_handler(int irq, void *vf_irq); +irqreturn_t cn20k_pfvf_mbox_intr_handler(int irq, void *pf_irq); +irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq); + enum arua_mapped_qtypes { AURA_NIX_RQ, AURA_NIX_SQ, @@ -85,10 +93,6 @@ struct otx2_lmt_info { u64 lmt_addr; u16 lmt_id; }; -/* RSS configuration */ -struct otx2_rss_ctx { - u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE]; -}; struct otx2_rss_info { u8 enable; @@ -96,7 +100,7 @@ struct otx2_rss_info { u16 rss_size; #define RSS_HASH_KEY_SIZE 44 /* 352 bit key */ u8 key[RSS_HASH_KEY_SIZE]; - struct otx2_rss_ctx *rss_ctx[MAX_RSS_GROUPS]; + u32 ind_tbl[MAX_RSS_INDIR_TBL_SIZE]; }; /* NIX (or NPC) RX errors */ @@ -245,6 
+249,7 @@ struct otx2_hw { u16 nix_msixoff; /* Offset of NIX vectors */ char *irq_name; cpumask_var_t *affinity_mask; + struct pf_irq_data *pfvf_irq_devid[4]; /* Stats */ struct otx2_dev_stats dev_stats; @@ -356,6 +361,7 @@ struct otx2_flow_config { struct list_head flow_list_tc; u8 ucast_flt_cnt; bool ntuple; + u16 ntuple_cnt; }; struct dev_hw_ops { @@ -365,6 +371,9 @@ struct dev_hw_ops { int size, int qidx); int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq); void (*aura_freeptr)(void *dev, int aura, u64 buf); + irqreturn_t (*pfaf_mbox_intr_handler)(int irq, void *pf_irq); + irqreturn_t (*vfaf_mbox_intr_handler)(int irq, void *pf_irq); + irqreturn_t (*pfvf_mbox_intr_handler)(int irq, void *pf_irq); }; #define CN10K_MCS_SA_PER_SC 4 @@ -432,6 +441,16 @@ struct cn10k_mcs_cfg { struct list_head rxsc_list; }; +struct pf_irq_data { + u64 intr_status; + void (*pf_queue_work_hdlr)(struct mbox *mb, struct workqueue_struct *mw, + int first, int mdevs, u64 intr); + struct otx2_nic *pf; + int vec_num; + int start; + int mdevs; +}; + struct otx2_nic { void __iomem *reg_base; struct net_device *netdev; @@ -475,6 +494,7 @@ struct otx2_nic { struct mbox *mbox_pfvf; struct workqueue_struct *mbox_wq; struct workqueue_struct *mbox_pfvf_wq; + struct qmem *pfvf_mbox_addr; u8 total_vfs; u16 pcifunc; /* RVU PF_FUNC */ @@ -631,9 +651,6 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf) __set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag); __set_bit(QOS_CIR_PIR_SUPPORT, &hw->cap_flag); } - - if (is_dev_cn10kb(pfvf->pdev)) - __set_bit(CN10K_HW_MACSEC, &hw->cap_flag); } /* Register read/write APIs */ @@ -732,8 +749,9 @@ static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr) ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr)); } -static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr) +static inline u64 otx2_atomic64_add(u64 incr, void __iomem *addr) { + u64 __iomem *ptr = addr; u64 result; __asm__ volatile(".cpu generic+lse\n" @@ -746,7 +764,11 @@ static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr) #else #define otx2_write128(lo, hi, addr) writeq((hi) | (lo), addr) -#define otx2_atomic64_add(incr, ptr) ({ *ptr += incr; }) + +static inline u64 otx2_atomic64_add(u64 incr, void __iomem *addr) +{ + return 0; +} #endif static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura, @@ -796,7 +818,7 @@ static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf) /* Alloc pointer from pool/aura */ static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura) { - u64 *ptr = (__force u64 *)otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0)); + void __iomem *ptr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0)); u64 incr = (u64)aura | BIT_ULL(63); return otx2_atomic64_add(incr, ptr); @@ -871,6 +893,7 @@ static struct _req_type __maybe_unused \ *otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox) \ { \ struct _req_type *req; \ + u16 pcifunc = mbox->pfvf->pcifunc; \ \ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \ &mbox->mbox, 0, sizeof(struct _req_type), \ @@ -879,7 +902,8 @@ static struct _req_type __maybe_unused \ return NULL; \ req->hdr.sig = OTX2_MBOX_REQ_SIG; \ req->hdr.id = _id; \ - trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req)); \ + req->hdr.pcifunc = pcifunc; \ + trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req), pcifunc); \ return req; \ } @@ -899,21 +923,11 @@ MBOX_UP_MCS_MESSAGES /* Time to wait before watchdog kicks off */ #define OTX2_TX_TIMEOUT (100 * HZ) -#define RVU_PFVF_PF_SHIFT 10 -#define RVU_PFVF_PF_MASK 0x3F -#define 
RVU_PFVF_FUNC_SHIFT 0 -#define RVU_PFVF_FUNC_MASK 0x3FF - static inline bool is_otx2_vf(u16 pcifunc) { return !!(pcifunc & RVU_PFVF_FUNC_MASK); } -static inline int rvu_get_pf(u16 pcifunc) -{ - return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; -} - static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf, struct page *page, size_t offset, size_t size, @@ -1043,12 +1057,13 @@ void otx2_disable_napi(struct otx2_nic *pf); irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq); int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura); int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx); +int otx2_set_hw_capabilities(struct otx2_nic *pfvf); /* RSS configuration APIs*/ int otx2_rss_init(struct otx2_nic *pfvf); int otx2_set_flowkey_cfg(struct otx2_nic *pfvf); void otx2_set_rss_key(struct otx2_nic *pfvf); -int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id); +int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id, const u32 *ind_tbl); /* Mbox handlers */ void mbox_handler_msix_offset(struct otx2_nic *pfvf, @@ -1107,6 +1122,8 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable); int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf); bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf, u64 iova, int len, u16 qidx, u16 flags); +void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf, + u64 dma_addr, int len, int *offset, u16 flags); u16 otx2_get_max_mtu(struct otx2_nic *pfvf); int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features); @@ -1188,4 +1205,6 @@ dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, struct sk_buff *skb, int seg, int *len); void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg); int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx); +void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq, + int first, int mdevs, u64 intr); #endif /* OTX2_COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c index 33ec9a7f7c03..e13ae5484c19 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c @@ -41,6 +41,7 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id, if (!pfvf->flow_cfg) return 0; + pfvf->flow_cfg->ntuple_cnt = ctx->val.vu16; otx2_alloc_mcam_entries(pfvf, ctx->val.vu16); return 0; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 010385b29988..998c734ff839 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -15,6 +15,7 @@ #include "otx2_common.h" #include "otx2_ptp.h" +#include <cgx_fw_if.h> #define DRV_NAME "rvu-nicpf" #define DRV_VF_NAME "rvu-nicvf" @@ -315,7 +316,7 @@ static void otx2_get_pauseparam(struct net_device *netdev, struct otx2_nic *pfvf = netdev_priv(netdev); struct cgx_pause_frm_cfg *req, *rsp; - if (is_otx2_lbkvf(pfvf->pdev)) + if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) return; mutex_lock(&pfvf->mbox.lock); @@ -347,7 +348,7 @@ static int otx2_set_pauseparam(struct net_device *netdev, if (pause->autoneg) return -EOPNOTSUPP; - if (is_otx2_lbkvf(pfvf->pdev)) + if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) return -EOPNOTSUPP; if (pause->rx_pause) @@ -559,10 +560,13 @@ static int otx2_set_coalesce(struct net_device *netdev, return 
0; } -static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf, - struct ethtool_rxnfc *nfc) +static int otx2_get_rss_hash_opts(struct net_device *dev, + struct ethtool_rxfh_fields *nfc) { - struct otx2_rss_info *rss = &pfvf->hw.rss_info; + struct otx2_nic *pfvf = netdev_priv(dev); + struct otx2_rss_info *rss; + + rss = &pfvf->hw.rss_info; if (!(rss->flowkey_cfg & (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))) @@ -609,12 +613,17 @@ static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf, return 0; } -static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf, - struct ethtool_rxnfc *nfc) +static int otx2_set_rss_hash_opts(struct net_device *dev, + const struct ethtool_rxfh_fields *nfc, + struct netlink_ext_ack *extack) { - struct otx2_rss_info *rss = &pfvf->hw.rss_info; + struct otx2_nic *pfvf = netdev_priv(dev); u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3; - u32 rss_cfg = rss->flowkey_cfg; + struct otx2_rss_info *rss; + u32 rss_cfg; + + rss = &pfvf->hw.rss_info; + rss_cfg = rss->flowkey_cfg; if (!rss->enable) { netdev_err(pfvf->netdev, @@ -743,8 +752,6 @@ static int otx2_get_rxnfc(struct net_device *dev, if (netif_running(dev) && ntuple) ret = otx2_get_all_flows(pfvf, nfc, rules); break; - case ETHTOOL_GRXFH: - return otx2_get_rss_hash_opts(pfvf, nfc); default: break; } @@ -759,9 +766,6 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc) pfvf->flow_cfg->ntuple = ntuple; switch (nfc->cmd) { - case ETHTOOL_SRXFH: - ret = otx2_set_rss_hash_opts(pfvf, nfc); - break; case ETHTOOL_SRXCLSRLINS: if (netif_running(dev) && ntuple) ret = otx2_add_flow(pfvf, nfc); @@ -792,60 +796,91 @@ static u32 otx2_get_rxfh_indir_size(struct net_device *dev) return MAX_RSS_INDIR_TBL_SIZE; } -static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id) +static int otx2_create_rxfh(struct net_device *dev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { - struct otx2_rss_info *rss = &pfvf->hw.rss_info; + struct otx2_nic *pfvf = netdev_priv(dev); + struct otx2_rss_info *rss; + unsigned int queues; + u32 *ind_tbl; + int idx; + + rss = &pfvf->hw.rss_info; + queues = pfvf->hw.rx_queues; - otx2_rss_ctx_flow_del(pfvf, ctx_id); - kfree(rss->rss_ctx[ctx_id]); - rss->rss_ctx[ctx_id] = NULL; + if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + ctx->hfunc = ETH_RSS_HASH_TOP; + if (!rss->enable) { + netdev_err(dev, "RSS is disabled, cannot change settings\n"); + return -EIO; + } + + ind_tbl = rxfh->indir; + if (!ind_tbl) { + ind_tbl = ethtool_rxfh_context_indir(ctx); + for (idx = 0; idx < rss->rss_size; idx++) + ind_tbl[idx] = ethtool_rxfh_indir_default(idx, queues); + } + + otx2_set_rss_table(pfvf, rxfh->rss_context, ind_tbl); return 0; } -static int otx2_rss_ctx_create(struct otx2_nic *pfvf, - u32 *rss_context) +static int otx2_modify_rxfh(struct net_device *dev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { - struct otx2_rss_info *rss = &pfvf->hw.rss_info; - u8 ctx; + struct otx2_nic *pfvf = netdev_priv(dev); - for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) { - if (!rss->rss_ctx[ctx]) - break; + if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (!pfvf->hw.rss_info.enable) { + netdev_err(dev, "RSS is disabled, cannot change settings\n"); + return -EIO; } - if (ctx == MAX_RSS_GROUPS) - return -EINVAL; - rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL); - if 
(!rss->rss_ctx[ctx]) - return -ENOMEM; - *rss_context = ctx; + if (rxfh->indir) + otx2_set_rss_table(pfvf, rxfh->rss_context, rxfh->indir); return 0; } +static int otx2_remove_rxfh(struct net_device *dev, + struct ethtool_rxfh_context *ctx, + u32 rss_context, + struct netlink_ext_ack *extack) +{ + struct otx2_nic *pfvf = netdev_priv(dev); + + if (!pfvf->hw.rss_info.enable) { + netdev_err(dev, "RSS is disabled, cannot change settings\n"); + return -EIO; + } + + otx2_rss_ctx_flow_del(pfvf, rss_context); + return 0; +} + /* Configure RSS table and hash key */ static int otx2_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh, struct netlink_ext_ack *extack) { - u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP; struct otx2_nic *pfvf = netdev_priv(dev); - struct otx2_rss_ctx *rss_ctx; struct otx2_rss_info *rss; - int ret, idx; + int idx; if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && rxfh->hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (rxfh->rss_context) - rss_context = rxfh->rss_context; - - if (rss_context != ETH_RXFH_CONTEXT_ALLOC && - rss_context >= MAX_RSS_GROUPS) - return -EINVAL; - rss = &pfvf->hw.rss_info; if (!rss->enable) { @@ -857,21 +892,12 @@ static int otx2_set_rxfh(struct net_device *dev, memcpy(rss->key, rxfh->key, sizeof(rss->key)); otx2_set_rss_key(pfvf); } - if (rxfh->rss_delete) - return otx2_rss_ctx_delete(pfvf, rss_context); - - if (rss_context == ETH_RXFH_CONTEXT_ALLOC) { - ret = otx2_rss_ctx_create(pfvf, &rss_context); - rxfh->rss_context = rss_context; - if (ret) - return ret; - } + if (rxfh->indir) { - rss_ctx = rss->rss_ctx[rss_context]; for (idx = 0; idx < rss->rss_size; idx++) - rss_ctx->ind_tbl[idx] = rxfh->indir[idx]; + rss->ind_tbl[idx] = rxfh->indir[idx]; } - otx2_set_rss_table(pfvf, rss_context); + otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP, NULL); return 0; } @@ -880,9 +906,7 @@ static int otx2_set_rxfh(struct net_device *dev, static int otx2_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh) { - u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP; struct otx2_nic *pfvf = netdev_priv(dev); - struct otx2_rss_ctx *rss_ctx; struct otx2_rss_info *rss; u32 *indir = rxfh->indir; int idx, rx_queues; @@ -890,32 +914,21 @@ static int otx2_get_rxfh(struct net_device *dev, rss = &pfvf->hw.rss_info; rxfh->hfunc = ETH_RSS_HASH_TOP; - if (rxfh->rss_context) - rss_context = rxfh->rss_context; - if (!indir) return 0; - if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) { + if (!rss->enable) { rx_queues = pfvf->hw.rx_queues; for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++) indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues); return 0; } - if (rss_context >= MAX_RSS_GROUPS) - return -ENOENT; - - rss_ctx = rss->rss_ctx[rss_context]; - if (!rss_ctx) - return -ENOENT; - - if (indir) { - for (idx = 0; idx < rss->rss_size; idx++) { - /* Ignore if the rx queue is AF_XDP zero copy enabled */ - if (test_bit(rss_ctx->ind_tbl[idx], pfvf->af_xdp_zc_qidx)) - continue; - indir[idx] = rss_ctx->ind_tbl[idx]; - } + + for (idx = 0; idx < rss->rss_size; idx++) { + /* Ignore if the rx queue is AF_XDP zero copy enabled */ + if (test_bit(rss->ind_tbl[idx], pfvf->af_xdp_zc_qidx)) + continue; + indir[idx] = rss->ind_tbl[idx]; } if (rxfh->key) memcpy(rxfh->key, rss->key, sizeof(rss->key)); @@ -941,8 +954,8 @@ static u32 otx2_get_link(struct net_device *netdev) { struct otx2_nic *pfvf = netdev_priv(netdev); - /* LBK link is internal and always UP */ - if (is_otx2_lbkvf(pfvf->pdev)) + /* LBK and SDP links are internal and always UP */ + if 
(is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) return 1; return pfvf->linfo.link_up; } @@ -1123,17 +1136,9 @@ static void otx2_get_link_mode_info(u64 link_mode_bmap, *link_ksettings) { __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_link_modes) = { 0, }; - const int otx2_sgmii_features[6] = { - ETHTOOL_LINK_MODE_10baseT_Half_BIT, - ETHTOOL_LINK_MODE_10baseT_Full_BIT, - ETHTOOL_LINK_MODE_100baseT_Half_BIT, - ETHTOOL_LINK_MODE_100baseT_Full_BIT, - ETHTOOL_LINK_MODE_1000baseT_Half_BIT, - ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - }; /* CGX link modes to Ethtool link mode mapping */ - const int cgx_link_mode[27] = { - 0, /* SGMII Mode */ + const int cgx_link_mode[CGX_MODE_MAX] = { + 0, /* SGMII 1000baseT */ ETHTOOL_LINK_MODE_1000baseX_Full_BIT, ETHTOOL_LINK_MODE_10000baseT_Full_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, @@ -1163,14 +1168,19 @@ static void otx2_get_link_mode_info(u64 link_mode_bmap, }; u8 bit; - for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, 27) { - /* SGMII mode is set */ - if (bit == 0) - linkmode_set_bit_array(otx2_sgmii_features, - ARRAY_SIZE(otx2_sgmii_features), - otx2_link_modes); - else + for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, ARRAY_SIZE(cgx_link_mode)) { + if (bit == CGX_MODE_SGMII_10M_BIT) { + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, otx2_link_modes); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, otx2_link_modes); + } else if (bit == CGX_MODE_SGMII_100M_BIT) { + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, otx2_link_modes); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, otx2_link_modes); + } else if (bit == CGX_MODE_SGMII) { + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, otx2_link_modes); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, otx2_link_modes); + } else { linkmode_set_bit(cgx_link_mode[bit], otx2_link_modes); + } } if (req_mode == OTX2_MODE_ADVERTISED) @@ -1211,23 +1221,10 @@ static int otx2_get_link_ksettings(struct net_device *netdev, return 0; } -static void otx2_get_advertised_mode(const struct ethtool_link_ksettings *cmd, - u64 *mode) -{ - u32 bit_pos; - - /* Firmware does not support requesting multiple advertised modes - * return first set bit - */ - bit_pos = find_first_bit(cmd->link_modes.advertising, - __ETHTOOL_LINK_MODE_MASK_NBITS); - if (bit_pos != __ETHTOOL_LINK_MODE_MASK_NBITS) - *mode = bit_pos; -} - static int otx2_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct otx2_nic *pf = netdev_priv(netdev); struct ethtool_link_ksettings cur_ks; struct cgx_set_link_mode_req *req; @@ -1264,7 +1261,20 @@ static int otx2_set_link_ksettings(struct net_device *netdev, */ req->args.duplex = cmd->base.duplex ^ 0x1; req->args.an = cmd->base.autoneg; - otx2_get_advertised_mode(cmd, &req->args.mode); + /* Mask unsupported modes and send message to AF */ + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mask); + + linkmode_copy(req->args.advertising, + cmd->link_modes.advertising); + linkmode_andnot(req->args.advertising, + req->args.advertising, mask); + + /* Inform AF that we need to parse this differently */ + if (bitmap_weight(req->args.advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS) >= 2) + req->args.multimode = true; err = otx2_sync_mbox_msg(&pf->mbox); end: @@ -1306,12 +1316,12 @@ static void otx2_get_fec_stats(struct net_device *netdev, } static const struct
ethtool_ops otx2_ethtool_ops = { - .cap_rss_ctx_supported = true, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USE_ADAPTIVE, .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN | ETHTOOL_RING_USE_CQE_SIZE, + .rxfh_max_num_contexts = MAX_RSS_GROUPS, .get_link = otx2_get_link, .get_drvinfo = otx2_get_drvinfo, .get_strings = otx2_get_strings, @@ -1329,6 +1339,11 @@ static const struct ethtool_ops otx2_ethtool_ops = { .get_rxfh_indir_size = otx2_get_rxfh_indir_size, .get_rxfh = otx2_get_rxfh, .set_rxfh = otx2_set_rxfh, + .get_rxfh_fields = otx2_get_rss_hash_opts, + .set_rxfh_fields = otx2_set_rss_hash_opts, + .create_rxfh_context = otx2_create_rxfh, + .modify_rxfh_context = otx2_modify_rxfh, + .remove_rxfh_context = otx2_remove_rxfh, .get_msglevel = otx2_get_msglevel, .set_msglevel = otx2_set_msglevel, .get_pauseparam = otx2_get_pauseparam, @@ -1413,7 +1428,7 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev, { struct otx2_nic *pfvf = netdev_priv(netdev); - if (is_otx2_lbkvf(pfvf->pdev)) { + if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) { cmd->base.duplex = DUPLEX_FULL; cmd->base.speed = SPEED_100000; } else { @@ -1423,12 +1438,12 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev, } static const struct ethtool_ops otx2vf_ethtool_ops = { - .cap_rss_ctx_supported = true, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USE_ADAPTIVE, .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN | ETHTOOL_RING_USE_CQE_SIZE, + .rxfh_max_num_contexts = MAX_RSS_GROUPS, .get_link = otx2_get_link, .get_drvinfo = otx2vf_get_drvinfo, .get_strings = otx2vf_get_strings, @@ -1442,6 +1457,11 @@ static const struct ethtool_ops otx2vf_ethtool_ops = { .get_rxfh_indir_size = otx2_get_rxfh_indir_size, .get_rxfh = otx2_get_rxfh, .set_rxfh = otx2_set_rxfh, + .get_rxfh_fields = otx2_get_rss_hash_opts, + .set_rxfh_fields = otx2_set_rss_hash_opts, + .create_rxfh_context = otx2_create_rxfh, + .modify_rxfh_context = otx2_modify_rxfh, + .remove_rxfh_context = otx2_remove_rxfh, .get_ringparam = otx2_get_ringparam, .set_ringparam = otx2_set_ringparam, .get_coalesce = otx2_get_coalesce, diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 47bfd1fb37d4..64c6d9162ef6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -247,7 +247,7 @@ int otx2_mcam_entry_init(struct otx2_nic *pfvf) mutex_unlock(&pfvf->mbox.lock); /* Allocate entries for Ntuple filters */ - count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT); + count = otx2_alloc_mcam_entries(pfvf, flow_cfg->ntuple_cnt); if (count <= 0) { otx2_clear_ntuple_flow_info(pfvf, flow_cfg); return 0; @@ -307,6 +307,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc); pf->flow_cfg->ucast_flt_cnt = OTX2_DEFAULT_UNICAST_FLOWS; + pf->flow_cfg->ntuple_cnt = OTX2_DEFAULT_FLOWCOUNT; /* Allocate bare minimum number of MCAM entries needed for * unicast and ntuple filters. 
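The ethtool RSS rework above moves otx2's extra RSS contexts out of the driver: the private rss->rss_ctx[] array and its hand-rolled create/delete paths in otx2_set_rxfh() are replaced by core-managed contexts, with the driver advertising .rxfh_max_num_contexts = MAX_RSS_GROUPS and supplying create/modify/remove_rxfh_context callbacks while the ethtool core allocates each context and keeps the authoritative copy of its indirection table. A minimal sketch of the create callback shape is below; the "foo" names, the stub hardware helper and the 256-entry table size are assumptions of the sketch, not the otx2 code.

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	#define FOO_INDIR_SIZE	256	/* assumed table size for this sketch */

	/* Hypothetical helper: program the device's indirection table. */
	static int foo_hw_set_indir(struct net_device *dev, u32 ctx_id,
				    const u32 *indir)
	{
		return 0;
	}

	static int foo_create_rxfh_context(struct net_device *dev,
					   struct ethtool_rxfh_context *ctx,
					   const struct ethtool_rxfh_param *rxfh,
					   struct netlink_ext_ack *extack)
	{
		u32 *indir = rxfh->indir;
		unsigned int i;

		if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
			return -EOPNOTSUPP;
		ctx->hfunc = ETH_RSS_HASH_TOP;

		/* No table from userspace: fill the core's copy of the
		 * context with the default spread across the RX queues.
		 */
		if (!indir) {
			indir = ethtool_rxfh_context_indir(ctx);
			for (i = 0; i < FOO_INDIR_SIZE; i++)
				indir[i] = ethtool_rxfh_indir_default(i, dev->real_num_rx_queues);
		}

		return foo_hw_set_indir(dev, rxfh->rss_context, indir);
	}

Because the core now bounds-checks the requested context ID and tracks live contexts itself, the driver-side MAX_RSS_GROUPS checks and -ENOENT lookups disappear from otx2_get_rxfh()/otx2_set_rxfh(), which are left serving only the default context.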
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index cfed9ec5b157..b23585c5e5c2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -206,7 +206,8 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs) /* Register ME interrupt handler*/ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE]; - snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc)); + snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", + rvu_get_pf(pf->pdev, pf->pcifunc)); ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0), otx2_pf_me_intr_handler, 0, irq_name, pf); if (ret) { @@ -216,7 +217,8 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs) /* Register FLR interrupt handler */ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE]; - snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc)); + snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", + rvu_get_pf(pf->pdev, pf->pcifunc)); ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0), otx2_pf_flr_intr_handler, 0, irq_name, pf); if (ret) { @@ -228,7 +230,7 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs) if (numvfs > 64) { irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE]; snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1", - rvu_get_pf(pf->pcifunc)); + rvu_get_pf(pf->pdev, pf->pcifunc)); ret = request_irq(pci_irq_vector (pf->pdev, RVU_PF_INT_VEC_VFME1), otx2_pf_me_intr_handler, 0, irq_name, pf); @@ -238,7 +240,7 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs) } irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE]; snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1", - rvu_get_pf(pf->pcifunc)); + rvu_get_pf(pf->pdev, pf->pcifunc)); ret = request_irq(pci_irq_vector (pf->pdev, RVU_PF_INT_VEC_VFFLR1), otx2_pf_flr_intr_handler, 0, irq_name, pf); @@ -294,8 +296,8 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs) return 0; } -static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq, - int first, int mdevs, u64 intr) +void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq, + int first, int mdevs, u64 intr) { struct otx2_mbox_dev *mdev; struct otx2_mbox *mbox; @@ -465,6 +467,9 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work) offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); + trace_otx2_msg_status(pf->pdev, "PF-VF down queue handler(forwarding)", + vf_mbox->num_msgs); + for (id = 0; id < vf_mbox->num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start + offset); @@ -473,7 +478,7 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work) goto inval_msg; /* Set VF's number in each of the msg */ - msg->pcifunc &= RVU_PFVF_FUNC_MASK; + msg->pcifunc &= ~RVU_PFVF_FUNC_MASK; msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK; offset = msg->next_msgoff; } @@ -503,6 +508,9 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work) offset = mbox->rx_start + ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); + trace_otx2_msg_status(pf->pdev, "PF-VF up queue handler(response)", + vf_mbox->up_num_msgs); + for (id = 0; id < vf_mbox->up_num_msgs; id++) { msg = mdev->mbase + offset; @@ -539,7 +547,7 @@ end: } } -static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) +irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) { struct otx2_nic *pf = (struct otx2_nic *)(pf_irq); int vfs 
= pf->total_vfs; @@ -568,6 +576,23 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) return IRQ_HANDLED; } +static void *cn20k_pfvf_mbox_alloc(struct otx2_nic *pf, int numvfs) +{ + struct qmem *mbox_addr; + int err; + + err = qmem_alloc(&pf->pdev->dev, &mbox_addr, numvfs, MBOX_SIZE); + if (err) { + dev_err(pf->dev, "qmem alloc fail\n"); + return ERR_PTR(-ENOMEM); + } + + otx2_write64(pf, RVU_PF_VF_MBOX_ADDR, (u64)mbox_addr->iova); + pf->pfvf_mbox_addr = mbox_addr; + + return mbox_addr->base; +} + static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs) { void __iomem *hwbase; @@ -589,20 +614,27 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs) if (!pf->mbox_pfvf_wq) return -ENOMEM; - /* On CN10K platform, PF <-> VF mailbox region follows after - * PF <-> AF mailbox region. + /* For CN20K, PF allocates mbox memory in DRAM and writes PF/VF + * regions/offsets in RVU_PF_VF_MBOX_ADDR, the RVU_PFX_FUNC_PFAF_MBOX + * gives the aliased address to access PF/VF mailbox regions. */ - if (test_bit(CN10K_MBOX, &pf->hw.cap_flag)) - base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) + - MBOX_SIZE; - else - base = readq((void __iomem *)((u64)pf->reg_base + - RVU_PF_VF_BAR4_ADDR)); + if (is_cn20k(pf->pdev)) { + hwbase = (void __iomem *)cn20k_pfvf_mbox_alloc(pf, numvfs); + } else { + /* On CN10K platform, PF <-> VF mailbox region follows after + * PF <-> AF mailbox region. + */ + if (test_bit(CN10K_MBOX, &pf->hw.cap_flag)) + base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) + + MBOX_SIZE; + else + base = readq(pf->reg_base + RVU_PF_VF_BAR4_ADDR); - hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs); - if (!hwbase) { - err = -ENOMEM; - goto free_wq; + hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs); + if (!hwbase) { + err = -ENOMEM; + goto free_wq; + } } mbox = &pf->mbox_pfvf[0]; @@ -626,7 +658,7 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs) return 0; free_iomem: - if (hwbase) + if (hwbase && !(is_cn20k(pf->pdev))) iounmap(hwbase); free_wq: destroy_workqueue(pf->mbox_pfvf_wq); @@ -645,8 +677,10 @@ static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf) pf->mbox_pfvf_wq = NULL; } - if (mbox->mbox.hwbase) + if (mbox->mbox.hwbase && !is_cn20k(pf->pdev)) iounmap(mbox->mbox.hwbase); + else + qmem_free(&pf->pdev->dev, pf->pfvf_mbox_addr); otx2_mbox_destroy(&mbox->mbox); } @@ -670,6 +704,9 @@ static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) { int vector; + if (is_cn20k(pf->pdev)) + return cn20k_disable_pfvf_mbox_intr(pf, numvfs); + /* Disable PF <=> VF mailbox IRQ */ otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull); otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull); @@ -691,11 +728,14 @@ static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) char *irq_name; int err; + if (is_cn20k(pf->pdev)) + return cn20k_register_pfvf_mbox_intr(pf, numvfs); + /* Register MBOX0 interrupt handler */ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE]; if (pf->pcifunc) snprintf(irq_name, NAME_SIZE, - "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc)); + "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pdev, pf->pcifunc)); else snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0"); err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0), @@ -711,7 +751,8 @@ static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE]; if (pf->pcifunc) snprintf(irq_name, NAME_SIZE, - "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc)); + 
"RVUPF%d_VF Mbox1", + rvu_get_pf(pf->pdev, pf->pcifunc)); else snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1"); err = request_irq(pci_irq_vector(pf->pdev, @@ -819,6 +860,9 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work) offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); pf = af_mbox->pfvf; + trace_otx2_msg_status(pf->pdev, "PF-AF down queue handler(response)", + num_msgs); + for (id = 0; id < num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + offset); otx2_process_pfaf_mbox_msg(pf, msg); @@ -974,6 +1018,9 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work) offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); + trace_otx2_msg_status(pf->pdev, "PF-AF up queue handler(notification)", + num_msgs); + for (id = 0; id < num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + offset); @@ -994,7 +1041,7 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work) otx2_mbox_msg_send(mbox, 0); } -static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) +irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) { struct otx2_nic *pf = (struct otx2_nic *)pf_irq; struct mbox *mw = &pf->mbox; @@ -1023,6 +1070,9 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF", BIT_ULL(0)); + + trace_otx2_msg_status(pf->pdev, "PF-AF up work queued(interrupt)", + hdr->num_msgs); } if (mbox_data & MBOX_DOWN_MSG) { @@ -1039,6 +1089,9 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF", BIT_ULL(0)); + + trace_otx2_msg_status(pf->pdev, "PF-AF down work queued(interrupt)", + hdr->num_msgs); } return IRQ_HANDLED; @@ -1046,10 +1099,18 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) void otx2_disable_mbox_intr(struct otx2_nic *pf) { - int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX); + int vector; /* Disable AF => PF mailbox IRQ */ - otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0)); + if (!is_cn20k(pf->pdev)) { + vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX); + otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0)); + } else { + vector = pci_irq_vector(pf->pdev, + RVU_MBOX_PF_INT_VEC_AFPF_MBOX); + otx2_write64(pf, RVU_PF_INT_ENA_W1C, + BIT_ULL(0) | BIT_ULL(1)); + } free_irq(vector, pf); } EXPORT_SYMBOL(otx2_disable_mbox_intr); @@ -1062,10 +1123,24 @@ int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af) int err; /* Register mailbox interrupt handler */ - irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE]; - snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox"); - err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX), - otx2_pfaf_mbox_intr_handler, 0, irq_name, pf); + if (!is_cn20k(pf->pdev)) { + irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE]; + snprintf(irq_name, NAME_SIZE, "RVUPF%d AFPF Mbox", + rvu_get_pf(pf->pdev, pf->pcifunc)); + err = request_irq(pci_irq_vector + (pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX), + pf->hw_ops->pfaf_mbox_intr_handler, + 0, irq_name, pf); + } else { + irq_name = &hw->irq_name[RVU_MBOX_PF_INT_VEC_AFPF_MBOX * + NAME_SIZE]; + snprintf(irq_name, NAME_SIZE, "RVUPF%d AFPF Mbox", + rvu_get_pf(pf->pdev, pf->pcifunc)); + err = request_irq(pci_irq_vector + (pf->pdev, RVU_MBOX_PF_INT_VEC_AFPF_MBOX), + pf->hw_ops->pfaf_mbox_intr_handler, + 0, irq_name, pf); + } if (err) { dev_err(pf->dev, "RVUPF: IRQ registration failed for PFAF mbox irq\n"); @@ -1075,8 +1150,14 
@@ int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af) /* Enable mailbox interrupt for msgs coming from AF. * First clear to avoid spurious interrupts, if any. */ - otx2_write64(pf, RVU_PF_INT, BIT_ULL(0)); - otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0)); + if (!is_cn20k(pf->pdev)) { + otx2_write64(pf, RVU_PF_INT, BIT_ULL(0)); + otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0)); + } else { + otx2_write64(pf, RVU_PF_INT, BIT_ULL(0) | BIT_ULL(1)); + otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0) | + BIT_ULL(1)); + } if (!probe_af) return 0; @@ -1107,7 +1188,7 @@ void otx2_pfaf_mbox_destroy(struct otx2_nic *pf) pf->mbox_wq = NULL; } - if (mbox->mbox.hwbase) + if (mbox->mbox.hwbase && !is_cn20k(pf->pdev)) iounmap((void __iomem *)mbox->mbox.hwbase); otx2_mbox_destroy(&mbox->mbox); @@ -1127,12 +1208,20 @@ int otx2_pfaf_mbox_init(struct otx2_nic *pf) if (!pf->mbox_wq) return -ENOMEM; - /* Mailbox is a reserved memory (in RAM) region shared between - * admin function (i.e AF) and this PF, shouldn't be mapped as - * device memory to allow unaligned accesses. + /* For CN20K, AF allocates mbox memory in DRAM and writes PF + * regions/offsets in RVU_MBOX_AF_PFX_ADDR, the RVU_PFX_FUNC_PFAF_MBOX + * gives the aliased address to access AF/PF mailbox regions. */ - hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM), - MBOX_SIZE); + if (is_cn20k(pf->pdev)) + hwbase = pf->reg_base + RVU_PFX_FUNC_PFAF_MBOX + + ((u64)BLKADDR_MBOX << RVU_FUNC_BLKADDR_SHIFT); + else + /* Mailbox is a reserved memory (in RAM) region shared between + * admin function (i.e AF) and this PF, shouldn't be mapped as + * device memory to allow unaligned accesses. + */ + hwbase = ioremap_wc(pci_resource_start + (pf->pdev, PCI_MBOX_BAR_NUM), MBOX_SIZE); if (!hwbase) { dev_err(pf->dev, "Unable to map PFAF mailbox region\n"); err = -ENOMEM; @@ -1305,8 +1394,8 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data) { struct otx2_nic *pf = data; struct otx2_snd_queue *sq; - u64 val, *ptr; - u64 qidx = 0; + void __iomem *ptr; + u64 val, qidx = 0; /* CQ */ for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) { @@ -1954,7 +2043,7 @@ int otx2_open(struct net_device *netdev) if (err) { dev_err(pf->dev, "RVUPF%d: IRQ registration failed for QERR\n", - rvu_get_pf(pf->pcifunc)); + rvu_get_pf(pf->pdev, pf->pcifunc)); goto err_disable_napi; } @@ -1972,7 +2061,7 @@ int otx2_open(struct net_device *netdev) if (name_len >= NAME_SIZE) { dev_err(pf->dev, "RVUPF%d: IRQ registration failed for CQ%d, irq name is too long\n", - rvu_get_pf(pf->pcifunc), qidx); + rvu_get_pf(pf->pdev, pf->pcifunc), qidx); err = -EINVAL; goto err_free_cints; } @@ -1983,7 +2072,7 @@ int otx2_open(struct net_device *netdev) if (err) { dev_err(pf->dev, "RVUPF%d: IRQ registration failed for CQ%d\n", - rvu_get_pf(pf->pcifunc), qidx); + rvu_get_pf(pf->pdev, pf->pcifunc), qidx); goto err_free_cints; } vec++; @@ -2069,7 +2158,6 @@ int otx2_stop(struct net_device *netdev) struct otx2_nic *pf = netdev_priv(netdev); struct otx2_cq_poll *cq_poll = NULL; struct otx2_qset *qset = &pf->qset; - struct otx2_rss_info *rss; int qidx, vec, wrk; /* If the DOWN flag is set resources are already freed */ @@ -2087,10 +2175,7 @@ int otx2_stop(struct net_device *netdev) otx2_rxtx_enable(pf, false); /* Clear RSS enable flag */ - rss = &pf->hw.rss_info; - rss->enable = false; - if (!netif_is_rxfh_configured(netdev)) - kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]); + pf->hw.rss_info.enable = false; /* Cleanup Queue IRQ */ vec = pci_irq_vector(pf->pdev, @@ -2980,8 +3065,13 @@ 
int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf) if (err) return err; - err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT, - RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX); + if (!is_cn20k(pf->pdev)) + err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT, + RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX); + else + err = pci_alloc_irq_vectors(hw->pdev, RVU_MBOX_PF_INT_VEC_CNT, + RVU_MBOX_PF_INT_VEC_CNT, + PCI_IRQ_MSIX); if (err < 0) { dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n", __func__, num_vec); @@ -2990,6 +3080,11 @@ int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf) otx2_setup_dev_hw_settings(pf); + if (is_cn20k(pf->pdev)) + cn20k_init(pf); + else + otx2_init_hw_ops(pf); + /* Init PF <=> AF mailbox stuff */ err = otx2_pfaf_mbox_init(pf); if (err) @@ -3048,7 +3143,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } - err = pci_request_regions(pdev, DRV_NAME); + err = pcim_request_all_regions(pdev, DRV_NAME); if (err) { dev_err(dev, "PCI request regions failed 0x%x\n", err); return err; @@ -3057,7 +3152,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (err) { dev_err(dev, "DMA mask config failed, abort\n"); - goto err_release_regions; + return err; } pci_set_master(pdev); @@ -3067,10 +3162,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES); netdev = alloc_etherdev_mqs(sizeof(*pf), qcount + qos_txqs, qcount); - if (!netdev) { - err = -ENOMEM; - goto err_release_regions; - } + if (!netdev) + return -ENOMEM; pci_set_drvdata(pdev, netdev); SET_NETDEV_DEV(netdev, &pdev->dev); @@ -3128,6 +3221,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_ptp_destroy; + otx2_set_hw_capabilities(pf); + err = cn10k_mcs_init(pf); if (err) goto err_del_mcam_entries; @@ -3246,8 +3341,6 @@ err_detach_rsrc: err_free_netdev: pci_set_drvdata(pdev, NULL); free_netdev(netdev); -err_release_regions: - pci_release_regions(pdev); return err; } @@ -3289,6 +3382,7 @@ static void otx2_vf_link_event_task(struct work_struct *work) req = (struct cgx_link_info_msg *)msghdr; req->hdr.id = MBOX_MSG_CGX_LINK_EVENT; req->hdr.sig = OTX2_MBOX_REQ_SIG; + req->hdr.pcifunc = pf->pcifunc; memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info)); otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx); @@ -3447,8 +3541,6 @@ static void otx2_remove(struct pci_dev *pdev) pci_free_irq_vectors(pf->pdev); pci_set_drvdata(pdev, NULL); free_netdev(netdev); - - pci_release_regions(pdev); } static struct pci_driver otx2_pf_driver = { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c index 63130ba37e9d..e52cc6b1a26c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c @@ -193,7 +193,7 @@ static int ptp_pps_on(struct otx2_ptp *ptp, int on, u64 period) return otx2_sync_mbox_msg(&ptp->nic->mbox); } -static u64 ptp_cc_read(const struct cyclecounter *cc) +static u64 ptp_cc_read(struct cyclecounter *cc) { struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h index e3aee6e36215..1cd576fd09c5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h +++ 
b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h @@ -44,6 +44,17 @@ #define RVU_PF_VF_MBOX_ADDR (0xC40) #define RVU_PF_LMTLINE_ADDR (0xC48) +#define RVU_MBOX_PF_VFX_PFVF_TRIGX(a) (0x2000 | (a) << 3) +#define RVU_MBOX_PF_VFPF_INTX(a) (0x1000 | (a) << 3) +#define RVU_MBOX_PF_VFPF_INT_W1SX(a) (0x1020 | (a) << 3) +#define RVU_MBOX_PF_VFPF_INT_ENA_W1SX(a) (0x1040 | (a) << 3) +#define RVU_MBOX_PF_VFPF_INT_ENA_W1CX(a) (0x1060 | (a) << 3) + +#define RVU_MBOX_PF_VFPF1_INTX(a) (0x1080 | (a) << 3) +#define RVU_MBOX_PF_VFPF1_INT_W1SX(a) (0x10a0 | (a) << 3) +#define RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(a) (0x10c0 | (a) << 3) +#define RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(a) (0x10e0 | (a) << 3) + /* RVU VF registers */ #define RVU_VF_VFPF_MBOX0 (0x00000) #define RVU_VF_VFPF_MBOX1 (0x00008) @@ -58,6 +69,11 @@ #define RVU_VF_MSIX_PBAX(a) (0xF0000 | (a) << 3) #define RVU_VF_MBOX_REGION (0xC0000) +/* CN20K RVU_MBOX_E: RVU PF/VF MBOX Address Range Enumeration */ +#define RVU_MBOX_AF_PFX_ADDR(a) (0x5000 | (a) << 4) +#define RVU_PFX_FUNC_PFAF_MBOX (0x80000) +#define RVU_PFX_FUNCX_VFAF_MBOX (0x40000) + #define RVU_FUNC_BLKADDR_SHIFT 20 #define RVU_FUNC_BLKADDR_MASK 0x1FULL @@ -138,39 +154,12 @@ #define NIX_LF_CINTX_ENA_W1S(a) (NIX_LFBASE | 0xD40 | (a) << 12) #define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) << 12) -/* NIX AF transmit scheduler registers */ -#define NIX_AF_SMQX_CFG(a) (0x700 | (u64)(a) << 16) -#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (u64)(a) << 16) -#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (u64)(a) << 16) -#define NIX_AF_TL1X_CIR(a) (0xC20 | (u64)(a) << 16) -#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (u64)(a) << 16) -#define NIX_AF_TL2X_PARENT(a) (0xE88 | (u64)(a) << 16) -#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (u64)(a) << 16) -#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (u64)(a) << 16) -#define NIX_AF_TL2X_CIR(a) (0xE20 | (u64)(a) << 16) -#define NIX_AF_TL2X_PIR(a) (0xE30 | (u64)(a) << 16) -#define NIX_AF_TL3X_PARENT(a) (0x1088 | (u64)(a) << 16) -#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (u64)(a) << 16) -#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (u64)(a) << 16) -#define NIX_AF_TL3X_CIR(a) (0x1020 | (u64)(a) << 16) -#define NIX_AF_TL3X_PIR(a) (0x1030 | (u64)(a) << 16) -#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (u64)(a) << 16) -#define NIX_AF_TL4X_PARENT(a) (0x1288 | (u64)(a) << 16) -#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (u64)(a) << 16) -#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (u64)(a) << 16) -#define NIX_AF_TL4X_CIR(a) (0x1220 | (u64)(a) << 16) -#define NIX_AF_TL4X_PIR(a) (0x1230 | (u64)(a) << 16) -#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (u64)(a) << 16) -#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (u64)(a) << 16) -#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (u64)(a) << 16) -#define NIX_AF_MDQX_CIR(a) (0x1420 | (u64)(a) << 16) -#define NIX_AF_MDQX_PIR(a) (0x1430 | (u64)(a) << 16) -#define NIX_AF_MDQX_PARENT(a) (0x1480 | (u64)(a) << 16) -#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (u64)(a) << 16 | (b) << 3) - /* LMT LF registers */ #define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT) #define LMT_LF_LMTLINEX(a) (LMT_LFBASE | 0x000 | (a) << 12) #define LMT_LF_LMTCANCEL (LMT_LFBASE | 0x400) +/* CN20K registers */ +#define RVU_PF_DISC (0x0) + #endif /* OTX2_REG_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index 9a226ca74425..5f80b23c5335 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -467,7 +467,8 @@ static int otx2_tc_parse_actions(struct 
otx2_nic *nic, target = act->dev; if (target->dev.parent) { priv = netdev_priv(target); - if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) { + if (rvu_get_pf(nic->pdev, nic->pcifunc) != + rvu_get_pf(nic->pdev, priv->pcifunc)) { NL_SET_ERR_MSG_MOD(extack, "can't redirect to other pf/vf"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index af8cabe828d0..625bb5a05344 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -335,6 +335,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, struct nix_rx_parse_s *parse = &cqe->parse; struct nix_rx_sg_s *sg = &cqe->sg; struct sk_buff *skb = NULL; + u64 *word = (u64 *)parse; void *end, *start; u32 metasize = 0; u64 *seg_addr; @@ -342,9 +343,12 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, int seg; if (unlikely(parse->errlev || parse->errcode)) { - if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) + if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) { + trace_otx2_parse_dump(pfvf->pdev, "Err:", word); return; + } } + trace_otx2_parse_dump(pfvf->pdev, "", word); if (pfvf->xdp_prog) if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, @@ -1410,9 +1414,8 @@ void otx2_free_pending_sqe(struct otx2_nic *pfvf) } } -static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, - struct xdp_frame *xdpf, - u64 dma_addr, int len, int *offset, u16 flags) +void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf, + u64 dma_addr, int len, int *offset, u16 flags) { struct nix_sqe_sg_s *sg = NULL; u64 *iova = NULL; @@ -1559,17 +1562,16 @@ handle_xdp_verdict: break; default: bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act); - break; + fallthrough; case XDP_ABORTED: - if (xsk_buff) - xsk_buff_free(xsk_buff); - trace_xdp_exception(pfvf->netdev, prog, act); - break; + if (act == XDP_ABORTED) + trace_xdp_exception(pfvf->netdev, prog, act); + fallthrough; case XDP_DROP: cq->pool_ptrs++; if (xsk_buff) { xsk_buff_free(xsk_buff); - } else if (page->pp) { + } else if (pp_page_to_nmdesc(page)->pp) { page_pool_recycle_direct(pool->page_pool, page); } else { otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 7ef3ba477d49..5589fccd370b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -136,7 +136,7 @@ static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf, rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT; rsp->hdr.sig = OTX2_MBOX_RSP_SIG; - rsp->hdr.pcifunc = 0; + rsp->hdr.pcifunc = req->pcifunc; rsp->hdr.rc = 0; err = otx2_mbox_up_handler_cgx_link_event( vf, (struct cgx_link_info_msg *)req, rsp); @@ -240,6 +240,10 @@ static void otx2vf_disable_mbox_intr(struct otx2_nic *vf) /* Disable VF => PF mailbox IRQ */ otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0)); + + if (is_cn20k(vf->pdev)) + otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0) | BIT_ULL(1)); + free_irq(vector, vf); } @@ -252,9 +256,18 @@ static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf) /* Register mailbox interrupt handler */ irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE]; - snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox"); - err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX), - otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf); + snprintf(irq_name, NAME_SIZE, 
"RVUVF%d AFVF Mbox", ((vf->pcifunc & + RVU_PFVF_FUNC_MASK) - 1)); + + if (!is_cn20k(vf->pdev)) { + err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX), + otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf); + } else { + err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX), + vf->hw_ops->vfaf_mbox_intr_handler, 0, irq_name, + vf); + } + if (err) { dev_err(vf->dev, "RVUPF: IRQ registration failed for VFAF mbox irq\n"); @@ -264,8 +277,15 @@ static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf) /* Enable mailbox interrupt for msgs coming from PF. * First clear to avoid spurious interrupts, if any. */ - otx2_write64(vf, RVU_VF_INT, BIT_ULL(0)); - otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0)); + if (!is_cn20k(vf->pdev)) { + otx2_write64(vf, RVU_VF_INT, BIT_ULL(0)); + otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0)); + } else { + otx2_write64(vf, RVU_VF_INT, BIT_ULL(0) | BIT_ULL(1) | + BIT_ULL(2) | BIT_ULL(3)); + otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0) | + BIT_ULL(1) | BIT_ULL(2) | BIT_ULL(3)); + } if (!probe_pf) return 0; @@ -315,7 +335,13 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf) if (!vf->mbox_wq) return -ENOMEM; - if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) { + /* For cn20k platform, VF mailbox region is in dram aliased from AF + * VF MBOX ADDR, MBOX is a separate RVU block. + */ + if (is_cn20k(vf->pdev)) { + hwbase = vf->reg_base + RVU_VF_MBOX_REGION + ((u64)BLKADDR_MBOX << + RVU_FUNC_BLKADDR_SHIFT); + } else if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) { /* For cn10k platform, VF mailbox region is in its BAR2 * register space */ @@ -548,7 +574,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } - err = pci_request_regions(pdev, DRV_NAME); + err = pcim_request_all_regions(pdev, DRV_NAME); if (err) { dev_err(dev, "PCI request regions failed 0x%x\n", err); return err; @@ -557,7 +583,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (err) { dev_err(dev, "DMA mask config failed, abort\n"); - goto err_release_regions; + return err; } pci_set_master(pdev); @@ -565,10 +591,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) qcount = num_online_cpus(); qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES); netdev = alloc_etherdev_mqs(sizeof(*vf), qcount + qos_txqs, qcount); - if (!netdev) { - err = -ENOMEM; - goto err_release_regions; - } + if (!netdev) + return -ENOMEM; pci_set_drvdata(pdev, netdev); SET_NETDEV_DEV(netdev, &pdev->dev); @@ -618,6 +642,12 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) } otx2_setup_dev_hw_settings(vf); + + if (is_cn20k(vf->pdev)) + cn20k_init(vf); + else + otx2_init_hw_ops(vf); + /* Init VF <=> PF mailbox stuff */ err = otx2vf_vfaf_mbox_init(vf); if (err) @@ -729,9 +759,12 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) } #ifdef CONFIG_DCB - err = otx2_dcbnl_set_ops(netdev); - if (err) - goto err_free_zc_bmap; + /* Priority flow control is not supported for LBK and SDP vf(s) */ + if (!(is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev))) { + err = otx2_dcbnl_set_ops(netdev); + if (err) + goto err_free_zc_bmap; + } #endif otx2_qos_init(vf, qos_txqs); @@ -765,8 +798,6 @@ err_free_irq_vectors: err_free_netdev: pci_set_drvdata(pdev, NULL); free_netdev(netdev); -err_release_regions: - pci_release_regions(pdev); return err; } @@ -815,8 +846,6 @@ static void otx2vf_remove(struct 
pci_dev *pdev) pci_free_irq_vectors(vf->pdev); pci_set_drvdata(pdev, NULL); free_netdev(netdev); - - pci_release_regions(pdev); } static struct pci_driver otx2vf_driver = { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c index ce10caea8511..7d67b4cbaf71 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c @@ -11,6 +11,7 @@ #include <net/xdp.h> #include "otx2_common.h" +#include "otx2_struct.h" #include "otx2_xsk.h" int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool, @@ -131,7 +132,7 @@ int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qi set_bit(qidx, pf->af_xdp_zc_qidx); otx2_clean_up_rq(pf, qidx); /* Reconfigure RSS table as 'qidx' cannot be part of RSS now */ - otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP); + otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP, NULL); /* Kick start the NAPI context so that receiving will start */ return otx2_xsk_wakeup(pf->netdev, qidx, XDP_WAKEUP_RX); } @@ -152,7 +153,7 @@ int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qidx) clear_bit(qidx, pf->af_xdp_zc_qidx); xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); /* Reconfigure RSS table as 'qidx' now need to be part of RSS now */ - otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP); + otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP, NULL); return 0; } @@ -196,11 +197,39 @@ void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int sq->xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, qidx); } +static void otx2_xsk_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, + u16 qidx) +{ + struct nix_sqe_hdr_s *sqe_hdr; + struct otx2_snd_queue *sq; + int offset; + + sq = &pfvf->qset.sq[qidx]; + memset(sq->sqe_base + 8, 0, sq->sqe_size - 8); + + sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base); + + if (!sqe_hdr->total) { + sqe_hdr->aura = sq->aura_id; + sqe_hdr->df = 1; + sqe_hdr->sq = qidx; + sqe_hdr->pnc = 1; + } + sqe_hdr->total = len; + sqe_hdr->sqe_id = sq->head; + + offset = sizeof(*sqe_hdr); + + otx2_xdp_sqe_add_sg(sq, NULL, iova, len, &offset, OTX2_AF_XDP_FRAME); + sqe_hdr->sizem1 = (offset / 16) - 1; + pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); +} + void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool, int queue, int budget) { struct xdp_desc *xdp_desc = pool->tx_descs; - int err, i, work_done = 0, batch; + int i, batch; budget = min(budget, otx2_read_free_sqe(pfvf, queue)); batch = xsk_tx_peek_release_desc_batch(pool, budget); @@ -211,15 +240,6 @@ void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool, dma_addr_t dma_addr; dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc[i].addr); - err = otx2_xdp_sq_append_pkt(pfvf, NULL, dma_addr, xdp_desc[i].len, - queue, OTX2_AF_XDP_FRAME); - if (!err) { - netdev_err(pfvf->netdev, "AF_XDP: Unable to transfer packet err%d\n", err); - break; - } - work_done++; + otx2_xsk_sq_append_pkt(pfvf, dma_addr, xdp_desc[i].len, queue); } - - if (work_done) - xsk_tx_release(pool); } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c index 0f844c14485a..5765bac119f0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c @@ -165,6 +165,11 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf, otx2_config_sched_shaping(pfvf, node, cfg, &num_regs); } else if 
(level == NIX_TXSCH_LVL_TL2) { + /* configure parent txschq */ + cfg->reg[num_regs] = NIX_AF_TL2X_PARENT(node->schq); + cfg->regval[num_regs] = (u64)hw->tx_link << 16; + num_regs++; + /* configure link cfg */ if (level == pfvf->qos.link_cfg_lvl) { cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link); @@ -1633,6 +1638,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force if (!node->is_static) dwrr_del_node = true; + WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER); /* destroy the leaf node */ otx2_qos_disable_sq(pfvf, qid); otx2_qos_destroy_node(pfvf, node); @@ -1677,9 +1683,6 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force } kfree(new_cfg); - /* update tx_real_queues */ - otx2_qos_update_tx_netdev_queues(pfvf); - return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c index c5dbae0e513b..2872adabc830 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c @@ -151,9 +151,10 @@ static void otx2_qos_sq_free_sqbs(struct otx2_nic *pfvf, int qidx) static void otx2_qos_sqb_flush(struct otx2_nic *pfvf, int qidx) { int sqe_tail, sqe_head; - u64 incr, *ptr, val; + void __iomem *ptr; + u64 incr, val; - ptr = (__force u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); + ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); incr = (u64)qidx << 32; val = otx2_atomic64_add(incr, ptr); sqe_head = (val >> 20) & 0x3F; @@ -256,6 +257,26 @@ out: return err; } +static int otx2_qos_nix_npa_ndc_sync(struct otx2_nic *pfvf) +{ + struct ndc_sync_op *req; + int rc; + + mutex_lock(&pfvf->mbox.lock); + + req = otx2_mbox_alloc_msg_ndc_sync_op(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + req->nix_lf_tx_sync = true; + req->npa_lf_sync = true; + rc = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + return rc; +} + void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx) { struct otx2_qset *qset = &pfvf->qset; @@ -285,6 +306,8 @@ void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx) otx2_qos_sqb_flush(pfvf, sq_idx); otx2_smq_flush(pfvf, otx2_get_smq_idx(pfvf, sq_idx)); + /* NIX/NPA NDC sync */ + otx2_qos_nix_npa_ndc_sync(pfvf); otx2_cleanup_tx_cqes(pfvf, cq); mutex_lock(&pfvf->mbox.lock); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c index 04e08e06f30f..25af98034e2e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c @@ -67,6 +67,8 @@ static int rvu_rep_mcam_flow_init(struct rep_dev *rep) rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp (&priv->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) + goto exit; for (ent = 0; ent < rsp->count; ent++) rep->flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent]; @@ -242,10 +244,10 @@ static int rvu_rep_devlink_port_register(struct rep_dev *rep) if (!(rep->pcifunc & RVU_PFVF_FUNC_MASK)) { attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; - attrs.phys.port_number = rvu_get_pf(rep->pcifunc); + attrs.phys.port_number = rvu_get_pf(priv->pdev, rep->pcifunc); } else { attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF; - attrs.pci_vf.pf = rvu_get_pf(rep->pcifunc); + attrs.pci_vf.pf = rvu_get_pf(priv->pdev, rep->pcifunc); attrs.pci_vf.vf = rep->pcifunc & RVU_PFVF_FUNC_MASK; } @@ -670,7 +672,8 @@ int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack) 
rep->pcifunc = pcifunc; snprintf(ndev->name, sizeof(ndev->name), "Rpf%dvf%d", - rvu_get_pf(pcifunc), (pcifunc & RVU_PFVF_FUNC_MASK)); + rvu_get_pf(priv->pdev, pcifunc), + (pcifunc & RVU_PFVF_FUNC_MASK)); ndev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXHASH | @@ -763,7 +766,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } - err = pci_request_regions(pdev, DRV_NAME); + err = pcim_request_all_regions(pdev, DRV_NAME); if (err) { dev_err(dev, "PCI request regions failed 0x%x\n", err); return err; @@ -772,7 +775,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (err) { dev_err(dev, "DMA mask config failed, abort\n"); - goto err_release_regions; + goto err_set_drv_data; } pci_set_master(pdev); @@ -780,7 +783,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id) priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) { err = -ENOMEM; - goto err_release_regions; + goto err_set_drv_data; } pci_set_drvdata(pdev, priv); @@ -797,7 +800,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = otx2_init_rsrc(pdev, priv); if (err) - goto err_release_regions; + goto err_set_drv_data; priv->iommu_domain = iommu_get_domain_for_dev(dev); @@ -820,9 +823,8 @@ err_detach_rsrc: otx2_disable_mbox_intr(priv); otx2_pfaf_mbox_destroy(priv); pci_free_irq_vectors(pdev); -err_release_regions: +err_set_drv_data: pci_set_drvdata(pdev, NULL); - pci_release_regions(pdev); return err; } @@ -842,7 +844,6 @@ static void rvu_rep_remove(struct pci_dev *pdev) otx2_pfaf_mbox_destroy(priv); pci_free_irq_vectors(priv->pdev); pci_set_drvdata(pdev, NULL); - pci_release_regions(pdev); } static struct pci_driver rvu_rep_driver = { diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.c b/drivers/net/ethernet/marvell/prestera/prestera_counter.c index 4cd53a2dae46..634f4543c1d7 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_counter.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.c @@ -336,8 +336,7 @@ prestera_counter_block_get_by_idx(struct prestera_counter *counter, u32 idx) static void prestera_counter_stats_work(struct work_struct *work) { - struct delayed_work *dl_work = - container_of(work, struct delayed_work, work); + struct delayed_work *dl_work = to_delayed_work(work); struct prestera_counter *counter = container_of(dl_work, struct prestera_counter, stats_dw); struct prestera_counter_block *block; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c index 35857dc19542..c45d108b2f6d 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c @@ -845,9 +845,9 @@ static int prestera_pci_probe(struct pci_dev *pdev, goto err_pci_enable_device; } - err = pci_request_regions(pdev, driver_name); + err = pcim_request_all_regions(pdev, driver_name); if (err) { - dev_err(&pdev->dev, "pci_request_regions failed\n"); + dev_err(&pdev->dev, "pcim_request_all_regions failed\n"); goto err_pci_request_regions; } @@ -938,7 +938,6 @@ err_pci_dev_alloc: err_pp_ioremap: err_mem_ioremap: err_dma_mask: - pci_release_regions(pdev); err_pci_request_regions: err_pci_enable_device: return err; @@ -953,7 +952,6 @@ static void prestera_pci_remove(struct pci_dev *pdev) pci_free_irq_vectors(pdev); destroy_workqueue(fw->wq); 
prestera_fw_uninit(fw); - pci_release_regions(pdev); } static const struct pci_device_id prestera_pci_devices[] = { diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 2bf426cea6dd..68f8a1e36aa6 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -353,7 +353,7 @@ static void rxq_refill(struct net_device *dev) static inline void rxq_refill_timer_wrapper(struct timer_list *t) { - struct pxa168_eth_private *pep = from_timer(pep, t, timeout); + struct pxa168_eth_private *pep = timer_container_of(pep, t, timeout); napi_schedule(&pep->napi); } @@ -1175,7 +1175,7 @@ static int pxa168_eth_stop(struct net_device *dev) /* Write to ICR to clear interrupts. */ wrl(pep, INT_W_CLEAR, 0); napi_disable(&pep->napi); - del_timer_sync(&pep->timeout); + timer_delete_sync(&pep->timeout); netif_carrier_off(dev); free_irq(dev->irq, dev); rxq_deinit(dev); @@ -1229,9 +1229,9 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget) int work_done = 0; /* - * We call txq_reclaim every time since in NAPI interupts are disabled - * and due to this we miss the TX_DONE interrupt,which is not updated in - * interrupt status register. + * We call txq_reclaim every time since in NAPI interrupts are disabled + * and due to this we miss the TX_DONE interrupt, which is not updated + * in interrupt status register. */ txq_reclaim(dev, 0); if (netif_queue_stopped(dev) diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index a1bada9eaaf6..05349a0b2db1 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -1494,7 +1494,7 @@ static int xm_check_link(struct net_device *dev) */ static void xm_link_timer(struct timer_list *t) { - struct skge_port *skge = from_timer(skge, t, link_timer); + struct skge_port *skge = timer_container_of(skge, t, link_timer); struct net_device *dev = skge->netdev; struct skge_hw *hw = skge->hw; int port = skge->port; @@ -2662,7 +2662,7 @@ static int skge_down(struct net_device *dev) netif_tx_disable(dev); if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC) - del_timer_sync(&skge->link_timer); + timer_delete_sync(&skge->link_timer); napi_disable(&skge->napi); netif_carrier_off(dev); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index d7121c836508..3831f533b9db 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -2961,7 +2961,7 @@ static int sky2_rx_hung(struct net_device *dev) static void sky2_watchdog(struct timer_list *t) { - struct sky2_hw *hw = from_timer(hw, t, watchdog_timer); + struct sky2_hw *hw = timer_container_of(hw, t, watchdog_timer); /* Check for lost IRQ once a second */ if (sky2_read32(hw, B0_ISRC)) { @@ -5052,7 +5052,7 @@ static int sky2_suspend(struct device *dev) if (!hw) return 0; - del_timer_sync(&hw->watchdog_timer); + timer_delete_sync(&hw->watchdog_timer); cancel_work_sync(&hw->restart_work); rtnl_lock(); |
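Two details in the otx2_pf.c hunks above are easy to miss. First, rvu_get_pf() now takes the pci_dev along with the pcifunc, so the PF field can be decoded with silicon-specific shifts and masks; correspondingly, the fixed defines and the old rvu_get_pf() inline are dropped from otx2_common.h at the top of this section. Second, the PF-to-VF mailbox forwarding path fixes a masking bug: "msg->pcifunc &= RVU_PFVF_FUNC_MASK" kept only the function bits, where the intent was to clear them before stamping in the destination VF. A sketch of the intended arithmetic follows; the PF shift and mask values are assumptions of the sketch, mirroring the pre-CN20K layout of the removed inline helper.

	#include <linux/types.h>

	#define RVU_PFVF_FUNC_MASK	0x3FF
	#define RVU_PFVF_PF_SHIFT	10	/* assumed pre-CN20K layout */
	#define RVU_PFVF_PF_MASK	0x3F	/* assumed pre-CN20K layout */

	/* pcifunc packs | PF | FUNC |; FUNC 0 is the PF itself and
	 * FUNC n addresses VF n - 1.
	 */
	static inline int foo_get_pf(u16 pcifunc)
	{
		return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
	}

	static inline u16 foo_pcifunc_for_vf(u16 pf_pcifunc, u16 vf_idx)
	{
		/* Clear the FUNC bits first (the fix: "&= ~", not "&=")... */
		pf_pcifunc &= ~RVU_PFVF_FUNC_MASK;
		/* ...then stamp in VF vf_idx, which is FUNC vf_idx + 1. */
		return pf_pcifunc | ((vf_idx + 1) & RVU_PFVF_FUNC_MASK);
	}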
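The recurring pci_request_regions() to pcim_request_all_regions() substitutions in the otx2 PF/VF, rep and prestera probe paths above are devres conversions: these drivers already enable the device with pcim_enable_device(), and a managed region request is released automatically on probe failure or unbind, which is why every matching pci_release_regions() call and err_release_regions label is deleted rather than moved. The resulting probe skeleton, sketched for a placeholder "foo" driver with the device-specific setup trimmed:

	#include <linux/pci.h>

	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int err;

		err = pcim_enable_device(pdev);
		if (err)
			return err;

		/* Managed request: devres unwinds it, so error paths below
		 * can simply return instead of jumping to a release label.
		 */
		err = pcim_request_all_regions(pdev, "foo");
		if (err)
			return err;

		pci_set_master(pdev);

		/* ... device-specific setup; plain returns on failure ... */
		return 0;
	}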
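Finally, the pxa168_eth, skge and sky2 hunks at the end of this section are mechanical conversions to the renamed timer API: from_timer() becomes timer_container_of() and del_timer_sync() becomes timer_delete_sync(), with no change in behaviour. The shape of the conversion, for a placeholder "foo" driver:

	#include <linux/jiffies.h>
	#include <linux/timer.h>

	struct foo_priv {
		struct timer_list watchdog;
	};

	static void foo_watchdog(struct timer_list *t)
	{
		/* timer_container_of() is the new name for from_timer():
		 * recover the enclosing structure from its embedded
		 * timer_list.
		 */
		struct foo_priv *priv = timer_container_of(priv, t, watchdog);

		mod_timer(&priv->watchdog, jiffies + HZ);
	}

	static void foo_stop(struct foo_priv *priv)
	{
		/* timer_delete_sync() is the new name for del_timer_sync(). */
		timer_delete_sync(&priv->watchdog);
	}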