Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt/bnxt.c')
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 383
 1 file changed, 216 insertions(+), 167 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 6a884df44612..0911eb3b8007 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1614,7 +1614,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
- (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+ (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
u16 vlan_proto = tpa_info->metadata >>
RX_CMP_FLAGS2_METADATA_TPID_SFT;
u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
@@ -1832,7 +1832,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if ((rxcmp1->rx_cmp_flags2 &
cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
- (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+ (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
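Both RX paths above now test the combined BNXT_HW_FEATURE_VLAN_ALL_RX mask (CTAG and STAG strip toggled as one) instead of CTAG alone. The completion's 32-bit metadata word packs the TPID above the TCI; below is a minimal standalone sketch of that unpacking, where the 0xffff mask and 16-bit shift are assumed values mirroring RX_CMP_FLAGS2_METADATA_* in bnxt.h.

#include <stdint.h>
#include <stdio.h>

/* Assumed values, mirroring RX_CMP_FLAGS2_METADATA_* in bnxt.h */
#define METADATA_TCI_MASK 0xffffu /* VLAN TCI: PCP/DEI/VID */
#define METADATA_TPID_SFT 16      /* TPID (0x8100 or 0x88a8) above it */

int main(void)
{
	uint32_t meta_data = (0x8100u << METADATA_TPID_SFT) | 0x0064u;
	uint16_t vtag = meta_data & METADATA_TCI_MASK;        /* full TCI */
	uint16_t vlan_proto = meta_data >> METADATA_TPID_SFT;

	printf("proto=0x%04x vid=%u\n", vlan_proto, vtag & 0x0fffu); /* proto=0x8100 vid=100 */
	return 0;
}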
@@ -3538,7 +3538,7 @@ static void bnxt_free_vnic_attributes(struct bnxt *bp)
}
if (vnic->rss_table) {
- dma_free_coherent(&pdev->dev, PAGE_SIZE,
+ dma_free_coherent(&pdev->dev, vnic->rss_table_size,
vnic->rss_table,
vnic->rss_table_dma_addr);
vnic->rss_table = NULL;
@@ -3603,7 +3603,13 @@ vnic_skip_grps:
continue;
/* Allocate rss table and hash key */
- vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
+
+ vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
+ vnic->rss_table = dma_alloc_coherent(&pdev->dev,
+ vnic->rss_table_size,
&vnic->rss_table_dma_addr,
GFP_KERNEL);
if (!vnic->rss_table) {
@@ -3611,8 +3617,6 @@ vnic_skip_grps:
goto out;
}
- size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
-
vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
}
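The allocation is now sized exactly rather than as a blanket PAGE_SIZE: one coherent buffer holding the cache-aligned indirection table with the 40-byte hash key right behind it, and the free path uses the recorded vnic->rss_table_size. A sketch of the offset math for the legacy layout, assuming HW_HASH_INDEX_SIZE = 0x80 and HW_HASH_KEY_SIZE = 40 from bnxt.h (on P5 the table bytes come from BNXT_MAX_RSS_TABLE_SIZE_P5 instead):

#include <stdio.h>

#define L1_CACHE_BYTES 64 /* assumption: 64-byte cache lines */
#define L1_CACHE_ALIGN(x) (((x) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))

#define HW_HASH_INDEX_SIZE 0x80 /* assumed value from bnxt.h */
#define HW_HASH_KEY_SIZE   40   /* assumed value from bnxt.h */

int main(void)
{
	/* legacy chips: one u16 ring-group id per hash bucket */
	unsigned int size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(unsigned short));

	/* rss_table at offset 0, rss_hash_key at the aligned end of it */
	printf("table=%u key_off=%u total=%u\n", size, size,
	       size + HW_HASH_KEY_SIZE); /* table=256 key_off=256 total=296 */
	return 0;
}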
@@ -4505,10 +4509,12 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
switch (tunnel_type) {
case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
- req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
+ req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
+ bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
break;
case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
- req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
+ req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
+ bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
break;
default:
break;
@@ -4543,10 +4549,11 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
switch (tunnel_type) {
case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
- bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
+ bp->vxlan_fw_dst_port_id =
+ le16_to_cpu(resp->tunnel_dst_port_id);
break;
case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
- bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
+ bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
break;
default:
break;
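Besides invalidating the cached id once a port is freed, both tunnel hunks fix byte order: HWRM message fields are little-endian on the wire, so the host-order u16 kept in bp must pass through cpu_to_le16()/le16_to_cpu() at the message boundary. A portable illustration of what that conversion pair does; it is a no-op on little-endian hosts:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's cpu_to_le16()/le16_to_cpu(): swap only on
 * big-endian hosts, detected at run time here for portability. */
static uint16_t swap_if_be16(uint16_t v)
{
	const union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };

	return probe.b[0] ? v : (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	uint16_t fw_port_id = 0x1234;             /* host order, as in bp */
	uint16_t wire = swap_if_be16(fw_port_id); /* goes into the request */

	/* converting back recovers the host-order value on any host */
	printf("host=0x%04x roundtrip=0x%04x\n", fw_port_id, swap_if_be16(wire));
	return 0;
}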
@@ -4826,9 +4833,112 @@ static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
}
}
+static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
+{
+ int entries;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
+ else
+ entries = HW_HASH_INDEX_SIZE;
+
+ bp->rss_indir_tbl_entries = entries;
+ bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
+ GFP_KERNEL);
+ if (!bp->rss_indir_tbl)
+ return -ENOMEM;
+ return 0;
+}
+
+static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
+{
+ u16 max_rings, max_entries, pad, i;
+
+ if (!bp->rx_nr_rings)
+ return;
+
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ max_rings = bp->rx_nr_rings - 1;
+ else
+ max_rings = bp->rx_nr_rings;
+
+ max_entries = bnxt_get_rxfh_indir_size(bp->dev);
+
+ for (i = 0; i < max_entries; i++)
+ bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
+
+ pad = bp->rss_indir_tbl_entries - max_entries;
+ if (pad)
+ memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
+}
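ethtool_rxfh_indir_default() from <linux/ethtool.h> is simply index % n_rx_rings, so the default map sprays hash buckets round-robin across the usable RX rings, and any slack entries past the ethtool-visible table size are zeroed. A quick sketch of the resulting pattern:

#include <stdio.h>

/* Mirrors ethtool_rxfh_indir_default() from <linux/ethtool.h> */
static unsigned int rxfh_indir_default(unsigned int index, unsigned int n_rx_rings)
{
	return index % n_rx_rings;
}

int main(void)
{
	unsigned int i;

	/* first 8 buckets with 4 RX rings: 0 1 2 3 0 1 2 3 */
	for (i = 0; i < 8; i++)
		printf("%u ", rxfh_indir_default(i, 4));
	printf("\n");
	return 0;
}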
+
+static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
+{
+ u16 i, tbl_size, max_ring = 0;
+
+ if (!bp->rss_indir_tbl)
+ return 0;
+
+ tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+ for (i = 0; i < tbl_size; i++)
+ max_ring = max(max_ring, bp->rss_indir_tbl[i]);
+ return max_ring;
+}
+
+int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ return 2;
+ return 1;
+}
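This helper centralizes a count that was previously open-coded as DIV_ROUND_UP(bp->rx_nr_rings, 64) (see the later hunks that replace that division): each P5 RSS context serves BNXT_RSS_TABLE_ENTRIES_P5 rings, Nitro A0 always needs two contexts, and everything else needs one. Assuming the entry count is 64, matching the constant it replaces, 64 RX rings fit in one context and 65 spill into two:

#include <stdio.h>

#define BNXT_RSS_TABLE_ENTRIES_P5 64 /* assumed, from the replaced DIV_ROUND_UP(..., 64) */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* P5: 1..64 rings -> 1 ctx, 65..128 rings -> 2 ctxs */
	printf("%d %d %d\n",
	       DIV_ROUND_UP(1, BNXT_RSS_TABLE_ENTRIES_P5),
	       DIV_ROUND_UP(64, BNXT_RSS_TABLE_ENTRIES_P5),
	       DIV_ROUND_UP(65, BNXT_RSS_TABLE_ENTRIES_P5)); /* 1 1 2 */
	return 0;
}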
+
+static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
+ u16 i, j;
+
+ /* Fill the RSS indirection table with ring group ids */
+ for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
+ if (!no_rss)
+ j = bp->rss_indir_tbl[i];
+ vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
+ }
+}
+
+static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
+{
+ __le16 *ring_tbl = vnic->rss_table;
+ struct bnxt_rx_ring_info *rxr;
+ u16 tbl_size, i;
+
+ tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+
+ for (i = 0; i < tbl_size; i++) {
+ u16 ring_id, j;
+
+ j = bp->rss_indir_tbl[i];
+ rxr = &bp->rx_ring[j];
+
+ ring_id = rxr->rx_ring_struct.fw_ring_id;
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ }
+}
+
+static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
+ else
+ __bnxt_fill_hw_rss_tbl(bp, vnic);
+}
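On P5 every table slot expands to a pair of little-endian u16s, the RX ring's firmware id followed by its completion ring's id, which is why the P5 RSS code later advances the DMA address by BNXT_RSS_TABLE_SIZE_P5 per context instead of re-filling one shared table. A standalone sketch of that pair layout, with invented ring ids:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical: 4 buckets spread over 2 rings; all ids invented */
	uint16_t rx_id[2] = { 10, 11 }, cp_id[2] = { 20, 21 };
	uint16_t tbl[8];
	unsigned int i, j = 0;

	for (i = 0; i < 4; i++) {
		unsigned int ring = i % 2;  /* bp->rss_indir_tbl[i] stand-in */
		tbl[j++] = rx_id[ring];     /* rx_ring_struct.fw_ring_id */
		tbl[j++] = cp_id[ring];     /* bnxt_cp_ring_for_rx() result */
	}
	for (i = 0; i < 8; i += 2)
		printf("(%u,%u) ", tbl[i], tbl[i + 1]);
	printf("\n"); /* (10,20) (11,21) (10,20) (11,21) */
	return 0;
}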
+
static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
- u32 i, j, max_rings;
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_rss_cfg_input req = {0};
@@ -4838,24 +4948,9 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
if (set_rss) {
+ bnxt_fill_hw_rss_tbl(bp, vnic);
req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
- if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
- if (BNXT_CHIP_TYPE_NITRO_A0(bp))
- max_rings = bp->rx_nr_rings - 1;
- else
- max_rings = bp->rx_nr_rings;
- } else {
- max_rings = 1;
- }
-
- /* Fill the RSS indirection table with ring group ids */
- for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
- if (j == max_rings)
- j = 0;
- vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
- }
-
req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
req.hash_key_tbl_addr =
cpu_to_le64(vnic->rss_hash_key_dma_addr);
@@ -4867,9 +4962,9 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
- u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
struct hwrm_vnic_rss_cfg_input req = {0};
+ dma_addr_t ring_tbl_map;
+ u32 i, nr_ctxs;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
@@ -4877,31 +4972,18 @@ static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
return 0;
}
+ bnxt_fill_hw_rss_tbl(bp, vnic);
req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
- req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
- nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
- for (i = 0, k = 0; i < nr_ctxs; i++) {
- __le16 *ring_tbl = vnic->rss_table;
+ ring_tbl_map = vnic->rss_table_dma_addr;
+ nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
+ for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
int rc;
+ req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
req.ring_table_pair_index = i;
req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
- for (j = 0; j < 64; j++) {
- u16 ring_id;
-
- ring_id = rxr->rx_ring_struct.fw_ring_id;
- *ring_tbl++ = cpu_to_le16(ring_id);
- ring_id = bnxt_cp_ring_for_rx(bp, rxr);
- *ring_tbl++ = cpu_to_le16(ring_id);
- rxr++;
- k++;
- if (k == max_rings) {
- k = 0;
- rxr = &bp->rx_ring[0];
- }
- }
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return rc;
@@ -5139,6 +5221,14 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
if (flags &
VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
+
+ /* Older P5 fw before EXT_HW_STATS support did not set
+ * VLAN_STRIP_CAP properly.
+ */
+ if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
+ ((bp->flags & BNXT_FLAG_CHIP_P5) &&
+ !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
+ bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
if (bp->max_tpa_v2)
bp->hw_ring_stats_size =
@@ -5992,6 +6082,21 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
rx = rx_rings << 1;
cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
bp->tx_nr_rings = tx;
+
+ /* If we cannot reserve all the RX rings, reset the RSS map only
+ * if absolutely necessary
+ */
+ if (rx_rings != bp->rx_nr_rings) {
+ netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
+ rx_rings, bp->rx_nr_rings);
+ if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
+ (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
+ bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
+ bnxt_get_max_rss_ring(bp) >= rx_rings)) {
+ netdev_warn(bp->dev, "RSS table entries reverting to default\n");
+ bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
+ }
+ }
bp->rx_nr_rings = rx_rings;
bp->cp_nr_rings = cp;
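A user-configured map (IFF_RXFH_CONFIGURED) survives a shrunken reservation unless the new ring count changes the number of RSS contexts or strands a table entry beyond the last ring; only then does the driver fall back to the default map. A worked sketch of that predicate, with a hypothetical 8-to-6 ring shrink:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper mirroring the revert test in __bnxt_reserve_rings() */
static bool must_revert_rss(unsigned int old_ctxs, unsigned int new_ctxs,
			    unsigned int max_rss_ring, unsigned int new_rings)
{
	return old_ctxs != new_ctxs || max_rss_ring >= new_rings;
}

int main(void)
{
	/* 8 -> 6 rings, one context either way */
	printf("%d\n", must_revert_rss(1, 1, 7, 6)); /* 1: entry maps ring 7 */
	printf("%d\n", must_revert_rss(1, 1, 5, 6)); /* 0: map still valid  */
	return 0;
}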
@@ -6955,7 +7060,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
struct hwrm_func_qcaps_input req = {0};
struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- u32 flags;
+ u32 flags, flags_ext;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
req.fid = cpu_to_le16(0xffff);
@@ -6980,6 +7085,12 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
+ if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
+ bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
+
+ flags_ext = le32_to_cpu(resp->flags_ext);
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
@@ -7470,16 +7581,12 @@ static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
- if (bp->vxlan_port_cnt) {
+ if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
bnxt_hwrm_tunnel_dst_port_free(
bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
- }
- bp->vxlan_port_cnt = 0;
- if (bp->nge_port_cnt) {
+ if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
bnxt_hwrm_tunnel_dst_port_free(
bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
- }
- bp->nge_port_cnt = 0;
}
static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
@@ -7634,7 +7741,7 @@ static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
{
int rc, i, nr_ctxs;
- nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
+ nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
for (i = 0; i < nr_ctxs; i++) {
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
if (rc) {
@@ -8196,6 +8303,9 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
rc = bnxt_init_int_mode(bp);
bnxt_ulp_irq_restart(bp, rc);
}
+ if (!netif_is_rxfh_configured(bp->dev))
+ bnxt_set_dflt_rss_indir_tbl(bp);
+
if (rc) {
netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
return rc;
@@ -9194,7 +9304,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
}
if (irq_re_init)
- udp_tunnel_get_rx_info(bp->dev);
+ udp_tunnel_nic_reset_ntf(bp->dev);
set_bit(BNXT_STATE_OPEN, &bp->state);
bnxt_enable_int(bp);
@@ -9835,24 +9945,16 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
/* Both CTAG and STAG VLAN acceleration on the RX side have to be
* turned on or off together.
*/
- vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_STAG_RX);
- if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_STAG_RX)) {
- if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
- features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_STAG_RX);
+ vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
+ if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
+ if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
+ features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
else if (vlan_features)
- features |= NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_STAG_RX;
+ features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
}
#ifdef CONFIG_BNXT_SRIOV
- if (BNXT_VF(bp)) {
- if (bp->vf.vlan) {
- features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_STAG_RX);
- }
- }
+ if (BNXT_VF(bp) && bp->vf.vlan)
+ features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
#endif
return features;
}
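The consolidation above preserves the original all-or-nothing rule: CTAG and STAG RX stripping move together, and which way they move depends on the device's current state. A minimal sketch of that rule, with assumed single-bit stand-ins for the NETIF_F_* flags:

#include <stdint.h>
#include <stdio.h>

/* assumed single-bit stand-ins for NETIF_F_HW_VLAN_{CTAG,STAG}_RX */
#define F_CTAG_RX   0x1u
#define F_STAG_RX   0x2u
#define VLAN_ALL_RX (F_CTAG_RX | F_STAG_RX)

/* Mirrors the fix_features rule: both RX strip flags toggle as one */
static uint32_t fix(uint32_t features, uint32_t cur_features)
{
	uint32_t vlan = features & VLAN_ALL_RX;

	if (vlan != VLAN_ALL_RX) {
		if (cur_features & VLAN_ALL_RX)
			features &= ~VLAN_ALL_RX; /* disabling one clears both */
		else if (vlan)
			features |= VLAN_ALL_RX;  /* enabling one sets both */
	}
	return features;
}

int main(void)
{
	printf("0x%x\n", fix(F_CTAG_RX, VLAN_ALL_RX)); /* 0x0: both dropped */
	printf("0x%x\n", fix(F_STAG_RX, 0));           /* 0x3: both enabled */
	return 0;
}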
@@ -9875,7 +9977,7 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
flags &= ~BNXT_FLAG_TPA;
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
flags |= BNXT_FLAG_STRIP_VLAN;
if (features & NETIF_F_NTUPLE)
@@ -10353,24 +10455,6 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_cfg_ntp_filters(bp);
if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
bnxt_hwrm_exec_fwd_req(bp);
- if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
- bnxt_hwrm_tunnel_dst_port_alloc(
- bp, bp->vxlan_port,
- TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
- }
- if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
- bnxt_hwrm_tunnel_dst_port_free(
- bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
- }
- if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
- bnxt_hwrm_tunnel_dst_port_alloc(
- bp, bp->nge_port,
- TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
- }
- if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
- bnxt_hwrm_tunnel_dst_port_free(
- bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
- }
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
bnxt_hwrm_port_qstats(bp);
bnxt_hwrm_port_qstats_ext(bp);
@@ -10967,6 +11051,9 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
timer_setup(&bp->timer, bnxt_timer, 0);
bp->current_interval = BNXT_TIMER_INTERVAL;
+ bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
+ bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
+
clear_bit(BNXT_STATE_OPEN, &bp->state);
return 0;
@@ -11294,84 +11381,33 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
#endif /* CONFIG_RFS_ACCEL */
-static void bnxt_udp_tunnel_add(struct net_device *dev,
- struct udp_tunnel_info *ti)
+static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
{
- struct bnxt *bp = netdev_priv(dev);
-
- if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
- return;
-
- if (!netif_running(dev))
- return;
+ struct bnxt *bp = netdev_priv(netdev);
+ struct udp_tunnel_info ti;
+ unsigned int cmd;
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
- return;
+ udp_tunnel_nic_get_port(netdev, table, 0, &ti);
+ if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
+ cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
+ else
+ cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
- bp->vxlan_port_cnt++;
- if (bp->vxlan_port_cnt == 1) {
- bp->vxlan_port = ti->port;
- set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
- }
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (bp->nge_port_cnt && bp->nge_port != ti->port)
- return;
+ if (ti.port)
+ return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
- bp->nge_port_cnt++;
- if (bp->nge_port_cnt == 1) {
- bp->nge_port = ti->port;
- set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
- }
- break;
- default:
- return;
- }
-
- bnxt_queue_sp_work(bp);
+ return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
}
-static void bnxt_udp_tunnel_del(struct net_device *dev,
- struct udp_tunnel_info *ti)
-{
- struct bnxt *bp = netdev_priv(dev);
-
- if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
- return;
-
- if (!netif_running(dev))
- return;
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
- return;
- bp->vxlan_port_cnt--;
-
- if (bp->vxlan_port_cnt != 0)
- return;
-
- set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (!bp->nge_port_cnt || bp->nge_port != ti->port)
- return;
- bp->nge_port_cnt--;
-
- if (bp->nge_port_cnt != 0)
- return;
-
- set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
- break;
- default:
- return;
- }
-
- bnxt_queue_sp_work(bp);
-}
+static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
+ .sync_table = bnxt_udp_tunnel_sync,
+ .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+ UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ },
+};
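The udp_tunnel_nic conversion retires the per-type port counters and the four deferred sp_event handlers: the core now tracks port sharing, calls sync_table only when a one-entry table actually changes, and, because of UDP_TUNNEL_NIC_INFO_OPEN_ONLY, withholds ports while the device is down and replays them through udp_tunnel_nic_reset_ntf() on open (see the __bnxt_open_nic hunk above). A hedged skeleton of the same callback pattern for a hypothetical driver; the foo_hw_* helpers are invented, while udp_tunnel_nic_get_port() is the real readback API:

#include <linux/netdevice.h>
#include <net/udp_tunnel.h>

/* Invented device helpers standing in for bnxt's HWRM alloc/free calls */
int foo_hw_add_port(void *priv, unsigned short type, __be16 port);
int foo_hw_del_port(void *priv, unsigned short type);

static int foo_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
	struct udp_tunnel_info ti;

	/* single-entry tables: slot 0 is the only one to read back */
	udp_tunnel_nic_get_port(dev, table, 0, &ti);
	if (ti.port) /* non-zero port: program the new or changed port */
		return foo_hw_add_port(netdev_priv(dev), ti.type, ti.port);
	return foo_hw_del_port(netdev_priv(dev), ti.type); /* slot vacated */
}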
static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u32 filter_mask,
@@ -11469,8 +11505,8 @@ static const struct net_device_ops bnxt_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = bnxt_rx_flow_steer,
#endif
- .ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
- .ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_bpf = bnxt_xdp,
.ndo_xdp_xmit = bnxt_xdp_xmit,
.ndo_bridge_getlink = bnxt_bridge_getlink,
@@ -11510,6 +11546,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
+ kfree(bp->rss_indir_tbl);
+ bp->rss_indir_tbl = NULL;
bnxt_free_port_stats(bp);
free_netdev(dev);
}
@@ -11958,11 +11996,15 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
+ dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
+
dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_GRE_CSUM;
dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
- dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
+ if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
+ dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
+ if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
+ dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
if (BNXT_SUPPORTS_TPA(bp))
dev->hw_features |= NETIF_F_GRO_HW;
dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
@@ -12018,7 +12060,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_fw_init_one_p3(bp);
- if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
+ if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
bp->flags |= BNXT_FLAG_STRIP_VLAN;
rc = bnxt_init_int_mode(bp);
@@ -12030,6 +12072,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ rc = bnxt_alloc_rss_indir_tbl(bp);
+ if (rc)
+ goto init_err_pci_clean;
+ bnxt_set_dflt_rss_indir_tbl(bp);
+
if (BNXT_PF(bp)) {
if (!bnxt_pf_wq) {
bnxt_pf_wq =
@@ -12074,6 +12121,8 @@ init_err_pci_clean:
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
+ kfree(bp->rss_indir_tbl);
+ bp->rss_indir_tbl = NULL;
init_err_free:
free_netdev(dev);