Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_main.c')
-rw-r--r--	drivers/net/ethernet/intel/ice/ice_main.c | 278 ++++++++++++------
1 file changed, 170 insertions(+), 108 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 5ef28052c0f8..306a4e5b2320 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -10,6 +10,7 @@
#include "ice_lib.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
+#include "ice_devlink.h"
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 8
@@ -706,7 +707,6 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
/* Get FEC mode based on negotiated link info */
switch (vsi->port_info->phy.link_info.fec_info) {
case ICE_AQ_LINK_25G_RS_528_FEC_EN:
- /* fall through */
case ICE_AQ_LINK_25G_RS_544_FEC_EN:
fec = "RS-FEC";
break;
@@ -1029,6 +1029,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
if (ice_handle_link_event(pf, &event))
dev_err(dev, "Could not handle link event\n");
break;
+ case ice_aqc_opc_event_lan_overflow:
+ ice_vf_lan_overflow_event(pf, &event);
+ break;
case ice_mbx_opc_send_msg_to_pf:
ice_vc_process_vf_msg(pf, &event);
break;
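The new case routes the ice_aqc_opc_event_lan_overflow firmware event to a
VF handler. ice_vf_lan_overflow_event() is not defined in this file and
presumably lands in the VF/virtchnl code of the same series; below is only
a sketch of what such a handler could do, with example_map_event_to_vf() as
a hypothetical placeholder for the descriptor-to-VF lookup:

	/* Sketch only, not this patch's implementation: find the VF that owns
	 * the overflowed queue and reset it, mirroring how this file already
	 * uses ice_reset_vf() for VF Rx MDD events further down.
	 */
	static void example_vf_lan_overflow_event(struct ice_pf *pf,
						  struct ice_rq_event_info *event)
	{
		struct ice_vf *vf = example_map_event_to_vf(pf, event); /* hypothetical */

		if (vf)
			ice_reset_vf(vf, false);
	}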
@@ -1185,20 +1188,28 @@ static void ice_service_timer(struct timer_list *t)
* ice_handle_mdd_event - handle malicious driver detect event
* @pf: pointer to the PF structure
*
- * Called from service task. OICR interrupt handler indicates MDD event
+ * Called from service task. OICR interrupt handler indicates MDD event.
+ * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
+ * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
+ * disable the queue, the PF can be configured to reset the VF using ethtool
+ * private flag mdd-auto-reset-vf.
*/
static void ice_handle_mdd_event(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- bool mdd_detected = false;
u32 reg;
int i;
- if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state))
+ if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) {
+ /* Since the VF MDD event logging is rate limited, check if
+ * there are pending MDD events.
+ */
+ ice_print_vfs_mdd_events(pf);
return;
+ }
- /* find what triggered the MDD event */
+ /* find what triggered an MDD event */
reg = rd32(hw, GL_MDET_TX_PQM);
if (reg & GL_MDET_TX_PQM_VALID_M) {
u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
@@ -1214,7 +1225,6 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
- mdd_detected = true;
}
reg = rd32(hw, GL_MDET_TX_TCLAN);
@@ -1232,7 +1242,6 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
- mdd_detected = true;
}
reg = rd32(hw, GL_MDET_RX);
@@ -1250,85 +1259,85 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_RX, 0xffffffff);
- mdd_detected = true;
}
- if (mdd_detected) {
- bool pf_mdd_detected = false;
-
- reg = rd32(hw, PF_MDET_TX_PQM);
- if (reg & PF_MDET_TX_PQM_VALID_M) {
- wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
- dev_info(dev, "TX driver issue detected, PF reset issued\n");
- pf_mdd_detected = true;
- }
+ /* check to see if this PF caused an MDD event */
+ reg = rd32(hw, PF_MDET_TX_PQM);
+ if (reg & PF_MDET_TX_PQM_VALID_M) {
+ wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
+ if (netif_msg_tx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
+ }
- reg = rd32(hw, PF_MDET_TX_TCLAN);
- if (reg & PF_MDET_TX_TCLAN_VALID_M) {
- wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
- dev_info(dev, "TX driver issue detected, PF reset issued\n");
- pf_mdd_detected = true;
- }
+ reg = rd32(hw, PF_MDET_TX_TCLAN);
+ if (reg & PF_MDET_TX_TCLAN_VALID_M) {
+ wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
+ if (netif_msg_tx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
+ }
- reg = rd32(hw, PF_MDET_RX);
- if (reg & PF_MDET_RX_VALID_M) {
- wr32(hw, PF_MDET_RX, 0xFFFF);
- dev_info(dev, "RX driver issue detected, PF reset issued\n");
- pf_mdd_detected = true;
- }
- /* Queue belongs to the PF initiate a reset */
- if (pf_mdd_detected) {
- set_bit(__ICE_NEEDS_RESTART, pf->state);
- ice_service_task_schedule(pf);
- }
+ reg = rd32(hw, PF_MDET_RX);
+ if (reg & PF_MDET_RX_VALID_M) {
+ wr32(hw, PF_MDET_RX, 0xFFFF);
+ if (netif_msg_rx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
}
- /* check to see if one of the VFs caused the MDD */
+ /* Check to see if one of the VFs caused an MDD event, and then
+ * increment counters and set print pending
+ */
ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i];
- bool vf_mdd_detected = false;
-
reg = rd32(hw, VP_MDET_TX_PQM(i));
if (reg & VP_MDET_TX_PQM_VALID_M) {
wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
- vf_mdd_detected = true;
- dev_info(dev, "TX driver issue detected on VF %d\n",
- i);
+ vf->mdd_tx_events.count++;
+ set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+ if (netif_msg_tx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
+ i);
}
reg = rd32(hw, VP_MDET_TX_TCLAN(i));
if (reg & VP_MDET_TX_TCLAN_VALID_M) {
wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
- vf_mdd_detected = true;
- dev_info(dev, "TX driver issue detected on VF %d\n",
- i);
+ vf->mdd_tx_events.count++;
+ set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+ if (netif_msg_tx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
+ i);
}
reg = rd32(hw, VP_MDET_TX_TDPU(i));
if (reg & VP_MDET_TX_TDPU_VALID_M) {
wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
- vf_mdd_detected = true;
- dev_info(dev, "TX driver issue detected on VF %d\n",
- i);
+ vf->mdd_tx_events.count++;
+ set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+ if (netif_msg_tx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
+ i);
}
reg = rd32(hw, VP_MDET_RX(i));
if (reg & VP_MDET_RX_VALID_M) {
wr32(hw, VP_MDET_RX(i), 0xFFFF);
- vf_mdd_detected = true;
- dev_info(dev, "RX driver issue detected on VF %d\n",
- i);
- }
-
- if (vf_mdd_detected) {
- vf->num_mdd_events++;
- if (vf->num_mdd_events &&
- vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD)
- dev_info(dev, "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
- i, vf->num_mdd_events);
+ vf->mdd_rx_events.count++;
+ set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+ if (netif_msg_rx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
+ i);
+
+ /* Since the queue is disabled on VF Rx MDD events, the
+ * PF can be configured to reset the VF through ethtool
+ * private flag mdd-auto-reset-vf.
+ */
+ if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
+ ice_reset_vf(&pf->vf[i], false);
}
}
+
+ ice_print_vfs_mdd_events(pf);
}
/**
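The rewritten handler above no longer resets the PF on every MDD register
hit; it clears each VALID bit, counts per-VF events, and defers the actual
printout to ice_print_vfs_mdd_events(). That helper is outside this file's
diff, so the following is only a sketch of the deferred, rate-limited
pattern the kdoc describes, using the fields and state bit shown above:

	/* Sketch (assumed shape of ice_print_vfs_mdd_events): print pending
	 * per-VF MDD counters at most as often as net_ratelimit() allows.
	 */
	static void example_print_vfs_mdd_events(struct ice_pf *pf)
	{
		struct device *dev = ice_pf_to_dev(pf);
		int i;

		/* only print when ice_handle_mdd_event() flagged new events */
		if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state))
			return;

		/* net_ratelimit() bounds how often the counters hit the log */
		if (!net_ratelimit())
			return;

		ice_for_each_vf(pf, i) {
			struct ice_vf *vf = &pf->vf[i];

			if (vf->mdd_rx_events.count || vf->mdd_tx_events.count)
				dev_info(dev, "VF %d: %u Rx / %u Tx MDD events\n",
					 i, vf->mdd_rx_events.count,
					 vf->mdd_tx_events.count);
		}
	}

The Rx path can additionally reset the offending VF when the
mdd-auto-reset-vf private flag is on (ethtool --set-priv-flags <ifname>
mdd-auto-reset-vf on), since a VF Rx MDD event leaves the queue disabled.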
@@ -1510,7 +1519,7 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
hw->adminq.num_sq_entries = ICE_AQ_LEN;
hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
- hw->mailboxq.num_rq_entries = ICE_MBXRQ_LEN;
+ hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
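Sizing note (an assumption about intent, not stated in the diff):
PF_MBX_ARQLEN_ARQLEN_M is the mask of the mailbox receive-queue length
register field rather than a tuned constant, along the lines of

	#define PF_MBX_ARQLEN_ARQLEN_S	0
	#define PF_MBX_ARQLEN_ARQLEN_M	ICE_M(0x3FF, 0)	/* width per the hw register headers */

so assigning the mask as num_rq_entries sizes the mailbox Rx queue to the
largest value the register can encode, leaving headroom for bursts of VF
mailbox traffic instead of the smaller ICE_MBXRQ_LEN.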
@@ -1916,8 +1925,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) {
ret = ice_down(vsi);
if (ret) {
- NL_SET_ERR_MSG_MOD(extack,
- "Preparing device for XDP attach failed");
+ NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
return ret;
}
}
@@ -1926,13 +1934,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
vsi->num_xdp_txq = vsi->alloc_txq;
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
if (xdp_ring_err)
- NL_SET_ERR_MSG_MOD(extack,
- "Setting up XDP Tx resources failed");
+ NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_ring_err = ice_destroy_xdp_rings(vsi);
if (xdp_ring_err)
- NL_SET_ERR_MSG_MOD(extack,
- "Freeing XDP Tx resources failed");
+ NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
} else {
ice_vsi_assign_bpf_prog(vsi, prog);
}
@@ -1965,8 +1971,7 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
struct ice_vsi *vsi = np->vsi;
if (vsi->type != ICE_VSI_PF) {
- NL_SET_ERR_MSG_MOD(xdp->extack,
- "XDP can be loaded only on PF VSI");
+ NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
return -EINVAL;
}
@@ -1993,6 +1998,14 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
u32 val;
+ /* Disable anti-spoof detection interrupt to prevent spurious event
+ * interrupts during a function reset. Anti-spoof functionality is
+ * still supported.
+ */
+ val = rd32(hw, GL_MDCK_TX_TDPU);
+ val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
+ wr32(hw, GL_MDCK_TX_TDPU, val);
+
/* clear things first */
wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
rd32(hw, PFINT_OICR); /* read to clear */
@@ -2042,8 +2055,16 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
}
if (oicr & PFINT_OICR_VFLR_M) {
- ena_mask &= ~PFINT_OICR_VFLR_M;
- set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
+ /* disable any further VFLR event notifications */
+ if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
+ u32 reg = rd32(hw, PFINT_OICR_ENA);
+
+ reg &= ~PFINT_OICR_VFLR_M;
+ wr32(hw, PFINT_OICR_ENA, reg);
+ } else {
+ ena_mask &= ~PFINT_OICR_VFLR_M;
+ set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
+ }
}
if (oicr & PFINT_OICR_GRST_M) {
@@ -2351,10 +2372,16 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
u8 mac_addr[ETH_ALEN];
int err;
+ err = ice_devlink_create_port(pf);
+ if (err)
+ return err;
+
netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
vsi->alloc_rxq);
- if (!netdev)
- return -ENOMEM;
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_destroy_devlink_port;
+ }
vsi->netdev = netdev;
np = netdev_priv(netdev);
@@ -2384,7 +2411,9 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
err = register_netdev(vsi->netdev);
if (err)
- return err;
+ goto err_destroy_devlink_port;
+
+ devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
netif_carrier_off(vsi->netdev);
@@ -2392,6 +2421,11 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
netif_tx_stop_all_queues(vsi->netdev);
return 0;
+
+err_destroy_devlink_port:
+ ice_devlink_destroy_port(pf);
+
+ return err;
}
/**
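ice_devlink_create_port()/ice_devlink_destroy_port() come from the
ice_devlink.h header added at the top of this patch; their bodies are
outside this diff. A minimal sketch of the pairing, assuming the PF embeds
a devlink_port and its devlink instance is reachable via priv_to_devlink():

	static int example_devlink_create_port(struct ice_pf *pf)
	{
		struct devlink *devlink = priv_to_devlink(pf);

		/* one physical port per PF; index 0 in this sketch */
		return devlink_port_register(devlink, &pf->devlink_port, 0);
	}

	static void example_devlink_destroy_port(struct ice_pf *pf)
	{
		devlink_port_unregister(&pf->devlink_port);
	}

Once register_netdev() succeeds, devlink_port_type_eth_set() binds the port
to the netdev so userspace ('devlink port show') sees it as an Ethernet
port; the error path unwinds the port on any failure after creation.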
@@ -2461,16 +2495,19 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
if (vsi->info.pvid)
return -EINVAL;
- /* Enable VLAN pruning when VLAN 0 is added */
- if (unlikely(!vid)) {
+ /* VLAN 0 is added by default during load/reset */
+ if (!vid)
+ return 0;
+
+ /* Enable VLAN pruning when a VLAN other than 0 is added */
+ if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
ret = ice_cfg_vlan_pruning(vsi, true, false);
if (ret)
return ret;
}
- /* Add all VLAN IDs including 0 to the switch filter. VLAN ID 0 is
- * needed to continue allowing all untagged packets since VLAN prune
- * list is applied to all packets by the switch
+ /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
+ * packets aren't pruned by the device's internal switch on Rx
+ */
ret = ice_vsi_add_vlan(vsi, vid);
if (!ret) {
@@ -2500,6 +2537,10 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
if (vsi->info.pvid)
return -EINVAL;
+ /* don't allow removal of VLAN 0 */
+ if (!vid)
+ return 0;
+
/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
* information
*/
@@ -2507,8 +2548,8 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
if (ret)
return ret;
- /* Disable VLAN pruning when VLAN 0 is removed */
- if (unlikely(!vid))
+ /* Disable pruning when VLAN 0 is the only VLAN rule */
+ if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
ret = ice_cfg_vlan_pruning(vsi, false, false);
vsi->vlan_ena = false;
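Taken together, the two VLAN hunks change the pruning life cycle: VLAN 0 now
exists for the VSI's whole lifetime (installed at load/reset, never removed
through .ndo_vlan_rx_kill_vid), and Rx pruning follows the non-zero VIDs. A
walk-through of the resulting behavior using the callbacks above:

	ice_vlan_rx_add_vid(netdev, proto, 0);    /* no-op: VLAN 0 preinstalled    */
	ice_vlan_rx_add_vid(netdev, proto, 100);  /* first real VID: pruning on    */
	ice_vlan_rx_kill_vid(netdev, proto, 100); /* only VLAN 0 left: pruning off */
	ice_vlan_rx_kill_vid(netdev, proto, 0);   /* no-op: VLAN 0 never removed   */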
@@ -2945,7 +2986,6 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
}
break;
case ICE_ERR_BUF_TOO_SHORT:
- /* fall-through */
case ICE_ERR_CFG:
dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
break;
@@ -2977,7 +3017,7 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
default:
break;
}
- /* fall-through */
+ fallthrough;
default:
dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
*status);
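The comment-to-keyword conversions follow the tree-wide move to the
fallthrough pseudo-keyword, a macro for the compiler attribute rather than
a comment that static checkers must pattern-match:

	/* include/linux/compiler_attributes.h (paraphrased) */
	#define fallthrough	__attribute__((__fallthrough__))

	switch (err) {
	case FIRST:
		do_first();
		fallthrough;	/* keeps -Wimplicit-fallthrough quiet, on purpose */
	case SECOND:
		do_second();
		break;
	}

Where two case labels are directly adjacent, as in the
ICE_AQ_LINK_25G_RS_528_FEC_EN and ICE_ERR_BUF_TOO_SHORT hunks above, no
annotation is needed at all, so the old comments are simply dropped.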
@@ -3069,30 +3109,22 @@ static char *ice_get_opt_fw_name(struct ice_pf *pf)
* followed by a EUI-64 identifier (PCIe Device Serial Number)
*/
struct pci_dev *pdev = pf->pdev;
- char *opt_fw_filename = NULL;
- u32 dword;
- u8 dsn[8];
- int pos;
+ char *opt_fw_filename;
+ u64 dsn;
/* Determine the name of the optional file using the DSN (two
* dwords following the start of the DSN Capability).
*/
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- if (pos) {
- opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
- if (!opt_fw_filename)
- return NULL;
-
- pci_read_config_dword(pdev, pos + 4, &dword);
- put_unaligned_le32(dword, &dsn[0]);
- pci_read_config_dword(pdev, pos + 8, &dword);
- put_unaligned_le32(dword, &dsn[4]);
- snprintf(opt_fw_filename, NAME_MAX,
- "%sice-%02x%02x%02x%02x%02x%02x%02x%02x.pkg",
- ICE_DDP_PKG_PATH,
- dsn[7], dsn[6], dsn[5], dsn[4],
- dsn[3], dsn[2], dsn[1], dsn[0]);
- }
+ dsn = pci_get_dsn(pdev);
+ if (!dsn)
+ return NULL;
+
+ opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
+ if (!opt_fw_filename)
+ return NULL;
+
+ snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llX.pkg",
+ ICE_DDP_PKG_PATH, dsn);
return opt_fw_filename;
}
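pci_get_dsn() folds the open-coded capability walk this hunk deletes into a
single PCI core call. For reference, an equivalent of what the removed code
did, returning the Device Serial Number as one u64:

	static u64 example_read_dsn(struct pci_dev *pdev)
	{
		u32 dword;
		u64 dsn;
		int pos;

		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
		if (!pos)
			return 0;

		/* two dwords follow the DSN capability header, low dword first */
		pci_read_config_dword(pdev, pos + 4, &dword);
		dsn = dword;
		pci_read_config_dword(pdev, pos + 8, &dword);
		dsn |= (u64)dword << 32;

		return dsn;
	}

Formatting with %016llX preserves the old byte order, since the previous
code printed dsn[7] down to dsn[0], i.e. most-significant byte first.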
@@ -3166,7 +3198,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
return err;
}
- pf = devm_kzalloc(dev, sizeof(*pf), GFP_KERNEL);
+ pf = ice_allocate_pf(dev);
if (!pf)
return -ENOMEM;
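ice_allocate_pf() replaces the bare devm_kzalloc() so the PF private data
can live inside a devlink instance. Its body is in the new devlink code,
outside this diff; a minimal sketch under that assumption (ice_devlink_ops
is presumed declared by ice_devlink.h):

	static void example_devlink_free(void *devlink)
	{
		devlink_free((struct devlink *)devlink);
	}

	static struct ice_pf *example_allocate_pf(struct device *dev)
	{
		struct devlink *devlink;

		devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf));
		if (!devlink)
			return NULL;

		/* free the devlink instance automatically on driver detach */
		if (devm_add_action_or_reset(dev, example_devlink_free, devlink))
			return NULL;

		return devlink_priv(devlink);
	}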
@@ -3204,6 +3236,12 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
+ err = ice_devlink_register(pf);
+ if (err) {
+ dev_err(dev, "ice_devlink_register failed: %d\n", err);
+ goto err_exit_unroll;
+ }
+
#ifndef CONFIG_DYNAMIC_DEBUG
if (debug < -1)
hw->debug_mask = debug;
@@ -3238,6 +3276,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_init_pf_unroll;
}
+ ice_devlink_init_regions(pf);
+
pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
if (!pf->num_alloc_vsi) {
err = -EIO;
@@ -3336,6 +3376,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
return 0;
err_alloc_sw_unroll:
+ ice_devlink_destroy_port(pf);
set_bit(__ICE_SERVICE_DIS, pf->state);
set_bit(__ICE_DOWN, pf->state);
devm_kfree(dev, pf->first_sw);
@@ -3346,8 +3387,10 @@ err_init_interrupt_unroll:
devm_kfree(dev, pf->vsi);
err_init_pf_unroll:
ice_deinit_pf(pf);
+ ice_devlink_destroy_regions(pf);
ice_deinit_hw(hw);
err_exit_unroll:
+ ice_devlink_unregister(pf);
pci_disable_pcie_error_reporting(pdev);
return err;
}
@@ -3370,11 +3413,15 @@ static void ice_remove(struct pci_dev *pdev)
msleep(100);
}
+ if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
+ set_bit(__ICE_VF_RESETS_DISABLED, pf->state);
+ ice_free_vfs(pf);
+ }
+
set_bit(__ICE_DOWN, pf->state);
ice_service_task_stop(pf);
- if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
- ice_free_vfs(pf);
+ ice_devlink_destroy_port(pf);
ice_vsi_release_all(pf);
ice_free_irq_msix_misc(pf);
ice_for_each_vsi(pf, i) {
@@ -3383,7 +3430,10 @@ static void ice_remove(struct pci_dev *pdev)
ice_vsi_free_q_vectors(pf->vsi[i]);
}
ice_deinit_pf(pf);
+ ice_devlink_destroy_regions(pf);
ice_deinit_hw(&pf->hw);
+ ice_devlink_unregister(pf);
+
/* Issue a PFR as part of the prescribed driver unload flow. Do not
* do it via ice_schedule_reset() since there is no need to rebuild
* and the service task is already stopped.
@@ -3534,15 +3584,26 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822X_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
/* required last entry */
{ 0, }
};
@@ -3961,7 +4022,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
* Tx queue group list was configured and the context bits were
* programmed using ice_vsi_cfg_txqs
*/
- err = ice_vsi_start_rx_rings(vsi);
+ err = ice_vsi_start_all_rx_rings(vsi);
if (err)
return err;
@@ -4340,7 +4401,7 @@ int ice_down(struct ice_vsi *vsi)
vsi->vsi_num, tx_err);
}
- rx_err = ice_vsi_stop_rx_rings(vsi);
+ rx_err = ice_vsi_stop_all_rx_rings(vsi);
if (rx_err)
netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
vsi->vsi_num, rx_err);
@@ -5027,6 +5088,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
/**
* ice_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: Tx queue
*/
static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
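The new @txqueue kernel-doc line and the %d to %u conversions in the next
hunk track the core change that gave .ndo_tx_timeout a queue index, so the
driver can report exactly which queue stalled:

	/* include/linux/netdevice.h, struct net_device_ops (for reference) */
	void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);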
@@ -5064,13 +5126,13 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
/* Read interrupt register */
val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
- netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
+ netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, txqueue, tx_ring->next_to_clean,
head, tx_ring->next_to_use, val);
}
pf->tx_timeout_last_recovery = jiffies;
- netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
+ netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
pf->tx_timeout_recovery_level, txqueue);
switch (pf->tx_timeout_recovery_level) {