Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 20
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 3
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 17
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 19
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 66
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 47
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 1
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_i225.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 3
-rw-r--r--  drivers/net/ethernet/lantiq_xrx200.c | 34
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_main.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 3
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_par.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/ef100_nic.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/falcon/rx.c | 5
-rw-r--r--  drivers/net/ethernet/sfc/rx_common.c | 5
-rw-r--r--  drivers/net/ethernet/smsc/smc911x.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 17
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 16
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 86
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 29
39 files changed, 349 insertions(+), 148 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 40933bf5a710..60dde29974bf 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1309,11 +1309,11 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
struct bcm_sysport_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
struct bcm_sysport_tx_ring *ring;
+ unsigned long flags, desc_flags;
struct bcm_sysport_cb *cb;
struct netdev_queue *txq;
u32 len_status, addr_lo;
unsigned int skb_len;
- unsigned long flags;
dma_addr_t mapping;
u16 queue;
int ret;
@@ -1373,8 +1373,10 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
ring->desc_count--;
/* Ports are latched, so write upper address first */
+ spin_lock_irqsave(&priv->desc_lock, desc_flags);
tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
+ spin_unlock_irqrestore(&priv->desc_lock, desc_flags);
/* Check ring space and update SW control flow */
if (ring->desc_count == 0)
@@ -2013,6 +2015,7 @@ static int bcm_sysport_open(struct net_device *dev)
}
/* Initialize both hardware and software ring */
+ spin_lock_init(&priv->desc_lock);
for (i = 0; i < dev->num_tx_queues; i++) {
ret = bcm_sysport_init_tx_ring(priv, i);
if (ret) {
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 984f76e74b43..16b73bb9acc7 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -711,6 +711,7 @@ struct bcm_sysport_priv {
int wol_irq;
/* Transmit rings */
+ spinlock_t desc_lock;
struct bcm_sysport_tx_ring *tx_rings;
/* Receive queue */
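
[editor's note] The two tdma_writel() calls above form one logical descriptor write: the HI port is latched by the hardware and only committed by the LO write, so pairs issued from different TX rings must not interleave — hence the new desc_lock. A minimal userspace model of the pattern, with a pthread mutex standing in for spin_lock_irqsave() and all names illustrative:

```c
/* Model of the paired-register write made atomic above: the HI write is
 * latched and only committed by the LO write, so the pair must not
 * interleave with another ring's pair. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t desc_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t latched_hi;	/* models the hardware's latched HI port */

static void write_port_hi(uint32_t val) { latched_hi = val; }
static void write_port_lo(uint32_t val)
{
	/* models the hardware committing {latched_hi, val} as one descriptor */
	printf("commit desc %08x:%08x\n", latched_hi, val);
}

static void submit_desc(uint32_t len_status, uint32_t addr_lo)
{
	pthread_mutex_lock(&desc_lock);	/* spin_lock_irqsave() in the driver */
	write_port_hi(len_status);	/* latched, not yet committed */
	write_port_lo(addr_lo);		/* commit; must pair with the HI above */
	pthread_mutex_unlock(&desc_lock);
}

int main(void)
{
	submit_desc(0xdead0001, 0xbeef0000);
	return 0;
}
```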
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 5f259641437a..c888ddee1fc4 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -589,9 +589,9 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
* Internal or external PHY with MDIO access
*/
phydev = phy_attach(priv->dev, phy_name, pd->phy_interface);
- if (!phydev) {
+ if (IS_ERR(phydev)) {
dev_err(kdev, "failed to register PHY device\n");
- return -ENODEV;
+ return PTR_ERR(phydev);
}
} else {
/*
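
[editor's note] Context for the fix above: phy_attach() returns an ERR_PTR()-encoded pointer and never NULL, so the old `!phydev` test could never trigger. A runnable sketch of the encoding convention, simplified from the kernel's include/linux/err.h:

```c
/* Simplified model of the ERR_PTR convention: errors live at the very top
 * of the pointer range, so callers must test IS_ERR(), not NULL. */
#include <stdio.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *fake_phy_attach(int fail)	/* stand-in for phy_attach() */
{
	static int dummy;
	return fail ? ERR_PTR(-19 /* -ENODEV */) : (void *)&dummy;
}

int main(void)
{
	void *phydev = fake_phy_attach(1);
	if (IS_ERR(phydev))		/* the correct check */
		printf("attach failed: %ld\n", PTR_ERR(phydev));
	if (!phydev)			/* the old, always-false check */
		printf("never reached\n");
	return 0;
}
```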
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 2085844227fe..e54e70ebdd05 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -388,6 +388,8 @@ struct dpaa2_eth_ch_stats {
__u64 bytes_per_cdan;
};
+#define DPAA2_ETH_CH_STATS 7
+
/* Maximum number of queues associated with a DPNI */
#define DPAA2_ETH_MAX_TCS 8
#define DPAA2_ETH_MAX_RX_QUEUES_PER_TC 16
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index adb8ce5306ee..3fdbf87dccb1 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -278,7 +278,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
/* Per-channel stats */
for (k = 0; k < priv->num_channels; k++) {
ch_stats = &priv->channel[k]->stats;
- for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64) - 1; j++)
+ for (j = 0; j < DPAA2_ETH_CH_STATS; j++)
*((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
}
i += j;
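
[editor's note] The replaced expression `sizeof(*ch_stats) / sizeof(__u64) - 1` silently changes meaning whenever a field is added to the struct; the named constant pins the exported count. A hypothetical compile-time guard — not part of the patch, and the field names below are illustrative — could keep the two in sync:

```c
/* Hypothetical guard tying DPAA2_ETH_CH_STATS to the struct layout. */
#include <assert.h>

typedef unsigned long long __u64;

struct dpaa2_eth_ch_stats {
	__u64 cdan;		/* illustrative field names */
	__u64 xdp_drop;
	__u64 xdp_tx;
	__u64 xdp_tx_err;
	__u64 xdp_redirect;
	__u64 frames_per_cdan;
	__u64 bytes_per_cdan;
	__u64 pull_err;		/* not exported to ethtool, hence the "- 1" */
};

#define DPAA2_ETH_CH_STATS 7

static_assert(DPAA2_ETH_CH_STATS ==
	      sizeof(struct dpaa2_eth_ch_stats) / sizeof(__u64) - 1,
	      "DPAA2_ETH_CH_STATS out of sync with struct dpaa2_eth_ch_stats");

int main(void) { return 0; }
```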
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 83ae56c310d3..326b56b49216 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -738,10 +738,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
* is not set to GqiRda, choose the queue format in a priority order:
* DqoRda, GqiRda, GqiQpl. Use GqiQpl as default.
*/
- if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
- dev_info(&priv->pdev->dev,
- "Driver is running with GQI RDA queue format.\n");
- } else if (dev_op_dqo_rda) {
+ if (dev_op_dqo_rda) {
priv->queue_format = GVE_DQO_RDA_FORMAT;
dev_info(&priv->pdev->dev,
"Driver is running with DQO RDA queue format.\n");
@@ -753,6 +750,9 @@ int gve_adminq_describe_device(struct gve_priv *priv)
"Driver is running with GQI RDA queue format.\n");
supported_features_mask =
be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
+ } else if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
+ dev_info(&priv->pdev->dev,
+ "Driver is running with GQI RDA queue format.\n");
} else {
priv->queue_format = GVE_GQI_QPL_FORMAT;
if (dev_op_gqi_qpl)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 3f7a9a4c59d5..63f5abcc6bf4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -839,6 +839,8 @@ struct hnae3_handle {
u8 netdev_flags;
struct dentry *hnae3_dbgfs;
+ /* serializes concurrent debugfs commands */
+ struct mutex dbgfs_lock;
/* Network interface message level enabled bits */
u32 msg_enable;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 081295bff765..c381f8af67f0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -1226,6 +1226,7 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
if (ret)
return ret;
+ mutex_lock(&handle->dbgfs_lock);
save_buf = &hns3_dbg_cmd[index].buf;
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
@@ -1238,15 +1239,15 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
read_buf = *save_buf;
} else {
read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
- if (!read_buf)
- return -ENOMEM;
+ if (!read_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
/* save the buffer addr until the last read operation */
*save_buf = read_buf;
- }
- /* get data ready for the first time to read */
- if (!*ppos) {
+ /* get data ready for the first time to read */
ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
read_buf, hns3_dbg_cmd[index].buf_len);
if (ret)
@@ -1255,8 +1256,10 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
size = simple_read_from_buffer(buffer, count, ppos, read_buf,
strlen(read_buf));
- if (size > 0)
+ if (size > 0) {
+ mutex_unlock(&handle->dbgfs_lock);
return size;
+ }
out:
/* free the buffer for the last read operation */
@@ -1265,6 +1268,7 @@ out:
*save_buf = NULL;
}
+ mutex_unlock(&handle->dbgfs_lock);
return ret;
}
@@ -1337,6 +1341,8 @@ int hns3_dbg_init(struct hnae3_handle *handle)
debugfs_create_dir(hns3_dbg_dentry[i].name,
handle->hnae3_dbgfs);
+ mutex_init(&handle->dbgfs_lock);
+
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) ||
@@ -1363,6 +1369,7 @@ int hns3_dbg_init(struct hnae3_handle *handle)
return 0;
out:
+ mutex_destroy(&handle->dbgfs_lock);
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
return ret;
@@ -1378,6 +1385,7 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
hns3_dbg_cmd[i].buf = NULL;
}
+ mutex_destroy(&handle->dbgfs_lock);
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
}
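
[editor's note] The hns3 read handler lazily allocates one buffer per debugfs command and frees it after the final read; without dbgfs_lock, two concurrent readers can race on *save_buf. A compact userspace model of the locking shape, with a pthread mutex in place of the kernel mutex and illustrative names:

```c
/* Model of the pattern above: a lazily allocated buffer shared by all
 * readers of one debugfs file, with a single unlock on every exit path. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t dbgfs_lock = PTHREAD_MUTEX_INITIALIZER;
static char *save_buf;			/* models hns3_dbg_cmd[index].buf */

static int dbg_read(char *dst, size_t len)
{
	int ret = 0;

	pthread_mutex_lock(&dbgfs_lock);
	if (!save_buf) {
		save_buf = calloc(1, 4096);	/* kvzalloc() in the driver */
		if (!save_buf) {
			ret = -12;		/* -ENOMEM */
			goto out;
		}
		strcpy(save_buf, "command output\n");
	}
	strncpy(dst, save_buf, len - 1);
	dst[len - 1] = '\0';
out:
	pthread_mutex_unlock(&dbgfs_lock);	/* every path unlocks once */
	return ret;
}

int main(void)
{
	char buf[64];
	if (!dbg_read(buf, sizeof(buf)))
		printf("%s", buf);
	free(save_buf);
	return 0;
}
```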
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index fdc66fae0960..c5ac6ecf36e1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -114,7 +114,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
memcpy(&req->msg, send_msg, sizeof(struct hclge_vf_to_pf_msg));
- trace_hclge_vf_mbx_send(hdev, req);
+ if (test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state))
+ trace_hclge_vf_mbx_send(hdev, req);
/* synchronous send */
if (need_resp) {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index cfdbf8c08d18..4e7c04047f91 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2046,6 +2046,7 @@ static void iavf_watchdog_task(struct work_struct *work)
}
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ mutex_unlock(&adapter->crit_lock);
queue_delayed_work(iavf_wq,
&adapter->watchdog_task,
msecs_to_jiffies(10));
@@ -2076,16 +2077,14 @@ static void iavf_watchdog_task(struct work_struct *work)
iavf_detect_recover_hung(&adapter->vsi);
break;
case __IAVF_REMOVE:
- mutex_unlock(&adapter->crit_lock);
- return;
default:
+ mutex_unlock(&adapter->crit_lock);
return;
}
/* check for hw reset */
reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
if (!reg_val) {
- iavf_change_state(adapter, __IAVF_RESETTING);
adapter->flags |= IAVF_FLAG_RESET_PENDING;
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 1efc635cc0f5..fafe020e46ee 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -6,6 +6,18 @@
#include "ice_lib.h"
#include "ice_dcb_lib.h"
+static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
+{
+ rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL);
+ return !!rx_ring->xdp_buf;
+}
+
+static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring)
+{
+ rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
+ return !!rx_ring->rx_buf;
+}
+
/**
* __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
* @qs_cfg: gathered variables needed for PF->VSI queues assignment
@@ -492,8 +504,11 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index, ring->q_vector->napi.napi_id);
+ kfree(ring->rx_buf);
ring->xsk_pool = ice_xsk_pool(ring);
if (ring->xsk_pool) {
+ if (!ice_alloc_rx_buf_zc(ring))
+ return -ENOMEM;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring->rx_buf_len =
@@ -508,6 +523,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
+ if (!ice_alloc_rx_buf(ring))
+ return -ENOMEM;
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
xdp_rxq_info_reg(&ring->xdp_rxq,
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index bf7247c6f58e..442b031b0edc 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -705,7 +705,7 @@ static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
scaled_ppm = -scaled_ppm;
}
- while ((u64)scaled_ppm > div_u64(U64_MAX, incval)) {
+ while ((u64)scaled_ppm > div64_u64(U64_MAX, incval)) {
/* handle overflow by scaling down the scaled_ppm and
* the divisor, losing some precision
*/
@@ -1540,19 +1540,16 @@ static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
if (err)
continue;
- /* Check if the timestamp is valid */
- if (!(raw_tstamp & ICE_PTP_TS_VALID))
+ /* Check if the timestamp is invalid or stale */
+ if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
+ raw_tstamp == tx->tstamps[idx].cached_tstamp)
continue;
- /* clear the timestamp register, so that it won't show valid
- * again when re-used.
- */
- ice_clear_phy_tstamp(hw, tx->quad, phy_idx);
-
/* The timestamp is valid, so we'll go ahead and clear this
* index and then send the timestamp up to the stack.
*/
spin_lock(&tx->lock);
+ tx->tstamps[idx].cached_tstamp = raw_tstamp;
clear_bit(idx, tx->in_use);
skb = tx->tstamps[idx].skb;
tx->tstamps[idx].skb = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index f71ad317d6c8..53c15fc9d996 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -55,15 +55,21 @@ struct ice_perout_channel {
* struct ice_tx_tstamp - Tracking for a single Tx timestamp
* @skb: pointer to the SKB for this timestamp request
* @start: jiffies when the timestamp was first requested
+ * @cached_tstamp: last read timestamp
*
* This structure tracks a single timestamp request. The SKB pointer is
* provided when initiating a request. The start time is used to ensure that
* we discard old requests that were not fulfilled within a 2 second time
* window.
+ * Timestamp values in the PHY are read only and do not get cleared except at
+ * hardware reset or when a new timestamp value is captured. The cached_tstamp
+ * field is used to detect the case where a new timestamp has not yet been
+ * captured, ensuring that we avoid sending stale timestamp data to the stack.
*/
struct ice_tx_tstamp {
struct sk_buff *skb;
unsigned long start;
+ u64 cached_tstamp;
};
/**
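
[editor's note] Since the PHY's timestamp registers are read-only and hold their last value until a new capture, a raw read equal to the previously delivered value must be treated as stale. A runnable sketch of the comparison added above (the bit position and values are made up for the demo):

```c
/* Only forward a timestamp when the valid bit is set AND the raw value
 * differs from the last one delivered to the stack. */
#include <stdint.h>
#include <stdio.h>

#define ICE_PTP_TS_VALID 0x1ULL	/* assumed bit position for the demo */

struct tx_tstamp { uint64_t cached_tstamp; };

static int tstamp_ready(struct tx_tstamp *t, uint64_t raw)
{
	if (!(raw & ICE_PTP_TS_VALID) || raw == t->cached_tstamp)
		return 0;		/* invalid or stale: skip */
	t->cached_tstamp = raw;		/* remember what we delivered */
	return 1;
}

int main(void)
{
	struct tx_tstamp t = { 0 };
	uint64_t reads[] = { 0x1000, 0x1001, 0x1001, 0x2001 };
	for (int i = 0; i < 4; i++)
		printf("read %#llx -> %s\n", (unsigned long long)reads[i],
		       tstamp_ready(&t, reads[i]) ? "deliver" : "skip");
	return 0;
}
```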
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index bc3ba19dc88f..dccf09eefc75 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -419,7 +419,10 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
}
rx_skip_free:
- memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
+ if (rx_ring->xsk_pool)
+ memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
+ else
+ memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));
/* Zero out the descriptor ring */
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
@@ -446,8 +449,13 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
rx_ring->xdp_prog = NULL;
- devm_kfree(rx_ring->dev, rx_ring->rx_buf);
- rx_ring->rx_buf = NULL;
+ if (rx_ring->xsk_pool) {
+ kfree(rx_ring->xdp_buf);
+ rx_ring->xdp_buf = NULL;
+ } else {
+ kfree(rx_ring->rx_buf);
+ rx_ring->rx_buf = NULL;
+ }
if (rx_ring->desc) {
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
@@ -475,8 +483,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
/* warn if we are about to overwrite the pointer */
WARN_ON(rx_ring->rx_buf);
rx_ring->rx_buf =
- devm_kcalloc(dev, sizeof(*rx_ring->rx_buf), rx_ring->count,
- GFP_KERNEL);
+ kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
if (!rx_ring->rx_buf)
return -ENOMEM;
@@ -505,7 +512,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
return 0;
err:
- devm_kfree(dev, rx_ring->rx_buf);
+ kfree(rx_ring->rx_buf);
rx_ring->rx_buf = NULL;
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index c56dd1749903..b7b3bd4816f0 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -24,7 +24,6 @@
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
-#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG 128
/* Attempt to maximize the headroom available for incoming frames. We use a 2K
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index bb9a80847298..c895351b25e0 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -12,6 +12,11 @@
#include "ice_txrx_lib.h"
#include "ice_lib.h"
+static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
+{
+ return &rx_ring->xdp_buf[idx];
+}
+
/**
* ice_qp_reset_stats - Resets all stats for rings of given index
* @vsi: VSI that contains rings of interest
@@ -372,7 +377,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
dma_addr_t dma;
rx_desc = ICE_RX_DESC(rx_ring, ntu);
- xdp = &rx_ring->xdp_buf[ntu];
+ xdp = ice_xdp_buf(rx_ring, ntu);
nb_buffs = min_t(u16, count, rx_ring->count - ntu);
nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
@@ -390,14 +395,9 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
}
ntu += nb_buffs;
- if (ntu == rx_ring->count) {
- rx_desc = ICE_RX_DESC(rx_ring, 0);
- xdp = rx_ring->xdp_buf;
+ if (ntu == rx_ring->count)
ntu = 0;
- }
- /* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.status_error0 = 0;
ice_release_rx_desc(rx_ring, ntu);
return count == nb_buffs;
@@ -419,19 +419,18 @@ static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
/**
* ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
* @rx_ring: Rx ring
- * @xdp_arr: Pointer to the SW ring of xdp_buff pointers
+ * @xdp: Pointer to XDP buffer
*
* This function allocates a new skb from a zero-copy Rx buffer.
*
* Returns the skb on success, NULL on failure.
*/
static struct sk_buff *
-ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
+ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
- struct xdp_buff *xdp = *xdp_arr;
+ unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
unsigned int metasize = xdp->data - xdp->data_meta;
unsigned int datasize = xdp->data_end - xdp->data;
- unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
struct sk_buff *skb;
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
@@ -445,7 +444,6 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
skb_metadata_set(skb, metasize);
xsk_buff_free(xdp);
- *xdp_arr = NULL;
return skb;
}
@@ -507,7 +505,6 @@ out_failure:
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
struct ice_tx_ring *xdp_ring;
unsigned int xdp_xmit = 0;
struct bpf_prog *xdp_prog;
@@ -522,7 +519,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
while (likely(total_rx_packets < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
unsigned int size, xdp_res = 0;
- struct xdp_buff **xdp;
+ struct xdp_buff *xdp;
struct sk_buff *skb;
u16 stat_err_bits;
u16 vlan_tag = 0;
@@ -540,31 +537,35 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
*/
dma_rmb();
+ xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean);
+
size = le16_to_cpu(rx_desc->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
- if (!size)
- break;
+ if (!size) {
+ xdp->data = NULL;
+ xdp->data_end = NULL;
+ xdp->data_hard_start = NULL;
+ xdp->data_meta = NULL;
+ goto construct_skb;
+ }
- xdp = &rx_ring->xdp_buf[rx_ring->next_to_clean];
- xsk_buff_set_size(*xdp, size);
- xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool);
+ xsk_buff_set_size(xdp, size);
+ xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
- xdp_res = ice_run_xdp_zc(rx_ring, *xdp, xdp_prog, xdp_ring);
+ xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
if (xdp_res) {
if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
xdp_xmit |= xdp_res;
else
- xsk_buff_free(*xdp);
+ xsk_buff_free(xdp);
- *xdp = NULL;
total_rx_bytes += size;
total_rx_packets++;
- cleaned_count++;
ice_bump_ntc(rx_ring);
continue;
}
-
+construct_skb:
/* XDP_PASS path */
skb = ice_construct_skb_zc(rx_ring, xdp);
if (!skb) {
@@ -572,7 +573,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
break;
}
- cleaned_count++;
ice_bump_ntc(rx_ring);
if (eth_skb_pad(skb)) {
@@ -594,8 +594,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
ice_receive_skb(rx_ring, skb, vlan_tag);
}
- if (cleaned_count >= ICE_RX_BUF_WRITE)
- failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);
+ failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring));
ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
@@ -811,15 +810,14 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
*/
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
- u16 i;
-
- for (i = 0; i < rx_ring->count; i++) {
- struct xdp_buff **xdp = &rx_ring->xdp_buf[i];
+ u16 count_mask = rx_ring->count - 1;
+ u16 ntc = rx_ring->next_to_clean;
+ u16 ntu = rx_ring->next_to_use;
- if (!xdp)
- continue;
+ for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+ struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);
- *xdp = NULL;
+ xsk_buff_free(xdp);
}
}
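
[editor's note] The rewritten cleanup walks only the live window between next_to_clean and next_to_use, wrapping with `count - 1` as a mask, which requires the ring size to be a power of two. A minimal demo of the masked walk:

```c
/* Free only the live window [ntc, ntu), wrapping via the power-of-two
 * mask, exactly as ice_xsk_clean_rx_ring() does above. */
#include <stdio.h>

int main(void)
{
	unsigned int count = 8, ntc = 6, ntu = 3;	/* wraps: 6,7,0,1,2 */
	unsigned int mask = count - 1;

	for (; ntc != ntu; ntc = (ntc + 1) & mask)
		printf("free slot %u\n", ntc);	/* xsk_buff_free() per slot */
	return 0;
}
```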
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index fd54d3ef890b..446894dde182 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -7648,6 +7648,20 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
struct vf_mac_filter *entry = NULL;
int ret = 0;
+ if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
+ !vf_data->trusted) {
+ dev_warn(&pdev->dev,
+ "VF %d requested MAC filter but is administratively denied\n",
+ vf);
+ return -EINVAL;
+ }
+ if (!is_valid_ether_addr(addr)) {
+ dev_warn(&pdev->dev,
+ "VF %d attempted to set invalid MAC filter\n",
+ vf);
+ return -EINVAL;
+ }
+
switch (info) {
case E1000_VF_MAC_FILTER_CLR:
/* remove all unicast MAC filters related to the current VF */
@@ -7661,20 +7675,6 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
}
break;
case E1000_VF_MAC_FILTER_ADD:
- if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
- !vf_data->trusted) {
- dev_warn(&pdev->dev,
- "VF %d requested MAC filter but is administratively denied\n",
- vf);
- return -EINVAL;
- }
- if (!is_valid_ether_addr(addr)) {
- dev_warn(&pdev->dev,
- "VF %d attempted to set invalid MAC filter\n",
- vf);
- return -EINVAL;
- }
-
/* try to find empty slot in the list */
list_for_each(pos, &adapter->vf_macs.l) {
entry = list_entry(pos, struct vf_mac_filter, l);
@@ -9254,7 +9254,7 @@ static int __maybe_unused igb_suspend(struct device *dev)
return __igb_shutdown(to_pci_dev(dev), NULL, 0);
}
-static int __maybe_unused igb_resume(struct device *dev)
+static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -9297,17 +9297,24 @@ static int __maybe_unused igb_resume(struct device *dev)
wr32(E1000_WUS, ~0);
- rtnl_lock();
+ if (!rpm)
+ rtnl_lock();
if (!err && netif_running(netdev))
err = __igb_open(netdev, true);
if (!err)
netif_device_attach(netdev);
- rtnl_unlock();
+ if (!rpm)
+ rtnl_unlock();
return err;
}
+static int __maybe_unused igb_resume(struct device *dev)
+{
+ return __igb_resume(dev, false);
+}
+
static int __maybe_unused igb_runtime_idle(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
@@ -9326,7 +9333,7 @@ static int __maybe_unused igb_runtime_suspend(struct device *dev)
static int __maybe_unused igb_runtime_resume(struct device *dev)
{
- return igb_resume(dev);
+ return __igb_resume(dev, true);
}
static void igb_shutdown(struct pci_dev *pdev)
@@ -9442,7 +9449,7 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the igb_resume routine.
+ * resembles the first-half of the __igb_resume routine.
**/
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
@@ -9482,7 +9489,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
*
* This callback is called when the error recovery driver tells us that
* its OK to resume normal operation. Implementation resembles the
- * second-half of the igb_resume routine.
+ * second-half of the __igb_resume routine.
*/
static void igb_io_resume(struct pci_dev *pdev)
{
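
[editor's note] The igb change exists because runtime resume is entered with the rtnl lock already held, so taking it again would deadlock; the new flag threads that knowledge through a shared helper. A generic pthreads model of the locked/unlocked-variant pattern (all names illustrative):

```c
/* One worker shared by two entry points; the caller says whether the
 * lock is already held, mirroring __igb_resume(dev, rpm). */
#include <pthread.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

static int __resume(int lock_held)
{
	if (!lock_held)
		pthread_mutex_lock(&rtnl);
	/* ... bring the device back up under the lock ... */
	if (!lock_held)
		pthread_mutex_unlock(&rtnl);
	return 0;
}

static int resume(void)         { return __resume(0); } /* system resume */
static int runtime_resume(void) { return __resume(1); } /* rtnl held */

int main(void)
{
	resume();
	pthread_mutex_lock(&rtnl);	/* caller holds the lock here */
	runtime_resume();
	pthread_mutex_unlock(&rtnl);
	return 0;
}
```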
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 74ccd622251a..4d988da68394 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2859,6 +2859,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_hw_init:
+ netif_napi_del(&adapter->rx_ring->napi);
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
err_sw_init:
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index b2ef9fde97b3..b6807e16eea9 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -636,7 +636,7 @@ s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
ltrv = rd32(IGC_LTRMAXV);
if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) {
ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max |
- (scale_min << IGC_LTRMAXV_SCALE_SHIFT);
+ (scale_max << IGC_LTRMAXV_SCALE_SHIFT);
wr32(IGC_LTRMAXV, ltrv);
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0f9f022260d7..45e2ec4d264d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -5531,6 +5531,10 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
if (!speed && hw->mac.ops.get_link_capabilities) {
ret = hw->mac.ops.get_link_capabilities(hw, &speed,
&autoneg);
+ /* remove NBASE-T speeds from default autonegotiation
+ * to accommodate broken network switches in the field
+ * which cannot cope with advertised NBASE-T speeds
+ */
speed &= ~(IXGBE_LINK_SPEED_5GB_FULL |
IXGBE_LINK_SPEED_2_5GB_FULL);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 9724ffb16518..e4b50c7781ff 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -3405,6 +3405,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
/* flush pending Tx transactions */
ixgbe_clear_tx_pending(hw);
+ /* set MDIO speed before talking to the PHY in case it's the 1st time */
+ ixgbe_set_mdio_speed(hw);
+
/* PHY ops must be identified and initialized prior to reset */
status = hw->phy.ops.init(hw);
if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 0da09ea81980..96bd6f2b21ed 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -71,6 +71,8 @@ struct xrx200_priv {
struct xrx200_chan chan_tx;
struct xrx200_chan chan_rx;
+ u16 rx_buf_size;
+
struct net_device *net_dev;
struct device *dev;
@@ -97,6 +99,16 @@ static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
xrx200_pmac_w32(priv, val, offset);
}
+static int xrx200_max_frame_len(int mtu)
+{
+ return VLAN_ETH_HLEN + mtu;
+}
+
+static int xrx200_buffer_size(int mtu)
+{
+ return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
+}
+
/* drop all the packets from the DMA ring */
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
@@ -109,8 +121,7 @@ static void xrx200_flush_dma(struct xrx200_chan *ch)
break;
desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
- (ch->priv->net_dev->mtu + VLAN_ETH_HLEN +
- ETH_FCS_LEN);
+ ch->priv->rx_buf_size;
ch->dma.desc++;
ch->dma.desc %= LTQ_DESC_NUM;
}
@@ -158,21 +169,21 @@ static int xrx200_close(struct net_device *net_dev)
static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
- int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
struct sk_buff *skb = ch->skb[ch->dma.desc];
+ struct xrx200_priv *priv = ch->priv;
dma_addr_t mapping;
int ret = 0;
- ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
- len);
+ ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(priv->net_dev,
+ priv->rx_buf_size);
if (!ch->skb[ch->dma.desc]) {
ret = -ENOMEM;
goto skip;
}
- mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
- len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
+ mapping = dma_map_single(priv->dev, ch->skb[ch->dma.desc]->data,
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, mapping))) {
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
ch->skb[ch->dma.desc] = skb;
ret = -ENOMEM;
@@ -184,7 +195,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
wmb();
skip:
ch->dma.desc_base[ch->dma.desc].ctl =
- LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len;
+ LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size;
return ret;
}
@@ -356,6 +367,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
int ret = 0;
net_dev->mtu = new_mtu;
+ priv->rx_buf_size = xrx200_buffer_size(new_mtu);
if (new_mtu <= old_mtu)
return ret;
@@ -375,6 +387,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
ret = xrx200_alloc_skb(ch_rx);
if (ret) {
net_dev->mtu = old_mtu;
+ priv->rx_buf_size = xrx200_buffer_size(old_mtu);
break;
}
dev_kfree_skb_any(skb);
@@ -505,7 +518,8 @@ static int xrx200_probe(struct platform_device *pdev)
net_dev->netdev_ops = &xrx200_netdev_ops;
SET_NETDEV_DEV(net_dev, dev);
net_dev->min_mtu = ETH_ZLEN;
- net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
+ priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);
/* load the memory ranges */
priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
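
[editor's note] The new helpers size the RX buffer to cover the largest frame for the MTU, rounded up to a multiple of four DMA bursts. Worked numbers, under the assumption that XRX200_DMA_BURST_LEN is 8 (giving 32-byte alignment):

```c
/* Worked example of the sizing helpers added above. VLAN_ETH_HLEN is 18;
 * the burst length of 8 is an assumption for the demo. */
#include <stdio.h>

#define VLAN_ETH_HLEN 18
#define XRX200_DMA_BURST_LEN 8
#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

static int max_frame_len(int mtu) { return VLAN_ETH_HLEN + mtu; }
static int buffer_size(int mtu)
{
	return round_up(max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
}

int main(void)
{
	/* MTU 1500: frame 1518 -> rounded to 1536; MTU 1400: 1418 -> 1440 */
	printf("mtu 1500 -> buf %d\n", buffer_size(1500));
	printf("mtu 1400 -> buf %d\n", buffer_size(1400));
	return 0;
}
```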
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 4369a3ffad45..c687dc9aa973 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -54,12 +54,14 @@ int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
u32 dev_id, u32 hw_id)
{
- struct prestera_port *port = NULL;
+ struct prestera_port *port = NULL, *tmp;
read_lock(&sw->port_list_lock);
- list_for_each_entry(port, &sw->port_list, list) {
- if (port->dev_id == dev_id && port->hw_id == hw_id)
+ list_for_each_entry(tmp, &sw->port_list, list) {
+ if (tmp->dev_id == dev_id && tmp->hw_id == hw_id) {
+ port = tmp;
break;
+ }
}
read_unlock(&sw->port_list_lock);
@@ -68,12 +70,14 @@ struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id)
{
- struct prestera_port *port = NULL;
+ struct prestera_port *port = NULL, *tmp;
read_lock(&sw->port_list_lock);
- list_for_each_entry(port, &sw->port_list, list) {
- if (port->id == id)
+ list_for_each_entry(tmp, &sw->port_list, list) {
+ if (tmp->id == id) {
+ port = tmp;
break;
+ }
}
read_unlock(&sw->port_list_lock);
@@ -764,23 +768,27 @@ static int prestera_netdev_port_event(struct net_device *lower,
struct net_device *dev,
unsigned long event, void *ptr)
{
- struct netdev_notifier_changeupper_info *info = ptr;
+ struct netdev_notifier_info *info = ptr;
+ struct netdev_notifier_changeupper_info *cu_info;
struct prestera_port *port = netdev_priv(dev);
struct netlink_ext_ack *extack;
struct net_device *upper;
- extack = netdev_notifier_info_to_extack(&info->info);
- upper = info->upper_dev;
+ extack = netdev_notifier_info_to_extack(info);
+ cu_info = container_of(info,
+ struct netdev_notifier_changeupper_info,
+ info);
switch (event) {
case NETDEV_PRECHANGEUPPER:
+ upper = cu_info->upper_dev;
if (!netif_is_bridge_master(upper) &&
!netif_is_lag_master(upper)) {
NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EINVAL;
}
- if (!info->linking)
+ if (!cu_info->linking)
break;
if (netdev_has_any_upper_dev(upper)) {
@@ -789,7 +797,7 @@ static int prestera_netdev_port_event(struct net_device *lower,
}
if (netif_is_lag_master(upper) &&
- !prestera_lag_master_check(upper, info->upper_info, extack))
+ !prestera_lag_master_check(upper, cu_info->upper_info, extack))
return -EOPNOTSUPP;
if (netif_is_lag_master(upper) && vlan_uses_dev(dev)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -805,14 +813,15 @@ static int prestera_netdev_port_event(struct net_device *lower,
break;
case NETDEV_CHANGEUPPER:
+ upper = cu_info->upper_dev;
if (netif_is_bridge_master(upper)) {
- if (info->linking)
+ if (cu_info->linking)
return prestera_bridge_port_join(upper, port,
extack);
else
prestera_bridge_port_leave(upper, port);
} else if (netif_is_lag_master(upper)) {
- if (info->linking)
+ if (cu_info->linking)
return prestera_lag_port_add(port, upper);
else
prestera_lag_port_del(port);
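
[editor's note] Both prestera lookups shared the classic list_for_each_entry() pitfall: when the loop completes without a break, the cursor no longer points at a real element, so it must never double as the return value. A userspace model with a plain singly linked list:

```c
/* Matches are copied to a separate result variable, so the function
 * returns NULL when nothing is found — never a garbage cursor. */
#include <stdio.h>

struct port { int id; struct port *next; };

static struct port *find_port(struct port *head, int id)
{
	struct port *port = NULL, *tmp;

	for (tmp = head; tmp; tmp = tmp->next) {
		if (tmp->id == id) {
			port = tmp;	/* only set on a real match */
			break;
		}
	}
	return port;
}

int main(void)
{
	struct port b = { 2, NULL }, a = { 1, &b };
	printf("id 2: %s\n", find_port(&a, 2) ? "found" : "not found");
	printf("id 9: %s\n", find_port(&a, 9) ? "found" : "not found");
	return 0;
}
```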
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 217e3b351dfe..c34833ff1dde 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -8494,7 +8494,8 @@ mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
u8 mac_profile;
int err;
- if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
+ if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
+ !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);
err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 2e25798c610e..7f49042484bd 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -321,6 +321,8 @@ static int ks8851_probe_par(struct platform_device *pdev)
return ret;
netdev->irq = platform_get_irq(pdev, 0);
+ if (netdev->irq < 0)
+ return netdev->irq;
return ks8851_probe_common(netdev, dev, msg_enable);
}
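
[editor's note] This is one of two IRQ fixes in the series (smc911x gets the same treatment below): platform_get_irq() returns a negative errno when the IRQ is absent, which must be propagated rather than stored as a bogus IRQ number. A runnable sketch of the convention, with a stand-in for the kernel call:

```c
/* fake_platform_get_irq() is an illustrative stub, not the kernel API. */
#include <stdio.h>

static int fake_platform_get_irq(int present)
{
	return present ? 42 : -6;	/* -ENXIO when the IRQ is missing */
}

static int probe(int irq_present)
{
	int irq = fake_platform_get_irq(irq_present);

	if (irq < 0)
		return irq;		/* propagate the errno */
	printf("using irq %d\n", irq);
	return 0;
}

int main(void)
{
	printf("probe ok: %d\n", probe(1));
	printf("probe missing irq: %d\n", probe(0));
	return 0;
}
```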
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 7160b42f51dd..d0111cb3b40e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -201,7 +201,7 @@ int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
struct qlcnic_info *, u16);
int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
void qlcnic_sriov_free_vlans(struct qlcnic_adapter *);
-void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
+int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *);
void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *,
struct qlcnic_vf_info *, u16);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index dd03be3fc82a..42a44c97572a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -432,7 +432,7 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
- int i, num_vlans;
+ int i, num_vlans, ret;
u16 *vlans;
if (sriov->allowed_vlans)
@@ -443,7 +443,9 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
sriov->num_allowed_vlans);
- qlcnic_sriov_alloc_vlans(adapter);
+ ret = qlcnic_sriov_alloc_vlans(adapter);
+ if (ret)
+ return ret;
if (!sriov->any_vlan)
return 0;
@@ -2154,7 +2156,7 @@ static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
return err;
}
-void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
+int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_vf_info *vf;
@@ -2164,7 +2166,11 @@ void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
vf = &sriov->vf_info[i];
vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
sizeof(*vf->sriov_vlans), GFP_KERNEL);
+ if (!vf->sriov_vlans)
+ return -ENOMEM;
}
+
+ return 0;
}
void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 447720b93e5a..e90fa97c0ae6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -597,7 +597,9 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
if (err)
goto del_flr_queue;
- qlcnic_sriov_alloc_vlans(adapter);
+ err = qlcnic_sriov_alloc_vlans(adapter);
+ if (err)
+ goto del_flr_queue;
return err;
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 6aa81229b68a..e77a5cb4e40d 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -609,6 +609,9 @@ static size_t ef100_update_stats(struct efx_nic *efx,
ef100_common_stat_mask(mask);
ef100_ethtool_stat_mask(mask);
+ if (!mc_stats)
+ return 0;
+
efx_nic_copy_stats(efx, mc_stats);
efx_nic_update_stats(ef100_stat_desc, EF100_STAT_COUNT, mask,
stats, mc_stats, false);
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
index 966f13e7475d..11a6aee852e9 100644
--- a/drivers/net/ethernet/sfc/falcon/rx.c
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -728,7 +728,10 @@ static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
efx->rx_bufs_per_page);
rx_queue->page_ring = kcalloc(page_ring_size,
sizeof(*rx_queue->page_ring), GFP_KERNEL);
- rx_queue->page_ptr_mask = page_ring_size - 1;
+ if (!rx_queue->page_ring)
+ rx_queue->page_ptr_mask = 0;
+ else
+ rx_queue->page_ptr_mask = page_ring_size - 1;
}
void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index 68fc7d317693..0983abc0cc5f 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -150,7 +150,10 @@ static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
efx->rx_bufs_per_page);
rx_queue->page_ring = kcalloc(page_ring_size,
sizeof(*rx_queue->page_ring), GFP_KERNEL);
- rx_queue->page_ptr_mask = page_ring_size - 1;
+ if (!rx_queue->page_ring)
+ rx_queue->page_ptr_mask = 0;
+ else
+ rx_queue->page_ptr_mask = page_ring_size - 1;
}
static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 89381f796985..dd6f69ced4ee 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -2072,6 +2072,11 @@ static int smc911x_drv_probe(struct platform_device *pdev)
ndev->dma = (unsigned char)-1;
ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq < 0) {
+ ret = ndev->irq;
+ goto release_both;
+ }
+
lp = netdev_priv(ndev);
lp->netdev = ndev;
#ifdef SMC_DYNAMIC_BUS_CONFIG
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 6924a6aacbd5..c469abc91fa1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -33,6 +33,7 @@ struct rk_gmac_ops {
void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed);
void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed);
void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
+ bool regs_valid;
u32 regs[];
};
@@ -1092,6 +1093,7 @@ static const struct rk_gmac_ops rk3568_ops = {
.set_to_rmii = rk3568_set_to_rmii,
.set_rgmii_speed = rk3568_set_gmac_speed,
.set_rmii_speed = rk3568_set_gmac_speed,
+ .regs_valid = true,
.regs = {
0xfe2a0000, /* gmac0 */
0xfe010000, /* gmac1 */
@@ -1383,7 +1385,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
* to be distinguished.
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res) {
+ if (res && ops->regs_valid) {
int i = 0;
while (ops->regs[i]) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index 66fc8be34bb7..e2e0f977875d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -26,7 +26,7 @@
#define ETHER_CLK_SEL_FREQ_SEL_125M (BIT(9) | BIT(8))
#define ETHER_CLK_SEL_FREQ_SEL_50M BIT(9)
#define ETHER_CLK_SEL_FREQ_SEL_25M BIT(8)
-#define ETHER_CLK_SEL_FREQ_SEL_2P5M BIT(0)
+#define ETHER_CLK_SEL_FREQ_SEL_2P5M 0
#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN BIT(0)
#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC BIT(10)
#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV BIT(11)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 5f129733aabd..873b9e3e5da2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -172,6 +172,19 @@ struct stmmac_flow_entry {
int is_l4;
};
+/* Rx Frame Steering */
+enum stmmac_rfs_type {
+ STMMAC_RFS_T_VLAN,
+ STMMAC_RFS_T_MAX,
+};
+
+struct stmmac_rfs_entry {
+ unsigned long cookie;
+ int in_use;
+ int type;
+ int tc;
+};
+
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
@@ -289,6 +302,10 @@ struct stmmac_priv {
struct stmmac_tc_entry *tc_entries;
unsigned int flow_entries_max;
struct stmmac_flow_entry *flow_entries;
+ unsigned int rfs_entries_max[STMMAC_RFS_T_MAX];
+ unsigned int rfs_entries_cnt[STMMAC_RFS_T_MAX];
+ unsigned int rfs_entries_total;
+ struct stmmac_rfs_entry *rfs_entries;
/* Pulse Per Second output */
struct stmmac_pps_cfg pps[STMMAC_PPS_MAX];
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index da8306f60730..8ded4be08b00 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1461,16 +1461,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ if (priv->dma_cap.addr64 <= 32)
+ gfp |= GFP_DMA32;
if (!buf->page) {
- buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->page)
return -ENOMEM;
buf->page_offset = stmmac_rx_offset(priv);
}
if (priv->sph && !buf->sec_page) {
- buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->sec_page)
return -ENOMEM;
@@ -4482,6 +4486,10 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int entry = rx_q->dirty_rx;
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ if (priv->dma_cap.addr64 <= 32)
+ gfp |= GFP_DMA32;
while (dirty-- > 0) {
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
@@ -4494,13 +4502,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
p = rx_q->dma_rx + entry;
if (!buf->page) {
- buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->page)
break;
}
if (priv->sph && !buf->sec_page) {
- buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->sec_page)
break;
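
[editor's note] Both stmmac allocation sites now widen the GFP mask when the DMA engine reports 32 or fewer addressing bits, forcing RX pages below 4 GiB. A sketch of the flag selection, with stand-in flag values rather than the kernel's:

```c
/* Widen to GFP_DMA32 only when the controller cannot address above
 * 4 GiB; flag values here are demo stand-ins. */
#include <stdio.h>

#define GFP_ATOMIC   0x1u
#define __GFP_NOWARN 0x2u
#define GFP_DMA32    0x4u

static unsigned int rx_alloc_gfp(unsigned int dma_addr_bits)
{
	unsigned int gfp = GFP_ATOMIC | __GFP_NOWARN;

	if (dma_addr_bits <= 32)	/* pages must sit below 4 GiB */
		gfp |= GFP_DMA32;
	return gfp;
}

int main(void)
{
	printf("addr64=32 -> gfp %#x\n", rx_alloc_gfp(32));
	printf("addr64=40 -> gfp %#x\n", rx_alloc_gfp(40));
	return 0;
}
```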
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 580cc035536b..be9b58b2abf9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -102,7 +102,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
time.tv_nsec = priv->plat->est->btr_reserve[0];
time.tv_sec = priv->plat->est->btr_reserve[1];
basetime = timespec64_to_ktime(time);
- cycle_time = priv->plat->est->ctr[1] * NSEC_PER_SEC +
+ cycle_time = (u64)priv->plat->est->ctr[1] * NSEC_PER_SEC +
priv->plat->est->ctr[0];
time = stmmac_calc_tas_basetime(basetime,
current_time_ns,
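
[editor's note] ctr[1] holds the cycle time in seconds as a 32-bit value; multiplied by NSEC_PER_SEC in 32-bit arithmetic (as on 32-bit builds), anything from 5 s upward wraps. A runnable demo of why the (u64) cast matters:

```c
/* 5 * 1e9 = 5,000,000,000 exceeds 2^32, so the 32-bit product wraps. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000u

int main(void)
{
	uint32_t ctr1 = 5;	/* 5-second cycle time */

	uint32_t wrong = ctr1 * NSEC_PER_SEC;		/* wraps mod 2^32 */
	uint64_t right = (uint64_t)ctr1 * NSEC_PER_SEC;	/* the patch's cast */

	printf("32-bit: %u ns\n", wrong);	/* 705032704, not 5e9 */
	printf("64-bit: %llu ns\n", (unsigned long long)right);
	return 0;
}
```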
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 1c4ea0b1b845..d0a2b289f460 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -232,11 +232,33 @@ static int tc_setup_cls_u32(struct stmmac_priv *priv,
}
}
+static int tc_rfs_init(struct stmmac_priv *priv)
+{
+ int i;
+
+ priv->rfs_entries_max[STMMAC_RFS_T_VLAN] = 8;
+
+ for (i = 0; i < STMMAC_RFS_T_MAX; i++)
+ priv->rfs_entries_total += priv->rfs_entries_max[i];
+
+ priv->rfs_entries = devm_kcalloc(priv->device,
+ priv->rfs_entries_total,
+ sizeof(*priv->rfs_entries),
+ GFP_KERNEL);
+ if (!priv->rfs_entries)
+ return -ENOMEM;
+
+ dev_info(priv->device, "Enabled RFS Flow TC (entries=%d)\n",
+ priv->rfs_entries_total);
+
+ return 0;
+}
+
static int tc_init(struct stmmac_priv *priv)
{
struct dma_features *dma_cap = &priv->dma_cap;
unsigned int count;
- int i;
+ int ret, i;
if (dma_cap->l3l4fnum) {
priv->flow_entries_max = dma_cap->l3l4fnum;
@@ -250,10 +272,14 @@ static int tc_init(struct stmmac_priv *priv)
for (i = 0; i < priv->flow_entries_max; i++)
priv->flow_entries[i].idx = i;
- dev_info(priv->device, "Enabled Flow TC (entries=%d)\n",
+ dev_info(priv->device, "Enabled L3L4 Flow TC (entries=%d)\n",
priv->flow_entries_max);
}
+ ret = tc_rfs_init(priv);
+ if (ret)
+ return -ENOMEM;
+
if (!priv->plat->fpe_cfg) {
priv->plat->fpe_cfg = devm_kzalloc(priv->device,
sizeof(*priv->plat->fpe_cfg),
@@ -607,16 +633,45 @@ static int tc_del_flow(struct stmmac_priv *priv,
return ret;
}
+static struct stmmac_rfs_entry *tc_find_rfs(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls,
+ bool get_free)
+{
+ int i;
+
+ for (i = 0; i < priv->rfs_entries_total; i++) {
+ struct stmmac_rfs_entry *entry = &priv->rfs_entries[i];
+
+ if (entry->cookie == cls->cookie)
+ return entry;
+ if (get_free && entry->in_use == false)
+ return entry;
+ }
+
+ return NULL;
+}
+
#define VLAN_PRIO_FULL_MASK (0x07)
static int tc_add_vlan_flow(struct stmmac_priv *priv,
struct flow_cls_offload *cls)
{
+ struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_dissector *dissector = rule->match.dissector;
int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
struct flow_match_vlan match;
+ if (!entry) {
+ entry = tc_find_rfs(priv, cls, true);
+ if (!entry)
+ return -ENOENT;
+ }
+
+ if (priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN] >=
+ priv->rfs_entries_max[STMMAC_RFS_T_VLAN])
+ return -ENOENT;
+
/* Nothing to do here */
if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
return -EINVAL;
@@ -638,6 +693,12 @@ static int tc_add_vlan_flow(struct stmmac_priv *priv,
prio = BIT(match.key->vlan_priority);
stmmac_rx_queue_prio(priv, priv->hw, prio, tc);
+
+ entry->in_use = true;
+ entry->cookie = cls->cookie;
+ entry->tc = tc;
+ entry->type = STMMAC_RFS_T_VLAN;
+ priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]++;
}
return 0;
@@ -646,20 +707,19 @@ static int tc_add_vlan_flow(struct stmmac_priv *priv,
static int tc_del_vlan_flow(struct stmmac_priv *priv,
struct flow_cls_offload *cls)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
- struct flow_dissector *dissector = rule->match.dissector;
- int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
+ struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
- /* Nothing to do here */
- if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
- return -EINVAL;
+ if (!entry || !entry->in_use || entry->type != STMMAC_RFS_T_VLAN)
+ return -ENOENT;
- if (tc < 0) {
- netdev_err(priv->dev, "Invalid traffic class\n");
- return -EINVAL;
- }
+ stmmac_rx_queue_prio(priv, priv->hw, 0, entry->tc);
+
+ entry->in_use = false;
+ entry->cookie = 0;
+ entry->tc = 0;
+ entry->type = 0;
- stmmac_rx_queue_prio(priv, priv->hw, 0, tc);
+ priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]--;
return 0;
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index c092cb61416a..ffbbda8f4d41 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1844,13 +1844,14 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
if (ret < 0) {
dev_err(dev, "%pOF error reading port_id %d\n",
port_np, ret);
- return ret;
+ goto of_node_put;
}
if (!port_id || port_id > common->port_num) {
dev_err(dev, "%pOF has invalid port_id %u %s\n",
port_np, port_id, port_np->name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto of_node_put;
}
port = am65_common_get_port(common, port_id);
@@ -1866,8 +1867,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
(AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1));
port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base);
- if (IS_ERR(port->slave.mac_sl))
- return PTR_ERR(port->slave.mac_sl);
+ if (IS_ERR(port->slave.mac_sl)) {
+ ret = PTR_ERR(port->slave.mac_sl);
+ goto of_node_put;
+ }
port->disabled = !of_device_is_available(port_np);
if (port->disabled) {
@@ -1880,7 +1883,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
ret = PTR_ERR(port->slave.ifphy);
dev_err(dev, "%pOF error retrieving port phy: %d\n",
port_np, ret);
- return ret;
+ goto of_node_put;
}
port->slave.mac_only =
@@ -1889,10 +1892,12 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
/* get phy/link info */
if (of_phy_is_fixed_link(port_np)) {
ret = of_phy_register_fixed_link(port_np);
- if (ret)
- return dev_err_probe(dev, ret,
+ if (ret) {
+ ret = dev_err_probe(dev, ret,
"failed to register fixed-link phy %pOF\n",
port_np);
+ goto of_node_put;
+ }
port->slave.phy_node = of_node_get(port_np);
} else {
port->slave.phy_node =
@@ -1902,14 +1907,15 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
if (!port->slave.phy_node) {
dev_err(dev,
"slave[%d] no phy found\n", port_id);
- return -ENODEV;
+ ret = -ENODEV;
+ goto of_node_put;
}
ret = of_get_phy_mode(port_np, &port->slave.phy_if);
if (ret) {
dev_err(dev, "%pOF read phy-mode err %d\n",
port_np, ret);
- return ret;
+ goto of_node_put;
}
ret = of_get_mac_address(port_np, port->slave.mac_addr);
@@ -1932,6 +1938,11 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
}
return 0;
+
+of_node_put:
+ of_node_put(port_np);
+ of_node_put(node);
+ return ret;
}
static void am65_cpsw_pcpu_stats_free(void *data)
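
[editor's note] Every early return inside the am65-cpsw port loop leaked the references held on port_np and node; the new label drops both on any failure. A userspace model of the goto-cleanup shape, with refcounts as plain counters and illustrative names:

```c
/* A loop that takes a reference per iteration must drop it on every
 * error path, so failures funnel through one cleanup label. */
#include <stdio.h>

struct node { int refs; };

static void node_get(struct node *n) { n->refs++; }
static void node_put(struct node *n) { n->refs--; }

static int init_ports(struct node *parent, struct node *children, int n)
{
	struct node *child = NULL;
	int ret = 0, i;

	node_get(parent);
	for (i = 0; i < n; i++) {
		child = &children[i];
		node_get(child);	/* reference held by the iterator */
		if (i == 1) {		/* simulate a mid-loop failure */
			ret = -19;	/* -ENODEV */
			goto of_node_put; /* must drop child AND parent */
		}
		node_put(child);	/* normal loop advance */
	}
	node_put(parent);
	return 0;

of_node_put:
	node_put(child);
	node_put(parent);
	return ret;
}

int main(void)
{
	struct node parent = { 0 }, kids[3] = { {0}, {0}, {0} };
	int ret = init_ports(&parent, kids, 3);

	/* balanced on failure: all refcounts back to zero */
	printf("ret=%d parent.refs=%d kid1.refs=%d\n",
	       ret, parent.refs, kids[1].refs);
	return 0;
}
```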