Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c          |  44
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c      | 155
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c  | 158
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c     | 254
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c    | 194
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c   | 114
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c     |   2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c   |   4
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_switchdev.c  |  26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig             |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c    |  53
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c                          | 100
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_net.c                      |  52
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c                   |  10
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-switchdev.c               |  27
-rw-r--r--  drivers/net/ethernet/ti/cpsw_switchdev.c                    |  27
16 files changed, 799 insertions(+), 423 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 818ac2c7c7ea..dd11c57027bb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -423,6 +423,30 @@ static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
return (*ppos = len);
}
+static int hns3_dbg_check_cmd(struct hnae3_handle *handle, char *cmd_buf)
+{
+ int ret = 0;
+
+ if (strncmp(cmd_buf, "help", 4) == 0)
+ hns3_dbg_help(handle);
+ else if (strncmp(cmd_buf, "queue info", 10) == 0)
+ ret = hns3_dbg_queue_info(handle, cmd_buf);
+ else if (strncmp(cmd_buf, "queue map", 9) == 0)
+ ret = hns3_dbg_queue_map(handle);
+ else if (strncmp(cmd_buf, "bd info", 7) == 0)
+ ret = hns3_dbg_bd_info(handle, cmd_buf);
+ else if (strncmp(cmd_buf, "dev capability", 14) == 0)
+ hns3_dbg_dev_caps(handle);
+ else if (strncmp(cmd_buf, "dev spec", 8) == 0)
+ hns3_dbg_dev_specs(handle);
+ else if (handle->ae_algo->ops->dbg_run_cmd)
+ ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
+ else
+ ret = -EOPNOTSUPP;
+
+ return ret;
+}
+
static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
size_t count, loff_t *ppos)
{
@@ -430,7 +454,7 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
struct hns3_nic_priv *priv = handle->priv;
char *cmd_buf, *cmd_buf_tmp;
int uncopied_bytes;
- int ret = 0;
+ int ret;
if (*ppos != 0)
return 0;
@@ -461,23 +485,7 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
count = cmd_buf_tmp - cmd_buf + 1;
}
- if (strncmp(cmd_buf, "help", 4) == 0)
- hns3_dbg_help(handle);
- else if (strncmp(cmd_buf, "queue info", 10) == 0)
- ret = hns3_dbg_queue_info(handle, cmd_buf);
- else if (strncmp(cmd_buf, "queue map", 9) == 0)
- ret = hns3_dbg_queue_map(handle);
- else if (strncmp(cmd_buf, "bd info", 7) == 0)
- ret = hns3_dbg_bd_info(handle, cmd_buf);
- else if (strncmp(cmd_buf, "dev capability", 14) == 0)
- hns3_dbg_dev_caps(handle);
- else if (strncmp(cmd_buf, "dev spec", 8) == 0)
- hns3_dbg_dev_specs(handle);
- else if (handle->ae_algo->ops->dbg_run_cmd)
- ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
- else
- ret = -EOPNOTSUPP;
-
+ ret = hns3_dbg_check_cmd(handle, cmd_buf);
if (ret)
hns3_dbg_help(handle);
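
The hunk above only moves the strncmp chain into the new hns3_dbg_check_cmd() helper. A possible next step, sketched below as standalone C that is not part of this patch, is a lookup table keyed by command prefix; the dbg_handle type and dbg_cmd_func names are hypothetical stand-ins for the driver's hnae3 types.

    /* Minimal standalone sketch (not from the patch): the strncmp chain
     * expressed as a prefix-match lookup table.
     */
    #include <errno.h>
    #include <string.h>

    struct dbg_handle;                      /* opaque stand-in for hnae3_handle */
    typedef int (*dbg_cmd_func)(struct dbg_handle *h, const char *buf);

    struct dbg_cmd_entry {
            const char *prefix;             /* command name to match */
            dbg_cmd_func func;              /* handler for that command */
    };

    static int dbg_dispatch(const struct dbg_cmd_entry *tbl, size_t n,
                            struct dbg_handle *h, const char *buf)
    {
            size_t i;

            for (i = 0; i < n; i++)
                    if (strncmp(buf, tbl[i].prefix, strlen(tbl[i].prefix)) == 0)
                            return tbl[i].func(h, buf);

            return -EOPNOTSUPP;             /* no match: caller prints help */
    }
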
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 6546b47bef88..1bd0ddfaec4d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -189,38 +189,53 @@ static bool hclge_is_special_opcode(u16 opcode)
return false;
}
-static int hclge_cmd_convert_err_code(u16 desc_ret)
+struct errcode {
+ u32 imp_errcode;
+ int common_errno;
+};
+
+static void hclge_cmd_copy_desc(struct hclge_hw *hw, struct hclge_desc *desc,
+ int num)
{
- switch (desc_ret) {
- case HCLGE_CMD_EXEC_SUCCESS:
- return 0;
- case HCLGE_CMD_NO_AUTH:
- return -EPERM;
- case HCLGE_CMD_NOT_SUPPORTED:
- return -EOPNOTSUPP;
- case HCLGE_CMD_QUEUE_FULL:
- return -EXFULL;
- case HCLGE_CMD_NEXT_ERR:
- return -ENOSR;
- case HCLGE_CMD_UNEXE_ERR:
- return -ENOTBLK;
- case HCLGE_CMD_PARA_ERR:
- return -EINVAL;
- case HCLGE_CMD_RESULT_ERR:
- return -ERANGE;
- case HCLGE_CMD_TIMEOUT:
- return -ETIME;
- case HCLGE_CMD_HILINK_ERR:
- return -ENOLINK;
- case HCLGE_CMD_QUEUE_ILLEGAL:
- return -ENXIO;
- case HCLGE_CMD_INVALID:
- return -EBADR;
- default:
- return -EIO;
+ struct hclge_desc *desc_to_use;
+ int handle = 0;
+
+ while (handle < num) {
+ desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+ *desc_to_use = desc[handle];
+ (hw->cmq.csq.next_to_use)++;
+ if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
+ hw->cmq.csq.next_to_use = 0;
+ handle++;
}
}
+static int hclge_cmd_convert_err_code(u16 desc_ret)
+{
+ struct errcode hclge_cmd_errcode[] = {
+ {HCLGE_CMD_EXEC_SUCCESS, 0},
+ {HCLGE_CMD_NO_AUTH, -EPERM},
+ {HCLGE_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
+ {HCLGE_CMD_QUEUE_FULL, -EXFULL},
+ {HCLGE_CMD_NEXT_ERR, -ENOSR},
+ {HCLGE_CMD_UNEXE_ERR, -ENOTBLK},
+ {HCLGE_CMD_PARA_ERR, -EINVAL},
+ {HCLGE_CMD_RESULT_ERR, -ERANGE},
+ {HCLGE_CMD_TIMEOUT, -ETIME},
+ {HCLGE_CMD_HILINK_ERR, -ENOLINK},
+ {HCLGE_CMD_QUEUE_ILLEGAL, -ENXIO},
+ {HCLGE_CMD_INVALID, -EBADR},
+ };
+ u32 errcode_count = ARRAY_SIZE(hclge_cmd_errcode);
+ u32 i;
+
+ for (i = 0; i < errcode_count; i++)
+ if (hclge_cmd_errcode[i].imp_errcode == desc_ret)
+ return hclge_cmd_errcode[i].common_errno;
+
+ return -EIO;
+}
+
static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
int num, int ntc)
{
@@ -244,6 +259,44 @@ static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
return hclge_cmd_convert_err_code(desc_ret);
}
+static int hclge_cmd_check_result(struct hclge_hw *hw, struct hclge_desc *desc,
+ int num, int ntc)
+{
+ struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
+ bool is_completed = false;
+ u32 timeout = 0;
+ int handle, ret;
+
+ /**
+ * If the command is sync, wait for the firmware to write back,
+ * if multi descriptors to be sent, use the first one to check
+ */
+ if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
+ do {
+ if (hclge_cmd_csq_done(hw)) {
+ is_completed = true;
+ break;
+ }
+ udelay(1);
+ timeout++;
+ } while (timeout < hw->cmq.tx_timeout);
+ }
+
+ if (!is_completed)
+ ret = -EBADE;
+ else
+ ret = hclge_cmd_check_retval(hw, desc, num, ntc);
+
+ /* Clean the command send queue */
+ handle = hclge_cmd_csq_clean(hw);
+ if (handle < 0)
+ ret = handle;
+ else if (handle != num)
+ dev_warn(&hdev->pdev->dev,
+ "cleaned %d, need to clean %d\n", handle, num);
+ return ret;
+}
+
/**
* hclge_cmd_send - send command to command queue
* @hw: pointer to the hw struct
@@ -257,11 +310,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
struct hclge_cmq_ring *csq = &hw->cmq.csq;
- struct hclge_desc *desc_to_use;
- bool complete = false;
- u32 timeout = 0;
- int handle = 0;
- int retval;
+ int ret;
int ntc;
spin_lock_bh(&hw->cmq.csq.lock);
@@ -285,49 +334,17 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
* which will be use for hardware to write back
*/
ntc = hw->cmq.csq.next_to_use;
- while (handle < num) {
- desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
- *desc_to_use = desc[handle];
- (hw->cmq.csq.next_to_use)++;
- if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
- hw->cmq.csq.next_to_use = 0;
- handle++;
- }
+
+ hclge_cmd_copy_desc(hw, desc, num);
/* Write to hardware */
hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);
- /**
- * If the command is sync, wait for the firmware to write back,
- * if multi descriptors to be sent, use the first one to check
- */
- if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
- do {
- if (hclge_cmd_csq_done(hw)) {
- complete = true;
- break;
- }
- udelay(1);
- timeout++;
- } while (timeout < hw->cmq.tx_timeout);
- }
-
- if (!complete)
- retval = -EBADE;
- else
- retval = hclge_cmd_check_retval(hw, desc, num, ntc);
-
- /* Clean the command send queue */
- handle = hclge_cmd_csq_clean(hw);
- if (handle < 0)
- retval = handle;
- else if (handle != num)
- dev_warn(&hdev->pdev->dev,
- "cleaned %d, need to clean %d\n", handle, num);
+ ret = hclge_cmd_check_result(hw, desc, num, ntc);
spin_unlock_bh(&hw->cmq.csq.lock);
- return retval;
+ return ret;
}
static void hclge_set_default_capability(struct hclge_dev *hdev)
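
The hclge_cmd_convert_err_code() hunk above replaces a twelve-case switch with a table scan. Below is a minimal standalone sketch of the same table-driven errno conversion; the numeric codes are illustrative placeholders, not the real HCLGE_CMD_* values.

    /* Standalone sketch of the table-driven error conversion the patch
     * introduces: firmware status words map to errno values via a linear
     * scan instead of a switch.
     */
    #include <errno.h>
    #include <stddef.h>

    struct errcode {
            unsigned int imp_errcode;       /* status written back by firmware */
            int common_errno;               /* matching kernel errno */
    };

    static int convert_err_code(unsigned short desc_ret)
    {
            static const struct errcode map[] = {
                    { 0, 0 },               /* success */
                    { 1, -EPERM },          /* no auth */
                    { 2, -EOPNOTSUPP },     /* opcode not supported */
                    { 3, -EINVAL },         /* bad parameter */
            };
            size_t i;

            for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
                    if (map[i].imp_errcode == desc_ret)
                            return map[i].common_errno;

            return -EIO;                    /* unknown status */
    }

One design note: the sketch makes the table static const so it stays out of the stack frame, whereas the patch builds hclge_cmd_errcode[] on the stack on every call.
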
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index a0a33c02ce25..6b1d197df881 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -984,39 +984,39 @@ static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc);
}
-static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
+static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev)
{
struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
- struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
- struct hclge_rx_priv_wl_buf *rx_priv_wl;
- struct hclge_rx_com_wl *rx_packet_cnt;
- struct hclge_rx_com_thrd *rx_com_thrd;
- struct hclge_rx_com_wl *rx_com_wl;
- enum hclge_opcode_type cmd;
- struct hclge_desc desc[2];
+ struct hclge_desc desc;
int i, ret;
- cmd = HCLGE_OPC_TX_BUFF_ALLOC;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");
-
- tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
+ tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
- cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ return 0;
+}
+
+static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
+ struct hclge_desc desc;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
dev_info(&hdev->pdev->dev, "\n");
- rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
+ rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
le16_to_cpu(rx_buf_cmd->buf_num[i]));
@@ -1024,43 +1024,61 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
le16_to_cpu(rx_buf_cmd->shared_buf));
- cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ return 0;
+}
+
+static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_rx_com_wl *rx_com_wl;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
- rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
+ rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
dev_info(&hdev->pdev->dev, "\n");
dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
le16_to_cpu(rx_com_wl->com_wl.high),
le16_to_cpu(rx_com_wl->com_wl.low));
- cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ return 0;
+}
+
+static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev)
+{
+ struct hclge_rx_com_wl *rx_packet_cnt;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
- rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
+ rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
dev_info(&hdev->pdev->dev,
"rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
le16_to_cpu(rx_packet_cnt->com_wl.high),
le16_to_cpu(rx_packet_cnt->com_wl.low));
- dev_info(&hdev->pdev->dev, "\n");
- if (!hnae3_dev_dcb_supported(hdev)) {
- dev_info(&hdev->pdev->dev,
- "Only DCB-supported dev supports rx priv wl\n");
- return;
- }
- cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
- hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+ return 0;
+}
+
+static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_rx_priv_wl_buf *rx_priv_wl;
+ struct hclge_desc desc[2];
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
@@ -1077,13 +1095,21 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
le16_to_cpu(rx_priv_wl->tc_wl[i].high),
le16_to_cpu(rx_priv_wl->tc_wl[i].low));
- cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
- hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+ return 0;
+}
+
+static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_rx_com_thrd *rx_com_thrd;
+ struct hclge_desc desc[2];
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
dev_info(&hdev->pdev->dev, "\n");
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
@@ -1100,6 +1126,52 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
i + HCLGE_TC_NUM_ONE_DESC,
le16_to_cpu(rx_com_thrd->com_thrd[i].high),
le16_to_cpu(rx_com_thrd->com_thrd[i].low));
+
+ return 0;
+}
+
+static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
+{
+ enum hclge_opcode_type cmd;
+ int ret;
+
+ cmd = HCLGE_OPC_TX_BUFF_ALLOC;
+ ret = hclge_dbg_dump_tx_buf_cfg(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
+ ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
+ ret = hclge_dbg_dump_rx_common_wl_cfg(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
+ ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "\n");
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports rx priv wl\n");
+ return;
+ }
+
+ cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
+ ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
+ ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
return;
err_qos_cmd_send:
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 7d81ffed4dc0..34b744df6709 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
+#include <net/ipv6.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
@@ -4500,22 +4501,12 @@ static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
return hash_sets;
}
-static int hclge_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
+ struct ethtool_rxnfc *nfc,
+ struct hclge_rss_input_tuple_cmd *req)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hclge_rss_input_tuple_cmd *req;
- struct hclge_desc desc;
u8 tuple_sets;
- int ret;
-
- if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- req = (struct hclge_rss_input_tuple_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
@@ -4560,6 +4551,32 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
return -EINVAL;
}
+ return 0;
+}
+
+static int hclge_set_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_rss_input_tuple_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req = (struct hclge_rss_input_tuple_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
+
+ ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init rss tuple cmd, ret = %d\n", ret);
+ return ret;
+ }
+
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -4579,52 +4596,69 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
return 0;
}
-static int hclge_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
+ u8 *tuple_sets)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
- u8 tuple_sets;
-
- nfc->data = 0;
-
- switch (nfc->flow_type) {
+ switch (flow_type) {
case TCP_V4_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
break;
case UDP_V4_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
break;
case TCP_V6_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
break;
case UDP_V6_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
break;
case SCTP_V4_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
break;
case SCTP_V6_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
break;
case IPV4_FLOW:
case IPV6_FLOW:
- tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
+ *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
break;
default:
return -EINVAL;
}
- if (!tuple_sets)
- return 0;
+ return 0;
+}
+
+static u64 hclge_convert_rss_tuple(u8 tuple_sets)
+{
+ u64 tuple_data = 0;
if (tuple_sets & HCLGE_D_PORT_BIT)
- nfc->data |= RXH_L4_B_2_3;
+ tuple_data |= RXH_L4_B_2_3;
if (tuple_sets & HCLGE_S_PORT_BIT)
- nfc->data |= RXH_L4_B_0_1;
+ tuple_data |= RXH_L4_B_0_1;
if (tuple_sets & HCLGE_D_IP_BIT)
- nfc->data |= RXH_IP_DST;
+ tuple_data |= RXH_IP_DST;
if (tuple_sets & HCLGE_S_IP_BIT)
- nfc->data |= RXH_IP_SRC;
+ tuple_data |= RXH_IP_SRC;
+
+ return tuple_data;
+}
+
+static int hclge_get_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ u8 tuple_sets;
+ int ret;
+
+ nfc->data = 0;
+
+ ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
+ if (ret || !tuple_sets)
+ return ret;
+
+ nfc->data = hclge_convert_rss_tuple(tuple_sets);
return 0;
}
@@ -5508,12 +5542,10 @@ static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
BIT(INNER_IP_TOS);
/* check whether src/dst ip address used */
- if (!spec->ip6src[0] && !spec->ip6src[1] &&
- !spec->ip6src[2] && !spec->ip6src[3])
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
*unused_tuple |= BIT(INNER_SRC_IP);
- if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
- !spec->ip6dst[2] && !spec->ip6dst[3])
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
*unused_tuple |= BIT(INNER_DST_IP);
if (!spec->psrc)
@@ -5538,12 +5570,10 @@ static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
/* check whether src/dst ip address used */
- if (!spec->ip6src[0] && !spec->ip6src[1] &&
- !spec->ip6src[2] && !spec->ip6src[3])
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
*unused_tuple |= BIT(INNER_SRC_IP);
- if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
- !spec->ip6dst[2] && !spec->ip6dst[3])
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
*unused_tuple |= BIT(INNER_DST_IP);
if (!spec->l4_proto)
@@ -8323,36 +8353,18 @@ static void hclge_sync_mac_table(struct hclge_dev *hdev)
}
}
-void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
- enum HCLGE_MAC_ADDR_TYPE mac_type)
+static void hclge_build_del_list(struct list_head *list,
+ bool is_del_list,
+ struct list_head *tmp_del_list)
{
- int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
struct hclge_mac_node *mac_cfg, *tmp;
- struct hclge_dev *hdev = vport->back;
- struct list_head tmp_del_list, *list;
- int ret;
-
- if (mac_type == HCLGE_MAC_ADDR_UC) {
- list = &vport->uc_mac_list;
- unsync = hclge_rm_uc_addr_common;
- } else {
- list = &vport->mc_mac_list;
- unsync = hclge_rm_mc_addr_common;
- }
-
- INIT_LIST_HEAD(&tmp_del_list);
-
- if (!is_del_list)
- set_bit(vport->vport_id, hdev->vport_config_block);
-
- spin_lock_bh(&vport->mac_list_lock);
list_for_each_entry_safe(mac_cfg, tmp, list, node) {
switch (mac_cfg->state) {
case HCLGE_MAC_TO_DEL:
case HCLGE_MAC_ACTIVE:
list_del(&mac_cfg->node);
- list_add_tail(&mac_cfg->node, &tmp_del_list);
+ list_add_tail(&mac_cfg->node, tmp_del_list);
break;
case HCLGE_MAC_TO_ADD:
if (is_del_list) {
@@ -8362,10 +8374,18 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
break;
}
}
+}
- spin_unlock_bh(&vport->mac_list_lock);
+static void hclge_unsync_del_list(struct hclge_vport *vport,
+ int (*unsync)(struct hclge_vport *vport,
+ const unsigned char *addr),
+ bool is_del_list,
+ struct list_head *tmp_del_list)
+{
+ struct hclge_mac_node *mac_cfg, *tmp;
+ int ret;
- list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
+ list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
ret = unsync(vport, mac_cfg->mac_addr);
if (!ret || ret == -ENOENT) {
/* clear all mac addr from hardware, but remain these
@@ -8383,6 +8403,35 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
mac_cfg->state = HCLGE_MAC_TO_DEL;
}
}
+}
+
+void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
+ struct hclge_dev *hdev = vport->back;
+ struct list_head tmp_del_list, *list;
+
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ list = &vport->uc_mac_list;
+ unsync = hclge_rm_uc_addr_common;
+ } else {
+ list = &vport->mc_mac_list;
+ unsync = hclge_rm_mc_addr_common;
+ }
+
+ INIT_LIST_HEAD(&tmp_del_list);
+
+ if (!is_del_list)
+ set_bit(vport->vport_id, hdev->vport_config_block);
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_build_del_list(list, is_del_list, &tmp_del_list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
spin_lock_bh(&vport->mac_list_lock);
@@ -8789,32 +8838,16 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}
-static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
- bool is_kill, u16 vlan,
- __be16 proto)
+static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, u16 vlan,
+ struct hclge_desc *desc)
{
- struct hclge_vport *vport = &hdev->vport[vfid];
struct hclge_vlan_filter_vf_cfg_cmd *req0;
struct hclge_vlan_filter_vf_cfg_cmd *req1;
- struct hclge_desc desc[2];
u8 vf_byte_val;
u8 vf_byte_off;
int ret;
- /* if vf vlan table is full, firmware will close vf vlan filter, it
- * is unable and unnecessary to add new vlan id to vf vlan filter.
- * If spoof check is enable, and vf vlan is full, it shouldn't add
- * new vlan, because tx packets with these vlan id will be dropped.
- */
- if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
- if (vport->vf_info.spoofchk && vlan) {
- dev_err(&hdev->pdev->dev,
- "Can't add vlan due to spoof check is on and vf vlan table is full\n");
- return -EPERM;
- }
- return 0;
- }
-
hclge_cmd_setup_basic_desc(&desc[0],
HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
hclge_cmd_setup_basic_desc(&desc[1],
@@ -8844,12 +8877,22 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
return ret;
}
+ return 0;
+}
+
+static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, struct hclge_desc *desc)
+{
+ struct hclge_vlan_filter_vf_cfg_cmd *req;
+
+ req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
+
if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY 2
- if (!req0->resp_code || req0->resp_code == 1)
+ if (!req->resp_code || req->resp_code == 1)
return 0;
- if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
+ if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
set_bit(vfid, hdev->vf_vlan_full);
dev_warn(&hdev->pdev->dev,
"vf vlan table is full, vf vlan filter is disabled\n");
@@ -8858,10 +8901,10 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
dev_err(&hdev->pdev->dev,
"Add vf vlan filter fail, ret =%u.\n",
- req0->resp_code);
+ req->resp_code);
} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
- if (!req0->resp_code)
+ if (!req->resp_code)
return 0;
/* vf vlan filter is disabled when vf vlan table is full,
@@ -8869,17 +8912,46 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
* Just return 0 without warning, avoid massive verbose
* print logs when unload.
*/
- if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
+ if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
return 0;
dev_err(&hdev->pdev->dev,
"Kill vf vlan filter fail, ret =%u.\n",
- req0->resp_code);
+ req->resp_code);
}
return -EIO;
}
+static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, u16 vlan,
+ __be16 proto)
+{
+ struct hclge_vport *vport = &hdev->vport[vfid];
+ struct hclge_desc desc[2];
+ int ret;
+
+ /* if vf vlan table is full, firmware will close vf vlan filter, it
+ * is unable and unnecessary to add new vlan id to vf vlan filter.
+ * If spoof check is enable, and vf vlan is full, it shouldn't add
+ * new vlan, because tx packets with these vlan id will be dropped.
+ */
+ if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
+ if (vport->vf_info.spoofchk && vlan) {
+ dev_err(&hdev->pdev->dev,
+ "Can't add vlan due to spoof check is on and vf vlan table is full\n");
+ return -EPERM;
+ }
+ return 0;
+ }
+
+ ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
+ if (ret)
+ return ret;
+
+ return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
+}
+
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
u16 vlan_id, bool is_kill)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 0f93c2dd890d..46700c427849 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -176,36 +176,111 @@ void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
}
+struct vf_errcode {
+ u32 imp_errcode;
+ int common_errno;
+};
+
+static void hclgevf_cmd_copy_desc(struct hclgevf_hw *hw,
+ struct hclgevf_desc *desc, int num)
+{
+ struct hclgevf_desc *desc_to_use;
+ int handle = 0;
+
+ while (handle < num) {
+ desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+ *desc_to_use = desc[handle];
+ (hw->cmq.csq.next_to_use)++;
+ if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
+ hw->cmq.csq.next_to_use = 0;
+ handle++;
+ }
+}
+
static int hclgevf_cmd_convert_err_code(u16 desc_ret)
{
- switch (desc_ret) {
- case HCLGEVF_CMD_EXEC_SUCCESS:
- return 0;
- case HCLGEVF_CMD_NO_AUTH:
- return -EPERM;
- case HCLGEVF_CMD_NOT_SUPPORTED:
- return -EOPNOTSUPP;
- case HCLGEVF_CMD_QUEUE_FULL:
- return -EXFULL;
- case HCLGEVF_CMD_NEXT_ERR:
- return -ENOSR;
- case HCLGEVF_CMD_UNEXE_ERR:
- return -ENOTBLK;
- case HCLGEVF_CMD_PARA_ERR:
- return -EINVAL;
- case HCLGEVF_CMD_RESULT_ERR:
- return -ERANGE;
- case HCLGEVF_CMD_TIMEOUT:
- return -ETIME;
- case HCLGEVF_CMD_HILINK_ERR:
- return -ENOLINK;
- case HCLGEVF_CMD_QUEUE_ILLEGAL:
- return -ENXIO;
- case HCLGEVF_CMD_INVALID:
- return -EBADR;
- default:
- return -EIO;
+ struct vf_errcode hclgevf_cmd_errcode[] = {
+ {HCLGEVF_CMD_EXEC_SUCCESS, 0},
+ {HCLGEVF_CMD_NO_AUTH, -EPERM},
+ {HCLGEVF_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
+ {HCLGEVF_CMD_QUEUE_FULL, -EXFULL},
+ {HCLGEVF_CMD_NEXT_ERR, -ENOSR},
+ {HCLGEVF_CMD_UNEXE_ERR, -ENOTBLK},
+ {HCLGEVF_CMD_PARA_ERR, -EINVAL},
+ {HCLGEVF_CMD_RESULT_ERR, -ERANGE},
+ {HCLGEVF_CMD_TIMEOUT, -ETIME},
+ {HCLGEVF_CMD_HILINK_ERR, -ENOLINK},
+ {HCLGEVF_CMD_QUEUE_ILLEGAL, -ENXIO},
+ {HCLGEVF_CMD_INVALID, -EBADR},
+ };
+ u32 errcode_count = ARRAY_SIZE(hclgevf_cmd_errcode);
+ u32 i;
+
+ for (i = 0; i < errcode_count; i++)
+ if (hclgevf_cmd_errcode[i].imp_errcode == desc_ret)
+ return hclgevf_cmd_errcode[i].common_errno;
+
+ return -EIO;
+}
+
+static int hclgevf_cmd_check_retval(struct hclgevf_hw *hw,
+ struct hclgevf_desc *desc, int num, int ntc)
+{
+ u16 opcode, desc_ret;
+ int handle;
+
+ opcode = le16_to_cpu(desc[0].opcode);
+ for (handle = 0; handle < num; handle++) {
+ /* Get the result of hardware write back */
+ desc[handle] = hw->cmq.csq.desc[ntc];
+ ntc++;
+ if (ntc == hw->cmq.csq.desc_num)
+ ntc = 0;
}
+ if (likely(!hclgevf_is_special_opcode(opcode)))
+ desc_ret = le16_to_cpu(desc[num - 1].retval);
+ else
+ desc_ret = le16_to_cpu(desc[0].retval);
+ hw->cmq.last_status = desc_ret;
+
+ return hclgevf_cmd_convert_err_code(desc_ret);
+}
+
+static int hclgevf_cmd_check_result(struct hclgevf_hw *hw,
+ struct hclgevf_desc *desc, int num, int ntc)
+{
+ struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
+ bool is_completed = false;
+ u32 timeout = 0;
+ int handle, ret;
+
+ /* If the command is sync, wait for the firmware to write back,
+ * if multi descriptors to be sent, use the first one to check
+ */
+ if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
+ do {
+ if (hclgevf_cmd_csq_done(hw)) {
+ is_completed = true;
+ break;
+ }
+ udelay(1);
+ timeout++;
+ } while (timeout < hw->cmq.tx_timeout);
+ }
+
+ if (!is_completed)
+ ret = -EBADE;
+ else
+ ret = hclgevf_cmd_check_retval(hw, desc, num, ntc);
+
+ /* Clean the command send queue */
+ handle = hclgevf_cmd_csq_clean(hw);
+ if (handle < 0)
+ ret = handle;
+ else if (handle != num)
+ dev_warn(&hdev->pdev->dev,
+ "cleaned %d, need to clean %d\n", handle, num);
+ return ret;
}
/* hclgevf_cmd_send - send command to command queue
@@ -220,13 +295,7 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
{
struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
- struct hclgevf_desc *desc_to_use;
- bool complete = false;
- u32 timeout = 0;
- int handle = 0;
- int status = 0;
- u16 retval;
- u16 opcode;
+ int ret;
int ntc;
spin_lock_bh(&hw->cmq.csq.lock);
@@ -250,67 +319,18 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
* which will be use for hardware to write back
*/
ntc = hw->cmq.csq.next_to_use;
- opcode = le16_to_cpu(desc[0].opcode);
- while (handle < num) {
- desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
- *desc_to_use = desc[handle];
- (hw->cmq.csq.next_to_use)++;
- if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
- hw->cmq.csq.next_to_use = 0;
- handle++;
- }
+
+ hclgevf_cmd_copy_desc(hw, desc, num);
/* Write to hardware */
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
hw->cmq.csq.next_to_use);
- /* If the command is sync, wait for the firmware to write back,
- * if multi descriptors to be sent, use the first one to check
- */
- if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
- do {
- if (hclgevf_cmd_csq_done(hw))
- break;
- udelay(1);
- timeout++;
- } while (timeout < hw->cmq.tx_timeout);
- }
-
- if (hclgevf_cmd_csq_done(hw)) {
- complete = true;
- handle = 0;
-
- while (handle < num) {
- /* Get the result of hardware write back */
- desc_to_use = &hw->cmq.csq.desc[ntc];
- desc[handle] = *desc_to_use;
-
- if (likely(!hclgevf_is_special_opcode(opcode)))
- retval = le16_to_cpu(desc[handle].retval);
- else
- retval = le16_to_cpu(desc[0].retval);
-
- status = hclgevf_cmd_convert_err_code(retval);
- hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
- ntc++;
- handle++;
- if (ntc == hw->cmq.csq.desc_num)
- ntc = 0;
- }
- }
-
- if (!complete)
- status = -EBADE;
-
- /* Clean the command send queue */
- handle = hclgevf_cmd_csq_clean(hw);
- if (handle != num)
- dev_warn(&hdev->pdev->dev,
- "cleaned %d, need to clean %d\n", handle, num);
+ ret = hclgevf_cmd_check_result(hw, desc, num, ntc);
spin_unlock_bh(&hw->cmq.csq.lock);
- return status;
+ return ret;
}
static void hclgevf_set_default_capability(struct hclgevf_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index ece31693e624..700e068764c8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -873,25 +873,13 @@ static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
return hash_sets;
}
-static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+static int hclgevf_init_rss_tuple_cmd(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc,
+ struct hclgevf_rss_input_tuple_cmd *req)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- struct hclgevf_rss_input_tuple_cmd *req;
- struct hclgevf_desc desc;
u8 tuple_sets;
- int ret;
-
- if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
- return -EOPNOTSUPP;
-
- if (nfc->data &
- ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
@@ -936,6 +924,35 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
return -EINVAL;
}
+ return 0;
+}
+
+static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ struct hclgevf_rss_input_tuple_cmd *req;
+ struct hclgevf_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ if (nfc->data &
+ ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
+
+ ret = hclgevf_init_rss_tuple_cmd(handle, nfc, req);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init rss tuple cmd, ret = %d\n", ret);
+ return ret;
+ }
+
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -954,56 +971,73 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
return 0;
}
-static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+static int hclgevf_get_rss_tuple_by_flow_type(struct hclgevf_dev *hdev,
+ int flow_type, u8 *tuple_sets)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- u8 tuple_sets;
-
- if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
- return -EOPNOTSUPP;
-
- nfc->data = 0;
-
- switch (nfc->flow_type) {
+ switch (flow_type) {
case TCP_V4_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_tcp_en;
break;
case UDP_V4_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_udp_en;
break;
case TCP_V6_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_tcp_en;
break;
case UDP_V6_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_udp_en;
break;
case SCTP_V4_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_sctp_en;
break;
case SCTP_V6_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_sctp_en;
break;
case IPV4_FLOW:
case IPV6_FLOW:
- tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
+ *tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
break;
default:
return -EINVAL;
}
- if (!tuple_sets)
- return 0;
+ return 0;
+}
+
+static u64 hclgevf_convert_rss_tuple(u8 tuple_sets)
+{
+ u64 tuple_data = 0;
if (tuple_sets & HCLGEVF_D_PORT_BIT)
- nfc->data |= RXH_L4_B_2_3;
+ tuple_data |= RXH_L4_B_2_3;
if (tuple_sets & HCLGEVF_S_PORT_BIT)
- nfc->data |= RXH_L4_B_0_1;
+ tuple_data |= RXH_L4_B_0_1;
if (tuple_sets & HCLGEVF_D_IP_BIT)
- nfc->data |= RXH_IP_DST;
+ tuple_data |= RXH_IP_DST;
if (tuple_sets & HCLGEVF_S_IP_BIT)
- nfc->data |= RXH_IP_SRC;
+ tuple_data |= RXH_IP_SRC;
+
+ return tuple_data;
+}
+
+static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 tuple_sets;
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ nfc->data = 0;
+
+ ret = hclgevf_get_rss_tuple_by_flow_type(hdev, nfc->flow_type,
+ &tuple_sets);
+ if (ret || !tuple_sets)
+ return ret;
+
+ nfc->data = hclgevf_convert_rss_tuple(tuple_sets);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index dfeea587a27e..48a84c65804c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -121,7 +121,7 @@ static char *rpm_rx_stats_fields[] = {
"Packets received with FrameCheckSequenceErrors",
"Packets received with VLAN header",
"Error packets",
- "Packets recievd with unicast DMAC",
+ "Packets received with unicast DMAC",
"Packets received with multicast DMAC",
"Packets received with broadcast DMAC",
"Dropped packets",
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 237e5d3321d4..5fe74036a611 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -964,11 +964,11 @@ static int otx2_get_fecparam(struct net_device *netdev,
if (IS_ERR(rsp))
return PTR_ERR(rsp);
- if (rsp->fwdata.supported_fec <= FEC_MAX_INDEX) {
+ if (rsp->fwdata.supported_fec < FEC_MAX_INDEX) {
if (!rsp->fwdata.supported_fec)
fecparam->fec = ETHTOOL_FEC_NONE;
else
- fecparam->fec = fec[rsp->fwdata.supported_fec];
+ fecparam->fec = fec[rsp->fwdata.supported_fec - 1];
}
return 0;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
index 8c2b03151736..49e052273f30 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -581,7 +581,7 @@ int prestera_bridge_port_event(struct net_device *dev, unsigned long event,
static int prestera_port_attr_br_flags_set(struct prestera_port *port,
struct net_device *dev,
- unsigned long flags)
+ struct switchdev_brport_flags flags)
{
struct prestera_bridge_port *br_port;
int err;
@@ -590,15 +590,20 @@ static int prestera_port_attr_br_flags_set(struct prestera_port *port,
if (!br_port)
return 0;
- err = prestera_hw_port_flood_set(port, flags & BR_FLOOD);
- if (err)
- return err;
+ if (flags.mask & BR_FLOOD) {
+ err = prestera_hw_port_flood_set(port, flags.val & BR_FLOOD);
+ if (err)
+ return err;
+ }
- err = prestera_hw_port_learning_set(port, flags & BR_LEARNING);
- if (err)
- return err;
+ if (flags.mask & BR_LEARNING) {
+ err = prestera_hw_port_learning_set(port,
+ flags.val & BR_LEARNING);
+ if (err)
+ return err;
+ }
- memcpy(&br_port->flags, &flags, sizeof(flags));
+ memcpy(&br_port->flags, &flags.val, sizeof(flags.val));
return 0;
}
@@ -695,7 +700,8 @@ err_port_stp_set:
}
static int prestera_port_obj_attr_set(struct net_device *dev,
- const struct switchdev_attr *attr)
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
{
struct prestera_port *port = netdev_priv(dev);
int err = 0;
@@ -706,7 +712,7 @@ static int prestera_port_obj_attr_set(struct net_device *dev,
attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
- if (attr->u.brport_flags &
+ if (attr->u.brport_flags.mask &
~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
err = -EINVAL;
break;
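
Starting with this prestera hunk, every switchdev driver in the series converts from a plain unsigned long of bridge-port flags to struct switchdev_brport_flags, which carries a val/mask pair so a driver only touches settings whose mask bit is set. A minimal sketch of that contract follows, with hypothetical hardware hooks and illustrative bit positions; the same skip-unless-masked shape recurs in the mlxsw, ocelot, rocker and cpsw hunks below.

    /* Sketch of the mask/val contract: change a setting only when its bit
     * is set in .mask, applying the state from .val.
     */
    struct brport_flags {
            unsigned long val;              /* desired on/off state per flag */
            unsigned long mask;             /* which flags the caller is changing */
    };

    #define BR_LEARNING     (1UL << 0)      /* illustrative bit positions */
    #define BR_FLOOD        (1UL << 1)

    /* Assumed hardware hooks, stubbed out for the sketch. */
    static int hw_set_flood(int port, int on)    { (void)port; (void)on; return 0; }
    static int hw_set_learning(int port, int on) { (void)port; (void)on; return 0; }

    static int port_bridge_flags_set(int port, struct brport_flags flags)
    {
            int err;

            if (flags.mask & BR_FLOOD) {    /* unmasked flags are left alone */
                    err = hw_set_flood(port, !!(flags.val & BR_FLOOD));
                    if (err)
                            return err;
            }

            if (flags.mask & BR_LEARNING) {
                    err = hw_set_learning(port, !!(flags.val & BR_LEARNING));
                    if (err)
                            return err;
            }

            return 0;
    }
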
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index c38b791e2406..9d623e38d783 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -166,7 +166,6 @@ config MLX5_FPGA_TLS
depends on TLS=y || MLX5_CORE=m
depends on MLX5_CORE_EN
depends on MLX5_FPGA
- depends on XPS
select MLX5_EN_TLS
default n
help
@@ -181,7 +180,6 @@ config MLX5_TLS
depends on TLS_DEVICE
depends on TLS=y || MLX5_CORE=m
depends on MLX5_CORE_EN
- depends on XPS
select MLX5_ACCEL
select MLX5_EN_TLS
default n
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 20c4f3c2cf23..23b7e8d6386b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -653,11 +653,11 @@ err_port_bridge_vlan_learning_set:
return err;
}
-static int mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port
- *mlxsw_sp_port,
- unsigned long brport_flags)
+static int
+mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_brport_flags flags)
{
- if (brport_flags & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
+ if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
return -EINVAL;
return 0;
@@ -665,7 +665,7 @@ static int mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *orig_dev,
- unsigned long brport_flags)
+ struct switchdev_brport_flags flags)
{
struct mlxsw_sp_bridge_port *bridge_port;
int err;
@@ -675,29 +675,37 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port)
return 0;
- err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
- MLXSW_SP_FLOOD_TYPE_UC,
- brport_flags & BR_FLOOD);
- if (err)
- return err;
+ if (flags.mask & BR_FLOOD) {
+ err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
+ bridge_port,
+ MLXSW_SP_FLOOD_TYPE_UC,
+ flags.val & BR_FLOOD);
+ if (err)
+ return err;
+ }
- err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
- brport_flags & BR_LEARNING);
- if (err)
- return err;
+ if (flags.mask & BR_LEARNING) {
+ err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port,
+ bridge_port,
+ flags.val & BR_LEARNING);
+ if (err)
+ return err;
+ }
if (bridge_port->bridge_device->multicast_enabled)
goto out;
- err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
- MLXSW_SP_FLOOD_TYPE_MC,
- brport_flags &
- BR_MCAST_FLOOD);
- if (err)
- return err;
+ if (flags.mask & BR_MCAST_FLOOD) {
+ err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
+ bridge_port,
+ MLXSW_SP_FLOOD_TYPE_MC,
+ flags.val & BR_MCAST_FLOOD);
+ if (err)
+ return err;
+ }
out:
- memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
+ memcpy(&bridge_port->flags, &flags.val, sizeof(flags.val));
return 0;
}
@@ -887,7 +895,8 @@ mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_attr_set(struct net_device *dev,
- const struct switchdev_attr *attr)
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 1654a6e22a7d..d1a9cdbf7a3e 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1038,6 +1038,7 @@ EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask);
void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
u32 port_cfg;
if (!(BIT(port) & ocelot->bridge_mask))
@@ -1050,7 +1051,8 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
ocelot->bridge_fwd_mask |= BIT(port);
fallthrough;
case BR_STATE_LEARNING:
- port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA;
+ if (ocelot_port->learn_ena)
+ port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA;
break;
default:
@@ -1534,6 +1536,86 @@ int ocelot_get_max_mtu(struct ocelot *ocelot, int port)
}
EXPORT_SYMBOL(ocelot_get_max_mtu);
+static void ocelot_port_set_learning(struct ocelot *ocelot, int port,
+ bool enabled)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u32 val = 0;
+
+ if (enabled)
+ val = ANA_PORT_PORT_CFG_LEARN_ENA;
+
+ ocelot_rmw_gix(ocelot, val, ANA_PORT_PORT_CFG_LEARN_ENA,
+ ANA_PORT_PORT_CFG, port);
+
+ ocelot_port->learn_ena = enabled;
+}
+
+static void ocelot_port_set_ucast_flood(struct ocelot *ocelot, int port,
+ bool enabled)
+{
+ u32 val = 0;
+
+ if (enabled)
+ val = BIT(port);
+
+ ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_UC);
+}
+
+static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port,
+ bool enabled)
+{
+ u32 val = 0;
+
+ if (enabled)
+ val = BIT(port);
+
+ ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC);
+}
+
+static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port,
+ bool enabled)
+{
+ u32 val = 0;
+
+ if (enabled)
+ val = BIT(port);
+
+ ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_BC);
+}
+
+int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
+ BR_BCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_port_pre_bridge_flags);
+
+void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & BR_LEARNING)
+ ocelot_port_set_learning(ocelot, port,
+ !!(flags.val & BR_LEARNING));
+
+ if (flags.mask & BR_FLOOD)
+ ocelot_port_set_ucast_flood(ocelot, port,
+ !!(flags.val & BR_FLOOD));
+
+ if (flags.mask & BR_MCAST_FLOOD)
+ ocelot_port_set_mcast_flood(ocelot, port,
+ !!(flags.val & BR_MCAST_FLOOD));
+
+ if (flags.mask & BR_BCAST_FLOOD)
+ ocelot_port_set_bcast_flood(ocelot, port,
+ !!(flags.val & BR_BCAST_FLOOD));
+}
+EXPORT_SYMBOL(ocelot_port_bridge_flags);
+
void ocelot_init_port(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
@@ -1583,6 +1665,9 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
REW_PORT_VLAN_CFG_PORT_TPID_M,
REW_PORT_VLAN_CFG, port);
+ /* Disable source address learning for standalone mode */
+ ocelot_port_set_learning(ocelot, port, false);
+
/* Enable vcap lookups */
ocelot_vcap_enable(ocelot, port);
}
@@ -1716,7 +1801,7 @@ int ocelot_init(struct ocelot *ocelot)
/* Setup flooding PGIDs */
for (i = 0; i < ocelot->num_flooding_pgids; i++)
ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
- ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
+ ANA_FLOODING_FLD_BROADCAST(PGID_BC) |
ANA_FLOODING_FLD_UNICAST(PGID_UC),
ANA_FLOODING, i);
ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) |
@@ -1737,15 +1822,18 @@ int ocelot_init(struct ocelot *ocelot)
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port);
}
- /* Allow broadcast MAC frames. */
for_each_nonreserved_multicast_dest_pgid(ocelot, i) {
u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0));
ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
}
- ocelot_write_rix(ocelot,
- ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
- ANA_PGID_PGID, PGID_MC);
+ /* Allow broadcast and unknown L2 multicast to the CPU. */
+ ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
+ ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
+ ANA_PGID_PGID, PGID_MC);
+ ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
+ ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
+ ANA_PGID_PGID, PGID_BC);
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4);
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6);
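
The new ocelot_port_set_*_flood() helpers above all reduce to one read-modify-write of a port's bit in a PGID destination-mask register. A self-contained sketch of that pattern, modeling the register file as a plain array where the driver uses ocelot_rmw_rix() on ANA_PGID_PGID:

    #include <stdint.h>

    static uint32_t pgid_regs[64];          /* stand-in for ANA_PGID_PGID rows */

    /* Replace the bits selected by mask with val, like ocelot_rmw_rix(). */
    static void pgid_rmw(int pgid, uint32_t val, uint32_t mask)
    {
            pgid_regs[pgid] = (pgid_regs[pgid] & ~mask) | (val & mask);
    }

    /* Enable or disable flooding of one traffic class toward one port. */
    static void port_set_flood(int pgid, int port, int enabled)
    {
            pgid_rmw(pgid, enabled ? (1u << port) : 0, 1u << port);
    }
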
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 8f12fa45b1b5..b5ffe6724eb7 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -1005,7 +1005,8 @@ static void ocelot_port_attr_mc_set(struct ocelot *ocelot, int port, bool mc)
}
static int ocelot_port_attr_set(struct net_device *dev,
- const struct switchdev_attr *attr)
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
@@ -1025,6 +1026,13 @@ static int ocelot_port_attr_set(struct net_device *dev,
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled);
break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ err = ocelot_port_pre_bridge_flags(ocelot, port,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ ocelot_port_bridge_flags(ocelot, port, attr->u.brport_flags);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -1110,6 +1118,40 @@ static int ocelot_port_obj_del(struct net_device *dev,
return ret;
}
+static int ocelot_netdevice_bridge_join(struct ocelot *ocelot, int port,
+ struct net_device *bridge)
+{
+ struct switchdev_brport_flags flags;
+ int err;
+
+ flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = flags.mask;
+
+ err = ocelot_port_bridge_join(ocelot, port, bridge);
+ if (err)
+ return err;
+
+ ocelot_port_bridge_flags(ocelot, port, flags);
+
+ return 0;
+}
+
+static int ocelot_netdevice_bridge_leave(struct ocelot *ocelot, int port,
+ struct net_device *bridge)
+{
+ struct switchdev_brport_flags flags;
+ int err;
+
+ flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = flags.mask & ~BR_LEARNING;
+
+ err = ocelot_port_bridge_leave(ocelot, port, bridge);
+
+ ocelot_port_bridge_flags(ocelot, port, flags);
+
+ return err;
+}
+
static int ocelot_netdevice_changeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
@@ -1121,11 +1163,11 @@ static int ocelot_netdevice_changeupper(struct net_device *dev,
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking) {
- err = ocelot_port_bridge_join(ocelot, port,
- info->upper_dev);
+ err = ocelot_netdevice_bridge_join(ocelot, port,
+ info->upper_dev);
} else {
- err = ocelot_port_bridge_leave(ocelot, port,
- info->upper_dev);
+ err = ocelot_netdevice_bridge_leave(ocelot, port,
+ info->upper_dev);
}
}
if (netif_is_lag_master(info->upper_dev)) {
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 740a715c49c6..3473d296b2e2 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1576,7 +1576,7 @@ rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port *
static int
rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port,
- unsigned long brport_flags)
+ struct switchdev_brport_flags flags)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
unsigned long brport_flags_s;
@@ -1590,7 +1590,7 @@ rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port,
if (err)
return err;
- if (brport_flags & ~brport_flags_s)
+ if (flags.mask & ~brport_flags_s)
return -EINVAL;
return 0;
@@ -1598,14 +1598,14 @@ rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port,
static int
rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
- unsigned long brport_flags)
+ struct switchdev_brport_flags flags)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_attr_bridge_flags_set)
return -EOPNOTSUPP;
- return wops->port_attr_bridge_flags_set(rocker_port, brport_flags);
+ return wops->port_attr_bridge_flags_set(rocker_port, flags.val);
}
static int
@@ -2058,7 +2058,7 @@ static int rocker_port_attr_set(struct net_device *dev,
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
err = rocker_world_port_attr_pre_bridge_flags_set(rocker_port,
- attr->u.brport_flags);
+ attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
err = rocker_world_port_attr_bridge_flags_set(rocker_port,
diff --git a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
index 1067e7772dbf..d93ffd8a08b0 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
@@ -55,33 +55,38 @@ static int am65_cpsw_port_stp_state_set(struct am65_cpsw_port *port, u8 state)
static int am65_cpsw_port_attr_br_flags_set(struct am65_cpsw_port *port,
struct net_device *orig_dev,
- unsigned long brport_flags)
+ struct switchdev_brport_flags flags)
{
struct am65_cpsw_common *cpsw = port->common;
- bool unreg_mcast_add = false;
- if (brport_flags & BR_MCAST_FLOOD)
- unreg_mcast_add = true;
- netdev_dbg(port->ndev, "BR_MCAST_FLOOD: %d port %u\n",
- unreg_mcast_add, port->port_id);
+ if (flags.mask & BR_MCAST_FLOOD) {
+ bool unreg_mcast_add = false;
- cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(port->port_id),
- unreg_mcast_add);
+ if (flags.val & BR_MCAST_FLOOD)
+ unreg_mcast_add = true;
+
+ netdev_dbg(port->ndev, "BR_MCAST_FLOOD: %d port %u\n",
+ unreg_mcast_add, port->port_id);
+
+ cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(port->port_id),
+ unreg_mcast_add);
+ }
return 0;
}
static int am65_cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
- unsigned long flags)
+ struct switchdev_brport_flags flags)
{
- if (flags & ~(BR_LEARNING | BR_MCAST_FLOOD))
+ if (flags.mask & ~(BR_LEARNING | BR_MCAST_FLOOD))
return -EINVAL;
return 0;
}
static int am65_cpsw_port_attr_set(struct net_device *ndev,
- const struct switchdev_attr *attr)
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int ret;
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c
index 9967cf985728..a72bb570756f 100644
--- a/drivers/net/ethernet/ti/cpsw_switchdev.c
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.c
@@ -57,33 +57,38 @@ static int cpsw_port_stp_state_set(struct cpsw_priv *priv, u8 state)
static int cpsw_port_attr_br_flags_set(struct cpsw_priv *priv,
struct net_device *orig_dev,
- unsigned long brport_flags)
+ struct switchdev_brport_flags flags)
{
struct cpsw_common *cpsw = priv->cpsw;
- bool unreg_mcast_add = false;
- if (brport_flags & BR_MCAST_FLOOD)
- unreg_mcast_add = true;
- dev_dbg(priv->dev, "BR_MCAST_FLOOD: %d port %u\n",
- unreg_mcast_add, priv->emac_port);
+ if (flags.mask & BR_MCAST_FLOOD) {
+ bool unreg_mcast_add = false;
- cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(priv->emac_port),
- unreg_mcast_add);
+ if (flags.val & BR_MCAST_FLOOD)
+ unreg_mcast_add = true;
+
+ dev_dbg(priv->dev, "BR_MCAST_FLOOD: %d port %u\n",
+ unreg_mcast_add, priv->emac_port);
+
+ cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(priv->emac_port),
+ unreg_mcast_add);
+ }
return 0;
}
static int cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
- unsigned long flags)
+ struct switchdev_brport_flags flags)
{
- if (flags & ~(BR_LEARNING | BR_MCAST_FLOOD))
+ if (flags.mask & ~(BR_LEARNING | BR_MCAST_FLOOD))
return -EINVAL;
return 0;
}
static int cpsw_port_attr_set(struct net_device *ndev,
- const struct switchdev_attr *attr)
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
{
struct cpsw_priv *priv = netdev_priv(ndev);
int ret;