Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c')

 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 1590
 1 file changed, 738 insertions(+), 852 deletions(-)
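Two refactoring patterns dominate the hunks below. The most common is replacing offset-based memcpy() parsing of PF-to-VF mailbox responses with a cast to a packed little-endian struct followed by le16_to_cpu()/le32_to_cpu(). A minimal sketch of that pattern, using a hypothetical demo_vf_queue_info layout (the real layouts, such as hclge_mbx_vf_queue_info, live in hclge_mbx.h):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical mailbox response layout -- illustrative only; the real
 * driver defines hclge_mbx_vf_queue_info in hclge_mbx.h.  Fields are
 * wire-order little endian, so the __le16 annotations let sparse check
 * every access.
 */
struct demo_vf_queue_info {
	__le16 num_tqps;
	__le16 rss_size;
	__le16 rx_buf_len;
} __packed;

static void demo_parse_queue_info(const u8 *resp_msg, u16 *num_tqps,
				  u16 *rss_size, u16 *rx_buf_len)
{
	const struct demo_vf_queue_info *info;

	/* one cast instead of three offset-based memcpy() calls */
	info = (const struct demo_vf_queue_info *)resp_msg;
	*num_tqps = le16_to_cpu(info->num_tqps);
	*rss_size = le16_to_cpu(info->rss_size);
	*rx_buf_len = le16_to_cpu(info->rx_buf_len);
}

The struct cast keeps the endianness conversion visible at each field access, where the old per-offset memcpy() calls hid it. The second recurring pattern is the move from VF-private helpers (hclgevf_tqps_*, hclgevf_set_rss_*) to the shared hclge_comm_* library used by both PF and VF.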
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 1b9578d0bd80..8fcf220a120d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -6,199 +6,113 @@
 #include <net/rtnetlink.h>
 #include "hclgevf_cmd.h"
 #include "hclgevf_main.h"
+#include "hclgevf_regs.h"
 #include "hclge_mbx.h"
 #include "hnae3.h"
+#include "hclgevf_devlink.h"
+#include "hclge_comm_rss.h"
+#include "hclgevf_trace.h"

 #define HCLGEVF_NAME	"hclgevf"

 #define HCLGEVF_RESET_MAX_FAIL_CNT	5

 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
+static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
+				  unsigned long delay);
+
 static struct hnae3_ae_algo ae_algovf;

 static struct workqueue_struct *hclgevf_wq;

 static const struct pci_device_id ae_algovf_pci_tbl[] = {
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
+	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
 	/* required last entry */
 	{0, }
 };

-static const u8 hclgevf_hash_key[] = {
-	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
-	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
-	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
-	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
-	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
-};
-
 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

-static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
-					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
-					 HCLGEVF_CMDQ_TX_DEPTH_REG,
-					 HCLGEVF_CMDQ_TX_TAIL_REG,
-					 HCLGEVF_CMDQ_TX_HEAD_REG,
-					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
-					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
-					 HCLGEVF_CMDQ_RX_DEPTH_REG,
-					 HCLGEVF_CMDQ_RX_TAIL_REG,
-					 HCLGEVF_CMDQ_RX_HEAD_REG,
-					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
-					 HCLGEVF_VECTOR0_CMDQ_STATE_REG,
-					 HCLGEVF_CMDQ_INTR_EN_REG,
-					 HCLGEVF_CMDQ_INTR_GEN_REG};
-
-static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
-					   HCLGEVF_RST_ING,
-					   HCLGEVF_GRO_EN_REG};
-
-static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
-					 HCLGEVF_RING_RX_ADDR_H_REG,
-					 HCLGEVF_RING_RX_BD_NUM_REG,
-					 HCLGEVF_RING_RX_BD_LENGTH_REG,
-					 HCLGEVF_RING_RX_MERGE_EN_REG,
-					 HCLGEVF_RING_RX_TAIL_REG,
-					 HCLGEVF_RING_RX_HEAD_REG,
-					 HCLGEVF_RING_RX_FBD_NUM_REG,
-					 HCLGEVF_RING_RX_OFFSET_REG,
-					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
-					 HCLGEVF_RING_RX_STASH_REG,
-					 HCLGEVF_RING_RX_BD_ERR_REG,
-					 HCLGEVF_RING_TX_ADDR_L_REG,
-					 HCLGEVF_RING_TX_ADDR_H_REG,
-					 HCLGEVF_RING_TX_BD_NUM_REG,
-					 HCLGEVF_RING_TX_PRIORITY_REG,
-					 HCLGEVF_RING_TX_TC_REG,
-					 HCLGEVF_RING_TX_MERGE_EN_REG,
-					 HCLGEVF_RING_TX_TAIL_REG,
-					 HCLGEVF_RING_TX_HEAD_REG,
-					 HCLGEVF_RING_TX_FBD_NUM_REG,
-					 HCLGEVF_RING_TX_OFFSET_REG,
-					 HCLGEVF_RING_TX_EBD_NUM_REG,
-					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
-					 HCLGEVF_RING_TX_BD_ERR_REG,
-					 HCLGEVF_RING_EN_REG};
-
-static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
-					     HCLGEVF_TQP_INTR_GL0_REG,
-					     HCLGEVF_TQP_INTR_GL1_REG,
-					     HCLGEVF_TQP_INTR_GL2_REG,
-					     HCLGEVF_TQP_INTR_RL_REG};
-
-static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
+/* hclgevf_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num : the number of descriptors to be sent
+ *
+ * This is the main send command for command queue, it
+ * sends the queue, cleans the queue, etc
+ */
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num)
 {
-	if (!handle->client)
-		return container_of(handle, struct hclgevf_dev, nic);
-	else if (handle->client->type == HNAE3_CLIENT_ROCE)
-		return container_of(handle, struct hclgevf_dev, roce);
-	else
-		return container_of(handle, struct hclgevf_dev, nic);
+	return hclge_comm_cmd_send(&hw->hw, desc, num);
 }

-static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
+static void hclgevf_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+				   int num, bool is_special)
 {
-	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
-	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	struct hclgevf_desc desc;
-	struct hclgevf_tqp *tqp;
-	int status;
 	int i;

-	for (i = 0; i < kinfo->num_tqps; i++) {
-		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
-		hclgevf_cmd_setup_basic_desc(&desc,
-					     HCLGEVF_OPC_QUERY_RX_STATUS,
-					     true);
-
-		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
-		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
-		if (status) {
-			dev_err(&hdev->pdev->dev,
-				"Query tqp stat fail, status = %d,queue = %d\n",
-				status, i);
-			return status;
-		}
-		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
-			le32_to_cpu(desc.data[1]);
-
-		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
-					     true);
+	trace_hclge_vf_cmd_send(hw, desc, 0, num);

-		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
-		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
-		if (status) {
-			dev_err(&hdev->pdev->dev,
-				"Query tqp stat fail, status = %d,queue = %d\n",
-				status, i);
-			return status;
-		}
-		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
-			le32_to_cpu(desc.data[1]);
-	}
+	if (is_special)
+		return;

-	return 0;
+	for (i = 1; i < num; i++)
+		trace_hclge_vf_cmd_send(hw, &desc[i], i, num);
 }

-static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
+static void hclgevf_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+				  int num, bool is_special)
 {
-	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
-	struct hclgevf_tqp *tqp;
-	u64 *buff = data;
 	int i;

-	for (i = 0; i < kinfo->num_tqps; i++) {
-		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
-		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
-	}
-	for (i = 0; i < kinfo->num_tqps; i++) {
-		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
-		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
-	}
+	if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
+		return;

-	return buff;
-}
+	trace_hclge_vf_cmd_get(hw, desc, 0, num);

-static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
-{
-	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+	if (is_special)
+		return;

-	return kinfo->num_tqps * 2;
+	for (i = 1; i < num; i++)
+		trace_hclge_vf_cmd_get(hw, &desc[i], i, num);
 }

-static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
-{
-	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
-	u8 *buff = data;
-	int i = 0;
+static const struct hclge_comm_cmq_ops hclgevf_cmq_ops = {
+	.trace_cmd_send = hclgevf_trace_cmd_send,
+	.trace_cmd_get = hclgevf_trace_cmd_get,
+};

-	for (i = 0; i < kinfo->num_tqps; i++) {
-		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
-						       struct hclgevf_tqp, q);
-		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
-			 tqp->index);
-		buff += ETH_GSTRING_LEN;
-	}
+void hclgevf_arq_init(struct hclgevf_dev *hdev)
+{
+	struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq;

-	for (i = 0; i < kinfo->num_tqps; i++) {
-		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
-						       struct hclgevf_tqp, q);
-		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
-			 tqp->index);
-		buff += ETH_GSTRING_LEN;
-	}
+	spin_lock(&cmdq->crq.lock);
+	/* initialize the pointers of async rx queue of mailbox */
+	hdev->arq.hdev = hdev;
+	hdev->arq.head = 0;
+	hdev->arq.tail = 0;
+	atomic_set(&hdev->arq.count, 0);
+	spin_unlock(&cmdq->crq.lock);
+}

-	return buff;
+struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
+{
+	if (!handle->client)
+		return container_of(handle, struct hclgevf_dev, nic);
+	else if (handle->client->type == HNAE3_CLIENT_ROCE)
+		return container_of(handle, struct hclgevf_dev, roce);
+	else
+		return container_of(handle, struct hclgevf_dev, nic);
 }

-static void hclgevf_update_stats(struct hnae3_handle *handle,
-				 struct net_device_stats *net_stats)
+static void hclgevf_update_stats(struct hnae3_handle *handle)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 	int status;

-	status = hclgevf_tqps_update_stats(handle);
+	status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
 	if (status)
 		dev_err(&hdev->pdev->dev,
 			"VF update of TQPS stats fail, status = %d.\n",
@@ -210,23 +124,21 @@ static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
 	if (strset == ETH_SS_TEST)
 		return -EOPNOTSUPP;
 	else if (strset == ETH_SS_STATS)
-		return hclgevf_tqps_get_sset_count(handle, strset);
+		return hclge_comm_tqps_get_sset_count(handle);

 	return 0;
 }

 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
-				u8 *data)
+				u8 **data)
 {
-	u8 *p = (char *)data;
-
 	if (strset == ETH_SS_STATS)
-		p = hclgevf_tqps_get_strings(handle, p);
+		hclge_comm_tqps_get_strings(handle, data);
 }

 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
 {
-	hclgevf_tqps_get_stats(handle, data);
+	hclge_comm_tqps_get_stats(handle, data);
 }

 static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
@@ -239,23 +151,31 @@ static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
 	}
 }

-static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
+static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
 {
+	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+	u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+	struct hclge_basic_info *basic_info;
 	struct hclge_vf_to_pf_msg send_msg;
-	u8 resp_msg;
+	unsigned long caps;
 	int status;

-	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0);
-	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
+	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0);
+	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
 				      sizeof(resp_msg));
 	if (status) {
 		dev_err(&hdev->pdev->dev,
-			"VF request to get TC info from PF failed %d",
-			status);
+			"failed to get basic info from pf, ret = %d", status);
 		return status;
 	}

-	hdev->hw_tc_map = resp_msg;
+	basic_info = (struct hclge_basic_info *)resp_msg;
+
+	hdev->hw_tc_map = basic_info->hw_tc_map;
+	hdev->mbx_api_version = le16_to_cpu(basic_info->mbx_api_version);
+	caps = le32_to_cpu(basic_info->pf_caps);
+	if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
+		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

 	return 0;
 }

@@ -286,10 +206,8 @@ static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
 {
 #define HCLGEVF_TQPS_RSS_INFO_LEN	6
-#define HCLGEVF_TQPS_ALLOC_OFFSET	0
-#define HCLGEVF_TQPS_RSS_SIZE_OFFSET	2
-#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET	4

+	struct hclge_mbx_vf_queue_info *queue_info;
 	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
 	struct hclge_vf_to_pf_msg send_msg;
 	int status;
@@ -304,12 +222,10 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
 		return status;
 	}

-	memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
-	       sizeof(u16));
-	memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
-	       sizeof(u16));
-	memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
-	       sizeof(u16));
+	queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg;
+	hdev->num_tqps = le16_to_cpu(queue_info->num_tqps);
+	hdev->rss_size_max = le16_to_cpu(queue_info->rss_size);
+	hdev->rx_buf_len = le16_to_cpu(queue_info->rx_buf_len);

 	return 0;
 }

@@ -317,9 +233,8 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
 static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
 {
 #define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
-#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET	0
-#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET	2

+	struct hclge_mbx_vf_queue_depth *queue_depth;
 	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
 	struct hclge_vf_to_pf_msg send_msg;
 	int ret;
@@ -334,10 +249,9 @@ static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
 		return ret;
 	}

-	memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
-	       sizeof(u16));
-	memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
-	       sizeof(u16));
+	queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg;
+	hdev->num_tx_desc = le16_to_cpu(queue_depth->num_tx_desc);
+	hdev->num_rx_desc = le16_to_cpu(queue_depth->num_rx_desc);

 	return 0;
 }

@@ -351,11 +265,11 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
 	int ret;

 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
-	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
+	*(__le16 *)send_msg.data = cpu_to_le16(queue_id);
 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
 				   sizeof(resp_data));
 	if (!ret)
-		qid_in_pf = *(u16 *)resp_data;
+		qid_in_pf = le16_to_cpu(*(__le16 *)resp_data);

 	return qid_in_pf;
 }

@@ -384,11 +298,12 @@ static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
 {
-	struct hclgevf_tqp *tqp;
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+	struct hclge_comm_tqp *tqp;
 	int i;

 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
-				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
+				  sizeof(struct hclge_comm_tqp), GFP_KERNEL);
 	if (!hdev->htqp)
 		return -ENOMEM;

@@ -402,8 +317,28 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
 		tqp->q.buf_size = hdev->rx_buf_len;
 		tqp->q.tx_desc_num = hdev->num_tx_desc;
 		tqp->q.rx_desc_num = hdev->num_rx_desc;
-		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
-			i * HCLGEVF_TQP_REG_SIZE;
+
+		/* need an extended offset to configure queues >=
+		 * HCLGEVF_TQP_MAX_SIZE_DEV_V2.
+		 */
+		if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
+			tqp->q.io_base = hdev->hw.hw.io_base +
+					 HCLGEVF_TQP_REG_OFFSET +
+					 i * HCLGEVF_TQP_REG_SIZE;
+		else
+			tqp->q.io_base = hdev->hw.hw.io_base +
+					 HCLGEVF_TQP_REG_OFFSET +
+					 HCLGEVF_TQP_EXT_REG_OFFSET +
+					 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
+					 HCLGEVF_TQP_REG_SIZE;
+
+		/* when device supports tx push and has device memory,
+		 * the queue can execute push mode or doorbell mode on
+		 * device memory.
+		 */
+		if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
+			tqp->q.mem_base = hdev->hw.hw.mem_base +
+					  HCLGEVF_TQP_MEM_OFFSET(hdev, i);

 		tqp++;
 	}
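The alloc_tqps hunk above maps queue doorbells from two register windows: queues below HCLGEVF_TQP_MAX_SIZE_DEV_V2 sit in the base window, later queues in an extended window behind it. A standalone sketch of the same address arithmetic, with illustrative constants (the real values are defined in hclgevf_main.h and may differ):

#include <linux/types.h>

/* Illustrative constants only -- not the driver's actual definitions. */
#define DEMO_TQP_MAX_SIZE_DEV_V2	1024
#define DEMO_TQP_REG_OFFSET		0x10000
#define DEMO_TQP_EXT_REG_OFFSET		0x30000
#define DEMO_TQP_REG_SIZE		0x200

/* Offset of queue i's register block from io_base: queues below the
 * V2 limit use the base window, the rest use the extended window and
 * are re-indexed from zero within it.
 */
static u64 demo_tqp_io_offset(u32 i)
{
	if (i < DEMO_TQP_MAX_SIZE_DEV_V2)
		return DEMO_TQP_REG_OFFSET + (u64)i * DEMO_TQP_REG_SIZE;

	return DEMO_TQP_REG_OFFSET + DEMO_TQP_EXT_REG_OFFSET +
	       (u64)(i - DEMO_TQP_MAX_SIZE_DEV_V2) * DEMO_TQP_REG_SIZE;
}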
@@ -417,19 +352,20 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
 	struct hnae3_knic_private_info *kinfo;
 	u16 new_tqps = hdev->num_tqps;
 	unsigned int i;
+	u8 num_tc = 0;

 	kinfo = &nic->kinfo;
-	kinfo->num_tc = 0;
 	kinfo->num_tx_desc = hdev->num_tx_desc;
 	kinfo->num_rx_desc = hdev->num_rx_desc;
 	kinfo->rx_buf_len = hdev->rx_buf_len;
-	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
+	for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++)
 		if (hdev->hw_tc_map & BIT(i))
-			kinfo->num_tc++;
+			num_tc++;

-	kinfo->rss_size
-		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
-	new_tqps = kinfo->rss_size * kinfo->num_tc;
+	num_tc = num_tc ? num_tc : 1;
+	kinfo->tc_info.num_tc = num_tc;
+	kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc);
+	new_tqps = kinfo->rss_size * num_tc;
 	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
@@ -447,7 +383,7 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
 	 * and rss size with the actual vector numbers
 	 */
 	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
-	kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc,
+	kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc,
 				kinfo->rss_size);

 	return 0;
@@ -480,12 +416,11 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)

 	link_state =
 		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
-	if (link_state != hdev->hw.mac.link) {
+	hdev->hw.mac.link = link_state;
 	client->ops->link_status_change(handle, !!link_state);
 	if (rclient && rclient->ops->link_status_change)
 		rclient->ops->link_status_change(rhandle, !!link_state);
-	hdev->hw.mac.link = link_state;
 	}

 	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
@@ -512,8 +447,10 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)

 	nic->ae_algo = &ae_algovf;
 	nic->pdev = hdev->pdev;
-	nic->numa_node_mask = hdev->numa_node_mask;
+	bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
+		    MAX_NUMNODES);
 	nic->flags |= HNAE3_SUPPORT_VF;
+	nic->kinfo.io_base = hdev->hw.hw.io_base;

 	ret = hclgevf_knic_setup(hdev);
 	if (ret)
@@ -550,7 +487,7 @@ static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
 	for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
 		if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
 			vector->vector = pci_irq_vector(hdev->pdev, i);
-			vector->io_addr = hdev->hw.io_base +
+			vector->io_addr = hdev->hw.hw.io_base +
 				HCLGEVF_VECTOR_REG_BASE +
 				(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
 			hdev->vector_status[i] = 0;
@@ -580,131 +517,11 @@ static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
 	return -EINVAL;
 }

-static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
-				    const u8 hfunc, const u8 *key)
-{
-	struct hclgevf_rss_config_cmd *req;
-	unsigned int key_offset = 0;
-	struct hclgevf_desc desc;
-	int key_counts;
-	int key_size;
-	int ret;
-
-	key_counts = HCLGEVF_RSS_KEY_SIZE;
-	req = (struct hclgevf_rss_config_cmd *)desc.data;
-
-	while (key_counts) {
-		hclgevf_cmd_setup_basic_desc(&desc,
-					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
-					     false);
-
-		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
-		req->hash_config |=
-			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);
-
-		key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
-		memcpy(req->hash_key,
-		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);
-
-		key_counts -= key_size;
-		key_offset++;
-		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
-		if (ret) {
-			dev_err(&hdev->pdev->dev,
-				"Configure RSS config fail, status = %d\n",
-				ret);
-			return ret;
-		}
-	}
-
-	return 0;
-}
-
-static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
-{
-	return HCLGEVF_RSS_KEY_SIZE;
-}
-
-static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
-{
-	return HCLGEVF_RSS_IND_TBL_SIZE;
-}
-
-static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
-{
-	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
-	struct hclgevf_rss_indirection_table_cmd *req;
-	struct hclgevf_desc desc;
-	int status;
-	int i, j;
-
-	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
-
-	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
-		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
-					     false);
-		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
-		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
-		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
-			req->rss_result[j] =
-				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
-
-		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
-		if (status) {
-			dev_err(&hdev->pdev->dev,
-				"VF failed(=%d) to set RSS indirection table\n",
-				status);
-			return status;
-		}
-	}
-
-	return 0;
-}
-
-static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
-{
-	struct hclgevf_rss_tc_mode_cmd *req;
-	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
-	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
-	u16 tc_size[HCLGEVF_MAX_TC_NUM];
-	struct hclgevf_desc desc;
-	u16 roundup_size;
-	unsigned int i;
-	int status;
-
-	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
-
-	roundup_size = roundup_pow_of_two(rss_size);
-	roundup_size = ilog2(roundup_size);
-
-	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
-		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
-		tc_size[i] = roundup_size;
-		tc_offset[i] = rss_size * i;
-	}
-
-	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
-	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
-		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
-			      (tc_valid[i] & 0x1));
-		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
-				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
-		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
-				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
-	}
-	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
-	if (status)
-		dev_err(&hdev->pdev->dev,
-			"VF failed(=%d) to set rss tc mode\n", status);
-
-	return status;
-}
-
 /* for revision 0x20, vf shared the same rss config with pf */
 static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
 {
 #define HCLGEVF_RSS_MBX_RESP_LEN	8
-	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
 	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
 	struct hclge_vf_to_pf_msg send_msg;
 	u16 msg_num, hash_key_index;
@@ -712,7 +529,7 @@ static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
 	int ret;

 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
-	msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
+	msg_num = (HCLGE_COMM_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
 		  HCLGEVF_RSS_MBX_RESP_LEN;
 	for (index = 0; index < msg_num; index++) {
 		send_msg.data[0] = index;
@@ -729,7 +546,7 @@ static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
 		if (index == msg_num - 1)
 			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
 			       &resp_msg[0],
-			       HCLGEVF_RSS_KEY_SIZE - hash_key_index);
+			       HCLGE_COMM_RSS_KEY_SIZE - hash_key_index);
 		else
 			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
 			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
@@ -742,29 +559,11 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
 			   u8 *hfunc)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
-	int i, ret;
-
-	if (handle->pdev->revision >= 0x21) {
-		/* Get hash algorithm */
-		if (hfunc) {
-			switch (rss_cfg->hash_algo) {
-			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
-				*hfunc = ETH_RSS_HASH_TOP;
-				break;
-			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
-				*hfunc = ETH_RSS_HASH_XOR;
-				break;
-			default:
-				*hfunc = ETH_RSS_HASH_UNKNOWN;
-				break;
-			}
-		}
+	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
+	int ret;

-		/* Get the RSS Key required by the user */
-		if (key)
-			memcpy(key, rss_cfg->rss_hash_key,
-			       HCLGEVF_RSS_KEY_SIZE);
+	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+		hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
 	} else {
 		if (hfunc)
 			*hfunc = ETH_RSS_HASH_TOP;
@@ -773,13 +572,12 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
 			if (ret)
 				return ret;
 			memcpy(key, rss_cfg->rss_hash_key,
-			       HCLGEVF_RSS_KEY_SIZE);
+			       HCLGE_COMM_RSS_KEY_SIZE);
 		}
 	}

-	if (indir)
-		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
-			indir[i] = rss_cfg->rss_indirection_tbl[i];
+	hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
+				     hdev->ae_dev->dev_specs.rss_ind_tbl_size);

 	return 0;
 }

@@ -788,237 +586,69 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
 			   const u8 *key, const u8 hfunc)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
 	int ret, i;

-	if (handle->pdev->revision >= 0x21) {
-		/* Set the RSS Hash Key if specififed by the user */
-		if (key) {
-			switch (hfunc) {
-			case ETH_RSS_HASH_TOP:
-				rss_cfg->hash_algo =
-					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
-				break;
-			case ETH_RSS_HASH_XOR:
-				rss_cfg->hash_algo =
-					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
-				break;
-			case ETH_RSS_HASH_NO_CHANGE:
-				break;
-			default:
-				return -EINVAL;
-			}
-
-			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
-						       key);
-			if (ret)
-				return ret;
-
-			/* Update the shadow RSS key with user specified qids */
-			memcpy(rss_cfg->rss_hash_key, key,
-			       HCLGEVF_RSS_KEY_SIZE);
-		}
+	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+		ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key,
+						  hfunc);
+		if (ret)
+			return ret;
 	}

 	/* update the shadow RSS table with user specified qids */
-	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
 		rss_cfg->rss_indirection_tbl[i] = indir[i];

 	/* update the hardware */
-	return hclgevf_set_rss_indir_table(hdev);
-}
-
-static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
-{
-	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;
-
-	if (nfc->data & RXH_L4_B_2_3)
-		hash_sets |= HCLGEVF_D_PORT_BIT;
-	else
-		hash_sets &= ~HCLGEVF_D_PORT_BIT;
-
-	if (nfc->data & RXH_IP_SRC)
-		hash_sets |= HCLGEVF_S_IP_BIT;
-	else
-		hash_sets &= ~HCLGEVF_S_IP_BIT;
-
-	if (nfc->data & RXH_IP_DST)
-		hash_sets |= HCLGEVF_D_IP_BIT;
-	else
-		hash_sets &= ~HCLGEVF_D_IP_BIT;
-
-	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
-		hash_sets |= HCLGEVF_V_TAG_BIT;
-
-	return hash_sets;
+	return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
+					      rss_cfg->rss_indirection_tbl);
 }

 static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
-				 struct ethtool_rxnfc *nfc)
+				 const struct ethtool_rxfh_fields *nfc)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
-	struct hclgevf_rss_input_tuple_cmd *req;
-	struct hclgevf_desc desc;
-	u8 tuple_sets;
 	int ret;

-	if (handle->pdev->revision == 0x20)
+	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
 		return -EOPNOTSUPP;

-	if (nfc->data &
-	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
-		return -EINVAL;
-
-	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
-	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
-
-	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
-	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
-	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
-	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
-	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
-	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
-	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
-	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
-
-	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
-	switch (nfc->flow_type) {
-	case TCP_V4_FLOW:
-		req->ipv4_tcp_en = tuple_sets;
-		break;
-	case TCP_V6_FLOW:
-		req->ipv6_tcp_en = tuple_sets;
-		break;
-	case UDP_V4_FLOW:
-		req->ipv4_udp_en = tuple_sets;
-		break;
-	case UDP_V6_FLOW:
-		req->ipv6_udp_en = tuple_sets;
-		break;
-	case SCTP_V4_FLOW:
-		req->ipv4_sctp_en = tuple_sets;
-		break;
-	case SCTP_V6_FLOW:
-		if ((nfc->data & RXH_L4_B_0_1) ||
-		    (nfc->data & RXH_L4_B_2_3))
-			return -EINVAL;
-
-		req->ipv6_sctp_en = tuple_sets;
-		break;
-	case IPV4_FLOW:
-		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		break;
-	case IPV6_FLOW:
-		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
-	if (ret) {
+	ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
+				       &hdev->rss_cfg, nfc);
+	if (ret)
 		dev_err(&hdev->pdev->dev,
-			"Set rss tuple fail, status = %d\n", ret);
-		return ret;
-	}
+			"failed to set rss tuple, ret = %d.\n", ret);

-	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
-	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
-	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
-	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
-	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
-	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
-	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
-	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
-	return 0;
+	return ret;
 }

 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
-				 struct ethtool_rxnfc *nfc)
+				 struct ethtool_rxfh_fields *nfc)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
 	u8 tuple_sets;
+	int ret;

-	if (handle->pdev->revision == 0x20)
+	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
 		return -EOPNOTSUPP;

 	nfc->data = 0;

-	switch (nfc->flow_type) {
-	case TCP_V4_FLOW:
-		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
-		break;
-	case UDP_V4_FLOW:
-		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
-		break;
-	case TCP_V6_FLOW:
-		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
-		break;
-	case UDP_V6_FLOW:
-		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
-		break;
-	case SCTP_V4_FLOW:
-		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
-		break;
-	case SCTP_V6_FLOW:
-		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
-		break;
-	case IPV4_FLOW:
-	case IPV6_FLOW:
-		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	if (!tuple_sets)
-		return 0;
+	ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type,
+				       &tuple_sets);
+	if (ret || !tuple_sets)
+		return ret;

-	if (tuple_sets & HCLGEVF_D_PORT_BIT)
-		nfc->data |= RXH_L4_B_2_3;
-	if (tuple_sets & HCLGEVF_S_PORT_BIT)
-		nfc->data |= RXH_L4_B_0_1;
-	if (tuple_sets & HCLGEVF_D_IP_BIT)
-		nfc->data |= RXH_IP_DST;
-	if (tuple_sets & HCLGEVF_S_IP_BIT)
-		nfc->data |= RXH_IP_SRC;
+	nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);

 	return 0;
 }

-static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
-				       struct hclgevf_rss_cfg *rss_cfg)
-{
-	struct hclgevf_rss_input_tuple_cmd *req;
-	struct hclgevf_desc desc;
-	int ret;
-
-	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
-
-	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
-
-	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
-	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
-	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
-	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
-	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
-	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
-	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
-	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
-
-	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
-	if (ret)
-		dev_err(&hdev->pdev->dev,
-			"Configure rss input fail, status = %d\n", ret);
-	return ret;
-}
-
 static int hclgevf_get_tc_size(struct hnae3_handle *handle)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;

 	return rss_cfg->rss_size;
 }

@@ -1133,6 +763,7 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
 					bool en_uc_pmc, bool en_mc_pmc,
 					bool en_bc_pmc)
 {
+	struct hnae3_handle *handle = &hdev->nic;
 	struct hclge_vf_to_pf_msg send_msg;
 	int ret;

@@ -1141,6 +772,8 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
 	send_msg.en_bc = en_bc_pmc ? 1 : 0;
 	send_msg.en_uc = en_uc_pmc ? 1 : 0;
 	send_msg.en_mc = en_mc_pmc ? 1 : 0;
+	send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC,
+					     &handle->priv_flags) ? 1 : 0;

 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
 	if (ret)
@@ -1154,10 +787,9 @@ static int hclgevf_set_promisc_mode(struct hnae3_handle *handle,
 				    bool en_uc_pmc, bool en_mc_pmc)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	struct pci_dev *pdev = hdev->pdev;
 	bool en_bc_pmc;

-	en_bc_pmc = pdev->revision != 0x20;
+	en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;

 	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
 					    en_bc_pmc);
@@ -1168,6 +800,7 @@ static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

 	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
+	hclgevf_task_schedule(hdev, 0);
 }

 static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
@@ -1184,40 +817,36 @@ static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
 	}
 }

-static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
-			      int stream_id, bool enable)
+static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
+				       u16 stream_id, bool enable)
 {
 	struct hclgevf_cfg_com_tqp_queue_cmd *req;
-	struct hclgevf_desc desc;
-	int status;
+	struct hclge_desc desc;

 	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

-	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
-				     false);
+	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
 	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
 	req->stream_id = cpu_to_le16(stream_id);
 	if (enable)
 		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;

-	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
-	if (status)
-		dev_err(&hdev->pdev->dev,
-			"TQP enable fail, status =%d.\n", status);
-
-	return status;
+	return hclgevf_cmd_send(&hdev->hw, &desc, 1);
 }

-static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
+static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
 {
-	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
-	struct hclgevf_tqp *tqp;
-	int i;
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	int ret;
+	u16 i;

-	for (i = 0; i < kinfo->num_tqps; i++) {
-		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
-		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
+	for (i = 0; i < handle->kinfo.num_tqps; i++) {
+		ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable);
+		if (ret)
+			return ret;
 	}
+
+	return 0;
 }

 static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
@@ -1255,7 +884,7 @@ static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
 		ether_addr_copy(p, hdev->hw.mac.mac_addr);
 }

-static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
+static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p,
 				bool is_first)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -1334,7 +963,7 @@ static int hclgevf_update_mac_list(struct hnae3_handle *handle,

 	/* if the mac addr is already in the mac list, no need to add a new
 	 * one into it, just check the mac addr state, convert it to a new
-	 * new state, or just remove it, or do nothing.
+	 * state, or just remove it, or do nothing.
 	 */
 	mac_node = hclgevf_find_mac_node(list, addr);
 	if (mac_node) {
@@ -1420,15 +1049,18 @@ static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
 				    struct list_head *list,
 				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
 {
+	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
 	struct hclgevf_mac_addr_node *mac_node, *tmp;
 	int ret;

 	list_for_each_entry_safe(mac_node, tmp, list, node) {
 		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
 		if (ret) {
+			hnae3_format_mac_addr(format_mac_addr,
+					      mac_node->mac_addr);
 			dev_err(&hdev->pdev->dev,
-				"failed to configure mac %pM, state = %d, ret = %d\n",
-				mac_node->mac_addr, mac_node->state, ret);
+				"failed to configure mac %s, state = %d, ret = %d\n",
+				format_mac_addr, mac_node->state, ret);
 			return;
 		}
 		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
@@ -1460,8 +1092,7 @@ static void hclgevf_sync_from_add_list(struct list_head *add_list,
 			kfree(mac_node);
 		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
 			mac_node->state = HCLGEVF_MAC_TO_DEL;
-			list_del(&mac_node->node);
-			list_add_tail(&mac_node->node, mac_list);
+			list_move_tail(&mac_node->node, mac_list);
 		} else {
 			list_del(&mac_node->node);
 			kfree(mac_node);
@@ -1486,8 +1117,7 @@ static void hclgevf_sync_from_del_list(struct list_head *del_list,
 			list_del(&mac_node->node);
 			kfree(mac_node);
 		} else {
-			list_del(&mac_node->node);
-			list_add_tail(&mac_node->node, mac_list);
+			list_move_tail(&mac_node->node, mac_list);
 		}
 	}
 }
@@ -1523,8 +1153,7 @@ static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
 	list_for_each_entry_safe(mac_node, tmp, list, node) {
 		switch (mac_node->state) {
 		case HCLGEVF_MAC_TO_DEL:
-			list_del(&mac_node->node);
-			list_add_tail(&mac_node->node, &tmp_del_list);
+			list_move_tail(&mac_node->node, &tmp_del_list);
 			break;
 		case HCLGEVF_MAC_TO_ADD:
 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
@@ -1574,15 +1203,28 @@ static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
 	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
 }

+static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+	struct hclge_vf_to_pf_msg send_msg;
+
+	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
+		return -EOPNOTSUPP;
+
+	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+			       HCLGE_MBX_ENABLE_VLAN_FILTER);
+	send_msg.data[0] = enable ? 1 : 0;
+
+	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+}
+
 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
 				   __be16 proto, u16 vlan_id, bool is_kill)
 {
-#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET	0
-#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET	1
-#define HCLGEVF_VLAN_MBX_PROTO_OFFSET	3
-
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclge_mbx_vlan_filter *vlan_filter;
 	struct hclge_vf_to_pf_msg send_msg;
 	int ret;

@@ -1592,22 +1234,25 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
 	if (proto != htons(ETH_P_8021Q))
 		return -EPROTONOSUPPORT;

-	/* When device is resetting, firmware is unable to handle
-	 * mailbox. Just record the vlan id, and remove it after
+	/* When device is resetting or reset failed, firmware is unable to
+	 * handle mailbox. Just record the vlan id, and remove it after
 	 * reset finished.
 	 */
-	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+	if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
 		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
 		return -EBUSY;
+	} else if (!is_kill && test_bit(vlan_id, hdev->vlan_del_fail_bmap)) {
+		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
 	}

 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
 			       HCLGE_MBX_VLAN_FILTER);
-	send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
-	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
-	       sizeof(vlan_id));
-	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
-	       sizeof(proto));
+	vlan_filter = (struct hclge_mbx_vlan_filter *)send_msg.data;
+	vlan_filter->is_kill = is_kill;
+	vlan_filter->vlan_id = cpu_to_le16(vlan_id);
+	vlan_filter->proto = cpu_to_le16(be16_to_cpu(proto));
+
 	/* when remove hw vlan filter failed, record the vlan id,
 	 * and try to remove it from hw later, to be consistence
 	 * with stack.
@@ -1626,25 +1271,29 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
 	int ret, sync_cnt = 0;
 	u16 vlan_id;

+	if (bitmap_empty(hdev->vlan_del_fail_bmap, VLAN_N_VID))
+		return;
+
+	rtnl_lock();
 	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
 	while (vlan_id != VLAN_N_VID) {
 		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
 					      vlan_id, true);
 		if (ret)
-			return;
+			break;

 		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
 		sync_cnt++;
 		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
-			return;
+			break;

 		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
 	}
+	rtnl_unlock();
 }

-static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+static int hclgevf_en_hw_strip_rxvtag_cmd(struct hclgevf_dev *hdev, bool enable)
 {
-	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 	struct hclge_vf_to_pf_msg send_msg;

 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
@@ -1653,29 +1302,64 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
 }

-static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	int ret;
+
+	ret = hclgevf_en_hw_strip_rxvtag_cmd(hdev, enable);
+	if (ret)
+		return ret;
+
+	hdev->rxvtag_strip_en = enable;
+	return 0;
+}
+
+static int hclgevf_reset_tqp(struct hnae3_handle *handle)
+{
+#define HCLGEVF_RESET_ALL_QUEUE_DONE	1U
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 	struct hclge_vf_to_pf_msg send_msg;
+	u8 return_status = 0;
 	int ret;
+	u16 i;

 	/* disable vf queue before send queue reset msg to PF */
-	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
-	if (ret)
+	ret = hclgevf_tqp_enable(handle, false);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n",
+			ret);
 		return ret;
+	}

 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
-	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
-	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+
+	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status,
+				   sizeof(return_status));
+	if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE)
+		return ret;
+
+	for (i = 1; i < handle->kinfo.num_tqps; i++) {
+		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
+		*(__le16 *)send_msg.data = cpu_to_le16(i);
+		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }

 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclge_mbx_mtu_info *mtu_info;
 	struct hclge_vf_to_pf_msg send_msg;

 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
-	memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
+	mtu_info = (struct hclge_mbx_mtu_info *)send_msg.data;
+	mtu_info->mtu = cpu_to_le32(new_mtu);
+
 	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
 }

@@ -1701,6 +1385,37 @@ static int hclgevf_notify_client(struct hclgevf_dev *hdev,
 	return ret;
 }

+static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
+				      enum hnae3_reset_notify_type type)
+{
+	struct hnae3_client *client = hdev->roce_client;
+	struct hnae3_handle *handle = &hdev->roce;
+	int ret;
+
+	if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client)
+		return 0;
+
+	if (!client->ops->reset_notify)
+		return -EOPNOTSUPP;
+
+	ret = client->ops->reset_notify(handle, type);
+	if (ret)
+		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
+			type, ret);
+	return ret;
+}
+
+static void hclgevf_set_reset_pending(struct hclgevf_dev *hdev,
+				      enum hnae3_reset_type reset_type)
+{
+	/* When an incorrect reset type is executed, the get_reset_level
+	 * function generates the HNAE3_NONE_RESET flag. As a result, this
+	 * type do not need to pending.
+	 */
+	if (reset_type != HNAE3_NONE_RESET)
+		set_bit(reset_type, &hdev->reset_pending);
+}
+
 static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
 {
 #define HCLGEVF_RESET_WAIT_US	20000
@@ -1712,13 +1427,13 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
 	int ret;

 	if (hdev->reset_type == HNAE3_VF_RESET)
-		ret = readl_poll_timeout(hdev->hw.io_base +
+		ret = readl_poll_timeout(hdev->hw.hw.io_base +
 					 HCLGEVF_VF_RST_ING, val,
 					 !(val & HCLGEVF_VF_RST_ING_BIT),
 					 HCLGEVF_RESET_WAIT_US,
 					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
 	else
-		ret = readl_poll_timeout(hdev->hw.io_base +
+		ret = readl_poll_timeout(hdev->hw.hw.io_base +
 					 HCLGEVF_RST_ING, val,
 					 !(val & HCLGEVF_RST_ING_BITS),
 					 HCLGEVF_RESET_WAIT_US,
@@ -1727,7 +1442,7 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
 	/* hardware completion status should be available by this time */
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
-			"could'nt get reset done status from h/w, timeout!\n");
+			"couldn't get reset done status from h/w, timeout!\n");
 		return ret;
 	}

@@ -1735,7 +1450,10 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
 	 * might happen in case reset assertion was made by PF. Yes, this also
 	 * means we might end up waiting bit more even for VF reset.
 	 */
-	msleep(5000);
+	if (hdev->reset_type == HNAE3_VF_FULL_RESET)
+		msleep(5000);
+	else
+		msleep(500);

 	return 0;
 }

@@ -1744,13 +1462,13 @@ static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
 {
 	u32 reg_val;

-	reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
+	reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
 	if (enable)
 		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
 	else
 		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;

-	hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
+	hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG,
 			  reg_val);
 }

@@ -1787,23 +1505,28 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
 {
 #define HCLGEVF_RESET_SYNC_TIME 100

-	struct hclge_vf_to_pf_msg send_msg;
-	int ret = 0;
-
 	if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
+		struct hclge_vf_to_pf_msg send_msg;
+		int ret;
+
 		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
 		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"failed to assert VF reset, ret = %d\n", ret);
+			return ret;
+		}
 		hdev->rst_stats.vf_func_rst_cnt++;
 	}

-	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
 	/* inform hardware that preparatory work is done */
 	msleep(HCLGEVF_RESET_SYNC_TIME);
 	hclgevf_reset_handshake(hdev, true);
-	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
-		 hdev->reset_type, ret);
+	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
+		 hdev->reset_type);

-	return ret;
+	return 0;
 }

 static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
@@ -1825,9 +1548,9 @@ static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
 	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
 		 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
 	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
-		 hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG));
+		 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG));
 	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
-		 hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
+		 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG));
 	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
 		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
 	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
@@ -1842,7 +1565,7 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
 		 hdev->rst_stats.rst_fail_cnt);

 	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
-		set_bit(hdev->reset_type, &hdev->reset_pending);
+		hclgevf_set_reset_pending(hdev, hdev->reset_type);

 	if (hclgevf_is_reset_pending(hdev)) {
 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
@@ -1859,6 +1582,11 @@ static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)

 	hdev->rst_stats.rst_cnt++;

+	/* perform reset of the stack & ae device for a client */
+	ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
+	if (ret)
+		return ret;
+
 	rtnl_lock();
 	/* bring down the nic to stop any ongoing TX/RX */
 	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
@@ -1874,6 +1602,9 @@ static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
 	int ret;

 	hdev->rst_stats.hw_rst_done_cnt++;
+	ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
+	if (ret)
+		return ret;

 	rtnl_lock();
 	/* now, re-initialize the nic client and ae device */
@@ -1884,6 +1615,18 @@ static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
 		return ret;
 	}

+	ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+	/* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1
+	 * times
+	 */
+	if (ret &&
+	    hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1)
+		return ret;
+
+	ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT);
+	if (ret)
+		return ret;
+
 	hdev->last_reset_time = jiffies;
 	hdev->rst_stats.rst_done_cnt++;
 	hdev->rst_stats.rst_fail_cnt = 0;
@@ -1916,8 +1659,7 @@ err_reset:
 	hclgevf_reset_err_handle(hdev);
 }

-static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
-						     unsigned long *addr)
+static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr)
 {
 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

@@ -1943,6 +1685,8 @@ static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
 		clear_bit(HNAE3_FLR_RESET, addr);
 	}

+	clear_bit(HNAE3_NONE_RESET, addr);
+
 	return rst_level;
 }

@@ -1952,15 +1696,15 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
 	struct hclgevf_dev *hdev = ae_dev->priv;

-	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
-
 	if (hdev->default_reset_request)
 		hdev->reset_level =
-			hclgevf_get_reset_level(hdev,
-						&hdev->default_reset_request);
+			hclgevf_get_reset_level(&hdev->default_reset_request);
 	else
 		hdev->reset_level = HNAE3_VF_FUNC_RESET;

+	dev_info(&hdev->pdev->dev, "received reset request from VF enet, reset level is %d\n",
+		 hdev->reset_level);
+
 	/* reset of this VF requested */
 	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
 	hclgevf_reset_task_schedule(hdev);
@@ -1971,8 +1715,20 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
 					  enum hnae3_reset_type rst_type)
 {
+#define HCLGEVF_SUPPORT_RESET_TYPE \
+	(BIT(HNAE3_VF_RESET) | BIT(HNAE3_VF_FUNC_RESET) | \
+	 BIT(HNAE3_VF_PF_FUNC_RESET) | BIT(HNAE3_VF_FULL_RESET) | \
+	 BIT(HNAE3_FLR_RESET) | BIT(HNAE3_VF_EXP_RESET))
+
 	struct hclgevf_dev *hdev = ae_dev->priv;

+	if (!(BIT(rst_type) & HCLGEVF_SUPPORT_RESET_TYPE)) {
+		/* To prevent reset triggered by hclge_reset_event */
+		set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
+		dev_info(&hdev->pdev->dev, "unsupported reset type %d\n",
+			 rst_type);
+		return;
+	}
 	set_bit(rst_type, &hdev->default_reset_request);
 }

@@ -1981,41 +1737,40 @@ static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
 	writel(en ? 1 : 0, vector->addr);
 }

-static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
+static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
+					  enum hnae3_reset_type rst_type)
 {
-#define HCLGEVF_FLR_RETRY_WAIT_MS	500
-#define HCLGEVF_FLR_RETRY_CNT		5
+#define HCLGEVF_RESET_RETRY_WAIT_MS	500
+#define HCLGEVF_RESET_RETRY_CNT		5

 	struct hclgevf_dev *hdev = ae_dev->priv;
 	int retry_cnt = 0;
 	int ret;

-retry:
-	down(&hdev->reset_sem);
-	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
-	hdev->reset_type = HNAE3_FLR_RESET;
-	ret = hclgevf_reset_prepare(hdev);
-	if (ret) {
-		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
-			ret);
-		if (hdev->reset_pending ||
-		    retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) {
-			dev_err(&hdev->pdev->dev,
-				"reset_pending:0x%lx, retry_cnt:%d\n",
-				hdev->reset_pending, retry_cnt);
-			clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
-			up(&hdev->reset_sem);
-			msleep(HCLGEVF_FLR_RETRY_WAIT_MS);
-			goto retry;
-		}
+	while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
+		down(&hdev->reset_sem);
+		set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+		hdev->reset_type = rst_type;
+		ret = hclgevf_reset_prepare(hdev);
+		if (!ret && !hdev->reset_pending)
+			break;
+
+		dev_err(&hdev->pdev->dev,
+			"failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
+			ret, hdev->reset_pending, retry_cnt);
+		clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+		up(&hdev->reset_sem);
+		msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
 	}

-	/* disable misc vector before FLR done */
+	/* disable misc vector before reset done */
 	hclgevf_enable_vector(&hdev->misc_vector, false);
-	hdev->rst_stats.flr_rst_cnt++;
+
+	if (hdev->reset_type == HNAE3_FLR_RESET)
+		hdev->rst_stats.flr_rst_cnt++;
 }

-static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
+static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev)
 {
 	struct hclgevf_dev *hdev = ae_dev->priv;
 	int ret;
@@ -2028,8 +1783,8 @@ static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
 			ret);

 	hdev->reset_type = HNAE3_NONE_RESET;
-	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
-	up(&hdev->reset_sem);
+	if (test_and_clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+		up(&hdev->reset_sem);
 }

 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
@@ -2045,7 +1800,7 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)

 	vector->vector_irq = pci_irq_vector(hdev->pdev,
 					    HCLGEVF_MISC_VECTOR_NUM);
-	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
+	vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
 	/* vector status always valid for Vector 0 */
 	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
 	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
@@ -2057,6 +1812,7 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
 {
 	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+	    test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
 	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
 			      &hdev->state))
 		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
@@ -2090,7 +1846,7 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)

 	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
 			       &hdev->reset_state)) {
-		/* PF has initmated that it is about to reset the hardware.
+		/* PF has intimated that it is about to reset the hardware.
 		 * We now have to poll & check if hardware has actually
 		 * completed the reset sequence. On hardware reset completion,
 		 * VF needs to reset the client and ae device.
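The reset hunks around this point funnel every pending reset through a single bitmap, from which the service task picks the most severe outstanding level. A standalone sketch of that selection idiom, using an illustrative enum rather than the driver's real hnae3_reset_type values:

#include <linux/bitops.h>

/* Illustrative reset levels, ordered by severity -- not the driver's
 * actual hnae3_reset_type definitions.
 */
enum demo_reset_type {
	DEMO_NONE_RESET,
	DEMO_VF_FUNC_RESET,
	DEMO_VF_PF_FUNC_RESET,
	DEMO_VF_FULL_RESET,
};

/* Test the pending bitmap from most to least severe, consume the
 * chosen bit, and fall back to "none" when nothing is pending --
 * the same shape as hclgevf_get_reset_level().
 */
static enum demo_reset_type demo_get_reset_level(unsigned long *addr)
{
	if (test_bit(DEMO_VF_FULL_RESET, addr)) {
		clear_bit(DEMO_VF_FULL_RESET, addr);
		return DEMO_VF_FULL_RESET;
	}
	if (test_bit(DEMO_VF_PF_FUNC_RESET, addr)) {
		clear_bit(DEMO_VF_PF_FUNC_RESET, addr);
		return DEMO_VF_PF_FUNC_RESET;
	}
	if (test_bit(DEMO_VF_FUNC_RESET, addr)) {
		clear_bit(DEMO_VF_FUNC_RESET, addr);
		return DEMO_VF_FUNC_RESET;
	}
	return DEMO_NONE_RESET;
}

Clearing the chosen bit is what lets the caller loop (or, after this diff, retry on the next scheduled task) until the bitmap drains; the new hclgevf_set_reset_pending() helper is the matching producer side, filtering out HNAE3_NONE_RESET before it can pollute the bitmap.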
@@ -2098,9 +1854,9 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
 		hdev->reset_attempts = 0;

 		hdev->last_reset_time = jiffies;
-		while ((hdev->reset_type =
-			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
-		       != HNAE3_NONE_RESET)
+		hdev->reset_type =
+			hclgevf_get_reset_level(&hdev->reset_pending);
+		if (hdev->reset_type != HNAE3_NONE_RESET)
 			hclgevf_reset(hdev);
 	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
 				      &hdev->reset_state)) {
@@ -2129,14 +1885,14 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
 	 */
 	if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
 		/* prepare for full reset of stack + pcie interface */
-		set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
+		hclgevf_set_reset_pending(hdev, HNAE3_VF_FULL_RESET);

 		/* "defer" schedule the reset task again */
 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
 	} else {
 		hdev->reset_attempts++;

-		set_bit(hdev->reset_level, &hdev->reset_pending);
+		hclgevf_set_reset_pending(hdev, hdev->reset_level);
 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
 	}
 	hclgevf_reset_task_schedule(hdev);
@@ -2165,7 +1921,7 @@ static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
 	struct hclge_vf_to_pf_msg send_msg;
 	int ret;

-	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
+	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
 		return;

 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
@@ -2180,6 +1936,10 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
 	unsigned long delta = round_jiffies_relative(HZ);
 	struct hnae3_handle *handle = &hdev->nic;

+	if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state) ||
+	    test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
+		return;
+
 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
 		delta = jiffies - hdev->last_serv_processed;

@@ -2199,12 +1959,13 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
 	}

 	if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
-		hclgevf_tqps_update_stats(handle);
+		hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);

-	/* request the link status from the PF. PF would be able to tell VF
-	 * about such updates in future so we might remove this later
+	/* VF does not need to request link status when this bit is set, because
+	 * PF will push its link status to VFs when link status changed.
 	 */
-	hclgevf_request_link_info(hdev);
+	if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state))
+		hclgevf_request_link_info(hdev);

 	hclgevf_update_link_mode(hdev);

@@ -2239,7 +2000,7 @@ static void hclgevf_service_task(struct work_struct *work)

 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
 {
-	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
+	hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr);
 }

 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
@@ -2249,15 +2010,14 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,

 	/* fetch the events from their corresponding regs */
 	cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
-					 HCLGEVF_VECTOR0_CMDQ_STATE_REG);
-
+					 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG);
 	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
 		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
 		dev_info(&hdev->pdev->dev,
 			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
-		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
+		hclgevf_set_reset_pending(hdev, HNAE3_VF_RESET);
 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
-		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
 		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
 		hdev->rst_stats.vf_rst_cnt++;
 		/* set up VF hardware reset status, its PF will clear
@@ -2278,7 +2038,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
 	 * register, so we should just write 0 to the bit we are
 	 * handling, and keep other bits as cmdq_stat_reg.
 	 */
-	if (hdev->pdev->revision >= 0x21)
+	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
 		*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
 	else
 		*clearval = cmdq_stat_reg &
@@ -2295,18 +2055,31 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
 	return HCLGEVF_VECTOR0_EVENT_OTHER;
 }

+static void hclgevf_reset_timer(struct timer_list *t)
+{
+	struct hclgevf_dev *hdev = timer_container_of(hdev, t, reset_timer);
+
+	hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
+	hclgevf_reset_task_schedule(hdev);
+}
+
 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
 {
+#define HCLGEVF_RESET_DELAY	5
+
 	enum hclgevf_evt_cause event_cause;
 	struct hclgevf_dev *hdev = data;
 	u32 clearval;

 	hclgevf_enable_vector(&hdev->misc_vector, false);
 	event_cause = hclgevf_check_evt_cause(hdev, &clearval);
+	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
+		hclgevf_clear_event_cause(hdev, clearval);

 	switch (event_cause) {
 	case HCLGEVF_VECTOR0_EVENT_RST:
-		hclgevf_reset_task_schedule(hdev);
+		mod_timer(&hdev->reset_timer,
+			  jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY));
 		break;
 	case HCLGEVF_VECTOR0_EVENT_MBX:
 		hclgevf_mbx_handler(hdev);
@@ -2315,10 +2088,7 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
 		break;
 	}

-	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
-		hclgevf_clear_event_cause(hdev, clearval);
-		hclgevf_enable_vector(&hdev->misc_vector, true);
-	}
+	hclgevf_enable_vector(&hdev->misc_vector, true);

 	return IRQ_HANDLED;
 }

@@ -2327,6 +2097,12 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
 {
 	int ret;

+	hdev->gro_en = true;
+
+	ret = hclgevf_get_basic_info(hdev);
+	if (ret)
+		return ret;
+
 	/* get current port based vlan state from PF */
 	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
 	if (ret)
@@ -2342,12 +2118,7 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
 	if (ret)
 		return ret;

-	ret = hclgevf_get_pf_media_type(hdev);
-	if (ret)
-		return ret;
-
-	/* get tc configuration from PF */
-	return hclgevf_get_tc_info(hdev);
+	return hclgevf_get_pf_media_type(hdev);
 }

 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
@@ -2377,32 +2148,33 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
 	    hdev->num_msi_left == 0)
 		return -EINVAL;

-	roce->rinfo.base_vector = hdev->roce_base_vector;
+	roce->rinfo.base_vector = hdev->roce_base_msix_offset;

 	roce->rinfo.netdev = nic->kinfo.netdev;
-	roce->rinfo.roce_io_base = hdev->hw.io_base;
+	roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
+	roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;

 	roce->pdev = nic->pdev;
 	roce->ae_algo = nic->ae_algo;
-	roce->numa_node_mask = nic->numa_node_mask;
-
+	bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits,
+		    MAX_NUMNODES);
 	return 0;
 }

-static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
+static int hclgevf_config_gro(struct hclgevf_dev *hdev)
 {
 	struct hclgevf_cfg_gro_status_cmd *req;
-	struct hclgevf_desc desc;
+	struct hclge_desc desc;
 	int ret;

-	if (!hnae3_dev_gro_supported(hdev))
+	if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
 		return 0;

-	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
+	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG,
 				     false);
 	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;

-	req->gro_en = en ? 1 : 0;
+	req->gro_en = hdev->gro_en ? 1 : 0;

 	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
 	if (ret)
@@ -2412,60 +2184,51 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
 	return ret;
 }

-static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
-{
-	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
-	struct hclgevf_rss_tuple_cfg *tuple_sets;
-	u32 i;
-
-	rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
-	rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
-	tuple_sets = &rss_cfg->rss_tuple_sets;
-	if (hdev->pdev->revision >= 0x21) {
-		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
-		memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
-		       HCLGEVF_RSS_KEY_SIZE);
-
-		tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
-		tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
-		tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-	}
-
-	/* Initialize RSS indirect table */
-	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
-		rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
-}
-
 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
 {
-	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
+	u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
+	u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
+	u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
 	int ret;

-	if (hdev->pdev->revision >= 0x21) {
-		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
-					       rss_cfg->rss_hash_key);
+	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+		ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw,
+						  rss_cfg->rss_algo,
+						  rss_cfg->rss_hash_key);
 		if (ret)
 			return ret;

-		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
+		ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, rss_cfg);
 		if (ret)
 			return ret;
 	}

-	ret = hclgevf_set_rss_indir_table(hdev);
+	ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
+					     rss_cfg->rss_indirection_tbl);
 	if (ret)
 		return ret;

-	return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size);
+	hclge_comm_get_rss_tc_info(rss_cfg->rss_size, hdev->hw_tc_map,
+				   tc_offset, tc_valid, tc_size);
+
+	return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
+					  tc_valid, tc_size);
 }

-static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
+static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev,
+				    bool rxvtag_strip_en)
 {
+	struct hnae3_handle *nic = &hdev->nic;
+	int ret;
+
+	ret = hclgevf_en_hw_strip_rxvtag(nic, rxvtag_strip_en);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to enable rx vlan offload, ret = %d\n", ret);
+		return ret;
+	}
+
 	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
 				       false);
 }
@@ -2492,8 +2255,7 @@ static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
 	} else {
 		set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

-		/* flush memory to make sure DOWN is seen by service task */
-		smp_mb__before_atomic();
+		smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */
 		hclgevf_flush_link_update(hdev);
 	}
 }

@@ -2502,30 +2264,28 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

-	hclgevf_reset_tqp_stats(handle);
+	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+	clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state);
+
+	hclge_comm_reset_tqp_stats(handle);

 	hclgevf_request_link_info(hdev);

 	hclgevf_update_link_mode(hdev);

-	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
-
 	return 0;
 }

 static void hclgevf_ae_stop(struct hnae3_handle *handle)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	int i;

 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

 	if (hdev->reset_type != HNAE3_VF_RESET)
-		for (i = 0; i < handle->kinfo.num_tqps; i++)
-			if (hclgevf_reset_tqp(handle, i))
-				break;
+		hclgevf_reset_tqp(handle);

-	hclgevf_reset_tqp_stats(handle);
+	hclge_comm_reset_tqp_stats(handle);
 	hclgevf_update_link_status(hdev, 0);
 }

@@ -2545,13 +2305,7 @@ static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)

 static int hclgevf_client_start(struct hnae3_handle *handle)
 {
-	int ret;
-
-	ret = hclgevf_set_alive(handle, true);
-	if (ret)
-		return ret;
-
-	return 0;
+	return hclgevf_set_alive(handle, true);
 }

 static void hclgevf_client_stop(struct hnae3_handle *handle)
@@ -2572,6 +2326,8 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
 	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

 	INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
+	/* timer needs to be initialized before misc irq */
+	timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);

 	mutex_init(&hdev->mbx_resp.mbx_mutex);
 	sema_init(&hdev->reset_sem, 1);
@@ -2625,9 +2381,6 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
 	hdev->num_msi = vectors;
 	hdev->num_msi_left = vectors;

-	hdev->base_msi_vector = pdev->irq;
-	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
-
 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
 					   sizeof(u16), GFP_KERNEL);
 	if (!hdev->vector_status) {
@@ -2712,7 +2465,7 @@ static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
 					    struct hnae3_client *client)
 {
 	struct hclgevf_dev *hdev = ae_dev->priv;
-	int rst_cnt = hdev->rst_stats.rst_cnt;
+	u32 rst_cnt = hdev->rst_stats.rst_cnt;
 	int ret;

 	ret = client->ops->init_instance(&hdev->nic);
@@ -2754,6 +2507,7 @@ static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
 	if
(ret) return ret; + set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); hnae3_set_client_init_flag(client, ae_dev, 1); return 0; @@ -2814,6 +2568,10 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client, /* un-init roce, if it exists */ if (hdev->roce_client) { + while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) + msleep(HCLGEVF_WAIT_RESET_DONE); + clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); + hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); hdev->roce_client = NULL; hdev->roce.client = NULL; @@ -2822,6 +2580,8 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client, /* un-init nic/unic, if this was not called by roce client */ if (client->ops->uninit_instance && hdev->nic_client && client->type != HNAE3_CLIENT_ROCE) { + while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) + msleep(HCLGEVF_WAIT_RESET_DONE); clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); client->ops->uninit_instance(&hdev->nic, 0); @@ -2830,6 +2590,27 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client, } } +static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclgevf_hw *hw = &hdev->hw; + + /* for device does not have device memory, return directly */ + if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR))) + return 0; + + hw->hw.mem_base = + devm_ioremap_wc(&pdev->dev, + pci_resource_start(pdev, HCLGEVF_MEM_BAR), + pci_resource_len(pdev, HCLGEVF_MEM_BAR)); + if (!hw->hw.mem_base) { + dev_err(&pdev->dev, "failed to map device memory\n"); + return -EFAULT; + } + + return 0; +} + static int hclgevf_pci_init(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; @@ -2844,7 +2625,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev) ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (ret) { - dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); + dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n"); goto err_disable_device; } @@ -2856,18 +2637,22 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev) pci_set_master(pdev); hw = &hdev->hw; - hw->hdev = hdev; - hw->io_base = pci_iomap(pdev, 2, 0); - if (!hw->io_base) { + hw->hw.io_base = pci_iomap(pdev, 2, 0); + if (!hw->hw.io_base) { dev_err(&pdev->dev, "can't map configuration register space\n"); ret = -ENOMEM; - goto err_clr_master; + goto err_release_regions; } + ret = hclgevf_dev_mem_map(hdev); + if (ret) + goto err_unmap_io_base; + return 0; -err_clr_master: - pci_clear_master(pdev); +err_unmap_io_base: + pci_iounmap(pdev, hdev->hw.hw.io_base); +err_release_regions: pci_release_regions(pdev); err_disable_device: pci_disable_device(pdev); @@ -2879,8 +2664,10 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; - pci_iounmap(pdev, hdev->hw.io_base); - pci_clear_master(pdev); + if (hdev->hw.hw.mem_base) + devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); + + pci_iounmap(pdev, hdev->hw.hw.io_base); pci_release_regions(pdev); pci_disable_device(pdev); } @@ -2888,10 +2675,10 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) { struct hclgevf_query_res_cmd *req; - struct hclgevf_desc desc; + struct hclge_desc desc; int ret; - hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); + hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RSRC, true); ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, @@ 
-2936,12 +2723,93 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) return 0; } +static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_MAX_NON_TSO_BD_NUM 8U + + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + + ae_dev->dev_specs.max_non_tso_bd_num = + HCLGEVF_MAX_NON_TSO_BD_NUM; + ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; + ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; + ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL; + ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME; +} + +static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, + struct hclge_desc *desc) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + struct hclgevf_dev_specs_0_cmd *req0; + struct hclgevf_dev_specs_1_cmd *req1; + + req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data; + req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data; + + ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; + ae_dev->dev_specs.rss_ind_tbl_size = + le16_to_cpu(req0->rss_ind_tbl_size); + ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); + ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); + ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); + ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); +} + +static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) +{ + struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; + + if (!dev_specs->max_non_tso_bd_num) + dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM; + if (!dev_specs->rss_ind_tbl_size) + dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; + if (!dev_specs->rss_key_size) + dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; + if (!dev_specs->max_int_gl) + dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL; + if (!dev_specs->max_frm_size) + dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME; +} + +static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) +{ + struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM]; + int ret; + int i; + + /* set default specifications as devices lower than version V3 do not + * support querying specifications from firmware. 
+ */ + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { + hclgevf_set_default_dev_specs(hdev); + return 0; + } + + for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) { + hclgevf_cmd_setup_basic_desc(&desc[i], + HCLGE_OPC_QUERY_DEV_SPECS, true); + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + } + hclgevf_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true); + + ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM); + if (ret) + return ret; + + hclgevf_parse_dev_specs(hdev, desc); + hclgevf_check_dev_specs(hdev); + + return 0; +} + static int hclgevf_pci_reset(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; int ret = 0; - if (hdev->reset_type == HNAE3_VF_FULL_RESET && + if ((hdev->reset_type == HNAE3_VF_FULL_RESET || + hdev->reset_type == HNAE3_FLR_RESET) && test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { hclgevf_misc_irq_uninit(hdev); hclgevf_uninit_msi(hdev); @@ -2980,6 +2848,18 @@ static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); } +static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev) +{ + if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) + hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1); +} + +static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev) +{ + if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) + hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0); +} + static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; @@ -2991,7 +2871,11 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) return ret; } - ret = hclgevf_cmd_init(hdev); + hclgevf_arq_init(hdev); + + ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, + &hdev->fw_version, false, + hdev->reset_pending); if (ret) { dev_err(&pdev->dev, "cmd failed %d\n", ret); return ret; @@ -3004,19 +2888,26 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) return ret; } - ret = hclgevf_config_gro(hdev, true); + ret = hclgevf_config_gro(hdev); if (ret) return ret; - ret = hclgevf_init_vlan_config(hdev); + ret = hclgevf_init_vlan_config(hdev, hdev->rxvtag_strip_en); if (ret) { dev_err(&hdev->pdev->dev, "failed(%d) to initialize VLAN config\n", ret); return ret; } + /* get current port based vlan state from PF */ + ret = hclgevf_get_port_base_vlan_filter_state(hdev); + if (ret) + return ret; + set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); + hclgevf_init_rxd_adv_layout(hdev); + dev_info(&hdev->pdev->dev, "Reset done\n"); return 0; @@ -3031,11 +2922,16 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) if (ret) return ret; - ret = hclgevf_cmd_queue_init(hdev); + ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); if (ret) goto err_cmd_queue_init; - ret = hclgevf_cmd_init(hdev); + hclgevf_arq_init(hdev); + + hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclgevf_cmq_ops); + ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, + &hdev->fw_version, false, + hdev->reset_pending); if (ret) goto err_cmd_init; @@ -3044,6 +2940,13 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) if (ret) goto err_cmd_init; + ret = hclgevf_query_dev_specs(hdev); + if (ret) { + dev_err(&pdev->dev, + "failed to query dev specifications, ret = %d\n", ret); + goto err_cmd_init; + } + ret = hclgevf_init_msi(hdev); if (ret) { dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); @@ -3076,12 +2979,18 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) if (ret) goto err_config; - ret 
= hclgevf_config_gro(hdev, true); + ret = hclgevf_config_gro(hdev); if (ret) goto err_config; /* Initialize RSS for this VF */ - hclgevf_rss_init_cfg(hdev); + ret = hclge_comm_rss_init_cfg(&hdev->nic, hdev->ae_dev, + &hdev->rss_cfg); + if (ret) { + dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret); + goto err_config; + } + ret = hclgevf_rss_init_hw(hdev); if (ret) { dev_err(&hdev->pdev->dev, @@ -3089,7 +2998,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } - /* ensure vf tbl list as empty before init*/ + /* ensure vf tbl list as empty before init */ ret = hclgevf_clear_vport_list(hdev); if (ret) { dev_err(&pdev->dev, @@ -3098,13 +3007,21 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } - ret = hclgevf_init_vlan_config(hdev); + ret = hclgevf_init_vlan_config(hdev, true); if (ret) { dev_err(&hdev->pdev->dev, "failed(%d) to initialize VLAN config\n", ret); goto err_config; } + hclgevf_init_rxd_adv_layout(hdev); + + ret = hclgevf_devlink_init(hdev); + if (ret) + goto err_config; + + set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state); + hdev->last_reset_time = jiffies; dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", HCLGEVF_DRIVER_NAME); @@ -3119,7 +3036,7 @@ err_misc_irq_init: hclgevf_state_uninit(hdev); hclgevf_uninit_msi(hdev); err_cmd_init: - hclgevf_cmd_uninit(hdev); + hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); err_cmd_queue_init: hclgevf_pci_uninit(hdev); clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); @@ -3131,6 +3048,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) struct hclge_vf_to_pf_msg send_msg; hclgevf_state_uninit(hdev); + hclgevf_uninit_rxd_adv_layout(hdev); hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0); hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); @@ -3140,8 +3058,9 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) hclgevf_uninit_msi(hdev); } + hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); + hclgevf_devlink_uninit(hdev); hclgevf_pci_uninit(hdev); - hclgevf_cmd_uninit(hdev); hclgevf_uninit_mac_list(hdev); } @@ -3175,11 +3094,7 @@ static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) { - struct hnae3_handle *nic = &hdev->nic; - struct hnae3_knic_private_info *kinfo = &nic->kinfo; - - return min_t(u32, hdev->rss_size_max, - hdev->num_tqps / kinfo->num_tc); + return min(hdev->rss_size_max, hdev->num_tqps); } /** @@ -3222,7 +3137,7 @@ static void hclgevf_update_rss_size(struct hnae3_handle *handle, kinfo->req_rss_size = new_tqps_num; max_rss_size = min_t(u16, hdev->rss_size_max, - hdev->num_tqps / kinfo->num_tc); + hdev->num_tqps / kinfo->tc_info.num_tc); /* Use the user's configuration when it is not larger than * max_rss_size, otherwise, use the maximum specification value. 
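The hclgevf_query_dev_specs() change above is a small capability-negotiation pattern worth noting: the firmware query is gated on the device version, and hclgevf_check_dev_specs() backfills any field the firmware left at zero with a compiled-in default, so later code can use the specs unconditionally. Below is a minimal sketch of that shape; every demo_-prefixed name and every default value is invented for illustration and is not part of this driver.

#include <linux/string.h>
#include <linux/types.h>

struct demo_dev_specs {
	u16 rss_ind_tbl_size;
	u16 rss_key_size;
	u16 max_frm_size;
};

/* invented stand-ins for the HCLGEVF_* / HCLGE_COMM_* defaults */
#define DEMO_DEF_RSS_IND_TBL_SIZE	512
#define DEMO_DEF_RSS_KEY_SIZE		40
#define DEMO_DEF_MAX_FRM_SIZE		9728

/* stand-in for the firmware command; returns false when the device is
 * too old to support the query, mirroring the V3 version gate above */
static bool demo_fw_query_specs(struct demo_dev_specs *specs)
{
	(void)specs;
	return false;
}

static void demo_get_dev_specs(struct demo_dev_specs *specs)
{
	memset(specs, 0, sizeof(*specs));
	demo_fw_query_specs(specs);

	/* any field the firmware left at zero falls back to a default,
	 * so callers never branch on device age or partial answers */
	if (!specs->rss_ind_tbl_size)
		specs->rss_ind_tbl_size = DEMO_DEF_RSS_IND_TBL_SIZE;
	if (!specs->rss_key_size)
		specs->rss_key_size = DEMO_DEF_RSS_KEY_SIZE;
	if (!specs->max_frm_size)
		specs->max_frm_size = DEMO_DEF_MAX_FRM_SIZE;
}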
@@ -3234,7 +3149,7 @@ static void hclgevf_update_rss_size(struct hnae3_handle *handle, (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) kinfo->rss_size = max_rss_size; - kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size; + kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size; } static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, @@ -3242,6 +3157,9 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hnae3_knic_private_info *kinfo = &handle->kinfo; + u16 tc_offset[HCLGE_COMM_MAX_TC_NUM]; + u16 tc_valid[HCLGE_COMM_MAX_TC_NUM]; + u16 tc_size[HCLGE_COMM_MAX_TC_NUM]; u16 cur_rss_size = kinfo->rss_size; u16 cur_tqps = kinfo->num_tqps; u32 *rss_indir; @@ -3250,20 +3168,24 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, hclgevf_update_rss_size(handle, new_tqps_num); - ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size); + hclge_comm_get_rss_tc_info(kinfo->rss_size, hdev->hw_tc_map, + tc_offset, tc_valid, tc_size); + ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, + tc_valid, tc_size); if (ret) return ret; - /* RSS indirection table has been configuared by user */ + /* RSS indirection table has been configured by user */ if (rxfh_configured) goto out; /* Reinitializes the rss indirect table according to the new RSS size */ - rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); + rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size, + sizeof(u32), GFP_KERNEL); if (!rss_indir) return -ENOMEM; - for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) + for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) rss_indir[i] = i % kinfo->rss_size; hdev->rss_cfg.rss_size = kinfo->rss_size; @@ -3280,7 +3202,7 @@ out: dev_info(&hdev->pdev->dev, "Channels changed, rss_size from %u to %u, tqps from %u to %u", cur_rss_size, kinfo->rss_size, - cur_tqps, kinfo->rss_size * kinfo->num_tc); + cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); return ret; } @@ -3294,7 +3216,7 @@ static int hclgevf_get_status(struct hnae3_handle *handle) static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, u8 *auto_neg, u32 *speed, - u8 *duplex) + u8 *duplex, u32 *lane_num) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); @@ -3316,8 +3238,15 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + bool gro_en_old = hdev->gro_en; + int ret; + + hdev->gro_en = enable; + ret = hclgevf_config_gro(hdev); + if (ret) + hdev->gro_en = gro_en_old; - return hclgevf_config_gro(hdev, enable); + return ret; } static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, @@ -3339,6 +3268,13 @@ static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); } +static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); +} + static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); @@ -3363,94 +3299,41 @@ static void hclgevf_get_link_mode(struct hnae3_handle *handle, *advertising = hdev->hw.mac.advertising; } -#define MAX_SEPARATE_NUM 4 -#define SEPARATOR_VALUE 0xFFFFFFFF -#define REG_NUM_PER_LINE 4 
-#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) - -static int hclgevf_get_regs_len(struct hnae3_handle *handle) -{ - int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - - cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; - common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; - ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; - tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; - - return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + - tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; -} - -static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, - void *data) -{ - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - int i, j, reg_um, separator_num; - u32 *reg = data; - - *version = hdev->fw_version; - - /* fetching per-VF registers values from VF PCIe register space */ - reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); - separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; - for (i = 0; i < reg_um; i++) - *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); - for (i = 0; i < separator_num; i++) - *reg++ = SEPARATOR_VALUE; - - reg_um = sizeof(common_reg_addr_list) / sizeof(u32); - separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; - for (i = 0; i < reg_um; i++) - *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); - for (i = 0; i < separator_num; i++) - *reg++ = SEPARATOR_VALUE; - - reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); - separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; - for (j = 0; j < hdev->num_tqps; j++) { - for (i = 0; i < reg_um; i++) - *reg++ = hclgevf_read_dev(&hdev->hw, - ring_reg_addr_list[i] + - 0x200 * j); - for (i = 0; i < separator_num; i++) - *reg++ = SEPARATOR_VALUE; - } - - reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); - separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; - for (j = 0; j < hdev->num_msi_used - 1; j++) { - for (i = 0; i < reg_um; i++) - *reg++ = hclgevf_read_dev(&hdev->hw, - tqp_intr_reg_addr_list[i] + - 4 * j); - for (i = 0; i < separator_num; i++) - *reg++ = SEPARATOR_VALUE; - } -} - void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, - u8 *port_base_vlan_info, u8 data_size) + struct hclge_mbx_port_base_vlan *port_base_vlan) { struct hnae3_handle *nic = &hdev->nic; struct hclge_vf_to_pf_msg send_msg; + int ret; rtnl_lock(); - hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); - rtnl_unlock(); + + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || + test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) { + dev_warn(&hdev->pdev->dev, + "is resetting when updating port based vlan info\n"); + rtnl_unlock(); + return; + } + + ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) { + rtnl_unlock(); + return; + } /* send msg to PF and wait update port based vlan info */ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, HCLGE_MBX_PORT_BASE_VLAN_CFG); - memcpy(send_msg.data, port_base_vlan_info, data_size); - hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); - - if (state == HNAE3_PORT_BASE_VLAN_DISABLE) - nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; - else - nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; + memcpy(send_msg.data, port_base_vlan, sizeof(*port_base_vlan)); + ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + if (!ret) { + if (state == HNAE3_PORT_BASE_VLAN_DISABLE) + nic->port_base_vlan_state = state; + 
else + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; + } - rtnl_lock(); hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); rtnl_unlock(); } @@ -3458,8 +3341,8 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, static const struct hnae3_ae_ops hclgevf_ops = { .init_ae_dev = hclgevf_init_ae_dev, .uninit_ae_dev = hclgevf_uninit_ae_dev, - .flr_prepare = hclgevf_flr_prepare, - .flr_done = hclgevf_flr_done, + .reset_prepare = hclgevf_reset_prepare_general, + .reset_done = hclgevf_reset_done, .init_client_instance = hclgevf_init_client_instance, .uninit_client_instance = hclgevf_uninit_client_instance, .start = hclgevf_ae_start, @@ -3481,8 +3364,7 @@ static const struct hnae3_ae_ops hclgevf_ops = { .update_stats = hclgevf_update_stats, .get_strings = hclgevf_get_strings, .get_sset_count = hclgevf_get_sset_count, - .get_rss_key_size = hclgevf_get_rss_key_size, - .get_rss_indir_size = hclgevf_get_rss_indir_size, + .get_rss_key_size = hclge_comm_get_rss_key_size, .get_rss = hclgevf_get_rss, .set_rss = hclgevf_set_rss, .get_rss_tuple = hclgevf_get_rss_tuple, @@ -3490,6 +3372,7 @@ static const struct hnae3_ae_ops hclgevf_ops = { .get_tc_size = hclgevf_get_tc_size, .get_fw_version = hclgevf_get_fw_version, .set_vlan_filter = hclgevf_set_vlan_filter, + .enable_vlan_filter = hclgevf_enable_vlan_filter, .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, .reset_event = hclgevf_reset_event, .set_default_reset_request = hclgevf_set_def_reset_request, @@ -3511,6 +3394,7 @@ static const struct hnae3_ae_ops hclgevf_ops = { .get_link_mode = hclgevf_get_link_mode, .set_promisc_mode = hclgevf_set_promisc_mode, .request_update_promisc_mode = hclgevf_request_update_promisc_mode, + .get_cmdq_stat = hclgevf_get_cmdq_stat, }; static struct hnae3_ae_algo ae_algovf = { @@ -3518,11 +3402,11 @@ static struct hnae3_ae_algo ae_algovf = { .pdev_id_table = ae_algovf_pci_tbl, }; -static int hclgevf_init(void) +static int __init hclgevf_init(void) { pr_info("%s is initializing\n", HCLGEVF_NAME); - hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME); + hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME); if (!hclgevf_wq) { pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); return -ENOMEM; } @@ -3533,10 +3417,12 @@ static int hclgevf_init(void) return 0; } -static void hclgevf_exit(void) +static void __exit hclgevf_exit(void) { + hnae3_acquire_unload_lock(); hnae3_unregister_ae_algo(&ae_algovf); destroy_workqueue(hclgevf_wq); + hnae3_release_unload_lock(); } module_init(hclgevf_init); module_exit(hclgevf_exit);
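The final hunks switch the service workqueue to WQ_UNBOUND, so its work items may run on any CPU instead of being pinned to the queueing one, and wrap module exit in the hnae3 unload lock while keeping the teardown order strict: unregister from the framework first, then destroy the workqueue. A minimal module sketch of that setup/teardown ordering follows; the demo_ names are invented, and the hnae3 calls are referenced only in comments.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static int __init demo_init(void)
{
	/* WQ_UNBOUND: work runs on any CPU, not the queueing CPU */
	demo_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "demo");
	if (!demo_wq)
		return -ENOMEM;

	/* the real driver registers here: hnae3_register_ae_algo() */
	return 0;
}

static void __exit demo_exit(void)
{
	/* unregister first so nothing new is queued, then destroy the
	 * workqueue, which drains any work still pending */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("sketch of init/exit ordering around a workqueue");
MODULE_LICENSE("GPL");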

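One more change earlier in this diff deserves a note: hclgevf_misc_irq_handle() no longer schedules the reset task directly from hard-IRQ context; it arms a one-shot timer (HCLGEVF_RESET_DELAY, 5 ms) whose callback does the scheduling, keeping the handler short and giving the hardware a brief settle window. Below is a minimal sketch of that defer-via-timer shape, with invented demo_ names; timer_setup() is run once at probe, just as the patch does in hclgevf_state_init().

#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/timer.h>

#define DEMO_RESET_DELAY_MS	5	/* mirrors HCLGEVF_RESET_DELAY */

struct demo_dev {
	struct timer_list reset_timer;
};

/* timer callback, softirq context: a safe place for the bookkeeping
 * the hard-IRQ handler no longer does itself */
static void demo_reset_timer_fn(struct timer_list *t)
{
	struct demo_dev *dev = timer_container_of(dev, t, reset_timer);

	/* the real callback clears the event cause and schedules the
	 * reset service task */
	(void)dev;
}

/* called once at probe time, before the misc IRQ is requested */
static void demo_probe_init(struct demo_dev *dev)
{
	timer_setup(&dev->reset_timer, demo_reset_timer_fn, 0);
}

/* hard-IRQ handler: only (re)arm the one-shot timer and return */
static irqreturn_t demo_irq_handle(int irq, void *data)
{
	struct demo_dev *dev = data;

	mod_timer(&dev->reset_timer,
		  jiffies + msecs_to_jiffies(DEMO_RESET_DELAY_MS));

	return IRQ_HANDLED;
}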