Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3/hns3vf')
6 files changed, 119 insertions, 806 deletions
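Most of this diff deletes the VF driver's private command-queue (CSQ/CRQ) implementation in hclgevf_cmd.c and points the remaining callers at the shared hclge_comm_cmd helpers visible further down (hclge_comm_cmd_queue_init(), hclge_comm_cmd_init(), hclge_comm_cmd_send(), hclge_comm_cmd_uninit()). The ring accounting being removed — hclgevf_ring_space() and hclgevf_cmd_csq_clean() in the deleted file below — is ordinary circular-buffer head/tail arithmetic. The sketch that follows is a standalone userspace model of that arithmetic only (DESC_NUM and the struct/function names are illustrative, not the kernel's); it is not the driver code itself.

/*
 * Standalone model of the CSQ space/clean accounting removed by this
 * patch.  Names and DESC_NUM are illustrative.  Build and run with:
 *     gcc -o csq csq.c && ./csq
 */
#include <stdio.h>

#define DESC_NUM 1024   /* ring size, hypothetical */

struct csq {
	int next_to_use;    /* SW tail: next descriptor to fill */
	int next_to_clean;  /* SW head: oldest descriptor not yet completed */
};

/* Free slots; one slot stays unused so "full" never looks like "empty". */
static int csq_space(const struct csq *q)
{
	int used = (q->next_to_use - q->next_to_clean + DESC_NUM) % DESC_NUM;

	return DESC_NUM - used - 1;
}

/* Advance the SW head to the HW head; return how many entries were cleaned. */
static int csq_clean(struct csq *q, int hw_head)
{
	int clean = (hw_head - q->next_to_clean + DESC_NUM) % DESC_NUM;

	q->next_to_clean = hw_head;
	return clean;
}

int main(void)
{
	struct csq q = { .next_to_use = 10, .next_to_clean = 1020 };

	printf("space before clean: %d\n", csq_space(&q)); /* 1009 */
	printf("cleaned: %d\n", csq_clean(&q, 5));          /* 9 */
	printf("space after clean:  %d\n", csq_space(&q));  /* 1018 */
	return 0;
}

The "- 1" in csq_space() mirrors the deleted hclgevf_ring_space(): reserving one descriptor lets the ring tell a full queue from an empty one when head equals tail.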
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile deleted file mode 100644 index 51ff7d86ee90..000000000000 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0+ -# -# Makefile for the HISILICON network device drivers. -# - -ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 -ccflags-y += -I $(srctree)/$(src) - -obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o -hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o hclgevf_devlink.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c deleted file mode 100644 index e605c2c5bcce..000000000000 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ /dev/null @@ -1,556 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -// Copyright (c) 2016-2017 Hisilicon Limited. - -#include <linux/device.h> -#include <linux/dma-direction.h> -#include <linux/dma-mapping.h> -#include <linux/err.h> -#include <linux/pci.h> -#include <linux/slab.h> -#include "hclgevf_cmd.h" -#include "hclgevf_main.h" -#include "hnae3.h" - -#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev) - -static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring) -{ - int ntc = ring->next_to_clean; - int ntu = ring->next_to_use; - int used; - - used = (ntu - ntc + ring->desc_num) % ring->desc_num; - - return ring->desc_num - used - 1; -} - -static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring, - int head) -{ - int ntu = ring->next_to_use; - int ntc = ring->next_to_clean; - - if (ntu > ntc) - return head >= ntc && head <= ntu; - - return head >= ntc || head <= ntu; -} - -static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw) -{ - struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw); - struct hclgevf_cmq_ring *csq = &hw->cmq.csq; - int clean; - u32 head; - - head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG); - rmb(); /* Make sure head is ready before touch any data */ - - if (!hclgevf_is_valid_csq_clean_head(csq, head)) { - dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head, - csq->next_to_use, csq->next_to_clean); - dev_warn(&hdev->pdev->dev, - "Disabling any further commands to IMP firmware\n"); - set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); - return -EIO; - } - - clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num; - csq->next_to_clean = head; - return clean; -} - -static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw) -{ - u32 head; - - head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG); - - return head == hw->cmq.csq.next_to_use; -} - -static bool hclgevf_is_special_opcode(u16 opcode) -{ - const u16 spec_opcode[] = {0x30, 0x31, 0x32}; - int i; - - for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { - if (spec_opcode[i] == opcode) - return true; - } - - return false; -} - -static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring) -{ - struct hclgevf_dev *hdev = ring->dev; - struct hclgevf_hw *hw = &hdev->hw; - u32 reg_val; - - if (ring->flag == HCLGEVF_TYPE_CSQ) { - reg_val = lower_32_bits(ring->desc_dma_addr); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val); - reg_val = upper_32_bits(ring->desc_dma_addr); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val); - - reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG); - reg_val &= HCLGEVF_NIC_SW_RST_RDY; - reg_val |= (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); - hclgevf_write_dev(hw, 
HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val); - - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); - } else { - reg_val = lower_32_bits(ring->desc_dma_addr); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val); - reg_val = upper_32_bits(ring->desc_dma_addr); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val); - - reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val); - - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); - } -} - -static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw) -{ - hclgevf_cmd_config_regs(&hw->cmq.csq); - hclgevf_cmd_config_regs(&hw->cmq.crq); -} - -static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring) -{ - int size = ring->desc_num * sizeof(struct hclgevf_desc); - - ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size, - &ring->desc_dma_addr, GFP_KERNEL); - if (!ring->desc) - return -ENOMEM; - - return 0; -} - -static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring) -{ - int size = ring->desc_num * sizeof(struct hclgevf_desc); - - if (ring->desc) { - dma_free_coherent(cmq_ring_to_dev(ring), size, - ring->desc, ring->desc_dma_addr); - ring->desc = NULL; - } -} - -static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type) -{ - struct hclgevf_hw *hw = &hdev->hw; - struct hclgevf_cmq_ring *ring = - (ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq; - int ret; - - ring->dev = hdev; - ring->flag = ring_type; - - /* allocate CSQ/CRQ descriptor */ - ret = hclgevf_alloc_cmd_desc(ring); - if (ret) - dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret, - (ring_type == HCLGEVF_TYPE_CSQ) ? 
"CSQ" : "CRQ"); - - return ret; -} - -void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc, - enum hclgevf_opcode_type opcode, bool is_read) -{ - memset(desc, 0, sizeof(struct hclgevf_desc)); - desc->opcode = cpu_to_le16(opcode); - desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR | - HCLGEVF_CMD_FLAG_IN); - if (is_read) - desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR); - else - desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR); -} - -struct vf_errcode { - u32 imp_errcode; - int common_errno; -}; - -static void hclgevf_cmd_copy_desc(struct hclgevf_hw *hw, - struct hclgevf_desc *desc, int num) -{ - struct hclgevf_desc *desc_to_use; - int handle = 0; - - while (handle < num) { - desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use]; - *desc_to_use = desc[handle]; - (hw->cmq.csq.next_to_use)++; - if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num) - hw->cmq.csq.next_to_use = 0; - handle++; - } -} - -static int hclgevf_cmd_convert_err_code(u16 desc_ret) -{ - struct vf_errcode hclgevf_cmd_errcode[] = { - {HCLGEVF_CMD_EXEC_SUCCESS, 0}, - {HCLGEVF_CMD_NO_AUTH, -EPERM}, - {HCLGEVF_CMD_NOT_SUPPORTED, -EOPNOTSUPP}, - {HCLGEVF_CMD_QUEUE_FULL, -EXFULL}, - {HCLGEVF_CMD_NEXT_ERR, -ENOSR}, - {HCLGEVF_CMD_UNEXE_ERR, -ENOTBLK}, - {HCLGEVF_CMD_PARA_ERR, -EINVAL}, - {HCLGEVF_CMD_RESULT_ERR, -ERANGE}, - {HCLGEVF_CMD_TIMEOUT, -ETIME}, - {HCLGEVF_CMD_HILINK_ERR, -ENOLINK}, - {HCLGEVF_CMD_QUEUE_ILLEGAL, -ENXIO}, - {HCLGEVF_CMD_INVALID, -EBADR}, - }; - u32 errcode_count = ARRAY_SIZE(hclgevf_cmd_errcode); - u32 i; - - for (i = 0; i < errcode_count; i++) - if (hclgevf_cmd_errcode[i].imp_errcode == desc_ret) - return hclgevf_cmd_errcode[i].common_errno; - - return -EIO; -} - -static int hclgevf_cmd_check_retval(struct hclgevf_hw *hw, - struct hclgevf_desc *desc, int num, int ntc) -{ - u16 opcode, desc_ret; - int handle; - - opcode = le16_to_cpu(desc[0].opcode); - for (handle = 0; handle < num; handle++) { - /* Get the result of hardware write back */ - desc[handle] = hw->cmq.csq.desc[ntc]; - ntc++; - if (ntc == hw->cmq.csq.desc_num) - ntc = 0; - } - if (likely(!hclgevf_is_special_opcode(opcode))) - desc_ret = le16_to_cpu(desc[num - 1].retval); - else - desc_ret = le16_to_cpu(desc[0].retval); - hw->cmq.last_status = desc_ret; - - return hclgevf_cmd_convert_err_code(desc_ret); -} - -static int hclgevf_cmd_check_result(struct hclgevf_hw *hw, - struct hclgevf_desc *desc, int num, int ntc) -{ - struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev; - bool is_completed = false; - u32 timeout = 0; - int handle, ret; - - /* If the command is sync, wait for the firmware to write back, - * if multi descriptors to be sent, use the first one to check - */ - if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) { - do { - if (hclgevf_cmd_csq_done(hw)) { - is_completed = true; - break; - } - udelay(1); - timeout++; - } while (timeout < hw->cmq.tx_timeout); - } - - if (!is_completed) - ret = -EBADE; - else - ret = hclgevf_cmd_check_retval(hw, desc, num, ntc); - - /* Clean the command send queue */ - handle = hclgevf_cmd_csq_clean(hw); - if (handle < 0) - ret = handle; - else if (handle != num) - dev_warn(&hdev->pdev->dev, - "cleaned %d, need to clean %d\n", handle, num); - return ret; -} - -/* hclgevf_cmd_send - send command to command queue - * @hw: pointer to the hw struct - * @desc: prefilled descriptor for describing the command - * @num : the number of descriptors to be sent - * - * This is the main send command for command queue, it - * sends the queue, cleans the queue, etc - */ -int hclgevf_cmd_send(struct 
hclgevf_hw *hw, struct hclgevf_desc *desc, int num) -{ - struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev; - struct hclgevf_cmq_ring *csq = &hw->cmq.csq; - int ret; - int ntc; - - spin_lock_bh(&hw->cmq.csq.lock); - - if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) { - spin_unlock_bh(&hw->cmq.csq.lock); - return -EBUSY; - } - - if (num > hclgevf_ring_space(&hw->cmq.csq)) { - /* If CMDQ ring is full, SW HEAD and HW HEAD may be different, - * need update the SW HEAD pointer csq->next_to_clean - */ - csq->next_to_clean = hclgevf_read_dev(hw, - HCLGEVF_NIC_CSQ_HEAD_REG); - spin_unlock_bh(&hw->cmq.csq.lock); - return -EBUSY; - } - - /* Record the location of desc in the ring for this time - * which will be use for hardware to write back - */ - ntc = hw->cmq.csq.next_to_use; - - hclgevf_cmd_copy_desc(hw, desc, num); - - /* Write to hardware */ - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, - hw->cmq.csq.next_to_use); - - ret = hclgevf_cmd_check_result(hw, desc, num, ntc); - - spin_unlock_bh(&hw->cmq.csq.lock); - - return ret; -} - -static void hclgevf_set_default_capability(struct hclgevf_dev *hdev) -{ - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - - set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps); - set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps); - set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); -} - -static const struct hclgevf_caps_bit_map hclgevf_cmd_caps_bit_map0[] = { - {HCLGEVF_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B}, - {HCLGEVF_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B}, - {HCLGEVF_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B}, - {HCLGEVF_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B}, - {HCLGEVF_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B}, - {HCLGEVF_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B}, -}; - -static void hclgevf_parse_capability(struct hclgevf_dev *hdev, - struct hclgevf_query_version_cmd *cmd) -{ - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - u32 caps, i; - - caps = __le32_to_cpu(cmd->caps[0]); - for (i = 0; i < ARRAY_SIZE(hclgevf_cmd_caps_bit_map0); i++) - if (hnae3_get_bit(caps, hclgevf_cmd_caps_bit_map0[i].imp_bit)) - set_bit(hclgevf_cmd_caps_bit_map0[i].local_bit, - ae_dev->caps); -} - -static __le32 hclgevf_build_api_caps(void) -{ - u32 api_caps = 0; - - hnae3_set_bit(api_caps, HCLGEVF_API_CAP_FLEX_RSS_TBL_B, 1); - - return cpu_to_le32(api_caps); -} - -static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev) -{ - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - struct hclgevf_query_version_cmd *resp; - struct hclgevf_desc desc; - int status; - - resp = (struct hclgevf_query_version_cmd *)desc.data; - - hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1); - resp->api_caps = hclgevf_build_api_caps(); - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (status) - return status; - - hdev->fw_version = le32_to_cpu(resp->firmware); - - ae_dev->dev_version = le32_to_cpu(resp->hardware) << - HNAE3_PCI_REVISION_BIT_SIZE; - ae_dev->dev_version |= hdev->pdev->revision; - - if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) - hclgevf_set_default_capability(hdev); - - hclgevf_parse_capability(hdev, resp); - - return status; -} - -int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev) -{ - int ret; - - /* Setup the lock for command queue */ - spin_lock_init(&hdev->hw.cmq.csq.lock); - spin_lock_init(&hdev->hw.cmq.crq.lock); - - hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT; - hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM; - 
hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM; - - ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ); - if (ret) { - dev_err(&hdev->pdev->dev, - "CSQ ring setup error %d\n", ret); - return ret; - } - - ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ); - if (ret) { - dev_err(&hdev->pdev->dev, - "CRQ ring setup error %d\n", ret); - goto err_csq; - } - - return 0; -err_csq: - hclgevf_free_cmd_desc(&hdev->hw.cmq.csq); - return ret; -} - -static int hclgevf_firmware_compat_config(struct hclgevf_dev *hdev, bool en) -{ - struct hclgevf_firmware_compat_cmd *req; - struct hclgevf_desc desc; - u32 compat = 0; - - hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_IMP_COMPAT_CFG, false); - - if (en) { - req = (struct hclgevf_firmware_compat_cmd *)desc.data; - - hnae3_set_bit(compat, HCLGEVF_SYNC_RX_RING_HEAD_EN_B, 1); - - req->compat = cpu_to_le32(compat); - } - - return hclgevf_cmd_send(&hdev->hw, &desc, 1); -} - -int hclgevf_cmd_init(struct hclgevf_dev *hdev) -{ - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - int ret; - - spin_lock_bh(&hdev->hw.cmq.csq.lock); - spin_lock(&hdev->hw.cmq.crq.lock); - - /* initialize the pointers of async rx queue of mailbox */ - hdev->arq.hdev = hdev; - hdev->arq.head = 0; - hdev->arq.tail = 0; - atomic_set(&hdev->arq.count, 0); - hdev->hw.cmq.csq.next_to_clean = 0; - hdev->hw.cmq.csq.next_to_use = 0; - hdev->hw.cmq.crq.next_to_clean = 0; - hdev->hw.cmq.crq.next_to_use = 0; - - hclgevf_cmd_init_regs(&hdev->hw); - - spin_unlock(&hdev->hw.cmq.crq.lock); - spin_unlock_bh(&hdev->hw.cmq.csq.lock); - - clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); - - /* Check if there is new reset pending, because the higher level - * reset may happen when lower level reset is being processed. - */ - if (hclgevf_is_reset_pending(hdev)) { - ret = -EBUSY; - goto err_cmd_init; - } - - /* get version and device capabilities */ - ret = hclgevf_cmd_query_version_and_capability(hdev); - if (ret) { - dev_err(&hdev->pdev->dev, - "failed to query version and capabilities, ret = %d\n", ret); - goto err_cmd_init; - } - - dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n", - hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK, - HNAE3_FW_VERSION_BYTE3_SHIFT), - hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK, - HNAE3_FW_VERSION_BYTE2_SHIFT), - hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK, - HNAE3_FW_VERSION_BYTE1_SHIFT), - hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK, - HNAE3_FW_VERSION_BYTE0_SHIFT)); - - if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { - /* ask the firmware to enable some features, driver can work - * without it. 
- */ - ret = hclgevf_firmware_compat_config(hdev, true); - if (ret) - dev_warn(&hdev->pdev->dev, - "Firmware compatible features not enabled(%d).\n", - ret); - } - - return 0; - -err_cmd_init: - set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); - - return ret; -} - -static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw) -{ - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); -} - -void hclgevf_cmd_uninit(struct hclgevf_dev *hdev) -{ - hclgevf_firmware_compat_config(hdev, false); - set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); - /* wait to ensure that the firmware completes the possible left - * over commands. - */ - msleep(HCLGEVF_CMDQ_CLEAR_WAIT_TIME); - spin_lock_bh(&hdev->hw.cmq.csq.lock); - spin_lock(&hdev->hw.cmq.crq.lock); - hclgevf_cmd_uninit_regs(&hdev->hw); - spin_unlock(&hdev->hw.cmq.crq.lock); - spin_unlock_bh(&hdev->hw.cmq.csq.lock); - - hclgevf_free_cmd_desc(&hdev->hw.cmq.csq); - hclgevf_free_cmd_desc(&hdev->hw.cmq.crq); -} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index edc9e154061a..ab8329e7d2bd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -6,9 +6,8 @@ #include <linux/io.h> #include <linux/types.h> #include "hnae3.h" +#include "hclge_comm_cmd.h" -#define HCLGEVF_CMDQ_TX_TIMEOUT 30000 -#define HCLGEVF_CMDQ_CLEAR_WAIT_TIME 200 #define HCLGEVF_CMDQ_RX_INVLD_B 0 #define HCLGEVF_CMDQ_RX_OUTVLD_B 1 @@ -16,83 +15,6 @@ struct hclgevf_hw; struct hclgevf_dev; #define HCLGEVF_SYNC_RX_RING_HEAD_EN_B 4 -struct hclgevf_firmware_compat_cmd { - __le32 compat; - u8 rsv[20]; -}; - -struct hclgevf_desc { - __le16 opcode; - __le16 flag; - __le16 retval; - __le16 rsv; - __le32 data[6]; -}; - -struct hclgevf_desc_cb { - dma_addr_t dma; - void *va; - u32 length; -}; - -struct hclgevf_cmq_ring { - dma_addr_t desc_dma_addr; - struct hclgevf_desc *desc; - struct hclgevf_desc_cb *desc_cb; - struct hclgevf_dev *dev; - u32 head; - u32 tail; - - u16 buf_size; - u16 desc_num; - int next_to_use; - int next_to_clean; - u8 flag; - spinlock_t lock; /* Command queue lock */ -}; - -enum hclgevf_cmd_return_status { - HCLGEVF_CMD_EXEC_SUCCESS = 0, - HCLGEVF_CMD_NO_AUTH = 1, - HCLGEVF_CMD_NOT_SUPPORTED = 2, - HCLGEVF_CMD_QUEUE_FULL = 3, - HCLGEVF_CMD_NEXT_ERR = 4, - HCLGEVF_CMD_UNEXE_ERR = 5, - HCLGEVF_CMD_PARA_ERR = 6, - HCLGEVF_CMD_RESULT_ERR = 7, - HCLGEVF_CMD_TIMEOUT = 8, - HCLGEVF_CMD_HILINK_ERR = 9, - HCLGEVF_CMD_QUEUE_ILLEGAL = 10, - HCLGEVF_CMD_INVALID = 11, -}; - -enum hclgevf_cmd_status { - HCLGEVF_STATUS_SUCCESS = 0, - HCLGEVF_ERR_CSQ_FULL = -1, - HCLGEVF_ERR_CSQ_TIMEOUT = -2, - HCLGEVF_ERR_CSQ_ERROR = -3 -}; - -struct hclgevf_cmq { - struct hclgevf_cmq_ring csq; - struct hclgevf_cmq_ring crq; - u16 tx_timeout; /* Tx timeout */ - enum hclgevf_cmd_status last_status; -}; - -#define HCLGEVF_CMD_FLAG_IN_VALID_SHIFT 0 -#define HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT 1 -#define HCLGEVF_CMD_FLAG_NEXT_SHIFT 2 -#define 
HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT 3 -#define HCLGEVF_CMD_FLAG_NO_INTR_SHIFT 4 -#define HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT 5 - -#define HCLGEVF_CMD_FLAG_IN BIT(HCLGEVF_CMD_FLAG_IN_VALID_SHIFT) -#define HCLGEVF_CMD_FLAG_OUT BIT(HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT) -#define HCLGEVF_CMD_FLAG_NEXT BIT(HCLGEVF_CMD_FLAG_NEXT_SHIFT) -#define HCLGEVF_CMD_FLAG_WR BIT(HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT) -#define HCLGEVF_CMD_FLAG_NO_INTR BIT(HCLGEVF_CMD_FLAG_NO_INTR_SHIFT) -#define HCLGEVF_CMD_FLAG_ERR_INTR BIT(HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT) enum hclgevf_opcode_type { /* Generic command */ @@ -156,34 +78,6 @@ struct hclgevf_ctrl_vector_chain { u8 resv; }; -enum HCLGEVF_CAP_BITS { - HCLGEVF_CAP_UDP_GSO_B, - HCLGEVF_CAP_QB_B, - HCLGEVF_CAP_FD_FORWARD_TC_B, - HCLGEVF_CAP_PTP_B, - HCLGEVF_CAP_INT_QL_B, - HCLGEVF_CAP_HW_TX_CSUM_B, - HCLGEVF_CAP_TX_PUSH_B, - HCLGEVF_CAP_PHY_IMP_B, - HCLGEVF_CAP_TQP_TXRX_INDEP_B, - HCLGEVF_CAP_HW_PAD_B, - HCLGEVF_CAP_STASH_B, - HCLGEVF_CAP_UDP_TUNNEL_CSUM_B, - HCLGEVF_CAP_RXD_ADV_LAYOUT_B = 15, -}; - -enum HCLGEVF_API_CAP_BITS { - HCLGEVF_API_CAP_FLEX_RSS_TBL_B, -}; - -#define HCLGEVF_QUERY_CAP_LENGTH 3 -struct hclgevf_query_version_cmd { - __le32 firmware; - __le32 hardware; - __le32 api_caps; - __le32 caps[HCLGEVF_QUERY_CAP_LENGTH]; /* capabilities of device */ -}; - #define HCLGEVF_MSIX_OFT_ROCEE_S 0 #define HCLGEVF_MSIX_OFT_ROCEE_M (0xffff << HCLGEVF_MSIX_OFT_ROCEE_S) #define HCLGEVF_VEC_NUM_S 0 @@ -273,9 +167,6 @@ struct hclgevf_cfg_tx_queue_pointer_cmd { u8 rsv[14]; }; -#define HCLGEVF_TYPE_CRQ 0 -#define HCLGEVF_TYPE_CSQ 1 - /* this bit indicates that the driver is ready for hardware reset */ #define HCLGEVF_NIC_SW_RST_RDY_B 16 #define HCLGEVF_NIC_SW_RST_RDY BIT(HCLGEVF_NIC_SW_RST_RDY_B) @@ -285,6 +176,10 @@ struct hclgevf_cfg_tx_queue_pointer_cmd { #define HCLGEVF_QUERY_DEV_SPECS_BD_NUM 4 +#define hclgevf_cmd_setup_basic_desc(desc, opcode, is_read) \ + hclge_comm_cmd_setup_basic_desc(desc, (enum hclge_comm_opcode_type)opcode, \ + is_read) + struct hclgevf_dev_specs_0_cmd { __le32 rsv0; __le32 mac_entry_num; @@ -305,38 +200,6 @@ struct hclgevf_dev_specs_1_cmd { u8 rsv1[18]; }; -/* capabilities bits map between imp firmware and local driver */ -struct hclgevf_caps_bit_map { - u16 imp_bit; - u16 local_bit; -}; - -static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value) -{ - writel(value, base + reg); -} - -static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg) -{ - u8 __iomem *reg_addr = READ_ONCE(base); - - return readl(reg_addr + reg); -} - -#define hclgevf_write_dev(a, reg, value) \ - hclgevf_write_reg((a)->io_base, reg, value) -#define hclgevf_read_dev(a, reg) \ - hclgevf_read_reg((a)->io_base, reg) - -#define HCLGEVF_SEND_SYNC(flag) \ - ((flag) & HCLGEVF_CMD_FLAG_NO_INTR) - -int hclgevf_cmd_init(struct hclgevf_dev *hdev); -void hclgevf_cmd_uninit(struct hclgevf_dev *hdev); -int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev); - -int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num); -void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc, - enum hclgevf_opcode_type opcode, - bool is_read); +int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num); +void hclgevf_arq_init(struct hclgevf_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 0568cc31d391..08bd6fe0f29e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ 
b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -40,20 +40,20 @@ static const u8 hclgevf_hash_key[] = { MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); -static const u32 cmdq_reg_addr_list[] = {HCLGEVF_NIC_CSQ_BASEADDR_L_REG, - HCLGEVF_NIC_CSQ_BASEADDR_H_REG, - HCLGEVF_NIC_CSQ_DEPTH_REG, - HCLGEVF_NIC_CSQ_TAIL_REG, - HCLGEVF_NIC_CSQ_HEAD_REG, - HCLGEVF_NIC_CRQ_BASEADDR_L_REG, - HCLGEVF_NIC_CRQ_BASEADDR_H_REG, - HCLGEVF_NIC_CRQ_DEPTH_REG, - HCLGEVF_NIC_CRQ_TAIL_REG, - HCLGEVF_NIC_CRQ_HEAD_REG, - HCLGEVF_VECTOR0_CMDQ_SRC_REG, - HCLGEVF_VECTOR0_CMDQ_STATE_REG, - HCLGEVF_CMDQ_INTR_EN_REG, - HCLGEVF_CMDQ_INTR_GEN_REG}; +static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, + HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, + HCLGE_COMM_NIC_CSQ_DEPTH_REG, + HCLGE_COMM_NIC_CSQ_TAIL_REG, + HCLGE_COMM_NIC_CSQ_HEAD_REG, + HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, + HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, + HCLGE_COMM_NIC_CRQ_DEPTH_REG, + HCLGE_COMM_NIC_CRQ_TAIL_REG, + HCLGE_COMM_NIC_CRQ_HEAD_REG, + HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, + HCLGE_COMM_VECTOR0_CMDQ_STATE_REG, + HCLGE_COMM_CMDQ_INTR_EN_REG, + HCLGE_COMM_CMDQ_INTR_GEN_REG}; static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, HCLGEVF_RST_ING, @@ -92,6 +92,32 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, HCLGEVF_TQP_INTR_GL2_REG, HCLGEVF_TQP_INTR_RL_REG}; +/* hclgevf_cmd_send - send command to command queue + * @hw: pointer to the hw struct + * @desc: prefilled descriptor for describing the command + * @num : the number of descriptors to be sent + * + * This is the main send command for command queue, it + * sends the queue, cleans the queue, etc + */ +int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num) +{ + return hclge_comm_cmd_send(&hw->hw, desc, num, false); +} + +void hclgevf_arq_init(struct hclgevf_dev *hdev) +{ + struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq; + + spin_lock(&cmdq->crq.lock); + /* initialize the pointers of async rx queue of mailbox */ + hdev->arq.hdev = hdev; + hdev->arq.head = 0; + hdev->arq.tail = 0; + atomic_set(&hdev->arq.count, 0); + spin_unlock(&cmdq->crq.lock); +} + static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle) { if (!handle->client) @@ -106,7 +132,7 @@ static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclgevf_desc desc; + struct hclge_desc desc; struct hclgevf_tqp *tqp; int status; int i; @@ -420,11 +446,11 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) * HCLGEVF_TQP_MAX_SIZE_DEV_V2. 
*/ if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2) - tqp->q.io_base = hdev->hw.io_base + + tqp->q.io_base = hdev->hw.hw.io_base + HCLGEVF_TQP_REG_OFFSET + i * HCLGEVF_TQP_REG_SIZE; else - tqp->q.io_base = hdev->hw.io_base + + tqp->q.io_base = hdev->hw.hw.io_base + HCLGEVF_TQP_REG_OFFSET + HCLGEVF_TQP_EXT_REG_OFFSET + (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) * @@ -539,7 +565,7 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) nic->pdev = hdev->pdev; nic->numa_node_mask = hdev->numa_node_mask; nic->flags |= HNAE3_SUPPORT_VF; - nic->kinfo.io_base = hdev->hw.io_base; + nic->kinfo.io_base = hdev->hw.hw.io_base; ret = hclgevf_knic_setup(hdev); if (ret) @@ -576,7 +602,7 @@ static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { vector->vector = pci_irq_vector(hdev->pdev, i); - vector->io_addr = hdev->hw.io_base + + vector->io_addr = hdev->hw.hw.io_base + HCLGEVF_VECTOR_REG_BASE + (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; hdev->vector_status[i] = 0; @@ -611,7 +637,7 @@ static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, { struct hclgevf_rss_config_cmd *req; unsigned int key_offset = 0; - struct hclgevf_desc desc; + struct hclge_desc desc; int key_counts; int key_size; int ret; @@ -655,7 +681,7 @@ static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) { const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; struct hclgevf_rss_indirection_table_cmd *req; - struct hclgevf_desc desc; + struct hclge_desc desc; int rss_cfg_tbl_num; int status; int i, j; @@ -692,7 +718,7 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) u16 tc_offset[HCLGEVF_MAX_TC_NUM]; u16 tc_valid[HCLGEVF_MAX_TC_NUM]; u16 tc_size[HCLGEVF_MAX_TC_NUM]; - struct hclgevf_desc desc; + struct hclge_desc desc; u16 roundup_size; unsigned int i; int status; @@ -961,7 +987,7 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; struct hclgevf_rss_input_tuple_cmd *req; - struct hclgevf_desc desc; + struct hclge_desc desc; int ret; if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) @@ -1074,7 +1100,7 @@ static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, struct hclgevf_rss_cfg *rss_cfg) { struct hclgevf_rss_input_tuple_cmd *req; - struct hclgevf_desc desc; + struct hclge_desc desc; int ret; hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); @@ -1273,7 +1299,7 @@ static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id, u16 stream_id, bool enable) { struct hclgevf_cfg_com_tqp_queue_cmd *req; - struct hclgevf_desc desc; + struct hclge_desc desc; req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; @@ -1862,13 +1888,13 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev) int ret; if (hdev->reset_type == HNAE3_VF_RESET) - ret = readl_poll_timeout(hdev->hw.io_base + + ret = readl_poll_timeout(hdev->hw.hw.io_base + HCLGEVF_VF_RST_ING, val, !(val & HCLGEVF_VF_RST_ING_BIT), HCLGEVF_RESET_WAIT_US, HCLGEVF_RESET_WAIT_TIMEOUT_US); else - ret = readl_poll_timeout(hdev->hw.io_base + + ret = readl_poll_timeout(hdev->hw.hw.io_base + HCLGEVF_RST_ING, val, !(val & HCLGEVF_RST_ING_BITS), HCLGEVF_RESET_WAIT_US, @@ -1894,13 +1920,13 @@ static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable) { u32 reg_val; - reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG); + reg_val = 
hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); if (enable) reg_val |= HCLGEVF_NIC_SW_RST_RDY; else reg_val &= ~HCLGEVF_NIC_SW_RST_RDY; - hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG, + hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val); } @@ -1951,7 +1977,7 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) hdev->rst_stats.vf_func_rst_cnt++; } - set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); /* inform hardware that preparatory work is done */ msleep(HCLGEVF_RESET_SYNC_TIME); hclgevf_reset_handshake(hdev, true); @@ -1980,9 +2006,9 @@ static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE)); dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", - hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG)); + hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG)); dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", - hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG)); + hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG)); dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); @@ -2219,7 +2245,7 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) vector->vector_irq = pci_irq_vector(hdev->pdev, HCLGEVF_MISC_VECTOR_NUM); - vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; + vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; /* vector status always valid for Vector 0 */ hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; @@ -2340,7 +2366,7 @@ static void hclgevf_keep_alive(struct hclgevf_dev *hdev) struct hclge_vf_to_pf_msg send_msg; int ret; - if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) return; hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0); @@ -2418,7 +2444,7 @@ static void hclgevf_service_task(struct work_struct *work) static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) { - hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); + hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr); } static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, @@ -2428,14 +2454,14 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, /* fetch the events from their corresponding regs */ cmdq_stat_reg = hclgevf_read_dev(&hdev->hw, - HCLGEVF_VECTOR0_CMDQ_STATE_REG); + HCLGE_COMM_VECTOR0_CMDQ_STATE_REG); if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) { rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); dev_info(&hdev->pdev->dev, "receive reset interrupt 0x%x!\n", rst_ing_reg); set_bit(HNAE3_VF_RESET, &hdev->reset_pending); set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); - set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B); hdev->rst_stats.vf_rst_cnt++; /* set up VF hardware reset status, its PF will clear @@ -2559,8 +2585,8 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) roce->rinfo.base_vector = hdev->roce_base_msix_offset; roce->rinfo.netdev = nic->kinfo.netdev; - 
roce->rinfo.roce_io_base = hdev->hw.io_base; - roce->rinfo.roce_mem_base = hdev->hw.mem_base; + roce->rinfo.roce_io_base = hdev->hw.hw.io_base; + roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; roce->pdev = nic->pdev; roce->ae_algo = nic->ae_algo; @@ -2572,7 +2598,7 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) static int hclgevf_config_gro(struct hclgevf_dev *hdev) { struct hclgevf_cfg_gro_status_cmd *req; - struct hclgevf_desc desc; + struct hclge_desc desc; int ret; if (!hnae3_dev_gro_supported(hdev)) @@ -3042,11 +3068,11 @@ static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev) if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR))) return 0; - hw->mem_base = devm_ioremap_wc(&pdev->dev, - pci_resource_start(pdev, - HCLGEVF_MEM_BAR), - pci_resource_len(pdev, HCLGEVF_MEM_BAR)); - if (!hw->mem_base) { + hw->hw.mem_base = + devm_ioremap_wc(&pdev->dev, + pci_resource_start(pdev, HCLGEVF_MEM_BAR), + pci_resource_len(pdev, HCLGEVF_MEM_BAR)); + if (!hw->hw.mem_base) { dev_err(&pdev->dev, "failed to map device memory\n"); return -EFAULT; } @@ -3080,9 +3106,8 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev) pci_set_master(pdev); hw = &hdev->hw; - hw->hdev = hdev; - hw->io_base = pci_iomap(pdev, 2, 0); - if (!hw->io_base) { + hw->hw.io_base = pci_iomap(pdev, 2, 0); + if (!hw->hw.io_base) { dev_err(&pdev->dev, "can't map configuration register space\n"); ret = -ENOMEM; goto err_clr_master; @@ -3095,7 +3120,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev) return 0; err_unmap_io_base: - pci_iounmap(pdev, hdev->hw.io_base); + pci_iounmap(pdev, hdev->hw.hw.io_base); err_clr_master: pci_clear_master(pdev); pci_release_regions(pdev); @@ -3109,10 +3134,10 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; - if (hdev->hw.mem_base) - devm_iounmap(&pdev->dev, hdev->hw.mem_base); + if (hdev->hw.hw.mem_base) + devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); - pci_iounmap(pdev, hdev->hw.io_base); + pci_iounmap(pdev, hdev->hw.hw.io_base); pci_clear_master(pdev); pci_release_regions(pdev); pci_disable_device(pdev); @@ -3121,7 +3146,7 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) { struct hclgevf_query_res_cmd *req; - struct hclgevf_desc desc; + struct hclge_desc desc; int ret; hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); @@ -3184,7 +3209,7 @@ static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev) } static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, - struct hclgevf_desc *desc) + struct hclge_desc *desc) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); struct hclgevf_dev_specs_0_cmd *req0; @@ -3220,7 +3245,7 @@ static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) { - struct hclgevf_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM]; + struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM]; int ret; int i; @@ -3235,7 +3260,7 @@ static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) { hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS, true); - desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT); + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); } hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS, true); @@ -3317,7 +3342,10 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) return ret; } - ret = 
hclgevf_cmd_init(hdev); + hclgevf_arq_init(hdev); + ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, + &hdev->fw_version, false, + hdev->reset_pending); if (ret) { dev_err(&pdev->dev, "cmd failed %d\n", ret); return ret; @@ -3363,11 +3391,14 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) if (ret) goto err_devlink_init; - ret = hclgevf_cmd_queue_init(hdev); + ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); if (ret) goto err_cmd_queue_init; - ret = hclgevf_cmd_init(hdev); + hclgevf_arq_init(hdev); + ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, + &hdev->fw_version, false, + hdev->reset_pending); if (ret) goto err_cmd_init; @@ -3467,7 +3498,7 @@ err_misc_irq_init: hclgevf_state_uninit(hdev); hclgevf_uninit_msi(hdev); err_cmd_init: - hclgevf_cmd_uninit(hdev); + hclge_comm_cmd_uninit(hdev->ae_dev, false, &hdev->hw.hw); err_cmd_queue_init: hclgevf_devlink_uninit(hdev); err_devlink_init: @@ -3491,7 +3522,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) hclgevf_uninit_msi(hdev); } - hclgevf_cmd_uninit(hdev); + hclge_comm_cmd_uninit(hdev->ae_dev, false, &hdev->hw.hw); hclgevf_devlink_uninit(hdev); hclgevf_pci_uninit(hdev); hclgevf_uninit_mac_list(hdev); @@ -3703,7 +3734,7 @@ static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - return test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); } static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index f6f736c0091c..20db6edab306 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -32,21 +32,6 @@ #define HCLGEVF_VECTOR_REG_OFFSET 0x4 #define HCLGEVF_VECTOR_VF_OFFSET 0x100000 -/* bar registers for cmdq */ -#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000 -#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004 -#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008 -#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010 -#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014 -#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018 -#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701C -#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020 -#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024 -#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028 - -#define HCLGEVF_CMDQ_INTR_EN_REG 0x27108 -#define HCLGEVF_CMDQ_INTR_GEN_REG 0x2710C - /* bar registers for common func */ #define HCLGEVF_GRO_EN_REG 0x28000 #define HCLGEVF_RXD_ADV_LAYOUT_EN_REG 0x28008 @@ -86,10 +71,6 @@ #define HCLGEVF_TQP_INTR_GL2_REG 0x20300 #define HCLGEVF_TQP_INTR_RL_REG 0x20900 -/* Vector0 interrupt CMDQ event source register(RW) */ -#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100 -/* Vector0 interrupt CMDQ event status register(RO) */ -#define HCLGEVF_VECTOR0_CMDQ_STATE_REG 0x27104 /* CMDQ register bits for RX event(=MBX event) */ #define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1 /* RST register bits for RESET event */ @@ -133,6 +114,11 @@ #define HCLGEVF_STATS_TIMER_INTERVAL 36U +#define hclgevf_read_dev(a, reg) \ + hclge_comm_read_reg((a)->hw.io_base, reg) +#define hclgevf_write_dev(a, reg, value) \ + hclge_comm_write_reg((a)->hw.io_base, reg, value) + enum hclgevf_evt_cause { HCLGEVF_VECTOR0_EVENT_RST, HCLGEVF_VECTOR0_EVENT_MBX, @@ -154,7 +140,6 @@ enum hclgevf_states { HCLGEVF_STATE_RST_HANDLING, HCLGEVF_STATE_MBX_SERVICE_SCHED, HCLGEVF_STATE_MBX_HANDLING, - HCLGEVF_STATE_CMD_DISABLE, 
HCLGEVF_STATE_LINK_UPDATING, HCLGEVF_STATE_PROMISC_CHANGED, HCLGEVF_STATE_RST_FAIL, @@ -173,12 +158,9 @@ struct hclgevf_mac { }; struct hclgevf_hw { - void __iomem *io_base; - void __iomem *mem_base; + struct hclge_comm_hw hw; int num_vec; - struct hclgevf_cmq cmq; struct hclgevf_mac mac; - void *hdev; /* hchgevf device it is part of */ }; /* TQP stats */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index c5ac6ecf36e1..d5e0a3f762f7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -53,7 +53,8 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, } while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) { - if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, + &hdev->hw.hw.comm_state)) return -EIO; usleep_range(HCLGEVF_SLEEP_USECOND, HCLGEVF_SLEEP_USECOND * 2); @@ -97,7 +98,7 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u8 *resp_data, u16 resp_len) { struct hclge_mbx_vf_to_pf_cmd *req; - struct hclgevf_desc desc; + struct hclge_desc desc; int status; req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; @@ -151,9 +152,9 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw) { - u32 tail = hclgevf_read_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG); + u32 tail = hclgevf_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG); - return tail == hw->cmq.crq.next_to_use; + return tail == hw->hw.cmq.crq.next_to_use; } static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev, @@ -212,14 +213,15 @@ static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev, void hclgevf_mbx_handler(struct hclgevf_dev *hdev) { struct hclge_mbx_pf_to_vf_cmd *req; - struct hclgevf_cmq_ring *crq; - struct hclgevf_desc *desc; + struct hclge_comm_cmq_ring *crq; + struct hclge_desc *desc; u16 flag; - crq = &hdev->hw.cmq.crq; + crq = &hdev->hw.hw.cmq.crq; while (!hclgevf_cmd_crq_empty(&hdev->hw)) { - if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) { + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, + &hdev->hw.hw.comm_state)) { dev_info(&hdev->pdev->dev, "vf crq need init\n"); return; } @@ -269,7 +271,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) } /* Write back CMDQ_RQ header pointer, M7 need this pointer */ - hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CRQ_HEAD_REG, + hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, crq->next_to_use); } @@ -296,7 +298,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) /* process all the async queue messages */ while (tail != hdev->arq.head) { - if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) { + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, + &hdev->hw.hw.comm_state)) { dev_info(&hdev->pdev->dev, "vf crq need init in async\n"); return; |